repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tvm | tvm-main/tests/python/contrib/test_tensorrt_int8_exp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import os
import numpy as np
try:
# See issue #9362.
import torch
except:
pass
import tvm
import tvm.testing
from tvm import relay
from tvm.contrib.download import download_testdata
from tvm.relay.op.contrib.tensorrt import partition_for_tensorrt
from tvm.relay.op.contrib import tensorrt
def skip_codegen_test():
    """Return True (with an explanation) when TensorRT codegen cannot run here."""
    cuda_available = tvm.runtime.enabled("cuda") and tvm.cuda(0).exist
    if not cuda_available:
        print("Skip because CUDA is not enabled.")
        return True
    if not tensorrt.is_tensorrt_compiler_enabled():
        print("Skip because TensorRT compiler is not available.")
        return True
    print("TensorRT compiler is available!")
    return False
def skip_runtime_test():
    """Return True (with an explanation) when the TensorRT runtime cannot run here."""
    cuda_available = tvm.runtime.enabled("cuda") and tvm.cuda(0).exist
    if not cuda_available:
        print("Skip because CUDA is not enabled.")
        return True
    if not tensorrt.is_tensorrt_runtime_enabled():
        print("Skip because TensorRT runtime is not available.")
        return True
    print("TensorRT runtime is available!")
    return False
def test_trt_int8():
    """
    This Function is used to use tensorrt int8 to compile a resnet34 model,
    and compare cosine distance between the output of the original model and trt int8 tvm output
    """
    if skip_codegen_test() or skip_runtime_test():
        return

    # Fix: the original bare `except:` clauses also swallowed SystemExit and
    # KeyboardInterrupt; only a missing package should trigger the skip path.
    try:
        from PIL import Image
        from scipy.spatial import distance
    except ImportError:
        print("please install scipy and Image python packages")
        return

    try:
        import torch
        import torchvision
        from torchvision import transforms
    except ImportError:
        print("please install pytorch python package")
        return

    # Enable the TensorRT int8 path and request 10 calibration passes.
    os.environ["TVM_TENSORRT_USE_INT8"] = "1"
    os.environ["TENSORRT_NUM_CALI_INT8"] = "10"
    model_name = "resnet34"
    model = getattr(torchvision.models, model_name)(pretrained=True)
    model = model.eval()

    # We grab the TorchScripted model via tracing
    input_shape = [1, 3, 224, 224]
    input_data = torch.randn(input_shape)
    scripted_model = torch.jit.trace(model, input_data).eval()

    img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
    img_path = download_testdata(img_url, "cat.png", module="data")
    img = Image.open(img_path).resize((224, 224))
    my_preprocess = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    img = my_preprocess(img)
    img = np.expand_dims(img, 0)

    input_name = "input0"
    shape_list = [(input_name, img.shape)]
    mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)

    # compile the model
    target = "cuda"
    dev = tvm.cuda()
    mod = partition_for_tensorrt(mod, params)
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target=target, params=params)
    gen_module = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))

    # Feed the calibration image repeatedly so TensorRT can build its int8
    # calibration table before the engine is finalized.
    num_cali_int8 = int(os.environ["TENSORRT_NUM_CALI_INT8"])
    if num_cali_int8 != 0:
        print("start calibrating data ... ")
        for _ in range(num_cali_int8):
            tvm_data = tvm.nd.array(img)
            gen_module.set_input(input_name, tvm_data)
            gen_module.run(data=tvm_data)
        print("finished calibrating data ... ")

    # get output of tvm model
    print("rebuild engine and test to run ... ")
    tvm_data = tvm.nd.array(img)
    gen_module.set_input(input_name, tvm_data)
    gen_module.run(data=tvm_data)
    out = gen_module.get_output(0)

    # check output of tvm and output of pytorch model are equal
    torch_data = torch.from_numpy(img)
    model = scripted_model.eval()
    torch_output = model(torch_data)
    cosine_distance_res = distance.cosine(out.numpy(), torch_output.detach().cpu().numpy())
    assert cosine_distance_res <= 0.01

    # Evaluate
    print("Evaluate inference time cost...")
    ftimer = gen_module.module.time_evaluator("run", dev, repeat=10, min_repeat_ms=500)
    prof_res = np.array(ftimer().results) * 1e3  # convert to millisecond
    message = "Mean inference time (std dev): %.2f ms (%.2f ms)" % (
        np.mean(prof_res),
        np.std(prof_res),
    )
    print(message)
# Entry point for running this file directly; dispatches via tvm.testing.
if __name__ == "__main__":
    tvm.testing.main()
| 5,224 | 32.280255 | 96 | py |
tvm | tvm-main/tests/python/contrib/test_rocblas.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
import numpy as np
import tvm.topi.testing
import tvm.testing
from tvm.contrib import rocblas
@tvm.testing.requires_rocm
def test_matmul():
    """Check rocBLAS dense matmul against a numpy reference on a ROCm device."""
    rows, inner, cols = 1024, 128, 235
    A = te.placeholder((rows, inner), name="A")
    B = te.placeholder((inner, cols), name="B")
    C = rocblas.matmul(A, B)
    s = te.create_schedule(C.op)

    def verify(target="rocm"):
        # Skip when the rocBLAS packed function was not registered at build time.
        if not tvm.get_global_func("tvm.contrib.rocblas.matmul", True):
            print("skip because extern function is not available")
            return
        dev = tvm.rocm(0)
        f = tvm.build(s, [A, B, C], target)
        a = tvm.nd.array(np.random.uniform(size=(rows, inner)).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=(inner, cols)).astype(B.dtype), dev)
        c = tvm.nd.array(np.zeros((rows, cols), dtype=C.dtype), dev)
        f(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), np.dot(a.numpy(), b.numpy()), rtol=1e-5)

    verify()
def verify_batch_matmul(batch, m, k, n, lib, transa=False, transb=False, dtype="float32"):
    """Build ``lib.batch_matmul`` for the requested transpose layout and compare
    the device result with the topi numpy reference."""
    ashape = (batch, k, m) if transa else (batch, m, k)
    bshape = (batch, n, k) if transb else (batch, k, n)
    A = te.placeholder(ashape, name="A", dtype=dtype)
    B = te.placeholder(bshape, name="B", dtype=dtype)
    C = lib.batch_matmul(A, B, transa, transb)
    s = te.create_schedule(C.op)

    def reference(a_np, b_np):
        # topi's reference wants A as (batch, m, k) and B as (batch, n, k).
        if transa:
            a_np = a_np.transpose(0, 2, 1)
        if not transb:
            b_np = b_np.transpose(0, 2, 1)
        return tvm.topi.testing.batch_matmul(a_np, b_np)

    def run_check(target="rocm"):
        if not tvm.testing.device_enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func(lib.__name__ + ".batch_matmul", True):
            print("skip because extern function is not available")
            return
        dev = tvm.rocm(0)
        fn = tvm.build(s, [A, B, C], target)
        a_arr = tvm.nd.array(np.random.uniform(size=ashape).astype(A.dtype), dev)
        b_arr = tvm.nd.array(np.random.uniform(size=bshape).astype(B.dtype), dev)
        c_arr = tvm.nd.array(np.zeros((batch, m, n), dtype=C.dtype), dev)
        fn(a_arr, b_arr, c_arr)
        tvm.testing.assert_allclose(
            c_arr.numpy(), reference(a_arr.numpy(), b_arr.numpy()), rtol=1e-5
        )

    run_check()
@tvm.testing.requires_rocm
def test_batch_matmul():
    """Exercise rocBLAS batched matmul over several shapes and every
    transa/transb combination (same call sequence as the original list)."""
    shapes = [
        (128, 64, 512, 512),
        (128, 512, 512, 64),
        (128, 512, 64, 512),
        (128, 64, 128, 128),
        (128, 128, 128, 64),
    ]
    for batch, m, k, n in shapes:
        for transa in (False, True):
            for transb in (False, True):
                verify_batch_matmul(batch, m, k, n, rocblas, transa=transa, transb=transb)
# Allow running this file directly without pytest.
if __name__ == "__main__":
    test_matmul()
    test_batch_matmul()
| 4,848 | 41.911504 | 90 | py |
tvm | tvm-main/tests/python/contrib/test_sparse.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
import tvm.contrib.sparse as tvmsp
import tvm.runtime.ndarray as _nd
import numpy as np
from collections import namedtuple
def test_static_tensor():
    """Double the values of a CSR tensor with a statically-shaped data array.

    Fix: removed the unused local ``stype`` (the assertion reads ``A.stype``,
    the placeholder attribute, not the local).
    """
    dtype = "float32"
    target = "llvm"
    dev = tvm.device(target, 0)
    m = te.size_var("m")
    n = te.size_var("n")
    A = tvmsp.placeholder(shape=(m, n), name="A", dtype=dtype)
    assert A.stype == "csr"
    # Rebind n to a concrete size for the test data; the placeholder above
    # keeps its symbolic shape.
    n = 3
    a = np.maximum(np.random.uniform(size=(n, n)).astype(dtype) - 0.6, 0.0)
    a = tvmsp.array(a, dev)
    A.data = te.placeholder(a.data.shape, dtype, name="A_data")
    Ab = tvm.tir.decl_buffer(a.data.shape, dtype, name="A_data")
    binds = {A.data: Ab}
    # Scale only the non-zero CSR data; indices/indptr are reused unchanged.
    C = te.compute(A.data.shape, lambda i: A.data[i] * 2.0, tag="cs_scatter")
    s = te.create_schedule(C.op)
    f = tvm.build(s, [A.data, C], target, binds=binds)
    c = tvmsp.array(np.zeros((n, n), dtype), dev)
    c.data = tvm.nd.empty(a.data.shape, dtype)
    c.indices = a.indices
    c.indptr = a.indptr
    f(a.data, c.data)
    tvm.testing.assert_allclose(c.numpy(), a.numpy() * 2.0, rtol=1e-5)
def test_dynamic_tensor():
    """Double the values of a CSR tensor whose nonzero count is symbolic.

    Fix: removed the unused local ``stype`` (the assertion reads ``A.stype``).
    """
    dtype = "float32"
    target = "llvm"
    dev = tvm.device(target, 0)
    nr, nc, n = te.size_var("nr"), te.size_var("nc"), te.size_var("n")
    A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name="A", dtype=dtype)
    assert A.stype == "csr"
    C = te.compute(A.data.shape, lambda i: A.data[i] * 2.0, tag="cs_scatter")
    s = te.create_schedule(C.op)
    _nr, _nc = 3, 5
    a = np.maximum(np.random.uniform(size=(_nr, _nc)).astype(dtype) - 0.6, 0.0)
    a = tvmsp.array(a, dev)
    assert a.data.dtype == a.dtype
    # Bundle the three CSR buffers under one namespace for the bind map.
    Ab = namedtuple("CSRBuffer", ["data", "indices", "indptr"])
    Ab.data = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="A_data")
    Ab.indices = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="A_indices")
    binds = {A.data: Ab.data, A.indices: Ab.indices}
    f = tvm.build(s, [nr, A.data, C], target, binds=binds)
    c = tvmsp.array(np.zeros((_nr, _nc), dtype), dev)
    c.data = tvm.nd.empty(a.data.shape, dtype)
    c.indices = a.indices
    c.indptr = a.indptr
    f(a.data.shape[0], a.data, c.data)
    tvm.testing.assert_allclose(c.numpy(), a.numpy() * 2.0, rtol=1e-5)
def test_sparse_array_tuple():
    """Construct a CSR array from a (data, indices, indptr) tuple and double it.

    Fix: removed the unused local ``stype`` (the assertion reads ``A.stype``).
    """
    dtype, itype = "float32", "int32"
    target = "llvm"
    dev = tvm.device(target, 0)
    nr, nc, n = te.size_var("nr"), te.size_var("nc"), te.size_var("n")
    A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name="A", dtype=dtype)
    assert A.stype == "csr"
    C = te.compute(A.data.shape, lambda i: A.data[i] * 2.0, tag="cs_scatter")
    s = te.create_schedule(C.op)
    _nr, _nc = 3, 5
    a = np.maximum(np.random.uniform(size=(_nr, _nc)).astype(dtype) - 0.6, 0.0)
    # convert to sparse array tuple
    source_array = a
    ridx, cidx = np.nonzero(source_array)
    data = source_array[ridx, cidx]
    a_data = _nd.array(data, dev)
    indices = np.nonzero(source_array)[1].astype(itype)
    a_indices = _nd.array(indices, dev)
    # indptr[i] = number of nonzeros in all rows < i (classic CSR row pointer).
    indptr = [0] + np.apply_along_axis(np.count_nonzero, axis=1, arr=source_array).tolist()
    indptr = np.cumsum(np.array(indptr, itype)).astype(itype)
    a_indptr = _nd.array(indptr, dev)
    a_init = (a_data, a_indices, a_indptr)
    # construct tvm sparse array with tuple
    a = tvmsp.array(a_init, shape=source_array.shape, device=dev)
    assert a.data.dtype == a.dtype
    Ab = namedtuple("CSRBuffer", ["data", "indices", "indptr"])
    Ab.data = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="A_data")
    Ab.indices = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="A_indices")
    binds = {A.data: Ab.data, A.indices: Ab.indices}
    f = tvm.build(s, [nr, A.data, C], target, binds=binds)
    c = tvmsp.array(np.zeros((_nr, _nc), dtype), dev)
    c.data = tvm.nd.empty(a.data.shape, dtype)
    c.indices = a.indices
    c.indptr = a.indptr
    f(a.data.shape[0], a.data, c.data)
    tvm.testing.assert_allclose(c.numpy(), a.numpy() * 2.0, rtol=1e-5)
# Allow running this file directly without pytest.
if __name__ == "__main__":
    test_static_tensor()
    test_dynamic_tensor()
    test_sparse_array_tuple()
| 4,940 | 39.5 | 91 | py |
tvm | tvm-main/tests/python/contrib/test_amx.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition
import tvm
from tvm import relay
from tvm import te
import tvm.testing
from tvm.topi.x86.tensor_intrin import dot_32x128x32_u8s8s32_sapphirerapids
from tvm.topi.x86.tensor_intrin import acc_32x32_int32_sapphirerapids
import numpy as np
import pytest
@tvm.testing.requires_llvm
@pytest.mark.skip("skip due to AMX feature not avaliable yet")
def test_amx_u8s8s32_matmul_tensorize():
    """Tensorize a 1024x1024x1024 u8*s8->s32 matmul onto the Sapphire Rapids
    AMX tile intrinsics, check the generated asm contains the tile
    instructions, then verify numerics against a numpy reference."""
    m = 1024
    k = 1024
    n = 1024
    # --------------------------Config---------------------------
    # Skip this test if "-mcpu=sapphirerapids" not supported by LLVM < 12.0
    target = "llvm -mcpu=sapphirerapids"
    dev = tvm.device(target, 0)
    if not tvm.testing.device_enabled(target):
        print("skip because %s is not enabled..." % target)
        return
    amx_init = tvm.get_global_func("runtime.amx_init")
    amx_tileconfig = tvm.get_global_func("runtime.amx_tileconfig")
    assert amx_init()
    assert amx_tileconfig(16, 64)  # config tile size to 16 rows by 64 columns.
    # --------------------------Compute--------------------------
    X = te.placeholder((m, k), name="X", dtype="uint8")
    ak = te.reduce_axis((0, k), name="k")
    # Weights are consumed in the blocked NC16n4c layout the AMX intrinsic expects.
    packedW = te.placeholder((n // 16, k // 4, 16, 4), name="packedW", dtype="int8")
    C = te.compute(
        (m, n),
        lambda i, j: te.sum(
            X[i, ak].astype("int32")
            * packedW[tvm.tir.indexdiv(j, 16), tvm.tir.indexdiv(ak, 4), j % 16, ak % 4].astype(
                "int32"
            ),
            axis=ak,
        ),
        name="F",
    )
    # --------------------------Schedule--------------------------
    s = te.create_schedule(C.op)
    a_x, a_y = C.op.axis
    (a_k,) = C.op.reduce_axis
    # Accumulate in the AMX tile-register scope before writing back to C.
    CF = s.cache_write(C, "amx.tmm")
    a_xo, a_xi = s[C].split(a_x, factor=32)
    a_yo, a_yi = s[C].split(a_y, factor=32)
    s[C].reorder(a_xo, a_yo, a_xi, a_yi)
    s[CF].compute_at(s[C], a_yo)
    (a_k_f,) = CF.op.reduce_axis
    a_x_f, a_y_f = CF.op.axis
    # 32x32 output tiles with a 128-wide reduction chunk match the intrinsic shape.
    a_xo_f, a_xi_f = s[CF].split(a_x_f, factor=32)
    a_yo_f, a_yi_f = s[CF].split(a_y_f, factor=32)
    a_ko_f, a_ki_f = s[CF].split(a_k_f, factor=128)
    s[CF].reorder(a_ko_f, a_xo_f, a_yo_f, a_ki_f, a_xi_f, a_yi_f)
    s[CF].tensorize(a_ki_f, dot_32x128x32_u8s8s32_sapphirerapids(LDA=k))
    s[C].tensorize(a_xi, acc_32x32_int32_sapphirerapids(LDC=n))
    lib = tvm.build(s, [X, packedW, C], target, name="intrinsic")
    asm = lib.get_source("asm")
    # The tensorized kernel must emit the four AMX tile instructions.
    assert "tilezero" in asm
    assert "tileloaddt1" in asm
    assert "tdpbusd" in asm
    assert "tilestored" in asm
    # ----------------------- verify correctness --------------------------------
    # generate the plain data
    a = np.random.uniform(1, 10, size=(m, k)).astype("uint8")
    b = np.random.uniform(1, 10, size=(n, k)).astype("int8")
    packW = np.random.uniform(1, 10, size=(n // 16, k // 4, 16, 4)).astype("int8")
    # This should occurs in pre_pack (constant folding) stage,
    # from plain data to blocked data(NC16n4c)
    for i_n in range(n):
        for i_k in range(k):
            packW[i_n // 16][i_k // 4][i_n % 16][i_k % 4] = b[i_n][i_k]
    x = tvm.nd.array(a, dev)
    w = tvm.nd.array(packW, dev)
    y = tvm.nd.array(np.zeros((m, n), dtype="int32"), dev)
    t_evaluator = lib.time_evaluator(lib.entry_name, dev, number=100)
    result = t_evaluator(x, w, y)
    print(result)
    tvm.testing.assert_allclose(y.numpy(), np.dot(a.astype("int32"), b.T.astype("int32")), rtol=0)
@tvm.testing.requires_llvm
@pytest.mark.skip("skip due to AMX feature not avaliable yet")
def test_amx_check_support():
    """Verify the AMX runtime initialises and accepts a tile configuration."""
    init_fn = tvm.get_global_func("runtime.amx_init")
    tileconfig_fn = tvm.get_global_func("runtime.amx_tileconfig")
    assert init_fn()
    # 16 rows by 64 columns is the maximal Sapphire Rapids tile shape.
    assert tileconfig_fn(16, 64)
# Run the tests in this file through pytest when executed directly.
if __name__ == "__main__":
    pytest.main([__file__])
| 4,664 | 35.732283 | 98 | py |
tvm | tvm-main/tests/python/contrib/test_onnx.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Relay to ONNX serialization test cases"""
import pytest
pytest.importorskip("onnx")
pytest.importorskip("onnxruntime")
import numpy as np
import onnxruntime as rt
import tvm
from tvm import relay
from tvm.contrib.target.onnx import to_onnx
from tvm.relay.testing import run_infer_type
def func_to_onnx(func, name):
    """Wrap a single Relay function in an IRModule and return the serialized ONNX bytes."""
    module = tvm.IRModule()
    module["main"] = func
    model = to_onnx(module, {}, name, path=None)
    return model.SerializeToString()
def run_onnx(onnx_model, input_data):
    """Execute a serialized ONNX model with onnxruntime and return all outputs."""
    sess = rt.InferenceSession(onnx_model)
    # Pair session inputs with the provided arrays positionally.
    feed = {meta.name: value for meta, value in zip(sess.get_inputs(), input_data)}
    fetch = [out.name for out in sess.get_outputs()]
    return sess.run(fetch, feed)
def run_relay(func, data_tuple, is_dyn=False):
    """Evaluate a Relay function on LLVM and return its outputs as numpy arrays.

    Dynamic-shape functions go through the VM executor; static ones use graph.
    """
    dev = tvm.device("llvm", 0)
    executor_kind = "vm" if is_dyn else "graph"
    outputs = relay.create_executor(executor_kind, device=dev, target="llvm").evaluate(func)(
        *data_tuple
    )
    if not isinstance(outputs, list):
        outputs = [outputs]
    return [out.numpy() for out in outputs]
def verify_results(relay_func, indata, test_name, rtol=1e-7, atol=0, is_dyn=False):
    """Assert that Relay execution matches the ONNX-exported model's outputs."""
    expected = run_relay(relay_func, indata, is_dyn)
    actual = run_onnx(func_to_onnx(relay_func, test_name), indata)
    for exp, act in zip(expected, actual):
        np.testing.assert_allclose(exp, act, rtol=rtol, atol=atol)
def test_add():
    """Elementwise add export."""
    dtype = "float32"
    lhs_ty = relay.TensorType((5, 10, 5))
    rhs_ty = relay.TensorType((5, 10, 5))
    lhs = relay.var("x", lhs_ty, dtype=dtype)
    rhs = relay.var("y", rhs_ty, dtype=dtype)
    func = relay.Function([lhs, rhs], relay.add(lhs, rhs))
    lhs_np = np.random.rand(5, 10, 5).astype(dtype)
    rhs_np = np.random.rand(5, 10, 5).astype(dtype)
    verify_results(func, [lhs_np, rhs_np], "test_add")
def test_bias_add():
    """bias_add export in both float16 and float32 (fp16 gets a looser tolerance)."""
    xshape = (10, 2, 3, 4)
    bshape = (2,)
    for dtype in ("float16", "float32"):
        tol = 1e-2 if dtype == "float16" else 1e-5
        data = relay.var("x", shape=xshape, dtype=dtype)
        bias = relay.var("bias", shape=bshape, dtype=dtype)
        func = relay.Function([data, bias], relay.nn.bias_add(data, bias))
        data_np = np.random.uniform(size=xshape).astype(dtype)
        bias_np = np.random.uniform(size=bshape).astype(dtype)
        verify_results(func, [data_np, bias_np], "test_bias_add", rtol=tol)
def test_conv2d():
    """conv2d export: depthwise, grouped, plain, padded and dilated variants."""

    def verify_conv2d(
        dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs
    ):
        x = relay.var("x", shape=dshape, dtype=dtype)
        w = relay.var("w", shape=kshape, dtype=dtype)
        y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs)
        func = relay.Function([x, w], y)
        data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
        kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
        verify_results(func, [data, kernel], "test_conv2d", rtol=1e-5, atol=1e-5, is_dyn=True)

    # (data shape, kernel shape, extra conv2d attributes) — same order as before.
    cases = [
        # depthwise
        ((1, 32, 18, 18), (32, 1, 3, 3),
         dict(padding=(1, 1), channels=32, groups=32, kernel_size=(3, 3))),
        # grouped
        ((1, 32, 18, 18), (32, 4, 3, 3),
         dict(padding=(1, 1), channels=32, groups=8, kernel_size=(3, 3))),
        # grouped with channel multiplier
        ((1, 32, 18, 18), (64, 1, 3, 3),
         dict(padding=(1, 1), channels=64, groups=32, kernel_size=(3, 3))),
        # normal conv2d, two padding settings
        ((1, 3, 224, 224), (10, 3, 3, 3),
         dict(padding=(1, 1), channels=10, kernel_size=(3, 3))),
        ((1, 3, 224, 224), (10, 3, 3, 3),
         dict(padding=(2, 2), channels=10, kernel_size=(3, 3))),
        # dilated
        ((1, 3, 18, 18), (10, 3, 3, 3),
         dict(padding=(1, 1), channels=10, kernel_size=(3, 3), dilation=(3, 3))),
        ((1, 3, 18, 18), (10, 3, 2, 2),
         dict(padding=(2, 2), channels=10, kernel_size=(2, 2), dilation=(1, 1))),
        # larger kernel (duplicate entry mirrors the original list)
        ((1, 3, 18, 18), (10, 3, 4, 4),
         dict(padding=(1, 1), channels=10, kernel_size=(4, 4))),
        ((1, 3, 18, 18), (10, 3, 4, 4),
         dict(padding=(1, 1), channels=10, kernel_size=(4, 4))),
    ]
    for dshape, kshape, attrs in cases:
        verify_conv2d("float32", 1, dshape, kshape, **attrs)
def test_conv2d_transpose():
    """Conv2d_Transpose unit tests."""

    def verify_conv2d_transpose(
        dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs
    ):
        x = relay.var("x", shape=dshape, dtype=dtype)
        w = relay.var("w", shape=kshape, dtype=dtype)
        y = relay.nn.conv2d_transpose(
            x, w, padding=padding, dilation=dilation, groups=groups, **attrs
        )
        func = relay.Function([x, w], y)
        data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
        kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
        verify_results(func, [data, kernel], "test_conv2d_transpose", rtol=1e-5, atol=1e-5)

    # (data shape, kernel shape, extra attributes) — same order as before.
    cases = [
        ((1, 3, 224, 224), (3, 10, 3, 3),
         dict(padding=(1, 1), channels=10, kernel_size=(3, 3))),
        ((1, 3, 224, 224), (3, 10, 3, 3),
         dict(padding=(2, 2), channels=10, kernel_size=(3, 3))),
        ((1, 3, 18, 18), (3, 10, 2, 2),
         dict(padding=(2, 2), channels=10, kernel_size=(2, 2), dilation=(1, 1))),
        # duplicate entry mirrors the original list
        ((1, 3, 18, 18), (3, 10, 4, 4),
         dict(padding=(1, 1), channels=10, kernel_size=(4, 4))),
        ((1, 3, 18, 18), (3, 10, 4, 4),
         dict(padding=(1, 1), channels=10, kernel_size=(4, 4))),
    ]
    for dshape, kshape, attrs in cases:
        verify_conv2d_transpose("float32", 1, dshape, kshape, **attrs)
def test_reshape():
    """reshape export, including 0 (copy dim) and -1 (infer dim) codes."""

    def check(shape, newshape):
        data = relay.var("x", relay.TensorType(shape, "float32"))
        func = relay.Function([data], relay.reshape(data, newshape=newshape))
        data_np = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        verify_results(func, [data_np], "test_reshape", rtol=1e-5, atol=1e-5)

    # newshape entries are int64 scalars, matching the original tuples.
    for target_shape in ([4, 2, 3], [2, 0, 0], [0, -1], [-1, 0]):
        check((2, 3, 4), tuple(np.array(target_shape, dtype=np.int64)))
def test_transpose():
    """transpose (axis permutation) export.

    Fix: the inner helper was copy-pasted as ``verify_reshape`` even though it
    builds and checks a transpose; renamed for clarity (local name only).
    """

    def verify_transpose(shape, axes):
        x = relay.var("x", relay.TensorType(shape, "float32"))
        z = relay.transpose(x, axes)
        func = relay.Function([x], z)
        x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        verify_results(func, [x_data], "test_transpose", rtol=1e-5, atol=1e-5)

    verify_transpose((1, 2, 3, 4), (0, 2, 3, 1))
    verify_transpose((1, 2, 3, 4), (0, 3, 2, 1))
def test_dense():
    """dense (matmul with transposed weight) export."""

    def check(d_shape, w_shape):
        data = relay.var("data", relay.TensorType(d_shape, "float32"))
        weight = relay.var("weight", relay.TensorType(w_shape, "float32"))
        func = relay.Function([data, weight], relay.nn.dense(data, weight))
        d_np = np.random.uniform(size=d_shape).astype("float32")
        w_np = np.random.uniform(size=w_shape).astype("float32")
        verify_results(func, [d_np, w_np], "test_dense", rtol=1e-5, atol=1e-5)

    check((1, 8), (16, 8))
    check((1, 4), (3, 4))
def test_max_pool():
    """max_pool2d export."""

    def check(x_shape, pool_size, strides, padding, ceil_mode):
        data = relay.var("x", relay.TensorType(x_shape, "float32"))
        pooled = tvm.relay.nn.max_pool2d(
            data, pool_size=pool_size, strides=strides, padding=padding, ceil_mode=ceil_mode
        )
        func = relay.Function([data], pooled)
        x_np = np.random.uniform(size=x_shape).astype("float32")
        verify_results(func, [x_np], "test_max_pool", rtol=1e-5, atol=1e-5)

    check((1, 4, 16, 16), pool_size=(2, 2), strides=(2, 2), padding=(0, 0), ceil_mode=False)
def test_batch_flatten():
    """batch_flatten export."""

    def check(d_shape):
        data = relay.var("data", relay.TensorType(d_shape, "float32"))
        func = relay.Function([data], relay.nn.batch_flatten(data))
        x_np = np.random.uniform(size=d_shape).astype("float32")
        verify_results(func, [x_np], "test_batch_flatten", rtol=1e-5, atol=1e-5)

    check((1, 2, 3, 4))
    check((1, 8))
def test_batch_norm():
    """batch_norm export over channel axes 1 and 3, in fp16 and fp32."""

    def verify_batch_norm(axis=1):
        for dtype in ("float16", "float32"):
            data = relay.var("data", relay.TensorType((2, 4, 4, 1), dtype))
            # Per-channel parameter shape follows the normalization axis.
            gamma_shape = (data.type_annotation.shape[axis].value,)
            beta = relay.var("beta", relay.TensorType(gamma_shape, dtype))
            gamma = relay.var("gamma", relay.TensorType(gamma_shape, dtype))
            moving_mean = relay.var("moving_mean", relay.TensorType(gamma_shape, dtype))
            moving_var = relay.var("moving_var", relay.TensorType(gamma_shape, dtype))
            out = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var, axis=axis)
            # batch_norm returns a tuple; only the normalized output is checked.
            func = relay.Function([data, gamma, beta, moving_mean, moving_var], out[0])
            x_np = np.random.uniform(size=(2, 4, 4, 1)).astype(dtype)
            beta_np = np.random.uniform(size=gamma_shape).astype(dtype)
            gamma_np = np.random.uniform(size=gamma_shape).astype(dtype)
            mean_np = np.random.uniform(size=gamma_shape).astype(dtype)
            var_np = np.random.uniform(size=gamma_shape).astype(dtype)
            verify_results(
                func,
                [x_np, gamma_np, beta_np, mean_np, var_np],
                "test_batch_norm",
                rtol=1e-1,
                atol=1e-1,
            )

    verify_batch_norm(axis=1)
    verify_batch_norm(axis=3)
def test_pad():
    """Pad unit test."""

    def verify_pad():
        dshape = (4, 10, 7, 7)
        data = relay.var("x", shape=dshape, dtype="int32")
        padded = relay.nn.pad(data, ((1, 1), (2, 2), (3, 3), (4, 4)))
        func = run_infer_type(relay.Function([data], padded))
        x_np = np.random.randint(low=-255, high=255, size=dshape).astype(np.int32)
        verify_results(func, [x_np], "test_pad", rtol=1e-5, atol=1e-5)

    verify_pad()
def test_sofmax():
    """softmax export (the public function name's typo is kept for callers)."""

    def check_softmax():
        for dtype in ("float32",):
            shape = (10, 4)
            data = relay.var("x", shape=shape, dtype=dtype)
            func = relay.Function([data], relay.nn.softmax(data, axis=1))
            x_np = np.random.uniform(size=shape).astype(dtype)
            verify_results(func, [x_np], "test_softmax", rtol=1e-5, atol=1e-5)

    check_softmax()
def test_squeeze():
    """squeeze export with explicit axes and with axis=None (drop all 1-dims)."""

    def check(shape, dtype, axis):
        data = relay.var("x", relay.TensorType(shape, dtype))
        func = relay.Function([data], relay.squeeze(data, axis=axis))
        x_np = np.random.random_sample(shape).astype(dtype)
        verify_results(func, [x_np], "test_squeeze", rtol=1e-5, atol=1e-5)

    check((1, 3, 2, 5), "float32", None)
    check((1, 3, 1), "float32", [2])
    check((1, 2, 1, 2, 1), "float32", [0, 2])
def test_mean():
    """mean reduction export across axis/exclude/keepdims combinations."""

    def check(data_shape, axis, exclude, keepdims):
        dtype = "float32"
        data = relay.var("x", shape=data_shape, dtype=dtype)
        func = relay.Function([data], relay.mean(data, axis, keepdims, exclude))
        x_np = np.random.uniform(size=data_shape).astype(dtype)
        verify_results(func, [x_np], "test_mean", rtol=1e-5, atol=1e-5)

    for shape, axis, exclude, keepdims in (
        ((1, 2), 0, False, False),
        ((1, 2), 0, True, False),
        ((1, 2), 0, True, True),
        ((1, 2), 1, True, True),
        ((3, 2, 1), 1, False, True),
    ):
        check(shape, axis, exclude, keepdims)
def test_split():
    """split export by section count and by explicit split indices."""

    def check(dshape, indices_or_sections, axis=None):
        data = relay.var("x", relay.ty.TensorType(dshape, "float32"))
        pieces = relay.split(data, indices_or_sections, axis=axis)
        # split returns a TupleWrapper; export it as one tuple-valued function.
        func = relay.Function([data], pieces.astuple())
        x_np = np.random.uniform(size=dshape).astype("float32")
        verify_results(func, [x_np], "test_split", rtol=1e-5, atol=1e-5)

    check((5, 5, 2, 2), 5, axis=1)
    check((5, 5, 2, 2), 5, axis=0)
    check((5, 5, 2, 2), [1, 3, 4], axis=0)
    check((5, 5, 2, 2), [1, 3, 4], axis=1)
def test_concatenate():
    """concatenate export over mixed ranks, sizes and axes."""

    def check(shapes, axis, dtype="float32"):
        args = [
            relay.var("x" + str(idx), relay.ty.TensorType(shape, dtype))
            for idx, shape in enumerate(shapes)
        ]
        feeds = [np.random.uniform(size=shape).astype(dtype) for shape in shapes]
        func = relay.Function(args, relay.concatenate(args, axis))
        verify_results(func, feeds, "test_concatenate", rtol=1e-5, atol=1e-5)

    check([(2,), (2,), (2,)], -1)
    check([(2, 3, 4), (2, 2, 4), (2, 5, 4)], 1)
    check([(1, 2, 4), (1, 2, 3), (1, 2, 7), (1, 2, 8), (1, 2, 1)], -1)
    check([(5, 6, 7, 3), (16, 6, 7, 3), (12, 6, 7, 3), (8, 6, 7, 3), (2, 6, 7, 3)], 0)
    check([(1, 14400), (1, 2400), (1, 640), (1, 240)], 1)
def test_strided_slice():
    """strided_slice export in both "end" and "size" slice modes."""

    def check(dshape, begin, end, strides, mode):
        data = relay.var("x", relay.TensorType(dshape, "float32"))
        # "size" mode ignores strides, so drop them before building the op.
        if mode == "size":
            strides = None
        sliced = relay.strided_slice(data, begin=begin, end=end, strides=strides, slice_mode=mode)
        func = relay.Function([data], sliced)
        x_np = np.random.uniform(size=dshape).astype("float32")
        verify_results(func, [x_np], "test_strided_slice", rtol=1e-5, atol=1e-5)

    # (begin, end, strides) combinations, all over a (3, 4, 3) input —
    # same order as the original explicit call list.
    cases = [
        ([1, 1, 0], [4, 2, 3], None),
        ([1, -1, 0], [4, -1, 3], [1, 2]),
        ([1], [4, -3], None),
        ([0, 0, 0], [4, -5, 4], [1, -1, 2]),
        ([1, 1, 0], [4, 4, -3], [2, 1, 1]),
        ([1, -1, 0], [4, -5, 3], [2, -1, 1]),
        ([1, 0, 0], [2, 2, 3], [1, 1, 2]),
        ([1, -1, 0], [2, -3, 3], [1, -1, 1]),
        ([1, 1, 0], [4, 1000, 3], None),
        ([1, 1, 0], [4, 4], None),
        ([1, 1], [4, 4, 3], None),
        ([1, 1], [4, 4, 3], [1, 1, 2]),
    ]
    for mode in ("end", "size"):
        for begin, end, strides in cases:
            check((3, 4, 3), begin, end, strides, mode)
def test_cmp_type():
    """Exercise comparison-op export with broadcastable operand shapes."""
    # The numpy counterparts are unused here; verify_results compares the relay
    # and exported runtimes against each other.
    for make_op in (relay.greater, relay.less, relay.equal):
        lhs_shape = (10, 4)
        rhs_shape = (5, 10, 1)
        lhs_ty = relay.TensorType(lhs_shape)
        rhs_ty = relay.TensorType(rhs_shape)
        lhs = relay.var("x", lhs_ty)
        rhs = relay.var("y", rhs_ty)
        fn = relay.Function([lhs, rhs], make_op(lhs, rhs))
        lhs_np = np.random.rand(*lhs_shape).astype(lhs_ty.dtype)
        rhs_np = np.random.rand(*rhs_shape).astype(rhs_ty.dtype)
        verify_results(fn, [lhs_np, rhs_np], "test_cmp_type", rtol=1e-5, atol=1e-5)
def test_unary_identity():
    """Exercise zeros_like/ones_like export across several dtypes.

    The numpy counterpart in each pair is unused: ``verify_results`` compares
    the relay and exported runtimes against each other, not against numpy.
    """
    shape = (8, 9, 4)
    for dtype in ["int16", "float32", "float64"]:
        for op in (relay.zeros_like, relay.ones_like):
            x = relay.var("x", relay.TensorType(shape, dtype))
            func = relay.Function([x], op(x))
            x_data = np.random.rand(*shape).astype(dtype)
            # Fixed: the result label was copy-pasted as "test_cmp_type".
            verify_results(func, [x_data], "test_unary_identity", rtol=1e-5, atol=1e-5)
def test_binary_op():
    """Exercise elementwise binary arithmetic export for float32 operands."""

    def check(make_op, dtype):
        shape = (5, 10, 5)
        lhs = relay.var("x", relay.TensorType(shape), dtype=dtype)
        rhs = relay.var("y", relay.TensorType(shape), dtype=dtype)
        fn = relay.Function([lhs, rhs], make_op(lhs, rhs))
        lhs_np = np.random.rand(*shape).astype(dtype)
        rhs_np = np.random.rand(*shape).astype(dtype)
        verify_results(fn, [lhs_np, rhs_np], "test_binary_op", rtol=1e-5, atol=1e-5)

    # The numpy counterparts are unused; verify_results does the comparison.
    for make_op in (relay.add, relay.subtract, relay.multiply, relay.divide):
        check(make_op, "float32")
def test_tuple_types():
    """Exercise export of functions that produce and consume tuple values."""

    def check(shape, sections, axis=None, dtype="float32"):
        inp = relay.var("x", relay.ty.TensorType(shape, dtype))
        data = np.random.uniform(size=shape).astype(dtype)

        # split -> concatenate round trip returning a single tensor.
        parts = relay.split(inp, sections, axis=axis)
        joined = relay.concatenate(parts, axis=axis)
        verify_results(
            relay.Function([inp], joined), [data], "test_tuple_types", rtol=1e-5, atol=1e-5
        )

        # A function returning a raw tuple (split of the concatenated tensor).
        resplit = relay.split(joined, sections, axis=axis)
        verify_results(
            relay.Function([inp], resplit.astuple()),
            [data],
            "test_tuple_types",
            rtol=1e-5,
            atol=1e-5,
        )

        # An explicitly constructed tuple of derived tensors.
        pair = relay.Tuple([parts[0] + parts[1], parts[0] - parts[1]])
        verify_results(
            relay.Function([inp], pair), [data], "test_tuple_types", rtol=1e-5, atol=1e-5
        )

        # Concatenating an explicit tuple back into one tensor.
        verify_results(
            relay.Function([inp], relay.concatenate(pair, axis=axis)),
            [data],
            "test_tuple_types",
            rtol=1e-5,
            atol=1e-5,
        )

    check((5, 5, 2, 2), 5, axis=1)
    check((5, 5, 2, 2), 5, axis=0)
    check((5, 5, 2, 2), [1, 3, 4], axis=0)
    check((5, 5, 2, 2), [1, 3, 4], axis=1)
def test_layout_transform():
    """Exercise layout_transform export between NCHW and NHWC."""

    def check(shape, src, dst, dtype="float32"):
        inp = relay.var("x", relay.ty.TensorType(shape, dtype))
        fn = relay.Function([inp], relay.layout_transform(inp, src, dst))
        data = np.random.uniform(size=shape).astype(dtype)
        verify_results(fn, [data], "test_layout_transform", rtol=1e-5, atol=1e-5)

    check((1, 3, 8, 8), "NCHW", "NHWC")
    check((1, 8, 8, 3), "NHWC", "NCHW")
def test_clip():
    """Exercise clip export with two different value ranges."""

    def check(shape, lo, hi, dtype="float32"):
        inp = relay.var("x", relay.ty.TensorType(shape, dtype))
        fn = relay.Function([inp], relay.clip(inp, lo, hi))
        data = np.random.uniform(size=shape).astype(dtype)
        verify_results(fn, [data], "test_clip", rtol=1e-5, atol=1e-5)

    check((5, 5, 2, 5), 0, 0.2)
    check((5, 5, 2, 5), 0.2, 0.5)
def test_expand_dims():
    """Exercise expand_dims export at different axes and newaxis counts."""

    def check(shape, axis, num_newaxis, dtype="float32"):
        inp = relay.var("x", relay.ty.TensorType(shape, dtype))
        fn = relay.Function([inp], relay.expand_dims(inp, axis, num_newaxis))
        data = np.random.uniform(size=shape).astype(dtype)
        verify_results(fn, [data], "test_expand_dims", rtol=1e-5, atol=1e-5)

    check((1, 1001), 0, 2)
    check((1, 1, 1001), 2, 2)
def test_lrn():
    """LRN unit test."""

    def check(shape, size, dtype="float32"):
        inp = relay.var("x", relay.ty.TensorType(shape, dtype))
        out = relay.nn.lrn(inp, size=size, axis=1, alpha=1.0, beta=1.0, bias=1.0)
        fn = relay.Function([inp], out)
        data = np.random.uniform(size=shape).astype(dtype)
        verify_results(fn, [data], "test_lrn", rtol=1e-5, atol=1e-5)

    for shape in ((1, 1, 480, 640), (1, 3, 224, 224)):
        for size in (1, 3):
            check(shape, size)
def test_sigmoid():
    """Sigmoid unit test."""

    def check(shape, dtype="float32"):
        inp = relay.var("x", relay.ty.TensorType(shape, dtype))
        fn = relay.Function([inp], relay.sigmoid(inp))
        data = np.random.uniform(size=shape).astype(dtype)
        verify_results(fn, [data], "test_sigmoid", rtol=1e-4, atol=1e-4)

    for shape in ((1, 3, 480, 640), (1, 3, 224, 224)):
        check(shape)
def test_copy():
    """Copy unit test."""

    def check(shape, dtype="float32"):
        inp = relay.var("x", relay.ty.TensorType(shape, dtype))
        fn = relay.Function([inp], relay.copy(inp))
        data = np.random.uniform(size=shape).astype(dtype)
        verify_results(fn, [data], "test_copy", rtol=1e-4, atol=1e-4)

    for shape in ((1, 3, 480, 640), (1, 3, 224, 224)):
        check(shape)
def test_round():
    """Round unit test."""

    def check(shape, dtype="float32"):
        inp = relay.var("x", relay.ty.TensorType(shape, dtype))
        fn = relay.Function([inp], relay.round(inp))
        data = np.random.uniform(size=shape).astype(dtype)
        verify_results(fn, [data], "test_round", rtol=1e-4, atol=1e-4)

    for shape in ((1, 3, 480, 640), (1, 3, 224, 224)):
        check(shape)
def test_cast():
    """Cast unit test."""

    def check(shape, dtype):
        inp = relay.var("x", relay.ty.TensorType(shape, "float32"))
        fn = relay.Function([inp], relay.cast(inp, dtype))
        data = np.random.uniform(size=shape).astype("float32")
        verify_results(fn, [data], "test_cast", rtol=1e-4, atol=1e-4)

    for shape in ((1, 3, 480, 640), (1, 3, 224, 224)):
        for out_dtype in ("int8", "int16", "uint8", "uint16"):
            check(shape, out_dtype)
@pytest.mark.xfail(reason="Known failing test. See issue #12567.")
def test_resize():
    """Resize unit test."""

    def check(shape, outsize, method, coord_trans, rounding_method, dtype="float32"):
        inp = relay.var("x", relay.ty.TensorType(shape, dtype))
        out = relay.image.resize2d(
            inp,
            outsize,
            None,
            layout="NCHW",
            method=method,
            coordinate_transformation_mode=coord_trans,
            rounding_method=rounding_method,
        )
        fn = relay.Function([inp], out)
        data = np.random.uniform(size=shape).astype(dtype)
        verify_results(fn, [data], "test_resize", rtol=1e-4, atol=1e-4)

    methods = ["nearest_neighbor", "linear", "cubic"]
    transforms = ["half_pixel", "align_corners", "asymmetric"]
    roundings = ["round", "floor", "ceil"]
    in_shape = (1, 3, 480, 640)

    # Downsample
    for m in methods:
        for c in transforms:
            for r in roundings:
                # Skip combinations the exporter does not support.
                if (m == "nearest_neighbor" and c == "align_corners") or (
                    m == "cubic" and c in ["half_pixel", "align_corners"]
                ):
                    continue
                check(in_shape, (240, 320), method=m, coord_trans=c, rounding_method=r)

    # Upsample
    for m in methods:
        for c in transforms:
            for r in roundings:
                if (m == "nearest_neighbor" and c == "align_corners") or (m == "cubic"):
                    continue
                check(in_shape, (960, 1280), method=m, coord_trans=c, rounding_method=r)
def test_dyn():
    """Dynamic unit test."""

    def check_bcast(lhs_shape, rhs_shape, dtype):
        # Build vars whose every dimension is dynamic (relay.Any).
        lhs = relay.var("x", shape=tuple(relay.Any() for _ in lhs_shape), dtype=dtype)
        rhs = relay.var("y", shape=tuple(relay.Any() for _ in rhs_shape), dtype=dtype)
        fn = relay.Function([lhs, rhs], relay.add(lhs, rhs))
        lhs_np = np.random.uniform(size=lhs_shape).astype(dtype)
        rhs_np = np.random.uniform(size=rhs_shape).astype(dtype)
        verify_results(
            fn, [lhs_np, rhs_np], "test_dyn_bcast", rtol=1e-5, atol=1e-5, is_dyn=True
        )

    check_bcast((1, 3, 32, 1), (1, 3, 1, 3), "float32")
    check_bcast((1, 13), (4, 3, 5, 1), "float32")
if __name__ == "__main__":
    # Run the full exporter test suite when invoked as a script.
    for _case in (
        test_add,
        test_bias_add,
        test_conv2d,
        test_conv2d_transpose,
        test_reshape,
        test_transpose,
        test_dense,
        test_max_pool,
        test_batch_flatten,
        test_batch_norm,
        test_pad,
        test_mean,
        test_split,
        test_concatenate,
        test_sofmax,
        test_squeeze,
        test_strided_slice,
        test_cmp_type,
        test_binary_op,
        test_tuple_types,
        test_layout_transform,
        test_clip,
        test_expand_dims,
        test_lrn,
        test_sigmoid,
        test_copy,
        test_round,
        test_cast,
        test_resize,
        test_dyn,
    ):
        _case()
| 26,622 | 34.262252 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_onnx_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Relay to ONNX target test cases"""
import pytest
pytest.importorskip("onnx")
pytest.importorskip("onnxruntime")
from collections import OrderedDict
import numpy as np
import onnxruntime as rt
import tvm
from tvm import relay
from tvm.contrib.target.onnx import to_onnx
import tvm.relay.testing
from tvm.relay.op.annotation import compiler_begin, compiler_end
from tvm.ir import IRModule
from tvm.relay import transform
def func_to_onnx(mod, params, name):
    """Convert a relay module to a serialized ONNX model and return the bytes."""
    model = to_onnx(mod, params, name, path=None)
    return model.SerializeToString()
def run_onnx(mod, params, name, input_data):
    """Convert the module to ONNX, run it under onnxruntime, return output 0."""
    sess = rt.InferenceSession(func_to_onnx(mod, params, name))
    # Pair the session's declared inputs with the provided arrays, in order.
    feed = {meta.name: arr for meta, arr in zip(sess.get_inputs(), input_data)}
    fetches = [meta.name for meta in sess.get_outputs()]
    return sess.run(fetches, feed)[0]
def get_data(in_data_shapes, dtype="float32"):
    """Build an ordered name -> random-array mapping for the given shapes."""
    return OrderedDict(
        (name, np.random.uniform(size=shape).astype(dtype))
        for name, shape in in_data_shapes.items()
    )
def run_relay(mod, params, in_data):
    """Execute the relay module on the CPU graph executor; return a numpy array."""
    dev = tvm.device("llvm", 0)
    args = [tvm.nd.array(value) for value in in_data.values()]
    executor = relay.create_executor("graph", mod, device=dev, target="llvm")
    return executor.evaluate()(*args, **params).numpy()
def _verify_results(mod, params, in_data):
    """Check that relay execution and exported-ONNX execution agree."""
    expected = run_relay(mod, params, in_data)
    actual = run_onnx(mod, params, "test_resent", in_data.values())
    np.testing.assert_allclose(expected, actual, rtol=1e-7, atol=1e-7)
def test_resnet():
    """Compare relay vs exported-ONNX execution for several ResNet depths."""
    in_data = get_data(OrderedDict({"data": (1, 3, 224, 224)}), dtype="float32")
    for depth in (18, 34, 50, 101):
        mod, params = tvm.relay.testing.resnet.get_workload(1, 1000, num_layers=depth)
        _verify_results(mod, params, in_data)
def test_squeezenet():
    """Compare relay vs exported-ONNX execution for both SqueezeNet versions."""
    in_data = get_data(OrderedDict({"data": (1, 3, 224, 224)}), dtype="float32")
    for version in ("1.0", "1.1"):
        mod, params = tvm.relay.testing.squeezenet.get_workload(1, version=version)
        _verify_results(mod, params, in_data)
@pytest.mark.skip("USE_TARGET_ONNX should be ON")
def test_partition():
    """Build a hand-annotated graph with interleaved "onnx" and "default"
    regions, partition it, and verify the compiled artifact embeds both an
    llvm module and an onnx module.
    """
    in_1 = relay.var("in_1", shape=(10, 10), dtype="float32")
    in_2 = relay.var("in_2", shape=(10, 10), dtype="float32")
    in_3 = relay.var("in_3", shape=(10, 10), dtype="float32")
    in_4 = relay.var("in_4", shape=(10, 10), dtype="float32")
    in_5 = relay.var("in_5", shape=(10, 10), dtype="float32")
    in_6 = relay.var("in_6", shape=(10, 10), dtype="float32")
    in_7 = relay.var("in_7", shape=(10, 10), dtype="float32")
    in_8 = relay.var("in_8", shape=(10, 10), dtype="float32")
    in_9 = relay.var("in_9", shape=(10, 10), dtype="float32")
    in_10 = relay.var("in_10", shape=(10, 10), dtype="float32")
    # First "onnx" region: two adds feeding a third add.
    begin0 = compiler_begin(in_1, "onnx")
    begin1 = compiler_begin(in_2, "onnx")
    begin2 = compiler_begin(in_3, "onnx")
    begin3 = compiler_begin(in_4, "onnx")
    node0 = relay.add(begin0, begin1)
    node1 = relay.add(begin2, begin3)
    end0 = compiler_end(node0, "onnx")
    end1 = compiler_end(node1, "onnx")
    begin4 = compiler_begin(end0, "onnx")
    begin5 = compiler_begin(end1, "onnx")
    node2 = relay.add(begin4, begin5)
    end2 = compiler_end(node2, "onnx")
    # "default"-target region: chained subtractions kept on the host.
    dbegin0 = compiler_begin(in_5, "default")
    dbegin1 = compiler_begin(in_6, "default")
    node3 = relay.subtract(dbegin0, dbegin1)
    dbegin2 = compiler_begin(in_7, "default")
    dend1 = compiler_end(node3, "default")
    dbegin3 = compiler_begin(dend1, "default")
    node4 = relay.subtract(dbegin2, dbegin3)
    dend2 = compiler_end(node4, "default")
    # Join the onnx and default regions; node5 is consumed by two successors,
    # hence the two compiler_end markers (end3 and end4).
    begin6 = compiler_begin(end2, "onnx")
    begin7 = compiler_begin(dend2, "onnx")
    node5 = relay.add(begin6, begin7)
    end3 = compiler_end(node5, "onnx")
    end4 = compiler_end(node5, "onnx")
    dbegin4 = compiler_begin(in_8, "default")
    dbegin5 = compiler_begin(end3, "default")
    node6 = relay.subtract(dbegin4, dbegin5)
    begin8 = compiler_begin(in_9, "onnx")
    begin9 = compiler_begin(end4, "onnx")
    node7 = relay.multiply(begin8, begin9)
    end5 = compiler_end(node7, "onnx")
    dend3 = compiler_end(node6, "default")
    # Final "onnx" region combining both branches.
    begin10 = compiler_begin(dend3, "onnx")
    begin11 = compiler_begin(end5, "onnx")
    node8 = relay.add(begin10, begin11)
    end6 = compiler_end(node8, "onnx")
    begin12 = compiler_begin(in_10, "onnx")
    begin13 = compiler_begin(end6, "onnx")
    node9 = relay.add(begin12, begin13)
    end7 = compiler_end(node9, "onnx")
    func = relay.Function([in_1, in_2, in_3, in_4, in_5, in_6, in_7, in_8, in_9, in_10], end7)
    target = "llvm"
    mod = IRModule.from_expr(func)
    mod = transform.PartitionGraph()(mod)
    # FuseOps is disabled so the partitioned subgraphs survive compilation.
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["FuseOps"]):
        graph_json, mod1, params = relay.build(mod, target)
    # The top-level module carries metadata; its imports are the per-target
    # modules produced by partitioning.
    assert mod1.type_key == "metadata"
    assert mod1.imported_modules[0].type_key == "llvm"
    assert mod1.imported_modules[0].get_source()
    assert mod1.imported_modules[1].type_key == "onnx"
    assert mod1.imported_modules[1].get_source()
if __name__ == "__main__":
    # test_partition needs USE_TARGET_ONNX to be ON
    for _case in (test_resnet, test_squeezenet, test_partition):
        _case()
| 6,232 | 35.238372 | 94 | py |
tvm | tvm-main/tests/python/contrib/test_rpc_tracker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import logging
import numpy as np
import time
import multiprocessing
from tvm import rpc
def check_server_drop():
    """test when server drops

    Spins up a tracker, a proxy, and several RPC servers, then exercises the
    tracker's fault tolerance: unknown request keys, stale worker entries,
    and session timeouts. Skips entirely when tornado is unavailable.
    """
    try:
        from tvm.rpc import tracker, proxy, base
        from tvm.rpc.base import TrackerCode
        @tvm.register_func("rpc.test2.addone")
        def addone(x):
            return x + 1
        def _put(tclient, value):
            # Send a raw tracker command and consume the reply.
            base.sendjson(tclient._sock, value)
            base.recvjson(tclient._sock)
        tserver = tracker.Tracker("127.0.0.1", 8888)
        tproxy = proxy.Proxy("127.0.0.1", 8881, tracker_addr=("127.0.0.1", tserver.port))
        tclient = rpc.connect_tracker("127.0.0.1", tserver.port)
        server0 = rpc.Server(
            "127.0.0.1", port=9099, tracker_addr=("127.0.0.1", tserver.port), key="abc"
        )
        server1 = rpc.Server(
            "127.0.0.1", port=9099, tracker_addr=("127.0.0.1", tserver.port), key="xyz"
        )
        server2 = rpc.Server("127.0.0.1", tproxy.port, is_proxy=True, key="xyz")
        server3 = rpc.Server("127.0.0.1", tproxy.port, is_proxy=True, key="xyz1")
        # Fault tolerance to un-handled requested value
        _put(tclient, [TrackerCode.REQUEST, "abc", "", 1])
        _put(tclient, [TrackerCode.REQUEST, "xyz1", "", 1])
        # Fault tolerance to stale worker value
        _put(tclient, [TrackerCode.PUT, "xyz", (server1.port, "abc")])
        _put(tclient, [TrackerCode.PUT, "xyz", (server1.port, "abcxxx")])
        _put(tclient, [TrackerCode.PUT, "xyz", (tproxy.port, "abcxxx11")])
        # Fault tolerance on server session timeout
        def check_timeout(timeout, sleeptime):
            # Sleep before using the session so a short timeout can expire.
            def myfunc(remote):
                time.sleep(sleeptime)
                f1 = remote.get_function("rpc.test2.addone")
                assert f1(10) == 11
            try:
                tclient.request_and_run("xyz", myfunc, session_timeout=timeout)
            except RuntimeError:
                # Expected when the session times out before myfunc finishes.
                pass
            print(tclient.text_summary())
            try:
                remote = tclient.request("xyz", priority=0, session_timeout=timeout)
                remote2 = tclient.request("xyz", session_timeout=timeout)
                time.sleep(sleeptime)
                f1 = remote.get_function("rpc.test2.addone")
                assert f1(10) == 11
                f1 = remote2.get_function("rpc.test2.addone")
                assert f1(10) == 11
            except tvm.error.TVMError as e:
                # Timed-out sessions raise; the tracker must stay usable after.
                pass
            remote3 = tclient.request("abc")
            f1 = remote3.get_function("rpc.test2.addone")
            remote3 = tclient.request("xyz1")
            f1 = remote3.get_function("rpc.test2.addone")
            assert f1(10) == 11
        check_timeout(0.01, 0.1)
        check_timeout(2, 0)
        tserver.terminate()
        server0.terminate()
        server1.terminate()
        server2.terminate()
        server3.terminate()
        tproxy.terminate()
    except ImportError:
        print("Skip because tornado is not available")
if __name__ == "__main__":
    # Verbose logging so tracker/server lifecycle events are visible.
    logging.basicConfig(level=logging.INFO)
    check_server_drop()
| 3,930 | 36.084906 | 89 | py |
tvm | tvm-main/tests/python/contrib/test_tedd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import tvm
from tvm import te
from tvm import topi
from tvm import relay
from tvm.relay import testing
from tvm.relay.backend import Runtime, Executor
def findany(pattern, str):
    """Assert that the regex ``pattern`` matches at least once inside ``str``."""
    found = re.search(pattern, str) is not None
    assert found, "Pattern not found.\nPattern: " + pattern + "\nString: " + str
def checkdependency():
    """Return True when both the graphviz and ipython packages are installed."""
    import pkg_resources

    installed = {pkg.key for pkg in pkg_resources.working_set}
    return {"graphviz", "ipython"} <= installed
def test_dfg():
    """Render the dataflow graph of a softmax schedule and check its edges."""
    inp = te.placeholder((1024, 4096), dtype="float32", name="A")
    out = topi.nn.softmax(inp)
    # confirm lower works
    sched = te.create_schedule([out.op])

    def verify():
        from tvm.contrib import tedd

        dot = tedd.viz_dataflow_graph(sched, False, "", True)
        # Every stage/tensor edge of the softmax dataflow must be present.
        expected = [
            r"digraph \"Dataflow Graph\"",
            r"Stage_0:O_0 -> Tensor_0_0",
            r"Tensor_0_0 -> Stage_1:I_0",
            r"Stage_1:O_0 -> Tensor_1_0",
            r"Tensor_0_0 -> Stage_2:I_0",
            r"Tensor_1_0 -> Stage_2:I_1",
            r"Stage_2:O_0 -> Tensor_2_0",
            r"Tensor_2_0 -> Stage_3:I_0",
            r"Stage_3:O_0 -> Tensor_3_0",
            r"Tensor_2_0 -> Stage_4:I_0",
            r"Tensor_3_0 -> Stage_4:I_1",
            r"Stage_4:O_0 -> Tensor_4_0",
        ]
        for pattern in expected:
            findany(pattern, dot)

    if checkdependency():
        verify()
def test_itervar_relationship_graph():
    """Render the itervar relationship graph of a split reduction schedule."""
    n = te.var("n")
    m = te.var("m")
    A = te.placeholder((n, m), name="A")
    k = te.reduce_axis((0, m), "k")
    B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")
    sched = te.create_schedule(B.op)
    sched[B].split(B.op.reduce_axis[0], factor=16)

    def verify():
        from tvm.contrib import tedd

        dot = tedd.viz_itervar_relationship_graph(sched, False, "", True)
        expected = [
            r"digraph \"IterVar Relationship Graph\"",
            r"subgraph cluster_legend",
            # Subgraphs for the two stages.
            r"subgraph cluster_Stage_0",
            r"subgraph cluster_Stage_1",
            # Itervars together with their types.
            r"\(kDataPar\)\<br/\>T.Range\(0, n\)",
            r"\(kCommReduce\)\<br/\>T.Range\(0, m\)",
            # The split node itself.
            r"Split_Relation_1_0 +.+\>Split",
            # All edges into and out of the split node.
            r"IterVar_1_1:itervar -> Split_Relation_1_0:Input",
            r"Split_Relation_1_0:Outer -> IterVar_1_2:itervar",
            r"Split_Relation_1_0:Inner -> IterVar_1_3:itervar",
        ]
        for pattern in expected:
            findany(pattern, dot)

    if checkdependency():
        verify()
def test_schedule_tree():
    """Render the schedule tree for a schedule that combines cache_read,
    vectorize, reorder, split, rfactor, compute_at, and thread binding,
    then check the rendered dot output."""
    block_x = te.thread_axis("blockIdx.x")
    thread_x = te.thread_axis("threadIdx.x")
    n = te.var("n")
    m = te.var("m")
    l = te.var("l")
    A = te.placeholder((n, m, l), name="A")
    B = te.compute((n, m, l), lambda bi, bj, bk: A[bi, bj, bk] + 1, name="B")
    r = te.reduce_axis((0, m), "r")
    C = te.compute(
        (
            n,
            m,
        ),
        lambda ci, cj: te.sum(B[ci, cj, r], axis=r),
        name="C",
    )
    s = te.create_schedule(C.op)
    # Stage A.shared: cached copy of A in shared memory scope.
    s.cache_read(A, "shared", [B])
    s[B].vectorize(B.op.axis[-1])
    s[C].reorder(C.op.reduce_axis[0], C.op.axis[0])
    _, ki = s[C].split(C.op.reduce_axis[0], factor=16)
    # rfactor creates Cr; attach it under C's last axis.
    Cr = s.rfactor(C, ki)
    s[Cr].compute_at(s[C], s[C].op.axis[-1])
    s[C].bind(s[C].op.axis[0], block_x)
    s[C].bind(s[C].op.axis[1], thread_x)
    def verify():
        from tvm.contrib import tedd
        str = tedd.viz_schedule_tree(s, False, "", True)
        findany(r"digraph \"Schedule Tree\"", str)
        findany(r"subgraph cluster_legend", str)
        # Check the A_shared stage, including memory scope, itervars,
        # and compute
        findany(
            r"Stage_1.*A\.shared<br/>Scope: shared.+>0.+>"
            r"ax0.*\(kDataPar\).+>1.+ax1.*\(kDataPar\).+>2.+>ax2.*\(kDataPar\).+>"
            r"\[A[\[\(]ax0, ax1, ax2[\)\]]\]",
            str,
        )
        # Check itervars of types different from KDataPar
        findany(r"bk.*\(kVectorized\)", str)
        findany(r"r.outer.*\(kCommReduce\)", str)
        findany(r"label=ROOT", str)
        # Check the compute_at edge
        findany(r"Stage_1.*\[color\=\"\#000000\"\]", str)
    if checkdependency():
        verify()
@tvm.testing.requires_llvm
def test_tedd_with_schedule_record():
    """Test to build a nn model and check if all schedules could be generated"""
    def check_schedule(executor):
        # Try rendering every recorded schedule of every converted op;
        # collect the indices that fail per function.
        from tvm.contrib import tedd
        error = {}
        for func_name, func_meta in executor.function_metadata.items():
            # check converted op only
            if "main" not in func_name:
                primfunc = list(func_meta.relay_primfuncs.values())[0]
                schs = primfunc.attrs["schedule"].schedule_record
                for index in range(len(schs)):
                    try:
                        sch = schs[index].normalize()
                        tedd.viz_dataflow_graph(sch, False, "", True)
                        tedd.viz_itervar_relationship_graph(sch, False, "", True)
                        tedd.viz_schedule_tree(sch, False, "", True)
                    except:
                        # Record the failing schedule index; assert at the end
                        # so all failures are reported together.
                        if func_name not in error:
                            error[func_name] = []
                        error[func_name].append(index)
        assert error == {}, str(error)
    if checkdependency():
        relay_mod, params = testing.mobilenet.get_workload(batch_size=1, dtype="float32")
        target_llvm = tvm.target.Target("llvm")
        # Keep intermediate schedules so tedd has something to render.
        config = {"te.keep_schedule_record": True}
        with tvm.transform.PassContext(opt_level=3, config=config):
            aot_executor_factory = relay.build(
                relay_mod,
                target_llvm,
                runtime=Runtime("cpp"),
                executor=Executor("aot"),
                params=params,
            )
            graph_executor_factory = relay.build(
                relay_mod,
                target_llvm,
                params=params,
            )
        check_schedule(aot_executor_factory)
        check_schedule(graph_executor_factory)
if __name__ == "__main__":
    for _case in (
        test_dfg,
        test_itervar_relationship_graph,
        test_schedule_tree,
        test_tedd_with_schedule_record,
    ):
        _case()
| 7,188 | 34.068293 | 92 | py |
tvm | tvm-main/tests/python/contrib/test_coreml_codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
from unittest import mock
import tvm
import tvm.testing
from tvm import relay
from tvm.relay import transform
from tvm.contrib.target import coreml as _coreml
# Pytest marker that skips a test unless the "coremltools" package is importable.
requires_coremltools = tvm.testing.requires_package("coremltools")
def _has_xcode():
    """Return True when the Xcode ``xcrun`` tool can be invoked."""
    try:
        tvm.contrib.xcode.xcrun([])
    except FileNotFoundError:
        return False
    return True
def _create_graph():
    """Build a small two-input module whose main computes ``y*y - (x + x)``."""
    shape = (10, 10)
    x = relay.var("x", shape=shape)
    y = relay.var("y", shape=shape)
    body = (y * y) - (x + x)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x, y], body)
    return mod
def _create_graph_annotated():
    """Build the expected result of partitioning ``_create_graph`` for the
    coremlcompiler target: two extracted "onnx-style" subfunctions plus a main
    that calls them."""
    shape = (10, 10)
    target = "coremlcompiler"
    mod = tvm.IRModule()
    # function 0: the y*y subgraph, marked for the coremlcompiler codegen.
    f0_i0 = relay.var(target + "_0_i0", shape=shape)
    func0 = relay.Function([f0_i0], f0_i0 * f0_i0)
    func0 = func0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    func0 = func0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
    func0 = func0.with_attr("Compiler", target)
    func0 = func0.with_attr("global_symbol", target + "_0")
    gv0 = relay.GlobalVar(target + "_0")
    mod[gv0] = func0
    # function 2: the x+x subgraph, likewise marked for the external codegen.
    f2_i0 = relay.var(target + "_2_i0", shape=shape)
    func2 = relay.Function([f2_i0], f2_i0 + f2_i0)
    func2 = func2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    func2 = func2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
    func2 = func2.with_attr("Compiler", target)
    func2 = func2.with_attr("global_symbol", target + "_2")
    gv2 = relay.GlobalVar(target + "_2")
    mod[gv2] = func2
    mod = relay.transform.InferType()(mod)
    # body: main stitches the two extracted functions back together.
    x = relay.var("x", shape=shape)
    y = relay.var("y", shape=shape)
    func = relay.Function([x, y], gv0(y) - gv2(x))
    mod["main"] = func
    mod = relay.transform.InferType()(mod)
    return mod
@pytest.mark.xfail(
    reason="Currently failing test. See tracking issue https://github.com/apache/tvm/issues/8901"
)
@tvm.testing.uses_gpu
@requires_coremltools
def test_annotate():
    """Partitioning for coremlcompiler must match the hand-annotated module."""
    annotated = transform.AnnotateTarget("coremlcompiler")(_create_graph())
    partitioned = transform.PartitionGraph()(annotated)
    assert tvm.ir.structural_equal(partitioned, _create_graph_annotated(), map_free_vars=True)
@pytest.mark.skipif(not _has_xcode(), reason="Xcode is not available")
@tvm.testing.uses_gpu
@requires_coremltools
def test_compile_and_run():
    """Build the annotated graph and check its numeric output on the CPU."""
    dev = tvm.cpu()
    tol = 1e-3
    with relay.build_config(opt_level=3):
        lib = relay.build(_create_graph_annotated(), target="llvm")
    module = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))

    shape = (10, 10)
    x_np = np.random.rand(*shape).astype("float32")
    y_np = np.random.rand(*shape).astype("float32")
    module.set_input("x", x_np)
    module.set_input("y", y_np)
    module.run()
    result = module.get_output(0, tvm.nd.empty(shape, device=dev))

    # Mirrors the graph built by _create_graph_annotated: y*y - (x + x).
    expected = (y_np * y_np) - (x_np + x_np)
    tvm.testing.assert_allclose(result.numpy(), expected, rtol=tol, atol=tol)
@mock.patch("tvm.contrib.coreml_runtime.create")
@mock.patch("tvm.contrib.xcode.compile_coreml")
def _construct_model(func, m1, m2):
    """Partition ``func`` for coremlcompiler and run codegen on each subgraph.

    The coreml runtime and Xcode compiler are mocked, so only the codegen
    path itself is exercised.
    """
    mod = tvm.IRModule()
    mod["main"] = func
    mod = transform.PartitionGraph()(transform.AnnotateTarget("coremlcompiler")(mod))
    fcompile = tvm._ffi.get_global_func("relay.ext.coremlcompiler")
    for _, subfunc in mod.functions.items():
        attrs = subfunc.attrs
        if attrs and "Compiler" in attrs and attrs["Compiler"] == "coremlcompiler":
            fcompile(subfunc)
@tvm.testing.uses_gpu
@requires_coremltools
def test_add():
    """Codegen a single elementwise add."""
    inp = relay.var("x", shape=(10, 10))
    _construct_model(relay.Function([inp], inp + inp))
@tvm.testing.uses_gpu
@requires_coremltools
def test_multiply():
    """Codegen a single elementwise multiply."""
    inp = relay.var("x", shape=(10, 10))
    _construct_model(relay.Function([inp], inp * inp))
@tvm.testing.uses_gpu
@requires_coremltools
def test_clip():
    """Codegen a clip to the [0, 1] range."""
    inp = relay.var("x", shape=(10, 10))
    _construct_model(relay.Function([inp], relay.clip(inp, a_min=0.0, a_max=1.0)))
@tvm.testing.uses_gpu
@requires_coremltools
def test_batch_flatten():
    """Codegen a batch_flatten over a rank-3 input."""
    inp = relay.var("x", shape=(10, 10, 10))
    _construct_model(relay.Function([inp], relay.nn.batch_flatten(inp)))
@tvm.testing.uses_gpu
@requires_coremltools
def test_expand_dims():
    """Codegen expand_dims at both the leading and the trailing axis."""
    inp = relay.var("x", shape=(10, 10))
    for axis in (0, -1):
        _construct_model(relay.Function([inp], relay.expand_dims(inp, axis=axis)))
@tvm.testing.uses_gpu
@requires_coremltools
def test_relu():
    """Codegen a single ReLU."""
    inp = relay.var("x", shape=(10, 10))
    _construct_model(relay.Function([inp], relay.nn.relu(inp)))
@tvm.testing.uses_gpu
@requires_coremltools
def test_softmax():
    """Codegen a softmax along axis 1."""
    inp = relay.var("x", shape=(10, 10))
    _construct_model(relay.Function([inp], relay.nn.softmax(inp, axis=1)))
@tvm.testing.uses_gpu
@requires_coremltools
def test_conv2d():
    """Codegen a strided 3x3 convolution with constant (zero) weights."""
    inp = relay.var("x", shape=(1, 3, 224, 224))
    weight = relay.const(np.zeros((16, 3, 3, 3), dtype="float32"))
    out = relay.nn.conv2d(inp, weight, strides=[2, 2], padding=[1, 1, 1, 1], kernel_size=[3, 3])
    _construct_model(relay.Function([inp], out))
@tvm.testing.uses_gpu
@requires_coremltools
def test_global_avg_pool2d():
    """Codegen a global average pooling over a rank-4 input."""
    inp = relay.var("x", shape=(10, 10, 10, 10))
    _construct_model(relay.Function([inp], relay.nn.global_avg_pool2d(inp)))
if __name__ == "__main__":
    for _case in (
        test_annotate,
        test_compile_and_run,
        test_add,
        test_multiply,
        test_clip,
        test_expand_dims,
        test_relu,
        test_batch_flatten,
        test_softmax,
        test_conv2d,
        test_global_avg_pool2d,
    ):
        _case()
| 6,815 | 25.940711 | 98 | py |
tvm | tvm-main/tests/python/contrib/test_miopen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.contrib import miopen
import numpy as np
import pytest
# Pytest marker that skips a test when TVM was built without MIOpen support
# (detected by probing for the registered conv2d setup packed function).
requires_miopen = pytest.mark.skipif(
    tvm.get_global_func("tvm.contrib.miopen.conv2d.setup", True) is None,
    reason="MIOpen is not enabled",
)
@tvm.testing.requires_rocm
@requires_miopen
def test_conv2d():
    """Compare the MIOpen conv2d forward kernel against the topi NCHW
    reference implementation on the rocm device."""
    # 3x3 convolution, stride 1, pad 1, no dilation, 1x3x128x128 input.
    in_channel = 3
    out_channel = 64
    filter_h = 3
    filter_w = 3
    pad_h = 1
    pad_w = 1
    stride_h = 1
    stride_w = 1
    dilation_h = 1
    dilation_w = 1
    xshape = [1, in_channel, 128, 128]
    wshape = (out_channel, in_channel, filter_h, filter_w)
    X = te.placeholder(xshape, name="X")
    W = te.placeholder(wshape, name="W")
    # NOTE(review): conv_mode / data_type are MIOpen enum values; their
    # meaning is defined in the tvm.contrib.miopen wrapper -- confirm there.
    Y = miopen.conv2d_forward(
        X, W, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, conv_mode=0, data_type=1
    )
    yshape = [x.value for x in Y.shape]
    from tvm import topi
    s = te.create_schedule(Y.op)
    def verify():
        # Build and run the MIOpen extern kernel ...
        dev = tvm.rocm(0)
        f = tvm.build(s, [X, W, Y], "rocm --host=llvm", name="conv2d")
        x = tvm.nd.array(np.random.uniform(-1, 1, xshape).astype(np.float32), dev)
        w = tvm.nd.array(np.random.uniform(-1, 1, wshape).astype(np.float32), dev)
        y = tvm.nd.array(np.random.uniform(-1, 1, yshape).astype(np.float32), dev)
        f(x, w, y)
        # ... then build the topi reference with a default schedule and
        # compare the two outputs on the same inputs.
        Y_ref = topi.nn.conv2d_nchw(
            X, W, (stride_h, stride_w), (pad_h, pad_w), (dilation_h, dilation_w)
        )
        s_ref = te.create_schedule(Y_ref.op)
        f_ref = tvm.build(s_ref, [X, W, Y_ref], "rocm --host=llvm")
        y_ref = tvm.nd.array(np.random.uniform(-1, 1, yshape).astype(np.float32), dev)
        f_ref(x, w, y_ref)
        print("Max abs diff:", np.max(np.abs(y.numpy() - y_ref.numpy())))
        tvm.testing.assert_allclose(y.numpy(), y_ref.numpy(), atol=1e-3)
    verify()
def verify_softmax(shape, axis, dtype="float32", log_softmax=False):
    """Build a MIOpen (log-)softmax kernel and compare it against the topi
    reference implementation on the rocm device.

    Parameters
    ----------
    shape : tuple of int
        2-D input shape.
    axis : int
        Softmax axis passed to the MIOpen op.
    dtype : str
        Element type of the input/output tensors.
    log_softmax : bool
        When True, exercise miopen.log_softmax instead of miopen.softmax.
    """
    miopen_op = miopen.log_softmax if log_softmax else miopen.softmax
    testing_op = (
        tvm.topi.testing.log_softmax_python if log_softmax else tvm.topi.testing.softmax_python
    )
    A = te.placeholder(shape, dtype=dtype, name="A")
    B = miopen_op(A, axis)
    s = te.create_schedule([B.op])
    dev = tvm.rocm(0)
    a_np = np.random.uniform(size=shape).astype(dtype)
    b_np = testing_op(a_np)
    a = tvm.nd.array(a_np, dev)
    # Bug fix: the output buffer used to be initialized with the expected
    # result (b_np), so a kernel that wrote nothing would still pass the
    # assert_allclose check below.  Start from zeros instead.
    b = tvm.nd.array(np.zeros_like(b_np), dev)
    f = tvm.build(s, [A, B], target="rocm --host=llvm", name="softmax")
    f(a, b)
    tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3)
def verify_softmax_4d(shape, dtype="float32", log_softmax=False):
    """Build a MIOpen (log-)softmax over the channel axis of an NCHW tensor
    and compare it against the topi reference implementation on rocm.

    Parameters
    ----------
    shape : tuple of int
        4-D NCHW input shape.
    dtype : str
        Element type of the input/output tensors.
    log_softmax : bool
        When True, exercise miopen.log_softmax instead of miopen.softmax.
    """
    miopen_op = miopen.log_softmax if log_softmax else miopen.softmax
    testing_op = (
        tvm.topi.testing.log_softmax_python if log_softmax else tvm.topi.testing.softmax_python
    )
    A = te.placeholder(shape, dtype=dtype, name="A")
    B = miopen_op(A, axis=1)
    s = te.create_schedule([B.op])
    dev = tvm.rocm(0)
    n, c, h, w = shape
    a_np = np.random.uniform(size=shape).astype(dtype)
    # Reference: softmax over channels, computed by flattening the spatial
    # dims into rows of length c, then restoring the NCHW layout.
    b_np = testing_op(a_np.transpose(0, 2, 3, 1).reshape(h * w, c))
    b_np = b_np.reshape(n, h, w, c).transpose(0, 3, 1, 2)
    a = tvm.nd.array(a_np, dev)
    # Bug fix: start the output from zeros instead of the expected values,
    # so a no-op kernel cannot trivially pass the comparison below.
    b = tvm.nd.array(np.zeros_like(b_np), dev)
    f = tvm.build(s, [A, B], target="rocm --host=llvm", name="softmax")
    f(a, b)
    tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3)
@tvm.testing.requires_rocm
@requires_miopen
def test_softmax():
    """Exercise MIOpen softmax and log_softmax over 2-D and 4-D shapes."""
    verify_softmax((32, 10), -1)
    verify_softmax((3, 4), -1)
    verify_softmax_4d((1, 16, 256, 256))
    verify_softmax_4d((1, 16, 256, 256))
    verify_softmax((32, 10), -1, log_softmax=True)
    verify_softmax((3, 4), -1, log_softmax=True)
    verify_softmax_4d((1, 16, 256, 256), log_softmax=True)
if __name__ == "__main__":
    # NOTE(review): only the conv2d test runs here; test_softmax is never
    # invoked when executing this file as a script (pytest still collects it).
    test_conv2d()
| 4,529 | 32.065693 | 96 | py |
tvm | tvm-main/tests/python/contrib/test_gemm_acc16.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition
import tvm
from tvm import te
import numpy as np
from tvm.topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int16
def benchmark_fc_int8_acc16():
    """Benchmark a 128x128x128 uint8 x int8 -> int16 dense layer tensorized
    with the AVX512 16x1x16 dot-product intrinsic, and check the result
    against a numpy reference."""
    m = 128
    n = 128
    k = 128
    X = te.placeholder((m, k), name="X", dtype="uint8")
    W = te.placeholder((n, k), name="W", dtype="int8")
    # NOTE(review): `peak` is a hard-coded theoretical-throughput constant
    # used only for the efficiency printout; its derivation is undocumented.
    peak = 512 / 16 * 2 * 2 * 2
    gops_per_mm = 2 * n * m * k
    print("Peak {} Gops/s \n".format(peak))
    def verify(target="llvm -mcpu=skylake-avx512"):
        if not tvm.runtime.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        dev = tvm.device(target, 0)
        X = te.placeholder((m, k), name="X", dtype="uint8")
        W = te.placeholder((n, k), name="W", dtype="int8")
        pc = dot_16x1x16_uint8_int8_int16()
        ak = te.reduce_axis((0, k), name="k")
        # The weight is consumed pre-packed: 128-column panels with pairs of
        # k-values interleaved in the innermost dimension, matching the
        # indexing expression in the compute below.
        packedW = te.placeholder((n // 128, 128 * (k // 2), 2), name="packedW", dtype="int8")
        t_fc = te.compute(
            (m, n),
            lambda i, j: te.sum(
                X[i, ak].astype("int16")
                * packedW[j // 128, (ak // 2) * 128 + j % 128, ak % 2].astype("int16"),
                axis=ak,
            ),
            name="F",
        )
        t_sch = te.create_schedule(t_fc.op)
        a_x, a_y = t_fc.op.axis
        (a_k,) = t_fc.op.reduce_axis
        a_yo, a_yi = t_sch[t_fc].split(a_y, factor=128)
        a_ko, a_ki = t_sch[t_fc].split(a_k, factor=2)
        a_xo, a_xi = t_sch[t_fc].split(a_x, factor=128)
        a_koo, a_koi = t_sch[t_fc].split(a_ko, factor=32)
        t_sch[t_fc].reorder(a_yo, a_xo, a_koo, a_xi, a_koi, a_yi, a_ki)
        # Map the innermost (a_yi, a_ki) loop nest onto the AVX512 intrinsic.
        t_sch[t_fc].tensorize(a_yi, pc)
        # print(tvm.lower(t_sch, [X, packedW, t_fc], simple_mode=True))
        t_func = tvm.build(t_sch, [X, packedW, t_fc], target, name="intrinsic")
        t_evaluator = t_func.time_evaluator(t_func.entry_name, dev, number=10)
        # generate the plain data
        a_ = np.random.uniform(1, 10, size=(m, k)).astype("uint8")
        b_ = np.random.uniform(1, 10, size=(n, k)).astype("int8")
        # Pack b_ into the layout expected by packedW.
        packW = np.random.uniform(1, 10, size=(n // 128, 128 * (k // 2), 2)).astype("int8")
        # This occurs in pre_compute stage
        for r_idx in range(n // 128):
            for s_idx in range(128 * (k // 2)):
                for t_idx in range(2):
                    packW[r_idx][s_idx][t_idx] = b_[r_idx * 128 + s_idx % 128][
                        s_idx // 128 * 2 + t_idx
                    ]
        x = tvm.nd.array(a_, dev)
        w = tvm.nd.array(packW, dev)
        y = tvm.nd.array(np.zeros((m, n), dtype="int16"), dev)
        result = t_evaluator(x, w, y)
        gops_per_sec = gops_per_mm / result.mean / 1e9
        # Correctness check against the unpacked numpy GEMM.
        tvm.testing.assert_allclose(y.numpy(), np.dot(a_, b_.T), rtol=1e-5)
        print(
            "Tensorization: running time: {:.3f} ms, {:.2f} Gops/s, effiency: {:.2f}.".format(
                result.mean * 1000, gops_per_sec, gops_per_sec / peak
            )
        )
        # t_func.export_library("gemm_tensorize.o")
    verify()
if __name__ == "__main__":
    # Benchmark script entry point; requires an AVX512-capable CPU.
    benchmark_fc_int8_acc16()
| 3,987 | 36.622642 | 94 | py |
tvm | tvm-main/tests/python/contrib/test_coreml_runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import rpc
from tvm.contrib import utils, xcode, coreml_runtime
import pytest
import os
# RPC proxy connection settings for the iOS device, overridable via env vars.
proxy_host = os.environ.get("TVM_IOS_RPC_PROXY_HOST", "127.0.0.1")
# Bug fix: os.environ values are strings, so the port used to be an int only
# when the env var was unset.  Normalize to int in both cases.
proxy_port = int(os.environ.get("TVM_IOS_RPC_PROXY_PORT", 9090))
destination = os.environ.get("TVM_IOS_RPC_DESTINATION", "")
key = "iphone"
@pytest.mark.skip("skip because coremltools is not available in CI")
def test_coreml_runtime():
    """End-to-end check of the TVM CoreML runtime: build a tiny two-op
    CoreML model, run it through both coremltools and the TVM
    coreml_runtime (locally and over iOS RPC), and compare the outputs."""
    import coremltools
    from coremltools.models.neural_network import NeuralNetworkBuilder

    def create_coreml_model():
        # Two inputs / two outputs:
        #   output0 = input0 + input1
        #   output1 = input0 * alpha   (alpha == 2)
        shape = (2,)
        alpha = 2
        inputs = [
            ("input0", coremltools.models.datatypes.Array(*shape)),
            ("input1", coremltools.models.datatypes.Array(*shape)),
        ]
        outputs = [
            ("output0", coremltools.models.datatypes.Array(*shape)),
            ("output1", coremltools.models.datatypes.Array(*shape)),
        ]
        builder = NeuralNetworkBuilder(inputs, outputs)
        builder.add_elementwise(
            name="Add", input_names=["input0", "input1"], output_name="output0", mode="ADD"
        )
        builder.add_elementwise(
            name="Mul", alpha=alpha, input_names=["input0"], output_name="output1", mode="MULTIPLY"
        )
        return coremltools.models.MLModel(builder.spec)

    def verify(coreml_model, model_path, dev):
        # Bug fix: this function used to rebuild the model here and shadow
        # the `coreml_model` argument, so the caller's model was silently
        # ignored.  Use the model that was passed in.
        out_spec = coreml_model.output_description._fd_spec
        out_names = [spec.name for spec in out_spec]
        # inference via coremltools
        inputs = {}
        for in_spec in coreml_model.input_description._fd_spec:
            name = in_spec.name
            shape = in_spec.type.multiArrayType.shape
            inputs[name] = np.random.random_sample(shape)
        coreml_outputs = [coreml_model.predict(inputs)[name] for name in out_names]
        # inference via tvm coreml runtime
        runtime = coreml_runtime.create("main", model_path, dev)
        for name in inputs:
            runtime.set_input(name, tvm.nd.array(inputs[name], dev))
        runtime.invoke()
        tvm_outputs = [runtime.get_output(i).numpy() for i in range(runtime.get_num_outputs())]
        for c_out, t_out in zip(coreml_outputs, tvm_outputs):
            np.testing.assert_almost_equal(c_out, t_out, 3)

    def check_remote(coreml_model):
        # Compile the model, ship it to the device via the iOS RPC app, and
        # verify over the remote device handle.
        temp = utils.tempdir()
        compiled_model = xcode.compile_coreml(coreml_model, out_dir=temp.temp_dir)
        xcode.popen_test_rpc(
            proxy_host, proxy_port, key, destination=destination, libs=[compiled_model]
        )
        compiled_model = os.path.basename(compiled_model)
        remote = rpc.connect(proxy_host, proxy_port, key=key)
        dev = remote.cpu(0)
        verify(coreml_model, compiled_model, dev)

    def check_local(coreml_model):
        # Same verification against the local CPU device.
        temp = utils.tempdir()
        compiled_model = xcode.compile_coreml(coreml_model, out_dir=temp.temp_dir)
        dev = tvm.cpu(0)
        verify(coreml_model, compiled_model, dev)

    coreml_model = create_coreml_model()
    check_remote(coreml_model)
    check_local(coreml_model)
if __name__ == "__main__":
    # Manual entry point; requires coremltools and (for RPC) an iOS device.
    test_coreml_runtime()
| 3,990 | 35.953704 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_edgetpu_runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
from tvm import te
import numpy as np
from tvm import rpc
from tvm.contrib import utils, tflite_runtime
# import tflite_runtime.interpreter as tflite
# NOTE: This script was tested on tensorflow/tflite (v2.4.1)
def skipped_test_tflite_runtime():
    """Manual end-to-end test of the TVM tflite runtime on a Coral board,
    comparing CPU and EdgeTPU inference against the tflite Python
    interpreter.
    NOTE(review): the `tflite` import this relies on is commented out at
    the top of the file, so enabling this would raise NameError -- which is
    presumably why the name is prefixed `skipped_` (not collected by pytest).
    """
    def get_tflite_model_path(target_edgetpu):
        # Return a path to the model
        edgetpu_path = os.getenv("EDGETPU_PATH", "/home/mendel/edgetpu")
        # Obtain mobilenet model from the edgetpu repo path
        if target_edgetpu:
            model_path = os.path.join(
                edgetpu_path, "test_data/mobilenet_v1_1.0_224_quant_edgetpu.tflite"
            )
        else:
            model_path = os.path.join(edgetpu_path, "test_data/mobilenet_v1_1.0_224_quant.tflite")
        return model_path
    def init_interpreter(model_path, target_edgetpu):
        # Initialize interpreter, loading the EdgeTPU delegate when targeted.
        if target_edgetpu:
            edgetpu_path = os.getenv("EDGETPU_PATH", "/home/mendel/edgetpu")
            libedgetpu = os.path.join(edgetpu_path, "libedgetpu/direct/aarch64/libedgetpu.so.1")
            interpreter = tflite.Interpreter(
                model_path=model_path, experimental_delegates=[tflite.load_delegate(libedgetpu)]
            )
        else:
            interpreter = tflite.Interpreter(model_path=model_path)
        return interpreter
    def check_remote(server, target_edgetpu=False):
        tflite_model_path = get_tflite_model_path(target_edgetpu)
        # inference via tflite interpreter python apis
        interpreter = init_interpreter(tflite_model_path, target_edgetpu)
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()
        input_shape = input_details[0]["shape"]
        tflite_input = np.array(np.random.random_sample(input_shape), dtype=np.uint8)
        interpreter.set_tensor(input_details[0]["index"], tflite_input)
        interpreter.invoke()
        tflite_output = interpreter.get_tensor(output_details[0]["index"])
        # inference via remote tvm tflite runtime
        remote = rpc.connect(server.host, server.port)
        dev = remote.cpu(0)
        if target_edgetpu:
            runtime_target = "edge_tpu"
        else:
            runtime_target = "cpu"
        with open(tflite_model_path, "rb") as model_fin:
            runtime = tflite_runtime.create(model_fin.read(), dev, runtime_target)
        runtime.set_input(0, tvm.nd.array(tflite_input, dev))
        runtime.invoke()
        out = runtime.get_output(0)
        np.testing.assert_equal(out.numpy(), tflite_output)
    # Target CPU on coral board
    check_remote(rpc.Server("127.0.0.1"))
    # Target EdgeTPU on coral board
    check_remote(rpc.Server("127.0.0.1"), target_edgetpu=True)
if __name__ == "__main__":
    # The tflite runtime test needs a Coral board; keep it disabled here.
    # skipped_test_tflite_runtime()
    pass
| 3,682 | 38.602151 | 98 | py |
tvm | tvm-main/tests/python/contrib/test_cblas.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te
import numpy as np
import tvm.topi.testing
from tvm.contrib import cblas
from tvm.contrib import mkl
from tvm.contrib import dnnl
import tvm.testing
def verify_matmul_add(m, l, n, lib, transa=False, transb=False, dtype="float32"):
    """Verify an (n, l) x (l, m) matmul plus scalar bias through a BLAS
    contrib library against numpy, for both the llvm and c targets.

    Parameters
    ----------
    m, l, n : int
        GEMM dimensions; the result has shape (n, m).
    lib : module
        One of tvm.contrib.{cblas, mkl, dnnl}.
    transa, transb : bool
        Whether A / B are stored transposed.
    dtype : str
        Element type for all tensors.
    """
    bias = te.var("bias", dtype=dtype)
    ashape = (l, n) if transa else (n, l)
    bshape = (m, l) if transb else (l, m)
    A = te.placeholder(ashape, name="A", dtype=dtype)
    B = te.placeholder(bshape, name="B", dtype=dtype)
    C = lib.matmul(A, B, transa, transb)
    D = te.compute(C.shape, lambda i, j: C[i, j] + bias, name="D")
    s = te.create_schedule(D.op)

    def get_numpy(a, b, bb, transa, transb):
        # numpy reference: undo the storage transposes, then dot + bias.
        if transa:
            a = a.transpose()
        if transb:
            b = b.transpose()
        return np.dot(a, b) + bb

    def export_and_load(f, name="test_matmul_add", ext=".so"):
        # Renamed from `compile` to stop shadowing the builtin of that name.
        # NOTE: the shared library is written into the current directory and
        # is not cleaned up afterwards (pre-existing behavior).
        path = name + ext
        f.export_library(path)
        mod = tvm.runtime.load_module(path)
        return mod[name]

    def verify(target="llvm"):
        if not tvm.testing.device_enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func(lib.__name__ + ".matmul", True):
            print("skip because extern function is not available")
            return
        dev = tvm.cpu(0)
        name = "test_matmul_add"
        f = tvm.build(s, [A, B, D, bias], target, name=name)
        if target == "c":
            # The c backend emits source; round-trip through export/load to
            # obtain a callable module.
            f = export_and_load(f, name)
        a = tvm.nd.array(np.random.uniform(size=ashape).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=bshape).astype(B.dtype), dev)
        d = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), dev)
        bb = 10.0
        f(a, b, d, bb)
        tvm.testing.assert_allclose(
            d.numpy(), get_numpy(a.numpy(), b.numpy(), bb, transa, transb), rtol=1e-5
        )

    verify("llvm")
    verify("c")
def test_matmul_add():
    """Run matmul+bias verification across the cblas/mkl/dnnl backends for a
    large shape and degenerate small shapes, over all transpose settings."""
    verify_matmul_add(235, 128, 1024, cblas)
    verify_matmul_add(235, 128, 1024, cblas, True, False)
    verify_matmul_add(235, 128, 1024, cblas, False, True)
    verify_matmul_add(235, 128, 1024, cblas, True, True)
    verify_matmul_add(235, 128, 1024, mkl)
    verify_matmul_add(235, 128, 1024, mkl, True, False)
    verify_matmul_add(235, 128, 1024, mkl, False, True)
    verify_matmul_add(235, 128, 1024, mkl, True, True)
    verify_matmul_add(235, 128, 1024, dnnl)
    verify_matmul_add(235, 128, 1024, dnnl, True, False)
    verify_matmul_add(235, 128, 1024, dnnl, False, True)
    verify_matmul_add(235, 128, 1024, dnnl, True, True)
    verify_matmul_add(1, 16, 4, cblas)
    verify_matmul_add(1, 16, 3, cblas, True, False)
    verify_matmul_add(1, 16, 3, cblas, False, False)
    verify_matmul_add(1, 16, 3, cblas, True, True)
    verify_matmul_add(1, 16, 4, mkl)
    verify_matmul_add(1, 16, 3, mkl, True, False)
    verify_matmul_add(1, 16, 3, mkl, False, False)
    verify_matmul_add(1, 16, 3, mkl, True, True)
    verify_matmul_add(1, 16, 4, dnnl)
    verify_matmul_add(1, 16, 3, dnnl, True, False)
    verify_matmul_add(1, 16, 3, dnnl, False, False)
    verify_matmul_add(1, 16, 3, dnnl, True, True)
def verify_quantized_matmul_add(m, l, n, transa=False, transb=False):
    """Build an (n, l) x (l, m) uint8 x int8 -> int32 matmul through the MKL
    u8s8s32 extern, add a scalar bias, and compare against a numpy int32
    reference.  Skips when the MKL extern function is not compiled in."""
    if not tvm.get_global_func("tvm.contrib.mkl.matmul_u8s8s32", True):
        pytest.skip("Quantized dense is supported only for MKL. TVM GPU CI uses openblas")
    bias = te.var("bias", dtype="int32")
    a_shape = (l, n) if transa else (n, l)
    b_shape = (m, l) if transb else (l, m)
    A = te.placeholder(a_shape, name="A", dtype="uint8")
    B = te.placeholder(b_shape, name="B", dtype="int8")
    C = mkl.matmul_u8s8s32(A, B, transa, transb, dtype="int32")
    D = te.compute(C.shape, lambda i, j: C[i, j] + bias, name="D")
    s = te.create_schedule(D.op)

    def reference(a, b, bb):
        # numpy reference: undo the storage transposes, then dot + bias.
        lhs = a.transpose() if transa else a
        rhs = b.transpose() if transb else b
        return np.dot(lhs, rhs) + bb

    def check(target="llvm"):
        if not tvm.testing.device_enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.mkl.matmul_u8s8s32", True):
            print("skip because extern function is not available")
            return
        dev = tvm.cpu(0)
        f = tvm.build(s, [A, B, D, bias], target)
        a = tvm.nd.array(np.random.randint(low=0, high=50, size=a_shape).astype("uint8"), dev)
        b = tvm.nd.array(np.random.randint(low=0, high=50, size=b_shape).astype("int8"), dev)
        d = tvm.nd.array(np.zeros((n, m), dtype="int32"), dev)
        bb = 10
        f(a, b, d, bb)
        tvm.testing.assert_allclose(
            d.numpy(),
            reference(a.numpy().astype("int32"), b.numpy().astype("int32"), bb),
            rtol=1e-5,
        )

    check()
def test_quantized_matmul_add():
    """Exercise the MKL u8s8s32 matmul over a large shape and degenerate
    small shapes, covering every transpose combination."""
    configs = [
        (235, 128, 1024, False, False),
        (235, 128, 1024, True, False),
        (235, 128, 1024, False, True),
        (235, 128, 1024, True, True),
        (1, 16, 4, False, False),
        (1, 16, 3, True, False),
        (1, 16, 3, False, True),
        (1, 16, 3, True, True),
    ]
    for m, l, n, transa, transb in configs:
        verify_quantized_matmul_add(m, l, n, transa, transb)
def verify_batch_matmul(
    batch_a, batch_b, m, l, n, lib, transa=False, transb=False, iterative=False, dtype="float32"
):
    """Verify a batched matmul (with optional batch-1 broadcasting on either
    operand) through a BLAS contrib library against the topi reference.

    Parameters
    ----------
    batch_a, batch_b : int
        Batch sizes of the two operands; one may be 1 to broadcast.
    m, l, n : int
        GEMM dimensions; the result has shape (batch, n, m).
    lib : module
        One of tvm.contrib.{cblas, mkl}.
    transa, transb : bool
        Whether A / B are stored transposed.
    iterative : bool
        Unused; kept for call-site compatibility.  NOTE(review): callers
        pass iterative=True but the flag is never forwarded to
        lib.batch_matmul -- confirm whether it was meant to be.
    dtype : str
        Element type for all tensors.
    """
    batch = max(batch_a, batch_b)
    ashape = (batch_a, l, n) if transa else (batch_a, n, l)
    bshape = (batch_b, m, l) if transb else (batch_b, l, m)
    A = te.placeholder(ashape, name="A", dtype=dtype)
    B = te.placeholder(bshape, name="B", dtype=dtype)
    C = lib.batch_matmul(A, B, transa, transb)
    D = te.compute(C.shape, lambda k, i, j: C[k, i, j], name="D")
    s = te.create_schedule(D.op)

    def get_numpy(a, b, transa, transb):
        # topi's reference expects (batch, n, l) x (batch, m, l) layout.
        if transa:
            a = a.transpose(0, 2, 1)
        if not transb:
            b = b.transpose(0, 2, 1)
        return tvm.topi.testing.batch_matmul(a, b)

    def export_and_load(f, name="test_batch_matmul", ext=".so"):
        # Renamed from `compile` to stop shadowing the builtin of that name.
        path = name + ext
        f.export_library(path)
        mod = tvm.runtime.load_module(path)
        return mod[name]

    def verify(target="llvm"):
        if not tvm.testing.device_enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        # Bug fix: this used to probe for the ".matmul" extern even though
        # the computation uses ".batch_matmul"; probe the right function.
        if not tvm.get_global_func(lib.__name__ + ".batch_matmul", True):
            print("skip because extern function is not available")
            return
        dev = tvm.cpu(0)
        name = "test_batch_matmul"
        f = tvm.build(s, [A, B, D], target, name=name)
        if target == "c":
            f = export_and_load(f, name)
        a = tvm.nd.array(np.random.uniform(size=ashape).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=bshape).astype(B.dtype), dev)
        d = tvm.nd.array(np.zeros((batch, n, m), dtype=D.dtype), dev)
        f(a, b, d)
        tvm.testing.assert_allclose(
            d.numpy(), get_numpy(a.numpy(), b.numpy(), transa, transb), rtol=1e-5
        )

    verify("llvm")
    verify("c")
def test_batch_matmul():
    """Run batched matmul verification for cblas and mkl, including batch
    broadcasting (batch 1 vs 16) and degenerate 1x1 shapes.
    NOTE(review): iterative=True currently has no effect inside
    verify_batch_matmul (the flag is never used there)."""
    verify_batch_matmul(16, 16, 235, 128, 1024, cblas)
    verify_batch_matmul(16, 16, 235, 128, 1024, cblas, True, False)
    verify_batch_matmul(16, 16, 235, 128, 1024, cblas, False, True)
    verify_batch_matmul(16, 16, 235, 128, 1024, cblas, True, True)
    verify_batch_matmul(16, 16, 235, 128, 1024, mkl)
    verify_batch_matmul(16, 16, 235, 128, 1024, mkl, True, False)
    verify_batch_matmul(16, 16, 235, 128, 1024, mkl, False, True)
    verify_batch_matmul(16, 16, 235, 128, 1024, mkl, True, True)
    verify_batch_matmul(16, 1, 235, 128, 1024, cblas)
    verify_batch_matmul(1, 16, 235, 128, 1024, cblas)
    verify_batch_matmul(16, 1, 235, 128, 1024, cblas, iterative=True)
    verify_batch_matmul(1, 16, 235, 128, 1024, cblas, iterative=True)
    verify_batch_matmul(16, 1, 235, 128, 1024, mkl)
    verify_batch_matmul(1, 16, 235, 128, 1024, mkl)
    verify_batch_matmul(16, 1, 235, 128, 1024, mkl, iterative=True)
    verify_batch_matmul(1, 16, 235, 128, 1024, mkl, iterative=True)
    verify_batch_matmul(1, 1, 1, 16, 3, cblas)
    verify_batch_matmul(1, 1, 1, 16, 3, cblas, True, False)
    verify_batch_matmul(1, 1, 1, 16, 3, cblas, False, False)
    verify_batch_matmul(1, 1, 1, 16, 3, cblas, True, True)
    verify_batch_matmul(1, 1, 1, 16, 3, cblas, iterative=True)
    verify_batch_matmul(1, 1, 1, 16, 3, mkl)
    verify_batch_matmul(1, 1, 1, 16, 3, mkl, True, False)
    verify_batch_matmul(1, 1, 1, 16, 3, mkl, False, False)
    verify_batch_matmul(1, 1, 1, 16, 3, mkl, True, True)
    verify_batch_matmul(1, 1, 1, 16, 3, mkl, iterative=True)
if __name__ == "__main__":
    # Allow running the whole suite directly without pytest.
    test_matmul_add()
    test_quantized_matmul_add()
    test_batch_matmul()
| 9,788 | 39.283951 | 96 | py |
tvm | tvm-main/tests/python/contrib/test_cublas.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te
from tvm import relay
import numpy as np
from tvm.contrib import cublas
from tvm.contrib import cublaslt
from tvm.contrib import graph_executor
import tvm.testing
from tvm.relay.op.contrib import get_pattern_table
from tvm.relay.op.contrib.cublas import partition_for_cublas
def verify_matmul_add(in_dtype, out_dtype, rtol=1e-5):
    """Compile a cuBLAS matmul of fixed shape (1024x128) x (128x236) and
    compare it against numpy's dot product on inputs drawn from [0, 128)."""
    n, l, m = 1024, 128, 236
    A = te.placeholder((n, l), name="A", dtype=in_dtype)
    B = te.placeholder((l, m), name="B", dtype=in_dtype)
    C = cublas.matmul(A, B, dtype=out_dtype)
    s = te.create_schedule(C.op)

    def check(target="cuda"):
        if not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
            print("skip because extern function is not available")
            return
        dev = tvm.cuda(0)
        func = tvm.build(s, [A, B, C], target)
        a_np = np.random.uniform(0, 128, size=(n, l)).astype(A.dtype)
        b_np = np.random.uniform(0, 128, size=(l, m)).astype(B.dtype)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), dev)
        func(a, b, c)
        expected = np.dot(a_np.astype(C.dtype), b_np.astype(C.dtype))
        tvm.testing.assert_allclose(c.numpy(), expected, rtol=rtol)

    check()
def roundoff(v, d):
    """Round *v* up to the nearest multiple of *d* (e.g. roundoff(10, 8) == 16)."""
    # Pure floor-division replaces the previous np.floor-based expression,
    # which round-tripped through float64 and could lose precision for very
    # large integer inputs.
    return int(((v + d - 1) // d) * d)
def verify_matmul_add_igemm(in_dtype, out_dtype, rtol=1e-5):
    """Verify the cublasLt IGEMM path: pack inputs into the layouts the
    tensor-core path expects, run the extern matmul, unpack the COL32
    output, and compare against a plain numpy dot product.
    NOTE(review): the np.vstack(<generator>) calls below rely on a numpy
    behavior that newer numpy versions reject -- may need list() wrappers.
    """
    n = 1024
    l = 1024
    m = 1024
    # Dimensions rounded up to the granularities the packed layouts require.
    L = roundoff(l, 32)
    N = roundoff(n, 8)
    N_out = roundoff(n, 32)
    A = te.placeholder((N, L), name="A", dtype=in_dtype)
    B = te.placeholder((m, L), name="B", dtype=in_dtype)
    # C has CUBLASLT_ORDER_COL32 layout, thus a different shape
    C = cublaslt.matmul(A, B, False, True, m, N_out, dtype=out_dtype)
    s = te.create_schedule(C.op)
    def verify(target="cuda"):
        if not tvm.get_global_func("tvm.contrib.cublaslt.matmul", True):
            print("skip because extern function is not available")
            return
        dev = tvm.cuda(0)
        f = tvm.build(s, [A, B, C], target)
        a_old = np.random.uniform(0, 128, size=(n, l))
        b_old = np.random.uniform(0, 128, size=(l, m))
        # Transform a to become CUBLASLT_ORDER_COL4_4R2_8C layout
        a_new = np.hstack((a_old.astype(A.dtype), np.zeros([n, L - l])))
        a_new = np.vstack((a_new.astype(A.dtype), np.zeros([N - n, L])))
        # Interleave even/odd row panels of 8.
        a_even = np.vsplit(a_new[::2], N / 8)
        a_odd = np.vsplit(a_new[1::2], N / 8)
        a_new = [None] * (len(a_even) + len(a_odd))
        a_new[::2] = a_even
        a_new[1::2] = a_odd
        a_new = np.vstack(a_new)
        a_new = np.vstack(
            np.vstack(np.vstack(np.hsplit(i, 8)).reshape([4, 32]) for i in np.vsplit(j, N / 4))
            for j in np.hsplit(a_new, L / 32)
        )
        a_new = a_new.reshape([N, L])
        # Transform b to become CUBLASLT_ORDER_COL32 layout
        b_new = np.vstack(
            np.hsplit(np.hstack((b_old.T.astype(B.dtype), np.zeros([m, L - l]))), L / 32)
        )
        b_new = b_new.reshape([m, L])
        a = tvm.nd.array(a_new.astype(A.dtype), dev)
        b = tvm.nd.array(b_new.astype(B.dtype), dev)
        c = tvm.nd.array(np.zeros((m, N_out), dtype=C.dtype), dev)
        f(a, b, c)
        # Transform output c from layout CUBLASLT_ORDER_COL32 to row major layout
        c_out = c.numpy()
        c_out = c_out.reshape([int(m * N_out / 32), 32])
        c_out = np.hstack(np.vsplit(c_out, int(N_out / 32)))
        # Drop the padding columns, then transpose back to (n, m).
        c_out = c_out[:, :n]
        c_out = c_out.T
        tvm.testing.assert_allclose(
            c_out, np.dot(a_old.astype(C.dtype), b_old.astype(C.dtype)), rtol=rtol
        )
    verify()
def verify_batch_matmul(Ashape, Bshape, Cshape, in_dtype, out_dtype, rtol=1e-5):
    """Run cublas.batch_matmul on random inputs and compare with np.matmul."""
    A = te.placeholder(Ashape, name="A", dtype=in_dtype)
    B = te.placeholder(Bshape, name="B", dtype=in_dtype)
    C = cublas.batch_matmul(A, B, dtype=out_dtype)
    s = te.create_schedule(C.op)

    dev = tvm.cuda(0)
    f = tvm.build(s, [A, B, C], "cuda")

    # Integer inputs are drawn from [1, 10) so products stay in range.
    if "int" in in_dtype:
        a_np = np.random.uniform(1, 10, size=Ashape).astype(in_dtype)
        b_np = np.random.uniform(1, 10, size=Bshape).astype(in_dtype)
    else:
        a_np = np.random.uniform(size=Ashape).astype(A.dtype)
        b_np = np.random.uniform(size=Bshape).astype(B.dtype)
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(b_np, dev)
    c = tvm.nd.array(np.zeros(Cshape, dtype=C.dtype), dev)
    f(a, b, c)
    expected = np.matmul(a_np.astype(C.dtype), b_np.astype(C.dtype)).astype(C.dtype)
    tvm.testing.assert_allclose(c.numpy(), expected, rtol=rtol)
@tvm.testing.requires_cuda
def test_matmul_add():
    """Check cublas.matmul over the supported dtype combinations."""
    verify_matmul_add("float", "float", rtol=1e-3)
    verify_matmul_add("float16", "float")
    verify_matmul_add("float16", "float16", rtol=1e-2)
    verify_matmul_add("int8", "int32")
@tvm.testing.requires_cuda
def test_matmul_add_igemm():
    """Check the cublasLt int8 IGEMM path (int8 in, int32 out)."""
    verify_matmul_add_igemm("int8", "int32")
@tvm.testing.requires_cuda
def test_batch_matmul():
    """Cover cublas.batch_matmul for float/half/int8 inputs, including batch
    broadcasting on the second operand (batch 1 vs 16)."""
    if not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
        print("skip because extern function is not available")
        return
    verify_batch_matmul((16, 1024, 128), (16, 128, 236), (16, 1024, 236), "float", "float")
    verify_batch_matmul((16, 1024, 128), (1, 128, 236), (16, 1024, 236), "float", "float")
    verify_batch_matmul((16, 1024, 128), (16, 128, 236), (16, 1024, 236), "float16", "float")
    verify_batch_matmul((16, 1024, 128), (1, 128, 236), (16, 1024, 236), "float16", "float")
    verify_batch_matmul(
        (16, 1024, 128), (16, 128, 236), (16, 1024, 236), "float16", "float16", rtol=1e-2
    )
    verify_batch_matmul(
        (16, 1024, 128), (1, 128, 236), (16, 1024, 236), "float16", "float16", rtol=1e-2
    )
    verify_batch_matmul((16, 1024, 128), (16, 128, 236), (16, 1024, 236), "int8", "int32")
def _verify_cublas_relay(expr):
    """Partition a relay expression for cuBLAS, run it on CUDA, and compare
    the output against a plain CPU (llvm) build of the same module."""
    # Fixed seed so both builds see identical random inputs.
    np.random.seed(42)
    mod = tvm.IRModule.from_expr(expr)
    mod = relay.transform.InferType()(mod)
    func = mod["main"]
    cublas_mod = partition_for_cublas(mod)
    # Expect exactly one offloaded cuBLAS function plus "main".
    assert len(cublas_mod.get_global_vars()) == 2
    input_data = []
    for param in func.params:
        shape = [int(x) for x in param.checked_type.shape]
        input_data.append(
            (param.name_hint, np.random.uniform(0, 32, size=shape).astype(param.checked_type.dtype))
        )
    # Test against CPU reference
    cuda_config = (tvm.target.cuda(), tvm.cuda(), cublas_mod)
    cpu_config = (tvm.target.Target("llvm"), tvm.cpu(), mod)
    outputs = []
    for target, dev, test_mod in [cuda_config, cpu_config]:
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(test_mod, target=target, target_host=cpu_config[0])
            module = graph_executor.GraphModule(lib["default"](dev))
            for name, data in input_data:
                module.set_input(name, tvm.nd.array(data, dev))
            module.run()
            out_type = func.body.checked_type
            outputs.append(
                module.get_output(0, tvm.nd.empty(out_type.shape, dtype=out_type.dtype)).numpy()
            )
    # Loose tolerance to accommodate fp16 configurations.
    tvm.testing.assert_allclose(
        outputs[0],
        outputs[1],
        rtol=1e-2,
    )
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
    "n,m,k,transpose_a,transpose_b",
    [
        (64, 128, 32, False, False),
        (17, 32, 16, True, False),
        (24, 17, 12, False, True),
        (96, 4, 17, True, True),
    ],
)
@pytest.mark.parametrize(
    "in_dtype,out_dtype",
    [
        ("float32", "float32"),
        ("float16", "float16"),
        ("float16", "float32"),
        ("int8", "int32"),
        ("float64", "float64"),
        ("int8", "float32"),
    ],
)
def test_relay_cublas_matmul(n, m, k, in_dtype, out_dtype, transpose_a, transpose_b):
    """Offload a relay matmul to cuBLAS and compare against the CPU build."""
    # NOTE(review): the skip list suggests int8 with transpose_a is not
    # supported by the cuBLAS path -- confirm against the partitioning rules.
    unsupported_configs = [
        (17, 32, 16, "int8", "float32", True, False),
        (96, 4, 17, "int8", "float32", True, True),
        (17, 32, 16, "int8", "int32", True, False),
        (96, 4, 17, "int8", "int32", True, True),
    ]
    if (n, m, k, in_dtype, out_dtype, transpose_a, transpose_b) in unsupported_configs:
        pytest.skip("Unsupported parameters.")
    a_shape = (k, n) if transpose_a else (n, k)
    b_shape = (m, k) if transpose_b else (k, m)
    a = tvm.relay.var("A", tvm.relay.TensorType(a_shape, in_dtype))
    b = tvm.relay.var("B", tvm.relay.TensorType(b_shape, in_dtype))
    # Directly use matmul because nn.matmul sometimes defers to nn.dense
    matmul = relay.op.nn._make.matmul(a, b, None, out_dtype, transpose_a, transpose_b)
    _verify_cublas_relay(matmul)
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
    "n,m,k",
    [
        (64, 128, 32),
        (17, 32, 16),
        (24, 17, 12),
        (96, 4, 17),
    ],
)
@pytest.mark.parametrize(
    "in_dtype,out_dtype",
    [
        ("float32", "float32"),
        ("float16", "float16"),
        ("float16", "float32"),
        ("int8", "int32"),
        ("float64", "float64"),
        ("int8", "float32"),
    ],
)
def test_relay_cublas_dense(n, m, k, in_dtype, out_dtype):
    """Offload relay nn.dense to cuBLAS and compare against the CPU build."""
    unsupported_configs = [
        (96, 4, 17, "int8", "float32"),
        (96, 4, 17, "int8", "int32"),
    ]
    if (n, m, k, in_dtype, out_dtype) in unsupported_configs:
        pytest.skip("Unsupported parameters.")
    data = tvm.relay.var("data", tvm.relay.TensorType((n, k), in_dtype))
    weight = tvm.relay.var("weight", tvm.relay.TensorType((m, k), in_dtype))
    dense = relay.op.nn.dense(data, weight, out_dtype=out_dtype)
    _verify_cublas_relay(dense)
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
    "n,m,k,batch_a,batch_b,transpose_a,transpose_b",
    [
        (64, 128, 32, 16, 16, False, False),
        (17, 32, 16, 16, 1, True, False),
        (24, 17, 12, 17, 17, False, True),
        (96, 4, 17, 53, 1, True, True),
    ],
)
@pytest.mark.parametrize(
    "in_dtype,out_dtype",
    [
        ("float32", "float32"),
        ("float16", "float16"),
        ("float16", "float32"),
        ("int8", "int32"),
        ("float64", "float64"),
        ("int8", "float32"),
    ],
)
def test_relay_cublas_batch_matmul(
    n, m, k, batch_a, batch_b, in_dtype, out_dtype, transpose_a, transpose_b
):
    """Offload relay nn.batch_matmul (incl. broadcast batches) to cuBLAS and
    compare against the CPU build."""
    unsupported_configs = [
        (17, 32, 16, 16, 1, "int8", "float32", True, False),
        (96, 4, 17, 53, 1, "int8", "float32", True, True),
        (17, 32, 16, 16, 1, "int8", "int32", True, False),
        (96, 4, 17, 53, 1, "int8", "int32", True, True),
    ]
    if (
        n,
        m,
        k,
        batch_a,
        batch_b,
        in_dtype,
        out_dtype,
        transpose_a,
        transpose_b,
    ) in unsupported_configs:
        pytest.skip("Unsupported parameters.")
    a_shape = (batch_a, k, n) if transpose_a else (batch_a, n, k)
    b_shape = (batch_b, m, k) if transpose_b else (batch_b, k, m)
    a = tvm.relay.var("A", tvm.relay.TensorType(a_shape, in_dtype))
    b = tvm.relay.var("B", tvm.relay.TensorType(b_shape, in_dtype))
    batch_matmul = relay.op.nn.batch_matmul(a, b, out_dtype, transpose_a, transpose_b)
    _verify_cublas_relay(batch_matmul)
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
    "n,m,k",
    [
        (64, 128, 32),
        (17, 32, 16),
        (24, 17, 12),
        (96, 4, 17),
    ],
)
@pytest.mark.parametrize(
    "in_dtype,out_dtype",
    [
        ("float32", "float32"),
        ("float16", "float16"),
        ("float16", "float32"),
        ("int8", "int32"),
        ("float64", "float64"),
        ("int8", "float32"),
    ],
)
def test_relay_cublas_dense(n, m, k, in_dtype, out_dtype):
    """Exercise relay nn.dense lowering through the cuBLAS BYOC path.

    NOTE(review): this is a byte-identical duplicate of an earlier
    ``test_relay_cublas_dense`` in this file; this later definition is the
    one pytest actually collects. Consider removing one of the two.
    """
    # Shape/dtype combinations the cuBLAS backend rejects.
    unsupported_configs = [
        (96, 4, 17, "int8", "float32"),
        (96, 4, 17, "int8", "int32"),
    ]
    if (n, m, k, in_dtype, out_dtype) in unsupported_configs:
        pytest.skip("Unsupported parameters.")
    data = tvm.relay.var("data", tvm.relay.TensorType((n, k), in_dtype))
    weight = tvm.relay.var("weight", tvm.relay.TensorType((m, k), in_dtype))
    dense = relay.op.nn.dense(data, weight, out_dtype=out_dtype)
    _verify_cublas_relay(dense)
if __name__ == "__main__":
    # Allow running this file directly (outside of pytest collection).
    tvm.testing.main()
| 12,911 | 32.801047 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_clml/test_adreno_collage_targets.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Compares Collage with various other baselines."""
import tvm
import logging
import tempfile
import os
import shutil
import numpy as np
from tvm.relay import testing
from tvm import rpc
from tvm.contrib import utils, ndk
from tvm.relay.build_module import bind_params_by_name
# The following are necessary to force global functions or pattern tables to be registered
from tvm.relay.collage.collage import *
from tvm.relay.op.contrib import clml
import pytest
logging.basicConfig(level=logging.INFO)
########### Configuration ###########
###
### TVM Opencl AutoTvm log file name
###
TUNING_LOG = ""
###
### If true, run all models
###
ALL_MODELS = False
###
### If true, run all configurations
###
ALL_CONFIGS = False
###
### How aggressively to look for candidates?
###
TVM_MAX_DEPTH = 8
BYOC_MAX_DEPTH = 8
###
### AutoTVM tuning parameters.
###
AUTOTVM_NUM_TRIALS = 1024
AUTOTVM_EARLY_STOPPING = 600
TIMEOUT = 10
MEASURE_NUMBER = tvm.relay.collage.MEASURE_NUMBER
MEASURE_REPEAT = tvm.relay.collage.MEASURE_REPEAT
WARMUP_MIN_REPEAT_MS = tvm.relay.collage.WARMUP_MIN_REPEAT_MS
##
## RPC Build configuration
##
HOST = tvm.target.Target("llvm -mtriple=arm64-linux-android")
OPENCL = tvm.target.Target("opencl", HOST)
RPC_TRACKER_HOST = os.getenv("TVM_TRACKER_HOST", "localhost")
RPC_TRACKER_PORT = int(os.getenv("TVM_TRACKER_PORT", 9090))
RPC_KEY = os.getenv("RPC_DEVICE_KEY", "android")
NDK_CROSS_COMPILER = os.getenv("TVM_NDK_CC", "aarch64-linux-android-g++")
########### AutoTVM tuning helpers ###########
def extract_autotvm_tasks(mod, target):
    """Returns TVM kernels to tune for mod and target.

    BYOC-partitioned functions contribute no tasks; ``params=None`` is passed
    straight through to the extractor.
    """
    return tvm.autotvm.task.extract_from_program(mod, target=target, params=None)
def optional_tuning_records(log_filename):
    """Return a dispatch context over existing tuning records, if any.

    Falls back to default schedules when *log_filename* is empty or the
    file does not exist on disk.
    """
    if log_filename == "" or not os.path.exists(log_filename):
        return tvm.autotvm.task.FallbackContext()
    return tvm.autotvm.task.ApplyHistoryBest(log_filename)
def is_already_tuned(task, log_filename):
    """Return True when *log_filename* already holds a tuning record for *task*."""
    if os.path.exists(log_filename):
        best_records = tvm.autotvm.task.ApplyHistoryBest(log_filename)
        return best_records._query_inside(task.target, task.workload)
    return False
def tune_autotvm_tasks(tasks, log_filename):
    """Appends to log filename the best strategies for tasks.

    Builds locally via the NDK and measures on the remote Android device
    through the RPC tracker. Work happens in ``log_filename + ".tmp"``;
    only the best records are copied back into *log_filename* at the end.
    """
    if len(tasks) == 0:
        return
    measure_option = tvm.autotvm.measure_option(
        builder=tvm.autotvm.LocalBuilder(build_func=ndk.create_shared, timeout=15),
        runner=tvm.autotvm.RPCRunner(
            RPC_KEY, host=RPC_TRACKER_HOST, port=RPC_TRACKER_PORT, number=100, timeout=15
        ),
    )
    logging.info(
        f"Using autotvm tuning for {len(tasks)} tasks with {AUTOTVM_NUM_TRIALS} trials, logging to {log_filename}"
    )
    # create tmp log file, starting with contents from existing log file
    tmp_log_filename = log_filename + ".tmp"
    if os.path.exists(tmp_log_filename):
        os.remove(tmp_log_filename)
    if os.path.exists(log_filename):
        logging.info(f"Copying existing log {log_filename} to {tmp_log_filename}")
        shutil.copy(log_filename, tmp_log_filename)
    for i, task in enumerate(reversed(tasks)):
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
        logging.info(f"Considering task {task.name} {prefix}")
        # Tasks already covered by existing records are skipped.
        if is_already_tuned(task, tmp_log_filename):
            logging.info(f"Re-using existing record for {task.name}")
            continue
        logging.info(f"Using autotvm to tune {task.name}")
        tuner_obj = tvm.autotvm.tuner.XGBTuner(task, loss_type="reg")
        if os.path.exists(tmp_log_filename):
            # Seed the cost model with whatever has been measured so far.
            tuner_obj.load_history(tvm.autotvm.record.load_from_file(tmp_log_filename))
        # do tuning
        n_trial = min(AUTOTVM_NUM_TRIALS, len(task.config_space))
        tuner_obj.tune(
            n_trial=n_trial,
            early_stopping=AUTOTVM_EARLY_STOPPING,
            measure_option=measure_option,
            callbacks=[
                tvm.autotvm.callback.progress_bar(n_trial, prefix=prefix),
                tvm.autotvm.callback.log_to_file(tmp_log_filename),
            ],
        )
    # Pick best records and copy back to main log file
    tvm.autotvm.record.pick_best(tmp_log_filename, log_filename)
    os.remove(tmp_log_filename)
    logging.info("Done with autotvm tuning")
def autotvm_tune_module(mod, target, log_filename):
    """Extract AutoTVM tasks from *mod* and tune them into *log_filename*.

    An empty log filename disables tuning entirely.
    """
    if log_filename == "":
        logging.info("Not tuning with autotvm since disabled")
        return
    # BYOC partitions contribute no tasks, so only plain TVM kernels get tuned.
    logging.info("Extracting tasks from overall module")
    kernel_tasks = extract_autotvm_tasks(mod, target)
    logging.info(f"Auto-tuning {len(kernel_tasks)} tasks from overall module")
    tune_autotvm_tasks(kernel_tasks, log_filename)
########### Drivers ###########
def compile_and_benchmark(label, model, targets, tmp_dir):
    """Compile model for target and run it with profiling.

    The relay VM library is cross-compiled with the NDK, uploaded to a remote
    device obtained from the RPC tracker, and benchmarked there. Results are
    reported via logging (the ``RESULT:`` line carries the median latency).
    """
    logging.info(f"Compiling {model['name']} using {label} with {targets}...")
    mod = model["mod"]
    mod = clml.preprocess_for_clml(mod)
    exe = tvm.relay.vm.compile(mod, target=targets, params=model["params"])
    lib = exe.mod
    lib_path = os.path.join(tmp_dir, "lib.so")
    logging.info(f"Exporting library to {lib_path}...")
    lib.export_library(lib_path, cc=NDK_CROSS_COMPILER)
    tracker = rpc.connect_tracker(RPC_TRACKER_HOST, RPC_TRACKER_PORT)
    remote = tracker.request(RPC_KEY, priority=0, session_timeout=600)
    ctx = remote.cl(0)
    remote_path = "lib.so"
    remote.upload(lib_path, target=remote_path)
    lib = remote.load_module(remote_path)
    vm_factory = tvm.runtime.vm.VirtualMachine(lib, ctx)
    # One argument per main() parameter; arg_for presumably comes from the
    # `from tvm.relay.collage.collage import *` star import — TODO confirm.
    args = {v.name_hint: arg_for(v.checked_type, ctx) for v in mod["main"].params}
    logging.info(f"Benchmarking for {model['name']} generated by {label}...")
    profile = vm_factory.benchmark(
        ctx, repeat=MEASURE_REPEAT, number=MEASURE_NUMBER, min_repeat_ms=0, **args
    )
    logging.info(f"Benchmarked for {model['name']} generated by {label}: {profile}")
    logging.info(f"RESULT: {label} | {model['name']} | {profile.median * 1e3}ms")
# Custom cost function for Opencl RPC targets.
@register_func("tvm.relay.collage.opencl_cost_estimator")
def opencl_cost_estimator(mod, target):
    """Estimate the mean runtime of *mod* on the remote OpenCL/CLML device.

    Registered as Collage's custom cost estimator. Partitions that fail to
    build are given infinite cost rather than raising.
    """
    mod = clml.preprocess_for_clml(mod) if "clml" == target.kind.name else mod
    try:
        # Build the module.
        logging.info("Compiling module to estimate")
        exe = tvm.relay.vm.compile(mod, target)
    except RuntimeError as err:
        # A build failure indicates the partition is not supported.
        # eg trying to build an nn.batch_norm on GPU, which has no schedule since we assume it
        # is only ever used with a tuple projection which is rewritten away.
        logging.info("Assigning module infinite cost since unable to build: %s", err)
        # NOTE(review): `math` is not imported explicitly here — presumably it
        # arrives via the collage star import; confirm.
        return math.inf
    lib = exe.mod
    tracker = rpc.connect_tracker(RPC_TRACKER_HOST, RPC_TRACKER_PORT)
    remote = tracker.request(RPC_KEY, priority=0, session_timeout=600)
    temp = utils.tempdir()
    dso_binary = "dev_lib_cl.so"
    dso_binary_path = temp.relpath(dso_binary)
    ctx = remote.cl(0)
    lib.export_library(dso_binary_path, cc=NDK_CROSS_COMPILER)
    remote_path = dso_binary
    remote.upload(dso_binary_path, target=remote_path)
    lib = remote.load_module(remote_path)
    vm_factory = tvm.runtime.vm.VirtualMachine(lib, ctx)
    func_name = "main"
    main_args = {v.name_hint: arg_for(v.checked_type, ctx) for v in mod[func_name].params}
    cost = vm_factory.benchmark(
        ctx, repeat=5, number=20, min_repeat_ms=0, func_name=func_name, **main_args
    )
    return cost.mean
def collage(model):
    """Run the Collage partitioner for a set of Opencl Adreno related targets and profile the result"""
    logging.info(f"collage | {model['name']}")
    logging.info("-------------- BEGIN ORIGINAL --------------")
    logging.info(model["mod"])
    logging.info("-------------- END ORIGINAL ----------------")
    autotvm_tune_module(model["mod"], OPENCL, TUNING_LOG)
    with optional_tuning_records(TUNING_LOG):
        targets = []
        targets.append(OPENCL)
        # NOTE(review): use_fp16 is computed but never used.
        use_fp16 = model["main_dtype"] == "float16"
        tmp_dir = tempfile.mkdtemp()
        targets.append(tvm.target.Target("clml", HOST))
        # Register byoc fusion style for compiler with available
        # options [compiler.NoFusion | compiler.TVMFusion | compiler.MaxDepthFusion]
        config = {
            "relay.collage.tvm_max_depth": TVM_MAX_DEPTH,
            "relay.collage.byoc_max_depth": BYOC_MAX_DEPTH,
            "relay.collage.byoc_fusion_style": ["clml.NoFusion"],
        }
        # NOTE(review): the log message below is missing its closing ")" —
        # harmless, but worth fixing.
        logging.info(f"Using PassContext(config={config}")
        ctxt = tvm.transform.PassContext(config=config)
        # `config` is rebound here from the dict to a CompilationConfig object.
        config = tvm.target.make_compilation_config(ctxt, targets)
        with ctxt:
            mod = model["mod"]
            mod = tvm.relay.transform.CapturePostDfsIndexInSpans()(mod)
            logging.info("-------------- BEGIN INDEXED --------------")
            logging.info(mod)
            logging.info("-------------- END INDEXED ----------------")
            # Register python custom cost function for targets in
            # custom cost estimator module.
            cost_estimator = CustomCostEstimator(
                py_fn_estimator="tvm.relay.collage.opencl_cost_estimator"
            )
            mod = tvm.relay.transform.CollagePartition(config, cost_estimator=cost_estimator)(mod)
            partitioned_model = model.copy()
            partitioned_model["mod"] = mod
            logging.info("-------------- BEGIN PARTITIONED --------------")
            logging.info(partitioned_model["mod"])
            logging.info("-------------- END PARTITIONED ----------------")
            compile_and_benchmark("collage", partitioned_model, targets, tmp_dir)
def just_clml(model):
    """Run partition_for_clml, complete the compilation with TVM, and profile the result."""
    logging.info(f"just_clml | {model['name']}")
    logging.info("-------------- BEGIN ORIGINAL --------------")
    logging.info(model["mod"])
    logging.info("-------------- END ORIGINAL ----------------")
    tmp_dir = tempfile.mkdtemp()
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
        logging.info("Partitioning for CLML...")
        mod = tvm.relay.op.contrib.clml.partition_for_clml(model["mod"], model["params"])
        partitioned_model = model.copy()
        partitioned_model["mod"] = mod
        logging.info("-------------- BEGIN PARTITIONED --------------")
        logging.info(partitioned_model["mod"])
        logging.info("-------------- END PARTITIONED ----------------")
        # Anything not claimed by CLML falls back to the OpenCL target.
        targets = []
        targets.append(OPENCL)
        targets.append(tvm.target.Target("clml", HOST))
        compile_and_benchmark("just_clml", partitioned_model, targets, tmp_dir)
def just_tvm(model):
    """Compile and profile using vanilla TVM.

    Tunes with AutoTVM first (when TUNING_LOG is configured), then builds
    with the OpenCL target only — no BYOC offload.
    """
    logging.info(f"just_tvm | {model['name']}")
    logging.info("-------------- BEGIN ORIGINAL --------------")
    logging.info(model["mod"])
    logging.info("-------------- END ORIGINAL ----------------")
    tmp_dir = tempfile.mkdtemp()
    autotvm_tune_module(model["mod"], OPENCL, TUNING_LOG)
    with optional_tuning_records(TUNING_LOG):
        with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
            compile_and_benchmark("just_tvm", model, OPENCL, tmp_dir)
def get_model(model_name, dtype):
    """Build a named testing workload and its metadata.

    Args:
        model_name: substring-matched model selector ("mobilenet" or "resnet").
        dtype: dtype string used for the workload's main function.

    Returns:
        Dict with keys name/input_shapes/input_dtypes/mod/params/main_dtype.

    Raises:
        ValueError: if *model_name* matches no known model.
    """
    if "mobilenet" in model_name:
        mod, params = testing.mobilenet.get_workload(batch_size=1, dtype=dtype)
    elif "resnet" in model_name:
        mod, params = testing.resnet.get_workload(num_layers=50, batch_size=1, dtype=dtype)
    else:
        # Previously an unrecognized name fell through and crashed later with
        # a NameError on `mod`; fail fast with a clear message instead.
        raise ValueError(f"Unsupported model name: {model_name}")
    if params:
        # Bake the weights into the module so constant folding can run.
        mod["main"] = bind_params_by_name(mod["main"], params)
        mod = tvm.relay.transform.FoldConstant()(mod)
    return {
        "name": model_name,
        "input_shapes": {"data": [1, 3, 224, 224]},
        "input_dtypes": {"data": dtype},
        "mod": mod,
        "params": params,
        "main_dtype": dtype,
    }
########### Runners ###########
@pytest.mark.parametrize("dtype", ["float32"])
@tvm.testing.requires_openclml
def run_resnet50(dtype):
    """Benchmark resnet-50 three ways: CLML-only offload, plain TVM, and Collage."""
    just_clml(get_model("resnet-50", dtype))
    just_tvm(get_model("resnet-50", dtype))
    # NOTE(review): the bare string below is a no-op statement (misplaced
    # docstring) — consider turning it into a comment.
    """Run Collage for tvm and clml compiler target."""
    collage(get_model("resnet-50", dtype))
@pytest.mark.parametrize("dtype", ["float32"])
@tvm.testing.requires_openclml
def run_mobilenetv1(dtype):
    """Benchmark mobilenet three ways: CLML-only offload, plain TVM, and Collage."""
    just_clml(get_model("mobilenet", dtype))
    just_tvm(get_model("mobilenet", dtype))
    # NOTE(review): the bare string below is a no-op statement (misplaced
    # docstring) — consider turning it into a comment.
    """Run Collage for tvm and clml compiler target."""
    collage(get_model("mobilenet", dtype))
| 13,661 | 37.484507 | 114 | py |
tvm | tvm-main/tests/python/contrib/test_clml/conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import tvm
import pytest
from test_clml.infrastructure import Device
@pytest.fixture(scope="session")
def device():
    """Session-scoped Device (from test_clml.infrastructure) shared by all CLML tests."""
    return Device()
| 935 | 33.666667 | 62 | py |
tvm | tvm-main/tests/python/contrib/test_clml/test_network.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""OpenCL ML network tests."""
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from tvm.contrib import utils
from test_clml.infrastructure import build_and_run, Device
import pytest
def _build_and_run_network(mod, params, inputs, data, device, atol, rtol, tvm_log=""):
    """Helper function to build and run a network.

    Executes the module twice — first with CLML offload enabled, then
    without — and returns ``[clml_output, opencl_output]``.

    NOTE(review): the ``inputs``, ``atol`` and ``rtol`` parameters are unused
    here; the numerical comparison happens at the call sites.
    """
    outputs = []
    for clml in [True, False]:
        outputs.append(
            build_and_run(mod, data, 1, params, device, enable_clml=clml, tune_log=tvm_log)[0][0]
        )
    return outputs
def _get_keras_model(keras_model, inputs_dict, data):
    """Convert Keras graph to relay.

    Returns ``(mod, params, ref_output)`` where ``ref_output`` is the Keras
    prediction on ``data["input_1"]`` (transposed NCHW -> NHWC).

    NOTE(review): the loop below always writes to ``input_names[0]``
    regardless of the dict key, i.e. this helper effectively assumes a
    single-input model — confirm before reusing with multi-input models.
    """
    inputs = {}
    for name, (shape, _) in inputs_dict.items():
        inputs[keras_model.input_names[0]] = shape
    from tensorflow.keras.layers import Input
    from tensorflow.keras.models import Model
    def get_bottom_top_model(model, layer_name):
        # Truncate the model at `layer_name` (keeps input through that layer).
        layer = model.get_layer(layer_name)
        bottom_input = model.layers[0].input
        bottom_output = layer.output
        bottom_model = Model(bottom_input, bottom_output)
        return bottom_model
    keras_model = get_bottom_top_model(keras_model, "predictions")
    ref_output = keras_model.predict(data["input_1"].transpose(0, 2, 3, 1))
    mod, params = relay.frontend.from_keras(keras_model, inputs, layout="NCHW")
    return mod, params, ref_output
@pytest.mark.parametrize("dtype", ["float16"])
@tvm.testing.requires_openclml
def test_mobilenet(device, dtype):
    """Compare MobileNet class orderings between the CLML and plain OpenCL paths."""
    def get_model():
        from tensorflow.keras.applications import MobileNet
        import tensorflow as tf
        tf.keras.backend.clear_session()
        # Random weights are fine — both execution paths share them.
        mobilenet = MobileNet(
            include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000
        )
        inputs = {mobilenet.input_names[0]: ((1, 3, 224, 224), "float32")}
        data = {}
        np.random.seed(0)
        for name, (shape, dtype) in inputs.items():
            if dtype == "uint8":
                low, high = 0, 1
            else:
                low, high = -1, 1
            data[name] = np.random.uniform(low, high, shape).astype(dtype)
        mod, params, ref_outputs = _get_keras_model(mobilenet, inputs, data)
        return mod, params, inputs, data, ref_outputs
    mod, params, inputs, input_data, ref_outputs = get_model()
    outputs = _build_and_run_network(
        mod, params, inputs, input_data, device=device, atol=1e-5, rtol=1e-5
    )
    # outputs[0] is the CLML run, outputs[1] the plain OpenCL run; compare the
    # first 10 indices of the argsort ordering of the class scores.
    opencl_sort = np.argsort(outputs[1].asnumpy()).flatten()
    clml_sort = np.argsort(outputs[0].asnumpy()).flatten()
    tvm.testing.assert_allclose(opencl_sort[:10], clml_sort[:10], rtol=1e-5, atol=1e-5)
@pytest.mark.parametrize("dtype", ["float16"])
@tvm.testing.requires_openclml
def test_inception_v3(device, dtype):
    """Compare InceptionV3 class orderings between the CLML and plain OpenCL paths."""
    def get_model():
        from tensorflow.keras.applications import InceptionV3
        import tensorflow as tf
        tf.keras.backend.clear_session()
        inceptionV3 = InceptionV3(
            include_top=True, weights=None, input_shape=(299, 299, 3), classes=1000
        )
        inputs = {inceptionV3.input_names[0]: ((1, 3, 299, 299), "float16")}
        data = {}
        np.random.seed(0)
        for name, (shape, dtype) in inputs.items():
            if dtype == "uint8":
                low, high = 0, 1
            else:
                low, high = -2, 1
            data[name] = np.random.uniform(low, high, shape).astype(dtype)
        mod, params, ref_outputs = _get_keras_model(inceptionV3, inputs, data)
        return mod, params, inputs, data, ref_outputs
    mod, params, inputs, input_data, ref_outputs = get_model()
    outputs = _build_and_run_network(
        mod, params, inputs, input_data, device=device, atol=1e-5, rtol=1e-5
    )
    # outputs[0] is the CLML run, outputs[1] the plain OpenCL run.
    opencl_sort = np.argsort(outputs[1].asnumpy()).flatten()
    clml_sort = np.argsort(outputs[0].asnumpy()).flatten()
    tvm.testing.assert_allclose(opencl_sort[:5], clml_sort[:5], rtol=1e-5, atol=1e-5)
@pytest.mark.parametrize("dtype", ["float16"])
@tvm.testing.requires_openclml
def test_resnet50v2(device, dtype):
    """Compare ResNet50V2 class orderings between the CLML and plain OpenCL paths."""
    def get_model():
        from tensorflow.keras.applications import ResNet50V2
        import tensorflow as tf
        tf.keras.backend.clear_session()
        model = ResNet50V2(include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000)
        inputs_dict = {model.input_names[0]: ((1, 3, 224, 224), "float32")}
        data = {}
        np.random.seed(0)
        for name, (shape, dtype) in inputs_dict.items():
            if dtype == "uint8":
                low, high = 0, 1
            else:
                low, high = -1, 1
            data[name] = np.random.uniform(low, high, shape).astype(dtype)
        # NOTE(review): the bare string below is a no-op statement, and this
        # conversion duplicates _get_keras_model — consider reusing the helper.
        """Convert Keras graph to relay."""
        inputs = {}
        for name, (shape, _) in inputs_dict.items():
            inputs[model.input_names[0]] = shape
        ref_outputs = model.predict(data["input_1"].transpose(0, 2, 3, 1))
        mod, params = relay.frontend.from_keras(model, inputs, layout="NCHW")
        return mod, params, inputs, data, ref_outputs
    mod, params, inputs, input_data, ref_outputs = get_model()
    outputs = _build_and_run_network(
        mod, params, inputs, input_data, device=device, atol=1e-5, rtol=1e-5
    )
    # outputs[0] is the CLML run, outputs[1] the plain OpenCL run.
    opencl_sort = np.argsort(outputs[1].asnumpy()).flatten()
    clml_sort = np.argsort(outputs[0].asnumpy()).flatten()
    tvm.testing.assert_allclose(opencl_sort[:10], clml_sort[:10], rtol=1e-5, atol=1e-5)
if __name__ == "__main__":
    # Allow running this file directly (outside of pytest collection).
    tvm.testing.main()
| 6,326 | 34.15 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_clml/test_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CLML integration operator tests."""
import tvm
import numpy as np
from tvm import relay
from tvm.relay.op.contrib import clml
from tvm.relay import testing
from tvm.ir import IRModule
from tvm.contrib import utils
from test_clml.infrastructure import (
build_and_run,
Device,
skip_codegen_test,
verify_codegen,
build_module,
get_cpu_op_count,
)
import pytest
def _get_conv_model(
    shape,
    kernel_h,
    kernel_w,
    padding,
    strides,
    dilation,
    groups,
    dtype,
    channels,
    var,
    has_bias=False,
    has_activation=False,
    has_pad=False,
):
    """Return a model and any parameters it may have.

    Builds a relay conv2d (optionally preceded by an explicit nn.pad and
    followed by bias_add / relu). ``var`` maps the input name to its array;
    only the first key is used for the relay input variable.
    """
    a = relay.var(next(iter(var)), shape=shape, dtype=dtype)
    # NOTE(review): input_arr is never used below.
    input_arr = var[next(iter(var))]
    if has_pad:
        # Fold the padding into an explicit nn.pad op and zero it on the conv.
        p = ((0, 0), (0, 0), (padding[0], padding[0]), (padding[1], padding[1]))
        a = relay.nn.pad(a, pad_width=p)
        padding = (0, 0, 0, 0)
    else:
        if len(padding) == 2:
            padding = (padding[0], padding[1], padding[0], padding[1])
        # NOTE(review): this reassignment of `shape` only feeds the (also
        # unused) is_depthwise flag below — looks vestigial; confirm.
        shape = (shape[0], shape[1], shape[2] + padding[0] * 2, shape[3] + padding[1] * 2)
    # NOTE(review): is_depthwise is computed but never used.
    is_depthwise = shape[1] == channels == groups
    weight_format = "OIHW"
    weight_shape = (channels, shape[1] // groups, kernel_h, kernel_w)
    w = tvm.nd.array(np.random.uniform(-1, 1, weight_shape).astype(dtype))
    weights = relay.const(w, dtype)
    out = relay.nn.conv2d(
        a,
        weights,
        kernel_size=(kernel_h, kernel_w),
        data_layout="NCHW",
        kernel_layout=weight_format,
        dilation=dilation,
        strides=strides,
        padding=padding,
        groups=groups,
        channels=channels,
        out_dtype=dtype,
    )
    params = {"w": w}
    if has_bias:
        bias_shape = (weight_shape[0],)
        b = tvm.nd.array(np.random.uniform(-1, 1, bias_shape).astype(dtype))
        biasc = relay.const(b, dtype)
        out = relay.nn.bias_add(out, biasc, axis=1)
        params["b"] = b
    if has_activation:
        out = relay.nn.relu(out)
    return out, params
def _get_conv_expected_codegen(
shape,
kernel_h,
kernel_w,
padding,
strides,
dilation,
groups,
dtype,
channels,
has_bias=False,
has_activation=False,
):
if len(padding) == 2:
padding = (padding[0], padding[1], padding[0], padding[1])
output_height = ((shape[2] - kernel_h + padding[0] + padding[2]) / strides[0]) + 1
output_width = ((shape[3] - kernel_w + padding[1] + padding[3]) / strides[1]) + 1
output_shape = (1, channels, int(output_height), int(output_width))
out_dtype = dtype
is_depthwise = shape[1] == channels == groups
weight_format = "IOHW" if is_depthwise else "OIHW"
if weight_format == "OIHW":
weight_shape = (channels, shape[1] // groups, kernel_h, kernel_w)
else:
weight_shape = (shape[1] // groups, channels, kernel_h, kernel_w)
if is_depthwise:
name = "nn.depthwise_conv2d"
else:
name = "nn.conv2d"
node = {
"op": "kernel",
"name": name,
"inputs": [],
"attrs": {
"groups": [[str(groups)]],
"num_outputs": "1",
"data_layout": [["NCHW"]],
"kernel_layout": [[weight_format]],
"channels": [[str(channels)]],
"dilation": [[str(dilation[0]), str(dilation[1])]],
"out_layout": [[""]],
"out_dtype": [[out_dtype]],
"kernel_size": [[str(kernel_h), str(kernel_w)]],
"shape": [[list(output_shape)]],
"dtype": [[dtype]],
"padding": [[str(p) for p in padding]],
"strides": [[str(s) for s in strides]],
},
}
if has_activation:
node["attrs"]["activation_type"] = [["relu"]]
inputs = [
{"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[str(dtype)]]}},
{
"op": "const",
"name": "",
"attrs": {"shape": [[list(weight_shape)]], "dtype": [[str(dtype)]]},
},
]
if has_bias:
bias_dtype = dtype
inputs.append(
{
"op": "const",
"name": "",
"attrs": {
"shape": [[[1, weight_shape[1] if is_depthwise else weight_shape[0], 1, 1]]],
"dtype": [[bias_dtype]],
},
}
)
input_idx = 0
for _ in range(len(inputs)):
node["inputs"].append([input_idx, 0, 0])
input_idx += 1
node["attrs"]["num_inputs"] = str(len(inputs))
inputs.append(node)
return inputs
@pytest.mark.parametrize("dtype", ["float32"])
@tvm.testing.requires_openclml
def test_conv2d(device, dtype):
    """Check CLML conv2d against plain OpenCL and against the expected codegen JSON.

    Each trial is:
    [kernel_h, kernel_w, pad, stride, dilation, out_channels, shape (CHW),
     (has_pad, has_bias, has_activation), is_depthwise]
    """
    trials = [
        # Normal convolution
        [3, 3, (1, 1), (1, 1), (1, 1), 4, (14, 10, 10), (False, False, False), False],
        [2, 1, (2, 2), (1, 1), (1, 1), 7, (15, 16, 12), (True, False, True), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 4, (14, 10, 10), (False, True, False), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 4, (14, 10, 10), (False, True, True), False],
        [2, 2, (1, 1), (1, 1), (1, 1), 4, (14, 10, 10), (False, False, False), False],
        [2, 1, (2, 2), (1, 1), (1, 1), 7, (16, 12, 15), (False, False, True), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 4, (14, 10, 10), (False, True, False), False],
        [3, 3, (1, 1), (1, 1), (1, 1), 16, (16, 12, 15), (False, False, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (14, 10, 10), (False, False, False), False],
        [1, 3, (1, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, True), False],
        [2, 2, (2, 2), (1, 1), (1, 1), 4, (20, 20, 20), (False, True, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (14, 10, 10), (False, False, False), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, False), False],
        [3, 3, (1, 1), (2, 2), (1, 1), 16, (14, 10, 10), (False, True, True), False],
        # Depth-wise convolution
        [3, 3, (1, 1), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, True), True],
        [5, 5, (2, 2), (1, 1), (1, 1), 20, (20, 20, 20), (False, True, False), True],
        [3, 3, (2, 2), (2, 2), (1, 1), 14, (14, 10, 10), (False, False, False), True],
        [5, 5, (0, 0), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, False), True],
        [3, 3, (1, 1), (2, 2), (1, 1), 14, (14, 10, 10), (False, True, True), True],
    ]
    for (
        kernel_h,
        kernel_w,
        pad,
        stride,
        dilation,
        out_channels,
        shape,
        composite,
        is_depthwise,
    ) in trials:
        shape = (1, *shape)
        # Depthwise means one group per input channel.
        if is_depthwise:
            groups = shape[1]
        else:
            groups = 1
        outputs = []
        inputs = {
            "a": tvm.nd.array(np.random.uniform(-1, 1, shape).astype(dtype)),
        }
        func, params = _get_conv_model(
            shape,
            kernel_h,
            kernel_w,
            pad,
            stride,
            dilation,
            groups,
            dtype,
            out_channels,
            inputs,
            has_pad=composite[0],
            has_bias=composite[1],
            has_activation=composite[2],
        )
        # Numerical check: CLML offload vs. plain OpenCL execution.
        opencl_out = build_and_run(func, inputs, 1, params, device, enable_clml=False)[0]
        clml_out = build_and_run(func, inputs, 1, params, device, enable_clml=True)[0]
        tvm.testing.assert_allclose(
            clml_out[0].asnumpy(), opencl_out[0].asnumpy(), rtol=1e-5, atol=1e-5
        )
        # Structural check: generated JSON matches the expected codegen.
        args = (shape, kernel_h, kernel_w, pad, stride, dilation, groups, dtype, out_channels)
        exp_codegen = _get_conv_expected_codegen(
            *args, has_bias=composite[1], has_activation=composite[2]
        )
        verify_codegen(func, exp_codegen, device, params)
def _get_conv2d_transpose_expected_codegen(
dshape, kshape, channels, kernel_size, strides, padding, dilation, dtype, output_shape
):
attrs = {
"channels": [[str(channels)]],
"data_layout": [["NCHW"]],
"kernel_layout": [["OIHW"]],
"groups": [["1"]],
"dilation": [[str(p) for p in dilation]],
"num_inputs": "2",
"num_outputs": "1",
"padding": [[str(p) for p in padding]],
"kernel_size": [[str(p) for p in kernel_size]],
"shape": [[list(output_shape)]],
"dtype": [[dtype]],
"strides": [[str(s) for s in strides]],
"out_dtype": [[""]],
"out_layout": [[""]],
"output_padding": [["0", "0"]],
}
kshape = [kshape[1], kshape[0], kshape[2], kshape[3]]
exp_codegen = [
{
"op": "input",
"name": "",
"attrs": {"shape": [[list(dshape)]], "dtype": [[str(dtype)]]},
},
{
"op": "const",
"name": "",
"attrs": {"shape": [[list(kshape)]], "dtype": [[str(dtype)]]},
},
{
"op": "kernel",
"name": "nn.conv2d_transpose",
"inputs": [[0, 0, 0], [1, 0, 0]],
"attrs": attrs,
},
]
return exp_codegen
@pytest.mark.parametrize("dtype", ["float32"])
@tvm.testing.requires_openclml
def test_conv2d_transpose(device, dtype):
    """Check CLML conv2d_transpose against plain OpenCL and the expected codegen.

    Each trial is: [data shape, kernel shape (IOHW), channels, kernel_size,
    strides, padding].
    """
    trials = [
        [(1, 256, 100, 100), (256, 64, 4, 4), 64, (4, 4), (2, 2), (1, 1, 1, 1)],
        [(1, 64, 200, 200), (64, 64, 4, 4), 64, (4, 4), (2, 2), (1, 1, 1, 1)],
        [(1, 64, 400, 400), (64, 16, 4, 4), 16, (4, 4), (2, 2), (1, 1, 1, 1)],
    ]
    for (dshape, kshape, channels, kernel_size, strides, padding) in trials:
        x = relay.var("input", shape=dshape, dtype=dtype)
        # NOTE(review): input_arr is created but only referenced via `inputs`.
        input_arr = tvm.nd.array(np.random.uniform(-1, 1, dshape).astype(dtype))
        w = relay.var("wt", shape=kshape, dtype=dtype)
        weight_arr = tvm.nd.array(np.random.uniform(-1, 1, kshape).astype(dtype))
        inputs = {
            "input": input_arr,
        }
        params = {
            "wt": weight_arr,
        }
        y = relay.nn.conv2d_transpose(
            x,
            w,
            channels=channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            kernel_layout="IOHW",
            data_layout="NCHW",
        )
        func = relay.Function([x, w], y)
        mod = IRModule.from_expr(func)
        # Numerical check: CLML offload vs. plain OpenCL execution.
        opencl_out = build_and_run(mod, inputs, 1, params, device, enable_clml=False)[0]
        clml_out = build_and_run(mod, inputs, 1, params, device, enable_clml=True)[0]
        tvm.testing.assert_allclose(
            clml_out[0].asnumpy(), opencl_out[0].asnumpy(), rtol=1e-3, atol=1e-3
        )
        # Structural check against the expected JSON, using the actual output shape.
        args = (
            dshape,
            kshape,
            channels,
            kernel_size,
            strides,
            padding,
            (1, 1),
            dtype,
            opencl_out[0].shape,
        )
        exp_codegen = _get_conv2d_transpose_expected_codegen(*args)
        verify_codegen(mod, exp_codegen, device, params)
@pytest.mark.parametrize("dtype", ["float16"])
@tvm.testing.requires_openclml
def test_batchnorm(device, dtype):
    """Check CLML nn.batch_norm output against the plain OpenCL path."""
    # batch_norm requires CLML SDK v3+.
    if clml.clml_sdk_version() < 3:
        print("Skip due to unsupported CLML version:", clml.clml_sdk_version())
        return
    in_shape = (1, 8, 64, 64)
    channels = 8
    np.random.seed(8)
    input_arr = tvm.nd.array(np.random.uniform(-1, 1, in_shape).astype(dtype))
    inp = relay.var("a", shape=in_shape, dtype=dtype)
    gamma_arr = tvm.nd.array(np.random.uniform(-1, 1, (channels)).astype(dtype))
    beta_arr = tvm.nd.array(np.random.uniform(-1, 1, (channels)).astype(dtype))
    gamma = relay.const(gamma_arr, dtype)
    beta = relay.const(beta_arr, dtype)
    # Use the input's true per-channel statistics as the running mean/variance.
    mean_arr = tvm.nd.array(np.mean(input_arr.asnumpy(), axis=(0, 2, 3), keepdims=False))
    mean = relay.const(mean_arr)
    variance_arr = tvm.nd.array(np.var(input_arr.asnumpy(), axis=(0, 2, 3), keepdims=False))
    variance = relay.const(variance_arr)
    params = {}
    # batch_norm returns a tuple; [0] is the normalized tensor.
    func = relay.nn.batch_norm(inp, gamma, beta, mean, variance, axis=1, epsilon=0.0003)[0]
    mod = IRModule.from_expr(func)
    inputs = {
        "a": input_arr,
    }
    opencl_out = build_and_run(mod, inputs, 1, params, device, enable_clml=False)[0]
    clml_out = build_and_run(mod, inputs, 1, params, device, enable_clml=True)[0]
    tvm.testing.assert_allclose(
        clml_out[0].asnumpy(), opencl_out[0].asnumpy(), rtol=1e-3, atol=1e-3
    )
@pytest.mark.parametrize("dtype", ["float16"])
@tvm.testing.requires_openclml
def test_concat(device, dtype):
    """Check CLML concatenate (axis=1) numerically and against expected codegen."""
    in_shape_1 = (1, 16, 16, 16)
    in_shape_2 = (1, 16, 16, 16)
    a = relay.var("input_1", shape=in_shape_1, dtype=dtype)
    b = relay.var("input_2", shape=in_shape_2, dtype=dtype)
    # NOTE(review): low/high are assigned but unused below.
    low, high = -1, 1
    inputs = {
        "input_1": tvm.nd.array(np.random.uniform(-1, 1, in_shape_1).astype(dtype)),
        "input_2": tvm.nd.array(np.random.uniform(-1, 1, in_shape_2).astype(dtype)),
    }
    params = {}
    func = relay.concatenate((a, b), axis=1)
    mod = IRModule.from_expr(func)
    opencl_out = build_and_run(mod, inputs, 1, params, device, enable_clml=False)[0]
    clml_out = build_and_run(mod, inputs, 1, params, device, enable_clml=True)[0]
    tvm.testing.assert_allclose(
        clml_out[0].asnumpy(), opencl_out[0].asnumpy(), rtol=1e-3, atol=1e-3
    )
    # Expected JSON: two inputs feeding a single concatenate kernel node.
    exp_codegen = [
        {
            "attrs": {
                "dtype": [[dtype]],
                "shape": [[list(in_shape_1)]],
            },
            "name": "",
            "op": "input",
        },
        {
            "attrs": {
                "dtype": [[dtype]],
                "shape": [[list(in_shape_2)]],
            },
            "name": "",
            "op": "input",
        },
        {
            "attrs": {
                "axis": [["1"]],
                "dtype": [[dtype]],
                "num_inputs": "2",
                "num_outputs": "1",
                "shape": [[list(clml_out[0].shape)]],
            },
            "inputs": [[0, 0, 0], [1, 0, 0]],
            "name": "concatenate",
            "op": "kernel",
        },
    ]
    verify_codegen(func, exp_codegen, device, params)
def _get_pool_expected_codegen(input_shape, pool_size, stride, padding, pool_type, dtype):
import math
pool_height = math.floor(((input_shape[2] + padding[2] - pool_size[0]) / stride[0]) + 1)
pool_width = math.floor(((input_shape[3] + padding[3] - pool_size[1]) / stride[1]) + 1)
output_shape = [input_shape[0], input_shape[1], pool_height, pool_width]
attrs = {
"ceil_mode": [["0"]],
"dilation": [["1", "1"]],
"layout": [["NCHW"]],
"num_inputs": "1",
"num_outputs": "1",
"out_layout": [[""]],
"padding": [[str(p) for p in padding]],
"pool_size": [[str(p) for p in pool_size]],
"shape": [[list(output_shape)]],
"dtype": [[dtype]],
"strides": [[str(s) for s in stride]],
}
if sum(padding):
attrs["count_include_pad"] = [["0"]]
exp_codegen = [
{
"op": "input",
"name": "",
"attrs": {"shape": [[list(input_shape)]], "dtype": [[str(dtype)]]},
},
{
"op": "kernel",
"name": "nn.avg_pool2d" if pool_type == "avg" else "nn.max_pool2d",
"inputs": [[0, 0, 0]],
"attrs": attrs,
},
]
return exp_codegen
@pytest.mark.parametrize("dtype", ["float16"])
@tvm.testing.requires_openclml
def test_pool(device, dtype):
    """Compare CLML max/avg pooling against the OpenCL reference over shapes
    taken from common CNN topologies, and check the generated JSON."""
    trials = [
        # input size pool_size stride padding
        [(1, 64, 147, 147), (3, 3), (2, 2), (0, 0, 0, 0), "max"],
        [(1, 192, 71, 71), (3, 3), (2, 2), (0, 0, 0, 0), "max"],
        [(1, 288, 35, 35), (3, 3), (2, 2), (0, 0, 0, 0), "max"],
        [(1, 768, 17, 17), (3, 3), (2, 2), (0, 0, 0, 0), "max"],
        [(1, 2048, 17, 17), (3, 3), (2, 2), (0, 0, 0, 0), "max"],
        [(1, 192, 35, 35), (3, 3), (1, 1), (0, 0, 1, 1), "avg"],
        [(1, 256, 35, 35), (3, 3), (1, 1), (0, 0, 1, 1), "avg"],
        [(1, 288, 35, 35), (3, 3), (1, 1), (0, 0, 1, 1), "avg"],
        [(1, 768, 17, 17), (3, 3), (1, 1), (0, 0, 1, 1), "avg"],
        [(1, 1280, 8, 8), (3, 3), (1, 1), (0, 0, 1, 1), "avg"],
    ]
    params = {}
    for (
        input_shape,
        pool_size,
        stride,
        padding,
        pooling_type,
    ) in trials:
        a = relay.var("input_1", shape=input_shape, dtype=dtype)
        input_arr = tvm.nd.array(np.random.uniform(-1, 1, input_shape).astype(dtype))
        inputs = {
            "input_1": input_arr,
        }
        if pooling_type == "max":
            func = relay.nn.max_pool2d(a, pool_size=pool_size, strides=stride, padding=padding)
        else:
            func = relay.nn.avg_pool2d(a, pool_size=pool_size, strides=stride, padding=padding)
        mod = IRModule.from_expr(func)
        # Reference (pure OpenCL) vs CLML-offloaded run of the same module.
        opencl_out = build_and_run(mod, inputs, 1, params, device, enable_clml=False)[0]
        clml_out = build_and_run(mod, inputs, 1, params, device, enable_clml=True)[0]
        tvm.testing.assert_allclose(
            clml_out[0].asnumpy(), opencl_out[0].asnumpy(), rtol=1e-3, atol=1e-3
        )
        args = (input_shape, pool_size, stride, padding, pooling_type, dtype)
        exp_codegen = _get_pool_expected_codegen(*args)
        verify_codegen(func, exp_codegen, device, params)
@pytest.mark.parametrize("dtype", ["float32"])
@tvm.testing.requires_openclml
def test_dense(device, dtype):
    """Compare CLML nn.dense (optionally followed by bias_add) against the
    OpenCL reference and validate the partitioned module's JSON codegen."""

    def _get_model(x_shape, k_shape, has_bias=False):
        # Builds the relay expression, random params/inputs and the expected
        # codegen node list for a dense (+ optional bias) subgraph.
        x = relay.var("x", shape=(x_shape), dtype=dtype)
        kernel = relay.var("kernel", shape=(k_shape), dtype=dtype)
        out = relay.nn.dense(x, kernel, units=k_shape[0])
        params = {"kernel": tvm.nd.array(np.random.uniform(-1, 1, k_shape).astype(dtype))}
        inputs = {"x": tvm.nd.array(np.random.uniform(-1, 1, x_shape).astype(dtype))}
        exp_codegen = [
            {
                "attrs": {
                    "dtype": [[dtype]],
                    "shape": [[list(x_shape)]],
                },
                "name": "",
                "op": "input",
            },
            {
                "attrs": {
                    "dtype": [[dtype]],
                    "shape": [[list(k_shape)]],
                },
                "name": "",
                "op": "const",
            },
        ]
        dense_node = {
            "attrs": {
                "num_inputs": "2",
                "num_outputs": "1",
                "dtype": [[dtype]],
                "out_dtype": [[""]],
                "shape": [[[x_shape[0], k_shape[0]]]],
                "units": [[str(k_shape[0])]],
            },
            "inputs": [[0, 0, 0], [1, 0, 0]],
            "name": "nn.dense",
            "op": "kernel",
        }
        exp_codegen.append(dense_node)
        if has_bias:
            bias = relay.var("bias", shape=(k_shape[0],), dtype=dtype)
            out = relay.nn.bias_add(out, bias)
            # The bias const appears as a (1, units) tensor in the emitted JSON.
            bias_data_node = {
                "attrs": {
                    "dtype": [[dtype]],
                    "shape": [[list((1, k_shape[0]))]],
                },
                "name": "",
                "op": "const",
            }
            exp_codegen.append(bias_data_node)
            # bias_add lowers to a plain "add" consuming the dense output (node 2).
            bias_node = {
                "attrs": {
                    "num_inputs": "2",
                    "num_outputs": "1",
                    "dtype": [[dtype]],
                    "shape": [[[x_shape[0], k_shape[0]]]],
                },
                "inputs": [[2, 0, 0], [3, 0, 0]],
                "name": "add",
                "op": "kernel",
            }
            exp_codegen.append(bias_node)
            params["bias"] = tvm.nd.array(np.random.uniform(-1, 1, (k_shape[0],)).astype(dtype))
        return out, params, inputs, exp_codegen

    def _verify(out, params, inputs, exp_codegen):
        # Numeric comparison against the non-CLML build, then codegen check.
        mod = IRModule.from_expr(out)
        opencl_out = build_and_run(mod, inputs, 1, params, device, enable_clml=False)[0]
        clml_out = build_and_run(mod, inputs, 1, params, device, enable_clml=True)[0]
        tvm.testing.assert_allclose(
            clml_out[0].asnumpy(), opencl_out[0].asnumpy(), rtol=1e-2, atol=1e-2
        )
        verify_codegen(out, exp_codegen, device, params)

    _verify(*(_get_model((5, 16), (32, 16), False)))
    _verify(*(_get_model((1, 16), (32, 16), True)))
@pytest.mark.parametrize("dtype", ["float32"])
@tvm.testing.requires_openclml
def test_binary_ops(device, dtype):
    """Elementwise binary ops: results must match the plain OpenCL build and
    the CLML-partitioned graph must leave no operator on TVM native."""

    def _build_case(lhs_shape, rhs_shape, op_fn):
        lhs = relay.var("a", shape=lhs_shape, dtype=dtype)
        rhs = relay.var("b", shape=rhs_shape, dtype=dtype)
        expr = op_fn(lhs, rhs)
        feed = {
            "a": tvm.nd.array(np.random.uniform(-1, 1, lhs_shape).astype(dtype)),
            "b": tvm.nd.array(np.random.uniform(-1, 1, rhs_shape).astype(dtype)),
        }
        return expr, {}, feed

    def _check(expr, params, feed):
        mod = IRModule.from_expr(expr)
        reference = build_and_run(mod, feed, 1, params, device, enable_clml=False)[0]
        candidate = build_and_run(mod, feed, 1, params, device, enable_clml=True)[0]
        tvm.testing.assert_allclose(
            candidate[0].asnumpy(), reference[0].asnumpy(), rtol=1e-3, atol=1e-3
        )
        # Check to make sure these ops are offloaded to CLML instead of TVM.
        with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
            mod = clml.partition_for_clml(mod, params)
            tvm_op_count = get_cpu_op_count(mod)
            assert tvm_op_count == 0, "Got {} TVM Native Compute partitions, expected 0".format(
                tvm_op_count
            )

    binary_ops = (
        relay.add,
        relay.subtract,
        relay.multiply,
        relay.divide,
        relay.minimum,
        relay.maximum,
    )
    for op_fn in binary_ops:
        _check(*_build_case((1, 16), (1, 16), op_fn))
@pytest.mark.parametrize("dtype", ["float32"])
@tvm.testing.requires_openclml
def test_unary_ops(device, dtype):
    """Unary ops (softmax, relu): results must match the plain OpenCL build
    and the CLML-partitioned graph must leave no operator on TVM native."""

    def _build_case(in_shape, op_fn):
        operand = relay.var("a", shape=in_shape, dtype=dtype)
        expr = op_fn(operand)
        feed = {"a": tvm.nd.array(np.random.uniform(-1, 1, in_shape).astype(dtype))}
        return expr, {}, feed

    def _check(expr, params, feed):
        mod = IRModule.from_expr(expr)
        reference = build_and_run(mod, feed, 1, params, device, enable_clml=False)[0]
        candidate = build_and_run(mod, feed, 1, params, device, enable_clml=True)[0]
        tvm.testing.assert_allclose(
            candidate[0].asnumpy(), reference[0].asnumpy(), rtol=1e-3, atol=1e-3
        )
        # Check to make sure these ops are offloaded to CLML instead of TVM.
        with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
            mod = clml.partition_for_clml(mod, params)
            tvm_op_count = get_cpu_op_count(mod)
            assert tvm_op_count == 0, "Got {} TVM Native Compute partitions, expected 0".format(
                tvm_op_count
            )

    for op_fn in (relay.nn.softmax, relay.nn.relu):
        _check(*_build_case((1, 16), op_fn))
@pytest.mark.parametrize("dtype", ["float32", "float16"])
@tvm.testing.requires_openclml
def test_depth_to_space(device, dtype):
    """Compare CLML nn.depth_to_space against the OpenCL reference and verify
    the partitioned module's JSON codegen."""

    def _get_model(a_shape, block_size):
        a = relay.var("a", shape=(a_shape), dtype=dtype)
        out = relay.nn.depth_to_space(a, block_size)
        inputs = {"a": tvm.nd.array(np.random.uniform(-1, 1, a_shape).astype(dtype))}
        params = {}
        return out, params, inputs

    def _verify(out, params, inputs):
        mod = IRModule.from_expr(out)
        opencl_out = build_and_run(mod, inputs, 1, params, device, enable_clml=False)[0]
        clml_out = build_and_run(mod, inputs, 1, params, device, enable_clml=True)[0]
        tvm.testing.assert_allclose(
            clml_out[0].asnumpy(), opencl_out[0].asnumpy(), rtol=1e-3, atol=1e-3
        )
        # Check to make sure these ops are offloaded to CLML instead of TVM.
        exp_codegen = [
            {
                "attrs": {
                    "dtype": [[dtype]],
                    "shape": [[list(inputs["a"].shape)]],
                },
                "name": "",
                "op": "input",
            },
            {
                "attrs": {
                    "block_size": [[str(int(out.attrs.block_size))]],
                    "layout": [["NCHW"]],
                    "mode": [["DCR"]],
                    "dtype": [[dtype]],
                    "num_inputs": "1",
                    "num_outputs": "1",
                    "shape": [[list(clml_out[0].shape)]],
                },
                "inputs": [[0, 0, 0]],
                "name": "nn.depth_to_space",
                "op": "kernel",
            },
        ]
        verify_codegen(out, exp_codegen, device, params)

    _verify(*(_get_model((1, 64, 8, 8), 4)))
    _verify(*(_get_model((1, 64, 8, 8), 8)))
@pytest.mark.parametrize("dtype", ["float32", "float16"])
@tvm.testing.requires_openclml
def test_resize_bilinear(device, dtype):
    """Compare CLML bilinear upsampling against the OpenCL reference and verify
    the partitioned module's JSON codegen."""

    def _get_model(a_shape, scale, align_corners):
        a = relay.var("a", shape=(a_shape), dtype=dtype)
        out = relay.nn.upsampling(
            a, scale_h=scale[0], scale_w=scale[1], method="bilinear", align_corners=align_corners
        )
        inputs = {"a": tvm.nd.array(np.random.uniform(-1, 1, a_shape).astype(dtype))}
        params = {}
        return out, params, inputs

    def _verify(out, params, inputs):
        mod = IRModule.from_expr(out)
        opencl_out = build_and_run(mod, inputs, 1, params, device, enable_clml=False)[0]
        clml_out = build_and_run(mod, inputs, 1, params, device, enable_clml=True)[0]
        tvm.testing.assert_allclose(
            clml_out[0].asnumpy(), opencl_out[0].asnumpy(), rtol=1e-3, atol=1e-3
        )
        # Check to make sure these ops are offloaded to CLML instead of TVM.
        exp_codegen = [
            {
                "attrs": {
                    "dtype": [[dtype]],
                    "shape": [[list(inputs["a"].shape)]],
                },
                "name": "",
                "op": "input",
            },
            {
                "attrs": {
                    "scale_h": [[str(int(out.attrs.scale_h))]],
                    "scale_w": [[str(int(out.attrs.scale_w))]],
                    "layout": [["NCHW"]],
                    "method": [[out.attrs.method]],
                    "align_corners": [[str(out.attrs.align_corners)]],
                    "dtype": [[dtype]],
                    "num_inputs": "1",
                    "num_outputs": "1",
                    "shape": [[list(clml_out[0].shape)]],
                },
                "inputs": [[0, 0, 0]],
                "name": "nn.upsampling",
                "op": "kernel",
            },
        ]
        verify_codegen(out, exp_codegen, device, params)

    _verify(*(_get_model((1, 16, 8, 8), (2, 2), False)))
    _verify(*(_get_model((1, 16, 7, 7), (2, 2), True)))
@pytest.mark.parametrize("dtype", ["float32"])
@tvm.testing.requires_openclml
def test_batch_matmul(device, dtype):
    """Compare CLML nn.batch_matmul against the OpenCL reference and verify
    the partitioned module's JSON codegen."""

    def _get_model(a_shape, b_shape, a_transpose, b_transpose):
        a = relay.var("a", shape=(a_shape), dtype=dtype)
        b = relay.var("b", shape=(b_shape), dtype=dtype)
        out = relay.nn.batch_matmul(a, b, transpose_a=a_transpose, transpose_b=b_transpose)
        inputs = {
            "a": tvm.nd.array(np.random.uniform(-1, 1, a_shape).astype(dtype)),
            "b": tvm.nd.array(np.random.uniform(-1, 1, b_shape).astype(dtype)),
        }
        params = {}
        return out, params, inputs

    def _verify(out, params, inputs):
        mod = IRModule.from_expr(out)
        opencl_out = build_and_run(mod, inputs, 1, params, device, enable_clml=False)[0]
        clml_out = build_and_run(mod, inputs, 1, params, device, enable_clml=True)[0]
        tvm.testing.assert_allclose(
            clml_out[0].asnumpy(), opencl_out[0].asnumpy(), rtol=1e-3, atol=1e-3
        )
        # Check to make sure these ops are offloaded to CLML instead of TVM.
        exp_codegen = [
            {
                "attrs": {
                    "dtype": [[dtype]],
                    "shape": [[list(inputs["a"].shape)]],
                },
                "name": "",
                "op": "input",
            },
            {
                "attrs": {
                    "dtype": [[dtype]],
                    "shape": [[list(inputs["b"].shape)]],
                },
                "name": "",
                "op": "input",
            },
            {
                "attrs": {
                    "transpose_a": [[str(int(out.attrs.transpose_a))]],
                    "transpose_b": [[str(int(out.attrs.transpose_b))]],
                    "out_dtype": [[""]],
                    "dtype": [[dtype]],
                    "num_inputs": "2",
                    "num_outputs": "1",
                    "shape": [[list(clml_out[0].shape)]],
                },
                "inputs": [[0, 0, 0], [1, 0, 0]],
                "name": "nn.batch_matmul",
                "op": "kernel",
            },
        ]
        verify_codegen(out, exp_codegen, device, params)

    _verify(*(_get_model((1, 128, 32), (1, 128, 32), False, True)))
    _verify(*(_get_model((1, 128, 128), (1, 32, 128), False, True)))


# Allow running this file directly outside of pytest.
if __name__ == "__main__":
    tvm.testing.main()
| 30,414 | 34.531542 | 98 | py |
tvm | tvm-main/tests/python/contrib/test_clml/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Infrastructure and tests for CLML"""
| 825 | 44.888889 | 62 | py |
tvm | tvm-main/tests/python/contrib/test_clml/test_compiler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CLML compiler tests."""
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from tvm.relay.op.contrib import clml
import pytest
@tvm.testing.requires_openclml
def test_device_annotation():
    """Smoke test: a CLML-partitioned mobilenet must build for the adreno
    OpenCL target without raising."""
    mod, params = relay.testing.mobilenet.get_workload(batch_size=1)
    mod = clml.partition_for_clml(mod, params)
    with tvm.transform.PassContext(opt_level=3):
        # Clear the TE compiler cache so the build starts from a clean state.
        relay.backend.te_compiler.get().clear()
        lib = relay.build(
            mod,
            target="opencl -device=adreno",
            target_host="llvm -mtriple=aarch64-linux-gnu",
            params=params,
        )


# Allow running this file directly outside of pytest.
if __name__ == "__main__":
    tvm.testing.main()
| 1,459 | 32.953488 | 68 | py |
tvm | tvm-main/tests/python/contrib/test_clml/infrastructure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from itertools import zip_longest, combinations
import json
import os
import warnings
import numpy as np
import tvm
from tvm import relay
from tvm import rpc
# from tvm.contrib.debugger import debug_runtime as graph_executor
from tvm.contrib import graph_executor
from tvm.relay.op.contrib import clml
from tvm.contrib import utils
from tvm import autotvm
from tvm.autotvm.measure import request_remote
from tvm.relay.expr_functor import ExprMutator, Call
class Device:
    """
    Configuration for CLML tests.

    Check tests/python/contrib/clml/ for the presence of a test_config.json file.
    This file can be used to override the default configuration here which will attempt to run the
    Open CLML runtime tests locally if the runtime is available. Changing the configuration
    will allow these runtime tests to be offloaded to a remote Snapdragon device via a tracker for example.

    Notes
    -----
    The test configuration will be loaded once when the class is created. If the configuration
    changes between tests, any changes will not be picked up.

    Parameters
    ----------
    device : RPCSession
        Allows tests to connect to and use remote device.

    Attributes
    ----------
    connection_type : str
        Details the type of RPC connection to use. Options:
        local - Use the local device,
        tracker - Connect to a tracker to request a remote device,
        remote - Connect to a remote device directly.
    host : str
        Specify IP address or hostname of remote target.
    port : int
        Specify port number of remote target.
    target : str
        The compilation target.
    device_key : str
        The device key of the remote target. Use when connecting to a remote device via a tracker.
    cross_compile : str
        Specify path to cross compiler to use when connecting a remote device from a non-arm platform.
    """

    # Defaults can be overridden through environment variables (TVM_TRACKER_HOST,
    # TVM_TRACKER_PORT, RPC_DEVICE_KEY, TVM_NDK_CC).
    connection_type = "tracker"
    host = os.getenv("TVM_TRACKER_HOST", "localhost")
    port = int(os.getenv("TVM_TRACKER_PORT", 9090))
    target = "opencl"
    target_host = "llvm -mtriple=aarch64-linux-gnu"
    device_key = os.getenv("RPC_DEVICE_KEY", "android")
    cross_compile = os.getenv("TVM_NDK_CC", "aarch64-linux-android-g++")

    def __init__(self):
        """Keep remote device for lifetime of object."""
        self.device = self._get_remote()

    @classmethod
    def _get_remote(cls):
        """Get a remote (or local) device to use for testing."""
        if cls.connection_type == "tracker":
            device = request_remote(cls.device_key, cls.host, cls.port, timeout=1000)
        elif cls.connection_type == "remote":
            device = rpc.connect(cls.host, cls.port)
        elif cls.connection_type == "local":
            device = rpc.LocalSession()
        else:
            raise ValueError(
                "connection_type in test_config.json should be one of: " "local, tracker, remote."
            )
        return device
def get_cpu_op_count(mod):
    """Traverse graph counting ops offloaded to TVM."""

    class _OpCounter(tvm.relay.ExprVisitor):
        """Counts call nodes whose target is a primitive tvm.ir.Op."""

        def __init__(self):
            super().__init__()
            self.count = 0

        def visit_call(self, call):
            if isinstance(call.op, tvm.ir.Op):
                self.count += 1
            super().visit_call(call)

    counter = _OpCounter()
    counter.visit(mod["main"])
    return counter.count
def get_non_cpu_op_count(mod):
    """Traverse graph counting ops not offloaded to TVM."""

    class _ExternCounter(tvm.relay.ExprVisitor):
        """Counts call nodes whose target is not a primitive tvm.ir.Op."""

        def __init__(self):
            super().__init__()
            self.count = 0

        def visit_call(self, call):
            if not isinstance(call.op, tvm.ir.Op):
                self.count += 1
            super().visit_call(call)

    visitor = _ExternCounter()
    visitor.visit(mod["main"])
    return visitor.count
def skip_codegen_test():
    """Skip test if it requires the CLML codegen and it's not present.

    Returns
    -------
    bool
        True when the test should be skipped, False otherwise.
    """
    if not tvm.get_global_func("relay.ext.clml", True):
        print("Skip because CLML codegen is not available.")
        return True
    # Previously fell off the end and returned None implicitly; make the
    # "do not skip" result explicit for readers and skipif() callers.
    return False
def build_module(mod, target, target_host, params=None, enable_clml=True, tune_log=""):
    """Build module with option to build for CLML.

    When *enable_clml* is set, the module is preprocessed and partitioned for
    CLML before compilation.  *tune_log* is passed to
    autotvm.apply_history_best (an empty string presumably applies no tuned
    schedules — confirm if this matters for new callers).
    """
    if isinstance(mod, tvm.relay.expr.Call):
        mod = tvm.IRModule.from_expr(mod)
    with autotvm.apply_history_best(tune_log):
        with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
            if enable_clml:
                mod = clml.preprocess_module(mod)
                mod = clml.partition_for_clml(mod, params)
            # Clear the TE compiler cache so repeated builds start clean.
            relay.backend.te_compiler.get().clear()
            return relay.build(mod, target=target, target_host=target_host, params=params)
def build_and_run(
    mod, inputs, outputs, params, device, enable_clml=True, no_runs=1, config=None, tune_log=""
):
    """Build and run the relay module.

    Parameters
    ----------
    mod : relay expression or IRModule to compile and execute.
    inputs : dict of input name -> tvm.nd.array.
    outputs : int, number of outputs to fetch per run.
    no_runs : int, number of executions; one output list is returned per run.

    Returns
    -------
    list of list of output tensors, one inner list per run.
    """
    if config is None:
        config = {}

    try:
        libm = build_module(mod, device.target, device.target_host, params, enable_clml, tune_log)

        # Sanity-check that each CLML submodule serializes to valid JSON.
        # NOTE: the loop variable must not shadow the `mod` parameter.
        for clml_mod in extract_clml_modules(libm):
            source = clml_mod.get_source("json")
            codegen = json.loads(source)["nodes"]
            # remove input and const names as these cannot be predetermined
            for node in range(len(codegen)):
                if codegen[node]["op"] == "input" or codegen[node]["op"] == "const":
                    codegen[node]["name"] = ""
            # The serialized string itself is unused; dumping just validates
            # that the normalized codegen is JSON-serializable.
            json.dumps(codegen, sort_keys=True, indent=2)
    except Exception as e:
        err_msg = "The module could not be built.\n"
        if config:
            err_msg += f"The test failed with the following parameters: {config}\n"
        err_msg += str(e)
        raise Exception(err_msg)

    lib = update_lib(libm, device.device, device.cross_compile)
    gen_module = graph_executor.GraphModule(lib["default"](device.device.cl(0)))
    gen_module.set_input(**inputs)
    out = []
    for _ in range(no_runs):
        gen_module.run()
        out.append([gen_module.get_output(i) for i in range(outputs)])
    return out
def update_lib(lib, device, cross_compile):
    """Export the library to the remote/local device."""
    lib_name = "mod.so"
    temp_dir = utils.tempdir()
    lib_path = temp_dir.relpath(lib_name)
    # Use the cross compiler when one is configured (remote ARM targets).
    export_kwargs = {"cc": cross_compile} if cross_compile else {}
    lib.export_library(lib_path, **export_kwargs)
    device.upload(lib_path)
    return device.load_module(lib_name)
def extract_clml_modules(module):
    """Get the CLML module(s) from llvm module."""
    imported = module.get_lib().imported_modules
    return [sub for sub in imported if sub.type_key == "clml"]
def verify_codegen(
    mod,
    known_good_codegen,
    device,
    params,
    num_clml_modules=1,
    tvm_ops=0,
):
    """Check clml codegen against a known good output.

    Partitions *mod* for CLML, asserts the expected number of TVM-native
    operators (*tvm_ops*) and CLML partitions (*num_clml_modules*), builds the
    module, and compares the JSON emitted by every CLML submodule against
    *known_good_codegen*.
    """
    if isinstance(mod, tvm.relay.expr.Call):
        mod = tvm.IRModule.from_expr(mod)
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
        mod = clml.preprocess_module(mod)
        mod = clml.partition_for_clml(mod, params)
        tvm_op_count = get_cpu_op_count(mod)
        assert tvm_op_count == tvm_ops, "Got {} TVM operators, expected {}".format(
            tvm_op_count, tvm_ops
        )
        # CLML partitions are global functions whose name contains "clml".
        partition_count = 0
        for global_var in mod.get_global_vars():
            if "clml" in global_var.name_hint:
                partition_count += 1
        assert (
            num_clml_modules == partition_count
        ), "Got {} Open CLML partitions, expected {}".format(partition_count, num_clml_modules)
    relay.backend.te_compiler.get().clear()
    module = relay.build(mod, target=device.target, target_host=device.target_host, params=params)
    clml_modules = extract_clml_modules(module)
    assert len(clml_modules) == num_clml_modules, (
        f"The number of CLML modules produced ({len(clml_modules)}) does not "
        f"match the expected value ({num_clml_modules})."
    )
    for mod in clml_modules:
        source = mod.get_source("json")
        codegen = json.loads(source)["nodes"]
        # remove input and const names as these cannot be predetermined
        for node in range(len(codegen)):
            if codegen[node]["op"] == "input" or codegen[node]["op"] == "const":
                codegen[node]["name"] = ""
        # Compare via sorted-key JSON dumps so dict key order is irrelevant.
        codegen_str = json.dumps(codegen, sort_keys=True, indent=2)
        known_good_codegen_str = json.dumps(known_good_codegen, sort_keys=True, indent=2)
        assert codegen_str == known_good_codegen_str, (
            f"The JSON produced by codegen does not match the expected result. \n"
            f"Actual={codegen_str} \n"
            f"Expected={known_good_codegen_str}"
        )
| 9,728 | 34.900369 | 107 | py |
tvm | tvm-main/tests/python/contrib/test_bnns/test_onnx_topologies.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""BNNS pattern detection check"""
import pytest
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.contrib import utils, graph_executor
from tvm.contrib.download import download_testdata
from tvm.relay.op.contrib.bnns import partition_for_bnns
import numpy as np
pytest.importorskip("onnx")
bnns_is_absent = tvm.get_global_func("relay.ext.bnns", True) is None
TARGET = "llvm"
INPUT_SHAPE = [1, 3, 224, 224]
BASE_MODEL_URL = "https://github.com/onnx/models/raw/bd206494e8b6a27b25e5cf7199dbcdbfe9d05d1c/"
MODEL_URL_COLLECTION = {
"BERT": "text/machine_comprehension/bert-squad/model/bertsquad-10.onnx",
"MobileNet-v2": "vision/classification/mobilenet/model/mobilenetv2-7.onnx",
"ResNet50-v1": "vision/classification/resnet/model/resnet50-v1-7.onnx",
"ResNet50-v2": "vision/classification/resnet/model/resnet50-v2-7.onnx",
"SqueezeNet-v1.1": "vision/classification/squeezenet/model/squeezenet1.1-7.onnx",
"SqueezeNet-v1.0": "vision/classification/squeezenet/model/squeezenet1.0-7.onnx",
"Inception-v1": "vision/classification/inception_and_googlenet/inception_v1/model/inception-v1-7.onnx",
"Inception-v2": "vision/classification/inception_and_googlenet/inception_v2/model/inception-v2-7.onnx",
}
def get_onnx_input_name(model):
    """Return the names of the model's true inputs (graph inputs that are not
    initializers).

    The result preserves the graph's declared input order.  The previous
    implementation returned ``list(set(...) - set(...))``, whose ordering is
    arbitrary and could vary between runs.
    """
    initializer_names = {node.name for node in model.graph.initializer}
    return [node.name for node in model.graph.input if node.name not in initializer_names]
def get_model_url(model_name):
    """Build the full download URL for *model_name* from the ONNX model zoo."""
    relative_path = MODEL_URL_COLLECTION[model_name]
    return BASE_MODEL_URL + relative_path
def get_name_from_url(url):
    """Return the file name component after the last '/' in *url*, stripped."""
    return url.rsplit("/", 1)[-1].strip()
def find_of_download(model_name):
    """Download *model_name* (cached by tvm testdata) and return its local path."""
    url = get_model_url(model_name)
    return download_testdata(url, get_name_from_url(url), module="models")
def get_model(model_name):
    """Download *model_name*, load it with onnx and convert it to Relay.

    Returns (mod, params, input_dict) where input_dict maps each true graph
    input name to INPUT_SHAPE.
    """
    # The module-level `pytest.importorskip("onnx")` only checks availability;
    # its return value is not assigned, so the name `onnx` was never bound and
    # `onnx.load` below raised NameError.  Import it here explicitly.
    import onnx

    model_path = find_of_download(model_name)
    onnx_model = onnx.load(model_path)
    input_names = get_onnx_input_name(onnx_model)
    input_dict = {}
    for name in input_names:
        input_dict[name] = INPUT_SHAPE  # TODO: hardcode
    mod, params = relay.frontend.from_onnx(onnx_model, input_dict, freeze_params=True)
    return mod, params, input_dict
def simplify_model(mod):
    """
    Simplify execution graph

    At least merge BatchNorm into convolution. For this purpose decompose BN primitive
    into simple operation which can be calculated as const expr and after that merged
    into nearest conv/dense primitive.
    """
    simplification_passes = [
        transform.InferType(),
        transform.FoldConstant(),
        transform.SimplifyInference(),
        transform.FoldScaleAxis(),
    ]
    return tvm.transform.Sequential(simplification_passes)(mod)
def process(model_name):
    """Build *model_name* with and without BNNS offload, run both builds on
    CPU via an exported/reloaded library, and compare the first outputs."""
    temp = utils.tempdir()
    model, params, input_dict = get_model(model_name)

    def run(mod, target, simplify=True, with_bnns=False):
        # Build, export to a temp .tar, reload, and execute one inference.
        # NOTE(review): no inputs are set before run(); presumably default
        # (zero) inputs are acceptable for this comparison — confirm.
        with tvm.transform.PassContext(opt_level=3):
            if simplify:
                mod = simplify_model(mod)
            if with_bnns:
                mod = partition_for_bnns(mod)
            graph_module = relay.build(mod, target=target, params=params)
        lib_name = "deploy.tar"
        path_dso = temp.relpath(lib_name)
        graph_module.export_library(path_dso)
        dev = tvm.cpu(0)
        loaded_lib = tvm.runtime.load_module(path_dso)
        module = graph_executor.GraphModule(loaded_lib["default"](dev))
        module.run()
        return module.get_output(0).numpy()

    res_llvm = run(model, TARGET, simplify=True, with_bnns=False)
    res_bnns = run(model, TARGET, simplify=True, with_bnns=True)
    tvm.testing.assert_allclose(
        res_llvm,
        res_bnns,
        atol=0.002,
        rtol=0.007,
    )
@pytest.mark.skip(reason="Manually disabled because of huge complexity")
@pytest.mark.skipif(bnns_is_absent, reason="BNNS runtime is absent")
@pytest.mark.parametrize("model_name", MODEL_URL_COLLECTION.keys())
def test_topology(model_name):
    """End-to-end comparison of each model-zoo topology with/without BNNS offload."""
    process(model_name)
| 4,841 | 33.340426 | 107 | py |
tvm | tvm-main/tests/python/contrib/test_bnns/test_normalization.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""BNNS integration normalization tests."""
import numpy as np
import math
import pytest
import tvm
from tvm import relay
from tvm import testing
from .infrastructure import (
Device,
skip_runtime_test,
skip_codegen_test,
verify_codegen,
build_and_run,
verify,
generate_trials,
)
def _get_model(
    shape, b_shape, s_shape, dtype, var_names, axis=1, epsilon=1e-5, center=True, scale=True
):
    """Return an instance_norm model and any parameters it may have.

    *b_shape*/*s_shape* are the bias and scale tensor shapes (identical at all
    current call sites).  Both tensors are embedded as relay constants; the
    returned params dict carries the raw arrays under "b" and "s".
    """
    src = relay.var(next(var_names), shape=shape, dtype=dtype)
    params = {}
    b = tvm.nd.array(np.random.uniform(-128, 127, b_shape).astype(dtype))
    params["b"] = b
    b = relay.const(b, dtype)
    # Previously the scale tensor reused b_shape and was stored under
    # params["b"], silently clobbering the bias entry.  Use s_shape and a
    # distinct key.
    s = tvm.nd.array(np.random.uniform(-128, 127, s_shape).astype(dtype))
    params["s"] = s
    s = relay.const(s, dtype)
    out = relay.nn.instance_norm(src, s, b, axis, epsilon, center, scale)
    return out, params
def _get_expected_codegen(shape, axis, center, scale, dtype, offload_on_bnns):
output_shape = shape
name = "nn.instance_norm"
node = {
"op": "kernel",
"name": name,
"inputs": [],
"attrs": {
"num_outputs": "1",
"axis": [[str(axis)]],
"center": [[str(int(center))]],
"scale": [[str(int(scale))]],
"shape": [[list(output_shape)]],
"dtype": [[dtype]],
"epsilon": [["1.0000000000000001e-05"]],
},
}
inputs = [
{"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[str(dtype)]]}},
{
"op": "const",
"name": "",
"attrs": {"shape": [[[shape[axis]]]], "dtype": [[str(dtype)]]},
},
{
"op": "const",
"name": "",
"attrs": {"shape": [[[shape[axis]]]], "dtype": [[str(dtype)]]},
},
]
input_idx = 0
for _ in range(len(inputs)):
node["inputs"].append([input_idx, 0, 0])
input_idx += 1
node["attrs"]["num_inputs"] = str(len(inputs))
inputs.append(node)
return inputs
@pytest.mark.skipif(skip_runtime_test(), reason="Skip because BNNS codegen is not available")
def test_normalization():
    """Run instance_norm over a grid of shapes/axes/center/scale flags and
    compare the BNNS-offloaded result against the plain LLVM build."""
    device = Device()
    np.random.seed(0)
    dtype = "float32"
    shapes_config = [
        [1, 2, 3, 4],
        [3, 2, 3, 4],
        [2, 2, 3],
        [16, 32, 32],
        [5, 3],
    ]
    axes = [-1, 0, 1, 2]
    for shape in shapes_config:
        for axis in axes:
            # Rank-2 inputs are only exercised with axis 0 here.
            if len(shape) == 2 and axis != 0:
                continue
            for center in [False, True]:
                for scale in [False, True]:
                    outputs = []
                    inputs = {
                        "src": tvm.nd.array(np.random.uniform(-128, 127, shape).astype(dtype)),
                    }
                    func, params = _get_model(
                        shape,
                        [shape[axis]],
                        [shape[axis]],
                        dtype,
                        var_names=iter(inputs),
                        axis=axis,
                        center=center,
                        scale=scale,
                    )
                    # Build once without BNNS (reference) and once with BNNS.
                    for enable_bnns in [False, True]:
                        outputs.append(
                            build_and_run(
                                func,
                                inputs,
                                1,
                                params,
                                device,
                                enable_bnns=enable_bnns,
                            )[0]
                        )
                    config = {
                        "dtype": dtype,
                    }
                    verify(outputs, atol=0.001, rtol=0.01, config=config)
@pytest.mark.skipif(skip_codegen_test(), reason="Skip because BNNS codegen is not available")
def test_codegen_normalization():
    """Verify the expected JSON codegen for instance_norm across shapes/axes.

    Per check_normalization below, offload is only expected for rank-3 inputs
    normalized over axis 0 and rank-4 inputs normalized over axis 1; every
    other combination must produce zero BNNS partitions.
    """
    np.random.seed(0)
    dtype = "float32"
    shapes_config = [
        [1, 2, 3, 4],
        [3, 2, 3, 4],
        [2, 2, 3],
        [16, 32, 32],
        [5, 3],
    ]
    axes = [-1, 0, 1, 2]

    def check_normalization(rank, axis):
        # True when BNNS is expected to offload instance_norm for this rank/axis.
        if rank < 3 or rank > 4:
            return False
        if axis == 0 and rank == 3 or axis == 1 and rank == 4:
            return True
        return False

    for shape in shapes_config:
        for axis in axes:
            if len(shape) == 2 and axis != 0:
                continue
            for center in [False, True]:
                for scale in [False, True]:
                    inputs = {"src"}
                    args = (shape, axis, center, scale, dtype)
                    func, params = _get_model(
                        shape,
                        [shape[axis]],
                        [shape[axis]],
                        dtype,
                        var_names=iter(inputs),
                        axis=axis,
                        center=center,
                        scale=scale,
                    )
                    offload_on_bnns = check_normalization(len(shape), axis)
                    if offload_on_bnns is True:
                        bnns_blocks = 1
                    else:
                        bnns_blocks = 0
                    exp_codegen = _get_expected_codegen(*args, offload_on_bnns)
                    verify_codegen(func, exp_codegen, bnns_blocks)


# Allow running this file directly outside of pytest.
if __name__ == "__main__":
    test_normalization()
    test_codegen_normalization()
| 6,269 | 30.039604 | 98 | py |
tvm | tvm-main/tests/python/contrib/test_bnns/test_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""BNNS integration conv2d tests."""
import numpy as np
import pytest
import tvm
from tvm import relay
from .infrastructure import skip_runtime_test, compare_inference_with_ref, generate_trials
# TODO: Missed cases
# 1. Bias as add with 3d const tensor. Lead to additional unsqueeze op between
# 2. Check unsupported cases of fusion. Like bias add with axis != 1, add with broadcast by spatial dims
# 3. Check if bias/weights is not constants. Should fallback into LLVM or decompose it
# 4. Check if bias/weights is constants expr. Should works somehow.
def _get_model(
    shape,
    kernel=(3, 3),
    padding=(1, 1),
    strides=(1, 1),
    dilation=(1, 1),
    groups=1,
    dtype="float32",
    channels=-1,  # -1 means same as input channels
    bias_type="none",
    activation_type="none",
):
    """Build a conv2d expression with optional bias and activation.

    Returns the relay expression and a dict of the generated constant params.
    """
    if channels == -1:
        channels = shape[1]
    data = relay.var("a", shape=shape, dtype=dtype)
    w_shape = (channels, shape[1] // groups, *kernel)
    w_data = tvm.nd.array(np.random.uniform(-128, 127, w_shape).astype(dtype))
    out = relay.nn.conv2d(
        data,
        relay.const(w_data, dtype),
        kernel_size=kernel,
        dilation=dilation,
        strides=strides,
        padding=padding,
        groups=groups,
        channels=channels,
        out_dtype=dtype,
    )
    params = {"w": w_data}
    if bias_type == "bias_add":
        # Explicit nn.bias_add on the channel axis.
        b_data = tvm.nd.array(np.random.uniform(-10, 10, w_shape[0]).astype(dtype))
        out = relay.nn.bias_add(out, relay.const(b_data, dtype), axis=1)
        params["b"] = b_data
    elif bias_type in ("add_3d", "add_4d"):
        # Bias expressed as a broadcasting add with a 3D or 4D constant.
        b_shape = (w_shape[0], 1, 1) if bias_type == "add_3d" else (1, w_shape[0], 1, 1)
        b_data = tvm.nd.array(np.random.uniform(-10, 10, b_shape).astype(dtype))
        out = relay.add(out, relay.const(b_data, dtype))
        params["b"] = b_data
    if activation_type == "relu":
        out = relay.nn.relu(out)
    elif activation_type == "sigmoid":
        out = relay.op.sigmoid(out)
    return out, params
@pytest.mark.skipif(skip_runtime_test(), reason="Skip because BNNS codegen is not available")
def test_conv2d():
    """Compare BNNS vs reference inference over a sampled grid of conv2d configs."""
    np.random.seed(0)
    kernel_hs = [1, 2, 3, 5]
    kernel_ws = [1, 2, 3, 5]
    pad = [(1, 1), (2, 2), (2, 1)]
    strides = [(1, 1), (2, 2)]
    dilation = [(1, 1)]
    out_channels = [1, 4, 8, 16]
    input_shapes = [(10, 10, 14), (12, 15, 16), (20, 20, 20)]
    batches = [1, 2]
    groups = [1, 2]
    # BUG FIX: this list previously contained "bias.add", which matches no branch
    # in _get_model (it checks for "bias_add"), so the nn.bias_add fusion path was
    # silently never exercised by this test.
    bias_kind = ["none", "add_3d", "add_4d", "bias_add"]
    activation_kind = ["none", "relu", "sigmoid"]
    trials = generate_trials(
        [
            kernel_hs,
            kernel_ws,
            pad,
            strides,
            dilation,
            out_channels,
            input_shapes,
            groups,
            batches,
            bias_kind,
            activation_kind,
        ],
        3,
    )
    for (
        kernel_h,
        kernel_w,
        pad,
        stride,
        dilation,
        out_channels,
        input_shapes,
        group,
        batch,
        bias,
        activation,
    ) in trials:
        # Grouped conv requires the output channels to divide evenly.
        if out_channels % group != 0:
            continue
        func, params = _get_model(
            shape=(batch, *input_shapes),
            kernel=(kernel_h, kernel_w),
            padding=pad,
            strides=stride,
            dilation=dilation,
            groups=group,
            channels=out_channels,
            bias_type=bias,
            activation_type=activation,
        )
        compare_inference_with_ref(func, params)
@pytest.mark.skipif(skip_runtime_test(), reason="Skip because BNNS codegen is not available")
def test_conv2d_dw():
    """Depth-wise conv2d (groups == input channels) must match the reference."""
    # The explicit `if skip_runtime_test(): return` guard duplicated the skipif
    # decorator above; removed for consistency with test_conv2d.
    np.random.seed(0)
    shape = [4, 5, 5]
    for batch in [1, 2]:
        mod, params = _get_model(shape=(batch, *shape), groups=shape[0])
        compare_inference_with_ref(mod, params)
@pytest.mark.skipif(skip_runtime_test(), reason="Skip because BNNS codegen is not available")
def test_conv2d_with_oc1():
    """conv2d with a single output channel, with and without a 4D-add bias."""
    # The explicit `if skip_runtime_test(): return` guard duplicated the skipif
    # decorator above; removed for consistency with test_conv2d.
    np.random.seed(0)
    shape = [3, 5, 5]
    for batch in [1, 2]:
        for bias in ["none", "add_4d"]:
            mod, params = _get_model(shape=(batch, *shape), channels=1, bias_type=bias)
            compare_inference_with_ref(mod, params)
if __name__ == "__main__":
    # Allow running this file directly, without pytest.
    test_conv2d()
    test_conv2d_dw()
    test_conv2d_with_oc1()
| 5,331 | 28.955056 | 106 | py |
tvm | tvm-main/tests/python/contrib/test_bnns/test_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""BNNS integration dense tests."""
import numpy as np
import math
import pytest
import tvm
from tvm import relay
from .infrastructure import (
Device,
skip_runtime_test,
skip_codegen_test,
build_and_run,
verify,
verify_codegen,
generate_trials,
)
def _get_model(shape, weight_shape, units, dtype, var_names, has_bias=False, has_gelu=False):
    """Return a model and any parameters it may have"""
    # nn.dense with random constant weights, optionally followed by a bias add
    # and a GELU tail built from primitive ops.
    a = relay.var(next(var_names), shape=shape, dtype=dtype)
    w = tvm.nd.array(np.random.uniform(-128, 127, weight_shape).astype(dtype))
    weights = relay.const(w, dtype)
    out = relay.nn.dense(a, weights, units=units, out_dtype=dtype)
    params = {"w": w}
    if has_bias:
        b = tvm.nd.array(np.random.randint(-128, 127, weight_shape[0]).astype(dtype))
        biasc = relay.const(b, dtype)
        out = relay.op.add(out, biasc)
        params["b"] = b
    if has_gelu:
        # Tanh approximation of GELU:
        #   0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
        # expressed below as a chain of primitive relay ops ("bias" is x here).
        const1 = relay.const(0.044715)
        const2 = relay.const(math.sqrt(2 / math.pi))
        bias = out
        out = relay.op.power(bias, relay.const(3.0, "float32"))
        out = relay.op.multiply(out, const1)
        out = relay.op.add(out, bias)
        out = relay.op.multiply(out, const2)
        out = relay.op.tanh(out)
        out = relay.op.add(out, relay.const(1, "float32"))
        out = relay.op.multiply(out, relay.const(0.5))
        out = relay.op.multiply(out, bias)
    return out, params
def _get_expected_codegen(shape, weight_shape, units, dtype, has_bias=False, has_gelu=False):
    """Build the expected JSON node list for the dense (+bias, +gelu) composite."""
    output_shape = (shape[0], units)
    # Composite name depends on which ops were fused into the BNNS kernel.
    # NOTE(review): gelu without bias keeps the plain "nn.dense" name — presumably
    # a gelu-only pattern is not offloaded; confirm against the BNNS pattern table.
    name = "nn.dense"
    if has_bias is True:
        name = "bnns.dense_bias"
    if has_bias is True and has_gelu is True:
        name = "bnns.dense_bias_gelu"
    node = {
        "op": "kernel",
        "name": name,
        "inputs": [],
        "attrs": {
            "num_outputs": "1",
            "out_dtype": [["float32"]],
            "shape": [[list(output_shape)]],
            "dtype": [[dtype]],
            "units": [[str(units)]],
        },
    }
    # Data input and constant weights; names are generated, so left blank.
    inputs = [
        {"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[str(dtype)]]}},
        {
            "op": "const",
            "name": "",
            "attrs": {"shape": [[list(weight_shape)]], "dtype": [[str(dtype)]]},
        },
    ]
    if has_bias:
        inputs.append(
            {
                "op": "const",
                "name": "",
                "attrs": {"shape": [[[weight_shape[0]]]], "dtype": [["float32"]]},
            }
        )
    # Wire every input node into the kernel node, in order.
    input_idx = 0
    for _ in range(len(inputs)):
        node["inputs"].append([input_idx, 0, 0])
        input_idx += 1
    node["attrs"]["num_inputs"] = str(len(inputs))
    inputs.append(node)
    return inputs
@pytest.mark.skipif(skip_runtime_test(), reason="Skip because BNNS codegen is not available")
def test_dense():
    """Run dense (+ optional bias / GELU tail) with and without BNNS and compare outputs."""
    device = Device()
    np.random.seed(0)
    dtype = ["float32"]
    # (input shape, weight shape, units) triples.
    shape = [
        ((1, 128), (16, 128), 16),
        ((32, 32), (32, 32), 32),
        ((1, 64), (1, 64), 1),
        ((11, 2), (2, 2), 2),
        ((2, 2), (1, 2), 1),
    ]
    composite = [False, True]
    trials = generate_trials([dtype, shape, composite, composite], 3)
    for dtype, (shape, weight_shape, units), with_bias, with_gelu in trials:
        outputs = []
        inputs = {"a": tvm.nd.array(np.random.uniform(-128, 127, shape).astype(dtype))}
        func, params = _get_model(
            shape,
            weight_shape,
            units,
            dtype,
            var_names=iter(inputs),
            has_bias=with_bias,
            has_gelu=with_gelu,
        )
        # Build/run once with BNNS offload disabled (reference) and once enabled.
        for bnns in [False, True]:
            outputs.append(
                build_and_run(
                    func,
                    inputs,
                    1,
                    params,
                    device,
                    enable_bnns=bnns,
                )[0]
            )
        config = {
            "shape": shape,
            "weight_shape": weight_shape,
            "units": units,
            "dtype": dtype,
            "with_bias": with_bias,
            "with_gelu": with_gelu,
        }
        verify(outputs, atol=0.001, rtol=0.01, config=config)
@pytest.mark.skipif(skip_codegen_test(), reason="Skip because BNNS codegen is not available")
def test_codegen_dense():
    """Verify the JSON emitted by the BNNS codegen for dense/bias/GELU combinations."""
    np.random.seed(0)
    dtypes = ["float32"]
    shape_cases = [
        ((1, 128), (16, 128), 16),
        ((32, 32), (32, 32), 32),
        ((1, 64), (1, 64), 1),
        ((11, 2), (2, 2), 2),
        ((2, 2), (1, 2), 1),
    ]
    flags = [False, True]
    for dtype, (shape, weight_shape, units), with_bias, with_gelu in generate_trials(
        [dtypes, shape_cases, flags, flags], 3
    ):
        model_args = (shape, weight_shape, units, dtype)
        func, params = _get_model(
            *model_args, var_names=iter({"a"}), has_bias=with_bias, has_gelu=with_gelu
        )
        expected = _get_expected_codegen(*model_args, has_bias=with_bias, has_gelu=with_gelu)
        verify_codegen(func, expected, 1)
if __name__ == "__main__":
    # Allow running this file directly, without pytest.
    test_dense()
    test_codegen_dense()
| 5,932 | 30.062827 | 98 | py |
tvm | tvm-main/tests/python/contrib/test_bnns/test_conv2d_patterns.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""BNNS pattern detection check"""
import tvm
from tvm import relay
import numpy as np
from tvm.relay.op.contrib.bnns import partition_for_bnns
# Shorthand dtype string used throughout these pattern tests.
fp32 = "float32"
def partition(exp):
    """Apply BNNS specific partitioning transformation"""
    module = tvm.IRModule.from_expr(exp)
    with tvm.transform.PassContext(opt_level=3):
        partitioned = partition_for_bnns(module)
    return partitioned
def is_op_fused(func, op_name):
    """Return True if `op_name` appears in a partitioned-pattern function under `func`."""
    hits = []

    def _visit(node):
        # Pattern-partitioned functions carry the fused op names in this attr.
        if (
            isinstance(node, tvm.relay.function.Function)
            and op_name in node.attrs["PartitionedFromPattern"]
        ):
            hits.append(op_name)

    tvm.relay.analysis.post_order_visit(func.body, _visit)
    return bool(hits)
def test_pattern_conv2d_with_bias_add():
    """bias_add fuses into the BNNS conv2d partition only for the channel axis (axis=1)."""
    for axis in (1, 2):
        a = relay.var("a", shape=(2, 7, 8, 8), dtype=fp32)
        w = relay.const(np.random.uniform(-10, 10, (8, 7, 3, 3)).astype(fp32))
        res = relay.nn.conv2d(a, w, kernel_size=(3, 3), padding=(1, 1), channels=8, out_dtype=fp32)
        b = relay.const(np.random.uniform(-10, 10, 8).astype(fp32))
        res = relay.nn.bias_add(res, b, axis=axis)
        mod = partition(res)
        bias_is_fused = is_op_fused(mod["tvmgen_default_bnns_main_0"], "nn.bias_add")
        # Fusion expected only for the channel axis.
        assert bias_is_fused if axis == 1 else not bias_is_fused
def test_pattern_conv2d_with_add():
    """Plain add fuses as a bias only when the constant is shaped like a per-channel bias."""
    # Maps bias-constant shape -> whether the add is expected to fuse.
    workloads = {8: False, (8, 1): False, (8, 1, 1): True, (1, 8, 1, 1): True}
    for b_shape, should_be_fused in workloads.items():
        a = relay.var("a", shape=(2, 7, 8, 8), dtype=fp32)
        w = relay.const(np.random.uniform(-10, 10, (8, 7, 3, 3)).astype(fp32))
        res = relay.nn.conv2d(a, w, kernel_size=(3, 3), padding=(1, 1), channels=8, out_dtype=fp32)
        b = relay.const(np.random.uniform(-10, 10, b_shape).astype(fp32))
        res = relay.add(res, b)
        mod = partition(res)
        bias_is_fused = is_op_fused(mod["tvmgen_default_bnns_main_0"], "add")
        assert bias_is_fused == should_be_fused
def test_pattern_conv2d_with_non_cons_weights():
    """conv2d is offloaded to BNNS only when its weights are constant."""
    for const_weights in (True, False):
        a = relay.var("a", shape=(2, 7, 8, 8), dtype=fp32)
        if const_weights:
            w = relay.const(np.random.uniform(-10, 10, (8, 7, 3, 3)).astype(fp32))
        else:
            w = relay.var("w", shape=(8, 7, 3, 3), dtype=fp32)
        res = relay.nn.conv2d(a, w, kernel_size=(3, 3), padding=(1, 1), channels=8, out_dtype=fp32)
        mod = partition(res)
        # A successful partition adds one extra global function beside "main".
        use_bnns = len(mod.get_global_vars()) == 2  # GlobalVar: "main" and "bnns_0"
        assert use_bnns == const_weights
def test_pattern_conv2d_with_non_cons_bias():
    """A bias_add with a non-constant bias must stay out of the BNNS partition."""
    data = relay.var("a", shape=[2, 7, 8, 8], dtype=fp32)
    kernel = relay.const(np.random.uniform(-10, 10, (8, 7, 3, 3)).astype(fp32))
    conv = relay.nn.conv2d(
        data, kernel, kernel_size=(3, 3), padding=(1, 1), channels=8, out_dtype=fp32
    )
    bias_var = relay.var("b", shape=[8], dtype=fp32)
    expr = relay.nn.bias_add(conv, bias_var, axis=1)
    mod = partition(expr)
    assert not is_op_fused(mod["tvmgen_default_bnns_main_0"], "nn.bias_add")
| 3,899 | 35.111111 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_bnns/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Infrastructure and tests for BNNS"""
| 825 | 44.888889 | 62 | py |
tvm | tvm-main/tests/python/contrib/test_bnns/test_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""BNNS integration dense tests."""
import numpy as np
import math
import pytest
import tvm
from tvm import relay
from tvm import testing
from .infrastructure import (
Device,
skip_runtime_test,
skip_codegen_test,
verify_codegen,
build_and_run,
verify,
generate_trials,
)
def _get_model(a_shape, b_shape, dtype, var_names, is_a_constant=False, is_b_constant=False):
    """Build a batch_matmul expression; either operand may be folded into a constant."""
    # Both var names are always consumed, even if the var is later replaced.
    a = relay.var(next(var_names), shape=a_shape, dtype=dtype)
    b = relay.var(next(var_names), shape=b_shape, dtype=dtype)
    params = {}
    if is_b_constant is True:
        data = tvm.nd.array(np.random.uniform(-128, 127, b_shape).astype(dtype))
        params["b"] = data
        b = relay.const(data, dtype)
    if is_a_constant is True:
        data = tvm.nd.array(np.random.uniform(-128, 127, a_shape).astype(dtype))
        params["a"] = data
        a = relay.const(data, dtype)
    return relay.nn.batch_matmul(a, b), params
@pytest.mark.skipif(skip_runtime_test(), reason="Skip because BNNS codegen is not available")
def test_matmul():
    """Compare batch_matmul with/without BNNS across shapes and const/var operand mixes."""
    device = Device()
    np.random.seed(0)
    dtype = "float32"
    # C[N, I, J] = A[N, I, K] * B[N, J, K]
    shapes_config = [
        # B, I, J, K
        [1, 4, 4, 3],
        [1, 16, 32, 32],
        [2, 1, 1, 3],
        [2, 16, 32, 32],
        [5, 1, 1, 3],
    ]
    data_config = [
        # A_is_constant, B_is_constant
        [False, True],
        [True, False],
        [False, False],
    ]
    for N, I, J, K in shapes_config:
        a_shape = [N, I, K]
        b_shape = [N, J, K]
        for is_a_constant, is_b_constant in data_config:
            outputs = []
            # NOTE(review): both entries are always fed even when _get_model folds
            # one operand into a constant — confirm set_input tolerates the unused key.
            inputs = {
                "a": tvm.nd.array(np.random.uniform(-128, 127, a_shape).astype(dtype)),
                "b": tvm.nd.array(np.random.uniform(-128, 127, b_shape).astype(dtype)),
            }
            func, params = _get_model(
                a_shape,
                b_shape,
                dtype,
                var_names=iter(inputs),
                is_a_constant=is_a_constant,
                is_b_constant=is_b_constant,
            )
            # Reference run (BNNS off) followed by the offloaded run.
            for enable_bnns in [False, True]:
                outputs.append(
                    build_and_run(
                        func,
                        inputs,
                        1,
                        params,
                        device,
                        enable_bnns=enable_bnns,
                    )[0]
                )
            config = {
                "a_shape": a_shape,
                "b_shape": b_shape,
                "dtype": dtype,
            }
            verify(outputs, atol=0.001, rtol=0.01, config=config)
if __name__ == "__main__":
    # Allow running this file directly, without pytest.
    test_matmul()
| 3,556 | 30.201754 | 93 | py |
tvm | tvm-main/tests/python/contrib/test_bnns/test_pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""BNNS integration pooling tests."""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm import testing
from .infrastructure import (
skip_runtime_test,
skip_codegen_test,
build_and_run,
verify,
verify_codegen,
)
from .infrastructure import Device
def _calculate_output_shape(shape, sizes, padding, strides):
"""Calculate pooling output shape."""
output_height = ((shape[2] - sizes[0] + padding[0] + padding[2]) / strides[0]) + 1
output_width = ((shape[3] - sizes[1] + padding[1] + padding[3]) / strides[1]) + 1
return 1, shape[1], int(output_height), int(output_width)
def _get_pooling_model(
    shape, dtype, typef, sizes, strides, padding, ceil_mode, count_include_pad, var_names
):
    """Return a max/avg pooling expression applied to a fresh input variable."""
    # Normalize 2-element (h, w) padding to the 4-element form.
    if len(padding) == 2:
        padding = (padding[0], padding[1], padding[0], padding[1])
    data = relay.var(next(var_names), shape=shape, dtype=dtype)
    if typef == "nn.max_pool2d":
        return relay.nn.max_pool2d(
            data,
            pool_size=sizes,
            strides=strides,
            padding=padding,
            ceil_mode=ceil_mode,
        )
    if typef == "nn.avg_pool2d":
        return relay.nn.avg_pool2d(
            data,
            pool_size=sizes,
            strides=strides,
            padding=padding,
            ceil_mode=ceil_mode,
            count_include_pad=count_include_pad,
        )
    raise ValueError("Function not supported")
def _get_global_pooling_model(shape, dtype, typef, var_names):
    """Return a global max/avg pooling expression applied to a fresh input variable."""
    data = relay.var(next(var_names), shape=shape, dtype=dtype)
    builders = {
        "nn.global_max_pool2d": relay.nn.global_max_pool2d,
        "nn.global_avg_pool2d": relay.nn.global_avg_pool2d,
    }
    if typef not in builders:
        raise ValueError("Function not supported")
    return builders[typef](data)
def _get_expected_pooling_codegen(
    shape, dtype, typef, sizes, strides, padding, ceil_mode, count_include_pad
):
    """Expected JSON graph for a windowed pooling op: one input node + one kernel node."""
    if len(padding) == 2:
        padding = (padding[0], padding[1], padding[0], padding[1])
    out_shape = _calculate_output_shape(shape, sizes, padding, strides)
    attrs = {
        "num_inputs": "1",
        "num_outputs": "1",
        "layout": [["NCHW"]],
        "shape": [[list(out_shape)]],
        "dtype": [[dtype]],
        "padding": [[str(p) for p in padding]],
        "strides": [[str(s) for s in strides]],
        "pool_size": [[str(s) for s in sizes]],
        "ceil_mode": [[str(1 if ceil_mode else 0)]],
    }
    # Average-style pools additionally carry the count_include_pad attribute.
    if typef in ("nn.avg_pool2d", "nn.l2_pool2d"):
        attrs["count_include_pad"] = [["1" if count_include_pad else "0"]]
    kernel_node = {"op": "kernel", "name": typef, "inputs": [[0, 0, 0]], "attrs": attrs}
    input_node = {
        "op": "input",
        "name": "",
        "attrs": {"shape": [[list(shape)]], "dtype": [[dtype]]},
    }
    return [input_node, kernel_node]
def _get_expected_global_pooling_codegen(shape, dtype, typef):
node = {
"op": "kernel",
"name": typef,
"inputs": [[0, 0, 0]],
"attrs": {
"num_inputs": "1",
"num_outputs": "1",
"layout": [["NCHW"]],
"shape": [[[1, shape[1], 1, 1]]],
"dtype": [[dtype]],
},
}
input = {"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[dtype]]}}
return [input, node]
@pytest.mark.skipif(skip_runtime_test(), reason="Skip because BNNS codegen is not available")
def test_pooling():
    """Run max/avg pooling configs with and without BNNS and compare the outputs."""
    device = Device()
    np.random.seed(0)
    dtype = "float32"
    # (op, pool_size, strides, padding, ceil_mode, count_include_pad, CHW shape)
    trials = [
        ["nn.max_pool2d", (3, 3), (2, 2), (0, 0), False, False, (27, 27, 512)],
        ["nn.max_pool2d", (2, 2), (2, 2), (0, 0), False, True, (16, 16, 16)],
        ["nn.max_pool2d", (3, 3), (2, 2), (1, 1), True, True, (15, 15, 16)],
        ["nn.max_pool2d", (2, 2), (2, 2), (0, 1), False, False, (16, 16, 16)],
        ["nn.avg_pool2d", (2, 2), (2, 2), (1, 1), False, False, (16, 16, 16)],
        ["nn.avg_pool2d", (2, 2), (2, 2), (0, 0), False, True, (16, 16, 16)],
        ["nn.avg_pool2d", (3, 3), (2, 2), (0, 1), True, False, (15, 15, 16)],
    ]
    for (
        typef,
        size,
        stride,
        pad,
        ceil_mode,
        count_include_pad,
        input_shape,
    ) in trials:
        shape = (1, *input_shape)
        outputs = []
        inputs = {
            "a": tvm.nd.array(np.random.uniform(-127, 128, shape).astype(dtype)),
        }
        func = _get_pooling_model(
            shape, dtype, typef, size, stride, pad, ceil_mode, count_include_pad, iter(inputs)
        )
        config = {
            "size": size,
            "stride": stride,
            "shape": shape,
            "pooling type": typef,
            "dtype": dtype,
            "padding": pad,
            "ceil_mode": ceil_mode,
            "count_include_pad": count_include_pad,
            "inputs": inputs,
        }
        params = None
        # Reference run (BNNS off) followed by the offloaded run.
        for enable_bnns in [False, True]:
            outputs.append(
                build_and_run(
                    func, inputs, 1, params, device, enable_bnns=enable_bnns, config=config
                )[0]
            )
        verify(outputs, atol=0.001, rtol=0.001, config=config)
@pytest.mark.skipif(skip_runtime_test(), reason="Skip because BNNS codegen is not available")
def test_global_pooling():
    """Run global max/avg pooling with and without BNNS and compare the outputs."""
    device = Device()
    np.random.seed(0)
    dtype = "float32"
    # (op, CHW input shape) — some entries repeat deliberately, exercising RNG variation.
    trials = [
        ["nn.global_max_pool2d", (8, 8, 16)],
        ["nn.global_max_pool2d", (9, 9, 16)],
        ["nn.global_max_pool2d", (8, 8, 16)],
        ["nn.global_avg_pool2d", (8, 8, 16)],
        ["nn.global_avg_pool2d", (8, 8, 16)],
        ["nn.global_avg_pool2d", (9, 9, 16)],
    ]
    for typef, input_shape in trials:
        shape = (1, *input_shape)
        outputs = []
        inputs = {
            "a": tvm.nd.array(np.random.uniform(-127, 128, shape).astype(dtype)),
        }
        func = _get_global_pooling_model(shape, dtype, typef, iter(inputs))
        config = {
            "shape": shape,
            "pooling type": typef,
            "dtype": dtype,
        }
        # Reference run (BNNS off) followed by the offloaded run.
        for enable_bnns in [False, True]:
            outputs.append(
                build_and_run(
                    func, inputs, 1, None, device, enable_bnns=enable_bnns, config=config
                )[0]
            )
        verify(outputs, atol=0.001, rtol=0.001, config=config)
@pytest.mark.skipif(skip_codegen_test(), reason="Skip because BNNS codegen is not available")
def test_codegen_pooling():
    """Check the BNNS codegen JSON for several max/avg pooling configurations."""
    dtype = "float32"
    trials = [
        ["nn.max_pool2d", (2, 2), (2, 2), (0, 0), False, True, (16, 16, 16)],
        ["nn.max_pool2d", (3, 3), (2, 2), (1, 1), True, True, (15, 15, 16)],
        ["nn.max_pool2d", (2, 2), (2, 2), (0, 1), False, False, (16, 16, 16)],
        ["nn.avg_pool2d", (2, 2), (2, 2), (1, 1), False, False, (16, 16, 16)],
        ["nn.avg_pool2d", (2, 2), (2, 2), (0, 0), False, True, (16, 16, 16)],
        ["nn.avg_pool2d", (3, 3), (2, 2), (0, 1), True, False, (15, 15, 16)],
    ]
    for (
        typef,
        size,
        stride,
        pad,
        ceil_mode,
        count_include_pad,
        input_shape,
    ) in trials:
        shape = (1, *input_shape)
        inputs = {"a"}
        # NOTE(review): the trial's ceil_mode/count_include_pad values are ignored —
        # both are pinned to False for the model and the expected codegen alike.
        # Confirm this is intentional and not a lost pass-through.
        args = (shape, dtype, typef, size, stride, pad, False, False)
        func = _get_pooling_model(*args, iter(inputs))
        exp_codegen = _get_expected_pooling_codegen(*args)
        verify_codegen(func, exp_codegen, 1)
@pytest.mark.skipif(skip_codegen_test(), reason="Skip because BNNS codegen is not available")
def test_codegen_global_pooling():
    """Check the BNNS codegen JSON for global pooling over several input shapes."""
    dtype = "float32"
    cases = [
        ("nn.global_max_pool2d", (8, 8, 16)),
        ("nn.global_max_pool2d", (9, 9, 16)),
        ("nn.global_max_pool2d", (8, 8, 16)),
        ("nn.global_avg_pool2d", (8, 8, 16)),
        ("nn.global_avg_pool2d", (8, 8, 16)),
        ("nn.global_avg_pool2d", (9, 9, 16)),
    ]
    for typef, chw_shape in cases:
        model_args = ((1, *chw_shape), dtype, typef)
        func = _get_global_pooling_model(*model_args, iter({"a"}))
        verify_codegen(func, _get_expected_global_pooling_codegen(*model_args), 1)
if __name__ == "__main__":
    # Allow running this file directly, without pytest.
    test_pooling()
    test_global_pooling()
    test_codegen_pooling()
    test_codegen_global_pooling()
| 9,305 | 31.089655 | 96 | py |
tvm | tvm-main/tests/python/contrib/test_bnns/infrastructure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from itertools import zip_longest, combinations
import json
import os
import warnings
import numpy as np
import tvm
from tvm import relay
from tvm import rpc
from tvm.contrib import graph_executor
from tvm.relay.op.contrib.bnns import partition_for_bnns
from tvm.contrib import utils
from tvm.autotvm.measure import request_remote
from tvm.relay.analysis import analysis
class Device:
    """
    Common device configuration for python tests.
    Check tests/python/contrib/arm_compute_lib/ for the presence of an test_config.json file.
    This file can be used to override the default configuration here which will attempt to run the BNNS
    runtime tests locally if the runtime is available. Changing the configuration will allow these
    runtime tests to be offloaded to a remote device with BNNS via a tracker for example.
    Notes
    -----
    The test configuration will be loaded once when the class is created. If the configuration
    changes between tests, any changes will not be picked up.
    Attributes
    ----------
    connection_type : str
        Details the type of RPC connection to use. Options:
        local - Use the local device,
        tracker - Connect to a tracker to request a remote device,
        remote - Connect to a remote device directly.
    host : str
        Specify IP address or hostname of remote target.
    port : int
        Specify port number of remote target.
    target : str
        The compilation target.
    device_key : str
        The device key of the remote target. Use when connecting to a remote device via a tracker.
    cross_compile : str
        Specify path to cross compiler to use when connecting a remote device from a non-arm platform.
    """
    # Class-level defaults; load() may overwrite them from test_config.json.
    connection_type = "local"
    host = "127.0.0.1"
    port = 9090
    target = "llvm"
    device_key = ""
    cross_compile = ""
    def __init__(self):
        """Keep remote device for lifetime of object."""
        self.device = self._get_remote()
    @classmethod
    def _get_remote(cls):
        """Get a remote (or local) device to use for testing."""
        if cls.connection_type == "tracker":
            device = request_remote(cls.device_key, cls.host, cls.port, timeout=1000)
        elif cls.connection_type == "remote":
            device = rpc.connect(cls.host, cls.port)
        elif cls.connection_type == "local":
            device = rpc.LocalSession()
        else:
            raise ValueError(
                "connection_type in test_config.json should be one of: " "local, tracker, remote."
            )
        return device
    @classmethod
    def load(cls, file_name):
        """Load test config
        Load the test configuration by looking for file_name relative
        to the test_bnns directory.
        """
        # Resolve the config path relative to this module's directory.
        location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        config_file = os.path.join(location, file_name)
        if not os.path.exists(config_file):
            warnings.warn("Config file doesn't exist, resuming tests with default config.")
            return
        with open(config_file, mode="r") as config:
            test_config = json.load(config)
        # Mandatory keys raise KeyError if absent; optional ones fall back to "".
        cls.connection_type = test_config["connection_type"]
        cls.host = test_config["host"]
        cls.port = test_config["port"]
        cls.target = test_config["target"]
        cls.device_key = test_config.get("device_key") or ""
        cls.cross_compile = test_config.get("cross_compile") or ""
# Pin the compilation target for these tests to local LLVM at import time.
Device.target = "llvm"
def skip_runtime_test():
    """Skip test if it requires the runtime and it's not present."""
    # BNNS codegen not present.
    if tvm.get_global_func("relay.ext.bnns", True):
        return False
    print("Skip because BNNS codegen is not available.")
    return True
def skip_codegen_test():
    """Skip test if it requires the BNNS codegen and it's not present.

    Returns True when the codegen is unavailable, False otherwise.
    """
    if not tvm.get_global_func("relay.ext.bnns", True):
        print("Skip because BNNS codegen is not available.")
        return True
    # BUG FIX: previously fell through and implicitly returned None; return an
    # explicit False for consistency with skip_runtime_test().
    return False
def build_module(mod, target, params=None, enable_bnns=True, tvm_ops=0):
    """Build module with option to build for BNNS."""
    # NOTE(review): tvm_ops is accepted for API symmetry with callers but unused here.
    if isinstance(mod, tvm.relay.expr.Call):
        mod = tvm.IRModule.from_expr(mod)
    with tvm.transform.PassContext(opt_level=3):
        if enable_bnns:
            mod = partition_for_bnns(mod)
        # Clear the TE compiler cache so repeated builds in one process do not
        # reuse functions compiled for a previous configuration.
        relay.backend.te_compiler.get().clear()
        return relay.build(mod, target=target, params=params)
def build_and_run(
    mod,
    inputs,
    outputs,
    params,
    device,
    enable_bnns=True,
    no_runs=1,
    tvm_ops=0,
    config=None,
):
    """Build and run the relay module.

    `outputs` is the number of output tensors to fetch per run and `no_runs`
    the number of inference repetitions; returns a list of per-run output lists.
    Raises Exception (with the failing `config` appended) if the build fails.
    """
    if config is None:
        config = {}
    try:
        lib = build_module(mod, device.target, params, enable_bnns, tvm_ops)
    except Exception as e:
        err_msg = "The module could not be built.\n"
        if config:
            err_msg += f"The test failed with the following parameters: {config}\n"
        err_msg += str(e)
        # BUG FIX: chain the original exception (`from e`) so the root-cause
        # traceback is preserved instead of being flattened into a string only.
        raise Exception(err_msg) from e
    lib = update_lib(lib, device.device, device.cross_compile)
    gen_module = graph_executor.GraphModule(lib["default"](device.device.cpu(0)))
    gen_module.set_input(**inputs)
    out = []
    for _ in range(no_runs):
        gen_module.run()
        out.append([gen_module.get_output(i) for i in range(outputs)])
    return out
def update_lib(lib, device, cross_compile):
    """Export the library to the remote/local device and load it back."""
    lib_name = "mod.so"
    tmp_dir = utils.tempdir()
    lib_path = tmp_dir.relpath(lib_name)
    # Use the cross compiler only when one was configured.
    export_kwargs = {"cc": cross_compile} if cross_compile else {}
    lib.export_library(lib_path, **export_kwargs)
    device.upload(lib_path)
    return device.load_module(lib_name)
def extract_bnns_modules(module):
    """Get the BNNS module(s) from llvm module."""
    imported = module.get_lib().imported_modules
    return [mod for mod in imported if mod.type_key == "bnns_json"]
def verify(answers, atol, rtol, verify_saturation=False, config=None):
    """Compare the array of answers. Each entry is a list of outputs."""
    if config is None:
        config = {}
    if len(answers) < 2:
        raise RuntimeError(f"No results to compare: expected at least two, found {len(answers)}")
    # Compare every pair of runs, output-by-output. zip_longest surfaces a
    # mismatched output count as a None entry, which then fails the comparison.
    for answer in zip_longest(*answers):
        for outs in combinations(answer, 2):
            try:
                if verify_saturation:
                    # Heuristic: if >=25% of values sit at 0 or 255 the tensor is
                    # considered saturated and the comparison meaningless.
                    assert (
                        np.count_nonzero(outs[0].numpy() == 255) < 0.25 * outs[0].numpy().size
                    ), "Output is saturated: {}".format(outs[0])
                    assert (
                        np.count_nonzero(outs[0].numpy() == 0) < 0.25 * outs[0].numpy().size
                    ), "Output is saturated: {}".format(outs[0])
                tvm.testing.assert_allclose(outs[0].numpy(), outs[1].numpy(), rtol=rtol, atol=atol)
            except AssertionError as e:
                err_msg = "Results not within the acceptable tolerance.\n"
                if config:
                    err_msg += f"The test failed with the following parameters: {config}\n"
                err_msg += str(e)
                raise AssertionError(err_msg)
def verify_codegen(
    module,
    known_good_codegen,
    num_bnns_modules,
    tvm_ops=0,
    target=Device.target,
):
    """Check BNNS codegen against a known good output."""
    compiled = build_module(module, target, tvm_ops=tvm_ops)
    bnns_modules = extract_bnns_modules(compiled)
    assert len(bnns_modules) == num_bnns_modules, (
        f"The number of BNNS modules produced ({len(bnns_modules)}) does not "
        f"match the expected value ({num_bnns_modules})."
    )
    for mod in bnns_modules:
        codegen = json.loads(mod.get_source("json"))["nodes"]
        # remove input and const names as these cannot be predetermined
        for node in codegen:
            if node["op"] in ("input", "const"):
                node["name"] = ""
        codegen_str = json.dumps(codegen, sort_keys=True, indent=2)
        known_good_codegen_str = json.dumps(known_good_codegen, sort_keys=True, indent=2)
        assert codegen_str == known_good_codegen_str, (
            f"The JSON produced by codegen does not match the expected result. \n"
            f"Actual={codegen_str} \n"
            f"Expected={known_good_codegen_str}"
        )
def compare_inference_with_ref(func, params, atol=0.002, rtol=0.007):
    """Compare scoring results for compilation with and without BNNS.

    Provided function will be compiled two times with and without BNNS.
    The scoring results for both types of compilation will be compared
    with the provided atol and rtol. The input data will be automatically
    generated based on the shape and dtype info provided for var nodes.
    """
    # Generate input tensor values from the free vars' type annotations.
    inputs = {}
    for free_param in analysis.free_vars(func):
        name = free_param.name_hint
        dtype = free_param.type_annotation.dtype
        shape = [s.value for s in free_param.type_annotation.shape]
        inputs[name] = tvm.nd.array(np.random.uniform(0, 127, shape).astype(dtype))
    # Run for both type of compilation
    device = Device()
    outputs = []
    for bnns in [False, True]:
        outputs.append(build_and_run(func, inputs, 1, params, device, enable_bnns=bnns)[0])
    # Compare result tensors
    verify(outputs, atol=atol, rtol=rtol)
def generate_trials(space, r_factor=3):
    """Generates a series of trials.

    This algorithm generates a series of non-deterministic trials given a
    space of options to test. A trial is generated by pulling a value from
    each option in the space. On some occasions the values are shuffled to
    ensure a different trial on each r_factor iteration. The algorithm ensures
    that each value from an option is used at least once. The total number of
    trials is determined by the r_factor * the option with the largest number
    of values.

    Parameters
    ----------
    space: List[List[Any]]
        A list of different options with varying values to test.
        The caller's lists are not modified.
    r_factor: Optional[int]
        The repeat factor.

    Returns
    -------
    result: List[List]
        A list of trials specifying values for each option.
    """
    np.random.seed(0)
    # Work on copies so the caller's option lists are not mutated by the
    # in-place shuffles below.
    space = [list(option) for option in space]
    max_len = 1
    for option in space:
        max_len = max(max_len, len(option))
    num_trials = r_factor * max_len
    trials = []
    for i in range(num_trials):
        trial = []
        for option in space:
            # Reshuffle an option every time all of its values have been
            # cycled through once, so later rounds see a different order.
            if i % len(option) == 0:
                np.random.shuffle(option)
            trial.append(option[i % len(option)])
        trials.append(trial)
    return trials
| 11,542 | 34.085106 | 103 | py |
tvm | tvm-main/tests/python/contrib/test_verilator/test_verilator_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Verilator codegen tests"""
import numpy as np
import tvm
import tvm.testing
from tvm import relay
import pytest
from test_verilator.infrastructure import (
skip_test,
compile_hardware,
compiler_opts,
run_module,
offload,
clear_stats,
stats,
)
def create_module_add(shape, dtype):
    """Create add module.

    Parameters
    ----------
    shape : Tuple
        The shape tuple, used for both inputs.
    dtype : Str
        The data type.

    Returns
    -------
    mod: Module
        The relay module computing add(x, y).
    """
    x = relay.var("x", shape=shape, dtype=dtype)
    y = relay.var("y", shape=shape, dtype=dtype)
    z = relay.add(x, y)
    f = relay.Function([x, y], z)
    mod = tvm.IRModule()
    mod["main"] = f
    return mod
def create_module_bias_add(xshape, yshape, dtype):
    """Create bias_add module.

    Parameters
    ----------
    xshape : Tuple
        The x (data) shape tuple.
    yshape : Tuple
        The y (bias vector) shape tuple.
    dtype : Str
        The data type.

    Returns
    -------
    mod: Module
        The relay module computing bias_add(x, y).
    """
    x = relay.var("x", shape=xshape, dtype=dtype)
    y = relay.var("y", shape=yshape, dtype=dtype)
    # axis=3 adds the bias along the last (channel) dimension of an NHWC tensor.
    z = relay.nn.bias_add(x, y, axis=3)
    f = relay.Function([x, y], z)
    mod = tvm.IRModule()
    mod["main"] = f
    return mod
def run_and_check(xshape, yshape, dtype, mod, opts):
    """Run the module on random data and check against a NumPy reference.

    Parameters
    ----------
    xshape : Tuple
        The x shape tuple.
    yshape : Tuple
        The y shape tuple.
    dtype : Str
        The data type.
    mod: Module
        The relay module.
    opts: Dict
        The compiler options.

    Returns
    -------
    cycles: Int
        The number of cycles.
    """
    x_data = np.random.randint(5, size=xshape, dtype=dtype)
    y_data = np.random.randint(5, size=yshape, dtype=dtype)
    # NumPy broadcasting covers both the add and bias_add references here.
    ref = x_data + y_data
    inp = {"x": x_data, "y": y_data}
    clear_stats()
    out = run_module(inp, mod, params=None, opts=opts)
    # Read the profiler counters populated during the run above.
    values = stats()
    tvm.testing.assert_allclose(out.numpy(), ref, rtol=1e-5, atol=1e-5)
    return values["cycle_counter"]
def print_test_info(test, lanes, cycles):
    """Print a summary line for a Verilator test run.

    Parameters
    ----------
    test : Str
        The name of the test.
    lanes : Int
        The number of vector lanes.
    cycles : Int
        The number of cycles.
    """
    print(f"test:{test} vector-lanes:{lanes} number of cycles:{cycles}")
@pytest.mark.skipif(skip_test(), reason="Skip because Verilator codegen is not available")
def tadd(lanes):
    """Run the add test for a given number of vector lanes.

    Parameters
    ----------
    lanes : Int
        The number of vector lanes.
    """
    # NOTE(review): pytest does not collect `tadd` (no `test_` prefix), so the
    # skipif marker above has no effect; the in-body check below is what
    # actually guards execution.
    if skip_test():
        return
    dtype = "int32"
    shape = (8, 4)
    mod = create_module_add(shape, dtype)
    mod = offload(mod)
    lib = compile_hardware(lanes)
    opts = compiler_opts(lib)
    cycles = run_and_check(shape, shape, dtype, mod, opts)
    print_test_info("add", lanes, cycles)
@pytest.mark.skipif(skip_test(), reason="Skip because Verilator codegen is not available")
def tbias(lanes):
    """Run the bias_add test for a given number of vector lanes.

    Parameters
    ----------
    lanes : Int
        The number of vector lanes.
    """
    # NOTE(review): pytest does not collect `tbias` (no `test_` prefix), so
    # the skipif marker above has no effect; the in-body check below is what
    # actually guards execution.
    if skip_test():
        return
    dtype = "int32"
    xshape = (1, 112, 112, 32)
    yshape = (32,)
    mod = create_module_bias_add(xshape, yshape, dtype)
    mod = offload(mod)
    lib = compile_hardware(lanes)
    opts = compiler_opts(lib)
    cycles = run_and_check(xshape, yshape, dtype, mod, opts)
    print_test_info("nn.bias_add", lanes, cycles)
def test_add():
    """add tests."""
    for lanes in (1, 4):
        tadd(lanes)
def test_bias_add():
    """bias_add tests."""
    for lanes in (1, 32):
        tbias(lanes)
# Run all tests in this file when it is executed directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 4,511 | 21.56 | 90 | py |
tvm | tvm-main/tests/python/contrib/test_verilator/test_mobilenet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te, relay, transform
from tvm.contrib.download import download_testdata
from tvm.contrib import graph_executor as runtime
import os
import pytest
from PIL import Image
import numpy as np
from test_verilator.infrastructure import (
skip_test,
compile_hardware,
compiler_opts,
offload,
clear_stats,
stats,
)
def extract(path):
    """Extract a tgz or gz archive into the archive's directory.

    Parameters
    ----------
    path : Str
        The path of the compressed file.

    Raises
    ------
    RuntimeError
        If the file does not have a recognised archive extension.
    """
    import tarfile

    if path.endswith("tgz") or path.endswith("gz"):
        dir_path = os.path.dirname(path)
        # Use a context manager so the archive is closed even if extraction
        # fails part-way through.
        with tarfile.open(path) as tar:
            # NOTE(review): extractall on an untrusted archive allows path
            # traversal; this is only used on known-good test data.
            tar.extractall(path=dir_path)
    else:
        raise RuntimeError("Could not decompress the file: " + path)
def get_real_image(im_height, im_width):
    """Download a real test image and return it as a batch of one.

    Parameters
    ----------
    im_height : Int
        The image height.
    im_width : Int
        The image width.

    Returns
    -------
    data: Data
        The uint8 image array, reshaped to (1, im_height, im_width, 3).
    """
    repo_base = "https://github.com/dmlc/web-data/raw/master/tensorflow/models/InceptionV1/"
    img_name = "elephant-299.jpg"
    image_url = os.path.join(repo_base, img_name)
    # download_testdata caches the file locally across runs.
    img_path = download_testdata(image_url, img_name, module="data")
    image = Image.open(img_path).resize((im_height, im_width))
    x = np.array(image).astype("uint8")
    data = np.reshape(x, (1, im_height, im_width, 3))
    return data
def get_mobilenet_model():
    """Download, extract and parse the quantized mobilenet v1 TFLite model.

    Returns
    -------
    model : tflite.Model
        The parsed TFLite model object.
    """
    model_url = "https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz"
    model_path = download_testdata(
        model_url, "mobilenet_v1_1.0_224_quant.tgz", module=["tf", "official"]
    )
    model_dir = os.path.dirname(model_path)
    extract(model_path)
    tflite_model_file = os.path.join(model_dir, "mobilenet_v1_1.0_224_quant.tflite")
    # Read via a context manager so the file handle is not leaked.
    with open(tflite_model_file, "rb") as f:
        tflite_model_buf = f.read()
    # Newer tflite packages expose GetRootAsModel on tflite.Model directly;
    # fall back to the older tflite.Model.Model API otherwise.
    try:
        import tflite

        return tflite.Model.GetRootAsModel(tflite_model_buf, 0)
    except AttributeError:
        import tflite.Model

        return tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
def get_input_tensor_name():
    """Return the name of the mobilenet model's input tensor."""
    return "input"
def compile_model_to_relay(model):
    """Compile a TFLite model to relay.

    Parameters
    ----------
    model : Model
        The input TFLite model.

    Returns
    -------
    mod: Module
        The relay module.
    params: Parameters
        The model parameters.
    """
    input_tensor = get_input_tensor_name()
    # Fixed mobilenet v1 input layout: one 224x224 RGB uint8 image.
    input_shape = (1, 224, 224, 3)
    input_dtype = "uint8"
    mod, params = relay.frontend.from_tflite(
        model,
        shape_dict={input_tensor: input_shape},
        dtype_dict={input_tensor: input_dtype},
    )
    return mod, params
def run_model(mod, params=None, opts=None):
    """Build the model and run it on a real test image.

    Parameters
    ----------
    mod: Module
        The relay module.
    params: Parameters
        The model parameters.
    opts: Dict
        The Verilator compiler options.

    Returns
    -------
    out: Data
        The output data (first graph output as a NumPy array).
    """
    # The Verilator options are threaded through to the codegen via the
    # pass context config.
    with transform.PassContext(opt_level=3, config={"relay.ext.verilator.options": opts}):
        lib = relay.build(mod, target="llvm", params=params)
    module = runtime.GraphModule(lib["default"](tvm.cpu()))
    image_data = get_real_image(224, 224)
    input_tensor = get_input_tensor_name()
    module.set_input(input_tensor, image_data)
    module.run()
    out = module.get_output(0).numpy()
    return out
def get_labels():
    """Download and return the mobilenet label list.

    Returns
    -------
    labels : list of str
        The class labels, one per line (newlines included).
    """
    label_file_url = "".join(
        [
            "https://raw.githubusercontent.com/",
            "tensorflow/tensorflow/master/tensorflow/lite/java/demo/",
            "app/src/main/assets/",
            "labels_mobilenet_quant_v1_224.txt",
        ]
    )
    label_file = "labels_mobilenet_quant_v1_224.txt"
    label_path = download_testdata(label_file_url, label_file, module="data")
    # List of 1001 classes
    with open(label_path) as f:
        labels = f.readlines()
    return labels
def check_result(res):
    """Check that the model prediction is the expected class.

    Parameters
    ----------
    res : numpy.ndarray
        The raw model output scores.

    Raises
    ------
    AssertionError
        If the highest-scoring class is not the expected one.
    """
    # The previous version also fetched the label list here, but never used
    # it, triggering a needless network download; removed.
    predictions = np.squeeze(res)
    prediction = np.argmax(predictions)
    # 387 is the elephant
    assert prediction == 387
def print_test_info(lanes, cycles):
    """Print a summary line for the mobilenet test run.

    Parameters
    ----------
    lanes : Int
        The number of vector lanes.
    cycles : Int
        The number of cycles.
    """
    print(f"[mobilenet] vector-lanes:{lanes} number of cycles:{cycles} spent in nn.bias_add")
def is_tflite_available():
    """Return True if the tflite python package can be imported.

    Used to skip the mobilenet test when tensorflow-lite is not installed.
    """
    try:
        import tflite  # pylint: disable=unused-import

        return True
    # Catch only ImportError rather than a bare except, so real errors in a
    # broken installation are not silently hidden.
    except ImportError:
        return False
@pytest.mark.skipif(skip_test(), reason="Skip because Verilator codegen is not available")
def tmobilenet(lanes):
    """Mobilenet test template.

    Parameters
    ----------
    lanes : Int
        The number of vector lanes.
    """
    # NOTE(review): pytest does not collect `tmobilenet` (no `test_` prefix),
    # so the skipif marker above has no effect; the in-body checks below are
    # what actually guard execution.
    if skip_test():
        return
    if not is_tflite_available():
        return
    model = get_mobilenet_model()
    mod, params = compile_model_to_relay(model)
    mod = offload(mod)
    lib = compile_hardware(lanes)
    opts = compiler_opts(lib)
    # Clear the cycle counters, run, then read them back.
    clear_stats()
    res = run_model(mod, params, opts)
    values = stats()
    check_result(res)
    print_test_info(lanes, values["cycle_counter"])
def test_mobilenet():
    """Mobilenet tests."""
    for lanes in (4, 32):
        tmobilenet(lanes)
| 6,339 | 24.772358 | 134 | py |
tvm | tvm-main/tests/python/contrib/test_verilator/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Infrastructure and tests for Verilator codegen """
| 841 | 43.315789 | 62 | py |
tvm | tvm-main/tests/python/contrib/test_verilator/infrastructure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Verilator utility functions"""
import os
import sys
import subprocess as sp
import json
import tvm
from tvm import relay
import tvm.relay.testing
from tvm import runtime
from tvm.relay import transform
def _register_verilator_op(op_name, supported=True):
    """The helper function to indicate that a given operator can be supported by Verilator.

    Parameters
    ----------
    op_name : Str
        The name of operator that will be registered.
    supported : bool
        Whether the operator is supported.

    Returns
    -------
    f : callable
        A function that returns if the operator is supported by Verilator.
    """
    @tvm.ir.register_op_attr(op_name, "target.verilator")
    def _func_wrapper(expr):
        return supported
    return _func_wrapper
# Register the Relay operators that can be offloaded to the Verilator backend.
_register_verilator_op("add")
_register_verilator_op("nn.bias_add")
def skip_test():
    """Skip test if it requires the Verilator codegen and it's not present.

    Returns
    -------
    skip : bool
        True if the test should be skipped, False otherwise.
    """
    if not tvm.get_global_func("relay.ext.verilator", True):
        print("Skip test because Verilator codegen is not available.")
        return True
    if sys.platform == "win32":
        print("Skip test on Windows for now")
        return True
    return False
def clear_stats():
    """Clear profiler statistics.

    Does nothing if the profiler global function is not registered.
    """
    f = tvm.get_global_func("verilator.profiler_clear", True)
    if f:
        f()
def stats():
    """Get profiler statistics.

    Returns
    -------
    values : dict
        The profiler counters, decoded from the JSON status string.
    """
    x = tvm.get_global_func("verilator.profiler_status")()
    return json.loads(x)
def offload(mod):
    """Offload ops based on the registered ops

    Parameters
    ----------
    mod : Module
        The input module.

    Returns
    -------
    mod : Module
        The output module with offloaded ops.
    """
    backend = "verilator"
    # Annotate supported ops for the backend, then partition them out into
    # separate functions for external codegen.
    mod = transform.AnnotateTarget([backend])(mod)
    mod = transform.PartitionGraph()(mod)
    return mod
def verilator_app_path():
    """Return the path of the Verilator 'add' hardware app directory."""
    base = os.path.dirname(os.path.realpath(__file__))
    rel = os.path.join("..", "..", "..", "..", "3rdparty", "vta-hw", "apps", "verilator", "add")
    return os.path.join(base, rel)
def compile_hardware(lanes):
    """Compile the hardware into a shared library.

    Parameters
    ----------
    lanes : Int
        The number of vector lanes.

    Returns
    -------
    path : Str
        The path of the shared library.
    """
    lib_name = "libverilator_{}".format(lanes)
    lib = os.path.join(verilator_app_path(), "{}.so".format(lib_name))
    # Only build the library if it has not been built before.
    if not os.path.isfile(lib):
        cmd = [
            "make",
            "--directory",
            verilator_app_path(),
            "LIB_NAME={}".format(lib_name),
            "LANES={}".format(lanes),
        ]
        sp.run(cmd, check=True, stdout=sp.DEVNULL)
    return lib
def compiler_opts(lib):
    """Create the Verilator compiler options.

    Parameters
    ----------
    lib : Str
        The path of the hardware shared library.

    Returns
    -------
    opts : Dict
        The compiler options.
    """
    return {
        "lib_path": lib,
        "profiler_enable": True,
        "profiler_cycle_counter_id": 0,
    }
def run_module(inp, mod, params=None, opts=None):
    """Compile a Relay module and run it on the VM with the hardware library.

    Parameters
    ----------
    inp : Data
        The input data, keyed by input name.
    mod : Module
        The relay module.
    params : Parameters
        The model Parameters.
    opts : Dict
        The compiler options.

    Returns
    -------
    out : Data
        The output data.
    """
    # The Verilator options are threaded through to the codegen via the
    # pass context config.
    with tvm.transform.PassContext(opt_level=3, config={"relay.ext.verilator.options": opts}):
        lib = relay.vm.compile(mod, target="llvm", params=params)
    code, lib = lib.save()
    exe = runtime.vm.Executable.load_exec(code, lib)
    vm = runtime.vm.VirtualMachine(exe, tvm.cpu())
    out = vm.run(**inp)
    return out
| 4,753 | 22.889447 | 94 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_inline_partitions.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Tests for the 'InlineNonComputeIntensivePartitions' pass.
"""
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from tvm.relay.op.contrib.ethosn import InlineNonComputeIntensivePartitions
from . import infrastructure as tei
def _assert_structural_equal(a, b):
    """Check structural equality of two Relay expressions.

    Fails with an explanatory message when `a` and `b` are not structurally
    equal; free variables are mapped onto each other during the comparison.
    """
    reason = (
        "Actual and expected relay functions are not equal. "
        "InlineNonComputeIntensiveSubgraphs is not correctly "
        "transforming the input graph."
    )
    assert tvm.ir.structural_equal(a, b, map_free_vars=True), reason
@requires_ethosn
def test_single_reshape():
    """Check that a single reshape is inlined correctly."""
    # Helper building the reshape expression under test.
    def get_reshape():
        x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
        return relay.reshape(x, newshape=(2, 2, 4))
    def before():
        reshape = get_reshape()
        return tei.make_ethosn_partition(reshape)
    def expected():
        # After inlining, the reshape should live directly in the module.
        reshape = get_reshape()
        mod = tvm.IRModule.from_expr(reshape)
        return relay.transform.InferType()(mod)
    mod = before()
    mod = InlineNonComputeIntensivePartitions()(mod)
    expected_mod = expected()
    _assert_structural_equal(mod, expected_mod)
@requires_ethosn
def test_multiple_non_compute_intensive_ops():
    """
    Check that a partitioned function is correctly inlined
    when it contains multiple non-compute intensive operations.
    """
    # Graph of reshape/clip ops only, all considered non-compute intensive.
    def get_graph():
        x = relay.var("x", shape=(2, 2, 4), dtype="int8")
        x = relay.reshape(x, newshape=(1, 2, 2, 4))
        x = relay.clip(x, 0.0, 1.0)
        x = relay.reshape(x, newshape=(2, 2, 4))
        return relay.clip(x, 0.0, 1.0)
    def before():
        func = get_graph()
        return tei.make_ethosn_partition(func)
    def expected():
        # After inlining, the ops should live directly in the main function.
        func = get_graph()
        mod = tvm.IRModule.from_expr(func)
        return relay.transform.InferType()(mod)
    mod = before()
    mod = InlineNonComputeIntensivePartitions()(mod)
    expected_mod = expected()
    _assert_structural_equal(mod, expected_mod)
@requires_ethosn
def test_compute_intensive_ops():
    """
    Check that a partitioned function that is considered
    compute intensive is not inlined.
    """
    def before():
        x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
        x = relay.nn.max_pool2d(x, layout="NHWC")
        x = relay.reshape(x, newshape=(2, 2, 4))
        return tei.make_ethosn_partition(x)
    mod = before()
    transformed_mod = InlineNonComputeIntensivePartitions()(mod)
    # The pass should leave every function in the module untouched.
    for global_var in mod.get_global_vars():
        _assert_structural_equal(mod[global_var], transformed_mod[global_var])
@requires_ethosn
def test_multiple_partitioned_functions():
    """
    Tests the pass on a number of partitioned functions.
    """
    def before():
        composite_func_name = "ethos-n_0"
        inp = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
        # partitioned func 1 (non compute intensive)
        x = relay.reshape(inp, newshape=(1, 2, 2, 4))
        partitioned_func_1 = tei.make_ethosn_partition(x)[composite_func_name]
        gv_1 = relay.GlobalVar("ethos-n_0")
        # partitioned func 2 (compute intensive)
        x = relay.nn.max_pool2d(inp, layout="NHWC")
        partitioned_func_2 = tei.make_ethosn_partition(x)[composite_func_name]
        gv_2 = relay.GlobalVar("ethos-n_1")
        # partitioned func 3 (non compute intensive)
        x = relay.clip(inp, 0.0, 1.0)
        partitioned_func_3 = tei.make_ethosn_partition(x)[composite_func_name]
        gv_3 = relay.GlobalVar("ethos-n_2")
        mod = tvm.IRModule({})
        mod[gv_1] = partitioned_func_1
        mod[gv_2] = partitioned_func_2
        mod[gv_3] = partitioned_func_3
        # main calls the three partitioned functions in sequence.
        main_expr = relay.Call(gv_1, [inp])
        main_expr = relay.Call(gv_2, [main_expr])
        main_expr = relay.Call(gv_3, [main_expr])
        mod["main"] = relay.Function([inp], main_expr)
        return relay.transform.InferType()(mod)
    def expected():
        composite_func_name = "ethos-n_0"
        inp = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
        # partitioned func 2 (compute intensive) is the only partition kept
        x = relay.nn.max_pool2d(inp, layout="NHWC")
        partitioned_func_2 = tei.make_ethosn_partition(x)[composite_func_name]
        gv_2 = relay.GlobalVar("ethos-n_1")
        mod = tvm.IRModule({})
        mod[gv_2] = partitioned_func_2
        # funcs 1 and 3 are inlined straight into main
        main_expr = relay.reshape(inp, newshape=(1, 2, 2, 4))
        main_expr = relay.Call(gv_2, [main_expr])
        main_expr = relay.clip(main_expr, 0.0, 1.0)
        mod["main"] = relay.Function([inp], main_expr)
        return relay.transform.InferType()(mod)
    mod = before()
    mod = InlineNonComputeIntensivePartitions()(mod)
    expected_mod = expected()
    for global_var in mod.get_global_vars():
        _assert_structural_equal(mod[global_var.name_hint], expected_mod[global_var.name_hint])
| 5,735 | 33.142857 | 95 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_fullyconnected.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration fully connected tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(
    shape, weight_shape, input_zp, input_sc, kernel_zp, kernel_sc, output_zp, output_sc, dtype
):
    """Return a model and any parameters it may have.

    Builds a quantized fully connected graph:
    qnn.dense -> bias_add -> qnn.requantize.
    """
    a = relay.var("a", shape=shape, dtype=dtype)
    # Random weights covering the full range of the dtype.
    weights_array = tvm.nd.array(
        np.random.randint(
            np.iinfo(dtype).min, high=np.iinfo(dtype).max, size=weight_shape, dtype=dtype
        )
    )
    weights = relay.const(weights_array, dtype)
    dense = relay.qnn.op.dense(
        a,
        weights,
        input_zero_point=relay.const(input_zp, "int32"),
        kernel_zero_point=relay.const(kernel_zp, "int32"),
        input_scale=relay.const(input_sc, "float32"),
        kernel_scale=relay.const(kernel_sc, "float32"),
        units=weight_shape[0],
        out_dtype="int32",
    )
    b = tvm.nd.array(np.random.randint(0, high=255, size=(weight_shape[0],), dtype="int32"))
    biasc = relay.const(b, "int32")
    bias = relay.nn.bias_add(dense, biasc)
    req = relay.qnn.op.requantize(
        bias,
        relay.const(input_sc * kernel_sc, "float32"),  # input scale
        relay.const(input_zp * kernel_zp, "int32"),  # input zero point
        relay.const(output_sc, "float32"),  # output scale
        relay.const(output_zp, "int32"),  # output zero point
        out_dtype=dtype,
    )
    params = {"w": weights_array, "b": b}
    return req, params
@requires_ethosn
@pytest.mark.parametrize(
    "shape,out_channels",
    [
        ((1, 1024), 64),
        ((1, 16384), 1),
        ((1, 1280), 1000),
    ],
)
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_fullyconnected(shape, out_channels, dtype):
    """Compare Fully Connected output with TVM."""
    np.random.seed(0)
    iinfo = np.iinfo(dtype)
    data_min = iinfo.min
    data_max = iinfo.max
    inputs = {
        "a": tvm.nd.array(np.random.randint(data_min, data_max + 1, size=shape, dtype=dtype)),
    }
    outputs = []
    # Random quantization parameters for the input and kernel; the output
    # params are derived from them.
    input_zp = np.random.randint(data_min, data_max)
    input_sc = np.random.random() * 2
    kernel_zp = np.random.randint(data_min, data_max)
    kernel_sc = np.random.random() * 2
    output_zp, output_sc = tei.get_conv2d_qnn_params(
        dtype,
        input_zp,
        input_sc,
        kernel_zp,
        kernel_sc,
        shape[0],
        shape[1],
        1,
    )
    model, params = _get_model(
        shape,
        (out_channels, shape[1]),
        input_zp,
        input_sc,
        kernel_zp,
        kernel_sc,
        output_zp,
        output_sc,
        dtype,
    )
    # Run with and without NPU offload and compare the outputs.
    for npu in [False, True]:
        mod = tei.make_module(model, params)
        outputs.append(tei.build_and_run(mod, inputs, 1, params, npu=npu))
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
    "shape,weight_shape,err_msg",
    [
        (
            (1, 1, 1, 64),
            (1, 64),
            "Weights tensor must have I dimension equal to the number"
            " of channels of the input tensor.;",
        ),
        ((1024, 64), (1, 64), "batch size=1024, batch size must = 1;"),
    ],
)
def test_fullyconnected_failure(shape, weight_shape, err_msg):
    """Check Fully Connected error messages."""
    np.random.seed(0)
    dtype = "uint8"
    # Trivial quantization params (zp=0, sc=1); only the shapes matter here.
    model, _ = _get_model(
        shape,
        weight_shape,
        0,
        1,
        0,
        1,
        0,
        1,
        dtype,
    )
    model = tei.make_ethosn_composite(model, "ethos-n.qnn_fc")
    mod = tei.make_ethosn_partition(model)
    tei.test_error(mod, {}, err_msg)
@requires_ethosn
def test_fullyconnected_scale_out_of_range():
    """Check Fully Connected out of range scale error message."""
    np.random.seed(0)
    # input_sc * kernel_sc / output_sc = 2^20, outside the supported range.
    input_sc = 1024
    kernel_sc = 1024
    output_sc = 1
    model, _ = _get_model(
        (1, 64),
        (1, 64),
        0,
        input_sc,
        0,
        kernel_sc,
        0,
        output_sc,
        "uint8",
    )
    model = tei.make_ethosn_composite(model, "ethos-n.qnn_fc")
    mod = tei.make_ethosn_partition(model)
    expected_error_msg = (
        "Overall scale (of the input * weights / output) should be in the range (2^-32, 65536)"
    )
    tei.test_error(mod, {}, expected_error_msg)
| 5,173 | 27.585635 | 95 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_constant_duplication.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test that constants aren't duplicated for Arm(R) Ethos(TM)-N"""
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model():
    """Return a model and any parameters it may have.

    Builds a quantized graph: add(const) -> qnn.conv2d -> bias_add ->
    qnn.requantize.
    """
    shape = (1, 4, 4, 4)
    kernel_h = 3
    kernel_w = 3
    out_channels = 8
    a = relay.var("a", shape=shape, dtype="uint8")
    add_const_value = tvm.nd.array(np.random.randint(0, high=10, size=shape, dtype="uint8"))
    add_const = relay.const(add_const_value, "uint8")
    a = relay.add(a, add_const)
    weight_shape = (kernel_h, kernel_w, shape[3], out_channels)
    weights_array = tvm.nd.array(
        np.random.randint(low=0, high=255, size=weight_shape, dtype="uint8")
    )
    weights = relay.const(weights_array, "uint8")
    conv = relay.qnn.op.conv2d(
        a,
        weights,
        input_zero_point=relay.const(0, "int32"),
        kernel_zero_point=relay.const(0, "int32"),
        input_scale=relay.const(0.3, "float32"),
        kernel_scale=relay.const(0.4, "float32"),
        kernel_size=(kernel_h, kernel_w),
        data_layout="NHWC",
        kernel_layout="HWIO",
        dilation=(1, 1),
        strides=(1, 1),
        groups=1,
        channels=out_channels,
        padding=(0, 0, 0, 0),
        out_dtype="int32",
    )
    b = tvm.nd.array(np.random.randint(0, high=10, size=(out_channels,), dtype="int32"))
    biasc = relay.const(b, "int32")
    bias = relay.nn.bias_add(conv, biasc, axis=3)
    req = relay.qnn.op.requantize(
        bias,
        relay.const(0.3 * 0.4, "float32"),  # input scale
        relay.const(0, "int32"),  # input zero point
        relay.const(0.4, "float32"),  # output scale
        relay.const(0, "int32"),  # output zero point
        out_dtype="uint8",
    )
    params = {"w": weights_array, "b": b}
    return req, params
@requires_ethosn
def test_constant_duplication():
    """Test that constants are not duplicated."""
    np.random.seed(0)
    model, params = _get_model()
    mod = tei.make_module(model, params)
    res = tei.build(mod, params, npu=True, expected_host_ops=1)
    # Expect exactly one remaining host-side parameter ("p0") with 64
    # elements; everything else should have been consumed by the NPU
    # compilation rather than duplicated.
    for key, value in res.params.items():
        assert key == "p0"
        assert value.numpy().size == 64
| 3,043 | 34.395349 | 92 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_depth_to_space.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration depth-to-space tests"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, block, dtype, layout):
    """Return a depth_to_space expression for the given configuration."""
    data = relay.var("a", shape=shape, dtype=dtype)
    return relay.nn.depth_to_space(data, layout=layout, block_size=block)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "shape",
    [
        (1, 16, 16, 16),
        (1, 64, 32, 16),
    ],
)
def test_depth_to_space(dtype, shape):
    """Compare Depth To Space output with TVM."""
    np.random.seed(0)
    # Random input covering the full range of the dtype.
    inputs = {
        "a": tvm.nd.array(
            np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype)
        )
    }
    outputs = []
    # Run on the host (npu=False) and the NPU (npu=True) and compare.
    for npu in [False, True]:
        model = _get_model(shape, 2, dtype, "NHWC")
        mod = tei.make_module(model, {})
        outputs.append(
            tei.build_and_run(
                mod,
                inputs,
                1,
                {},
                npu=npu,
                additional_config_args={"inline_non_compute_intensive_partitions": False},
            )
        )
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
    "shape,block,dtype,layout,err_msg",
    [
        ((2, 16, 16, 16), 2, "uint8", "NHWC", "batch size=2, batch size must = 1"),
        (
            (1, 16, 16, 16),
            2,
            "int16",
            "NHWC",
            "dtype='int16', dtype must be either uint8, int8 or int32;",
        ),
        ((1, 16, 16, 16), 4, "uint8", "NHWC", "Only block size of 2 is supported"),
        ((1, 16, 16, 16), 2, "uint8", "NCHW", "Input layer must be NHWC or NHWCB"),
    ],
)
def test_depth_to_space_failure(shape, block, dtype, layout, err_msg):
    """Check Depth To Space error messages."""
    # Each parametrized case should be rejected by the backend with the
    # given error message.
    model = _get_model(shape, block, dtype, layout)
    mod = tei.make_ethosn_partition(model)
    tei.test_error(mod, {}, err_msg)
| 2,854 | 30.373626 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration conv2d tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(
    shape,
    kernel_h,
    kernel_w,
    input_zp,
    input_sc,
    kernel_zp,
    kernel_sc,
    output_zp,
    output_sc,
    pad,
    strides,
    dilation,
    groups,
    dtype,
    out_channels,
    weight_format,
):
    """Return a model and any parameters it may have.

    Builds a quantized conv2d -> bias_add -> requantize pattern. ``pad``
    selects how "same" padding is applied: as an explicit pad op ("op"),
    as a conv attribute ("attr"), both ("both"), or neither.
    """
    a = relay.var("a", shape=shape, dtype=dtype)
    if pad in ("op", "both"):
        # Express the padding as a standalone nn.pad preceding the conv,
        # filling with the input zero point.
        p = tei.get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, strides)
        a = relay.nn.pad(
            a,
            pad_width=[(0, 0), (p[0], p[2]), (p[1], p[3]), (0, 0)],
            pad_value=input_zp,
            pad_mode="constant",
        )
        # The conv below sees the already-padded spatial dimensions.
        shape = (shape[0], shape[1] + p[0] + p[2], shape[2] + p[1] + p[3], shape[3])
    p = tei.get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, strides)
    if weight_format == "HWIO":
        weight_shape = (kernel_h, kernel_w, shape[3] // groups, out_channels)
    else:
        # HWOI — used by the depthwise test, one filter per channel.
        weight_shape = (kernel_h, kernel_w, out_channels, 1)
    weights_array = tvm.nd.array(
        np.random.randint(
            np.iinfo(dtype).min, high=np.iinfo(dtype).max + 1, size=weight_shape, dtype=dtype
        )
    )
    weights = relay.const(weights_array, dtype)
    conv = relay.qnn.op.conv2d(
        a,
        weights,
        input_zero_point=relay.const(input_zp, "int32"),
        kernel_zero_point=relay.const(kernel_zp, "int32"),
        input_scale=relay.const(input_sc, "float32"),
        kernel_scale=relay.const(kernel_sc, "float32"),
        kernel_size=(kernel_h, kernel_w),
        data_layout="NHWC",
        kernel_layout=weight_format,
        dilation=dilation,
        strides=strides,
        groups=groups,
        channels=out_channels,
        padding=p if pad in ("attr", "both") else (0, 0, 0, 0),
        out_dtype="int32",
    )
    bias_data = tvm.nd.array(
        np.random.randint(
            np.iinfo(dtype).min, high=np.iinfo(dtype).max + 1, size=(out_channels,), dtype="int32"
        )
    )
    biasc = relay.const(bias_data, "int32")
    bias = relay.nn.bias_add(conv, biasc, axis=3)
    # Per-channel quantization passes the kernel scales as an NDArray
    # (see the qnn_per_channel cases in the tests above/below).
    if isinstance(kernel_sc, tvm.runtime.ndarray.NDArray):
        req_input_sc = [sc * input_sc for sc in kernel_sc.numpy()]
    else:
        req_input_sc = input_sc * kernel_sc
    req = relay.qnn.op.requantize(
        bias,
        relay.const(req_input_sc, "float32"),  # input scale
        relay.const(0, "int32"),  # input zero point
        relay.const(output_sc, "float32"),  # output scale
        relay.const(output_zp, "int32"),  # output zero point
        out_dtype=dtype,
    )
    params = {"w": weights_array, "b": bias_data}
    return req, params
@requires_ethosn
@pytest.mark.parametrize(
    "dtype,qnn_per_channel", [("uint8", False), ("int8", False), ("int8", True)]
)
@pytest.mark.parametrize("pad,stride", [("attr", (2, 2)), ("none", (2, 2)), ("op", (1, 1))])
@pytest.mark.parametrize(
    "shape,out_channels,kernel_size",
    [
        [(1, 17, 20, 26), 4, (3, 1)],
        [(1, 9, 20, 30), 7, (1, 5)],
        [(1, 21, 21, 22), 8, (2, 2)],
    ],
)
def test_conv2d(
    dtype,
    shape,
    out_channels,
    kernel_size,
    pad,
    stride,
    qnn_per_channel,
):
    """Compare Conv2D output with TVM.

    Runs the quantized conv2d model once on the host and once on the NPU
    and verifies the outputs against each other.
    """
    np.random.seed(0)  # deterministic quantization params and data
    dilation = (1, 1)
    groups = 1
    weight_format = "HWIO"
    outputs = []
    inputs = {
        "a": tvm.nd.array(
            np.random.randint(
                np.iinfo(dtype).min,
                np.iinfo(dtype).max + 1,
                size=shape,
                dtype=dtype,
            )
        ),
    }
    input_zp = np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max)
    input_sc = np.random.random() * 2
    if qnn_per_channel:
        # One kernel scale per output channel.
        kernel_sc = tvm.nd.array(
            np.random.uniform(low=0, high=2, size=(out_channels,)).astype(np.float32)
        )
    else:
        kernel_sc = np.random.random() * 2
    # int8 kernels use a symmetric zero point of 0; uint8 gets a random one.
    kernel_zp = (
        0 if dtype == "int8" else np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max)
    )
    output_zp, output_sc = tei.get_conv2d_qnn_params(
        dtype, input_zp, input_sc, kernel_zp, kernel_sc, kernel_size[0], kernel_size[1], shape[3]
    )
    model, params = _get_model(
        shape,
        kernel_size[0],
        kernel_size[1],
        input_zp,
        input_sc,
        kernel_zp,
        kernel_sc,
        output_zp,
        output_sc,
        pad,
        stride,
        dilation,
        groups,
        dtype,
        out_channels,
        weight_format,
    )
    for npu in [False, True]:
        mod = tei.make_module(model, params)
        outputs.append(tei.build_and_run(mod, inputs, 1, params, npu=npu))
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
    "dtype,qnn_per_channel", [("uint8", False), ("int8", False), ("int8", True)]
)
@pytest.mark.parametrize("pad,stride", [("attr", (2, 2)), ("none", (2, 2)), ("op", (1, 1))])
@pytest.mark.parametrize(
    "shape,kernel_size",
    [
        [(1, 17, 20, 28), (3, 3)],
        [(1, 9, 20, 30), (5, 5)],
        [(1, 21, 21, 22), (2, 2)],
    ],
)
def test_conv2d_depthwise(
    dtype,
    shape,
    kernel_size,
    pad,
    stride,
    qnn_per_channel,
):
    """Compare Conv2D output with TVM.

    Depthwise variant of test_conv2d: one group per input channel and
    HWOI weight layout.
    """
    np.random.seed(0)
    dilation = (1, 1)
    # Depthwise: the channel count is preserved and groups == channels.
    out_channels = shape[3]
    groups = out_channels
    weight_format = "HWOI"
    outputs = []
    inputs = {
        "a": tvm.nd.array(
            np.random.randint(
                np.iinfo(dtype).min,
                np.iinfo(dtype).max + 1,
                size=shape,
                dtype=dtype,
            )
        ),
    }
    # NOTE(review): unlike test_conv2d, the input zero point is drawn from
    # [0, max) rather than [min, max) — confirm this is intentional for the
    # depthwise case.
    input_zp = np.random.randint(0, np.iinfo(dtype).max)
    input_sc = np.random.random() * 2
    if qnn_per_channel:
        # One kernel scale per output channel.
        kernel_sc = tvm.nd.array(
            np.random.uniform(low=0, high=2, size=(out_channels,)).astype(np.float32)
        )
    else:
        kernel_sc = np.random.random() * 2
    # int8 kernels use a symmetric zero point of 0; uint8 gets a random one.
    kernel_zp = (
        0 if dtype == "int8" else np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max)
    )
    output_zp, output_sc = tei.get_conv2d_qnn_params(
        dtype, input_zp, input_sc, kernel_zp, kernel_sc, kernel_size[0], kernel_size[1], shape[3]
    )
    model, params = _get_model(
        shape,
        kernel_size[0],
        kernel_size[1],
        input_zp,
        input_sc,
        kernel_zp,
        kernel_sc,
        output_zp,
        output_sc,
        pad,
        stride,
        dilation,
        groups,
        dtype,
        out_channels,
        weight_format,
    )
    for npu in [False, True]:
        mod = tei.make_module(model, params)
        outputs.append(tei.build_and_run(mod, inputs, 1, params, npu=npu))
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
    "shape,pad,stride,dilation,err_msg",
    [
        (
            (1, 4, 4, 4),
            "both",
            (1, 1),
            (1, 1),
            "both op and attr padding exist, must be either op/attr only or no padding",
        ),
        (
            (1, 4, 4, 4),
            "none",
            (1, 1, 1),
            (1, 1),
            "stride size=3, stride size must = 2",
        ),
        (
            (1, 4, 4, 4),
            "none",
            (1, 1),
            (2, 1),
            "dilation=[2, 1], dilation must = [1, 1]",
        ),
        (
            (2, 4, 4, 4),
            "none",
            (1, 1),
            (1, 1),
            "batch size=2, batch size must = 1",
        ),
    ],
)
def test_conv2d_failure(shape, pad, stride, dilation, err_msg):
    """Check Conv2D error messages.

    Each case builds an unsupported conv2d configuration and asserts the
    expected error string is produced.
    """
    np.random.seed(0)
    kernel_size = (2, 2)
    groups = 1
    dtype = "uint8"
    out_channels = 8
    weight_format = "HWIO"
    # Zero points fixed to 0 and scales to 1 — quantization is not under
    # test here, only the structural validation.
    model, _ = _get_model(
        shape,
        kernel_size[0],
        kernel_size[1],
        0,
        1,
        0,
        1,
        0,
        1,
        pad,
        stride,
        dilation,
        groups,
        dtype,
        out_channels,
        weight_format,
    )
    model = tei.make_ethosn_composite(model, "ethos-n.qnn_conv2d")
    mod = tei.make_ethosn_partition(model)
    tei.test_error(mod, {}, err_msg)
@requires_ethosn
def test_conv2d_out_of_range_scale():
    """Check that an out-of-range overall scale is rejected for Conv2D."""
    np.random.seed(0)
    # input_sc * kernel_sc / output_sc = 1024 * 1024 / 1, which falls
    # outside the accepted range checked below.
    model, _ = _get_model(
        shape=(1, 4, 4, 4),
        kernel_h=1,
        kernel_w=1,
        input_zp=0,
        input_sc=1024,
        kernel_zp=0,
        kernel_sc=1024,
        output_zp=0,
        output_sc=1,
        pad="none",
        strides=(1, 1),
        dilation=(1, 1),
        groups=1,
        dtype="uint8",
        out_channels=8,
        weight_format="HWIO",
    )
    composite = tei.make_ethosn_composite(model, "ethos-n.qnn_conv2d")
    mod = tei.make_ethosn_partition(composite)
    expected_err_msg = (
        "Overall scale (of the input * weights / output) should be in the range (2^-32, 65536)"
    )
    tei.test_error(mod, {}, expected_err_msg)
| 9,904 | 25.77027 | 98 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_conv2d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration conv2d tests"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.relay.op.contrib import ethosn_api_version
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(
    shape,
    kernel_h,
    kernel_w,
    input_zp,
    input_sc,
    kernel_zp,
    kernel_sc,
    output_zp,
    output_sc,
    stride,
    dilation,
    groups,
    kernel_layout,
    dtype,
    out_channels,
    bias,
):
    """Return a model and any parameters it may have.

    Builds a quantized conv2d_transpose with "same" padding, an optional
    bias_add, and a trailing requantize back to ``dtype``.
    """
    a = relay.var("a", shape=shape, dtype=dtype)
    p = tei.get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, stride)
    # Weight tensor laid out as (in_channels, out_channels // groups, kH, kW),
    # matching the IOHW kernel_layout used by the tests.
    weight_shape = (shape[3], out_channels // groups, kernel_h, kernel_w)
    weight_data = tvm.nd.array(
        np.random.randint(
            np.iinfo(dtype).min,
            high=(np.iinfo(dtype).max + 1),
            size=weight_shape,
            dtype=dtype,
        )
    )
    weights = relay.const(weight_data, dtype)
    op = relay.qnn.op.conv2d_transpose(
        a,
        weights,
        input_zero_point=relay.const(input_zp, "int32"),
        input_scale=relay.const(input_sc, "float32"),
        kernel_zero_point=relay.const(kernel_zp, "int32"),
        kernel_scale=relay.const(kernel_sc, "float32"),
        kernel_size=(kernel_h, kernel_w),
        padding=p,
        strides=stride,
        dilation=dilation,
        data_layout="NHWC",
        kernel_layout=kernel_layout,
        out_dtype="int32",
        channels=out_channels,
        groups=groups,
    )
    if bias:
        bias_data = tvm.nd.array(
            np.random.randint(
                np.iinfo(dtype).min,
                high=np.iinfo(dtype).max + 1,
                size=(out_channels,),
                dtype="int32",
            )
        )
        biasc = relay.const(bias_data, "int32")
        op = relay.nn.bias_add(op, biasc, axis=3)
    # Per-channel quantization passes the kernel scales as an NDArray.
    if isinstance(kernel_sc, tvm.runtime.ndarray.NDArray):
        req_input_sc = [sc * input_sc for sc in kernel_sc.numpy()]
    else:
        req_input_sc = input_sc * kernel_sc
    # NOTE(review): the requantize input zero point reuses input_zp here,
    # whereas the conv2d tests pass 0 — confirm this difference is intended.
    op = relay.qnn.op.requantize(
        op,
        input_zero_point=relay.const(input_zp, "int32"),
        input_scale=relay.const(req_input_sc, "float32"),
        output_zero_point=relay.const(output_zp, "int32"),
        output_scale=relay.const(output_sc, "float32"),
        axis=3,
        rounding="UPWARD",
        out_dtype=dtype,
    )
    params = {"w": weight_data}
    if bias:
        params["b"] = bias_data
    return op, params
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "ifm_shape,strides,kernel_size,out_channels,bias",
    [
        ((1, 2, 2, 1), (2, 2), (1, 1), 1, False),
        ((1, 2, 2, 5), (2, 2), (3, 5), 4, False),
        ((1, 7, 7, 4), (2, 2), (7, 7), 8, True),
    ],
)
def test_conv2d_transpose(ifm_shape, strides, kernel_size, out_channels, dtype, bias):
    """Check transpose convolution output with TVM.

    Runs the model once on the host and once on the NPU and verifies
    both outputs against each other.
    """
    np.random.seed(0)  # deterministic quantization params and data
    kernel_layout = "IOHW"
    dilation = (1, 1)
    groups = 1
    iinfo = np.iinfo(dtype)
    data_min = iinfo.min
    data_max = iinfo.max
    input_zp = np.random.randint(data_min, data_max)
    input_sc = np.random.random() * 2
    kernel_zp = np.random.randint(data_min, data_max)
    kernel_sc = np.random.random() * 4
    output_zp, output_sc = tei.get_conv2d_qnn_params(
        dtype, input_zp, input_sc, kernel_zp, kernel_sc, ifm_shape[1], ifm_shape[2], ifm_shape[3]
    )
    model, params = _get_model(
        shape=ifm_shape,
        kernel_h=kernel_size[0],
        kernel_w=kernel_size[1],
        input_zp=input_zp,
        input_sc=input_sc,
        kernel_zp=kernel_zp,
        kernel_sc=kernel_sc,
        output_zp=output_zp,
        output_sc=output_sc,
        stride=strides,
        dilation=dilation,
        groups=groups,
        kernel_layout=kernel_layout,
        dtype=dtype,
        out_channels=out_channels,
        bias=bias,
    )
    outputs = []
    inputs = {
        "a": tvm.nd.array(np.random.randint(data_min, data_max + 1, size=ifm_shape, dtype=dtype))
    }
    for npu in [False, True]:
        mod = tei.make_module(model, params)
        outputs.append(tei.build_and_run(mod, inputs, 1, params, npu=npu))
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "ifm_shape,strides,kernel_size,out_channels,bias",
    [
        ((1, 10, 20, 3), (1, 1), (8, 5), 4, False),
        ((1, 10, 10, 2), (2, 2), (7, 9), 8, True),
    ],
)
def test_conv2d_transpose_kernel_size_gt_8(
    ifm_shape, strides, kernel_size, out_channels, dtype, bias
):
    """Check transpose convolution for big kernel sizes.

    Same flow as test_conv2d_transpose but with at least one kernel
    dimension greater than 8.
    """
    # Known incompatibility with specific NPU driver releases.
    if ethosn_api_version() in ["3.2.0", "3.1.0"]:
        pytest.skip("Skipping because NPU driver 22.11 fails to interpret zp used in the test.")
    np.random.seed(0)
    kernel_layout = "IOHW"
    dilation = (1, 1)
    groups = 1
    iinfo = np.iinfo(dtype)
    data_min = iinfo.min
    data_max = iinfo.max
    input_zp = np.random.randint(data_min, data_max)
    input_sc = np.random.random() * 2
    kernel_zp = np.random.randint(data_min, data_max)
    kernel_sc = np.random.random() * 4
    output_zp, output_sc = tei.get_conv2d_qnn_params(
        dtype, input_zp, input_sc, kernel_zp, kernel_sc, ifm_shape[1], ifm_shape[2], ifm_shape[3]
    )
    model, params = _get_model(
        shape=ifm_shape,
        kernel_h=kernel_size[0],
        kernel_w=kernel_size[1],
        input_zp=input_zp,
        input_sc=input_sc,
        kernel_zp=kernel_zp,
        kernel_sc=kernel_sc,
        output_zp=output_zp,
        output_sc=output_sc,
        stride=strides,
        dilation=dilation,
        groups=groups,
        kernel_layout=kernel_layout,
        dtype=dtype,
        out_channels=out_channels,
        bias=bias,
    )
    outputs = []
    inputs = {
        "a": tvm.nd.array(np.random.randint(data_min, data_max + 1, size=ifm_shape, dtype=dtype))
    }
    for npu in [False, True]:
        mod = tei.make_module(model, params)
        outputs.append(tei.build_and_run(mod, inputs, 1, params, npu=npu))
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "shape, stride, dilation, groups, err_msg",
    [
        (
            (1, 4, 4, 4),
            (1, 1, 1),
            (1, 1),
            1,
            "stride size=3, stride size must = 2",
        ),
        (
            (1, 4, 4, 4),
            (2, 2),
            (2, 2),
            2,
            "dilation=[2, 2], dilation must = [1, 1]",
        ),
        (
            (2, 4, 4, 4),
            (1, 1),
            (1, 1),
            1,
            "batch size=2, batch size must = 1",
        ),
    ],
)
def test_conv2d_transpose_failure(
    shape,
    stride,
    dilation,
    groups,
    err_msg,
    dtype,
):
    """
    Test transpose_conv2d error messages.

    Each case builds an unsupported configuration and asserts the expected
    error string is produced. Zero points fixed to 0 and scales to 1 since
    quantization is not under test.
    """
    np.random.seed(0)
    out_channels = 8
    model, _ = _get_model(
        shape=shape,
        kernel_h=1,
        kernel_w=1,
        input_zp=0,
        input_sc=1,
        kernel_zp=0,
        kernel_sc=1,
        output_zp=0,
        output_sc=1,
        stride=stride,
        dilation=dilation,
        groups=groups,
        kernel_layout="IOHW",
        dtype=dtype,
        out_channels=out_channels,
        bias=False,
    )
    model = tei.make_ethosn_composite(model, "ethos-n.qnn_conv2d_transpose")
    mod = tei.make_ethosn_partition(model)
    tei.test_error(mod, {}, err_msg)
| 8,419 | 26.973422 | 97 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_leaky_relu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Integration tests for Leaky ReLU"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, input_zp, input_sc, output_zp, output_sc, dtype, alpha):
    """Return a dequantize -> leaky_relu -> quantize pattern."""
    inp = relay.var("x", shape=shape, dtype=dtype)
    dequantized = relay.qnn.op.dequantize(
        inp,
        input_scale=relay.const(input_sc, "float32"),
        input_zero_point=relay.const(input_zp, "int32"),
    )
    activated = relay.nn.leaky_relu(dequantized, alpha=alpha)
    quantized = relay.qnn.op.quantize(
        activated,
        output_scale=relay.const(output_sc, "float32"),
        output_zero_point=relay.const(output_zp, "int32"),
        out_dtype=dtype,
    )
    return quantized
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize("shape", [(1, 52, 52, 3), (1, 3, 8, 2)])
@pytest.mark.parametrize("alpha", [0.001, 0.5678])
def test_leaky_relu(dtype, shape, alpha):
    """Compare Leaky ReLU output with TVM.

    Runs the model once on the host and once on the NPU and verifies the
    two outputs against each other.
    """
    np.random.seed(0)  # deterministic input data
    iinfo = np.iinfo(dtype)
    zp_min = iinfo.min
    zp_max = iinfo.max
    input_zp = zp_min + 128
    input_sc = 0.0068132
    output_zp = zp_min + 126  # values offset more than 126 can cause saturation
    output_sc = 0.0078125
    # NOTE(review): high=zp_max is exclusive, so the dtype's maximum value is
    # never generated (other tests use max + 1) — confirm intentional.
    inputs = {"x": tvm.nd.array(np.random.randint(zp_min, high=zp_max, size=shape, dtype=dtype))}
    outputs = []
    for npu in [False, True]:
        model = _get_model(shape, input_zp, input_sc, output_zp, output_sc, dtype, alpha)
        mod = tei.make_module(model, [])
        outputs.append(
            tei.build_and_run(
                mod,
                inputs,
                1,
                {},
                npu=npu,
                additional_config_args={"inline_non_compute_intensive_partitions": False},
            )
        )
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["int8"])
@pytest.mark.parametrize("shape", [(1, 14, 14, 2)])
@pytest.mark.parametrize("alpha", [-1.34, 2.32, 1, 0])
def test_leaky_relu_unsupported_alpha(dtype, shape, alpha):
    """Check that alpha values outside (0, 1) are rejected for Leaky ReLU."""
    zp_min = np.iinfo(dtype).min
    model = _get_model(shape, zp_min + 120, 0.0068132, zp_min + 128, 0.0078125, dtype, alpha)
    composite = tei.make_ethosn_composite(model, "ethos-n.qnn_leaky_relu")
    mod = tei.make_ethosn_partition(composite)
    expected = f"leaky relu alpha must be less than 1 and greater than 0, but was {alpha}"
    tei.test_error(mod, {}, expected)
| 3,312 | 32.806122 | 97 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/_infrastructure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Expose test functions to the Python front end"""
import tvm._ffi
# Expose the native test helpers registered under the
# "relay.ethos-n.test.infra" prefix as attributes of this module.
tvm._ffi._init_api("relay.ethos-n.test.infra", __name__)
| 913 | 38.73913 | 62 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_networks.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wrong-import-position, wrong-import-order
"""Arm(R) Ethos(TM)-N integration end-to-end network tests"""
import pytest
pytest.importorskip("tflite")
pytest.importorskip("tensorflow")
import tflite.Model
from tvm import relay
from tvm.testing import requires_ethosn
from tvm.contrib import download
import tvm.relay.testing.tf as tf_testing
from . import infrastructure as tei
def _get_tflite_model(tflite_model_path, inputs_dict, dtype):
    """Load a TFLite flatbuffer and convert it to Relay.

    Parameters
    ----------
    tflite_model_path : str
        Path to the .tflite file on disk.
    inputs_dict : dict
        Maps each input name to its shape.
    dtype : str
        Data type applied to every input.

    Returns
    -------
    The (mod, params) pair produced by the TFLite frontend.
    """
    with open(tflite_model_path, "rb") as f:
        tflite_model = tflite.Model.Model.GetRootAsModel(f.read(), 0)
    # Shapes come straight from inputs_dict; every input shares one dtype.
    shape_dict = dict(inputs_dict)
    dtype_dict = {input_name: dtype for input_name in inputs_dict}
    return relay.frontend.from_tflite(
        tflite_model,
        shape_dict=shape_dict,
        dtype_dict=dtype_dict,
    )
def _test_image_network(
    model_url,
    model_sub_path,
    input_dict,
    compile_hash,
    output_count,
    host_ops=0,
    npu_partitions=1,
    run=False,
):
    """Test an image network.

    Parameters
    ----------
    model_url : str
        The URL to the model.
    model_sub_path : str
        The name of the model file.
    input_dict : dict
        The input dict.
    compile_hash : str, set
        The compile hash(es) to check the compilation output against.
    output_count : int
        The expected number of outputs.
    host_ops : int
        The expected number of host operators.
    npu_partitions : int
        The expected number of Ethos-N partitions.
    run : bool
        Whether or not to try running the network. If hardware isn't
        available, the run will still take place but with a mocked
        inference function, so the results will be incorrect. This is
        therefore just to test the runtime flow is working rather than
        to check the correctness/accuracy.
    """
    def get_model():
        # Archives go through the TF testing helper, which unpacks them;
        # plain files are downloaded directly.
        if model_url[-3:] in ("tgz", "zip"):
            model_path = tf_testing.get_workload_official(
                model_url,
                model_sub_path,
            )
        else:
            model_path = download.download_testdata(
                model_url,
                model_sub_path,
            )
        return _get_tflite_model(model_path, input_dict, "uint8")
    # Build one input tensor per network input, sized (H, W) from its shape.
    inputs = {}
    for input_name in input_dict:
        input_shape = input_dict[input_name]
        inputs[input_name] = tei.get_real_image(input_shape[1], input_shape[2])
    mod, params = get_model()
    # Compile for the NPU, checking partitioning expectations, then compare
    # the generated library's hash against the accepted set.
    m = tei.build(mod, params, npu=True, expected_host_ops=host_ops, npu_partitions=npu_partitions)
    tei.assert_lib_hash(m.get_lib(), compile_hash)
    if run:
        tei.run(m, inputs, output_count, npu=True)
@requires_ethosn
def test_mobilenet_v1():
    """Compare compile hashes for mobilenetv1 with an expected result.

    Also runs the compiled network (run=True) to exercise the runtime flow;
    see _test_image_network for why the results themselves are not checked.
    """
    # If this test is failing due to a hash mismatch, please notify @lhutton1 and
    # @Leo-arm. The hash is there to catch any changes in the behaviour of the
    # codegen, which could come about from either a change in Support Library
    # version or a change in the Ethos-N codegen. To update this requires running
    # on hardware that isn't available in CI.
    _compile_hash = {"c37fec1f214c7f93ce49ee4e3b587969"}
    _test_image_network(
        model_url="https://storage.googleapis.com/download.tensorflow.org/"
        "models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
        model_sub_path="mobilenet_v1_1.0_224_quant.tflite",
        input_dict={"input": (1, 224, 224, 3)},
        compile_hash=_compile_hash,
        output_count=1,
        host_ops=3,
        npu_partitions=1,
        run=True,
    )
@requires_ethosn
def test_resnet_50_int8():
    """Compare compile hashes for resnet50 with an expected result.

    Two hashes are accepted — presumably covering different toolchain
    versions (see the note below about Support Library changes).
    """
    # If this test is failing due to a hash mismatch, please notify @lhutton1 and
    # @Leo-arm. The hash is there to catch any changes in the behaviour of the
    # codegen, which could come about from either a change in Support Library
    # version or a change in the Ethos-N codegen. To update this requires running
    # on hardware that isn't available in CI.
    _compile_hash = {
        "f16dc9caa8e696bc5da8a5c6a644eb72",
        "41acecca37b2735bd580f6ec38d8c2e0",
    }
    _test_image_network(
        model_url="https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/"
        "models/Quantized/resnet_50_quantized.tflite",
        model_sub_path="resnet_50_quantized.tflite",
        input_dict={"input": (1, 224, 224, 3)},
        compile_hash=_compile_hash,
        output_count=1,
        host_ops=10,
        npu_partitions=2,
    )
@requires_ethosn
def test_inception_v3():
    """Compare compile hashes for inceptionv3 with an expected result.

    The whole network is expected to be offloaded (host_ops=0) in a
    single NPU partition.
    """
    # If this test is failing due to a hash mismatch, please notify @lhutton1 and
    # @Leo-arm. The hash is there to catch any changes in the behaviour of the
    # codegen, which could come about from either a change in Support Library
    # version or a change in the Ethos-N codegen. To update this requires running
    # on hardware that isn't available in CI.
    _compile_hash = {"cff892eb15944756f22dad4b83c756d2"}
    _test_image_network(
        model_url="https://storage.googleapis.com/download.tensorflow.org/"
        "models/tflite_11_05_08/inception_v3_quant.tgz",
        model_sub_path="inception_v3_quant.tflite",
        input_dict={"input": (1, 299, 299, 3)},
        compile_hash=_compile_hash,
        output_count=1,
        host_ops=0,
        npu_partitions=1,
    )
@requires_ethosn
def test_inception_v4():
    """Compare compile hashes for inceptionv4 with an expected result.

    Compile-only check: the network is not run (run defaults to False).
    """
    # If this test is failing due to a hash mismatch, please notify @lhutton1 and
    # @Leo-arm. The hash is there to catch any changes in the behaviour of the
    # codegen, which could come about from either a change in Support Library
    # version or a change in the Ethos-N codegen. To update this requires running
    # on hardware that isn't available in CI.
    _compile_hash = {"c00c119506b34c8e87f81aa009b42431"}
    _test_image_network(
        model_url="https://storage.googleapis.com/download.tensorflow.org/"
        "models/inception_v4_299_quant_20181026.tgz",
        model_sub_path="inception_v4_299_quant.tflite",
        input_dict={"input": (1, 299, 299, 3)},
        compile_hash=_compile_hash,
        output_count=1,
        host_ops=3,
        npu_partitions=1,
    )
@requires_ethosn
def test_ssd_mobilenet_v1():
    """Compare compile hashes for ssdmobilenetv1 with an expected result.

    Detection network: four outputs are expected from the model.
    """
    # If this test is failing due to a hash mismatch, please notify @lhutton1 and
    # @Leo-arm. The hash is there to catch any changes in the behaviour of the
    # codegen, which could come about from either a change in Support Library
    # version or a change in the Ethos-N codegen. To update this requires running
    # on hardware that isn't available in CI.
    _compile_hash = {"04855b9b9e0ab3f3768495059e12c5cf"}
    _test_image_network(
        model_url="https://storage.googleapis.com/download.tensorflow.org/"
        "models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip",
        model_sub_path="detect.tflite",
        input_dict={"normalized_input_image_tensor": (1, 300, 300, 3)},
        compile_hash=_compile_hash,
        output_count=4,
        host_ops=14,
        npu_partitions=1,
    )
| 8,291 | 36.017857 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_tanh.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N NPU integration tanh tests"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, input_zp, input_sc, output_zp, output_sc, dtype):
    """Return a dequantize -> tanh -> quantize pattern."""
    inp = relay.var("a", shape=shape, dtype=dtype)
    dequantized = relay.qnn.op.dequantize(
        inp,
        input_scale=relay.const(input_sc, "float32"),
        input_zero_point=relay.const(input_zp, "int32"),
    )
    activated = relay.tanh(dequantized)
    return relay.qnn.op.quantize(
        activated,
        output_scale=relay.const(output_sc, "float32"),
        output_zero_point=relay.const(output_zp, "int32"),
        out_dtype=dtype,
    )
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize("shape", [(1, 52, 52, 3)])
def test_tanh(dtype, shape):
    """Compare Tanh output with TVM.

    Runs the model once on the host and once on the NPU and verifies the
    two outputs against each other.
    """
    zp_min = np.iinfo(dtype).min
    zp_max = np.iinfo(dtype).max
    np.random.seed(0)  # deterministic input data
    # NOTE(review): high=zp_max is exclusive, so the dtype's maximum value is
    # never generated (other tests use max + 1) — confirm intentional.
    inputs = {
        "a": tvm.nd.array(np.random.randint(zp_min, high=zp_max, size=shape, dtype=dtype)),
    }
    outputs = []
    for npu in [False, True]:
        # Fixed quantization: zp at the midpoint of the dtype range,
        # input scale 1/256 and output scale 1/128.
        model = _get_model(shape, zp_min + 128, 1 / 256, zp_min + 128, 1 / 128, dtype)
        mod = tei.make_module(model, [])
        outputs.append(
            tei.build_and_run(
                mod,
                inputs,
                1,
                {},
                npu=npu,
                additional_config_args={"inline_non_compute_intensive_partitions": False},
            )
        )
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "shape, input_zp, input_sc, output_zp, output_sc, err_msg",
    [
        (
            (1, 16, 16, 16),
            120,
            0.0250629,
            64,
            0.0078125,
            "output quantization params=(64, 0.0078125), must = ({test_zp}, 1/256);",
        )
    ],
)
def test_tanh_failure(shape, input_zp, input_sc, output_zp, output_sc, err_msg, dtype):
    """Check Tanh error messages.

    Builds a tanh with invalid output quantization parameters and asserts
    the expected error, with the required zero point substituted per dtype.
    """
    # The error message embeds the dtype-dependent required zero point.
    test_zp = 0 if dtype == "int8" else 128
    model = _get_model(shape, input_zp, input_sc, output_zp, output_sc, dtype)
    model = tei.make_ethosn_composite(model, "ethos-n.qnn_tanh")
    mod = tei.make_ethosn_partition(model)
    tei.test_error(mod, {}, err_msg.format(test_zp=test_zp))
| 3,213 | 31.795918 | 91 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_split.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Split tests for Arm(R) Ethos(TM)-N"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, dtype, splits, axis):
    """Return a split operation wrapped as a tuple expression."""
    inp = relay.var("a", shape=shape, dtype=dtype)
    return relay.op.split(inp, indices_or_sections=splits, axis=axis).astuple()
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "shape,splits,axis",
    [
        ((1, 16, 16, 32), (2, 7, 10), 2),
        ((1, 12, 8, 16), 3, 1),
    ],
)
def test_split(dtype, shape, splits, axis):
    """Compare Split output with TVM.

    Runs the model once on the host and once on the NPU and verifies the
    outputs against each other.
    """
    np.random.seed(0)  # deterministic input data
    outputs = []
    inputs = {
        "a": tvm.nd.array(
            np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype)
        )
    }
    for npu in [False, True]:
        model = _get_model(shape, dtype, splits, axis)
        mod = tei.make_module(model, {})
        # An int splits into that many equal sections; a tuple of indices
        # yields len(splits) + 1 pieces.
        output_count = splits if isinstance(splits, int) else len(splits) + 1
        outputs.append(
            tei.build_and_run(
                mod,
                inputs,
                output_count,
                {},
                npu=npu,
                additional_config_args={"inline_non_compute_intensive_partitions": False},
            )
        )
    tei.verify(outputs, dtype, 0)
@requires_ethosn
@pytest.mark.parametrize(
    "shape,dtype,splits,axis,err_msg",
    [
        ((1, 4, 4, 4, 4), "uint8", 4, 2, "dimensions=5, dimensions must be <= 4;"),
        ((1, 4, 4, 4), "int16", 4, 2, "dtype='int16', dtype must be either uint8, int8 or int32;"),
        ((2, 4, 4, 4), "uint8", 4, 2, "batch size=2, batch size must = 1;"),
        ((1, 4, 4, 4), "uint8", 1, 0, "Split cannot be performed along batch axis (axis 0);"),
        (
            (1, 4, 4, 4),
            "uint8",
            4,
            3,
            "Split along the channels dimension (axis 3) requires all output sizes "
            "(specified in splitInfo.m_Sizes) to be multiples of 16;",
        ),
    ],
)
def test_split_failure(shape, dtype, splits, axis, err_msg):
    """Check Split error messages.

    Each parametrized case builds an unsupported split configuration and
    asserts the partitioned module reports the expected error.
    """
    model = _get_model(shape, dtype, splits, axis)
    mod = tei.make_ethosn_partition(model)
    tei.test_error(mod, {}, err_msg)
| 3,131 | 31.625 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_requantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration requantize tests"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, input_zp, input_sc, output_zp, output_sc, in_dtype, out_dtype):
    """Build a single qnn.requantize op over one input variable named "a"."""
    data = relay.var("a", shape=shape, dtype=in_dtype)
    return relay.qnn.op.requantize(
        data=data,
        input_scale=relay.const(input_sc, "float32"),
        input_zero_point=relay.const(input_zp, "int32"),
        output_scale=relay.const(output_sc, "float32"),
        output_zero_point=relay.const(output_zp, "int32"),
        out_dtype=out_dtype,
    )
@requires_ethosn
@pytest.mark.parametrize("in_dtype", ["int8", "uint8"])
@pytest.mark.parametrize("out_dtype", ["int8", "uint8"])
@pytest.mark.parametrize("shape", [(1, 52, 52, 3)])
def test_requantize(in_dtype, out_dtype, shape):
    """Compare Requantize output with TVM."""
    np.random.seed(0)
    # Narrow input range: [0, 10) for uint8, [-5, 5) for int8.
    low = 0 if in_dtype == "uint8" else -5
    high = low + 10
    # NOTE(review): this midpoint is a float (5.0 or 0.0); relay.const with
    # "int32" presumably coerces it — confirm this is intentional.
    input_zp = (high + low) / 2
    inputs = {
        "a": tvm.nd.array(np.random.randint(low=low, high=high, size=shape, dtype=in_dtype)),
    }
    outputs = []
    for use_npu in (False, True):
        mod = tei.make_module(
            _get_model(
                shape=shape,
                input_zp=input_zp,
                input_sc=0.002,
                output_zp=10,
                output_sc=0.008,
                in_dtype=in_dtype,
                out_dtype=out_dtype,
            ),
            [],
        )
        outputs.append(
            tei.build_and_run(
                mod,
                inputs,
                1,
                {},
                npu=use_npu,
                additional_config_args={"inline_non_compute_intensive_partitions": False},
            )
        )
    tei.verify(outputs, out_dtype, 1)
@requires_ethosn
def test_requantize_mixed_precision_with_following_op():
    """
    Checks a requantize operation that changes precision from uint8 to int8 with a
    following add op.
    """
    np.random.seed(0)
    shape = (1, 4, 6, 8)
    # Input and output scales are identical; only the zero point (and dtype)
    # change across the requantize.
    in_sc = 0.012566
    in_zp = 131
    out_sc = 0.012566
    out_zp = 3
    in_dtype = "uint8"
    out_dtype = "int8"
    def get_model():
        # requantize (uint8 -> int8) feeding a qnn.add; both add operands and
        # the add output reuse the requantize output quantization (out_sc/out_zp).
        a = relay.var("a", shape=shape, dtype=in_dtype)
        b = relay.var("b", shape=shape, dtype=out_dtype)
        req = relay.qnn.op.requantize(
            data=a,
            input_scale=relay.const(in_sc, "float32"),
            input_zero_point=relay.const(in_zp, "int32"),
            output_scale=relay.const(out_sc, "float32"),
            output_zero_point=relay.const(out_zp, "int32"),
            out_dtype=out_dtype,
        )
        req = relay.qnn.op.add(
            req,
            b,
            lhs_scale=relay.const(out_sc, "float32"),
            lhs_zero_point=relay.const(out_zp, "int32"),
            rhs_scale=relay.const(out_sc, "float32"),
            rhs_zero_point=relay.const(out_zp, "int32"),
            output_scale=relay.const(out_sc, "float32"),
            output_zero_point=relay.const(out_zp, "int32"),
        )
        return req
    # Note: np.random.randint's `high` is exclusive, so the dtype maximum
    # itself is never generated here.
    inputs = {
        "a": tvm.nd.array(
            np.random.randint(
                low=np.iinfo(in_dtype).min, high=np.iinfo(in_dtype).max, size=shape, dtype=in_dtype
            )
        ),
        "b": tvm.nd.array(
            np.random.randint(
                low=np.iinfo(out_dtype).min,
                high=np.iinfo(out_dtype).max,
                size=shape,
                dtype=out_dtype,
            )
        ),
    }
    outputs = []
    # Run once on the host (npu=False) and once offloaded (npu=True), then
    # compare the two results.
    for npu in [False, True]:
        model = get_model()
        mod = tei.make_module(model, {})
        x = tei.build_and_run(
            mod,
            inputs,
            1,
            {},
            npu=npu,
            additional_config_args={"inline_non_compute_intensive_partitions": False},
        )
        outputs.append(x)
    tei.verify(outputs, out_dtype, 1)
@requires_ethosn
def test_requantize_failure():
    """Check the Requantize error message for a too-small output scale."""
    input_sc = 0.8
    # Pick an output scale just below input_sc / 128 to trip the NPU constraint.
    output_sc = (input_sc / 128) - 0.0001
    model = _get_model(
        shape=(1, 52, 52, 3),
        input_zp=0,
        input_sc=input_sc,
        output_zp=0,
        output_sc=output_sc,
        in_dtype="int8",
        out_dtype="int8",
    )
    composite = tei.make_ethosn_composite(model, "ethos-n.qnn_requantize")
    partitioned = tei.make_ethosn_partition(composite)
    tei.test_error(partitioned, {}, "Output scale must be bigger than input scale / 128")
| 5,238 | 30 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_reshape.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration reshape tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(input_shape, output_shape, dtype):
    """Build a single reshape op and return it with an (empty) params dict."""
    data = relay.var("a", shape=input_shape, dtype=dtype)
    reshaped = relay.reshape(data, output_shape)
    return reshaped, {}
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "input_shape, output_shape",
    [
        ((1, 15, 4, 1), (1, 60)),
        ((1, 15, 4, 1), (1, 30, 2)),
        ((1, 15, 4, 1), (1, 4, 15, 1)),
        ((1, 15, 4, 1), (1, 12, 5, 1)),
        ((1, 15, 4, 1), (1, 0, 2, 2)),
        ((1, 15, 4, 1), (1, -1, 2, 1)),
        ((1, 15, 4, 1), (1, -2)),
        ((1, 15, 4, 1), (1, -3, 1, 1)),
        ((1, 15, 4, 1), (1, -4, 3, 5, 4)),
        ((1, 15, 4, 1), (0, -1, -2)),
        ((1, 15, 4, 1), (0, -1, -3, 1)),
        ((1, 15, 4, 1), (1, -4, -1, 5, 4)),
    ],
)
def test_reshape(dtype, input_shape, output_shape):
    """Compare Reshape output with TVM, covering the special 0/-1/-2/-3/-4 codes."""
    np.random.seed(0)
    info = np.iinfo(dtype)
    inputs = {
        "a": tvm.nd.array(
            np.random.randint(low=info.min, high=info.max + 1, size=input_shape, dtype=dtype)
        )
    }
    outputs = []
    for use_npu in (False, True):
        model, params = _get_model(input_shape, output_shape, dtype)
        mod = tei.make_module(model, params)
        result = tei.build_and_run(
            mod,
            inputs,
            1,
            params,
            npu=use_npu,
            additional_config_args={"inline_non_compute_intensive_partitions": False},
        )
        outputs.append(result)
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
    "input_shape, output_shape",
    [
        (
            (1, 13, 13, 255),
            (1, 13, 13, 3, 85),
        ),
    ],
)
def test_reshape_failure(input_shape, output_shape):
    """Check that an unsupported Reshape is not offloaded to the NPU.

    The 5D output shape is expected to be rejected by the NPU partitioner:
    the reshape must remain on the host (one host op, zero NPU partitions).
    Note: the docstring previously said "Resize", which was a typo — this
    test exercises Reshape.
    """
    model, params = _get_model(input_shape, output_shape, "int8")
    mod = tei.make_module(model, params)
    # `expected_host_ops`/`npu_partitions` assert the op was NOT partitioned
    # for the NPU; inlining is disabled so small partitions are still counted.
    tei.build(
        mod,
        params,
        expected_host_ops=1,
        npu_partitions=0,
        additional_config_args={"inline_non_compute_intensive_partitions": False},
    )
| 3,237 | 28.436364 | 90 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_convert_equivalents.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for the convert equivalents pass."""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from tvm.relay.op.contrib.ethosn import ConvertEquivalents
from tvm.relay import ExprVisitor
from . import infrastructure as tei
from .test_addition import _get_addition_qnn_params
def _assert_structural_equal(a, b):
    """Assert that two Relay expressions are structurally equal."""
    message = (
        "Actual and expected relay functions are not equal. "
        "ConvertEquivalents is not correctly transforming the input "
        "graph."
    )
    assert tvm.ir.structural_equal(a, b), message
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize("shape,channels", [((1, 4, 4, 8), 8), ((1, 16, 12, 4), 4)])
@pytest.mark.parametrize("reverse_inputs", [True, False])
def test_multiply_to_depthwise(dtype, shape, channels, reverse_inputs):
    """Check that multiply is correctly converted to a depthwise operation."""
    np.random.seed(0)
    iinfo = np.iinfo(dtype)
    data_min = iinfo.min
    data_max = iinfo.max
    input_zp = np.random.randint(data_min, data_max)
    input_sc = np.random.random() * 2
    input2_zp = np.random.randint(data_min, data_max)
    input2_sc = np.random.random() * 2
    output_zp, output_sc = tei.get_conv2d_qnn_params(
        dtype, input_zp, input_sc, input2_zp, input2_sc, 1, 1, shape[3]
    )
    x = relay.var("x", shape=shape, dtype=dtype)
    # Per-channel constant broadcast over H and W.
    constant_shape = (1, 1, 1, channels)
    y_data = np.random.randint(data_min, data_max + 1, size=constant_shape, dtype=dtype)
    def before():
        # Input graph: composite qnn.mul; `reverse_inputs` swaps the operand
        # order but NOT the scale/zero-point argument order.
        y = relay.const(y_data, dtype=dtype)
        expr = relay.qnn.op.mul(
            y if reverse_inputs else x,
            x if reverse_inputs else y,
            relay.const(input_sc, "float32"),
            relay.const(input_zp, "int32"),
            relay.const(input2_sc, "float32"),
            relay.const(input2_zp, "int32"),
            relay.const(output_sc, "float32"),
            relay.const(output_zp, "int32"),
        )
        composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_mul_to_depthwise")
        return tei.make_ethosn_partition(composite)
    def expected():
        # Expected graph after ConvertEquivalents: a 1x1 conv with HWOI
        # weights whose group count equals `channels` (i.e. depthwise),
        # followed by a zero bias and a requantize back to `dtype`. Scales
        # and zero points are swapped when the inputs were reversed.
        constant_shape_hwoi = (1, 1, channels, 1)
        y_data_hwoi = y_data.reshape(constant_shape_hwoi)
        y_hwoi = relay.const(y_data_hwoi, dtype=dtype)
        expr = relay.qnn.op.conv2d(
            x,
            y_hwoi,
            relay.const(input2_zp if reverse_inputs else input_zp, "int32"),
            relay.const(input_zp if reverse_inputs else input2_zp, "int32"),
            relay.const(input2_sc if reverse_inputs else input_sc, "float32"),
            relay.const(input_sc if reverse_inputs else input2_sc, "float32"),
            (1, 1),
            channels,
            (1, 1),
            (0, 0),
            (1, 1),
            channels,
            "NHWC",
            "HWOI",
            "NHWC",
            "int32",
        )
        expr = relay.nn.bias_add(expr, relay.const(np.zeros((channels,), dtype="int32")), axis=3)
        expr = relay.qnn.op.requantize(
            expr,
            relay.const(input2_sc if reverse_inputs else input_sc, "float32"),
            relay.const(input2_zp if reverse_inputs else input_zp, "int32"),
            relay.const(output_sc, "float32"),
            relay.const(output_zp, "int32"),
            out_dtype=dtype,
        )
        composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_conv2d")
        return tei.make_ethosn_partition(composite)
    # Run the pass and compare the transformed partition structurally.
    mod = before()
    mod = ConvertEquivalents()(mod)
    expected_mod = expected()
    _assert_structural_equal(mod["ethos-n_0"], expected_mod["ethos-n_0"])
@requires_ethosn
@pytest.mark.parametrize(
    "dtype,shape,constant_shape",
    [("int8", (1, 4, 4), (4,)), ("int32", (1, 16, 12, 4), (1, 1, 1, 4))],
)
def test_unsupported_multiply_to_depthwise(dtype, shape, constant_shape):
    """Check that unsupported variants of multiply to depthwise are not converted."""
    np.random.seed(0)
    info = np.iinfo(dtype)
    data_min = info.min
    data_max = info.max
    input_zp = np.random.randint(data_min, data_max)
    input_sc = np.random.random() * 2
    input2_zp = np.random.randint(data_min, data_max)
    input2_sc = np.random.random() * 2
    output_zp, output_sc = tei.get_conv2d_qnn_params(
        dtype, input_zp, input_sc, input2_zp, input2_sc, 1, 1, shape[-1]
    )
    x = relay.var("x", shape=shape, dtype=dtype)
    y_data = np.random.randint(data_min, data_max + 1, size=constant_shape, dtype=dtype)
    def build_module():
        # Composite marked for conversion, but with a rank/dtype the
        # conversion cannot handle.
        mul = relay.qnn.op.mul(
            x,
            relay.const(y_data, dtype=dtype),
            relay.const(input_sc, "float32"),
            relay.const(input_zp, "int32"),
            relay.const(input2_sc, "float32"),
            relay.const(input2_zp, "int32"),
            relay.const(output_sc, "float32"),
            relay.const(output_zp, "int32"),
        )
        composite = tei.make_ethosn_composite(mul, "ethos-n.qnn_mul_to_depthwise")
        return tei.make_ethosn_partition(composite)
    error_regex = (
        r'Operation "ethos-n.qnn_mul_to_depthwise" was marked '
        r"as having a valid conversion, but it could not be converted."
    )
    with pytest.raises(tvm.TVMError, match=error_regex):
        ConvertEquivalents()(build_module())
@requires_ethosn
@pytest.mark.parametrize(
    "shape,constant_shape",
    [((1, 4, 4, 8), (1, 1, 1, 1)), ((1, 16, 12, 4), None)],
)
@pytest.mark.parametrize("reverse_inputs", [True, False])
def test_multiply_to_reinterpret_quantize(shape, constant_shape, reverse_inputs):
    """Check that multiply is correctly converted to a reinterpret quantize operation."""
    np.random.seed(0)
    dtype = "uint8"
    # Multiply can only be offloaded as a reinterpret quantize operation if
    # it is an identity option. We must choose the quantization and constant
    # data carefully to make sure that this is the case.
    input_zp = 0
    input_sc = 0.007814894430339336
    input2_zp = 0
    input2_sc = 0.5
    output_zp = 0
    output_sc = 0.9963990449905396
    constant_data = 255
    x = relay.var("x", shape=shape, dtype=dtype)
    y_data = np.array(constant_data, dtype=dtype).reshape(constant_shape)
    def before():
        # Unlike the depthwise test above, here the scale/zero-point argument
        # order IS swapped together with the operands when reversed.
        y = relay.const(y_data, dtype=dtype)
        expr = relay.qnn.op.mul(
            y if reverse_inputs else x,
            x if reverse_inputs else y,
            relay.const(input2_sc if reverse_inputs else input_sc, "float32"),
            relay.const(input2_zp if reverse_inputs else input_zp, "int32"),
            relay.const(input_sc if reverse_inputs else input2_sc, "float32"),
            relay.const(input_zp if reverse_inputs else input2_zp, "int32"),
            relay.const(output_sc, "float32"),
            relay.const(output_zp, "int32"),
        )
        composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_mul_to_reinterpret_quantize")
        return tei.make_ethosn_partition(composite)
    def expected():
        # Expected result: a plain requantize on x with the output quantization.
        expr = relay.qnn.op.requantize(
            x,
            relay.const(input_sc, "float32"),
            # NOTE(review): both branches of this ternary are `input_zp`, so
            # `reverse_inputs` has no effect here — possibly a leftover.
            relay.const(input_zp if reverse_inputs else input_zp, "int32"),
            relay.const(output_sc, "float32"),
            relay.const(output_zp, "int32"),
            out_dtype=dtype,
        )
        composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_reinterpret_quantize")
        return tei.make_ethosn_partition(composite)
    mod = before()
    mod = ConvertEquivalents()(mod)
    expected_mod = expected()
    _assert_structural_equal(mod["ethos-n_0"], expected_mod["ethos-n_0"])
@requires_ethosn
@pytest.mark.parametrize(
    "dtype,shape,constant_shape",
    [("float32", (1, 16, 12, 4), None)],
)
def test_unsupported_multiply_to_reinterpret_quantize(dtype, shape, constant_shape):
    """
    Check that unsupported variants of multiply conversion to reinterpret
    quantize are not converted.
    """
    np.random.seed(0)
    # Quantization parameters and constant are chosen so the multiply is an
    # identity operation; the float32 dtype is what makes this variant
    # unsupported, so the pass must raise rather than convert.
    input_zp = 0
    input_sc = 0.007814894430339336
    input2_zp = 0
    input2_sc = 0.5
    output_zp = 0
    output_sc = 0.9963990449905396
    constant_data = 255
    x = relay.var("x", shape=shape, dtype=dtype)
    y_data = np.array(constant_data, dtype=dtype).reshape(constant_shape)
    def build_module():
        mul = relay.qnn.op.mul(
            x,
            relay.const(y_data, dtype=dtype),
            relay.const(input_sc, "float32"),
            relay.const(input_zp, "int32"),
            relay.const(input2_sc, "float32"),
            relay.const(input2_zp, "int32"),
            relay.const(output_sc, "float32"),
            relay.const(output_zp, "int32"),
        )
        composite = tei.make_ethosn_composite(mul, "ethos-n.qnn_mul_to_reinterpret_quantize")
        return tei.make_ethosn_partition(composite)
    error_regex = (
        r'Operation "ethos-n.qnn_mul_to_reinterpret_quantize" was marked '
        r"as having a valid conversion, but it could not be converted."
    )
    with pytest.raises(tvm.TVMError, match=error_regex):
        ConvertEquivalents()(build_module())
@requires_ethosn
@pytest.mark.parametrize("reverse_inputs", [True, False])
def test_add_to_depthwise(reverse_inputs):
    """
    Check that add is converted correctly.
    """
    dtype = "uint8"
    lhs_shape = (1, 2, 4, 8)
    # Constant operand broadcasts per-channel over H and W.
    rhs_shape = (1, 1, 1, 8)
    np.random.seed(0)
    iinfo = np.iinfo(dtype)
    data_min = iinfo.min
    data_max = iinfo.max
    lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc = _get_addition_qnn_params(dtype)
    x = relay.var("x", shape=lhs_shape, dtype=dtype)
    y_data = np.random.randint(data_min, data_max + 1, size=rhs_shape, dtype=dtype)
    def before():
        # Composite qnn.add between a variable and a broadcastable constant.
        y = relay.const(y_data)
        expr = relay.qnn.op.add(
            lhs=y if reverse_inputs else x,
            rhs=x if reverse_inputs else y,
            lhs_scale=relay.const(lhs_sc, "float32"),
            lhs_zero_point=relay.const(lhs_zp, "int32"),
            rhs_scale=relay.const(rhs_sc, "float32"),
            rhs_zero_point=relay.const(rhs_zp, "int32"),
            output_scale=relay.const(out_sc, "float32"),
            output_zero_point=relay.const(out_zp, "int32"),
        )
        composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_add_to_depthwise")
        return tei.make_ethosn_partition(composite)
    class ConversionChecker(ExprVisitor):
        """
        Pass to check the new composite function is in the expected format.
        """
        # Visited outermost-first and popped from the END of this list, so
        # the expected visiting order is requantize, bias_add, conv2d.
        sequence = ["qnn.conv2d", "nn.bias_add", "qnn.requantize"]
        # pylint: disable=invalid-name
        def visit_function(self, fn):
            # The rewritten composite must be renamed to qnn_conv2d.
            composite_name = fn.attrs["Composite"]
            expected = "ethos-n.qnn_conv2d"
            assert (
                composite_name == expected
            ), f"Expected Composite attribute {expected} but got {composite_name}"
            super().visit_function(fn)
        def visit_call(self, call):
            op_name = call.op.name
            expected_name = self.sequence.pop()
            assert op_name == expected_name, f"Got operator {op_name} but expected {expected_name}"
            super().visit_call(call)
    mod = before()
    mod = ConvertEquivalents()(mod)
    # The checker works purely via its asserts; `visit` returns None.
    mod = ConversionChecker().visit(mod["ethos-n_0"].body.op)
@requires_ethosn
@pytest.mark.parametrize(
    "dtype,lhs_shape,rhs_shape", [("uint8", (1, 4, 4), (1, 1, 4)), ("int32", (1, 4, 4, 4), (4,))]
)
def test_unsupported_add_to_depthwise(dtype, lhs_shape, rhs_shape):
    """Check that unsupported variants of add are not converted."""
    np.random.seed(0)
    info = np.iinfo(dtype)
    lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc = _get_addition_qnn_params(dtype)
    x = relay.var("x", shape=lhs_shape, dtype=dtype)
    y_data = np.random.randint(info.min, info.max + 1, size=rhs_shape, dtype=dtype)
    def build_module():
        # Composite marked for conversion, but with an unsupported
        # rank/dtype combination.
        addition = relay.qnn.op.add(
            lhs=x,
            rhs=relay.const(y_data),
            lhs_scale=relay.const(lhs_sc, "float32"),
            lhs_zero_point=relay.const(lhs_zp, "int32"),
            rhs_scale=relay.const(rhs_sc, "float32"),
            rhs_zero_point=relay.const(rhs_zp, "int32"),
            output_scale=relay.const(out_sc, "float32"),
            output_zero_point=relay.const(out_zp, "int32"),
        )
        composite = tei.make_ethosn_composite(addition, "ethos-n.qnn_add_to_depthwise")
        return tei.make_ethosn_partition(composite)
    error_regex = (
        r'Operation "ethos-n.qnn_add_to_depthwise" was marked '
        r"as having a valid conversion, but it could not be converted."
    )
    with pytest.raises(tvm.TVMError, match=error_regex):
        ConvertEquivalents()(build_module())
@requires_ethosn
@pytest.mark.parametrize(
    "shape,constant_shape",
    [
        ((1, 4, 4, 8), (1, 1, 1, 1)),
        ((1, 16, 12, 4), None),
    ],
)
@pytest.mark.parametrize("reverse_inputs", [True, False])
def test_add_to_reinterpret_quantize(shape, constant_shape, reverse_inputs):
    """Check that add is correctly converted to a reinterpret quantize operation."""
    np.random.seed(0)
    dtype = "uint8"
    # Add can only be offloaded as a reinterpret quantize operation if
    # it is an identity option. We must choose the quantization and constant
    # data carefully to make sure that this is the case.
    input_zp = 128
    input_sc = 0.0078125
    input2_zp = 0
    input2_sc = 0.003921568859368563
    output_zp = 0
    output_sc = 0.007814894430339336
    constant_data = 255
    x = relay.var("x", shape=shape, dtype=dtype)
    y_data = np.array(constant_data, dtype=dtype).reshape(constant_shape)
    def before():
        # Operands AND their scale/zero-point arguments are swapped together
        # when `reverse_inputs` is set.
        y = relay.const(y_data, dtype=dtype)
        expr = relay.qnn.op.add(
            y if reverse_inputs else x,
            x if reverse_inputs else y,
            relay.const(input2_sc if reverse_inputs else input_sc, "float32"),
            relay.const(input2_zp if reverse_inputs else input_zp, "int32"),
            relay.const(input_sc if reverse_inputs else input2_sc, "float32"),
            relay.const(input_zp if reverse_inputs else input2_zp, "int32"),
            relay.const(output_sc, "float32"),
            relay.const(output_zp, "int32"),
        )
        composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_add_to_reinterpret_quantize")
        return tei.make_ethosn_partition(composite)
    def expected():
        # Expected result: a plain requantize on x with the output quantization.
        expr = relay.qnn.op.requantize(
            x,
            relay.const(input_sc, "float32"),
            # NOTE(review): both branches of this ternary are `input_zp`, so
            # `reverse_inputs` has no effect here — possibly a leftover.
            relay.const(input_zp if reverse_inputs else input_zp, "int32"),
            relay.const(output_sc, "float32"),
            relay.const(output_zp, "int32"),
            out_dtype=dtype,
        )
        composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_reinterpret_quantize")
        return tei.make_ethosn_partition(composite)
    mod = before()
    mod = ConvertEquivalents()(mod)
    expected_mod = expected()
    _assert_structural_equal(mod["ethos-n_0"], expected_mod["ethos-n_0"])
@requires_ethosn
@pytest.mark.parametrize(
    "dtype,shape,constant_shape",
    [
        ("float32", (1, 16, 12, 4), None),
    ],
)
def test_unsupported_add_to_reinterpret_quantize(dtype, shape, constant_shape):
    """Check that unsupported variants of add to reinterpret quantize are not converted."""
    np.random.seed(0)
    # Quantization parameters and constant are chosen so the add is an
    # identity operation; the float32 dtype is what makes this variant
    # unsupported, so the pass must raise rather than convert.
    input_zp = 128
    input_sc = 0.0078125
    input2_zp = 0
    input2_sc = 0.003921568859368563
    output_zp = 0
    output_sc = 0.007814894430339336
    constant_data = 255
    x = relay.var("x", shape=shape, dtype=dtype)
    y_data = np.array(constant_data, dtype=dtype).reshape(constant_shape)
    def build_module():
        addition = relay.qnn.op.add(
            x,
            relay.const(y_data, dtype=dtype),
            relay.const(input_sc, "float32"),
            relay.const(input_zp, "int32"),
            relay.const(input2_sc, "float32"),
            relay.const(input2_zp, "int32"),
            relay.const(output_sc, "float32"),
            relay.const(output_zp, "int32"),
        )
        composite = tei.make_ethosn_composite(addition, "ethos-n.qnn_add_to_reinterpret_quantize")
        return tei.make_ethosn_partition(composite)
    error_regex = (
        r'Operation "ethos-n.qnn_add_to_reinterpret_quantize" was marked '
        r"as having a valid conversion, but it could not be converted."
    )
    with pytest.raises(tvm.TVMError, match=error_regex):
        ConvertEquivalents()(build_module())
| 17,858 | 35.225152 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""NPU codegen tests"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
@requires_ethosn
def test_compile_with_unsupported_variant():
    """Compiling against an unknown NPU variant must raise an error."""
    dtype = "int8"
    input_shape = (1, 2, 2, 2)
    data = relay.var("x", shape=input_shape, dtype=dtype)
    mod = tei.make_ethosn_partition(relay.reshape(data, newshape=(1, 1, 1, 8)))
    # "foo" is not a valid Ethos-N variant name.
    config = {
        "variant": "foo",
        "inline_non_compute_intensive_partitions": False,
    }
    info = np.iinfo(dtype)
    inputs = {
        "x": np.random.randint(low=info.min, high=info.max, size=input_shape, dtype=dtype)
    }
    with pytest.raises(tvm.TVMError, match=r"Unknown NPU type"):
        tei.build_and_run(mod, inputs, 1, {}, True, additional_config_args=config)
@requires_ethosn
def test_experimental_compiler(capfd):
    """Test compilation with the experimental compiler enabled."""
    dtype = "int8"
    input_shape = (1, 2, 2, 2)
    data = relay.var("x", shape=input_shape, dtype=dtype)
    mod = tei.make_ethosn_partition(relay.reshape(data, newshape=(1, 1, 1, 8)))
    config = {
        "variant": "n78",
        "experimental_compiler": True,
        "inline_non_compute_intensive_partitions": False,
    }
    tei.build(mod, {}, True, additional_config_args=config)
    # The support library logs a warning when the experimental compiler is
    # active; its presence on stderr confirms the flag took effect.
    captured = capfd.readouterr()
    assert (
        "WARNING: Experimental Compiler in use." in captured.err
    ), "Experimental compiler was not activated."
@requires_ethosn
def test_without_experimental_compiler(capfd):
    """Test compilation when the experimental compiler is not enabled."""
    dtype = "int8"
    input_shape = (1, 2, 2, 2)
    data = relay.var("x", shape=input_shape, dtype=dtype)
    mod = tei.make_ethosn_partition(relay.reshape(data, newshape=(1, 1, 1, 8)))
    config = {
        "variant": "n78",
        "experimental_compiler": False,
        "inline_non_compute_intensive_partitions": False,
    }
    tei.build(mod, {}, True, additional_config_args=config)
    # The support library warns on stderr when the experimental compiler is
    # active; with the flag off, that warning must be absent.
    captured = capfd.readouterr()
    assert (
        "WARNING: Experimental Compiler in use." not in captured.err
    ), "Experimental compiler was enabled when it is not expected to be."
| 3,581 | 32.476636 | 98 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_topologies.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N tests for complex network topologies."""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from tvm.relay.op.contrib.ethosn import Available, ethosn_available
from . import infrastructure as tei
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_split_add_concat(dtype):
    """Test a model with split, add and concatenate."""
    def get_model(input_shape, dtype, var_names):
        """Split into four along axis 2, add the first two pieces, then
        concatenate the sum with the remaining two pieces."""
        a = relay.var(next(var_names), shape=input_shape, dtype=dtype)
        split_scale = relay.const(0.25, "float32")
        split_zp = relay.const(100, "int32")
        add_scale = relay.const(0.75, "float32")
        add_zp = relay.const(120, "int32")
        axis = 2
        split = relay.split(a, indices_or_sections=4, axis=axis)
        b = relay.qnn.op.add(
            split[0],
            split[1],
            lhs_scale=split_scale,
            lhs_zero_point=split_zp,
            rhs_scale=split_scale,
            rhs_zero_point=split_zp,
            output_scale=add_scale,
            output_zero_point=add_zp,
        )
        conc = relay.qnn.op.concatenate(
            [b, split[2], split[3]],
            input_scales=(add_scale, split_scale, split_scale),
            input_zero_points=(add_zp, split_zp, split_zp),
            output_scale=add_scale,
            output_zero_point=add_zp,
            axis=axis,
        )
        return conc
    np.random.seed(0)
    inputs = {
        "a": tvm.nd.array(
            np.random.randint(
                np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=(1, 16, 16, 4), dtype=dtype
            )
        ),
    }
    outputs = []
    for npu in [False, True]:
        model = get_model(inputs["a"].shape, dtype, iter(inputs))
        mod = tei.make_module(model, [])
        # The whole graph is expected to be offloaded in a single partition.
        expected_host_ops = 0
        npu_partitions = 1
        outputs.append(
            tei.build_and_run(
                mod,
                inputs,
                1,
                {},
                npu=npu,
                expected_host_ops=expected_host_ops,
                npu_partitions=npu_partitions,
                additional_config_args={"inline_non_compute_intensive_partitions": False},
            )
        )
    if outputs:
        tei.verify(outputs, dtype, 2)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_multiple_command_streams(dtype):
    """Check that multiple Ethos-N partitions are correctly handled.

    If there's more than one Ethos-N graph partition, more than one command
    stream will be created. This should be handled correctly by both the
    Ethos-N codegen and Ethos-N runtime module. This test checks against a
    simple graph which creates two Ethos-N partitions and checks the result
    against an 'all-CPU' run through TVM.
    """
    def get_model(dtype):
        """Build max_pool2d -> abs -> max_pool2d, where abs is not supported
        by the NPU and therefore splits the graph into two partitions."""
        data = relay.var("x", shape=(1, 4, 4, 4), dtype=dtype)
        pooled = relay.nn.max_pool2d(data, (2, 2), (2, 2), layout="NHWC")  # offloaded
        host_op = relay.op.abs(pooled)  # stays on the host
        return relay.nn.max_pool2d(host_op, (2, 2), (2, 2), layout="NHWC")  # offloaded
    np.random.seed(0)
    info = np.iinfo(dtype)
    inputs = {
        "x": tvm.nd.array(
            np.random.randint(info.min, info.max + 1, size=(1, 4, 4, 4), dtype=dtype)
        )
    }
    mod = tei.make_module(get_model(dtype), {})
    # Mock inference is only supported when the whole graph is offloaded to the NPU
    if ethosn_available() == Available.SW_ONLY:
        tei.build(mod, {}, npu=True, expected_host_ops=1, npu_partitions=2)
    else:
        tei.build_and_run(mod, inputs, 1, {}, npu=True, expected_host_ops=1, npu_partitions=2)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_output_order(dtype):
    """Test the output order."""
    def get_model(input_shape, dtype, var_names):
        """Return a model with eight clip outputs in a shuffled order."""
        lo = np.iinfo(dtype).min
        hi = np.iinfo(dtype).max
        inp = relay.var(next(var_names), shape=input_shape, dtype=dtype)
        base = relay.op.clip(inp, lo, hi)
        # Eight clips over consecutive 16-wide bands of the dtype range; the
        # last band is clamped to the dtype maximum instead of lo + 127.
        clips = [
            relay.op.clip(base, lo + 16 * i, hi if i == 7 else lo + 16 * i + 15)
            for i in range(8)
        ]
        # Deliberately scrambled output order (originally d, c, e, f, i, b, h, g).
        order = (2, 1, 3, 4, 7, 0, 6, 5)
        return relay.Tuple(tuple(clips[i] for i in order))
    np.random.seed(0)
    info = np.iinfo(dtype)
    inputs = {
        "a": tvm.nd.array(
            np.random.randint(info.min, info.max + 1, size=(1, 16, 16, 4), dtype=dtype)
        ),
    }
    outputs = []
    for use_npu in (False, True):
        model = get_model(inputs["a"].shape, dtype, iter(inputs))
        mod = tei.make_module(model, [])
        outputs.append(
            tei.build_and_run(
                mod,
                inputs,
                8,
                {},
                npu=use_npu,
                additional_config_args={"inline_non_compute_intensive_partitions": False},
            )
        )
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_output_order_different_sizes(dtype):
    """
    Test the output order when there are multiple outputs of different sizes.
    """
    np.random.seed(0)
    input_name = "a"
    input_shape = (1, 8, 8, 4)
    dtype_min = np.iinfo(dtype).min
    dtype_max = np.iinfo(dtype).max
    def get_model():
        # Three outputs of different spatial sizes from one input:
        # a (1, 1, 1, 4) requantized mean, a (1, 4, 4, 4) max pool, and the
        # full-size (1, 8, 8, 4) clip itself.
        var = relay.var(input_name, shape=input_shape, dtype=dtype)
        clip = relay.op.clip(var, dtype_min, dtype_max)
        max_pool = relay.nn.max_pool2d(clip, (2, 2), (2, 2), ceil_mode=True, layout="NHWC")
        mean = relay.op.cast(clip, "int32")
        mean = relay.mean(mean, axis=[1, 2], keepdims=True)
        mean = relay.qnn.op.requantize(
            mean,
            input_scale=relay.const(0.0784314, "float32"),
            # dtype_min + 128 gives a zero point of 0 for int8 and 128 for uint8.
            input_zero_point=relay.const(dtype_min + 128, "int32"),
            output_scale=relay.const(0.0784314, "float32"),
            output_zero_point=relay.const(dtype_min + 128, "int32"),
            out_dtype=dtype,
        )
        return relay.Tuple((mean, max_pool, clip))
    inputs = {
        input_name: tvm.nd.array(
            np.random.randint(dtype_min, dtype_max + 1, size=input_shape, dtype=dtype)
        ),
    }
    outputs = []
    for npu in [False, True]:
        model = get_model()
        mod = tei.make_module(model, [])
        # The whole graph must be offloaded as a single NPU partition.
        outputs.append(
            tei.build_and_run(mod, inputs, 3, {}, npu=npu, expected_host_ops=0, npu_partitions=1)
        )
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "shape,splits,axis",
    [
        ((1, 16, 16, 32), (2, 7, 10), 2),
    ],
)
def test_split_with_asym_concats(dtype, shape, splits, axis):
    """Test a model with split and concatenates."""
    np.random.seed(0)

    def build_split_concat_model(shape, dtype, splits, axis):
        """Split the input, then re-join the four pieces as two asymmetric concats."""
        zero_point = relay.const(1, "int32")
        scale = relay.const(0.5, "float32")

        def quantized_concat(tensors):
            # All operands share the same (scale, zero point) pair.
            return relay.qnn.op.concatenate(
                tensors,
                input_scales=[scale] * len(tensors),
                input_zero_points=[zero_point] * len(tensors),
                output_scale=scale,
                output_zero_point=zero_point,
                axis=axis,
            )

        inp = relay.var("a", shape=shape, dtype=dtype)
        pieces = relay.op.split(inp, indices_or_sections=splits, axis=axis)
        first = quantized_concat([pieces[0], pieces[1]])
        second = quantized_concat([pieces[2], pieces[3]])
        return relay.Tuple((second, first))

    inputs = {
        "a": tvm.nd.array(
            np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype)
        )
    }
    outputs = []
    for npu in [False, True]:
        model = build_split_concat_model(shape, dtype, splits, axis)
        mod = tei.make_module(model, {})
        # Mock inference is only supported when the whole graph is offloaded to the NPU
        if ethosn_available() == Available.SW_ONLY:
            tei.build(
                mod,
                {},
                npu=npu,
                expected_host_ops=0,
                npu_partitions=1,
                additional_config_args={"inline_non_compute_intensive_partitions": False},
            )
        else:
            outputs.append(
                tei.build_and_run(
                    mod,
                    inputs,
                    2,
                    {},
                    npu=npu,
                    expected_host_ops=0,
                    npu_partitions=1,
                    additional_config_args={"inline_non_compute_intensive_partitions": False},
                )
            )
    if outputs:
        tei.verify(outputs, dtype, 0)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_output_tuple_propagation(dtype):
    """This tests the case where the output tuple must be inferred
    as having dummy tensor information."""

    def four_way_split(dtype):
        """Split the input into four equal slices and return them all as a tuple."""
        data = relay.var("a", shape=(1, 4, 4, 16), dtype=dtype)
        parts = relay.op.split(data, indices_or_sections=4, axis=2)
        return relay.Tuple(tuple(parts[i] for i in range(4)))

    np.random.seed(0)
    inputs = {
        "a": tvm.nd.array(
            np.random.randint(
                np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=(1, 4, 4, 16), dtype=dtype
            )
        )
    }
    outputs = []
    for npu in [False, True]:
        mod = tei.make_module(four_way_split(dtype), {})
        outputs.append(
            tei.build_and_run(
                mod,
                inputs,
                4,
                {},
                npu=npu,
                additional_config_args={"inline_non_compute_intensive_partitions": False},
            )
        )
    tei.verify(outputs, dtype, 0)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_input_tuples(dtype):
    """Test a model with a tuple as input."""

    def concat_of_vars(shapes, dtype, axis):
        """Concatenate one fresh input variable per shape along `axis`."""
        operands = [
            relay.var("in" + str(index), shape=shape, dtype=dtype)
            for index, shape in enumerate(shapes)
        ]
        zero_point = relay.const(1, "int32")
        scale = relay.const(0.5, "float32")
        return relay.qnn.op.concatenate(
            operands,
            input_scales=[scale] * len(shapes),
            input_zero_points=[zero_point] * len(shapes),
            output_scale=scale,
            output_zero_point=zero_point,
            axis=axis,
        )

    np.random.seed(0)
    dtype_info = np.iinfo(dtype)
    inputs = {
        name: tvm.nd.array(
            np.random.randint(dtype_info.min, dtype_info.max + 1, size=size, dtype=dtype)
        )
        for name, size in (("in0", (1, 4)), ("in1", (1, 6)))
    }
    outputs = []
    for npu in [False, True]:
        model = concat_of_vars([(1, 4), (1, 6)], dtype, 1)
        if npu:
            mod = tei.make_ethosn_partition(model)
        else:
            mod = tei.make_module(model, {})
        # The library is always built without the NPU; `tei.run` decides the target.
        lib = tei.build(
            mod,
            {},
            npu=False,
            additional_config_args={"inline_non_compute_intensive_partitions": False},
        )
        outputs.append(tei.run(lib, inputs, 1, npu=npu))
    tei.verify(outputs, dtype, 0)
@requires_ethosn
def test_inline_non_compute_intensive_operations():
    """Tests the case when a subgraph is unpartitioned."""
    np.random.seed(0)
    dtype = "int8"
    shape = (1, 2, 2, 4)
    # A lone reshape is not compute-intensive, so it should stay on the host.
    reshaped = relay.reshape(relay.var("x", shape=shape, dtype=dtype), newshape=(1, 1, 4, 4))
    info = np.iinfo(dtype)
    inputs = {
        "x": tvm.nd.array(np.random.randint(info.min, info.max + 1, size=shape, dtype=dtype)),
    }
    outputs = [
        tei.build_and_run(
            tei.make_module(reshaped, {}),
            inputs,
            1,
            {},
            npu=npu,
            expected_host_ops=1,
            npu_partitions=0,
        )
        for npu in (False, True)
    ]
    tei.verify(outputs, dtype, 0)
| 13,812 | 30.827189 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_mean.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration mean tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, axis, keepdims, input_zp, input_sc, output_zp, output_sc, dtype):
    """Return a mean model: cast to int32, reduce over `axis`, requantize to `dtype`."""
    data = relay.var("a", shape=shape, dtype=dtype)
    # Widen before the reduction so the accumulation happens in int32.
    reduced = relay.mean(relay.op.cast(data, "int32"), axis, keepdims)
    return relay.qnn.op.requantize(
        reduced,
        input_scale=relay.const(input_sc, "float32"),
        input_zero_point=relay.const(input_zp, "int32"),
        output_scale=relay.const(output_sc, "float32"),
        output_zero_point=relay.const(output_zp, "int32"),
        out_dtype=dtype,
    )
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize("shape", [(1, 7, 7, 2048), (1, 8, 8)])
def test_mean(dtype, shape):
    """Compare Mean output with TVM."""
    np.random.seed(0)
    low = np.iinfo(dtype).min
    high = np.iinfo(dtype).max
    inputs = {
        "a": tvm.nd.array(np.random.randint(low, high=high + 1, size=shape, dtype=dtype)),
    }
    outputs = []
    for npu in [False, True]:
        # Identical input/output quantisation parameters on both sides.
        model = _get_model(
            shape, [1, 2], True, low + 128, 0.0784314, low + 128, 0.0784314, dtype=dtype
        )
        outputs.append(tei.build_and_run(tei.make_module(model, []), inputs, 1, {}, npu=npu))
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["int8", "uint8"])
def test_mean_non_equal_quantization(dtype):
    """Test mean is not offloaded when quantization is not equal."""
    np.random.seed(0)
    low = np.iinfo(dtype).min
    # Differing input/output quantisation should keep the whole graph on the host.
    model = _get_model(
        (1, 7, 7, 2048), [1, 2], True, low + 120, 0.0068132, low + 128, 0.0078125, dtype
    )
    tei.build(tei.make_module(model, []), {}, npu=True, expected_host_ops=3, npu_partitions=0)
| 2,748 | 32.938272 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Infrastructure and tests for Arm(R) Ethos(TM)-N"""
| 839 | 45.666667 | 62 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_multiply.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Integration tests for Multiply."""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(
    shape,
    constant_shape,
    input_zp,
    input_sc,
    input2_zp,
    input2_sc,
    output_zp,
    output_sc,
    dtype,
    reverse_inputs=False,
    constant_data=None,
):
    """Return a qnn.mul of a variable with a constant plus its parameter dict.

    The constant is filled with `constant_data` when given, otherwise with
    random values of `dtype`. `reverse_inputs` swaps the operand order.
    """
    info = np.iinfo(dtype)
    if constant_data:
        const_values = np.array(constant_data, dtype=dtype).reshape(constant_shape)
    else:
        const_values = np.random.randint(info.min, info.max + 1, size=constant_shape, dtype=dtype)
    variable = relay.var("x", shape=shape, dtype=dtype)
    constant = relay.const(const_values, dtype=dtype)
    lhs, rhs = (constant, variable) if reverse_inputs else (variable, constant)
    model = relay.qnn.op.mul(
        lhs,
        rhs,
        relay.const(input_sc, "float32"),
        relay.const(input_zp, "int32"),
        relay.const(input2_sc, "float32"),
        relay.const(input2_zp, "int32"),
        relay.const(output_sc, "float32"),
        relay.const(output_zp, "int32"),
    )
    return model, {"y": const_values}
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "shape,constant_shape",
    [((1, 4, 4, 8), (1, 1, 1, 8)), ((1, 16, 12, 4), (4,))],
)
@pytest.mark.parametrize("reverse_inputs", [False, True])
def test_multiply_to_depthwise(dtype, shape, constant_shape, reverse_inputs):
    """Compare Multiply -> Depthwise conversion output with TVM."""
    np.random.seed(0)
    info = np.iinfo(dtype)
    # Random quantisation parameters for both operands, then derive the output's.
    input_zp = np.random.randint(info.min, info.max)
    input_sc = np.random.random() * 2
    input2_zp = np.random.randint(info.min, info.max)
    input2_sc = np.random.random() * 2
    output_zp, output_sc = tei.get_conv2d_qnn_params(
        dtype, input_zp, input_sc, input2_zp, input2_sc, 1, 1, shape[3]
    )
    model, params = _get_model(
        shape,
        constant_shape,
        input_zp,
        input_sc,
        input2_zp,
        input2_sc,
        output_zp,
        output_sc,
        dtype,
        reverse_inputs,
    )
    inputs = {"x": tvm.nd.array(np.random.randint(info.min, info.max + 1, size=shape, dtype=dtype))}
    outputs = [
        tei.build_and_run(tei.make_module(model, params), inputs, 1, params, npu=npu)
        for npu in (False, True)
    ]
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
    "shape,constant_shape", [((1, 4, 5, 8), (1, 1, 1, 1)), ((1, 3, 7, 10), None)]
)
@pytest.mark.parametrize("reverse_inputs", [False, True])
def test_multiply_to_reinterpret_quantize(shape, constant_shape, reverse_inputs):
    """Compare Multiply -> Reinterpret Quantize conversion output with TVM."""
    np.random.seed(0)
    dtype = "uint8"
    info = np.iinfo(dtype)
    # A multiply offloads as a reinterpret-quantize only when it is an identity
    # operation, so the quantisation parameters and the constant value below are
    # chosen carefully to make the scaling cancel out exactly.
    model, params = _get_model(
        shape,
        constant_shape,
        input_zp=0,
        input_sc=0.007814894430339336,
        input2_zp=0,
        input2_sc=0.5,
        output_zp=0,
        output_sc=0.9963990449905396,
        dtype=dtype,
        reverse_inputs=reverse_inputs,
        constant_data=255,
    )
    inputs = {"x": tvm.nd.array(np.random.randint(info.min, info.max + 1, size=shape, dtype=dtype))}
    outputs = []
    for npu in [False, True]:
        outputs.append(
            tei.build_and_run(
                tei.make_module(model, params),
                inputs,
                1,
                params,
                npu=npu,
                additional_config_args={"inline_non_compute_intensive_partitions": False},
            )
        )
    tei.verify(outputs, dtype, 1)
@requires_ethosn
def test_multiply_multiple_inputs_unsupported():
    """Check multiply operator with two inputs is not offloaded."""
    np.random.seed(0)
    shape = (1, 4, 5, 6)
    dtype = "int8"
    info = np.iinfo(dtype)
    input_zp = np.random.randint(info.min, info.max)
    input_sc = np.random.random() * 2
    input2_zp = np.random.randint(info.min, info.max)
    input2_sc = np.random.random() * 2
    output_zp, output_sc = tei.get_conv2d_qnn_params(
        dtype, input_zp, input_sc, input2_zp, input2_sc, 1, 1, shape[3]
    )
    # Both operands are runtime variables, which the NPU multiply conversion rejects.
    model = relay.qnn.op.mul(
        relay.var("x", shape=shape, dtype=dtype),
        relay.var("y", shape=shape, dtype=dtype),
        relay.const(input_sc, "float32"),
        relay.const(input_zp, "int32"),
        relay.const(input2_sc, "float32"),
        relay.const(input2_zp, "int32"),
        relay.const(output_sc, "float32"),
        relay.const(output_zp, "int32"),
    )
    for npu in [False, True]:
        tei.build(
            tei.make_module(model, {}),
            {},
            npu=npu,
            expected_host_ops=1,
            npu_partitions=0,
        )
@requires_ethosn
@pytest.mark.parametrize(
    "dtype,shape,constant_shape",
    [
        ("int16", (1, 4, 5, 6), (1, 1, 1, 6)),
        ("int8", (1, 1, 3), (1, 1, 1, 3)),
        ("int8", (1, 2, 4, 8), (1, 2, 4, 8)),
    ],
)
def test_multiply_unsupported(dtype, shape, constant_shape):
    """Check multiply operator with unsupported attributes is not offloaded."""
    np.random.seed(0)
    info = np.iinfo(dtype)
    input_zp = np.random.randint(info.min, info.max)
    input_sc = np.random.random() * 2
    input2_zp = np.random.randint(info.min, info.max)
    input2_sc = np.random.random() * 2
    output_zp, output_sc = tei.get_conv2d_qnn_params(
        dtype, input_zp, input_sc, input2_zp, input2_sc, 1, 1, shape[-1]
    )
    model, params = _get_model(
        shape,
        constant_shape,
        input_zp,
        input_sc,
        input2_zp,
        input2_sc,
        output_zp,
        output_sc,
        dtype,
        reverse_inputs=False,
        constant_data=False,
    )
    # Everything should stay on the host: one host op, zero NPU partitions.
    for npu in [False, True]:
        tei.build(
            tei.make_module(model, {}),
            params,
            npu=npu,
            expected_host_ops=1,
            npu_partitions=0,
        )
| 7,558 | 27.632576 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration pooling tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, typef, sizes, strides, pads, layout, dtype):
    """Return a pooling model; average pooling is wrapped in int32 casts."""
    out = relay.var("a", shape=shape, dtype=dtype)
    is_avg_pool = typef is relay.nn.avg_pool2d
    if is_avg_pool:
        # avg_pool2d accumulates in the input dtype, so widen to int32 first.
        out = relay.cast(out, "int32")
    out = typef(out, pool_size=sizes, strides=strides, padding=pads, ceil_mode=True, layout=layout)
    if is_avg_pool:
        out = relay.cast(out, dtype)
    return out
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "shape,typef,size,stride,pad",
    [
        ((1, 8, 8, 8), relay.nn.max_pool2d, (2, 2), (2, 2), (0, 0, 0, 0)),
        ((1, 9, 9, 9), relay.nn.max_pool2d, (3, 3), (2, 2), (0, 0, 0, 0)),
        ((1, 8, 8, 8), relay.nn.avg_pool2d, (3, 3), (1, 1), (1, 1, 1, 1)),
    ],
)
def test_pooling(dtype, shape, typef, size, stride, pad):
    """Compare Pooling output with TVM."""
    np.random.seed(0)
    info = np.iinfo(dtype)
    inputs = {
        "a": tvm.nd.array(
            np.random.randint(low=info.min, high=info.max + 1, size=shape, dtype=dtype)
        ),
    }
    model = _get_model(shape, typef, size, stride, pad, "NHWC", dtype)
    outputs = [
        tei.build_and_run(tei.make_module(model, {}), inputs, 1, {}, npu=npu)
        for npu in (False, True)
    ]
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
    "shape,size,stride,layout,dtype,err_msg",
    [
        (
            (2, 8, 8, 8),
            (2, 2),
            (2, 2),
            "NHWC",
            "uint8",
            "batch size=2, batch size must = 1",
        ),
        (
            (1, 8, 8, 8),
            (2, 2),
            (2, 2),
            "NHWC",
            "int16",
            "dtype='int16', dtype must be either uint8, int8 or int32",
        ),
        (
            (1, 8, 8, 8),
            (2, 2),
            (2, 2),
            "NCHW",
            "uint8",
            "data format=NCHW, data format must = NHWC",
        ),
        (
            (1, 8, 8, 8),
            (2, 2),
            (2, 2, 2),
            "NHWC",
            "uint8",
            "stride size=3, stride size must = 2",
        ),
        (
            (1, 8, 8, 8),
            (2, 2, 2),
            (2, 2),
            "NHWC",
            "uint8",
            "dimensions=3, dimensions must = 2",
        ),
    ],
)
def test_pooling_failure(shape, size, stride, layout, dtype, err_msg):
    """Check Pooling error messages."""
    # Max pooling without padding is enough to trigger each validation error.
    model = _get_model(shape, relay.nn.max_pool2d, size, stride, (0, 0, 0, 0), layout, dtype)
    tei.test_error(tei.make_ethosn_partition(model), {}, err_msg)
| 3,752 | 28.785714 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_addition.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration addition tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(
    lhs_shape,
    rhs_shape,
    lhs_zp,
    lhs_sc,
    rhs_zp,
    rhs_sc,
    out_zp,
    out_sc,
    dtype,
    lhs_is_constant=False,
    rhs_is_constant=False,
    constant_data=None,
):
    """Return a qnn.add model; either side may be a constant.

    Constant operands are filled with `constant_data` when given, otherwise
    with random values of the requested dtype.
    """

    def make_operand(name, shape, is_constant):
        """Build one add operand: a constant tensor or a named variable."""
        if not is_constant:
            return relay.var(name, shape=shape, dtype=dtype)
        if constant_data:
            values = np.array(constant_data, dtype=dtype).reshape(shape)
        else:
            info = np.iinfo(dtype)
            values = np.random.randint(info.min, info.max + 1, size=shape, dtype=dtype)
        return relay.const(values, dtype=dtype)

    return relay.qnn.op.add(
        lhs=make_operand("a", lhs_shape, lhs_is_constant),
        rhs=make_operand("b", rhs_shape, rhs_is_constant),
        lhs_scale=relay.const(lhs_sc, "float32"),
        lhs_zero_point=relay.const(lhs_zp, "int32"),
        rhs_scale=relay.const(rhs_sc, "float32"),
        rhs_zero_point=relay.const(rhs_zp, "int32"),
        output_scale=relay.const(out_sc, "float32"),
        output_zero_point=relay.const(out_zp, "int32"),
    )
def _get_addition_qnn_params(dtype):
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
lhs_zp = np.random.randint(data_min, data_max)
lhs_sc = np.random.random() * 2
rhs_zp = np.random.randint(data_min, data_max)
rhs_sc = np.random.random() * 2
input1_max = lhs_sc * (255 - lhs_zp)
input1_min = -lhs_sc * lhs_zp
input2_max = rhs_sc * (255 - rhs_zp)
input2_min = -rhs_sc * rhs_zp
output_max = input1_max + input2_max
output_min = input1_min + input2_min
output_sc = (output_max - output_min) / 255
output_zp = -int(output_min / output_sc)
return lhs_zp, lhs_sc, rhs_zp, rhs_sc, output_zp, output_sc
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize("shape", [(1, 22, 9, 9), (1, 27, 21, 16)])
def test_addition(dtype, shape):
    """Compare Addition output with TVM."""
    np.random.seed(0)
    info = np.iinfo(dtype)
    lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc = _get_addition_qnn_params(dtype)
    inputs = {
        name: tvm.nd.array(np.random.randint(info.min, info.max + 1, size=shape, dtype=dtype))
        for name in ("a", "b")
    }
    model = _get_model(shape, shape, lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc, dtype)
    outputs = []
    for npu in [False, True]:
        outputs.append(
            tei.build_and_run(
                tei.make_module(model, []),
                inputs,
                1,
                {},
                npu=npu,
                additional_config_args={"inline_non_compute_intensive_partitions": False},
            )
        )
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "lhs_shape,lhs_is_constant,rhs_shape,rhs_is_constant",
    [
        ((1, 4, 4, 8), True, (1, 1, 1, 8), True),
        ((4,), True, (1, 16, 12, 4), True),
        ((1, 1, 1, 8), True, (1, 4, 4, 8), True),
        ((1, 16, 12, 4), True, (4,), True),
    ],
)
def test_addition_both_inputs_constants(
    dtype, lhs_shape, lhs_is_constant, rhs_shape, rhs_is_constant
):
    """Check if addition is simplified when both inputs are constants."""
    np.random.seed(0)
    lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc = _get_addition_qnn_params(dtype)
    model = _get_model(
        lhs_shape,
        rhs_shape,
        lhs_zp,
        lhs_sc,
        rhs_zp,
        rhs_sc,
        out_zp,
        out_sc,
        dtype,
        lhs_is_constant=lhs_is_constant,
        rhs_is_constant=rhs_is_constant,
    )

    from tvm.relay.op.contrib import partition_for_ethosn  # pylint: disable=import-outside-toplevel

    mod = tei.make_module(model, {})
    # Partitioning should constant-fold the add away entirely.
    assert "qnn.add" in mod.astext(False)
    partitioned = partition_for_ethosn(mod, {})
    assert "qnn.add" not in partitioned.astext(False)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "lhs_shape,lhs_is_constant,rhs_shape,rhs_is_constant",
    [
        ((1, 4, 4, 8), False, (1, 4, 4, 8), True),
        ((1, 16, 12, 4), True, (1, 16, 12, 4), False),
    ],
)
def test_addition_with_one_constant(dtype, lhs_shape, lhs_is_constant, rhs_shape, rhs_is_constant):
    """Validate addition with one input as a constant."""
    np.random.seed(0)
    info = np.iinfo(dtype)
    lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc = _get_addition_qnn_params(dtype)
    model = _get_model(
        lhs_shape,
        rhs_shape,
        lhs_zp,
        lhs_sc,
        rhs_zp,
        rhs_sc,
        out_zp,
        out_sc,
        dtype,
        lhs_is_constant=lhs_is_constant,
        rhs_is_constant=rhs_is_constant,
    )
    # Only the non-constant side is fed at runtime.
    var_name, var_shape = ("b", rhs_shape) if lhs_is_constant else ("a", lhs_shape)
    inputs = {
        var_name: tvm.nd.array(
            np.random.randint(info.min, info.max + 1, size=var_shape, dtype=dtype)
        )
    }
    outputs = []
    for npu in [False, True]:
        outputs.append(
            tei.build_and_run(
                tei.make_module(model, {}),
                inputs,
                1,
                {},
                npu=npu,
                additional_config_args={"inline_non_compute_intensive_partitions": False},
            )
        )
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "lhs_shape,lhs_is_constant,rhs_shape,rhs_is_constant",
    [
        ((1, 4, 4, 8), False, (1, 1, 1, 8), True),
        ((4,), True, (1, 16, 12, 4), False),
        ((1, 1, 1, 8), True, (1, 4, 4, 8), False),
        ((1, 16, 12, 4), False, (4,), True),
    ],
)
def test_addition_to_depthwise(dtype, lhs_shape, lhs_is_constant, rhs_shape, rhs_is_constant):
    """Compare addition to depthwise with TVM."""
    np.random.seed(0)
    info = np.iinfo(dtype)
    lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc = _get_addition_qnn_params(dtype)
    model = _get_model(
        lhs_shape,
        rhs_shape,
        lhs_zp,
        lhs_sc,
        rhs_zp,
        rhs_sc,
        out_zp,
        out_sc,
        dtype,
        lhs_is_constant=lhs_is_constant,
        rhs_is_constant=rhs_is_constant,
    )
    # Only the non-constant side is fed at runtime.
    var_name, var_shape = ("b", rhs_shape) if lhs_is_constant else ("a", lhs_shape)
    inputs = {
        var_name: tvm.nd.array(
            np.random.randint(info.min, info.max + 1, size=var_shape, dtype=dtype)
        )
    }
    outputs = [
        tei.build_and_run(tei.make_module(model, {}), inputs, 1, {}, npu=npu)
        for npu in (False, True)
    ]
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
    "lhs_shape,lhs_is_constant,rhs_shape,rhs_is_constant",
    [
        ((1, 2, 8, 4), False, None, True),
        ((1, 5, 6, 7), False, (1, 1, 1, 1), True),
        (None, True, (1, 2, 8, 4), False),
        ((1, 1, 1, 1), True, (1, 5, 6, 7), False),
    ],
)
def test_addition_to_reinterpret_quantize(lhs_shape, lhs_is_constant, rhs_shape, rhs_is_constant):
    """Compare addition offloaded as a reinterpret quantize with TVM."""
    np.random.seed(0)
    dtype = "uint8"
    info = np.iinfo(dtype)
    # Add offloads as a reinterpret-quantize only when it is an identity
    # operation; the quantisation parameters and the constant value below are
    # chosen carefully so the variable operand passes through unchanged.
    variable_zp, variable_sc = 128, 0.0078125
    constant_zp, constant_sc = 0, 0.003921568859368563
    if lhs_is_constant:
        lhs_zp, lhs_sc = constant_zp, constant_sc
        rhs_zp, rhs_sc = variable_zp, variable_sc
    else:
        lhs_zp, lhs_sc = variable_zp, variable_sc
        rhs_zp, rhs_sc = constant_zp, constant_sc
    model = _get_model(
        lhs_shape,
        rhs_shape,
        lhs_zp,
        lhs_sc,
        rhs_zp,
        rhs_sc,
        0,
        0.007814894430339336,
        dtype,
        lhs_is_constant=lhs_is_constant,
        rhs_is_constant=rhs_is_constant,
        constant_data=255,
    )
    # Only the non-constant side is fed at runtime.
    var_name, var_shape = ("b", rhs_shape) if lhs_is_constant else ("a", lhs_shape)
    inputs = {
        var_name: tvm.nd.array(
            np.random.randint(info.min, info.max + 1, size=var_shape, dtype=dtype)
        )
    }
    outputs = []
    for npu in [False, True]:
        outputs.append(
            tei.build_and_run(
                tei.make_module(model, {}),
                inputs,
                1,
                {},
                npu=npu,
                additional_config_args={"inline_non_compute_intensive_partitions": False},
            )
        )
    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
    "dtype,shape,err_msg",
    [
        (
            "uint8",
            (2, 4, 4, 4),
            "batch size=2, batch size must = 1; batch size=2, batch size must = 1",
        ),
        (
            "int16",
            (1, 4, 4, 4),
            "dtype='int16', dtype must be either uint8, int8 or int32; dtype='int16', "
            "dtype must be either uint8, int8 or int32",
        ),
    ],
)
def test_addition_failure(dtype, shape, err_msg):
    """Check addition error messages."""
    np.random.seed(0)
    lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc = _get_addition_qnn_params(dtype)
    model = _get_model(shape, shape, lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc, dtype)
    composite = tei.make_ethosn_composite(model, "ethos-n.qnn_add")
    tei.test_error(tei.make_ethosn_partition(composite), {}, err_msg)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "lhs_shape,lhs_is_constant,rhs_shape,rhs_is_constant",
    [
        ((1, 4, 4, 8), True, (1, 1, 4, 8), False),
        ((1, 4, 4, 8), False, (1, 1, 4, 8), False),
        ((1, 16, 1, 4), True, (1, 1, 12, 4), False),
    ],
)
def test_unsupported_broadcast_addition(
    dtype, lhs_shape, lhs_is_constant, rhs_shape, rhs_is_constant
):
    """Test broadcast compatible addition falls back to TVM."""
    np.random.seed(0)
    lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc = _get_addition_qnn_params(dtype)
    model = _get_model(
        lhs_shape,
        rhs_shape,
        lhs_zp,
        lhs_sc,
        rhs_zp,
        rhs_sc,
        out_zp,
        out_sc,
        dtype,
        lhs_is_constant=lhs_is_constant,
        rhs_is_constant=rhs_is_constant,
    )

    from tvm.relay.op.contrib import partition_for_ethosn  # pylint: disable=import-outside-toplevel

    mod = tei.make_module(model, {})
    assert "qnn.add" in mod.astext(False)
    # Broadcasting adds are rejected by the Ethos-N partitioner, so the op
    # must stay in the host module and no NPU composite may be created.
    partitioned = partition_for_ethosn(mod, {})
    assert "qnn.add" in partitioned.astext(False)
    assert "ethos-n.qnn_add" not in partitioned.astext(False)
| 12,551 | 29.028708 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_concatenate.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Concatenate tests for Arm(R) Ethos(TM)-N"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_inputs(shapes, dtype):
    """Create one random input tensor per shape, keyed "in0", "in1", ..."""
    info = np.iinfo(dtype)
    return {
        "in" + str(index): tvm.nd.array(
            np.random.randint(info.min, info.max + 1, size=shape, dtype=dtype)
        )
        for index, shape in enumerate(shapes)
    }
def _get_model(shapes, dtype, axis):
    """Build a qnn concatenate over one fresh variable per shape."""
    operands = [
        relay.var("in" + str(index), shape=shape, dtype=dtype)
        for index, shape in enumerate(shapes)
    ]
    # All operands share the same (scale, zero point) pair.
    zero_point = relay.const(1, "int32")
    scale = relay.const(0.5, "float32")
    return relay.qnn.op.concatenate(
        operands,
        input_scales=[scale] * len(shapes),
        input_zero_points=[zero_point] * len(shapes),
        output_scale=scale,
        output_zero_point=zero_point,
        axis=axis,
    )
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "shapes,axis",
    [
        ([(1, 4), (1, 6)], 1),
        ([(1, 16, 4), (1, 16, 4)], 1),
        ([(1, 25, 4, 16)] * 3, 3),
        ([(1, 25, 4, 16), (1, 25, 5, 16), (1, 25, 6, 16)], 2),
        ([(1, 4), (1, 6)], -1),
        ([(1, 16, 4), (1, 16, 4)], -2),
    ],
)
def test_concatenate(dtype, shapes, axis):
    """Compare Concatenate output with TVM."""
    np.random.seed(0)
    inputs = _get_inputs(shapes, dtype)
    outputs = []
    for npu in [False, True]:
        mod = tei.make_module(_get_model(shapes, dtype, axis), {})
        outputs.append(
            tei.build_and_run(
                mod,
                inputs,
                1,
                {},
                npu=npu,
                additional_config_args={"inline_non_compute_intensive_partitions": False},
            )
        )
    tei.verify(outputs, dtype, 0)
@requires_ethosn
@pytest.mark.parametrize(
    "shapes,dtype,axis,err_msg",
    [
        ([(1, 4, 4, 4, 4), (1, 4, 4, 4, 4)], "uint8", 1, "dimensions=5, dimensions must be <= 4;"),
        (
            [(1, 4, 4, 4), (1, 4, 4, 4)],
            "uint8",
            3,
            "Concatenation along the channels dimension (axis 3) "
            "requires input tensors with a multiple of 16 channels;",
        ),
        (
            [(1, 4, 4, 4), (1, 4, 4, 4)],
            "int16",
            2,
            "dtype='int16', dtype must be either uint8, int8 or int32; dtype='int16', "
            "dtype must be either uint8, int8 or int32;",
        ),
        (
            [(2, 4, 4, 4), (2, 4, 4, 4)],
            "uint8",
            2,
            "batch size=2, batch size must = 1; batch size=2, batch size must = 1;",
        ),
        (
            [(1, 4, 4, 4)],
            "uint8",
            0,
            "Concatenation cannot be performed along batch axis (axis 0);",
        ),
    ],
)
def test_concatenate_failure(shapes, dtype, axis, err_msg):
    """Check Concatenate error messages."""
    mod = tei.make_ethosn_partition(_get_model(shapes, dtype, axis))
    tei.test_error(mod, {}, err_msg)
| 3,963 | 29.259542 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_relu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration relu tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, dtype, a_min, a_max):
    """Return a clip expression (quantized relu) over a single input variable."""
    info = np.iinfo(dtype)
    # The clip bounds must be representable in the requested integer dtype.
    assert info.min <= a_min and a_max <= info.max
    data = relay.var("a", shape=shape, dtype=dtype)
    return relay.clip(data, a_min=a_min, a_max=a_max)
@requires_ethosn
@pytest.mark.parametrize(
    "shape,a_min,a_max,dtype",
    [
        ((1, 4, 4, 4), 65, 178, "uint8"),
        ((1, 8, 4, 2), 1, 254, "uint8"),
        ((1, 8, 4, 2), -100, 100, "int8"),
        ((1, 16), -120, -20, "int8"),
    ],
)
def test_relu(dtype, shape, a_min, a_max):
    """Compare Relu output with TVM."""
    np.random.seed(0)
    info = np.iinfo(dtype)
    data = np.random.randint(
        low=info.min,
        high=info.max + 1,
        size=shape,
        dtype=dtype,
    )
    inputs = {"a": tvm.nd.array(data)}
    outputs = []
    # Run once on the host and once offloaded to the NPU, then compare.
    for npu in (False, True):
        model = _get_model(inputs["a"].shape, dtype, a_min, a_max)
        mod = tei.make_module(model, {})
        outputs.append(
            tei.build_and_run(
                mod,
                inputs,
                1,
                {},
                npu=npu,
                additional_config_args={"inline_non_compute_intensive_partitions": False},
            )
        )

    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
    "shape,dtype,a_min,a_max,err_msg",
    [
        # Tensor rank greater than 4 is rejected.
        ((1, 4, 4, 4, 4), "uint8", 65, 78, "dimensions=5, dimensions must be <= 4"),
        # Only uint8/int8/int32 dtypes are supported.
        ((1, 8, 4, 2), "int16", 1, 254, "dtype='int16', dtype must be either uint8, int8 or int32"),
        # The lower clip bound must not exceed the upper bound.
        ((1, 8, 4, 2), "uint8", 254, 1, "Relu has lower bound > upper bound"),
        # Batch size must be 1.
        ((2, 2, 2, 2), "uint8", 1, 63, "batch size=2, batch size must = 1; "),
    ],
)
def test_relu_failure(shape, dtype, a_min, a_max, err_msg):
    """Check Relu error messages."""
    # Build the model, partition it for the NPU and check that compilation
    # raises the expected support-library error message.
    model = _get_model(shape, dtype, a_min, a_max)
    mod = tei.make_ethosn_partition(model)
    tei.test_error(mod, {}, err_msg)
| 2,961 | 31.195652 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_sigmoid.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration sigmoid tests"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, input_zp, input_sc, output_zp, output_sc, dtype):
    """Return a dequantize -> sigmoid -> quantize chain over one input variable."""
    data = relay.var("a", shape=shape, dtype=dtype)
    dequantized = relay.qnn.op.dequantize(
        data,
        input_scale=relay.const(input_sc, "float32"),
        input_zero_point=relay.const(input_zp, "int32"),
    )
    activated = relay.sigmoid(dequantized)
    return relay.qnn.op.quantize(
        activated,
        output_scale=relay.const(output_sc, "float32"),
        output_zero_point=relay.const(output_zp, "int32"),
        out_dtype=dtype,
    )
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "shape",
    [
        (1, 16, 16, 16),
        (1, 8, 8),
    ],
)
def test_sigmoid(dtype, shape):
    """Compare Sigmoid output with TVM."""
    np.random.seed(0)
    inputs = {
        "a": tvm.nd.array(
            np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype)
        ),
    }
    # Zero points chosen so sigmoid's [0, 1] output maps onto the integer
    # dtype's representable range with an output scale of 1/256.
    if dtype == "uint8":
        input_zp = 0
        output_zp = 0
    else:
        input_zp = 127
        output_zp = -128
    outputs = []
    for npu in [False, True]:
        # The original wrapped this body in `for _ in range(1, 2)` — a dead
        # single-iteration loop — and passed `[]` for params where every
        # sibling test passes `{}`; both cleaned up here (behavior unchanged).
        model = _get_model(shape, input_zp, 0.02, output_zp, 1.0 / 256.0, dtype)
        mod = tei.make_module(model, {})
        outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))

    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
    "shape,input_zp,input_sc,output_zp,output_sc,err_msg",
    [
        # Batch size must be 1.
        ((2, 4, 4, 4), 64, 0.2, 0, 1 / 256, "batch size=2, batch size must = 1"),
        # Sigmoid output quantization must be exactly (zero point 0, scale 1/256).
        (
            (1, 4, 4, 4),
            64,
            0.2,
            3,
            1,
            "output quantization params=(3, 1), must = (0, 1/256)",
        ),
    ],
)
def test_sigmoid_failure(shape, input_zp, input_sc, output_zp, output_sc, err_msg):
    """Check Sigmoid error messages."""
    dtype = "uint8"
    # Build the composite, partition it for the NPU and check that compilation
    # raises the expected support-library error message.
    model = _get_model(shape, input_zp, input_sc, output_zp, output_sc, dtype)
    model = tei.make_ethosn_composite(model, "ethos-n.qnn_sigmoid")
    mod = tei.make_ethosn_partition(model)
    tei.test_error(mod, {}, err_msg)
| 3,199 | 30.067961 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/test_resize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration resize tests"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(
    shape,
    dtype,
    size,
    coordinate_transformation_mode,
    rounding_method,
):
    """Return a nearest-neighbour resize2d expression over one NHWC input."""
    data = relay.var("x", shape=shape, dtype=dtype)
    resized = relay.image.resize2d(
        data=data,
        size=size,
        layout="NHWC",
        method="nearest_neighbor",
        coordinate_transformation_mode=coordinate_transformation_mode,
        rounding_method=rounding_method,
    )
    return resized
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "shape, size, coordinate_transformation_mode, rounding_method",
    [
        ((1, 4, 4, 2), (8, 8), "half_pixel", "round_prefer_ceil"),
        ((1, 4, 4, 2), (7, 7), "asymmetric", "floor"),
        ((1, 4, 8, 3), (8, 16), "half_pixel", "round_prefer_ceil"),
        ((1, 4, 8, 3), (7, 15), "asymmetric", "floor"),
    ],
)
def test_resize(dtype, shape, size, coordinate_transformation_mode, rounding_method):
    """Compare Resize output with TVM."""
    np.random.seed(0)
    info = np.iinfo(dtype)
    inputs = {
        "x": tvm.nd.array(np.random.randint(info.min, high=info.max + 1, size=shape, dtype=dtype)),
    }
    outputs = []
    # Run once on the host and once offloaded to the NPU, then compare.
    for npu in (False, True):
        model = _get_model(
            shape=shape,
            dtype=dtype,
            size=size,
            coordinate_transformation_mode=coordinate_transformation_mode,
            rounding_method=rounding_method,
        )
        mod = tei.make_module(model, {})
        outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))

    tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
    "size,err_msg",
    [
        # Output height is not an exact/odd-upsampled multiple of the input.
        (
            (30, 20),
            "Requested height isn't supported",
        ),
        # Output width is not an exact/odd-upsampled multiple of the input.
        (
            (20, 30),
            "Requested width isn't supported",
        ),
    ],
)
def test_resize_failure(size, err_msg):
    """Check Resize error messages."""
    dtype = "int8"
    # Build the composite, partition it for the NPU and check that compilation
    # raises the expected support-library error message.
    model = _get_model(
        shape=(1, 10, 10, 1),
        dtype=dtype,
        size=size,
        coordinate_transformation_mode="half_pixel",
        rounding_method="round_prefer_ceil",
    )
    model = tei.make_ethosn_composite(model, "ethos-n.qnn_resize")
    mod = tei.make_ethosn_partition(model)
    tei.test_error(mod, {}, err_msg)
| 3,287 | 28.890909 | 95 | py |
tvm | tvm-main/tests/python/contrib/test_ethosn/infrastructure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N test functions"""
from __future__ import absolute_import, print_function
from hashlib import md5
from itertools import zip_longest, combinations
import os
from typing import Tuple
import math
import numpy as np
from PIL import Image
import tvm
from tvm import relay
from tvm.contrib import utils, graph_executor, download
from tvm.relay.op.contrib import partition_for_ethosn
from tvm.driver.tvmc.target import parse_target
from . import _infrastructure
def get_real_image(im_height, im_width):
    """Download a test image and return it as a uint8 batch of one.

    Parameters
    ----------
    im_height : int
        Height to resize the image to.
    im_width : int
        Width to resize the image to.

    Returns
    -------
    np.ndarray
        Array of shape (1, im_height, im_width, 3).
    """
    repo_base = "https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/"
    img_name = "elephant-299.jpg"
    # Plain concatenation: os.path.join is for filesystem paths, not URLs
    # (it would insert a backslash on Windows).
    image_url = repo_base + img_name
    img_path = download.download_testdata(image_url, img_name, module="data")
    # PIL's Image.resize takes (width, height), so im_width comes first to
    # match the (1, im_height, im_width, 3) reshape below. (The previous code
    # passed (im_height, im_width), which only worked for square sizes.)
    image = Image.open(img_path).resize((im_width, im_height))
    x = np.array(image).astype("uint8")
    data = np.reshape(x, (1, im_height, im_width, 3))
    return data
def assert_lib_hash(lib, golden):
    """Check that the Ethos-N runtime modules in a library hash to the same values
    as given by the golden hash(es).

    If there's only one Ethos-N module, the golden hash may be provided as a str.
    If there's multiple, a set of golden hashes should be provided to correspond
    with each Ethos-N module that is expected.

    This function is used to ensure that no change is made which alters the output
    of a compilation. If such a change is made deliberately (eg. to fix a bug) then
    the golden hash should be updated after verifying on hardware that the behaviour
    is still correct.

    This method is used because of the lack of hardware availability in upstream CI.
    """
    # Normalise a single golden hash string into a set of hashes.
    if isinstance(golden, str):
        golden = {golden}

    tmp_dir = utils.tempdir()
    dump_path = tmp_dir.relpath("lib.cmm")
    actual_hashes = set()
    for imported in lib.imported_modules:
        if imported.type_key != "ethos-n":
            continue
        # Serialise each Ethos-N module and hash its on-disk representation.
        imported.save(dump_path)
        with open(dump_path, "rb") as compiled_model:
            actual_hashes.add(md5(compiled_model.read()).hexdigest())

    assert actual_hashes == golden, "Expected hash: {} Got hash: {}".format(golden, actual_hashes)
def make_module(func, params):
    """Wrap an expression in a function and an IRModule, binding any params.

    Returns the type-inferred module.
    """
    func = relay.Function(relay.analysis.free_vars(func), func)
    if params:
        # bind_params_by_name returns a new function rather than mutating its
        # argument; the previous code discarded the result, so the params were
        # never actually bound into the function.
        func = relay.build_module.bind_params_by_name(func, params)
    mod = tvm.IRModule.from_expr(func)
    return relay.transform.InferType()(mod)
def make_ethosn_composite(ethosn_expr, name):
    """Wrap an expression in a function tagged as an Ethos(TM)-N composite."""
    free = relay.analysis.free_vars(ethosn_expr)
    # Allocate fresh parameter variables mirroring the expression's free vars.
    params = [relay.Var(v.name_hint, v.type_annotation) for v in free]
    wrapped = relay.Function(params, ethosn_expr)
    wrapped = wrapped.with_attr("Composite", name)
    # Call the composite with the original free variables.
    return relay.Call(wrapped, free)
def make_ethosn_partition(ethosn_expr):
    """Make an Ethos(TM)-N partition.

    Wraps ``ethosn_expr`` in a global function marked with the "ethos-n"
    Compiler attribute and creates a "main" that calls it, returning the
    type-inferred module.
    """
    # Create an Ethos-N global function
    mod = tvm.IRModule({})
    variables = relay.analysis.free_vars(ethosn_expr)
    # NB: it is illegal to reuse variables inside and outside a scope in Relay
    # if you want to duplicate types and names you must re-allocate them.
    fresh_vars = [relay.Var(v.name_hint, v.type_annotation) for v in variables]
    binds = {}
    for var, fresh_var in zip(variables, fresh_vars):
        binds[var] = fresh_var
    ethosn_expr_fresh = relay.bind(ethosn_expr, binds)
    func = relay.Function(fresh_vars, ethosn_expr_fresh)
    # Mark the function as a BYOC "ethos-n" partition entry point.
    func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
    func = func.with_attr("Compiler", "ethos-n")
    func = func.with_attr("global_symbol", "ethos-n_0")
    global_var = relay.GlobalVar("ethos-n_0")
    mod[global_var] = func
    mod = relay.transform.InferType()(mod)
    # These are the vars to call the Ethos-N partition with
    more_vars = relay.analysis.free_vars(ethosn_expr)
    # Call the Ethos-N partition in main
    call_fn1 = global_var(*more_vars)
    mod["main"] = relay.Function(more_vars, call_fn1)
    return relay.transform.InferType()(mod)
def get_host_op_count(mod):
    """Return the number of operator calls left in the host (non-NPU) module."""

    class _CallCounter(tvm.relay.ExprVisitor):
        """Count call nodes whose callee is a plain Relay operator."""

        def __init__(self):
            super().__init__()
            self.total = 0

        def visit_call(self, call):
            if isinstance(call.op, tvm.ir.Op):
                self.total += 1
            super().visit_call(call)

    counter = _CallCounter()
    counter.visit(mod["main"])
    return counter.total
def build(
    mod, params, npu=True, expected_host_ops=0, npu_partitions=1, additional_config_args=None
):
    """Build a network with or without Ethos-N offloading.

    Parameters
    ----------
    mod : IRModule
        The Relay module to build.
    params : dict of str to NDArray
        The weights to build with.
    npu : bool, optional
        Whether to build with Ethos-N offloading.
    expected_host_ops : int, optional
        The number of ops expected to remain on the host.
    npu_partitions : int, optional
        The number of Ethos-N partitions expected.
    additional_config_args : dict, optional
        Additional compiler config options for the NPU.

    Returns
    -------
    The result of relay.build for the (possibly partitioned) module.
    """
    # Drop any cached compilation state so builds don't leak between tests.
    relay.backend.te_compiler.get().clear()
    if not additional_config_args:
        additional_config_args = {}
    # Later keys win: additional_config_args can override the device defaults.
    npu_config = {**get_ethosn_device_options(), **additional_config_args}
    with tvm.transform.PassContext(opt_level=3, config={"relay.ext.ethos-n.options": npu_config}):
        with tvm.target.Target("llvm"):
            if npu:
                mod = partition_for_ethosn(mod, params)
                # Sanity-check how much of the graph stayed on the host.
                host_op_count = get_host_op_count(mod)
                assert (
                    host_op_count == expected_host_ops
                ), "Got {} host operators, expected {}".format(host_op_count, expected_host_ops)

                # Count global functions tagged with Compiler="ethos-n" to
                # verify the expected number of NPU partitions was created.
                attrs = [
                    mod[var.name_hint].attrs
                    for var in mod.get_global_vars()
                    if mod[var.name_hint].attrs
                ]
                partition_count = sum(
                    [
                        key == "Compiler" and value == "ethos-n"
                        for attr in attrs
                        for key, value in attr.items()
                    ]
                )
                assert (
                    npu_partitions == partition_count
                ), "Got {} ethos-n partitions, expected {}".format(partition_count, npu_partitions)

        return relay.build(mod, params=params)
def run(lib, inputs, outputs, npu=True):
    """Run a module with specified inputs.

    Parameters
    ----------
    lib : runtime.Module
        The runtime module.
    inputs : dict of str to NDArray
        The input dictionary.
    outputs : int
        The expected number of outputs.
    npu : bool
        Whether or not any part of the lib is offloaded to Ethos-N.
        If it's false (i.e. it's all running on the CPU), we set
        the mocked result equal to the output so that a subsequent
        mocked run on the NPU returns the same value.

    Returns
    -------
    out : list of NDArray
        The results.
    """
    # Round-trip the library through export/load to confirm serialisation works.
    temp_dir = utils.tempdir()
    exported_path = temp_dir.relpath("mod.so")
    lib.export_library(exported_path)
    loaded_lib = tvm.runtime.load_module(exported_path)

    module = graph_executor.GraphModule(loaded_lib["default"](tvm.cpu()))
    module.set_input(**inputs)
    module.run()
    results = [module.get_output(i) for i in range(outputs)]
    if not npu:
        # Record the CPU result as the mocked NPU inference result.
        inference_result(results)
    return results
def build_and_run(
    mod,
    inputs,
    outputs,
    params,
    npu=True,
    expected_host_ops=0,
    npu_partitions=1,
    additional_config_args=None,
):
    """
    Convenient wrapper for building and running a module on the NPU.
    """
    compiled = build(mod, params, npu, expected_host_ops, npu_partitions, additional_config_args)
    return run(compiled, inputs, outputs, npu)
def verify(answers, dtype, atol, rtol=1e-07, verify_saturation=True):
    """Compare the array of answers. Each entry is a list of outputs.

    NOTE(review): with fewer than two answers this only prints a warning and
    the comparison loop below does nothing, so the check passes vacuously —
    confirm whether this should be a hard assertion instead.
    """
    if len(answers) < 2:
        print("No results to compare: expected at least two, found ", len(answers))
    # Compare every pair of corresponding outputs across the answer lists.
    for answer in zip_longest(*answers):
        for outs in combinations(answer, 2):
            if verify_saturation:
                # Reject outputs where >=25% of values sit at the dtype
                # min/max — an allclose comparison would be meaningless then.
                assert (
                    np.count_nonzero(outs[0].numpy() == np.iinfo(dtype).max)
                    < 0.25 * outs[0].numpy().size
                ), "Output is saturated: {}".format(outs[0])
                assert (
                    np.count_nonzero(outs[0].numpy() == np.iinfo(dtype).min)
                    < 0.25 * outs[0].numpy().size
                ), "Output is saturated: {}".format(outs[0])
            tvm.testing.assert_allclose(outs[0].numpy(), outs[1].numpy(), rtol=rtol, atol=atol)
def inference_result(outputs):
    """Set the expected results of an Ethos inference, if the testing
    infrastructure is available. This assumes that the entire graph
    was offloaded to the neural processor."""
    # get_global_func(..., True) returns None when the func is not registered.
    set_result = tvm.get_global_func("relay.ethos-n.test.infra.inference_result", True)
    if set_result is None:
        return False
    return _infrastructure.inference_result(*outputs)
def test_error(mod, params, err_msg):
    """Test an operator error message.

    Builds ``mod`` with the Ethos-N config and asserts that compilation
    raises a TVMError whose message contains ``err_msg``.
    """
    caught = None
    with tvm.transform.PassContext(
        opt_level=3, config={"relay.ext.ethos-n.options": get_ethosn_device_options()}
    ):
        with tvm.target.Target("llvm"):
            try:
                mod = relay.transform.InferType()(mod)
                relay.build(mod, params=params)
            except tvm.error.TVMError as error:
                # Capture the message so it can be checked outside the context.
                caught = error.args[0]
            finally:
                # Always drop the compile-engine cache so state doesn't leak
                # between tests.
                relay.backend.te_compiler.get().clear()

    assert caught is not None
    assert err_msg in caught, caught
def get_conv2d(var, shape, dtype):
    """Standard convolution to test activation functions.

    Builds a 1x1 qnn.conv2d (all-ones weights, single output channel) over
    ``var`` followed by bias_add and requantize, returning the expression and
    its parameter dict.
    """
    # 1x1 kernel over all of the input's channels, producing one channel.
    weight_shape = (1, 1, shape[3], 1)
    weights_array = tvm.nd.array(np.ones(weight_shape, dtype))
    weights = relay.const(weights_array, dtype)
    conv = relay.qnn.op.conv2d(
        var,
        weights,
        input_zero_point=relay.const(0, "int32"),
        kernel_zero_point=relay.const(0, "int32"),
        input_scale=relay.const(1.0, "float32"),
        kernel_scale=relay.const(1.0, "float32"),
        kernel_size=(1, 1),
        channels=1,
        data_layout="NHWC",
        kernel_layout="HWIO",
    )
    # NOTE(review): the bias is sized by shape[0] (batch) and added on axis=0;
    # this only lines up because batch size is 1 in these tests — confirm
    # whether axis=3 (channels) was intended.
    b = tvm.nd.array(np.zeros((shape[0],), "int32"))
    biasc = relay.const(b, "int32")
    bias = relay.nn.bias_add(conv, biasc, axis=0)
    req = relay.qnn.op.requantize(
        bias,
        relay.const(1.0, "float32"),  # input zero scale
        relay.const(0, "int32"),  # input zero point
        relay.const(1.1, "float32"),  # output zero scale
        relay.const(0, "int32"),  # output zero point
        out_dtype=dtype,
    )
    params = {"w": weights_array, "b": b}
    return req, params
def get_conv2d_qnn_params(
    dtype, input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w, channels
):
    """Return output (zero point, scale) covering the worst-case conv2d output range."""
    # Per-channel kernel scales arrive as an NDArray; normalise a scalar to a list.
    if isinstance(kernel_sc, tvm.runtime.ndarray.NDArray):
        kernel_sc = kernel_sc.numpy()
    else:
        kernel_sc = [kernel_sc]

    info = np.iinfo(dtype)
    dtype_min = info.min
    dtype_max = info.max
    # Real-valued extremes representable by the quantized input and kernel.
    input_max = input_sc * (dtype_max - input_zp)
    input_min = input_sc * (dtype_min - input_zp)
    kernel_max = max(kernel_sc) * (dtype_max - kernel_zp)
    kernel_min = min(kernel_sc) * (dtype_min - kernel_zp)
    # Accumulation extremes over one kernel_h x kernel_w x channels window.
    output_limits = [
        kernel_max * kernel_h * kernel_w * channels * input_max,
        kernel_min * kernel_h * kernel_w * channels * input_max,
        kernel_min * kernel_h * kernel_w * channels * input_min,
        kernel_max * kernel_h * kernel_w * channels * input_min,
    ]
    output_max = max(output_limits)
    output_min = min(output_limits)
    # Spread the output range across the full integer dtype.
    output_sc = (output_max - output_min) / (dtype_max - dtype_min)
    output_zp = int(dtype_min - (output_min / output_sc))
    return output_zp, output_sc
def get_same_padding(
    data: Tuple[int, int],
    kernel: Tuple[int, int],
    dilation: Tuple[int, int],
    stride: Tuple[int, int],
) -> Tuple[int, int, int, int]:
    """
    Get the padding values required for 'SAME' padding.

    Parameters
    ----------
    data : Tuple[int, int]
        The height and width of the data respectively.
    kernel : Tuple[int, int]
        The height and width of the kernel respectively.
    dilation : Tuple[int, int]
        The dilation of the kernel.
    stride : Tuple[int, int]
        The stride of the kernel.

    Returns
    -------
    Tuple[int, int, int, int]
        The padding values for top, left, bottom and right respectively.
    """

    def _pad_one_dim(size: int, kernel_size: int, dilation_size: int, stride_size: int):
        """Return (pad_before, pad_after) for a single spatial dimension."""
        dilated_kernel = dilation_size * (kernel_size - 1) + 1
        out = int(math.ceil(float(size) / float(stride_size)))
        pad = max(0, (out - 1) * stride_size + dilated_kernel - size)
        # Any odd padding goes on the after (bottom/right) side.
        pad_before = pad // 2
        return pad_before, pad - pad_before

    # The height and width computations are identical, so share one helper
    # (the original duplicated this logic for each dimension).
    pad_top, pad_bottom = _pad_one_dim(data[0], kernel[0], dilation[0], stride[0])
    pad_left, pad_right = _pad_one_dim(data[1], kernel[1], dilation[1], stride[1])
    return (pad_top, pad_left, pad_bottom, pad_right)
def get_ethosn_device_options():
    """Determine the NPU configuration used for testing."""
    default_target_string = "ethos-n -variant=n78 -tops=1 -ple_ratio=2"
    # CI can override the tested NPU configuration through the environment.
    target_string = os.getenv("ETHOSN_TEST_TARGET_CONFIG", default_target_string)
    parsed = parse_target(target_string)
    return parsed[0]["opts"]
| 14,450 | 34.246341 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_arm_compute_lib/test_add.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library integration reshape tests."""
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
from test_arm_compute_lib.infrastructure import (
skip_runtime_test,
skip_codegen_test,
build_and_run,
verify,
verify_codegen,
)
from test_arm_compute_lib.infrastructure import Device
# Fixed quantization parameters shared by every qnn.add test in this file.
_qnn_params = {
    "lhs_scale": relay.const(0.0156863, "float32"),
    "lhs_zero_point": relay.const(127, "int32"),
    "rhs_scale": relay.const(0.0117647, "float32"),
    "rhs_zero_point": relay.const(85, "int32"),
    "output_scale": relay.const(0.0235294, "float32"),
    "output_zero_point": relay.const(128, "int32"),
}
def _get_model(shape, dtype, var_names, op, op_params):
    """Apply a binary elementwise op to two fresh input variables."""
    # The two next() calls consume var names in order (lhs first, then rhs).
    lhs = relay.var(next(var_names), shape=shape, dtype=dtype)
    rhs = relay.var(next(var_names), shape=shape, dtype=dtype)
    return op(lhs, rhs, **op_params)
def _get_expected_codegen(shape, dtype, op_name, qnn_params):
    """Build the expected JSON-codegen node list for an (optionally quantized) add."""
    input_a = {"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[dtype]]}}
    input_b = {"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[dtype]]}}
    # One constant node per qnn parameter (scales and zero points).
    input_qnn = [
        {
            "op": "const",
            "name": "",
            "attrs": {
                "shape": [[list(qnn_params[_].data.shape)]],
                "dtype": [[qnn_params[_].data.dtype]],
            },
        }
        for _ in qnn_params
    ]
    inputs = [input_a, input_b, *input_qnn]
    node = {
        "op": "kernel",
        "name": op_name,
        # Each input is referenced as [node_index, output_index, version].
        "inputs": [[_, 0, 0] for _ in range(len(inputs))],
        "attrs": {
            "num_inputs": str(len(inputs)),
            "num_outputs": "1",
            "shape": [[list(shape)]],
            "dtype": [[dtype]],
        },
    }

    if qnn_params:
        node["attrs"]["lhs_axis"] = [["-1"]]
        node["attrs"]["rhs_axis"] = [["-1"]]

    return [*inputs, node]
def test_runtime_add():
    """Compare float and quantized add outputs between ACL and plain TVM."""
    Device.load("test_config.json")

    if skip_runtime_test():
        return

    device = Device()
    np.random.seed(0)

    for dtype, low, high, atol, rtol, op, op_params in [
        ("float32", -127, 128, 1e-7, 1e-7, relay.add, {}),
        ("uint8", 0, 255, 1.0, 0.0, relay.qnn.op.add, _qnn_params),
        ("int8", -127, 128, 1.0, 0.0, relay.qnn.op.add, _qnn_params),
    ]:
        shape = (2, 2)
        for inputs in [
            {
                "a": tvm.nd.array(np.random.uniform(low, high, shape).astype(dtype)),
                "b": tvm.nd.array(np.random.uniform(low, high, shape).astype(dtype)),
            }
        ]:
            outputs = []
            func = _get_model(shape, dtype, iter(inputs), op, op_params)
            # Build once with ACL offloading and once without, then compare.
            for acl in [True, False]:
                outputs.append(build_and_run(func, inputs, 1, None, device, enable_acl=acl)[0])

            # Extra context reported by verify() on failure.
            config = {
                "shape": shape,
                "dtype": dtype,
                "inputs": inputs,
                "operation": op,
                "op_params": op_params,
            }
            verify(outputs, atol=atol, rtol=rtol, config=config, verify_saturation=False)
def test_codegen_add():
    """Check the generated JSON graph for float and quantized add."""
    if skip_codegen_test():
        return

    input_names = {"a", "b"}
    cases = [
        ("float32", "add", relay.add, {}),
        ("uint8", "qnn.add", relay.qnn.op.add, _qnn_params),
        ("int8", "qnn.add", relay.qnn.op.add, _qnn_params),
    ]
    for dtype, op_name, op, qnn_params in cases:
        for shape in [(1, 1), (2, 2, 2), (3, 3, 3, 3)]:
            func = _get_model(shape, dtype, iter(input_names), op, qnn_params)
            expected = _get_expected_codegen(shape, dtype, op_name, qnn_params)
            verify_codegen(func, expected, 1)
@pytest.mark.parametrize(
    "param, param_type",
    [
        ("lhs_scale", "float32"),
        ("lhs_zero_point", "int32"),
        ("rhs_scale", "float32"),
        ("rhs_zero_point", "int32"),
    ],
)
def test_codegen_add_per_channel_quantization(param, param_type):
    """A per-channel qnn param keeps the add on TVM (no ACL module is created)."""
    if skip_codegen_test():
        return

    # Work on a copy: the previous code assigned `qnn_params = _qnn_params`
    # and then mutated it, leaking the per-channel override into every later
    # parametrized case and into the other tests sharing the module-level dict.
    qnn_params = dict(_qnn_params)
    qnn_params[param] = relay.const([1, 2], param_type)
    dtype = "int8"
    op_name = "qnn.add"
    op = relay.qnn.op.add

    inputs = {"a", "b"}
    for shape in [(1, 3, 3, 2)]:
        func = _get_model(shape, dtype, iter(inputs), op, qnn_params)
        exp_codegen = _get_expected_codegen(shape, dtype, op_name, qnn_params)
        # Expect zero ACL modules and one op left to TVM.
        verify_codegen(func, exp_codegen, num_acl_modules=0, tvm_ops=1)
if __name__ == "__main__":
    # Allow running this test file directly, without pytest.
    test_runtime_add()
    test_codegen_add()
    test_codegen_add_per_channel_quantization()
| 5,345 | 30.633136 | 98 | py |
tvm | tvm-main/tests/python/contrib/test_arm_compute_lib/test_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library integration conv2d tests."""
import numpy as np
import pytest
import tvm
from tvm import relay
from test_arm_compute_lib.infrastructure import (
QNN_DTYPES,
get_low_high_atol_rtol,
skip_runtime_test,
skip_codegen_test,
build_and_run,
verify,
verify_codegen,
)
from test_arm_compute_lib.infrastructure import Device
def _get_model(
    shape,
    kernel_h,
    kernel_w,
    padding,
    strides,
    dilation,
    groups,
    dtype,
    channels,
    var_names,
    has_bias=False,
    has_activation=False,
    has_pad=False,
):
    """Return a model and any parameters it may have.

    Builds an NHWC conv2d (optionally depthwise, with explicit pad, bias and
    relu) with random weights, returning (expression, params dict).
    """
    a = relay.var(next(var_names), shape=shape, dtype=dtype)
    if has_pad:
        # Explicit nn.pad in front of the conv; the conv itself gets no padding.
        p = ((0, 0), (padding[0], padding[0]), (padding[1], padding[1]), (0, 0))
        a = relay.nn.pad(a, pad_width=p)
        padding = (0, 0, 0, 0)
    else:
        if len(padding) == 2:
            padding = (padding[0], padding[1], padding[0], padding[1])
        # NOTE(review): this inflates `shape` by the padding even though the
        # input var was already created with the unpadded shape; `shape` is
        # only used below for the depthwise check and weight shape — confirm
        # this is intentional.
        shape = (shape[0], shape[1] + padding[0] * 2, shape[2] + padding[1] * 2, shape[3])
    # Depthwise when input channels == output channels == groups.
    is_depthwise = shape[3] == channels == groups
    weight_format = "HWOI" if is_depthwise else "HWIO"
    if weight_format == "HWIO":
        weight_shape = (kernel_h, kernel_w, shape[3] // groups, channels)
    else:
        weight_shape = (kernel_h, kernel_w, channels, shape[3] // groups)
    w = tvm.nd.array(np.random.uniform(-128, 127, weight_shape).astype(dtype))
    weights = relay.const(w, dtype)
    out = relay.nn.conv2d(
        a,
        weights,
        kernel_size=(kernel_h, kernel_w),
        data_layout="NHWC",
        kernel_layout=weight_format,
        dilation=dilation,
        strides=strides,
        padding=padding,
        groups=groups,
        channels=channels,
        out_dtype=dtype,
    )
    params = {"w": w}
    if has_bias:
        # Bias has one entry per output channel (input-channel dim for depthwise).
        bias_shape = weight_shape[2] if is_depthwise else weight_shape[3]
        b = tvm.nd.array(np.random.uniform(-128, 127, bias_shape).astype(dtype))
        biasc = relay.const(b, dtype)
        out = relay.nn.bias_add(out, biasc, axis=3)
        params["b"] = b
    if has_activation:
        out = relay.nn.relu(out)
    return out, params
def _get_qnn_params(input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w, channels):
"""Get output qnn parameters given input and kernel parameters."""
input_max = input_sc * (255 - input_zp)
input_min = -input_sc * input_zp
kernel_max = kernel_sc * (255 - kernel_zp)
kernel_min = -kernel_sc * kernel_zp
output_limits = [
kernel_max * kernel_h * kernel_w * channels * input_max,
kernel_min * kernel_h * kernel_w * channels * input_max,
kernel_min * kernel_h * kernel_w * channels * input_min,
kernel_max * kernel_h * kernel_w * channels * input_min,
]
output_max = max(output_limits)
output_min = min(output_limits)
output_sc = (output_max - output_min) / 255
output_zp = -int(output_min / output_sc)
return output_zp, output_sc
def _get_qnn_model(
    shape,
    kernel_h,
    kernel_w,
    padding,
    strides,
    dilation,
    groups,
    dtype,
    channels,
    input_zp,
    input_sc,
    kernel_zp,
    kernel_sc,
    output_zp,
    output_sc,
    var_names,
    has_bias=False,
    has_activation=False,
    has_pad=False,
):
    """Return a model and any parameters it may have.

    Quantized counterpart of ``_get_model``: qnn.conv2d accumulating in int32
    (optionally with bias and relu) followed by a requantize back to ``dtype``.
    """
    low, high, _, _ = get_low_high_atol_rtol(dtype)
    a = relay.var(next(var_names), shape=shape, dtype=dtype)
    if has_pad:
        # Explicit nn.pad (filled with the input zero point so the padding is
        # "zero" in real-valued terms); the conv itself gets no padding.
        p = ((0, 0), (padding[0], padding[0]), (padding[1], padding[1]), (0, 0))
        a = relay.nn.pad(a, pad_width=p, pad_value=input_zp, pad_mode="constant")
        padding = (0, 0, 0, 0)
    else:
        if len(padding) == 2:
            padding = (padding[0], padding[1], padding[0], padding[1])
        # NOTE(review): as in _get_model, `shape` is inflated by the padding
        # after the input var was already created — used only for the
        # depthwise check and weight shape below.
        shape = (shape[0], shape[1] + padding[0] * 2, shape[2] + padding[1] * 2, shape[3])
    # Depthwise when input channels == output channels == groups.
    is_depthwise = shape[3] == channels == groups
    weight_format = "HWOI" if is_depthwise else "HWIO"
    if weight_format == "HWIO":
        weight_shape = (kernel_h, kernel_w, shape[3] // groups, channels)
    else:
        weight_shape = (kernel_h, kernel_w, channels, shape[3] // groups)
    w = tvm.nd.array(np.random.uniform(low, high, weight_shape).astype(dtype))
    weights = relay.const(w, dtype)
    out = relay.qnn.op.conv2d(
        a,
        weights,
        input_zero_point=relay.const(input_zp, "int32"),
        kernel_zero_point=relay.const(kernel_zp, "int32"),
        input_scale=relay.const(input_sc, "float32"),
        kernel_scale=relay.const(kernel_sc, "float32"),
        kernel_size=(kernel_h, kernel_w),
        data_layout="NHWC",
        kernel_layout=weight_format,
        dilation=dilation,
        strides=strides,
        padding=padding,
        groups=groups,
        channels=channels,
        out_dtype="int32",
    )
    params = {"w": w}
    if has_bias:
        # int32 bias, one entry per output channel (input-channel dim for depthwise).
        bias_shape = weight_shape[2] if is_depthwise else weight_shape[3]
        b = tvm.nd.array(np.random.uniform(-128, 127, bias_shape).astype("int32"))
        biasc = relay.const(b, "int32")
        out = relay.nn.bias_add(out, biasc, axis=3)
        params["b"] = b
    if has_activation:
        out = relay.nn.relu(out)
    # Requantize the int32 accumulator back to the requested integer dtype.
    req = relay.qnn.op.requantize(
        out,
        relay.const(input_sc * kernel_sc, "float32"),  # input scale
        relay.const(0, "int32"),  # input zero point
        relay.const(output_sc, "float32"),  # output scale
        relay.const(output_zp, "int32"),  # output zero point
        out_dtype=dtype,
    )
    return req, params
def _get_expected_codegen(
    shape,
    kernel_h,
    kernel_w,
    padding,
    strides,
    dilation,
    groups,
    dtype,
    channels,
    has_bias=False,
    has_activation=False,
):
    """Build the JSON graph the ACL codegen is expected to emit for a conv2d.

    Returns a flat list of node dicts: graph inputs/constants first, the
    kernel node last, in the order ``verify_codegen`` compares them.
    """
    # Normalise 2-element padding to the 4-element (top, left, bottom, right) form.
    if len(padding) == 2:
        padding = (padding[0], padding[1], padding[0], padding[1])
    # NOTE(review): dilation is not factored into the output size below —
    # every current trial uses dilation (1, 1); confirm before adding others.
    output_height = ((shape[1] - kernel_h + padding[0] + padding[2]) / strides[0]) + 1
    output_width = ((shape[2] - kernel_w + padding[1] + padding[3]) / strides[1]) + 1
    output_shape = (1, int(output_height), int(output_width), channels)
    # Quantized graphs accumulate into int32; float graphs stay float32.
    out_dtype = "int32" if dtype in QNN_DTYPES else "float32"
    # Depthwise iff channel count, group count and input channels all agree.
    is_depthwise = shape[3] == channels == groups
    weight_format = "IHWO" if is_depthwise else "OHWI"
    if weight_format == "IHWO":
        weight_shape = (shape[3] // groups, kernel_h, kernel_w, channels)
    else:
        weight_shape = (channels, kernel_h, kernel_w, shape[3] // groups)
    if is_depthwise:
        name = "nn.depthwise_conv2d"
    else:
        name = "nn.conv2d"
    node = {
        "op": "kernel",
        "name": name,
        "inputs": [],
        "attrs": {
            "groups": [[str(groups)]],
            "num_outputs": "1",
            "data_layout": [["NHWC"]],
            "kernel_layout": [[weight_format]],
            "channels": [[str(channels)]],
            "dilation": [[str(dilation[0]), str(dilation[1])]],
            "out_layout": [[""]],
            "out_dtype": [[out_dtype]],
            "kernel_size": [[str(kernel_h), str(kernel_w)]],
            "shape": [[list(output_shape)]],
            "dtype": [[dtype]],
            "padding": [[str(p) for p in padding]],
            "strides": [[str(s) for s in strides]],
        },
    }
    if has_activation:
        node["attrs"]["activation_type"] = [["relu"]]
    inputs = [
        {"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[str(dtype)]]}},
        {
            "op": "const",
            "name": "",
            "attrs": {"shape": [[list(weight_shape)]], "dtype": [[str(dtype)]]},
        },
    ]
    # qnn.conv2d params, input and kernel
    if dtype in QNN_DTYPES:
        node["name"] = "qnn." + node["name"].split(".")[1]
        # Rank-0 consts: zero points (int32) then scales (float32), two of each.
        for param_dtype in ["int32", "float32"]:
            for _ in range(2):
                inputs.append(
                    {
                        "op": "const",
                        "name": "",
                        "attrs": {"shape": [[[]]], "dtype": [[param_dtype]]},
                    }
                )
    if has_bias:
        bias_dtype = "int32" if dtype in QNN_DTYPES else "float32"
        inputs.append(
            {
                "op": "const",
                "name": "",
                "attrs": {
                    "shape": [[[1, 1, 1, weight_shape[3] if is_depthwise else weight_shape[0]]]],
                    "dtype": [[bias_dtype]],
                },
            }
        )
    # qnn.conv2d params, output
    if dtype in QNN_DTYPES:
        # Output requantization: scale (float32) then zero point (int32).
        for param_dtype in ["float32", "int32"]:
            inputs.append(
                {"op": "const", "name": "", "attrs": {"shape": [[[]]], "dtype": [[param_dtype]]}}
            )
    input_idx = 0
    for _ in range(len(inputs)):
        node["inputs"].append([input_idx, 0, 0])
        input_idx += 1
    node["attrs"]["num_inputs"] = str(len(inputs))
    inputs.append(node)
    return inputs
@pytest.mark.parametrize(
    "trial",
    [
        # Normal convolution
        [2, 2, (1, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, False, False), False],
        [2, 1, (2, 2), (1, 1), (1, 1), 7, (12, 15, 16), (False, False, True), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, True, False), False],
        [3, 3, (1, 1), (1, 1), (1, 1), 16, (12, 15, 16), (False, False, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (10, 10, 14), (True, False, False), False],
        [1, 3, (1, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, True), False],
        [2, 2, (2, 2), (1, 1), (1, 1), 4, (20, 20, 20), (False, True, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (10, 10, 14), (True, False, False), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, False), False],
        [3, 3, (1, 1), (2, 2), (1, 1), 16, (10, 10, 14), (False, True, True), False],
        # Depth-wise convolution
        [3, 3, (1, 1), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, True), True],
        [5, 5, (2, 2), (1, 1), (1, 1), 20, (20, 20, 20), (False, True, False), True],
        [3, 3, (2, 2), (2, 2), (1, 1), 14, (10, 10, 14), (True, False, False), True],
        [5, 5, (0, 0), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, False), True],
        [3, 3, (1, 1), (2, 2), (1, 1), 14, (10, 10, 14), (False, True, True), True],
    ],
)
def test_conv2d(trial):
    """Compare fp32 conv2d results between native TVM and the ACL runtime.

    Each trial is (kernel_h, kernel_w, pad, stride, dilation, out_channels,
    input HWC shape, (has_pad, has_bias, has_activation), is_depthwise).
    """
    Device.load("test_config.json")
    if skip_runtime_test():
        return
    device = Device()
    np.random.seed(0)
    dtype = "float32"
    (
        kernel_h,
        kernel_w,
        pad,
        stride,
        dilation,
        out_channels,
        shape,
        composite,
        is_depthwise,
    ) = trial
    shape = (1, *shape)
    if is_depthwise:
        # Depthwise convolution: one group per input channel.
        groups = shape[3]
    else:
        groups = 1
    outputs = []
    inputs = {
        "a": tvm.nd.array(np.random.uniform(-128, 127, shape).astype(dtype)),
    }
    func, params = _get_model(
        shape,
        kernel_h,
        kernel_w,
        pad,
        stride,
        dilation,
        groups,
        dtype,
        out_channels,
        iter(inputs),
        has_pad=composite[0],
        has_bias=composite[1],
        has_activation=composite[2],
    )
    # Generate results for ACL conv2d and TVM native conv2d for comparison
    for acl in [False, True]:
        outputs.append(build_and_run(func, inputs, 1, params, device, enable_acl=acl)[0])
    config = {
        "shape": shape,
        "groups": groups,
        "kernel size": (kernel_h, kernel_w),
        "padding": pad,
        "stride": stride,
        "dilation": dilation,
        "out channels": out_channels,
        "composite operators (pad, bias, activation)": composite,
    }
    verify(outputs, atol=0.002, rtol=0.01, config=config)
@pytest.mark.parametrize(
    "trial",
    [
        # Normal convolution
        [2, 2, (1, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, False, False), False],
        [2, 1, (2, 2), (1, 1), (1, 1), 7, (12, 15, 16), (False, False, True), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, True, False), False],
        [3, 3, (1, 1), (1, 1), (1, 1), 16, (12, 15, 16), (False, False, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (10, 10, 14), (True, False, False), False],
        [1, 3, (1, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, True), False],
        [2, 2, (2, 2), (1, 1), (1, 1), 4, (20, 20, 20), (False, True, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (10, 10, 14), (True, False, False), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, False), False],
        [3, 3, (1, 1), (2, 2), (1, 1), 16, (10, 10, 14), (False, True, True), False],
        # Depth-wise convolution
        [3, 3, (1, 1), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, True), True],
        [5, 5, (2, 2), (1, 1), (1, 1), 20, (20, 20, 20), (False, True, False), True],
        [3, 3, (2, 2), (2, 2), (1, 1), 14, (10, 10, 14), (True, False, False), True],
        [5, 5, (0, 0), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, False), True],
        [3, 3, (1, 1), (2, 2), (1, 1), 14, (10, 10, 14), (False, True, True), True],
    ],
)
def test_codegen_conv2d(trial):
    """Check the JSON codegen produced for fp32 conv2d matches expectations.

    Each trial is (kernel_h, kernel_w, pad, stride, dilation, out_channels,
    input HWC shape, (has_pad, has_bias, has_activation), is_depthwise).
    """
    if skip_codegen_test():
        return
    dtype = "float32"
    (
        kernel_h,
        kernel_w,
        pad,
        stride,
        dilation,
        out_channels,
        shape,
        composite,
        is_depthwise,
    ) = trial
    shape = (1, *shape)
    if is_depthwise:
        # Depthwise convolution: one group per input channel.
        groups = shape[3]
    else:
        groups = 1
    inputs = {"a"}
    args = (shape, kernel_h, kernel_w, pad, stride, dilation, groups, dtype, out_channels)
    func, params = _get_model(
        *args,
        var_names=iter(inputs),
        has_pad=composite[0],
        has_bias=composite[1],
        has_activation=composite[2],
    )
    exp_codegen = _get_expected_codegen(*args, has_bias=composite[1], has_activation=composite[2])
    verify_codegen(func, exp_codegen, 1)
@pytest.mark.parametrize(
    "trial",
    [
        # Normal convolution
        [2, 2, (1, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, False, False), False],
        [2, 1, (2, 2), (1, 1), (1, 1), 7, (12, 15, 16), (False, False, True), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, True, False), False],
        [3, 3, (1, 1), (1, 1), (1, 1), 16, (12, 15, 16), (False, False, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (10, 10, 14), (True, False, False), False],
        [1, 3, (1, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, True), False],
        [2, 2, (2, 2), (1, 1), (1, 1), 4, (20, 20, 20), (False, True, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (10, 10, 14), (True, False, False), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, False), False],
        [3, 3, (1, 1), (2, 2), (1, 1), 16, (10, 10, 14), (False, True, True), False],
        # Depth-wise convolution
        [3, 3, (1, 1), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, True), True],
        [5, 5, (2, 2), (1, 1), (1, 1), 20, (20, 20, 20), (False, True, False), True],
        [3, 3, (2, 2), (2, 2), (1, 1), 14, (10, 10, 14), (True, False, False), True],
        [5, 5, (0, 0), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, False), True],
        [3, 3, (1, 1), (2, 2), (1, 1), 14, (10, 10, 14), (False, True, True), True],
    ],
)
@pytest.mark.parametrize("dtype", QNN_DTYPES)
def test_qnn_conv2d(trial, dtype):
    """Compare quantized conv2d results between native TVM and the ACL runtime.

    Each trial is (kernel_h, kernel_w, pad, stride, dilation, out_channels,
    input HWC shape, (has_pad, has_bias, has_activation), is_depthwise).
    """
    Device.load("test_config.json")
    if skip_runtime_test():
        return
    device = Device()
    np.random.seed(0)
    (
        kernel_h,
        kernel_w,
        pad,
        stride,
        dilation,
        out_channels,
        shape,
        composite,
        is_depthwise,
    ) = trial
    shape = (1, *shape)
    if is_depthwise:
        # Depthwise convolution: one group per input channel.
        groups = shape[3]
    else:
        groups = 1
    outputs = []
    inputs = {"a": tvm.nd.array(np.random.uniform(0, 255, shape).astype(dtype))}
    # Fixed input/kernel quantization params; output params derived from them.
    input_zp = 100
    input_sc = 0.5
    kernel_zp = 25
    kernel_sc = 0.03
    output_zp, output_sc = _get_qnn_params(
        input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w, shape[3]
    )
    func, params = _get_qnn_model(
        shape,
        kernel_h,
        kernel_w,
        pad,
        stride,
        dilation,
        groups,
        dtype,
        out_channels,
        input_zp,
        input_sc,
        kernel_zp,
        kernel_sc,
        output_zp,
        output_sc,
        iter(inputs),
        has_pad=composite[0],
        has_bias=composite[1],
        has_activation=composite[2],
    )
    for acl in [False, True]:
        outputs.append(build_and_run(func, inputs, 1, params, device, enable_acl=acl)[0])
    config = {
        "shape": shape,
        "groups": groups,
        "kernel size": (kernel_h, kernel_w),
        "padding": pad,
        "stride": stride,
        "dilation": dilation,
        "out channels": out_channels,
        "composite operators (pad, bias, activation)": composite,
        "input scale": input_sc,
        "input zero point": input_zp,
        "kernel scale": kernel_sc,
        "kernel zero point": kernel_zp,
        "output scale": output_sc,
        "output zero point": output_zp,
    }
    # Depthwise results are allowed one extra quantization step of error.
    atol = 2 if is_depthwise else 1
    verify(outputs, atol=atol, rtol=0, config=config, verify_saturation=True)
@pytest.mark.parametrize("dtype", QNN_DTYPES)
@pytest.mark.parametrize(
    "trial",
    [
        # Normal convolution
        [2, 2, (1, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, False, False), False],
        [2, 1, (2, 2), (1, 1), (1, 1), 7, (12, 15, 16), (False, False, True), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, True, False), False],
        [3, 3, (1, 1), (1, 1), (1, 1), 16, (12, 15, 16), (False, False, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (10, 10, 14), (True, False, False), False],
        [1, 3, (1, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, True), False],
        [2, 2, (2, 2), (1, 1), (1, 1), 4, (20, 20, 20), (False, True, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (10, 10, 14), (True, False, False), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, False), False],
        [3, 3, (1, 1), (2, 2), (1, 1), 16, (10, 10, 14), (False, True, True), False],
        # Depth-wise convolution
        [3, 3, (1, 1), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, True), True],
        [5, 5, (2, 2), (1, 1), (1, 1), 20, (20, 20, 20), (False, True, False), True],
        [3, 3, (2, 2), (2, 2), (1, 1), 14, (10, 10, 14), (True, False, False), True],
        [5, 5, (0, 0), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, False), True],
        [3, 3, (1, 1), (2, 2), (1, 1), 14, (10, 10, 14), (False, True, True), True],
    ],
)
def test_codegen_qnn_conv2d(trial, dtype):
    """Check the JSON codegen produced for quantized conv2d matches expectations.

    Each trial is (kernel_h, kernel_w, pad, stride, dilation, out_channels,
    input HWC shape, (has_pad, has_bias, has_activation), is_depthwise).
    """
    if skip_codegen_test():
        return
    (
        kernel_h,
        kernel_w,
        pad,
        stride,
        dilation,
        out_channels,
        shape,
        composite,
        is_depthwise,
    ) = trial
    shape = (1, *shape)
    if is_depthwise:
        # Depthwise convolution: one group per input channel.
        groups = shape[3]
    else:
        groups = 1
    inputs = {"a"}
    input_zp = 100
    input_sc = 0.5
    kernel_zp = 25
    kernel_sc = 0.03
    output_zp, output_sc = _get_qnn_params(
        input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w, shape[3]
    )
    args = (shape, kernel_h, kernel_w, pad, stride, dilation, groups, dtype, out_channels)
    func, params = _get_qnn_model(
        *args,
        input_zp=input_zp,
        input_sc=input_sc,
        kernel_zp=kernel_zp,
        kernel_sc=kernel_sc,
        output_zp=output_zp,
        output_sc=output_sc,
        var_names=iter(inputs),
        has_pad=composite[0],
        has_bias=composite[1],
        has_activation=composite[2],
    )
    exp_codegen = _get_expected_codegen(*args, has_bias=composite[1], has_activation=composite[2])
    verify_codegen(func, exp_codegen, 1)
@pytest.mark.parametrize(
    "param",
    ["kernel_sc", "kernel_zp"],
)
def test_codegen_qnn_conv2d_per_channel_quantization(param):
    """Per-channel quantized conv2d kernels must stay on TVM (0 ACL modules)."""
    if skip_codegen_test():
        return

    dtype = "int8"
    kernel_h = 2
    kernel_w = 2
    pad = (1, 1)
    stride = (1, 1)
    dilation = (1, 1)
    out_channels = 4
    shape = (1, 10, 10, 14)
    composite = (False, False, False)
    groups = 1
    model_args = (shape, kernel_h, kernel_w, pad, stride, dilation, groups, dtype, out_channels)

    qnn_params = dict.fromkeys(
        ["input_zp", "input_sc", "kernel_zp", "kernel_sc", "output_zp", "output_sc"], 1
    )
    # One value per output channel turns the chosen parameter into per-channel form.
    qnn_params[param] = [1, 1, 1, 1]

    func, _ = _get_qnn_model(
        *model_args,
        input_zp=qnn_params["input_zp"],
        input_sc=qnn_params["input_sc"],
        kernel_zp=qnn_params["kernel_zp"],
        kernel_sc=qnn_params["kernel_sc"],
        output_zp=qnn_params["output_zp"],
        output_sc=qnn_params["output_sc"],
        var_names=iter({"a"}),
        has_pad=composite[0],
        has_bias=composite[1],
        has_activation=composite[2],
    )
    expected = _get_expected_codegen(
        *model_args, has_bias=composite[1], has_activation=composite[2]
    )
    verify_codegen(func, expected, num_acl_modules=0, tvm_ops=2)
if __name__ == "__main__":
    # The tests in this file are parametrized via @pytest.mark.parametrize, so
    # calling them directly without arguments raised TypeError. Delegate to
    # pytest so a direct invocation of this file runs every parametrization.
    pytest.main([__file__])
| 22,312 | 32.154532 | 98 | py |
tvm | tvm-main/tests/python/contrib/test_arm_compute_lib/test_runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library runtime tests."""
import numpy as np
import tvm
from tvm import relay
from .infrastructure import skip_runtime_test, build_and_run, verify
from .infrastructure import Device
def test_multiple_ops():
    """
    Test multiple operators destined for ACL.
    The ACL runtime will expect these ops as 2 separate functions for
    the time being.
    """
    Device.load("test_config.json")
    if skip_runtime_test():
        return

    device = Device()
    np.random.seed(0)

    def make_double_reshape(input_shape, var_names):
        """Return a model and any parameters it may have."""
        data = relay.var(next(var_names), shape=input_shape, dtype="float32")
        reshaped = relay.reshape(data, (1, 1, 1000))
        return relay.reshape(reshaped, (1, 1000))

    inputs = {"a": tvm.nd.array(np.random.uniform(0, 1, (1, 1, 1, 1000)).astype("float32"))}
    outputs = []
    for use_acl in (False, True):
        model = make_double_reshape(inputs["a"].shape, iter(inputs))
        result = build_and_run(
            model, inputs, 1, None, device, enable_acl=use_acl, acl_partitions=2
        )[0]
        outputs.append(result)
    verify(outputs, atol=0.002, rtol=0.01)
def test_heterogeneous():
    """
    Test to check if offloading only supported operators works,
    while leaving unsupported operators computed via tvm.
    """
    Device.load("test_config.json")
    if skip_runtime_test():
        return

    device = Device()
    np.random.seed(0)

    def make_model(input_shape, var_names):
        """Return a model and any parameters it may have."""
        data = relay.var(next(var_names), shape=input_shape, dtype="float32")
        # The sigmoid stays on TVM (tvm_ops=1), splitting the offloaded
        # reshapes into two separate ACL partitions.
        body = relay.reshape(data, (1, 1, 1000))
        body = relay.sigmoid(body)
        return relay.reshape(body, (1, 1000))

    inputs = {"a": tvm.nd.array(np.random.uniform(-127, 128, (1, 1, 1, 1000)).astype("float32"))}
    outputs = []
    for use_acl in (False, True):
        model = make_model(inputs["a"].shape, iter(inputs))
        outputs.append(
            build_and_run(
                model, inputs, 1, None, device, enable_acl=use_acl, tvm_ops=1, acl_partitions=2
            )[0]
        )
    verify(outputs, atol=0.002, rtol=0.01)
def test_multiple_runs():
    """
    Test that multiple runs of an operator work.
    """
    Device.load("test_config.json")
    if skip_runtime_test():
        return

    device = Device()

    def make_conv():
        data = relay.var("a", shape=(1, 28, 28, 512), dtype="float32")
        kernel = tvm.nd.array(np.ones((256, 1, 1, 512), dtype="float32"))
        conv = relay.nn.conv2d(
            data,
            relay.const(kernel, "float32"),
            kernel_size=(1, 1),
            data_layout="NHWC",
            kernel_layout="OHWI",
            strides=(1, 1),
            padding=(0, 0),
            dilation=(1, 1),
        )
        return conv, {"w": kernel}

    inputs = {
        "a": tvm.nd.array(np.random.uniform(-127, 128, (1, 28, 28, 512)).astype("float32")),
    }
    func, params = make_conv()
    # Execute the compiled ACL module three times on the same input.
    outputs = build_and_run(func, inputs, 1, params, device, enable_acl=True, no_runs=3)
    verify(outputs, atol=0.002, rtol=0.01)
if __name__ == "__main__":
    # All tests in this file are parameterless, so direct calls are fine.
    test_multiple_ops()
    test_heterogeneous()
    test_multiple_runs()
| 4,073 | 29.177778 | 97 | py |
tvm | tvm-main/tests/python/contrib/test_arm_compute_lib/test_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library integration dense tests."""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm import testing
from test_arm_compute_lib.infrastructure import (
Device,
skip_runtime_test,
skip_codegen_test,
build_and_run,
verify,
verify_codegen,
)
def _get_model(shape, weight_shape, units, dtype, var_names, has_bias=False):
    """Return a model and any parameters it may have"""
    data = relay.var(next(var_names), shape=shape, dtype=dtype)
    weight_data = tvm.nd.array(np.random.uniform(-128, 127, weight_shape).astype(dtype))
    dense = relay.nn.dense(data, relay.const(weight_data, dtype), units=units, out_dtype=dtype)
    params = {"w": weight_data}
    if not has_bias:
        return dense, params
    # Optionally fold a bias-add onto the dense output.
    bias_data = tvm.nd.array(np.random.randint(-128, 127, weight_shape[0]).astype(dtype))
    biased = relay.nn.bias_add(dense, relay.const(bias_data, dtype))
    params["b"] = bias_data
    return biased, params
def _get_qnn_params(input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w):
"""Get output qnn parameters given input and kernel parameters."""
input_max = input_sc * (255 - input_zp)
input_min = -input_sc * input_zp
kernel_max = kernel_sc * (255 - kernel_zp)
kernel_min = -kernel_sc * kernel_zp
output_limits = [
kernel_max * kernel_h * kernel_w * input_max,
kernel_min * kernel_h * kernel_w * input_max,
kernel_min * kernel_h * kernel_w * input_min,
kernel_max * kernel_h * kernel_w * input_min,
]
output_max = max(output_limits)
output_min = min(output_limits)
output_sc = (output_max - output_min) / 255
output_zp = -int(output_min / output_sc)
return output_zp, output_sc
def _get_qnn_model(
    shape,
    weight_shape,
    units,
    dtype,
    input_zp,
    input_sc,
    kernel_zp,
    kernel_sc,
    output_zp,
    output_sc,
    var_names,
    has_bias=False,
):
    """Return a quantized dense model (qnn.dense [+ bias] + requantize) and params."""
    data = relay.var(next(var_names), shape=shape, dtype=dtype)
    weight_data = tvm.nd.array(np.random.uniform(-128, 127, weight_shape).astype(dtype))
    out = relay.qnn.op.dense(
        data,
        relay.const(weight_data, dtype),
        units=units,
        input_zero_point=relay.const(input_zp, "int32"),
        kernel_zero_point=relay.const(kernel_zp, "int32"),
        input_scale=relay.const(input_sc, "float32"),
        kernel_scale=relay.const(kernel_sc, "float32"),
        out_dtype="int32",
    )
    params = {"w": weight_data}
    if has_bias:
        bias_data = tvm.nd.array(np.random.randint(0, 255, weight_shape[0]).astype("int32"))
        out = relay.nn.bias_add(out, relay.const(bias_data, "int32"))
        params["b"] = bias_data
    # Requantize the int32 accumulator back to the requested output dtype.
    requantized = relay.qnn.op.requantize(
        out,
        relay.const(input_sc * kernel_sc, "float32"),  # input scale
        relay.const(0, "int32"),  # input zero point
        relay.const(output_sc, "float32"),  # output scale
        relay.const(output_zp, "int32"),  # output zero point
        out_dtype=dtype,
    )
    return requantized, params
def _get_expected_codegen(shape, weight_shape, units, dtype, has_bias=False):
output_shape = (shape[0], units)
qnn_dtypes = ("uint8", "int8")
out_dtype = "int32" if dtype in qnn_dtypes else "float32"
node = {
"op": "kernel",
"name": "nn.dense",
"inputs": [],
"attrs": {
"num_outputs": "1",
"out_dtype": [[out_dtype]],
"shape": [[list(output_shape)]],
"dtype": [[dtype]],
"units": [[str(units)]],
},
}
inputs = [
{"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[str(dtype)]]}},
{
"op": "const",
"name": "",
"attrs": {"shape": [[list(weight_shape)]], "dtype": [[str(dtype)]]},
},
]
# qnn.dense params, input and kernel
if dtype in qnn_dtypes:
node["name"] = "qnn.dense"
for param_dtype in ["int32", "float32"]:
for _ in range(2):
inputs.append(
{
"op": "const",
"name": "",
"attrs": {"shape": [[[]]], "dtype": [[param_dtype]]},
}
)
if has_bias:
bias_dtype = "int32" if dtype in qnn_dtypes else "float32"
bias_shape = [1, weight_shape[0]] if weight_shape[0] != 1 else [weight_shape[0]]
inputs.append(
{
"op": "const",
"name": "",
"attrs": {"shape": [[bias_shape]], "dtype": [[bias_dtype]]},
}
)
# qnn.dense params, output
if dtype in qnn_dtypes:
for param_dtype in ["float32", "int32"]:
inputs.append(
{"op": "const", "name": "", "attrs": {"shape": [[[]]], "dtype": [[param_dtype]]}}
)
input_idx = 0
for _ in range(len(inputs)):
node["inputs"].append([input_idx, 0, 0])
input_idx += 1
node["attrs"]["num_inputs"] = str(len(inputs))
inputs.append(node)
return inputs
def test_dense():
    """Compare fp32 dense (with and without bias) between TVM native and ACL."""
    Device.load("test_config.json")
    if skip_runtime_test():
        return

    device = Device()
    np.random.seed(0)
    dtype = "float32"
    trials = [
        [(1, 128), (16, 128), 16, True],
        [(1, 128), (16, 128), 16, False],
        [(32, 32), (32, 32), 32, True],
        [(32, 32), (32, 32), 32, False],
        [(1, 64), (1, 64), 1, True],
        [(1, 64), (1, 64), 1, False],
        [(11, 2), (2, 2), 2, True],
        [(11, 2), (2, 2), 2, False],
    ]
    for shape, weight_shape, units, composite in trials:
        inputs = {"a": tvm.nd.array(np.random.uniform(-128, 127, shape).astype(dtype))}
        func, params = _get_model(
            shape, weight_shape, units, dtype, var_names=iter(inputs), has_bias=composite
        )
        # Native TVM first, then ACL, so verify compares the two.
        outputs = [
            build_and_run(func, inputs, 1, params, device, enable_acl=use_acl)[0]
            for use_acl in (False, True)
        ]
        config = {
            "shape": shape,
            "weight_shape": weight_shape,
            "units": units,
            "dtype": dtype,
            "composite operators (bias)": composite,
        }
        verify(outputs, atol=0.001, rtol=0.01, config=config)
def test_codegen_dense():
    """Check the JSON codegen produced for fp32 dense matches expectations."""
    if skip_codegen_test():
        return

    np.random.seed(0)
    dtype = "float32"
    trials = [
        [(1, 128), (16, 128), 16, True],
        [(1, 128), (16, 128), 16, False],
        [(32, 32), (32, 32), 32, True],
        [(32, 32), (32, 32), 32, False],
        [(1, 64), (1, 64), 1, True],
        [(1, 64), (1, 64), 1, False],
        [(11, 2), (2, 2), 2, True],
        [(11, 2), (2, 2), 2, False],
    ]
    for shape, weight_shape, units, composite in trials:
        model_args = (shape, weight_shape, units, dtype)
        func, _ = _get_model(*model_args, var_names=iter({"a"}), has_bias=composite)
        expected = _get_expected_codegen(*model_args, has_bias=composite)
        verify_codegen(func, expected)
@pytest.mark.parametrize(
    "dtype,min_range,max_range",
    [
        ("uint8", 0, 255),
        ("int8", -127, 128),
    ],
)
def test_qnn_dense(dtype, min_range, max_range):
    """Compare quantized dense results between native TVM and ACL.

    Each trial is (input shape, weight shape, units, has_bias).
    """
    Device.load("test_config.json")
    if skip_runtime_test():
        return
    device = Device()
    np.random.seed(0)
    trials = [
        [(1, 2), (2, 2), 2, True],
        [(1, 2), (2, 2), 2, False],
        [(4, 4), (4, 4), 4, True],
        [(4, 4), (4, 4), 4, False],
        [(16, 16), (4, 16), 4, True],
        [(16, 16), (4, 16), 4, False],
        [(1, 128), (16, 128), 16, True],
        [(1, 128), (16, 128), 16, False],
        [(32, 32), (32, 32), 32, True],
        [(32, 32), (32, 32), 32, False],
        [(1, 64), (1, 64), 1, True],
        [(1, 64), (1, 64), 1, False],
    ]
    for shape, weight_shape, units, composite in trials:
        outputs = []
        inputs = {"a": tvm.nd.array(np.random.uniform(min_range, max_range, shape).astype(dtype))}
        # Fixed input/kernel quantization params; output params derived from them.
        input_zp = 100
        input_sc = 0.5
        kernel_zp = 50
        kernel_sc = 0.03
        output_zp, output_sc = _get_qnn_params(
            input_zp, input_sc, kernel_zp, kernel_sc, weight_shape[0], weight_shape[1]
        )
        func, params = _get_qnn_model(
            shape,
            weight_shape,
            units,
            dtype,
            input_zp,
            input_sc,
            kernel_zp,
            kernel_sc,
            output_zp,
            output_sc,
            var_names=iter(inputs),
            has_bias=composite,
        )
        for acl in [False, True]:
            outputs.append(
                build_and_run(
                    func,
                    inputs,
                    1,
                    params,
                    device,
                    enable_acl=acl,
                )[0]
            )
        config = {
            "shape": shape,
            "weight_shape": weight_shape,
            "units": units,
            "dtype": dtype,
            "composite operators (bias)": composite,
            "input scale": input_sc,
            "input zero point": input_zp,
            "kernel scale": kernel_sc,
            "kernel zero point": kernel_zp,
            "output scale": output_sc,
            "output zero point": output_zp,
        }
        verify(outputs, atol=1, rtol=0, config=config, verify_saturation=True)
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_codegen_qnn_dense(dtype):
    """Check the JSON codegen produced for quantized dense matches expectations.

    Each trial is (input shape, weight shape, units, has_bias).
    """
    if skip_codegen_test():
        return
    np.random.seed(0)
    trials = [
        [(1, 2), (2, 2), 2, True],
        [(1, 2), (2, 2), 2, False],
        [(4, 4), (4, 4), 4, True],
        [(4, 4), (4, 4), 4, False],
        [(16, 16), (4, 16), 4, True],
        [(16, 16), (4, 16), 4, False],
        [(1, 128), (16, 128), 16, True],
        [(1, 128), (16, 128), 16, False],
        [(32, 32), (32, 32), 32, True],
        [(32, 32), (32, 32), 32, False],
        [(1, 64), (1, 64), 1, True],
        [(1, 64), (1, 64), 1, False],
    ]
    for shape, weight_shape, units, composite in trials:
        inputs = {"a"}
        args = (shape, weight_shape, units, dtype)
        # Fixed input/kernel quantization params; output params derived from them.
        input_zp = 100
        input_sc = 0.5
        kernel_zp = 25
        kernel_sc = 0.03
        output_zp, output_sc = _get_qnn_params(
            input_zp, input_sc, kernel_zp, kernel_sc, weight_shape[0], weight_shape[1]
        )
        func, params = _get_qnn_model(
            *args,
            var_names=iter(inputs),
            input_zp=input_zp,
            input_sc=input_sc,
            kernel_zp=kernel_zp,
            kernel_sc=kernel_sc,
            output_zp=output_zp,
            output_sc=output_sc,
            has_bias=composite,
        )
        exp_codegen = _get_expected_codegen(*args, has_bias=composite)
        verify_codegen(func, exp_codegen)
@pytest.mark.parametrize(
    "param",
    ["kernel_sc", "kernel_zp"],
)
def test_codegen_qnn_dense_per_channel_quantization(param):
    """Per-channel quantized dense kernels must stay on TVM (0 ACL modules)."""
    if skip_codegen_test():
        return

    np.random.seed(0)
    dtype = "int8"
    shape = (1, 2)
    weight_shape = (2, 2)
    units = 2
    composite = True
    model_args = (shape, weight_shape, units, dtype)

    qnn_params = dict.fromkeys(
        ["input_zp", "input_sc", "kernel_zp", "kernel_sc", "output_zp", "output_sc"], 1
    )
    # One value per unit turns the chosen parameter into per-channel form.
    qnn_params[param] = [1, 1]

    func, _ = _get_qnn_model(
        *model_args,
        var_names=iter({"a"}),
        input_zp=qnn_params["input_zp"],
        input_sc=qnn_params["input_sc"],
        kernel_zp=qnn_params["kernel_zp"],
        kernel_sc=qnn_params["kernel_sc"],
        output_zp=qnn_params["output_zp"],
        output_sc=qnn_params["output_sc"],
        has_bias=composite,
    )
    expected = _get_expected_codegen(*model_args, has_bias=composite)
    verify_codegen(func, expected, num_acl_modules=0, tvm_ops=3)
if __name__ == "__main__":
    # test_qnn_dense, test_codegen_qnn_dense and the per-channel test are
    # parametrized via @pytest.mark.parametrize, so calling them directly
    # without arguments raised TypeError. Delegate to pytest so a direct
    # invocation of this file runs every parametrization.
    pytest.main([__file__])
| 13,051 | 29.283063 | 98 | py |
tvm | tvm-main/tests/python/contrib/test_arm_compute_lib/test_maximum.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library integration reshape tests."""
import numpy as np
import tvm
from tvm import relay
from tvm import testing
from .infrastructure import (
skip_runtime_test,
skip_codegen_test,
build_and_run,
verify,
verify_codegen,
)
from .infrastructure import Device
def _get_model(input_shape, dtype, var_names):
    """Return a relay maximum(a, b) model over two identically shaped inputs."""
    a = relay.var(next(var_names), shape=input_shape, dtype=dtype)
    b = relay.var(next(var_names), shape=input_shape, dtype=dtype)
    # Name the result `out` rather than shadowing the builtin `max`.
    out = relay.maximum(a, b)
    return out
def _get_expected_codegen(shape, dtype):
node = {
"op": "kernel",
"name": "maximum",
"inputs": [[0, 0, 0], [1, 0, 0]],
"attrs": {
"num_inputs": "2",
"num_outputs": "1",
"shape": [[list(shape)]],
"dtype": [[dtype]],
},
}
inputs = [
{"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[dtype]]}},
{"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[dtype]]}},
]
inputs.append(node)
return inputs
def test_maximum():
    """Compare elementwise maximum results between native TVM and ACL.

    Each configuration is (dtype, low, high, atol, rtol) where low/high bound
    the random inputs and atol/rtol are the comparison tolerances.
    """
    Device.load("test_config.json")
    if skip_runtime_test():
        return

    device = Device()
    np.random.seed(0)

    for dtype, low, high, atol, rtol in [
        ("float32", -127, 128, 0.001, 0.001),
        ("float32", -1, 1, 0.001, 0.001),
    ]:
        inputs = {
            "a": tvm.nd.array(np.random.uniform(low, high, (100, 100)).astype(dtype)),
            "b": tvm.nd.array(np.random.uniform(low, high, (100, 100)).astype(dtype)),
        }
        outputs = []
        func = _get_model(inputs["a"].shape, dtype, iter(inputs))
        for acl in [False, True]:
            outputs.append(build_and_run(func, inputs, 1, None, device, enable_acl=acl)[0])
        # Use the per-configuration tolerances: previously atol/rtol were
        # unpacked from the trial but ignored in favour of a hard-coded 1e-7.
        verify(outputs, atol=atol, rtol=rtol)
def test_codegen_maximum():
    """Check the expected JSON codegen for an offloaded maximum op."""
    if skip_codegen_test():
        return

    shape = (100, 100)
    var_names = {"a", "b"}
    for dtype in ("float32",):
        func = _get_model(shape, dtype, iter(var_names))
        expected = _get_expected_codegen(shape, dtype)
        verify_codegen(func, expected, 1)
if __name__ == "__main__":
    # Both tests are parameterless, so direct calls are fine.
    test_maximum()
    test_codegen_maximum()
| 3,084 | 28.103774 | 93 | py |
tvm | tvm-main/tests/python/contrib/test_arm_compute_lib/test_network.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library network tests."""
from distutils.version import LooseVersion
import numpy as np
import pytest
from tvm import relay
from test_arm_compute_lib.infrastructure import Device, skip_runtime_test, build_and_run, verify
def _build_and_run_network(mod, params, inputs, device, tvm_ops, acl_partitions, atol, rtol):
    """Helper function to build and run a network."""
    np.random.seed(0)
    data = {}
    for name, (shape, dtype) in inputs.items():
        # uint8 inputs span the full quantized range; everything else int8-ish.
        lo, hi = (0, 255) if dtype == "uint8" else (-127, 128)
        data[name] = np.random.uniform(lo, hi, shape).astype(dtype)

    outputs = []
    for use_acl in (False, True):
        result = build_and_run(
            mod,
            data,
            1,
            params,
            device,
            enable_acl=use_acl,
            tvm_ops=tvm_ops,
            acl_partitions=acl_partitions,
        )[0]
        outputs.append(result)
    verify(outputs, atol=atol, rtol=rtol, verify_saturation=False)
def _get_tflite_model(tflite_model_path, inputs_dict):
    """Convert TFlite graph to relay."""
    try:
        import tflite.Model
    except ImportError:
        pytest.skip("Missing Tflite support")

    with open(tflite_model_path, "rb") as model_file:
        model_buffer = model_file.read()

    try:
        tflite_model = tflite.Model.Model.GetRootAsModel(model_buffer, 0)
    except AttributeError:
        # Some tflite package versions expose GetRootAsModel at module level.
        tflite_model = tflite.Model.GetRootAsModel(model_buffer, 0)

    shape_dict = {name: spec[0] for name, spec in inputs_dict.items()}
    dtype_dict = {name: spec[1] for name, spec in inputs_dict.items()}
    return relay.frontend.from_tflite(tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict)
def _get_keras_model(keras_model, inputs_dict):
    """Convert a Keras graph to Relay.

    Parameters
    ----------
    keras_model : keras.Model
        The model to convert.
    inputs_dict : dict
        Maps input name -> (shape, dtype); only the shape is used here.

    Returns
    -------
    The (mod, params) pair produced by ``relay.frontend.from_keras``.
    """
    # Bug fix: the previous loop assigned every shape to
    # keras_model.input_names[0], so for multi-input models all but the
    # last shape were silently dropped. Use each entry's own name instead
    # (callers key inputs_dict by the Keras input names).
    inputs = {name: shape for name, (shape, _) in inputs_dict.items()}
    return relay.frontend.from_keras(keras_model, inputs, layout="NHWC")
def test_vgg16():
    """Run VGG16 through TVM and the ACL runtime and compare the outputs."""
    Device.load("test_config.json")
    if skip_runtime_test():
        return
    device = Device()

    def get_model():
        try:
            from keras.applications import VGG16
        except ImportError:
            pytest.skip("Missing Keras Package")
        net = VGG16(include_top=True, weights="imagenet", input_shape=(224, 224, 3), classes=1000)
        inputs = {net.input_names[0]: ((1, 224, 224, 3), "float32")}
        mod, params = _get_keras_model(net, inputs)
        return mod, params, inputs

    mod, params, inputs = get_model()
    _build_and_run_network(
        mod,
        params,
        inputs,
        device=device,
        tvm_ops=4,
        acl_partitions=21,
        atol=0.002,
        rtol=0.01,
    )
def test_mobilenet():
    """Run MobileNet through TVM and the ACL runtime and compare the outputs."""
    keras = pytest.importorskip("keras")
    Device.load("test_config.json")
    if skip_runtime_test():
        return
    device = Device()

    def get_model():
        try:
            from keras.applications import MobileNet
        except ImportError:
            pytest.skip("Missing keras module")
        net = MobileNet(
            include_top=True, weights="imagenet", input_shape=(224, 224, 3), classes=1000
        )
        inputs = {net.input_names[0]: ((1, 224, 224, 3), "float32")}
        mod, params = _get_keras_model(net, inputs)
        return mod, params, inputs

    # Newer Keras (gated at 2.9 here; upstream change `b6abfaed1326e3c`)
    # removed one reshape operator from MobileNet, shifting the expected
    # operator and partition counts down by one each.
    if keras.__version__ < LooseVersion("2.9"):
        expected_tvm_ops, expected_acl_partitions = 56, 31
    else:
        expected_tvm_ops, expected_acl_partitions = 55, 30

    mod, params, inputs = get_model()
    _build_and_run_network(
        mod,
        params,
        inputs,
        device=device,
        tvm_ops=expected_tvm_ops,
        acl_partitions=expected_acl_partitions,
        atol=0.002,
        rtol=0.01,
    )
def test_quantized_mobilenet():
    """Run pre-quantized MobileNetV1 (TFLite) through TVM and ACL and compare."""
    Device.load("test_config.json")
    if skip_runtime_test():
        return
    try:
        import tvm.relay.testing.tf as tf_testing
    except ImportError:
        pytest.skip("Missing Tflite support")
    device = Device()

    def get_model():
        model_path = tf_testing.get_workload_official(
            "https://storage.googleapis.com/download.tensorflow.org/"
            "models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
            "mobilenet_v1_1.0_224_quant.tflite",
        )
        inputs = {"input": ((1, 224, 224, 3), "uint8")}
        mod, params = _get_tflite_model(model_path, inputs_dict=inputs)
        return mod, params, inputs

    mod, params, inputs = get_model()
    _build_and_run_network(
        mod,
        params,
        inputs,
        device=device,
        tvm_ops=3,
        acl_partitions=30,
        atol=10,
        rtol=0,
    )
def test_squeezenet():
    """Run SqueezeNet (TFLite) through TVM and the ACL runtime and compare."""
    Device.load("test_config.json")
    if skip_runtime_test():
        return
    try:
        import tvm.relay.testing.tf as tf_testing
    except ImportError:
        pytest.skip("Missing TF Support")
    device = Device()

    def get_model():
        model_path = tf_testing.get_workload_official(
            "https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz",
            "squeezenet.tflite",
        )
        inputs = {"Placeholder": ((1, 224, 224, 3), "float32")}
        mod, params = _get_tflite_model(model_path, inputs_dict=inputs)
        return mod, params, inputs

    mod, params, inputs = get_model()
    _build_and_run_network(
        mod,
        params,
        inputs,
        device=device,
        tvm_ops=9,
        acl_partitions=31,
        atol=8,
        rtol=0,
    )
if __name__ == "__main__":
    # These tests are plain functions (not parametrized), so they can be
    # invoked directly when the file is run as a script.
    test_vgg16()
    test_mobilenet()
    test_quantized_mobilenet()
    test_squeezenet()
| 6,771 | 28.189655 | 135 | py |
tvm | tvm-main/tests/python/contrib/test_arm_compute_lib/test_reshape.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library integration reshape tests."""
import numpy as np
import tvm
from tvm import relay
from tvm import testing
from .infrastructure import (
skip_runtime_test,
skip_codegen_test,
build_and_run,
verify,
verify_codegen,
)
from .infrastructure import Device
def _get_model(input_shape, output_shape, dtype, var_names):
    """Return a relay reshape expression (this model has no parameters)."""
    data = relay.var(next(var_names), shape=input_shape, dtype=dtype)
    return relay.reshape(data, output_shape)
def _get_expected_codegen(input_shape, output_shape, dtype):
node = {
"op": "kernel",
"name": "reshape",
"inputs": [[0, 0, 0]],
"attrs": {
"num_inputs": "1",
"num_outputs": "1",
"newshape": [[str(s) for s in output_shape]],
"shape": [[list(output_shape)]],
"dtype": [[dtype]],
"allowzero": [["0"]],
},
}
input = {
"op": "input",
"name": "",
"attrs": {"shape": [[list(input_shape)]], "dtype": [[dtype]]},
}
return [input, node]
def test_reshape():
    """Compare reshape results between native TVM and the ACL runtime."""
    Device.load("test_config.json")
    if skip_runtime_test():
        return
    device = Device()
    np.random.seed(0)
    for dtype, low, high, atol, rtol in [
        ("float32", -127, 128, 0.001, 0.001),
        ("uint8", 0, 255, 0, 0),
    ]:
        inputs = {"a": tvm.nd.array(np.random.uniform(low, high, (1, 1, 1, 1000)).astype(dtype))}
        for new_shape in [(1, 1000), (10, 10, 10), (10, 100, 1), (1, 1000, 1)]:
            outputs = []
            func = _get_model(inputs["a"].shape, new_shape, dtype, iter(inputs))
            for acl in [False, True]:
                outputs.append(build_and_run(func, inputs, 1, None, device, enable_acl=acl)[0])
            # Fixed: the original config labelled the input shape "new shape"
            # and the target shape "shape".
            config = {
                "input shape": inputs["a"].shape,
                "new shape": new_shape,
                "dtype": dtype,
            }
            # Fixed: use the per-dtype tolerances unpacked above instead of a
            # hard-coded 1e-7 (the loop tuple's atol/rtol were never used).
            verify(outputs, atol=atol, rtol=rtol, config=config)
def test_codegen_reshape():
    """Check the JSON emitted by the ACL codegen for reshape."""
    if skip_codegen_test():
        return
    input_shape = (1, 1, 1, 1000)
    inputs = {"a"}
    for dtype in ["float32", "uint8"]:
        for new_shape in [(1, 1000), (10, 10, 10), (10, 100, 1)]:
            func = _get_model(input_shape, new_shape, dtype, iter(inputs))
            exp_codegen = _get_expected_codegen(input_shape, new_shape, dtype)
            verify_codegen(func, exp_codegen, 1)
if __name__ == "__main__":
    # Both tests are plain functions (not parametrized), so direct calls work.
    test_reshape()
    test_codegen_reshape()
| 3,334 | 28.776786 | 97 | py |
tvm | tvm-main/tests/python/contrib/test_arm_compute_lib/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Infrastructure and tests for Arm Compute Library"""
| 840 | 45.722222 | 62 | py |
tvm | tvm-main/tests/python/contrib/test_arm_compute_lib/test_pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library integration pooling tests."""
import numpy as np
import pytest
import tvm
from tvm import relay, testing
from test_arm_compute_lib.infrastructure import (
Device,
build_and_run,
skip_codegen_test,
skip_runtime_test,
verify,
verify_codegen,
)
def _calculate_output_shape(shape, sizes, padding, strides, dilation):
"""Calculate pooling output shape."""
height_receptive_field = (sizes[0] - 1) * dilation[0] + 1
width_receptive_field = (sizes[1] - 1) * dilation[1] + 1
output_height = ((shape[1] - height_receptive_field + padding[0] + padding[2]) / strides[0]) + 1
output_width = ((shape[2] - width_receptive_field + padding[1] + padding[3]) / strides[1]) + 1
return 1, int(output_height), int(output_width), shape[3]
def _get_pooling_model(
    shape, dtype, typef, sizes, strides, dilation, padding, ceil_mode, count_include_pad, var_names
):
    """Return a relay pooling expression of the requested flavour.

    2-element padding is expanded to the symmetric 4-element form. Quantized
    dtypes are accumulated in int32 for average pooling and cast back.
    """
    if len(padding) == 2:
        padding = (padding[0], padding[1], padding[0], padding[1])
    qnn_dtypes = ("uint8", "int8")
    out = relay.var(next(var_names), shape=shape, dtype=dtype)
    if typef == "nn.max_pool2d":
        out = relay.nn.max_pool2d(
            out,
            pool_size=sizes,
            strides=strides,
            dilation=dilation,
            padding=padding,
            ceil_mode=ceil_mode,
            layout="NHWC",
        )
    elif typef == "nn.avg_pool2d":
        # Quantized average pooling: widen to int32, pool, then narrow back.
        if dtype in qnn_dtypes:
            out = relay.cast(out, "int32")
        out = relay.nn.avg_pool2d(
            out,
            pool_size=sizes,
            strides=strides,
            dilation=dilation,
            padding=padding,
            ceil_mode=ceil_mode,
            count_include_pad=count_include_pad,
            layout="NHWC",
        )
        if dtype in qnn_dtypes:
            out = relay.cast(out, dtype)
    elif typef == "nn.l2_pool2d":
        # L2 pooling is expressed as sqrt(avg_pool(x ** 2)).
        squared = relay.power(out, relay.const(2.0))
        pooled = relay.nn.avg_pool2d(
            squared,
            pool_size=sizes,
            strides=strides,
            padding=padding,
            ceil_mode=ceil_mode,
            count_include_pad=count_include_pad,
            layout="NHWC",
        )
        out = relay.sqrt(pooled)
    else:
        raise ValueError("Function not supported")
    return out
def _get_global_pooling_model(shape, dtype, typef, var_names):
    """Return a relay global-pooling expression (no parameters)."""
    qnn_dtypes = ("uint8", "int8")
    out = relay.var(next(var_names), shape=shape, dtype=dtype)
    if typef == "nn.global_max_pool2d":
        out = relay.nn.global_max_pool2d(out, layout="NHWC")
    elif typef == "nn.global_avg_pool2d":
        # Quantized global average pooling accumulates in int32.
        if dtype in qnn_dtypes:
            out = relay.cast(out, "int32")
        out = relay.nn.global_avg_pool2d(out, layout="NHWC")
        if dtype in qnn_dtypes:
            out = relay.cast(out, dtype)
    else:
        raise ValueError("Function not supported")
    return out
def _get_expected_pooling_codegen(
    shape, dtype, typef, sizes, strides, dilation, padding, ceil_mode, count_include_pad
):
    """Build the JSON fragments the ACL codegen is expected to emit for pooling."""
    if len(padding) == 2:
        padding = (padding[0], padding[1], padding[0], padding[1])
    output_shape = _calculate_output_shape(shape, sizes, padding, strides, dilation)
    attrs = {
        "num_inputs": "1",
        "num_outputs": "1",
        "layout": [["NHWC"]],
        "out_layout": [[""]],
        "shape": [[list(output_shape)]],
        "dtype": [[dtype]],
        "padding": [[str(p) for p in padding]],
        "strides": [[str(s) for s in strides]],
        "dilation": [[str(d) for d in dilation]],
        "pool_size": [[str(s) for s in sizes]],
        "ceil_mode": [[str(1 if ceil_mode else 0)]],
    }
    # Only the average-style pooling ops carry count_include_pad.
    if typef in ("nn.avg_pool2d", "nn.l2_pool2d"):
        attrs["count_include_pad"] = [["1" if count_include_pad else "0"]]
    kernel_node = {"op": "kernel", "name": typef, "inputs": [[0, 0, 0]], "attrs": attrs}
    input_node = {
        "op": "input",
        "name": "",
        "attrs": {"shape": [[list(shape)]], "dtype": [[dtype]]},
    }
    return [input_node, kernel_node]
def _get_expected_global_pooling_codegen(shape, dtype, typef):
node = {
"op": "kernel",
"name": typef,
"inputs": [[0, 0, 0]],
"attrs": {
"num_inputs": "1",
"num_outputs": "1",
"layout": [["NHWC"]],
"out_layout": [[""]],
"shape": [[[1, 1, 1, shape[3]]]],
"dtype": [[dtype]],
},
}
input = {"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[dtype]]}}
return [input, node]
def _get_low_high_atol_rtol(dtype):
if dtype == "float32":
low, high, atol, rtol = (-127, 128, 0.001, 0.001)
elif dtype == "uint8":
low, high, atol, rtol = (0, 255, 1, 0)
elif dtype == "int8":
low, high, atol, rtol = (-127, 128, 1, 0)
else:
pytest.fail(f"dtype not expected: {dtype}")
return low, high, atol, rtol
# fmt: off
@pytest.mark.parametrize(
    "typef,dtype,size,stride,dilation,pad,ceil_mode,count_include_pad,input_shape,expected_ops",
    [
        # Each row: op, dtype, pool size, strides, dilation, padding, ceil_mode,
        # count_include_pad, HWC input shape, expected (tvm_ops, acl_partitions).
        ("nn.max_pool2d", "float32", (3, 3), (2, 2), (1, 1), (0, 0), False, False, (27, 27, 512), (0, 1),),
        ("nn.max_pool2d", "float32", (2, 2), (2, 2), (1, 1), (0, 0), False, True, (16, 16, 16), (0, 1),),
        ("nn.max_pool2d", "float32", (3, 3), (2, 2), (1, 1), (1, 1), True, True, (15, 15, 16), (0, 1),),
        ("nn.max_pool2d", "float32", (2, 2), (2, 2), (1, 1), (0, 1), False, False, (16, 16, 16), (0, 1),),
        ("nn.max_pool2d", "uint8", (3, 3), (2, 2), (1, 1), (0, 1), False, False, (16, 16, 16), (0, 1),),
        ("nn.max_pool2d", "uint8", (2, 2), (2, 2), (1, 1), (1, 1), True, True, (15, 15, 16), (0, 1),),
        ("nn.max_pool2d", "uint8", (2, 2), (2, 2), (3, 2), (1, 1), True, True, (15, 15, 16), (1, 0),),
        ("nn.max_pool2d", "int8", (3, 3), (2, 2), (1, 1), (0, 1), False, False, (16, 16, 16), (0, 1),),
        ("nn.max_pool2d", "int8", (2, 2), (2, 2), (1, 1), (1, 1), True, True, (15, 15, 16), (0, 1),),
        ("nn.max_pool2d", "int8", (2, 2), (2, 2), (3, 2), (1, 1), True, True, (15, 15, 16), (1, 0),),
        ("nn.avg_pool2d", "float32", (2, 2), (2, 2), (1, 1), (1, 1), False, False, (16, 16, 16), (0, 1),),
        ("nn.avg_pool2d", "float32", (2, 2), (2, 2), (1, 1), (0, 0), False, True, (16, 16, 16), (0, 1),),
        ("nn.avg_pool2d", "float32", (3, 3), (2, 2), (3, 2), (0, 1), True, False, (15, 15, 16), (1, 0),),
        # 20.05: "exclude_padding equal false is not supported for AVG Pooling with padding on quantized types"
        # ["nn.avg_pool2d", uint8_dtype, (2, 2), (2, 2), (1, 1), False, True, (16, 16, 16)],
        ("nn.avg_pool2d", "uint8", (3, 3), (2, 2), (1, 1), (0, 1), False, False, (16, 16, 16), (0, 1),),
        ("nn.avg_pool2d", "int8", (3, 3), (2, 2), (1, 1), (0, 1), False, False, (16, 16, 16), (0, 1),),
        ("nn.l2_pool2d", "float32", (2, 2), (2, 2), (1, 1), (0, 1), True, False, (16, 16, 16), (0, 1),),
        ("nn.l2_pool2d", "float32", (3, 3), (2, 2), (1, 1), (0, 0), False, False, (16, 16, 16), (0, 1),),
        ("nn.l2_pool2d", "float32", (2, 2), (2, 2), (1, 1), (1, 1), False, True, (15, 15, 16), (0, 1),),
    ],
)
# fmt: on
def test_pooling(
    typef,
    dtype,
    size,
    stride,
    dilation,
    pad,
    ceil_mode,
    count_include_pad,
    input_shape,
    expected_ops,
):
    """Compare pooling results between native TVM and the ACL runtime."""
    Device.load("test_config.json")
    if skip_runtime_test():
        return
    device = Device()
    np.random.seed(0)
    low, high, atol, rtol = _get_low_high_atol_rtol(dtype)
    tvm_ops, acl_partitions = expected_ops
    # Prepend a batch dimension of 1 to the HWC input shape.
    shape = (1, *input_shape)
    outputs = []
    inputs = {
        "a": tvm.nd.array(np.random.uniform(low, high, shape).astype(dtype)),
    }
    func = _get_pooling_model(
        shape,
        dtype,
        typef,
        size,
        stride,
        dilation,
        pad,
        ceil_mode,
        count_include_pad,
        iter(inputs),
    )
    # Captured for inclusion in failure messages by build_and_run/verify.
    config = {
        "size": size,
        "stride": stride,
        "shape": shape,
        "pooling type": typef,
        "dtype": dtype,
        "padding": pad,
        "dilation": dilation,
        "ceil_mode": ceil_mode,
        "count_include_pad": count_include_pad,
        "inputs": inputs,
    }
    # uint8 outputs are additionally checked for saturation at 0/255.
    verify_saturation = True if dtype == "uint8" else False
    for acl in [False, True]:
        outputs.append(
            build_and_run(
                func,
                inputs,
                1,
                None,
                device,
                enable_acl=acl,
                tvm_ops=tvm_ops,
                acl_partitions=acl_partitions,
                config=config,
            )[0]
        )
    verify(outputs, atol=atol, rtol=rtol, config=config, verify_saturation=verify_saturation)
@pytest.mark.parametrize(
    "typef,dtype,input_shape",
    [
        ["nn.global_max_pool2d", "float32", (8, 8, 16)],
        ["nn.global_max_pool2d", "float32", (9, 9, 16)],
        ["nn.global_max_pool2d", "uint8", (8, 8, 16)],
        ["nn.global_max_pool2d", "uint8", (9, 9, 16)],
        ["nn.global_max_pool2d", "int8", (8, 8, 16)],
        ["nn.global_max_pool2d", "int8", (9, 9, 16)],
        ["nn.global_avg_pool2d", "float32", (8, 8, 16)],
        ["nn.global_avg_pool2d", "float32", (9, 9, 16)],
        ["nn.global_avg_pool2d", "uint8", (8, 8, 16)],
        ["nn.global_avg_pool2d", "uint8", (9, 9, 16)],
        ["nn.global_avg_pool2d", "int8", (8, 8, 16)],
        ["nn.global_avg_pool2d", "int8", (9, 9, 16)],
    ],
)
def test_global_pooling(typef, dtype, input_shape):
    """Compare global-pooling results between native TVM and the ACL runtime."""
    Device.load("test_config.json")
    if skip_runtime_test():
        return
    device = Device()
    np.random.seed(0)
    # Bug fix: the unpacking order was (low, high, rtol, atol), swapping the
    # tolerances relative to _get_low_high_atol_rtol's (low, high, atol, rtol)
    # return value. For quantized dtypes this made the comparison use
    # atol=0, rtol=1 — i.e. almost any result passed.
    low, high, atol, rtol = _get_low_high_atol_rtol(dtype)
    shape = (1, *input_shape)
    outputs = []
    inputs = {
        "a": tvm.nd.array(np.random.uniform(low, high, shape).astype(dtype)),
    }
    func = _get_global_pooling_model(shape, dtype, typef, iter(inputs))
    config = {
        "shape": shape,
        "pooling type": typef,
        "dtype": dtype,
    }
    # Quantized outputs are additionally checked for saturation.
    verify_saturation = True if dtype in ("uint8", "int8") else False
    for acl in [False, True]:
        outputs.append(
            build_and_run(func, inputs, 1, None, device, enable_acl=acl, config=config)[0]
        )
    verify(outputs, atol=atol, rtol=rtol, config=config, verify_saturation=verify_saturation)
# fmt: off
@pytest.mark.parametrize(
    "typef,dtype,size,stride,dilation,pad,ceil_mode,count_include_pad,input_shape,expected_ops",
    [
        ("nn.max_pool2d", "float32", (2, 2), (2, 2), (1, 1), (0, 0), False, True, (16, 16, 16), (0, 1),),
        ("nn.max_pool2d", "float32", (3, 3), (2, 2), (1, 1), (1, 1), True, True, (15, 15, 16), (0, 1),),
        ("nn.max_pool2d", "float32", (2, 2), (2, 2), (1, 1), (0, 1), False, False, (16, 16, 16), (0, 1),),
        ("nn.max_pool2d", "uint8", (3, 3), (2, 2), (1, 1), (0, 1), False, False, (16, 16, 16), (0, 1),),
        ("nn.max_pool2d", "uint8", (2, 2), (2, 2), (1, 1), (1, 1), True, True, (15, 15, 16), (0, 1),),
        ("nn.max_pool2d", "uint8", (2, 2), (2, 2), (3, 2), (1, 1), True, True, (15, 15, 16), (1, 0),),
        ("nn.max_pool2d", "int8", (3, 3), (2, 2), (1, 1), (0, 1), False, False, (16, 16, 16), (0, 1),),
        ("nn.max_pool2d", "int8", (2, 2), (2, 2), (1, 1), (1, 1), True, True, (15, 15, 16), (0, 1),),
        ("nn.max_pool2d", "int8", (2, 2), (2, 2), (3, 2), (1, 1), True, True, (15, 15, 16), (1, 0),),
        ("nn.avg_pool2d", "float32", (2, 2), (2, 2), (1, 1), (1, 1), False, False, (16, 16, 16), (0, 1),),
        ("nn.avg_pool2d", "float32", (2, 2), (2, 2), (1, 1), (1, 1), False, False, (16, 16, 16), (0, 1),),
        ("nn.avg_pool2d", "float32", (2, 2), (2, 2), (1, 1), (0, 0), False, True, (16, 16, 16), (0, 1),),
        ("nn.avg_pool2d", "float32", (3, 3), (2, 2), (3, 2), (0, 1), True, False, (15, 15, 16), (1, 0),),
        ("nn.avg_pool2d", "uint8", (2, 2), (2, 2), (1, 1), (1, 1), False, True, (16, 16, 16), (0, 1),),
        ("nn.avg_pool2d", "uint8", (3, 3), (2, 2), (1, 1), (0, 1), False, False, (16, 16, 16), (0, 1),),
        ("nn.avg_pool2d", "int8", (2, 2), (2, 2), (1, 1), (1, 1), False, True, (16, 16, 16), (0, 1),),
        ("nn.avg_pool2d", "int8", (3, 3), (2, 2), (1, 1), (0, 1), False, False, (16, 16, 16), (0, 1),),
        ("nn.l2_pool2d", "float32", (2, 2), (2, 2), (1, 1), (0, 1), True, False, (15, 15, 16), (0, 1),),
        ("nn.l2_pool2d", "float32", (3, 3), (2, 2), (1, 1), (0, 0), False, False, (16, 16, 16), (0, 1),),
        ("nn.l2_pool2d", "float32", (2, 2), (2, 2), (1, 1), (1, 1), False, True, (15, 15, 16), (0, 1),),
    ],
)
# fmt: on
def test_codegen_pooling(
    typef,
    dtype,
    size,
    stride,
    dilation,
    pad,
    ceil_mode,
    count_include_pad,
    input_shape,
    expected_ops,
):
    """Check the JSON emitted by the ACL codegen for pooling ops."""
    if skip_codegen_test():
        return
    # The bounds are unused here; the call still validates the dtype
    # (it fails the test for an unexpected dtype).
    low, high, _, _ = _get_low_high_atol_rtol(dtype)
    tvm_ops, acl_partitions = expected_ops
    shape = (1, *input_shape)
    inputs = {"a"}
    # NOTE(review): the parametrized ceil_mode/count_include_pad values are
    # not forwarded — the model is always built with both set to False.
    # Confirm whether that is intentional.
    args = (shape, dtype, typef, size, stride, dilation, pad, False, False)
    func = _get_pooling_model(*args, iter(inputs))
    exp_codegen = _get_expected_pooling_codegen(*args)
    verify_codegen(func, exp_codegen, acl_partitions, tvm_ops)
@pytest.mark.parametrize(
    "typef,dtype,input_shape",
    [
        ("nn.global_max_pool2d", "float32", (8, 8, 16)),
        ("nn.global_max_pool2d", "float32", (9, 9, 16)),
        ("nn.global_max_pool2d", "uint8", (8, 8, 16)),
        ("nn.global_max_pool2d", "uint8", (9, 9, 16)),
        ("nn.global_max_pool2d", "int8", (8, 8, 16)),
        ("nn.global_max_pool2d", "int8", (9, 9, 16)),
        ("nn.global_avg_pool2d", "float32", (8, 8, 16)),
        ("nn.global_avg_pool2d", "float32", (9, 9, 16)),
        ("nn.global_avg_pool2d", "uint8", (8, 8, 16)),
        ("nn.global_avg_pool2d", "uint8", (9, 9, 16)),
        ("nn.global_avg_pool2d", "int8", (8, 8, 16)),
        ("nn.global_avg_pool2d", "int8", (9, 9, 16)),
    ],
)
def test_codegen_global_pooling(typef, dtype, input_shape):
    """Check the JSON emitted by the ACL codegen for global pooling ops."""
    if skip_codegen_test():
        return
    # The bounds are unused here; the call still validates the dtype
    # (it fails the test for an unexpected dtype).
    low, high, _, _ = _get_low_high_atol_rtol(dtype)
    shape = (1, *input_shape)
    inputs = {"a"}
    args = (shape, dtype, typef)
    func = _get_global_pooling_model(*args, iter(inputs))
    exp_codegen = _get_expected_global_pooling_codegen(*args)
    verify_codegen(func, exp_codegen, 1)
if __name__ == "__main__":
    # Bug fix: every test in this file is pytest-parametrized, so calling
    # them directly without arguments raised TypeError. Let pytest collect
    # and run this file instead.
    pytest.main([__file__])
| 15,529 | 35.97619 | 111 | py |
tvm | tvm-main/tests/python/contrib/test_arm_compute_lib/test_concatenate.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library integration concatenate tests."""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm import testing
from test_arm_compute_lib.infrastructure import (
skip_runtime_test,
skip_codegen_test,
build_and_run,
verify,
verify_codegen,
)
from test_arm_compute_lib.infrastructure import Device
def _get_model(input_shape_a, input_shape_b, input_shape_c, axis, dtype, var_names):
    """Return a relay concatenate over three inputs (this model has no params)."""
    tensors = [
        relay.var(next(var_names), shape=shape, dtype=dtype)
        for shape in (input_shape_a, input_shape_b, input_shape_c)
    ]
    return relay.concatenate(tensors, axis)
def _get_expected_codegen(input_shape_a, input_shape_b, input_shape_c, axis, dtype):
node = {
"op": "kernel",
"name": "concatenate",
"inputs": [
[0, 0, 0],
[1, 0, 0],
[2, 0, 0],
],
"attrs": {
"num_outputs": "1",
"num_inputs": "3",
"dtype": [[dtype]],
"axis": [[str(axis)]],
"shape": [[[6, 234, 234, 256]]],
},
}
input_a = {
"op": "input",
"name": "",
"attrs": {
"shape": [[input_shape_a]],
"dtype": [[dtype]],
},
}
input_b = {
"op": "input",
"name": "",
"attrs": {
"shape": [[input_shape_b]],
"dtype": [[dtype]],
},
}
input_c = {
"op": "input",
"name": "",
"attrs": {
"shape": [[input_shape_c]],
"dtype": [[dtype]],
},
}
return [input_a, input_b, input_c, node]
@pytest.mark.parametrize(
    "input_shape_a, input_shape_b, input_shape_c, axis, dtype",
    [
        ([1, 234, 234, 256], [2, 234, 234, 256], [3, 234, 234, 256], 0, "float32"),
        ([1, 1, 234, 256], [1, 2, 234, 256], [1, 3, 234, 256], 1, "float32"),
        ([1, 234, 234, 1], [1, 234, 234, 2], [1, 234, 234, 3], -1, "float32"),
        ([1, 234, 234, 256], [2, 234, 234, 256], [3, 234, 234, 256], -4, "float32"),
        ([1, 234, 234, 256], [2, 234, 234, 256], [3, 234, 234, 256], 0, "uint8"),
        ([1, 1, 234, 256], [1, 2, 234, 256], [1, 3, 234, 256], 1, "uint8"),
        ([1, 234, 234, 1], [1, 234, 234, 2], [1, 234, 234, 3], -1, "uint8"),
        ([1, 234, 234, 256], [2, 234, 234, 256], [3, 234, 234, 256], -4, "uint8"),
        ([1, 234, 234, 256], [2, 234, 234, 256], [3, 234, 234, 256], 0, "int8"),
        ([1, 1, 234, 256], [1, 2, 234, 256], [1, 3, 234, 256], 1, "int8"),
        ([1, 234, 234, 1], [1, 234, 234, 2], [1, 234, 234, 3], -1, "int8"),
        ([1, 234, 234, 256], [2, 234, 234, 256], [3, 234, 234, 256], -4, "int8"),
    ],
)
def test_concatenate(input_shape_a, input_shape_b, input_shape_c, axis, dtype):
    """Compare concatenate results between native TVM and the ACL runtime."""
    Device.load("test_config.json")
    if skip_runtime_test():
        return
    device = Device()
    np.random.seed(0)
    outputs = []
    # NOTE(review): randn yields standard-normal floats; casting to
    # uint8/int8 collapses almost all values into a narrow range (with
    # wraparound for negatives on uint8) — confirm this coverage is intended.
    inputs = {
        "a": tvm.nd.array(np.random.randn(*input_shape_a).astype(dtype)),
        "b": tvm.nd.array(np.random.randn(*input_shape_b).astype(dtype)),
        "c": tvm.nd.array(np.random.randn(*input_shape_c).astype(dtype)),
    }
    func = _get_model(
        inputs["a"].shape, inputs["b"].shape, inputs["c"].shape, axis, dtype, iter(inputs)
    )
    # concatenate is on the default deny-list; re-enable it for this test.
    for acl in [False, True]:
        outputs.append(
            build_and_run(func, inputs, 1, None, device, enable_acl=acl, disabled_ops=[])[0]
        )
    config = {
        "input_shape_a": input_shape_a,
        "input_shape_b": input_shape_b,
        "input_shape_c": input_shape_c,
        "axis": axis,
        "dtype": dtype,
    }
    verify(outputs, atol=1e-7, rtol=1e-7, config=config)
def test_codegen_concatenate():
    """Check the JSON emitted by the ACL codegen for concatenate."""
    if skip_codegen_test():
        return
    shape_a = [1, 234, 234, 256]
    shape_b = [2, 234, 234, 256]
    shape_c = [3, 234, 234, 256]
    axis = 0
    inputs = {"a", "b", "c"}
    for dtype in ["float32"]:
        func = _get_model(shape_a, shape_b, shape_c, axis, dtype, iter(inputs))
        exp_codegen = _get_expected_codegen(shape_a, shape_b, shape_c, axis, dtype)
        verify_codegen(func, exp_codegen, 1, disabled_ops=[])
if __name__ == "__main__":
    # Bug fix: test_concatenate is pytest-parametrized, so calling it directly
    # without arguments raised TypeError. Let pytest drive the file instead.
    pytest.main([__file__])
| 5,165 | 31.2875 | 92 | py |
tvm | tvm-main/tests/python/contrib/test_arm_compute_lib/infrastructure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from itertools import zip_longest, combinations
import json
import os
import warnings
import numpy as np
import tvm
from tvm import relay
from tvm import rpc
from tvm.contrib import graph_executor
from tvm.relay.op.contrib import arm_compute_lib
from tvm.contrib import utils
from tvm.autotvm.measure import request_remote
QNN_DTYPES = ("uint8", "int8")
class Device:
    """
    Configuration for Arm Compute Library tests.
    Check tests/python/contrib/arm_compute_lib/ for the presence of an test_config.json file.
    This file can be used to override the default configuration here which will attempt to run the Arm
    Compute Library runtime tests locally if the runtime is available. Changing the configuration
    will allow these runtime tests to be offloaded to a remote Arm device via a tracker for example.
    Notes
    -----
    The test configuration will be loaded once when the class is created. If the configuration
    changes between tests, any changes will not be picked up.
    Parameters
    ----------
    device : RPCSession
        Allows tests to connect to and use remote device.
    Attributes
    ----------
    connection_type : str
        Details the type of RPC connection to use. Options:
        local - Use the local device,
        tracker - Connect to a tracker to request a remote device,
        remote - Connect to a remote device directly.
    host : str
        Specify IP address or hostname of remote target.
    port : int
        Specify port number of remote target.
    target : str
        The compilation target.
    device_key : str
        The device key of the remote target. Use when connecting to a remote device via a tracker.
    cross_compile : str
        Specify path to cross compiler to use when connecting a remote device from a non-arm platform.
    """
    # Class-level defaults; `load` overrides them from test_config.json.
    connection_type = "local"
    host = "127.0.0.1"
    port = 9090
    target = "llvm -mtriple=aarch64-linux-gnu -mattr=+neon"
    device_key = ""
    cross_compile = ""
    def __init__(self):
        """Keep remote device for lifetime of object."""
        self.device = self._get_remote()
    @classmethod
    def _get_remote(cls):
        """Get a remote (or local) device to use for testing."""
        if cls.connection_type == "tracker":
            device = request_remote(cls.device_key, cls.host, cls.port, timeout=1000)
        elif cls.connection_type == "remote":
            device = rpc.connect(cls.host, cls.port)
        elif cls.connection_type == "local":
            device = rpc.LocalSession()
        else:
            raise ValueError(
                "connection_type in test_config.json should be one of: " "local, tracker, remote."
            )
        return device
    @classmethod
    def load(cls, file_name):
        """Load test config
        Load the test configuration by looking for file_name relative
        to the test_arm_compute_lib directory.
        """
        # Resolve the config path relative to this file, not the CWD.
        location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        config_file = os.path.join(location, file_name)
        if not os.path.exists(config_file):
            warnings.warn(
                "Config file doesn't exist, resuming Arm Compute Library tests with default config."
            )
            return
        with open(config_file, mode="r") as config:
            test_config = json.load(config)
        # The first four keys are mandatory (KeyError if absent);
        # device_key/cross_compile fall back to the empty string.
        cls.connection_type = test_config["connection_type"]
        cls.host = test_config["host"]
        cls.port = test_config["port"]
        cls.target = test_config["target"]
        cls.device_key = test_config.get("device_key") or ""
        cls.cross_compile = test_config.get("cross_compile") or ""
def get_low_high_atol_rtol(dtype):
    """Return a tuple with boundary values and tolerance for ACL tests.

    Parameters
    ----------
    dtype : str
        One of "float32", "uint8" or "int8".

    Returns
    -------
    tuple
        (low, high, atol, rtol) data bounds and comparison tolerances.

    Raises
    ------
    ValueError
        If *dtype* is not one of the supported dtypes. (Previously a bare
        Exception; ValueError is a subclass of Exception, so existing
        handlers still catch it.)
    """
    if dtype == "float32":
        low, high, atol, rtol = (-127, 128, 0.001, 0.001)
    elif dtype == "uint8":
        low, high, atol, rtol = (0, 255, 1, 0)
    elif dtype == "int8":
        low, high, atol, rtol = (-127, 128, 1, 0)
    else:
        raise ValueError(f"dtype not expected: {dtype}")
    return low, high, atol, rtol
def get_cpu_op_count(mod):
    """Traverse the graph and count the relay ops left on TVM (not offloaded)."""

    class OpCounter(tvm.relay.ExprVisitor):
        """Visitor that counts every call to a relay op."""

        def __init__(self):
            super().__init__()
            self.count = 0

        def visit_call(self, call):
            if isinstance(call.op, tvm.ir.Op):
                self.count += 1
            super().visit_call(call)

    counter = OpCounter()
    counter.visit(mod["main"])
    return counter.count
def skip_runtime_test():
    """Skip test if it requires the ACL runtime and it's not present.

    Returns
    -------
    bool
        True when the test should be skipped, False otherwise. (Previously
        the non-skip path fell off the end returning None, which is falsy
        but unclear.)
    """
    # ACL codegen not present.
    if not tvm.get_global_func("relay.ext.arm_compute_lib", True):
        print("Skip because Arm Compute Library codegen is not available.")
        return True
    # Running locally but the ACL runtime is not present.
    # (Simplified from the original double negative
    # `not Device.connection_type != "local"`.)
    # Note: Ensure that the device config has been loaded before this check.
    if Device.connection_type == "local" and not arm_compute_lib.is_arm_compute_runtime_enabled():
        print("Skip because runtime isn't present or a remote device isn't being used.")
        return True
    return False
def skip_codegen_test():
    """Skip test if it requires the ACL codegen and it's not present.

    Returns
    -------
    bool
        True when the test should be skipped, False otherwise. (Previously
        the non-skip path returned None implicitly.)
    """
    if not tvm.get_global_func("relay.ext.arm_compute_lib", True):
        print("Skip because Arm Compute Library codegen is not available.")
        return True
    return False
def build_module(
    mod,
    target,
    params=None,
    enable_acl=True,
    tvm_ops=0,
    acl_partitions=1,
    disabled_ops=None,
):
    """Build module with option to build for ACL.

    Parameters
    ----------
    mod : tvm.IRModule or relay.expr.Call
        The module (or bare expression) to build.
    target : str
        Compilation target.
    params : dict, optional
        Model parameters.
    enable_acl : bool
        Partition the graph for Arm Compute Library before building.
    tvm_ops : int
        Expected number of ops left on TVM after partitioning.
    acl_partitions : int
        Expected number of ACL partitions.
    disabled_ops : list, optional
        Ops excluded from ACL offloading; defaults to ["concatenate"].
        (Fixed: was a mutable default argument.)
    """
    if disabled_ops is None:
        disabled_ops = ["concatenate"]
    if isinstance(mod, tvm.relay.expr.Call):
        mod = tvm.IRModule.from_expr(mod)
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
        if enable_acl:
            mod = arm_compute_lib.partition_for_arm_compute_lib(
                mod, params, disabled_ops=disabled_ops
            )
            # Sanity-check how the partitioner split the graph.
            tvm_op_count = get_cpu_op_count(mod)
            assert tvm_op_count == tvm_ops, "Got {} TVM operators, expected {}".format(
                tvm_op_count, tvm_ops
            )
            partition_count = 0
            for global_var in mod.get_global_vars():
                if "arm_compute_lib" in global_var.name_hint:
                    partition_count += 1
            assert (
                acl_partitions == partition_count
            ), "Got {} Arm Compute Library partitions, expected {}".format(
                partition_count, acl_partitions
            )
        # Clear the compile engine cache so repeated builds don't interfere.
        relay.backend.te_compiler.get().clear()
        return relay.build(mod, target=target, params=params)
def build_and_run(
    mod,
    inputs,
    outputs,
    params,
    device,
    enable_acl=True,
    no_runs=1,
    tvm_ops=0,
    acl_partitions=1,
    config=None,
    disabled_ops=None,
):
    """Build and run the relay module.

    ``outputs`` is the number of outputs to fetch per run and ``no_runs``
    the number of executions; returns one list of NDArrays per run.
    """
    if config is None:
        config = {}
    # Fixed: mutable default argument for the op deny-list.
    if disabled_ops is None:
        disabled_ops = ["concatenate"]
    try:
        lib = build_module(
            mod, device.target, params, enable_acl, tvm_ops, acl_partitions, disabled_ops
        )
    except Exception as e:
        err_msg = "The module could not be built.\n"
        if config:
            err_msg += f"The test failed with the following parameters: {config}\n"
        err_msg += str(e)
        # Chain the original exception so the root cause isn't lost.
        raise Exception(err_msg) from e
    lib = update_lib(lib, device.device, device.cross_compile)
    gen_module = graph_executor.GraphModule(lib["default"](device.device.cpu(0)))
    gen_module.set_input(**inputs)
    out = []
    for _ in range(no_runs):
        gen_module.run()
        out.append([gen_module.get_output(i) for i in range(outputs)])
    return out
def update_lib(lib, device, cross_compile):
    """Export the compiled library and load it on the remote/local device."""
    file_name = "mod.so"
    tmp_dir = utils.tempdir()
    file_path = tmp_dir.relpath(file_name)
    # Pass the cross compiler only when one is configured.
    export_kwargs = {"cc": cross_compile} if cross_compile else {}
    lib.export_library(file_path, **export_kwargs)
    device.upload(file_path)
    return device.load_module(file_name)
def verify(answers, atol, rtol, verify_saturation=False, config=None):
    """Compare the array of answers. Each entry is a list of outputs."""
    config = {} if config is None else config
    if len(answers) < 2:
        raise RuntimeError(f"No results to compare: expected at least two, found {len(answers)}")
    # Walk the corresponding outputs of every answer and compare each pair.
    for answer in zip_longest(*answers):
        for reference, candidate in combinations(answer, 2):
            try:
                if verify_saturation:
                    reference_values = reference.numpy()
                    saturation_limit = 0.25 * reference_values.size
                    assert (
                        np.count_nonzero(reference_values == 255) < saturation_limit
                    ), "Output is saturated: {}".format(reference)
                    assert (
                        np.count_nonzero(reference_values == 0) < saturation_limit
                    ), "Output is saturated: {}".format(reference)
                tvm.testing.assert_allclose(
                    reference.numpy(), candidate.numpy(), rtol=rtol, atol=atol
                )
            except AssertionError as e:
                err_msg = "Results not within the acceptable tolerance.\n"
                if config:
                    err_msg += f"The test failed with the following parameters: {config}\n"
                err_msg += str(e)
                raise AssertionError(err_msg)
def extract_acl_modules(module):
    """Get the ACL module(s) from llvm module."""
    return [
        imported
        for imported in module.get_lib().imported_modules
        if imported.type_key == "arm_compute_lib"
    ]
def verify_codegen(
    module,
    known_good_codegen,
    num_acl_modules=1,
    tvm_ops=0,
    target="llvm -mtriple=aarch64-linux-gnu -mattr=+neon",
    disabled_ops=None,
):
    """Check acl codegen against a known good output.

    Parameters
    ----------
    module : tvm.IRModule
        Relay program to compile and inspect.
    known_good_codegen : list
        Expected JSON node list emitted by the ACL codegen.
    num_acl_modules : int
        Expected number of ACL runtime modules.
    tvm_ops : int
        Expected number of operators left on TVM.
    target : str
        Compilation target triple.
    disabled_ops : list of str, optional
        Operators excluded from ACL offloading; defaults to ["concatenate"].

    Raises
    ------
    AssertionError
        If the module count or the generated JSON differs from the expectation.
    """
    # Use None as the default to avoid a shared mutable default argument.
    if disabled_ops is None:
        disabled_ops = ["concatenate"]
    module = build_module(
        module,
        target,
        tvm_ops=tvm_ops,
        acl_partitions=num_acl_modules,
        disabled_ops=disabled_ops,
    )
    acl_modules = extract_acl_modules(module)
    assert len(acl_modules) == num_acl_modules, (
        f"The number of Arm Compute Library modules produced ({len(acl_modules)}) does not "
        f"match the expected value ({num_acl_modules})."
    )
    for mod in acl_modules:
        source = mod.get_source("json")
        codegen = json.loads(source)["nodes"]
        # remove input and const names as these cannot be predetermined
        for node in codegen:
            if node["op"] in ("input", "const"):
                node["name"] = ""
        codegen_str = json.dumps(codegen, sort_keys=True, indent=2)
        known_good_codegen_str = json.dumps(known_good_codegen, sort_keys=True, indent=2)
        assert codegen_str == known_good_codegen_str, (
            f"The JSON produced by codegen does not match the expected result. \n"
            f"Actual={codegen_str} \n"
            f"Expected={known_good_codegen_str}"
        )
| 11,792 | 33.482456 | 102 | py |
tvm | tvm-main/tests/python/contrib/test_vitis_ai/test_vitis_ai_runtime_cpu_part.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name, W0611, C0413
"""Vitis-AI runtime test for CPU only part
This test verifies as much as possible whether the a model can be correctly offloaded
and executed for Vitis-AI acceleration. This entails:
- Annotating and partitioning model for Vitis-AI acceleration
- Building a Vitis-AI PyXIR runtime module with on-the-fly quantization enabled
- Run first iteration of on-the-fly quantization flow. This will always be run
on CPU as the first N (parameter) will be used for collecting calibration data
for quantization.
NOTE This is not a full end-to-end test as we need the full Vitis-AI docker environment
and access to an FPGA instance for that. This test verifies the Vitis-AI flow as much as
possible without requiring access to dedicated docker environment and/or hardware setup.
NOTE Quantization is not being tested (we need to be inside Vitis-AI docker environment
for that) buth the internal representation used for quantization is being generated and
functionally tested (CPU).
"""
import sys
import numpy as np
import pytest
pytest.importorskip("pyxir")
import pyxir.contrib.target.DPUCADF8H
import pyxir.contrib.target.DPUCVDX8H
import pyxir.contrib.target.DPUCZDX8G
import tvm
import tvm.relay.testing
from tvm import relay
from tvm.testing import requires_vitis_ai
from .infrastructure import verify_result
@requires_vitis_ai
@pytest.mark.parametrize("dpu_target", ["DPUCADF8H", "DPUCVDX8H", "DPUCZDX8G-zcu104"])
def test_extern_vitis_ai_resnet18(dpu_target):
    """Test first part of Vitis AI on-the-fly quantization runtime with ResNet 18 model"""
    dtype = "float32"
    input_shape = (1, 3, 224, 224)
    # Two identical workloads: one is partitioned for Vitis AI, the other
    # serves as the pure-CPU reference.
    mod, params = relay.testing.resnet.get_workload(num_layers=18, batch_size=1)
    ref_mod, params = relay.testing.resnet.get_workload(num_layers=18, batch_size=1)
    input_data = np.random.uniform(0, 1, input_shape).astype(dtype)
    ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu(0)).evaluate()(
        input_data, **params
    )
    verify_result(
        mod,
        {"data": input_data},
        (1, 1000),
        ref_res.numpy(),
        tol=1e-5,
        params=params,
        dpu_target=dpu_target,
        tvm_ops=7,
    )
if __name__ == "__main__":
    # Allow running this test file as a standalone script.
    if sys.platform == "win32":
        print("Skip test on Windows for now")
        sys.exit(0)
    tvm.testing.main()
| 3,200 | 35.375 | 90 | py |
tvm | tvm-main/tests/python/contrib/test_vitis_ai/test_vitis_ai_codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name, W0611, C0413
"""Vitis-AI codegen tests"""
import sys
import numpy as np
import pytest
pytest.importorskip("pyxir")
import pyxir.contrib.target.DPUCADF8H
import pyxir.contrib.target.DPUCAHX8H
import pyxir.contrib.target.DPUCAHX8L
import pyxir.contrib.target.DPUCVDX8G
import pyxir.contrib.target.DPUCVDX8H
import pyxir.contrib.target.DPUCZDX8G
import tvm
from tvm import relay
from tvm.testing import requires_vitis_ai
from tvm.contrib.target import vitis_ai
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.contrib.vitis_ai import annotation
from .infrastructure import verify_codegen
def set_func_attr(func, compile_name, symbol_name):
    """Attach the attributes that mark *func* as an external compiler partition."""
    attributes = {
        "Primitive": tvm.tir.IntImm("int32", 1),
        "Inline": tvm.tir.IntImm("int32", 1),
        "Compiler": compile_name,
        "global_symbol": symbol_name,
    }
    for key, value in attributes.items():
        func = func.with_attr(key, value)
    return func
@requires_vitis_ai
@pytest.mark.parametrize(
    "dpu_target",
    ["DPUCADF8H", "DPUCAHX8H-u50", "DPUCAHX8L", "DPUCVDX8H", "DPUCVDX8G", "DPUCZDX8G-zcu104"],
)
def test_conv2d(dpu_target):
    """Test conv2d operator for Vitis AI DPU targets"""
    inp = relay.var("x", shape=(1, 3, 224, 224))
    kernel = relay.const(np.zeros((16, 3, 3, 3), dtype="float32"))
    conv = relay.nn.conv2d(inp, kernel, strides=[2, 2], padding=[1, 1, 1, 1], kernel_size=[3, 3])
    mod = tvm.IRModule.from_expr(relay.Function([inp], conv))
    params = {"w": np.random.rand(16, 3, 3, 3).astype("float32")}
    verify_codegen(mod, params=params, dpu_target=dpu_target, tvm_ops=2)
@requires_vitis_ai
@pytest.mark.parametrize("dpu_target", ["DPUCAHX8L", "DPUCZDX8G-zcu104"])
def test_depthwise_conv(dpu_target):
    """Test depthwise_conv operator for Vitis-AI DPUCZDX8G-zcu104 target"""
    dtype = "float32"
    data = relay.var("data", shape=(1, 32, 14, 14), dtype=dtype)
    weights = relay.var("weights", shape=(32, 1, 3, 3), dtype=dtype)
    # groups == channels makes this a depthwise convolution.
    conv = relay.nn.conv2d(data, weights, kernel_size=(3, 3), padding=(1, 1), groups=32)
    mod = tvm.IRModule.from_expr(relay.Function([data, weights], conv))
    params = {"weights": np.random.randn(32, 1, 3, 3).astype(dtype)}
    verify_codegen(mod, params=params, dpu_target=dpu_target, tvm_ops=2)
@requires_vitis_ai
@pytest.mark.parametrize(
    "dpu_target",
    ["DPUCADF8H", "DPUCAHX8H-u50", "DPUCAHX8L", "DPUCVDX8H", "DPUCVDX8G", "DPUCZDX8G-zcu104"],
)
def test_bias_add(dpu_target):
    """Test bias_add operator for Vitis AI DPU targets"""
    dtype = "float32"
    data = relay.var("data", shape=(1, 32, 14, 14), dtype=dtype)
    bias = relay.var("bias", relay.TensorType((32,), dtype))
    mod = tvm.IRModule.from_expr(relay.Function([data, bias], relay.nn.bias_add(data, bias)))
    params = {"bias": np.random.randn(32).astype(dtype)}
    verify_codegen(mod, params=params, dpu_target=dpu_target)
@requires_vitis_ai
@pytest.mark.parametrize(
    "dpu_target",
    ["DPUCADF8H", "DPUCAHX8H-u50", "DPUCAHX8L", "DPUCVDX8H", "DPUCVDX8G", "DPUCZDX8G-zcu104"],
)
def test_relu(dpu_target):
    """Test relu operator for Vitis AI DPU targets"""
    inp = relay.var("x", shape=(10, 10))
    mod = tvm.IRModule.from_expr(relay.Function([inp], relay.nn.relu(inp)))
    # A lone relu is not offloaded: zero Vitis-AI modules, one TVM op.
    verify_codegen(mod, dpu_target=dpu_target, num_vitis_ai_modules=0, tvm_ops=1)
@requires_vitis_ai
@pytest.mark.parametrize(
    "dpu_target",
    ["DPUCADF8H", "DPUCAHX8H-u50", "DPUCAHX8L", "DPUCVDX8H", "DPUCVDX8G", "DPUCZDX8G-zcu104"],
)
def test_batchnorm(dpu_target):
    """Test batchnorm operator for Vitis AI DPU targets"""
    data = relay.var("data", shape=(1, 16, 112, 112))
    gamma = relay.var("bn_gamma", relay.TensorType((16,), "float32"))
    beta = relay.var("bn_beta", relay.TensorType((16,), "float32"))
    moving_mean = relay.var("bn_mean", relay.TensorType((16,), "float32"))
    moving_var = relay.var("bn_var", relay.TensorType((16,), "float32"))
    bn = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var)
    # batch_norm returns a tuple; only the normalized output is used.
    func = relay.Function([data, gamma, beta, moving_mean, moving_var], bn[0])
    mod = tvm.IRModule.from_expr(func)
    params = {
        name: np.random.rand(16).astype("float32")
        for name in ("bn_gamma", "bn_beta", "bn_mean", "bn_var")
    }
    verify_codegen(mod, params=params, dpu_target=dpu_target)
@requires_vitis_ai
@pytest.mark.parametrize(
    "dpu_target",
    ["DPUCADF8H", "DPUCAHX8H-u50", "DPUCAHX8L", "DPUCVDX8H", "DPUCVDX8G", "DPUCZDX8G-zcu104"],
)
def test_add(dpu_target):
    """Test add operator for Vitis AI DPU targets"""
    inp = relay.var("x", shape=(10, 10))
    mod = tvm.IRModule.from_expr(relay.Function([inp], relay.add(inp, inp)))
    verify_codegen(mod, dpu_target=dpu_target)
@requires_vitis_ai
@pytest.mark.parametrize(
    "dpu_target",
    ["DPUCADF8H", "DPUCAHX8H-u50", "DPUCAHX8L", "DPUCVDX8H", "DPUCVDX8G", "DPUCZDX8G-zcu104"],
)
def test_global_avg_pool2d(dpu_target):
    """Test global_avg_pool2d operator for Vitis AI DPU targets"""
    inp = relay.var("x", shape=(10, 10, 7, 7))
    mod = tvm.IRModule.from_expr(relay.Function([inp], relay.nn.global_avg_pool2d(inp)))
    verify_codegen(mod, dpu_target=dpu_target)
@requires_vitis_ai
@pytest.mark.parametrize(
    "dpu_target",
    ["DPUCADF8H", "DPUCAHX8H-u50", "DPUCAHX8L", "DPUCVDX8H", "DPUCVDX8G", "DPUCZDX8G-zcu104"],
)
def test_avg_pool2d(dpu_target):
    """Test avg_pool2d for operator Vitis AI DPU targets"""
    inp = relay.var("x", shape=(10, 10, 10, 10))
    pooled = relay.nn.avg_pool2d(inp, pool_size=(3, 3))
    mod = tvm.IRModule.from_expr(relay.Function([inp], pooled))
    verify_codegen(mod, dpu_target=dpu_target)
@requires_vitis_ai
@pytest.mark.parametrize(
    "dpu_target",
    ["DPUCADF8H", "DPUCAHX8H-u50", "DPUCAHX8L", "DPUCVDX8H", "DPUCVDX8G", "DPUCZDX8G-zcu104"],
)
def test_max_pool2d(dpu_target):
    """Test max_pool2d for operator Vitis AI DPU targets"""
    inp = relay.var("x", shape=(64, 512, 10, 10))
    pooled = relay.nn.max_pool2d(inp, pool_size=(3, 3))
    mod = tvm.IRModule.from_expr(relay.Function([inp], pooled))
    verify_codegen(mod, dpu_target=dpu_target)
@requires_vitis_ai
@pytest.mark.parametrize(
    "dpu_target",
    ["DPUCADF8H", "DPUCAHX8H-u50", "DPUCAHX8L", "DPUCVDX8H", "DPUCVDX8G", "DPUCZDX8G-zcu104"],
)
def test_global_max_pool2d(dpu_target):
    """Test global_maxpool2d operator for Vitis AI DPU targets"""
    inp = relay.var("x", shape=(1, 512, 7, 7))
    mod = tvm.IRModule.from_expr(relay.Function([inp], relay.nn.global_max_pool2d(inp)))
    verify_codegen(mod, dpu_target=dpu_target)
@requires_vitis_ai
@pytest.mark.parametrize(
    "dpu_target",
    ["DPUCADF8H", "DPUCAHX8H-u50", "DPUCAHX8L", "DPUCVDX8H", "DPUCVDX8G", "DPUCZDX8G-zcu104"],
)
def test_upsampling(dpu_target):
    """Test upsampling operator for Vitis AI DPU targets"""
    inp = relay.var("x", shape=(64, 512, 10, 10))
    upsampled = relay.nn.upsampling(inp, scale_h=2, scale_w=2)
    mod = tvm.IRModule.from_expr(relay.Function([inp], upsampled))
    verify_codegen(mod, dpu_target=dpu_target)
@pytest.mark.skip(
    reason="I and O used to be mixed up in kernel layouts in TVM."
    "This is fixed, but vitis needs to adopt the new convention."
    "To change, simply remove this line:"
    "https://github.com/Xilinx/pyxir/blob/bef661d6d77adcdbd2cf4163f2cf3a1d31d40406/"
    "python/pyxir/frontend/tvm/relay_tools/relay_l2_convolution.py#L380"
)
@pytest.mark.parametrize(
    "dpu_target",
    ["DPUCADF8H", "DPUCAHX8H-u50", "DPUCAHX8L", "DPUCVDX8H", "DPUCVDX8G", "DPUCZDX8G-zcu104"],
)
def test_conv2d_transpose(dpu_target):
    """Test conv2d_transpose operator for Vitis AI DPU targets"""
    dshape = (1, 3, 18, 18)
    kshape = (3, 10, 3, 3)
    inp = relay.var("x", shape=dshape)
    kernel = relay.const(np.zeros(kshape, dtype="float32"))
    deconv = relay.nn.conv2d_transpose(
        inp,
        kernel,
        channels=10,
        kernel_size=(3, 3),
        strides=(1, 1),
        padding=(1, 1),
        data_layout="NCHW",
        kernel_layout="IOHW",
    )
    mod = tvm.IRModule.from_expr(relay.Function([inp], deconv))
    params = {"w": np.random.uniform(size=kshape).astype("float32")}
    verify_codegen(mod, params=params, dpu_target=dpu_target)
@requires_vitis_ai
@pytest.mark.parametrize(
    "dpu_target",
    ["DPUCADF8H", "DPUCAHX8H-u50", "DPUCAHX8L", "DPUCVDX8H", "DPUCVDX8G", "DPUCZDX8G-zcu104"],
)
def test_annotate(dpu_target):
    """Test annotation operator for Vitis AI DPU targets"""
    def partition(dpu_target):
        # Build a small conv2d + batch_norm graph, annotate it for the given
        # DPU target and run the standard merge/partition passes.
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("weight", relay.TensorType((16, 3, 3, 3), "float32"))
        bn_gamma = relay.var("bn_gamma", relay.TensorType((16,), "float32"))
        bn_beta = relay.var("bn_beta", relay.TensorType((16,), "float32"))
        bn_mmean = relay.var("bn_mean", relay.TensorType((16,), "float32"))
        bn_mvar = relay.var("bn_var", relay.TensorType((16,), "float32"))
        conv = relay.nn.conv2d(
            data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
        )
        bn_output = relay.nn.batch_norm(conv, bn_gamma, bn_beta, bn_mmean, bn_mvar)
        func = relay.Function(
            [data, weight, bn_gamma, bn_beta, bn_mmean, bn_mvar], bn_output.astuple()
        )
        mod = tvm.IRModule()
        mod["main"] = func
        params = {}
        params["weight"] = np.random.rand(16, 3, 3, 3).astype("float32")
        params["bn_gamma"] = np.random.rand(16).astype("float32")
        params["bn_beta"] = np.random.rand(16).astype("float32")
        params["bn_mean"] = np.random.rand(16).astype("float32")
        params["bn_var"] = np.random.rand(16).astype("float32")
        mod = annotation(mod, params, dpu_target)
        opt_pass = tvm.transform.Sequential(
            [
                transform.MergeCompilerRegions(),
                transform.PartitionGraph(),
            ]
        )
        with tvm.transform.PassContext(opt_level=3):
            mod = opt_pass(mod)
        return mod
    def expected():
        # Hand-construct the module that partitioning should produce: the whole
        # conv2d + batch_norm subgraph lives in one external "vitis_ai" function
        # called from main.
        # function variables for conv2d
        data0 = relay.var("data0", relay.TensorType((1, 3, 224, 224), "float32"))
        weight0 = relay.var("weight0", relay.TensorType((16, 3, 3, 3), "float32"))
        conv = relay.nn.conv2d(
            data=data0, weight=weight0, kernel_size=(3, 3), channels=16, padding=(1, 1)
        )
        # function variables for batch_norm
        bn_gamma0 = relay.var("bn_gamma0", relay.TensorType((16,), "float32"))
        bn_beta0 = relay.var("bn_beta0", relay.TensorType((16,), "float32"))
        bn_mmean0 = relay.var("bn_mean0", relay.TensorType((16,), "float32"))
        bn_mvar0 = relay.var("bn_var0", relay.TensorType((16,), "float32"))
        bn = relay.nn.batch_norm(conv, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0)
        func0 = relay.Function(
            [data0, weight0, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0], bn.astuple()
        )
        # Mark the function as an external Vitis-AI partition.
        func0 = set_func_attr(func0, "vitis_ai", "tvmgen_default_vitis_ai_main_0")
        gv0 = relay.GlobalVar("tvmgen_default_vitis_ai_main_0")
        mod = tvm.IRModule()
        mod[gv0] = func0
        mod = relay.transform.InferType()(mod)
        # main function
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("weight", relay.TensorType((16, 3, 3, 3), "float32"))
        bn_gamma = relay.var("bn_gamma", relay.TensorType((16,), "float32"))
        bn_beta = relay.var("bn_beta", relay.TensorType((16,), "float32"))
        bn_mmean = relay.var("bn_mean", relay.TensorType((16,), "float32"))
        bn_mvar = relay.var("bn_var", relay.TensorType((16,), "float32"))
        call0 = gv0(data, weight, bn_gamma, bn_beta, bn_mmean, bn_mvar)
        mod["main"] = relay.Function([data, weight, bn_gamma, bn_beta, bn_mmean, bn_mvar], call0)
        mod = relay.transform.InferType()(mod)
        return mod
    partitioned_mod = partition(dpu_target)
    ref_mod = expected()
    # map_free_vars lets structurally-equal graphs with differently named free
    # variables compare equal.
    assert tvm.ir.structural_equal(partitioned_mod, ref_mod, map_free_vars=True)
if __name__ == "__main__":
    # Allow running this test file as a standalone script.
    if sys.platform == "win32":
        print("Skip test on Windows for now")
        sys.exit(0)
    tvm.testing.main()
| 13,756 | 34.825521 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_vitis_ai/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Infrastructure and tests for Vitis-AI codegen """
| 840 | 43.263158 | 62 | py |
tvm | tvm-main/tests/python/contrib/test_vitis_ai/infrastructure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name, W0611, C0413
"""Expose Vitis-AI test functions to the Python frontend"""
import sys
import numpy as np
import pytest
pytest.importorskip("pyxir")
import pyxir.contrib.target.DPUCZDX8G
import tvm
from tvm import relay
from tvm import runtime
from tvm.relay import transform
from tvm.relay.op.contrib.vitis_ai import partition_for_vitis_ai
from tvm.relay.build_module import bind_params_by_name
from tvm.contrib.target import vitis_ai
from tvm.contrib import graph_executor
from tvm.contrib import utils
def get_cpu_op_count(mod):
    """Traverse graph counting ops offloaded to TVM."""
    class OpCounter(tvm.relay.ExprVisitor):
        """Counts calls to relay operators (as opposed to partitioned functions)."""
        def __init__(self):
            super().__init__()
            self.count = 0
        def visit_call(self, call):
            # Only direct operator calls count; calls to global/partitioned
            # functions are offloaded and therefore skipped.
            if isinstance(call.op, tvm.ir.Op):
                self.count += 1
            super().visit_call(call)
    counter = OpCounter()
    counter.visit(mod["main"])
    return counter.count
def build_module(
    mod,
    target,
    dpu_target="DPUCADF8H",
    params=None,
    enable_vitis_ai=True,
    tvm_ops=0,
    vitis_ai_partitions=1,
):
    """Build module for Vitis-AI codegen."""
    if isinstance(mod, tvm.relay.expr.Call):
        mod = tvm.IRModule.from_expr(mod)
    if params is None:
        params = {}
    with tvm.transform.PassContext(
        opt_level=3, config={"relay.ext.vitis_ai.options.target": dpu_target}
    ):
        if enable_vitis_ai:
            mod = partition_for_vitis_ai(mod, params, dpu_target)
            # Check how many operators stayed on TVM after partitioning.
            tvm_op_count = get_cpu_op_count(mod)
            assert tvm_op_count == tvm_ops, "Got {} TVM operators, expected {}".format(
                tvm_op_count, tvm_ops
            )
            # Check how many Vitis-AI partitions were produced.
            partition_count = sum(
                1 for global_var in mod.get_global_vars() if "vitis_ai" in global_var.name_hint
            )
            assert (
                vitis_ai_partitions == partition_count
            ), "Got {} Vitis-AI partitions, expected {}".format(
                partition_count, vitis_ai_partitions
            )
        relay.backend.te_compiler.get().clear()
        return relay.build(mod, target, params=params)
def update_lib(lib, cross_compile=None):
    """Export the built library to a temp file and reload it as a runtime module."""
    tmp_dir = utils.tempdir()
    file_name = "lib.so"
    file_path = tmp_dir.relpath(file_name)
    # Pass the cross compiler only when one is configured.
    export_kwargs = {"cc": cross_compile} if cross_compile else {}
    lib.export_library(file_path, **export_kwargs)
    return runtime.load_module(file_path)
def extract_vitis_ai_modules(module):
    """Get the Vits-AI runtime module from llvm module."""
    return [
        imported
        for imported in module.get_lib().imported_modules
        if imported.type_key == "VitisAIRuntime"
    ]
def verify_codegen(
    module, num_vitis_ai_modules=1, params=None, target="llvm", tvm_ops=0, dpu_target="DPUCADX8G"
):
    """Check Vitis-AI codegen against a known good output."""
    built = build_module(
        module,
        target,
        params=params,
        dpu_target=dpu_target,
        tvm_ops=tvm_ops,
        vitis_ai_partitions=num_vitis_ai_modules,
    )
    vitis_ai_modules = extract_vitis_ai_modules(built)
    num_found = len(vitis_ai_modules)
    assert num_found == num_vitis_ai_modules, (
        f"The number of Vitis-AI modules produced ({num_found}) does not "
        f"match the expected value ({num_vitis_ai_modules})."
    )
def verify_result(
    mod,
    map_inputs,
    out_shape,
    result,
    tol=1e-5,
    target="llvm",
    device=tvm.cpu(),
    params=None,
    dpu_target="DPUCADX8G",
    tvm_ops=0,
):
    """To check the result between reference and byoc vitis-ai flow"""
    built = build_module(mod, target, params=params, dpu_target=dpu_target, tvm_ops=tvm_ops)
    rt_mod = graph_executor.GraphModule(update_lib(built)["default"](tvm.cpu()))
    for name, data in map_inputs.items():
        rt_mod.set_input(name, data)
    rt_mod.set_input(**params)
    rt_mod.run()
    # Normalize single output/shape into lists so both cases share one loop.
    out_shapes = out_shape if isinstance(out_shape, list) else [out_shape]
    expected = result if isinstance(result, list) else [result]
    for idx, shape in enumerate(out_shapes):
        actual = rt_mod.get_output(idx, tvm.nd.empty(shape, device=device))
        tvm.testing.assert_allclose(actual.numpy(), expected[idx], rtol=tol, atol=tol)
| 5,146 | 30.006024 | 97 | py |
tvm | tvm-main/tests/python/contrib/test_cmsisnn/test_generate_constants.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: generate_constants pass"""
import math
import numpy as np
import pytest
import tvm
from tvm.testing.aot import get_dtype_range
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from .utils import (
make_module,
get_same_padding,
get_conv2d_qnn_params,
make_qnn_relu,
)
# Expose the registered C++ passes under "relay.ext.cmsisnn.transform" as
# functions in this module's namespace.
tvm._ffi._init_api("relay.ext.cmsisnn.transform", __name__)
def quantize_scale(scale):
    """Decompose *scale* into a Q31 fixed-point multiplier and a power-of-two shift."""
    mantissa, shift = math.frexp(scale)
    return round(mantissa * 2**31), shift
class CheckGeneratedConstants(tvm.relay.ExprVisitor):
    """Provides methods to compare against expected quantization parameters"""
    def __init__(self, enable_bias, multiplier, shift):
        super().__init__()
        # Currently informational only; not updated during traversal.
        self.num_constant_args_ = 0
        # Whether the partitioned call carries a bias argument; this shifts the
        # position of the quantized shift constant in the argument list.
        self.enable_bias_ = enable_bias
        # Expected per-channel quantized multipliers.
        self.multiplier_ = multiplier
        # Expected per-channel shifts.
        self.shift_ = shift
    def visit_call(self, call):
        """Tests if the multiplier and shift constants required by CMSIS-NN API were generated"""
        super().visit_call(call)
        # Only calls to partitioned (global) functions carry the generated constants.
        if isinstance(call.op, tvm.ir.expr.GlobalVar):
            multiplier = call.args[2]
            # With a bias present, the shift constant sits one argument later.
            shift = call.args[6] if self.enable_bias_ else call.args[5]
            assert isinstance(
                multiplier, relay.expr.Constant
            ), "Expected quantized multiplier at argument#3"
            assert isinstance(
                shift, relay.expr.Constant
            ), "Expected a constant while looking for quantized shift"
            multiplier = multiplier.data.numpy()
            shift = shift.data.numpy()
            # Loose tolerances: the pass may round slightly differently than
            # the reference quantize_scale above.
            tvm.testing.assert_allclose(multiplier, self.multiplier_, atol=100, rtol=1e-10)
            tvm.testing.assert_allclose(shift, self.shift_, atol=1, rtol=1e-5)
def make_model(
    shape,
    kernel_shape,
    input_zero_point,
    input_scale,
    kernel_zero_point,
    kernel_scale,
    output_zero_point,
    output_scale,
    padding,
    strides,
    dilation,
    groups,
    dtype,
    kernel_dtype,
    out_channels,
    weight_format,
    enable_bias,
    relu_type,
):
    """Return a model and any parameters it may have.

    Builds a QNN conv2d (+ optional bias_add) -> requantize -> optional relu
    chain in NHWC layout, with randomly generated weights/bias, and returns
    the final relay expression together with a {"w": ..., "b": ...} dict.
    """
    # Kernel spatial dims are read from whatever layout string was requested.
    h_index = weight_format.index("H")
    w_index = weight_format.index("W")
    kernel_h = kernel_shape[h_index]
    kernel_w = kernel_shape[w_index]
    a = relay.var("input", shape=shape, dtype=dtype)
    p = (0, 0, 0, 0)
    if padding == "SAME":
        # Emulate SAME padding with an explicit pad op filled with the input
        # zero point, then feed VALID-style padding values to the conv.
        p = get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, strides)
        a = relay.nn.pad(
            a,
            pad_width=[(0, 0), (p[0], p[2]), (p[1], p[3]), (0, 0)],
            pad_value=input_zero_point,
            pad_mode="constant",
        )
        shape = (shape[0], shape[1] + p[0] + p[2], shape[2] + p[1] + p[3], shape[3])
    weight_shape = (kernel_h, kernel_w, shape[3] // groups, out_channels)
    # Fixed seed keeps the generated weights reproducible across runs.
    rng = np.random.default_rng(12321)
    kmin, kmax = get_dtype_range(kernel_dtype)
    weight = tvm.nd.array(
        rng.integers(
            kmin,
            high=kmax,
            size=weight_shape,
            dtype=kernel_dtype,
        )
    )
    weight_const = relay.const(weight, kernel_dtype)
    conv = relay.qnn.op.conv2d(
        a,
        weight_const,
        input_zero_point=relay.const(input_zero_point, "int32"),
        kernel_zero_point=relay.const(kernel_zero_point, "int32"),
        input_scale=relay.const(input_scale, "float32"),
        kernel_scale=relay.const(kernel_scale, "float32"),
        kernel_size=(kernel_h, kernel_w),
        data_layout="NHWC",
        kernel_layout=weight_format,
        dilation=dilation,
        strides=strides,
        groups=groups,
        channels=out_channels,
        padding=p,
        out_dtype="int32",
    )
    bias = tvm.nd.array(rng.integers(0, high=10, size=(out_channels,), dtype="int32"))
    bias_const = relay.const(bias, "int32")
    last_op = relay.nn.bias_add(conv, bias_const, axis=3) if enable_bias else conv
    # Requantize scale per output channel: input_scale * kernel_scale[channel].
    requant_input_sc = [sc * input_scale for sc in kernel_scale]
    last_op = relay.qnn.op.requantize(
        last_op,
        relay.const(requant_input_sc, "float32"),
        relay.const(0, "int32"),
        relay.const(output_scale, "float32"),
        relay.const(output_zero_point, "int32"),
        out_dtype=dtype,
    )
    last_op = make_qnn_relu(last_op, relu_type, output_scale, output_zero_point, dtype)
    params = {"w": weight, "b": bias}
    return last_op, params
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("enable_bias", [True, False])
@pytest.mark.parametrize(
    "input_zero_point, input_scale, kernel_scale, out_channels",
    [(10, 0.0128, [0.11, 0.22], 2), (-64, 1, [1, 0.0256, 1.37], 3)],
)
def test_op_int8(
    enable_bias,
    input_zero_point,
    input_scale,
    kernel_scale,
    out_channels,
):
    """Tests for CMSIS-NN constants when the dtype is int8"""
    ifm_shape = (1, 28, 28, 3)
    padding = "VALID"
    strides = (1, 1)
    dilation = (1, 1)
    kernel_size = (3, 3)
    kernel_zero_point = 0
    groups = 1
    weight_format = "HWIO"
    kernel_h = kernel_size[0]
    kernel_w = kernel_size[1]
    dtype = "int8"
    relu_type = "RELU"
    weight_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
    # Derive output quantization parameters consistent with the input/kernel ones.
    output_scale, output_zero_point = get_conv2d_qnn_params(
        weight_shape,
        input_scale,
        input_zero_point,
        kernel_scale,
        kernel_zero_point,
        dtype,
        dtype,
        dtype,
        False,
    )
    model, params = make_model(
        ifm_shape,
        weight_shape,
        input_zero_point,
        input_scale,
        kernel_zero_point,
        kernel_scale,
        output_zero_point,
        output_scale,
        padding,
        strides,
        dilation,
        groups,
        dtype,
        dtype,
        out_channels,
        weight_format,
        enable_bias,
        relu_type,
    )
    mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(mod, params)
    # Compute the per-channel (multiplier, shift) pairs the partitioning pass
    # is expected to have baked into the CMSIS-NN call, then verify them.
    multiplier_array = []
    shift_array = []
    for i in range(out_channels):
        multiplier, shift = quantize_scale(input_scale * kernel_scale[i] / output_scale)
        multiplier_array.append(multiplier)
        shift_array.append(shift)
    CheckGeneratedConstants(enable_bias, multiplier_array, shift_array).visit_function(
        cmsisnn_mod["main"]
    )
if __name__ == "__main__":
    # Allow running this test file as a standalone script.
    tvm.testing.main()
| 7,183 | 30.099567 | 97 | py |
tvm | tvm-main/tests/python/contrib/test_cmsisnn/test_invalid_graphs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: Tests invalid graphs"""
import numpy as np
import tvm
from tvm.testing.aot import AOTTestModel, get_dtype_range, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_USMP_CORSTONE300_RUNNER,
)
from .utils import (
skip_if_no_reference_system,
)
@skip_if_no_reference_system
@tvm.testing.requires_cmsisnn
def test_empty_function():
    """Test partitioned function without composite function"""
    # Reference module: a plain elementwise add on the host.
    original_model = """
#[version = "0.0.5"]
def @main(%data : Tensor[(16, 29), int8]) -> Tensor[(16, 29), int8] {
    add(%data, %data)
}
"""
    # Same computation, but wrapped in a Compiler="cmsis-nn" partition that
    # contains no composite function — the invalid-graph case under test.
    cmsisnn_model = """
#[version = "0.0.5"]
def @tvmgen_default_cmsis_nn_main_1(%i1: Tensor[(16, 29), int8], Inline=1, Compiler="cmsis-nn", global_symbol="tvmgen_default_cmsis_nn_main_1", Primitive=1) -> Tensor[(16, 29), int8] {
    add(%i1, %i1)
}
def @main(%data : Tensor[(16, 29), int8]) -> Tensor[(16, 29), int8] {
    %1 = @tvmgen_default_cmsis_nn_main_1(%data) /* ty=Tensor[(16, 29), int8] */;
    %1
}
"""
    orig_mod = tvm.relay.fromtext(original_model)
    cmsisnn_mod = tvm.relay.fromtext(cmsisnn_model)
    params = {}
    # validate the output
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER
    dtype = "int8"
    in_min, in_max = get_dtype_range(dtype)
    rng = np.random.default_rng(12345)
    inputs = {"data": rng.integers(in_min, high=in_max, size=(16, 29), dtype=dtype)}
    # Reference outputs come from the unpartitioned module.
    outputs = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=outputs,
            params=params,
            output_tolerance=0,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
| 2,580 | 32.960526 | 184 | py |
tvm | tvm-main/tests/python/contrib/test_cmsisnn/test_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: Conv2D"""
import itertools
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import (
get_dtype_range,
generate_ref_data,
AOTTestModel,
compile_models,
compile_and_run,
run_and_check,
)
from tvm.micro.testing.aot_test_utils import AOT_USMP_CORSTONE300_RUNNER
from .utils import (
make_module,
get_same_padding,
get_conv2d_qnn_params,
get_kernel_bias_dtype,
make_qnn_relu,
assert_partitioned_function,
assert_no_external_function,
create_test_runner,
CheckForPadsWithinCompositeFunc,
)
def make_model(
    shape,
    kernel_shape,
    input_zero_point,
    input_scale,
    kernel_zero_point,
    kernel_scale,
    output_zero_point,
    output_scale,
    padding,
    strides,
    dilation,
    groups,
    dtype,
    kernel_dtype,
    bias_dtype,
    out_channels,
    kernel_layout,
    enable_bias,
    relu_type,
    input_op=None,
):
    """Build a qnn.conv2d -> (bias_add) -> requantize -> (relu) expression.

    Returns a tuple of (relay expression, dict of generated kernel/bias
    parameter values).
    """
    data = input_op if input_op else relay.var("input", shape=shape, dtype=dtype)

    kernel_h = kernel_shape[kernel_layout.index("H")]
    kernel_w = kernel_shape[kernel_layout.index("W")]

    # Explicit padding is only computed for "SAME"; "VALID" pads nothing.
    pad = (
        get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, strides)
        if padding == "SAME"
        else (0, 0, 0, 0)
    )

    # Deterministic random kernel values within the kernel dtype's range.
    rng = np.random.default_rng(12321)
    kmin, kmax = get_dtype_range(kernel_dtype)
    kernel = tvm.nd.array(rng.integers(kmin, high=kmax, size=kernel_shape, dtype=kernel_dtype))

    # A single output channel uses a scalar scale; otherwise per-channel scales.
    conv_kernel_scale = kernel_scale[0] if out_channels == 1 else kernel_scale
    conv = relay.qnn.op.conv2d(
        data,
        relay.const(kernel, kernel_dtype),
        input_zero_point=relay.const(input_zero_point, "int32"),
        kernel_zero_point=relay.const(kernel_zero_point, "int32"),
        input_scale=relay.const(input_scale, "float32"),
        kernel_scale=relay.const(conv_kernel_scale, "float32"),
        kernel_size=(kernel_h, kernel_w),
        data_layout="NHWC",
        kernel_layout=kernel_layout,
        dilation=dilation,
        strides=strides,
        groups=groups,
        channels=out_channels,
        padding=pad,
        out_dtype=bias_dtype,
    )

    bias = tvm.nd.array(rng.integers(0, high=10, size=(out_channels,), dtype=bias_dtype))
    out = relay.nn.bias_add(conv, relay.const(bias, bias_dtype), axis=3) if enable_bias else conv

    # Requantize from the accumulator scale (input_scale * kernel_scale,
    # per output channel) down to the output quantization parameters.
    requant_scale = [sc * input_scale for sc in kernel_scale]
    requant_scale = requant_scale[0] if out_channels == 1 else requant_scale
    out = relay.qnn.op.requantize(
        out,
        relay.const(requant_scale, "float32"),
        relay.const(0, "int32"),
        relay.const(output_scale, "float32"),
        relay.const(output_zero_point, "int32"),
        out_dtype=dtype,
    )
    out = make_qnn_relu(out, relu_type, output_scale, output_zero_point, dtype)
    return out, {"w": kernel, "b": bias}
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("enable_bias", [True, False])
@pytest.mark.parametrize(
    "input_zero_point, input_scale, kernel_scale, out_channels",
    [(10, 0.0128, [0.11, 0.22], 2)],
)
def test_conv2d_number_primfunc_args(
    padding,
    enable_bias,
    input_zero_point,
    input_scale,
    kernel_scale,
    out_channels,
):
    """Tests number of arguments in Conv2D primfunc"""
    interface_api = "c"
    use_unpacked_api = True

    dtype = "int8"
    groups = 1
    relu_type = "RELU"
    ifm_shape = (1, 64, 100, 4)
    strides = (1, 1)
    dilation = (1, 1)
    kernel_h, kernel_w = (3, 3)
    kernel_layout = "HWIO"
    kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
    kernel_zero_point = 0

    in_min, in_max = get_dtype_range(dtype)
    kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
    output_scale, output_zero_point = get_conv2d_qnn_params(
        kernel_shape,
        input_scale,
        input_zero_point,
        kernel_scale,
        kernel_zero_point,
        input_dtype=dtype,
        kernel_dtype=kernel_dtype,
        output_dtype=dtype,
    )
    model, params = make_model(
        ifm_shape,
        kernel_shape,
        input_zero_point,
        input_scale,
        kernel_zero_point,
        kernel_scale,
        output_zero_point,
        output_scale,
        padding,
        strides,
        dilation,
        groups,
        dtype,
        kernel_dtype,
        bias_dtype,
        out_channels,
        kernel_layout,
        enable_bias,
        relu_type,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)

    # The QNN conv2d pattern must be offloaded to CMSIS-NN.
    assert_partitioned_function(orig_mod, cmsisnn_mod)

    # Compile with USMP disabled so the primfunc signature stays observable.
    rng = np.random.default_rng(12345)
    inputs = {"input": rng.integers(in_min, high=in_max, size=ifm_shape, dtype=dtype)}
    output_list = generate_ref_data(orig_mod["main"], inputs, params)
    compiled_models = compile_models(
        AOTTestModel(module=cmsisnn_mod, inputs=inputs, outputs=output_list, params=params),
        interface_api,
        use_unpacked_api,
        pass_config={"tir.usmp.enable": False},
    )

    # Bias adds one extra argument to the generated primfunc.
    expected_num_params = 6 if enable_bias else 5
    cmsisnn_tir_mod = None
    for target, mod in compiled_models[0].executor_factory.lowered_ir_mods.items():
        if target.kind.name == "cmsis-nn":
            cmsisnn_tir_mod = mod

    cmsisnn_func = cmsisnn_tir_mod["tvmgen_default_cmsis_nn_main_0"]
    assert (
        len(cmsisnn_func.params) == expected_num_params
    ), "Generated unexpected number of function arguments."
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("dtype", ["int8", "int16"])
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("relu_type", ["RELU"])
@pytest.mark.parametrize("enable_bias", [True, False])
@pytest.mark.parametrize(
    "input_zero_point, input_scale, kernel_scale, out_channels",
    [(10, 0.0128, [0.11, 0.22], 2), (-64, 1, [1, 0.0256, 1.37], 3)],
)
@pytest.mark.parametrize(
    "compiler_cpu, cpu_flags", [("cortex-m55", "+nomve"), ("cortex-m55", ""), ("cortex-m7", "")]
)
def test_conv2d_symmetric_padding(
    dtype,
    padding,
    enable_bias,
    relu_type,
    input_zero_point,
    input_scale,
    kernel_scale,
    out_channels,
    compiler_cpu,
    cpu_flags,
):
    """Tests QNN Conv2D where the padding is symmetric on both sides of input"""
    interface_api = "c"
    use_unpacked_api = True

    ifm_shape = (1, 64, 100, 4)
    strides = (1, 1)
    dilation = (1, 1)
    groups = 1
    # input_zero_point is not handled by TFLM when int16
    input_zero_point = input_zero_point if dtype == "int8" else 0
    kernel_layout = "HWIO"
    kernel_h, kernel_w = (3, 3)
    kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
    kernel_zero_point = 0

    in_min, in_max = get_dtype_range(dtype)
    kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
    output_scale, output_zero_point = get_conv2d_qnn_params(
        kernel_shape,
        input_scale,
        input_zero_point,
        kernel_scale,
        kernel_zero_point,
        input_dtype=dtype,
        kernel_dtype=kernel_dtype,
        output_dtype=dtype,
    )
    model, params = make_model(
        ifm_shape,
        kernel_shape,
        input_zero_point,
        input_scale,
        kernel_zero_point,
        kernel_scale,
        output_zero_point,
        output_scale,
        padding,
        strides,
        dilation,
        groups,
        dtype,
        kernel_dtype,
        bias_dtype,
        out_channels,
        kernel_layout,
        enable_bias,
        relu_type,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)

    # The QNN conv2d pattern must be offloaded to an external CMSIS-NN function.
    assert_partitioned_function(orig_mod, cmsisnn_mod)

    # Compare the CMSIS-NN build against the CPU reference output.
    rng = np.random.default_rng(12345)
    inputs = {"input": rng.integers(in_min, high=in_max, size=ifm_shape, dtype=dtype)}
    expected_outputs = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=expected_outputs,
            params=params,
            output_tolerance=1,
        ),
        create_test_runner(compiler_cpu, cpu_flags),
        interface_api,
        use_unpacked_api,
    )
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("relu_type", ["RELU", "NONE"])
@pytest.mark.parametrize("enable_bias", [True, False])
@pytest.mark.parametrize(
    "input_zero_point, input_scale, kernel_scale, out_channels",
    [(10, 0.0128, [0.11, 0.22], 2), (-64, 1, [1, 0.0256, 1.37], 3)],
)
def test_conv2d_asymmetric_padding(
    padding,
    enable_bias,
    relu_type,
    input_zero_point,
    input_scale,
    kernel_scale,
    out_channels,
):
    """Tests QNN Conv2D where the padding is asymmetric on different sides of input"""
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER

    dtype = "int8"
    # 5x5 kernel with stride 2 over a 25x25 input yields asymmetric padding.
    ifm_shape = (1, 25, 25, 12)
    strides = (2, 2)
    dilation = (1, 1)
    groups = 1
    input_zero_point = input_zero_point if dtype == "int8" else 0
    kernel_layout = "HWIO"
    kernel_h, kernel_w = (5, 5)
    kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
    kernel_zero_point = 0

    in_min, in_max = get_dtype_range(dtype)
    kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
    output_scale, output_zero_point = get_conv2d_qnn_params(
        kernel_shape,
        input_scale,
        input_zero_point,
        kernel_scale,
        kernel_zero_point,
        input_dtype=dtype,
        kernel_dtype=kernel_dtype,
        output_dtype=dtype,
    )
    model, params = make_model(
        ifm_shape,
        kernel_shape,
        input_zero_point,
        input_scale,
        kernel_zero_point,
        kernel_scale,
        output_zero_point,
        output_scale,
        padding,
        strides,
        dilation,
        groups,
        dtype,
        kernel_dtype,
        bias_dtype,
        out_channels,
        kernel_layout,
        enable_bias,
        relu_type,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)

    # The QNN conv2d pattern must be offloaded to an external CMSIS-NN function.
    assert_partitioned_function(orig_mod, cmsisnn_mod)

    # Compare the CMSIS-NN build against the CPU reference output.
    rng = np.random.default_rng(12345)
    inputs = {"input": rng.integers(in_min, high=in_max, size=ifm_shape, dtype=dtype)}
    expected_outputs = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=expected_outputs,
            params=params,
            output_tolerance=1,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("ifm_shape", [(1, 25, 25, 12), (1, 64, 100, 4)])
@pytest.mark.parametrize(
    "pad_width",
    [
        ((0, 0), (0, 1), (1, 2), (0, 0)),
        ((0, 0), (1, 1), (1, 1), (0, 0)),
        ((0, 0), (2, 2), (3, 4), (0, 0)),
    ],
)
def test_pad_conv2d_fusion_int8(
    ifm_shape,
    pad_width,
):
    """Tests that a preceding nn.pad is fused into the CMSIS-NN QNN Conv2D.

    Fix: the parametrized ``ifm_shape`` was previously shadowed by a
    hard-coded local assignment, so only (1, 25, 25, 12) was ever exercised.
    """
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER

    kernel_size = (5, 5)
    strides = (2, 2)
    dilation = (1, 1)
    padding = "SAME"
    dtype = "int8"
    enable_bias = True
    relu_type = "NONE"
    input_zero_point = 10
    input_scale = 0.0128
    kernel_scale = [0.11, 0.22]
    out_channels = 2
    groups = 1
    kernel_layout = "HWIO"
    kernel_h = kernel_size[0]
    kernel_w = kernel_size[1]
    kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
    kernel_zero_point = 0
    in_min, in_max = get_dtype_range(dtype)
    kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
    output_scale, output_zero_point = get_conv2d_qnn_params(
        kernel_shape,
        input_scale,
        input_zero_point,
        kernel_scale,
        kernel_zero_point,
        input_dtype=dtype,
        kernel_dtype=kernel_dtype,
        output_dtype=dtype,
    )

    # Build nn.pad feeding the conv2d; the pad value matches the input zero
    # point so fusing it into the convolution's padding is legal.
    invar = relay.var("input", shape=ifm_shape, dtype=dtype)
    pad = relay.nn.pad(
        invar,
        pad_width=pad_width,  # ((), (top, bottom), (left, right), ())
        pad_value=input_zero_point,
        pad_mode="constant",
    )
    model, params = make_model(
        ifm_shape,
        kernel_shape,
        input_zero_point,
        input_scale,
        kernel_zero_point,
        kernel_scale,
        output_zero_point,
        output_scale,
        padding,
        strides,
        dilation,
        groups,
        dtype,
        kernel_dtype,
        bias_dtype,
        out_channels,
        kernel_layout,
        enable_bias,
        relu_type,
        input_op=pad,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)

    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod, False)

    # check pad is not present inside CMSIS-NN partitioned function
    cmsisnn_func = None
    for var in cmsisnn_mod.get_global_vars():
        if "cmsis_nn_main_0" in var.name_hint:
            cmsisnn_func = cmsisnn_mod[var]
            pad_verifier = CheckForPadsWithinCompositeFunc()
            pad_verifier.visit_function(cmsisnn_func)
            pad_verifier.assert_no_pads_within_func()

    # validate the output
    rng = np.random.default_rng(12345)
    inputs = {"input": rng.integers(in_min, high=in_max, size=ifm_shape, dtype=dtype)}
    output_list = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=params,
            output_tolerance=1,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize(
    "ifm_shape, pad_width, conv2d_padding",
    [
        [(1, 25, 25, 12), ((0, 0), (0, 2), (1, 2), (0, 0)), "SAME"],
        [(1, 64, 100, 4), ((0, 0), (1, 3), (1, 1), (0, 0)), "VALID"],
        [(1, 55, 55, 3), ((0, 0), (2, 1), (3, 5), (0, 0)), "SAME"],
    ],
)
def test_invalid_pad_conv2d_fusion_int8(
    ifm_shape,
    pad_width,
    conv2d_padding,
):
    """Tests that an unfusable nn.pad stays in main instead of being fused
    into the CMSIS-NN QNN Conv2D.

    Fix: the parametrized ``ifm_shape`` was previously shadowed by a
    hard-coded local assignment, so only (1, 25, 25, 12) was ever exercised.
    """
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER

    kernel_size = (5, 5)
    strides = (2, 2)
    dilation = (1, 1)
    dtype = "int8"
    enable_bias = True
    relu_type = "NONE"
    input_zero_point = 10
    input_scale = 0.0128
    kernel_scale = [0.11, 0.22]
    out_channels = 2
    groups = 1
    kernel_layout = "HWIO"
    kernel_h = kernel_size[0]
    kernel_w = kernel_size[1]
    kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
    kernel_zero_point = 0
    in_min, in_max = get_dtype_range(dtype)
    kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
    output_scale, output_zero_point = get_conv2d_qnn_params(
        kernel_shape,
        input_scale,
        input_zero_point,
        kernel_scale,
        kernel_zero_point,
        input_dtype=dtype,
        kernel_dtype=kernel_dtype,
        output_dtype=dtype,
    )

    # Build nn.pad feeding the conv2d; these pad widths cannot be folded
    # into the convolution's own padding.
    invar = relay.var("input", shape=ifm_shape, dtype=dtype)
    pad = relay.nn.pad(
        invar,
        pad_width=pad_width,  # ((), (top, bottom), (left, right), ())
        pad_value=input_zero_point,
        pad_mode="constant",
    )
    model, params = make_model(
        ifm_shape,
        kernel_shape,
        input_zero_point,
        input_scale,
        kernel_zero_point,
        kernel_scale,
        output_zero_point,
        output_scale,
        conv2d_padding,
        strides,
        dilation,
        groups,
        dtype,
        kernel_dtype,
        bias_dtype,
        out_channels,
        kernel_layout,
        enable_bias,
        relu_type,
        input_op=pad,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)

    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)

    # check pad is only present inside main function
    cmsisnn_func = None
    for var in cmsisnn_mod.get_global_vars():
        if "cmsis_nn_main_0" in var.name_hint:
            cmsisnn_func = cmsisnn_mod[var]
            pad_verifier = CheckForPadsWithinCompositeFunc()
            pad_verifier.visit_function(cmsisnn_func)
            pad_verifier.assert_no_pads_within_func()
        else:
            main_func = cmsisnn_mod[var]
            pad_verifier = CheckForPadsWithinCompositeFunc()
            pad_verifier.visit_function(main_func)
            pad_verifier.assert_pads_within_func()

    # validate the output
    rng = np.random.default_rng(12345)
    inputs = {"input": rng.integers(in_min, high=in_max, size=ifm_shape, dtype=dtype)}
    output_list = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=params,
            output_tolerance=1,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
# pylint: disable=import-outside-toplevel
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("ifm_shape", [(1, 55, 55, 3)])
@pytest.mark.parametrize("kernel_shape", [(3, 2), (1, 3)])
@pytest.mark.parametrize("strides, dilation", [((3, 2), (1, 1))])
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("activation", ["NONE", "RELU"])
def test_conv2d_int8_tflite(ifm_shape, kernel_shape, strides, dilation, padding, activation):
    """Compares TVM output against TFLite output"""
    from tvm.relay.testing.tflite import TFLiteModel

    # Build a single-conv2d TFLite model and convert it to Relay.
    tfl_model = TFLiteModel("int8")
    conv2d_function = tfl_model.create_conv2d_single(
        kernel_shape, strides, padding, dilation, activation
    )
    tfl_model.create_tflite_model(conv2d_function, [ifm_shape])
    relay_mod, relay_params = tfl_model.convert_to_relay()

    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(relay_mod, relay_params)

    # The conv2d must be offloaded to an external CMSIS-NN function.
    assert_partitioned_function(relay_mod, cmsisnn_mod)

    # Compare the CMSIS-NN build against the TFLite reference data.
    input_map, output_map, output_tolerance = tfl_model.generate_reference_data()
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=input_map,
            outputs=output_map,
            params=relay_params,
            output_tolerance=output_tolerance,
        ),
        AOT_USMP_CORSTONE300_RUNNER,
        "c",
        True,
    )
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("dtype", ["int8", "int16"])
@pytest.mark.parametrize("ifm_shape", [(1, 28, 28, 12), (1, 64, 100, 4)])
@pytest.mark.parametrize("kernel_size", [(3, 3)])
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("strides, dilation", [((1, 1), (1, 1))])
@pytest.mark.parametrize("relu_type", ["RELU"])
@pytest.mark.parametrize(
    "depth_multiplier, enable_bias",
    [(1, True), (3, True)],
)
@pytest.mark.parametrize(
    "input_zero_point, input_scale, kernel_scale, out_channels",
    [(10, 0.0128, [0.11, 0.22], 2), (-64, 1, [1, 0.0256, 1.37], 3)],
)
@pytest.mark.parametrize(
    "compiler_cpu, cpu_flags", [("cortex-m55", "+nomve"), ("cortex-m55", ""), ("cortex-m7", "")]
)
def test_depthwise(
    dtype,
    ifm_shape,
    kernel_size,
    padding,
    strides,
    dilation,
    enable_bias,
    relu_type,
    input_zero_point,
    input_scale,
    kernel_scale,
    out_channels,
    depth_multiplier,
    compiler_cpu,
    cpu_flags,
):
    """Tests QNN Depthwise int8 op via CMSIS-NN.

    Cleanup: removed dead assignments (groups=1, HWIO layout, non-depthwise
    kernel shape) that were copy-pasted from the regular conv2d test and
    immediately overwritten below.
    """
    interface_api = "c"
    use_unpacked_api = True

    # input_zero_point is not handled by TFLM when int16
    input_zero_point = input_zero_point if dtype == "int8" else 0
    kernel_h = kernel_size[0]
    kernel_w = kernel_size[1]
    kernel_zero_point = 0
    in_min, in_max = get_dtype_range(dtype)

    # Depthwise convolution: one group per input channel, HWOI kernel layout,
    # depth_multiplier output channels per input channel. Note this derived
    # value supersedes the parametrized ``out_channels``.
    groups = ifm_shape[3]
    kernel_layout = "HWOI"
    kernel_shape = (kernel_h, kernel_w, ifm_shape[3], depth_multiplier)
    out_channels = ifm_shape[3] * depth_multiplier

    # Cycle the parametrized scales so there is one scale per output channel.
    ks_len = len(kernel_scale)
    kernel_scale = [kernel_scale[i % ks_len] for i in range(out_channels)]
    kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
    output_scale, output_zero_point = get_conv2d_qnn_params(
        kernel_shape,
        input_scale,
        input_zero_point,
        kernel_scale,
        kernel_zero_point,
        input_dtype=dtype,
        kernel_dtype=kernel_dtype,
        output_dtype=dtype,
        is_depthwise=True,
    )
    model, params = make_model(
        ifm_shape,
        kernel_shape,
        input_zero_point,
        input_scale,
        kernel_zero_point,
        kernel_scale,
        output_zero_point,
        output_scale,
        padding,
        strides,
        dilation,
        groups,
        dtype,
        kernel_dtype,
        bias_dtype,
        out_channels,
        kernel_layout,
        enable_bias,
        relu_type,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)

    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)

    # validate the output
    rng = np.random.default_rng(12345)
    inputs = {"input": rng.integers(in_min, high=in_max, size=ifm_shape, dtype=dtype)}
    output_list = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=params,
            output_tolerance=1,
        ),
        create_test_runner(compiler_cpu, cpu_flags),
        interface_api,
        use_unpacked_api,
    )
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("strides, dilation", [((1, 1), (1, 1))])
@pytest.mark.parametrize("relu_type", ["RELU", "NONE"])
@pytest.mark.parametrize("depth_multiplier", [1, 3])
@pytest.mark.parametrize(
    "input_zero_point, input_scale, kernel_scale",
    [
        (
            10,
            0.0128,
            [0.11, 0.22],
        ),
        (
            -64,
            1,
            [1, 0.0256, 1.37],
        ),
    ],
)
def test_relay_conv2d_cmsisnn_depthwise_int8(
    padding,
    strides,
    dilation,
    relu_type,
    input_zero_point,
    input_scale,
    kernel_scale,
    depth_multiplier,
):
    """Checks that a depthwise QNN conv2d is lowered to the CMSIS-NN depthwise
    convolution call (arm_depthwise_conv_wrapper_s8) in the generated TIR, and
    that the generated code's outputs match the Relay reference output."""
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER

    dtype = "int8"
    in_min, in_max = get_dtype_range(dtype)

    # Depthwise convolution: groups equals the number of input channels and
    # the kernel uses the HWOI layout with depth_multiplier output channels
    # per input channel.
    ifm_shape = (1, 24, 24, 1)
    groups = ifm_shape[3]
    input_zero_point = input_zero_point if dtype == "int8" else 0
    kernel_layout = "HWIO"
    (kernel_h, kernel_w) = (3, 3)
    kernel_shape = (kernel_h, kernel_w, ifm_shape[3], depth_multiplier)
    out_channels = ifm_shape[3] * depth_multiplier
    enable_bias = True
    ks_len = len(kernel_scale)
    kernel_zero_point = 0
    # Cycle the parametrized scales so there is one scale per output channel.
    kernel_scale = [kernel_scale[i % ks_len] for i in range(out_channels)]
    kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
    output_scale, output_zero_point = get_conv2d_qnn_params(
        kernel_shape,
        input_scale,
        input_zero_point,
        kernel_scale,
        kernel_zero_point,
        input_dtype=dtype,
        kernel_dtype=kernel_dtype,
        output_dtype=dtype,
        is_depthwise=True,
    )
    model, params = make_model(
        ifm_shape,
        kernel_shape,
        input_zero_point,
        input_scale,
        kernel_zero_point,
        kernel_scale,
        output_zero_point,
        output_scale,
        padding,
        strides,
        dilation,
        groups,
        dtype,
        kernel_dtype,
        bias_dtype,
        out_channels,
        kernel_layout,
        enable_bias,
        relu_type,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)

    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)

    # generate reference output
    rng = np.random.default_rng(12345)
    inputs = {"input": rng.integers(in_min, high=in_max, size=ifm_shape, dtype=dtype)}
    output_list = generate_ref_data(orig_mod["main"], inputs, params)

    # validate presence of depthwise convolution
    compiled_models = compile_models(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=params,
            output_tolerance=1,
        ),
        interface_api,
        use_unpacked_api,
        pass_config=test_runner.pass_config,
    )
    # Locate the IRModule that was lowered for the cmsis-nn target.
    cmsisnn_tir_mod = None
    for target, mod in compiled_models[0].executor_factory.lowered_ir_mods.items():
        if target.kind.name == "cmsis-nn":
            cmsisnn_tir_mod = mod

    cmsisnn_func = cmsisnn_tir_mod["tvmgen_default_cmsis_nn_main_0"]
    call_extern = None
    # This happens when context buffer is init in case depthM != 1
    if isinstance(cmsisnn_func.body, tvm.tir.stmt.Evaluate):
        call_extern = cmsisnn_func.body.value
    else:
        call_extern = cmsisnn_func.body.body.value
    # The first argument of the extern call is the CMSIS-NN symbol name.
    assert (
        call_extern.args[0].value == "arm_depthwise_conv_wrapper_s8"
    ), "Relay Conv2D should be mapped to CMSIS-NN Depthwise Convolution."

    # validate the output
    run_and_check(
        models=compiled_models,
        runner=test_runner,
        interface_api=interface_api,
    )
def parameterize_for_invalid_model(test):
    """Generates non-int8 non-int16 inputs"""
    # Enumerate every dtype / zero-point combination, then drop the valid
    # configurations: int8 activations (or int16 with a zero input offset)
    # combined with a symmetric (zero offset) int8 kernel.
    combinations = [
        (in_dtype, k_dtype, k_zp, in_zp)
        for in_dtype in ["uint8", "int8", "int16"]
        for k_dtype in ["uint8", "int8"]
        for k_zp in [-33, 10, 0]
        for in_zp in [64, 0]
        if not (
            (in_dtype == "int8" or (in_dtype == "int16" and in_zp == 0))
            and k_dtype == "int8"
            and k_zp == 0
        )
    ]
    return pytest.mark.parametrize(
        ["in_dtype", "kernel_dtype", "kernel_zero_point", "input_zero_point"],
        combinations,
    )(test)
@tvm.testing.requires_cmsisnn
@parameterize_for_invalid_model
def test_invalid_parameters(
    in_dtype,
    kernel_dtype,
    kernel_zero_point,
    input_zero_point,
):
    """Tests Depthwise op for non int8 inputs"""
    ifm_shape = (1, 28, 28, 12)
    out_channels = 2
    input_scale = 1
    kernel_scale = [0.11, 0.0237]
    kernel_layout = "HWIO"
    kernel_shape = [3, 3, ifm_shape[3], out_channels]

    _, bias_dtype = get_kernel_bias_dtype(in_dtype)
    output_scale, output_zero_point = get_conv2d_qnn_params(
        kernel_shape,
        input_scale,
        input_zero_point,
        kernel_scale,
        kernel_zero_point,
        in_dtype,
        kernel_dtype,
        in_dtype,
        is_depthwise=False,
    )
    relay_fn, fn_params = make_model(
        shape=ifm_shape,
        kernel_shape=kernel_shape,
        input_zero_point=input_zero_point,
        input_scale=input_scale,
        kernel_zero_point=kernel_zero_point,
        kernel_scale=kernel_scale,
        output_zero_point=output_zero_point,
        output_scale=output_scale,
        padding="SAME",
        strides=(1, 1),
        dilation=(1, 1),
        groups=1,
        dtype=in_dtype,
        kernel_dtype=kernel_dtype,
        bias_dtype=bias_dtype,
        out_channels=out_channels,
        kernel_layout=kernel_layout,
        enable_bias=True,
        relu_type="NONE",
    )
    module = make_module(relay_fn)
    partitioned = cmsisnn.partition_for_cmsisnn(module, fn_params)
    # Unsupported dtype / zero-point combinations must not be offloaded.
    assert_no_external_function(partitioned)
if __name__ == "__main__":
    # Allow running this test file directly; delegates to TVM's pytest wrapper.
    tvm.testing.main()
| 29,902 | 28.145224 | 96 | py |
tvm | tvm-main/tests/python/contrib/test_cmsisnn/test_networks.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN: testing with networks"""
import pytest
import numpy as np
import tvm.testing
from tvm import relay
from tvm.contrib.download import download_testdata
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import AOTTestModel, get_dtype_range, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
AOT_USMP_CORSTONE300_RUNNER,
)
from .utils import skip_if_no_reference_system
# pylint: disable=import-outside-toplevel
def _convert_to_relay(
    tflite_model_buf,
    input_data,
    input_node,
):
    """Converts TFLite model to Relay module and params"""

    def _as_list(value):
        # Normalize single inputs to a one-element list.
        return value if isinstance(value, list) else [value]

    # TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1
    try:
        import tflite.Model

        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
    except AttributeError:
        import tflite

        tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
    except ImportError:
        raise ImportError("The tflite package must be installed")

    data_list = _as_list(input_data)
    node_list = _as_list(input_node)

    # Pair each input name with the shape/dtype of its sample data.
    shape_dict = {name: data.shape for name, data in zip(node_list, data_list)}
    dtype_dict = {name: data.dtype.name for name, data in zip(node_list, data_list)}

    mod, params = relay.frontend.from_tflite(
        tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict
    )
    return mod, params
@skip_if_no_reference_system
@tvm.testing.requires_package("tflite")
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("test_runner", [AOT_CORSTONE300_RUNNER, AOT_USMP_CORSTONE300_RUNNER])
def test_cnn_small(test_runner):
    """Download a small network and tests TVM via CMSIS-NN output against TFLite output"""
    # Fetch the pinned keyword-spotting model from the ARM model zoo.
    base_url = (
        "https://github.com/ARM-software/ML-zoo/raw/"
        "48a22ee22325d15d2371a6df24eb7d67e21dcc97"
        "/models/keyword_spotting/cnn_small/tflite_int8"
    )
    model_path = download_testdata(
        "{}/{}".format(base_url, "cnn_s_quantized.tflite"), "cnn_s_quantized_15Dec2021.tflite"
    )
    with open(model_path, "rb") as model_file:
        tflite_buf = model_file.read()

    # Random int8 input of the model's expected shape.
    dtype = "int8"
    in_min, in_max = get_dtype_range(dtype)
    rng = np.random.default_rng(12345)
    input_data = rng.integers(in_min, high=in_max, size=(1, 490), dtype=dtype)

    orig_mod, params = _convert_to_relay(tflite_buf, input_data, "input")
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)

    # Compare the CMSIS-NN build against the CPU reference output.
    inputs = {"input": input_data}
    params = {}
    output_list = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=params,
            output_tolerance=1,
        ),
        test_runner,
        "c",
        True,
    )
@tvm.testing.requires_package("tflite")
def test_keyword_scramble():
    """Download keyword_scrambled and test for Relay conversion.
    In future, this test can be extended for CMSIS-NN"""
    # Fetch the pinned model from the tflite-micro repository.
    base_url = (
        "https://github.com/tensorflow/tflite-micro/raw/"
        "de8f61a074460e1fa5227d875c95aa303be01240/"
        "tensorflow/lite/micro/models"
    )
    model_path = download_testdata(
        "{}/{}".format(base_url, "keyword_scrambled.tflite"), "keyword_scrambled.tflite"
    )
    with open(model_path, "rb") as model_file:
        tflite_buf = model_file.read()

    # Random int8 input of the model's expected shape.
    dtype = "int8"
    in_min, in_max = get_dtype_range(dtype)
    rng = np.random.default_rng(12345)
    input_data = rng.integers(in_min, high=in_max, size=(1, 96), dtype=dtype)

    # The model contains operators the TFLite frontend does not implement,
    # so the conversion itself is expected to raise.
    with pytest.raises(tvm.error.OpNotImplemented):
        _, _ = _convert_to_relay(tflite_buf, input_data, "input")
if __name__ == "__main__":
    # Allow running this test file directly; delegates to TVM's pytest wrapper.
    tvm.testing.main()
| 5,041 | 32.171053 | 94 | py |
tvm | tvm-main/tests/python/contrib/test_cmsisnn/test_last_error.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: debug_last_error"""
import re
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import (
get_dtype_range,
generate_ref_data,
AOTTestModel,
compile_and_run,
)
from .utils import (
make_module,
get_same_padding,
make_qnn_relu,
assert_partitioned_function,
create_test_runner,
)
def make_model(
    pool_op,
    shape,
    pool_size,
    strides,
    padding,
    dtype,
    scale,
    zero_point,
    relu_type,
    layout,
    input_op,
):
    """Create a Relay Function / network model"""
    op = input_op if input_op else relay.var("input", shape=shape, dtype=dtype)

    # For "SAME" padding, pad explicitly with the zero point and also pass the
    # computed padding to the pooling op; "VALID" uses no padding at all.
    pad_ = (0, 0, 0, 0)
    if padding == "SAME":
        pad_ = get_same_padding((shape[1], shape[2]), pool_size, (1, 1), strides)
        op = relay.nn.pad(
            op,
            pad_width=[(0, 0), (pad_[0], pad_[2]), (pad_[1], pad_[3]), (0, 0)],
            pad_value=zero_point,
            pad_mode="constant",
        )

    # Average pooling accumulates in int32, then casts back to the I/O dtype.
    is_avg_pool = pool_op.__name__ == relay.nn.avg_pool2d.__name__
    if is_avg_pool:
        op = relay.cast(op, "int32")
    op = pool_op(
        op, pool_size=pool_size, strides=strides, padding=pad_, ceil_mode=True, layout=layout
    )
    if is_avg_pool:
        op = relay.cast(op, dtype)
    return make_qnn_relu(op, relu_type, scale, zero_point, dtype)
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("debug_last_error", [True, False])
def test_last_error(debug_last_error):
    """Tests debug_last_error.

    Compiles an int16 avg-pool network with the CMSIS-NN backend and checks that
    the generated sources contain (or omit) the TVMGetLastError /
    TVMAPISetLastError error-reporting hooks depending on `debug_last_error`.
    """
    dtype = "int16"
    in_shape = (1, 28, 28, 12)
    pool_size = (3, 3)
    strides = (2, 2)
    padding = "SAME"
    relu_type = "NONE"
    pool_type = relay.nn.avg_pool2d
    zero_point = -34
    scale = 0.0256
    compiler_cpu = "cortex-m55"
    cpu_flags = "+nomve"
    layout = "NHWC"
    input_op = None
    interface_api = "c"
    use_unpacked_api = True
    model = make_model(
        pool_op=pool_type,
        shape=in_shape,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        dtype=dtype,
        scale=scale,
        zero_point=zero_point,
        relu_type=relu_type,
        layout=layout,
        input_op=input_op,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)
    # validate the output
    in_min, in_max = get_dtype_range(dtype)
    inputs = {
        "input": np.random.randint(in_min, high=in_max, size=in_shape, dtype=dtype),
    }
    output_list = generate_ref_data(orig_mod["main"], inputs)
    def checker(base_path: str) -> bool:
        """Inspect the generated C sources under `base_path` for error-handling code."""
        def read_file(path):
            with open(path) as f:
                return f.read()
        test = read_file(base_path + "/build/test.c")
        # The AOT test harness should query the last error only in debug builds.
        test_check = "TVMGetLastError" in test
        default_lib2 = read_file(base_path + "/codegen/host/src/default_lib2.c")
        # (?s) makes '.' match newlines; the pattern requires an arm_avgpool_s16 call
        # followed by the two TVMAPISetLastError error branches in order.
        regex = (
            r"(?s)arm_avgpool_s16(.*?)"
            r'ARM_CMSIS_NN_ARG_ERROR: TVMAPISetLastError\("ARM_CMSIS_NN_ARG_ERROR(.*?)'
            r'ARM_CMSIS_NN_NO_IMPL_ERROR: TVMAPISetLastError\("ARM_CMSIS_NN_NO_IMPL_ERROR'
        )
        default_lib2_check = re.search(regex, default_lib2) is not None
        if debug_last_error:
            return test_check and default_lib2_check
        else:
            # Neither file may reference the error hooks when debugging is off.
            return not (test_check or default_lib2_check)
    result = compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=None,
            output_tolerance=1,
        ),
        create_test_runner(compiler_cpu, cpu_flags, debug_last_error=debug_last_error),
        interface_api,
        use_unpacked_api,
        debug_last_error=debug_last_error,
        checker=checker,
    )
    assert result
| 4,772 | 27.927273 | 93 | py |
tvm | tvm-main/tests/python/contrib/test_cmsisnn/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN functions for testing networks"""
import math
from typing import List, Union, Tuple
import numpy as np
import tvm
from tvm import relay
from tvm.testing.aot import AOTTestRunner, get_dtype_range
def skip_if_no_reference_system(func):
    """Decorate *func* so it is skipped on 32-bit (i386) builds, where the
    reference system used by these tests is unavailable."""
    skip_marker = tvm.testing.skip_if_32bit(reason="Reference system unavailable in i386 container")
    return skip_marker(func)
def count_num_calls(mod):
    """Counts number of CallNode(s) in the IRModule.

    Only calls whose callee is an operator (tvm.ir.Op) are counted; calls to
    relay Functions / global vars are traversed but not counted themselves.
    """
    class CallCounter(relay.ExprVisitor):
        def __init__(self):
            super().__init__()
            self.count = 0
        def visit_call(self, call):
            # Count only primitive operator calls, then recurse into arguments.
            if isinstance(call.op, tvm.ir.Op):
                self.count += 1
            super().visit_call(call)
    counter = CallCounter()
    # Visit every global function in the module and accumulate into one counter.
    for var in mod.get_global_vars():
        counter.visit(mod[var.name_hint])
    return counter.count
def assert_partitioned_function(orig_mod, cmsisnn_mod, expected_ops_unchanged=True):
    """
    Asserts that the partitioned module contains at least one function carrying
    the Compiler="cmsis-nn" attribute; raises an AssertionError otherwise.
    Parameters
    ----------
    orig_mod : IRModule
        Pre-partitioning module
    cmsisnn_mod : IRModule
        Post-partitioning module
    expected_ops_unchanged : bool
        Whether the number of CallNode(s) before and after partitioning is
        expected to be the same
    """
    # Collect the attribute dicts of all global functions that have attributes.
    attrs = [
        cmsisnn_mod[var.name_hint].attrs
        for var in cmsisnn_mod.get_global_vars()
        if cmsisnn_mod[var.name_hint].attrs
    ]
    assert any(attrs), "At least one function with external attributes was expected."
    # At least one function must be tagged for the cmsis-nn external compiler.
    compilers = [
        key == "Compiler" and value == "cmsis-nn" for attr in attrs for key, value in attr.items()
    ]
    assert any(compilers), "Module does not contain function for cmsisnn target."
    if expected_ops_unchanged:
        assert count_num_calls(orig_mod) == count_num_calls(
            cmsisnn_mod
        ), "Number of calls changed during partitioning"
def assert_no_external_function(mod):
    """Asserts that no global function in *mod* carries any function attributes,
    i.e. nothing was offloaded to an external compiler."""
    attrs = [mod[var.name_hint].attrs for var in mod.get_global_vars() if mod[var.name_hint].attrs]
    assert not any(attrs), "No function should have an external attribute."
def make_module(func):
    """Creates IRModule from Function.

    Wraps the expression in a relay.Function over its free variables and runs
    type inference so the returned module is ready for partitioning.
    """
    func = relay.Function(relay.analysis.free_vars(func), func)
    mod = tvm.IRModule.from_expr(func)
    mod = relay.transform.InferType()(mod)
    return mod
def get_same_padding(in_shape, kernel, dilation, stride):
    """
    Provides CMSIS-NN padding when output dim == input dim.
    This is TFLu's "SAME" padding case.

    Returns [pad_top, pad_left, pad_bottom, pad_right] for an (H, W) input.
    """

    def _axis_padding(size, kernel_dim, dilation_dim, stride_dim):
        # Effective kernel extent once dilation is applied.
        effective_kernel = dilation_dim * (kernel_dim - 1) + 1
        out_size = int(math.ceil(float(size) / float(stride_dim)))
        total = max(0, (out_size - 1) * stride_dim + effective_kernel - size)
        # Any odd remainder goes to the trailing (bottom/right) side, as in TFLite.
        leading = total // 2
        return leading, total - leading

    pad_top, pad_bottom = _axis_padding(in_shape[0], kernel[0], dilation[0], stride[0])
    pad_left, pad_right = _axis_padding(in_shape[1], kernel[1], dilation[1], stride[1])
    return [pad_top, pad_left, pad_bottom, pad_right]
def get_kernel_bias_dtype(input_dtype):
    """
    Returns (kernel_dtype, bias_dtype) based on input's dtype.

    Raises ValueError for any dtype other than int8 / uint8 / int16.
    """
    # uint8 corresponds to an invalid case, so returning int types
    # does not cause tests to break
    dtype_map = {
        "int8": ("int8", "int32"),
        "uint8": ("int8", "int32"),
        "int16": ("int8", "int64"),
    }
    try:
        return dtype_map[input_dtype]
    except KeyError:
        raise ValueError("Invalid dtype provided to get_kernel_bias_dtype()") from None
def get_conv2d_qnn_params(
    kernel_shape: List[int],
    input_scale: float,
    input_zp: int,
    kernel_scale: Union[float, List[float]],
    kernel_zp: int,
    input_dtype: str = "int8",
    kernel_dtype: str = "int8",
    output_dtype: str = "int8",
    is_depthwise: bool = False,
) -> Tuple[float, int]:
    """
    Calculate the output quantization parameters for convolution based on the input and
    kernel quantization parameters and the data types.
    Parameters
    ----------
    kernel_shape : List[int]
        shape of the kernel
    input_scale : float
        scale of the input tensor
    input_zp : int
        zero point of the input tensor
    kernel_scale : Union[float, List[float]]
        scale(s) of the kernel tensor
    kernel_zp : int
        zero point of the kernel tensor
    is_depthwise : bool
        whether it is a depthwise convolution
    input_dtype : str
        data type of the input tensor
    kernel_dtype : str
        data type of the kernel tensor
    output_dtype : str
        data type of the output tensor
    Returns
    -------
    output_scale : float
        scale of the output tensor
    output_zp : int
        zero point of the output tensor
    """
    # Real-valued range representable by the quantized input.
    input_dtype_min, input_dtype_max = get_dtype_range(input_dtype)
    input_max = input_scale * (input_dtype_max - input_zp)
    input_min = input_scale * (input_dtype_min - input_zp)
    # For per-channel kernel scales, use the extreme scales to bound the range.
    kernel_dtype_min, kernel_dtype_max = get_dtype_range(kernel_dtype)
    kernel_sc_max = np.max(kernel_scale)
    kernel_max = kernel_sc_max * (kernel_dtype_max - kernel_zp)
    kernel_sc_min = np.min(kernel_scale)
    kernel_min = kernel_sc_min * (kernel_dtype_min - kernel_zp)
    # NOTE(review): assumes kernel_shape layout with H=shape[1], W=shape[2],
    # channels=shape[3] — confirm against callers.
    kernel_h = kernel_shape[1]
    kernel_w = kernel_shape[2]
    channels = kernel_shape[3]
    num_elements = kernel_h * kernel_w * channels
    # Adjust the result if it is a depthwise convolution
    if is_depthwise:
        num_elements = num_elements / channels
    # The smallest and largest possible values in the unquantized output tensor
    output_limits = [
        kernel_max * input_max * num_elements,
        kernel_min * input_max * num_elements,
        kernel_min * input_min * num_elements,
        kernel_max * input_min * num_elements,
    ]
    output_max = max(output_limits)
    output_min = min(output_limits)
    # Map the real output range onto the full range of the output dtype.
    output_dtype_min, output_dtype_max = get_dtype_range(output_dtype)
    output_scale = (output_max - output_min) / (output_dtype_max - output_dtype_min)
    output_zp = int(output_dtype_min - (output_min / output_scale))
    return output_scale, output_zp
def make_qnn_relu(expr, fused_activation_fn, scale, zero_point, dtype):
    """Mimics convert_qnn_fused_activation_function from TFLite frontend.

    Maps a TFLite fused-activation name ("NONE", "RELU", "RELU6",
    "RELU_N1_TO_1") onto a relay clip over the quantized tensor `expr`.
    Raises ValueError for any other name.
    """
    # Convert a real value into its quantized representation.
    quantize = lambda x: float(int(round(x / scale)) + zero_point)
    # Get min/max of the output dtype. This will be used to ensure that clip a_min/a_max are not
    # beyond the dtype range.
    qmin, qmax = get_dtype_range(dtype)
    # The input expr is a quantized tensor with its scale and zero point. We calculate the
    # suitable clip off points based on these scale and zero point.
    if fused_activation_fn == "NONE":
        return expr
    if fused_activation_fn == "RELU6":
        return tvm.relay.op.clip(expr, a_min=max(qmin, quantize(0)), a_max=min(qmax, quantize(6.0)))
    if fused_activation_fn == "RELU_N1_TO_1":
        return tvm.relay.op.clip(
            expr, a_min=max(qmin, quantize(-1.0)), a_max=min(qmax, quantize(1.0))
        )
    if fused_activation_fn == "RELU":
        return tvm.relay.op.clip(expr, a_min=max(qmin, quantize(0.0)), a_max=qmax)
    raise ValueError("Invalid argument provided with fused_activation_fn")
class CheckForPadsWithinCompositeFunc(tvm.relay.ExprVisitor):
    """Provides method to test number of pads present inside the function being visited."""
    def __init__(self):
        super().__init__()
        # Number of nn.pad operator calls seen so far.
        self.num_pads_ = 0
    def visit_call(self, call):
        # Recurse first, then count this node if it is an nn.pad operator call.
        super().visit_call(call)
        if (
            isinstance(call, tvm.relay.Call)
            and isinstance(call.op, tvm.ir.op.Op)
            and call.op.name == "nn.pad"
        ):
            self.num_pads_ += 1
    def assert_no_pads_within_func(self):
        """Assert the visited function contained no nn.pad calls."""
        assert self.num_pads_ == 0, "CMSIS-NN composite function should not have pads."
    def assert_pads_within_func(self):
        """Assert the visited function contained at least one nn.pad call."""
        assert self.num_pads_ > 0, "Composite function should have pads within it."
def create_test_runner(compiler_cpu="cortex-m55", cpu_flags="", debug_last_error=False):
    """
    Creates AOT test runner for CMSIS-NN tests.
    Parameters
    ----------
    compiler_cpu : str
        Equivalent of gcc option mcpu
        Options: cortex-m55, cortex-m7
    cpu_flags: str
        Disable Arm(R) Cortex(R)-M profile vector extension (mve)
        Options:
        Arm(R) Cortex(R)-M55: when null +mve is set by default.
        +nomve disables vector extensions.
        Arm(R) Cortex(R)-M7 does not support mve.
    debug_last_error: bool
        Whether to enable storing the last error

    Returns
    -------
    AOTTestRunner
        Runner configured for the Corstone-300 reference system.
    """
    # cmsis_cpu is used to find out start up code inside CMSIS package
    cmsis_cpu = "ARMCM7" if compiler_cpu == "cortex-m7" else "ARMCM55"
    mfloat_abi = "soft" if compiler_cpu == "cortex-m7" else "hard"
    return AOTTestRunner(
        makefile="corstone300",
        prologue="""
        UartStdOutInit();
        """,
        includes=["uart_stdout.h"],
        pass_config={
            # Options forwarded to the CMSIS-NN external codegen.
            "relay.ext.cmsisnn.options": {
                "mcpu": compiler_cpu + cpu_flags,
                "debug_last_error": debug_last_error,
            },
            "tir.usmp.enable": True,
            "tir.disable_storage_rewrite": True,
        },
        parameters={
            "ARM_CPU": cmsis_cpu,
            "MCPU": compiler_cpu,
            "MCPU_FLAGS": cpu_flags,
            "MFLOAT_ABI": mfloat_abi,
            "DEBUG_LAST_ERROR": 1 if debug_last_error else 0,
        },
    )
| 10,265 | 33.449664 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_cmsisnn/test_fully_connected.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: Fully Connected"""
import itertools
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import get_dtype_range, generate_ref_data, AOTTestModel, compile_and_run
from .utils import (
make_module,
get_conv2d_qnn_params,
make_qnn_relu,
assert_partitioned_function,
assert_no_external_function,
create_test_runner,
get_kernel_bias_dtype,
)
def make_model(
    in_shape,  # [batchsize, in_channels]
    kernel_shape,  # [out_channels, num_inputs]
    input_zero_point,
    kernel_zero_point,
    input_scale,
    kernel_scale,
    output_zero_point,
    output_scale,
    dtype,
    kernel_dtype,
    bias_dtype,
    out_channels,
    enable_bias,
    relu_type="NONE",
):
    """Return a model and any parameters it may have.

    Builds a qnn.dense (+ optional bias_add) + requantize + optional
    fused-activation chain and returns (expr, {"w": weight, "b": bias}).
    Note the bias array is generated even when enable_bias is False.
    """
    input_ = relay.var("input", shape=in_shape, dtype=dtype)
    # Fixed seed so the generated weights/bias are reproducible across runs.
    rng = np.random.default_rng(12321)
    kmin, kmax = get_dtype_range(kernel_dtype)
    weight = tvm.nd.array(
        rng.integers(
            kmin,
            high=kmax,
            size=kernel_shape,
            dtype=kernel_dtype,
        )
    )
    weight_const = relay.const(weight, kernel_dtype)
    dense = relay.qnn.op.dense(
        input_,
        weight_const,
        input_zero_point=relay.const(input_zero_point, "int32"),
        kernel_zero_point=relay.const(kernel_zero_point, "int32"),
        input_scale=relay.const(input_scale, "float32"),
        kernel_scale=relay.const(kernel_scale, "float32"),
        units=out_channels,
        out_dtype=bias_dtype,
    )
    bias = tvm.nd.array(rng.integers(0, high=10, size=(out_channels,), dtype=bias_dtype))
    bias_const = relay.const(bias, bias_dtype)
    last_op = relay.nn.bias_add(dense, bias_const) if enable_bias else dense
    # Requantize from the accumulator scale (input_scale * kernel_scale) to the output scale.
    requant_input_sc = input_scale * kernel_scale
    last_op = relay.qnn.op.requantize(
        last_op,
        relay.const(requant_input_sc, "float32"),
        relay.const(0, "int32"),
        relay.const(output_scale, "float32"),
        relay.const(output_zero_point, "int32"),
        out_dtype=dtype,
    )
    last_op = make_qnn_relu(last_op, relu_type, output_scale, output_zero_point, dtype)
    params = {"w": weight, "b": bias}
    return last_op, params
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("dtype", ["int8", "int16"])
@pytest.mark.parametrize("in_shape", [(2, 28), (1, 64)])
@pytest.mark.parametrize("out_channels", [12, 128])
@pytest.mark.parametrize("enable_bias", [False, True])
@pytest.mark.parametrize(
    "input_zero_point, input_scale, kernel_scale",
    [(10, 0.0128, 0.11), (-64, 0.0256, 1.37)],
)
@pytest.mark.parametrize(
    "compiler_cpu, cpu_flags", [("cortex-m55", "+nomve"), ("cortex-m55", ""), ("cortex-m7", "")]
)
def test_ops(
    dtype,
    in_shape,
    enable_bias,
    input_zero_point,
    input_scale,
    kernel_scale,
    out_channels,
    compiler_cpu,
    cpu_flags,
):
    """Test QNN fully connected layer.

    Partitions a qnn.dense model for CMSIS-NN, verifies the partitioning, then
    compiles and runs it on the Corstone-300 runner against reference outputs.
    """
    interface_api = "c"
    use_unpacked_api = True
    kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
    kernel_zero_point = 0
    kernel_shape = [out_channels, in_shape[1]]
    # A dense layer is a 1x1 convolution for the purpose of computing output
    # quantization parameters.
    conv2d_kernel_shape = (1, 1, kernel_shape[0], kernel_shape[1])
    in_min, in_max = get_dtype_range(dtype)
    output_scale, output_zero_point = get_conv2d_qnn_params(
        conv2d_kernel_shape,
        input_scale,
        input_zero_point,
        kernel_scale,
        kernel_zero_point,
        dtype,
    )
    model, params = make_model(
        in_shape,
        kernel_shape,
        input_zero_point,
        kernel_zero_point,
        input_scale,
        kernel_scale,
        output_zero_point,
        output_scale,
        dtype,
        kernel_dtype,
        bias_dtype,
        out_channels,
        enable_bias,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)
    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)
    # validate the output
    rng = np.random.default_rng(12345)
    inputs = {"input": rng.integers(in_min, high=in_max, size=in_shape, dtype=dtype)}
    output_list = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=params,
            output_tolerance=1,
        ),
        create_test_runner(compiler_cpu, cpu_flags),
        interface_api,
        use_unpacked_api,
    )
def parameterize_for_invalid_model(test):
    """Generates parameters for non int8 inputs to fully connected layer.

    Parametrizes *test* with every (in_dtype, kernel_dtype, kernel_zero_point)
    combination except the single valid one: int8/int16 input with an int8
    kernel and a zero kernel zero point.
    """
    in_dtypes = ["uint8", "int8", "int16"]
    kernel_dtypes = ["uint8", "int8"]
    kernel_zero_points = [-33, 10, 0]

    def _is_invalid_combination(combo):
        in_dtype, kernel_dtype, kernel_zp = combo
        # The only supported configuration is excluded from the invalid set.
        return not (in_dtype in ("int8", "int16") and kernel_dtype == "int8" and kernel_zp == 0)

    invalid_combinations = [
        combo
        for combo in itertools.product(in_dtypes, kernel_dtypes, kernel_zero_points)
        if _is_invalid_combination(combo)
    ]
    return pytest.mark.parametrize(
        ["in_dtype", "kernel_dtype", "kernel_zero_point"],
        invalid_combinations,
    )(test)
@tvm.testing.requires_cmsisnn
@parameterize_for_invalid_model
def test_invalid_parameters(
    in_dtype,
    kernel_dtype,
    kernel_zero_point,
):
    """Tests fully connected layer with non int8 inputs.

    For unsupported dtype / zero-point combinations the CMSIS-NN partitioner
    must leave the graph untouched (no external functions produced).
    """
    in_shape = (2, 28)
    out_channels = 2
    input_scale = 1
    input_zero_point = 24
    kernel_scale = [0.11, 0.0237]
    _, bias_dtype = get_kernel_bias_dtype(in_dtype)
    kernel_shape = [out_channels, in_shape[1]]
    # Dense treated as a 1x1 conv to derive output quantization parameters.
    conv2d_kernel_shape = [1, 1, kernel_shape[0], kernel_shape[1]]
    output_scale, output_zero_point = get_conv2d_qnn_params(
        conv2d_kernel_shape,
        input_scale,
        input_zero_point,
        kernel_scale,
        kernel_zero_point,
        in_dtype,
        kernel_dtype,
        in_dtype,
    )
    model, params = make_model(
        in_shape=in_shape,
        kernel_shape=kernel_shape,
        input_zero_point=input_zero_point,
        kernel_zero_point=kernel_zero_point,
        input_scale=input_scale,
        kernel_scale=kernel_scale,
        output_zero_point=output_zero_point,
        output_scale=output_scale,
        dtype=in_dtype,
        kernel_dtype=kernel_dtype,
        bias_dtype=bias_dtype,
        out_channels=out_channels,
        enable_bias=True,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)
    # validate pattern matching
    assert_no_external_function(cmsisnn_mod)
if __name__ == "__main__":
tvm.testing.main()
| 7,511 | 29.412955 | 96 | py |
tvm | tvm-main/tests/python/contrib/test_cmsisnn/test_remove_reshapes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: Reshape removal"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import (
get_dtype_range,
generate_ref_data,
AOTTestModel,
compile_models,
run_and_check,
)
from tvm.micro.testing.aot_test_utils import AOT_USMP_CORSTONE300_RUNNER
from .utils import (
make_module,
get_same_padding,
make_qnn_relu,
assert_partitioned_function,
)
def make_model(
    pool_op,
    shape=(1, 28, 28, 12),
    pool_size=(3, 3),
    strides=(2, 2),
    padding="VALID",
    dtype="int8",
    scale=1,
    zero_point=-33,
    relu_type="RELU",
    layout="NHWC",
    input_op=None,
):
    """Return a model and any parameters it may have,
    all parameters are defaulted to known good values

    Builds an optional pad + pooling + optional fused-activation chain; when
    `input_op` is given it is used as the input expression instead of a fresh
    relay.var.
    """
    if input_op:
        op = input_op
    else:
        op = relay.var("input", shape=shape, dtype=dtype)
    pad_ = (0, 0, 0, 0)
    if padding == "SAME":
        dilation = (1, 1)
        # TFLite-style "SAME" padding over the spatial dims (NHWC: H=shape[1], W=shape[2]).
        pad_ = get_same_padding((shape[1], shape[2]), pool_size, dilation, strides)
        # NOTE(review): pad_ is applied via nn.pad AND also passed to the pool op
        # below — looks like double padding; confirm it is intentional (e.g. for
        # the CMSIS-NN pad-fusion pattern).
        op = relay.nn.pad(
            op,
            pad_width=[(0, 0), (pad_[0], pad_[2]), (pad_[1], pad_[3]), (0, 0)],
            pad_value=zero_point,
            pad_mode="constant",
        )
    # avg_pool2d accumulates in int32, so widen before and narrow back after.
    if pool_op.__name__ == relay.nn.avg_pool2d.__name__:
        op = relay.cast(op, "int32")
    op = pool_op(
        op, pool_size=pool_size, strides=strides, padding=pad_, ceil_mode=True, layout=layout
    )
    if pool_op.__name__ == relay.nn.avg_pool2d.__name__:
        op = relay.cast(op, dtype)
    op = make_qnn_relu(op, relu_type, scale, zero_point, dtype)
    return op
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
def test_reshape_removal(padding):
    """Tests reshape is removed from the network.

    Builds max_pool -> reshape -> avg_pool, partitions it for CMSIS-NN and
    checks whether a reshape function survives in the lowered 'c' module:
    it should survive for "SAME" padding (the reshape gets fused with the
    inserted Pad) and be removed for "VALID" padding.
    """
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER
    in_shape = (1, 28, 28, 12)
    pool_size = (3, 3)
    strides = (2, 2)
    relu_type = "NONE"
    zero_point, scale = (-34, 0.0256)
    max_pool = make_model(
        pool_op=relay.nn.max_pool2d,
        shape=in_shape,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        scale=scale,
        zero_point=zero_point,
        relu_type=relu_type,
    )
    # Spatial dims after pooling differ between padding modes; channel count is
    # tripled-down so the element count matches the pooled output.
    new_shape = (1, 28, 28, 3) if padding == "VALID" else (1, 30, 30, 3)
    reshape = relay.reshape(max_pool, newshape=new_shape)
    model = make_model(
        pool_op=relay.nn.avg_pool2d,
        shape=new_shape,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        scale=scale,
        zero_point=zero_point,
        relu_type=relu_type,
        input_op=reshape,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)
    # generate reference output
    rng = np.random.default_rng(12345)
    in_min, in_max = get_dtype_range("int8")
    inputs = {"input": rng.integers(in_min, high=in_max, size=in_shape, dtype="int8")}
    output_list = generate_ref_data(orig_mod["main"], inputs, params=None)
    # validate presence of depthwise convolution
    compiled_models = compile_models(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=None,
            output_tolerance=1,
        ),
        interface_api,
        use_unpacked_api,
        pass_config=test_runner.pass_config,
    )
    main_mod = None
    for target, mod in compiled_models[0].executor_factory.lowered_ir_mods.items():
        if target.kind.name == "c":
            main_mod = mod
    # Fail with a clear message rather than an AttributeError below.
    assert main_mod is not None, "No lowered IRModule found for the 'c' target."
    # when padding="SAME", extra padding is introduced which causes Reshape to be fused with the
    # Pad. RemoveReshapes pass cannot remove a fused Reshape. Whereas padding="VALID" doesn't need
    # an extra Pad layer. In this case, the pass removes the Reshape from the graph.
    reshapes_present = any("reshape" in gv.name_hint for gv in main_mod.get_global_vars())
    check_reshapes = reshapes_present if padding == "SAME" else not reshapes_present
    expected_reshapes = "a" if padding == "SAME" else "No"
    assert check_reshapes, "Expecting {} reshape layer(s).".format(expected_reshapes)
    # validate the output
    run_and_check(
        models=compiled_models,
        runner=test_runner,
        interface_api=interface_api,
    )
if __name__ == "__main__":
tvm.testing.main()
| 5,341 | 30.423529 | 98 | py |
tvm | tvm-main/tests/python/contrib/test_cmsisnn/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Infrastructure and tests for CMSIS-NN"""
| 829 | 45.111111 | 62 | py |
tvm | tvm-main/tests/python/contrib/test_cmsisnn/test_extract_constants.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: extract_constants pass"""
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
tvm._ffi._init_api("relay.ext.cmsisnn.transform", __name__)
class CheckFunctionsForConstants(tvm.relay.ExprVisitor):
    """Provides methods to test number of constants present in a function"""
    def __init__(self):
        super().__init__()
        # Number of non-scalar constant call arguments seen so far.
        self.num_constants_ = 0
    def visit_call(self, call):
        super().visit_call(call)
        for arg in call.args:
            # Only non-scalar constants count: scalar (ndim == 0) constants are
            # expected to remain inline after extraction.
            if isinstance(arg, relay.Constant) and arg.data.numpy().ndim > 0:
                self.num_constants_ += 1
    def check_num_constants(self):
        """Assert the visited function's calls carried no non-scalar constants."""
        assert self.num_constants_ == 0, "Functions should not have constant arguments in Calls"
def set_external_func_attr(func, compiler, ext_symbol):
    """Mark *func* as an externally-compiled primitive for *compiler*,
    exported under the symbol *ext_symbol*."""
    attributes = {
        "Primitive": tvm.tir.IntImm("int32", 1),
        "Compiler": compiler,
        "global_symbol": ext_symbol,
    }
    for key, value in attributes.items():
        func = func.with_attr(key, value)
    return func
def set_composite_func_attr(func, name):
    """Tag *func* with the "Composite" attribute set to *name*."""
    return func.with_attr("Composite", name)
@tvm.testing.requires_cmsisnn
def test_external_function():
    """Tests the pass ExternConstants when the function is a global function.

    After the pass, the external function should receive its constant as an
    argument instead of embedding it.
    """
    input1_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
    input0 = relay.var("input0", shape=(8, 8))
    input1_const = relay.const(input1_data, "float32")
    binary_op = input0 + input1_const
    extern_func = relay.Function([input0], binary_op, relay.TensorType((8, 8), "float32"))
    global_var = relay.GlobalVar("external_function")
    extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint)
    arg = relay.var("arg", shape=(8, 8))
    call_extern_func = relay.Call(global_var, [arg])
    main_func = relay.Function([arg], call_extern_func, relay.TensorType((8, 8), "float32"))
    main_var = relay.GlobalVar("main")
    mod = tvm.IRModule()
    mod[global_var] = extern_func
    mod[main_var] = main_func
    # Pass is registered into this module's namespace via tvm._ffi._init_api above.
    mod = ExtractConstantsFromPartitionedFunction()(mod)
    constant_verifier = CheckFunctionsForConstants()
    constant_verifier.visit_function(mod[global_var])
    constant_verifier.check_num_constants()
    # Module must still type-check after constant extraction.
    relay.transform.InferType()(mod)
@tvm.testing.requires_cmsisnn
def test_nested_function():
    """Tests the pass ExternConstants when a composite function
    is present within global function
    """
    input1_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
    input0 = relay.var("input0", shape=(8, 8))
    input1_const = relay.const(input1_data, "float32")
    binary_op0 = input0 + input1_const
    # Scalar constant multiplier: scalars are expected to stay inline.
    binary_op1 = binary_op0 * relay.const(5.0, "float32")
    local_func = relay.Function([input0], binary_op1, relay.TensorType((8, 8), "float32"))
    local_func = set_composite_func_attr(local_func, "cmsis-nn")
    arg = relay.var("arg", shape=(8, 8))
    call_local_func = relay.Call(local_func, [arg])
    extern_func = relay.Function([arg], call_local_func, relay.TensorType((8, 8), "float32"))
    global_arg = relay.var("garg", shape=(8, 8))
    global_var = relay.GlobalVar("external_function")
    extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint)
    call_extern_func = relay.Call(global_var, [global_arg])
    main_func = relay.Function([global_arg], call_extern_func, relay.TensorType((8, 8), "float32"))
    main_var = relay.GlobalVar("main")
    mod = tvm.IRModule()
    mod[global_var] = extern_func
    mod[main_var] = main_func
    mod = ExtractConstantsFromPartitionedFunction()(mod)
    # The non-scalar constant must have been hoisted out of the external function.
    constant_verifier = CheckFunctionsForConstants()
    constant_verifier.visit_function(mod[global_var])
    constant_verifier.check_num_constants()
    # Module must still type-check after constant extraction.
    relay.transform.InferType()(mod)
@tvm.testing.requires_cmsisnn
def test_internal_function_with_duplicate_arguments():
    """Tests the pass ExternConstants when a composite function
    is present within global function with repeating arguments
    to one of the binary ops.
    """
    input0 = relay.var("input0", shape=(8, 8))
    # Same var on both sides of the add — exercises duplicate-argument handling.
    binary_op0 = input0 + input0
    binary_op1 = binary_op0 * relay.const(5.0, "float32")
    local_func = relay.Function([input0], binary_op1, relay.TensorType((8, 8), "float32"))
    local_func = set_composite_func_attr(local_func, "cmsis-nn")
    arg = relay.var("arg", shape=(8, 8))
    call_local_func = relay.Call(local_func, [arg])
    extern_func = relay.Function([arg], call_local_func, relay.TensorType((8, 8), "float32"))
    global_arg = relay.var("global_var", shape=(8, 8))
    global_var = relay.GlobalVar("external_function")
    extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint)
    call_extern_func = relay.Call(global_var, [global_arg])
    main_func = relay.Function([global_arg], call_extern_func, relay.TensorType((8, 8), "float32"))
    main_var = relay.GlobalVar("main")
    mod = tvm.IRModule()
    mod[global_var] = extern_func
    mod[main_var] = main_func
    mod = ExtractConstantsFromPartitionedFunction()(mod)
    constant_verifier = CheckFunctionsForConstants()
    constant_verifier.visit_function(mod[global_var])
    constant_verifier.check_num_constants()
    # Module must still type-check after constant extraction.
    relay.transform.InferType()(mod)
@tvm.testing.requires_cmsisnn
def test_multiple_functions():
    """Tests the pass ExternConstants when global function
    contains multiple composite functions inside it
    """
    # First composite: add with a non-scalar constant.
    f0_input1_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
    f0_input0 = relay.var("f0_in0", shape=(8, 8))
    f0_input1_const = relay.const(f0_input1_data, "float32")
    f0_binary_op = f0_input0 + f0_input1_const
    f0_func = relay.Function([f0_input0], f0_binary_op, relay.TensorType((8, 8), "float32"))
    f0_func = set_composite_func_attr(f0_func, "cmsis-nn")
    # Second composite: same shape, its own constant; chained after the first.
    f1_input1_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
    f1_input0 = relay.var("f1_in0", shape=(8, 8))
    f1_input1_const = relay.const(f1_input1_data, "float32")
    f1_binary_op = f1_input0 + f1_input1_const
    f1_func = relay.Function([f1_input0], f1_binary_op, relay.TensorType((8, 8), "float32"))
    f1_func = set_composite_func_attr(f1_func, "cmsis-nn")
    arg0 = relay.var("arg0", shape=(8, 8))
    call_local_func0 = relay.Call(f0_func, [arg0])
    call_local_func1 = relay.Call(f1_func, [call_local_func0])
    extern_func = relay.Function([arg0], call_local_func1, relay.TensorType((8, 8), "float32"))
    input0 = relay.var("input0", shape=(8, 8))
    global_var = relay.GlobalVar("cmsis-nn")
    extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint)
    call_extern_func = relay.Call(global_var, [input0])
    main_func = relay.Function([input0], call_extern_func, relay.TensorType((8, 8), "float32"))
    main_var = relay.GlobalVar("main")
    mod = tvm.IRModule()
    mod[global_var] = extern_func
    mod[main_var] = main_func
    mod = ExtractConstantsFromPartitionedFunction()(mod)
    # Both constants must have been hoisted out of the external function.
    constant_verifier = CheckFunctionsForConstants()
    constant_verifier.visit_function(mod[global_var])
    constant_verifier.check_num_constants()
    # Module must still type-check after constant extraction.
    relay.transform.InferType()(mod)
@tvm.testing.requires_cmsisnn
def test_main_function():
    """Tests the pass ExternConstants on main function.

    Constants appearing in main() itself must NOT be extracted — only those
    inside partitioned external functions are.
    """
    input0 = relay.var("input0", shape=(8, 8))
    input1 = relay.var("input1", shape=(8, 8))
    binary_op = input0 + input1
    extern_func = relay.Function([input0, input1], binary_op, relay.TensorType((8, 8), "float32"))
    global_var = relay.GlobalVar("external_function")
    extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint)
    arg = relay.var("arg", shape=(8, 8))
    # This constant lives in main(), feeding the external call's second argument.
    input_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
    input_const = relay.const(input_data, "float32")
    binary_op = arg + input_const
    call_extern_func = relay.Call(global_var, [arg, binary_op])
    main_func = relay.Function([arg], call_extern_func, relay.TensorType((8, 8), "float32"))
    main_var = relay.GlobalVar("main")
    mod = tvm.IRModule()
    mod[global_var] = extern_func
    mod[main_var] = main_func
    mod = ExtractConstantsFromPartitionedFunction()(mod)
    check_for_constants = CheckFunctionsForConstants()
    check_for_constants.visit_call(mod[main_var].body)
    assert (
        check_for_constants.num_constants_ == 1
    ), "main() should have same number of arguments as before"
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("external_compiler", ["cmsis-nn", "other_compiler"])
def test_multiple_functions_non_cmsisnn_compiler(external_compiler):
    """Tests the pass ExternConstants on non CMSIS-NN targets"""
    # First composite function: x20 + <tensor constant>, tagged with a
    # CMSIS-NN composite pattern name.
    y20_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
    x20 = relay.var("x20", shape=(8, 8))
    y20_const = relay.const(y20_data, "float32")
    z20 = x20 + y20_const
    f20 = relay.Function([x20], z20, relay.TensorType((8, 8), "float32"))
    f20 = set_composite_func_attr(f20, "cmsis-nn.qnn_op_1")
    x10 = relay.var("x10", shape=(8, 8))
    call_local_func0 = relay.Call(f20, [x10])
    extern_func0 = relay.Function([x10], call_local_func0, relay.TensorType((8, 8), "float32"))
    # Second composite function with the same structure.
    y21_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
    x21 = relay.var("x21", shape=(8, 8))
    y21_const = relay.const(y21_data, "float32")
    z21 = x21 + y21_const
    f21 = relay.Function([x21], z21, relay.TensorType((8, 8), "float32"))
    f21 = set_composite_func_attr(f21, "cmsis-nn.qnn_op_2")
    x11 = relay.var("x11", shape=(8, 8))
    call_local_func1 = relay.Call(f21, [x11])
    extern_func1 = relay.Function([x11], call_local_func1, relay.TensorType((8, 8), "float32"))
    # Wrap both in partitioned functions whose "Compiler" attribute is the
    # parametrized compiler, and chain them from main().
    input0 = relay.var("input0", shape=(8, 8))
    global_var0 = relay.GlobalVar("external_function_0")
    extern_func0 = set_external_func_attr(extern_func0, external_compiler, global_var0.name_hint)
    call_extern_func0 = relay.Call(global_var0, [input0])
    global_var1 = relay.GlobalVar("external_function_1")
    extern_func1 = set_external_func_attr(extern_func1, external_compiler, global_var1.name_hint)
    call_extern_func1 = relay.Call(global_var1, [call_extern_func0])
    main_func = relay.Function([input0], call_extern_func1, relay.TensorType((8, 8), "float32"))
    main_var = relay.GlobalVar("main")
    mod = tvm.IRModule()
    mod[global_var0] = extern_func0
    mod[global_var1] = extern_func1
    mod[main_var] = main_func
    mod = ExtractConstantsFromPartitionedFunction()(mod)
    check_for_constants = CheckFunctionsForConstants()
    check_for_constants.visit_call(mod[main_var].body)
    # Constants surface as arguments on main's calls only when the external
    # compiler is "cmsis-nn"; other compilers must be left untouched.
    num_extracted_constants = 0
    if external_compiler == "cmsis-nn":
        num_extracted_constants = 2
    assert (
        check_for_constants.num_constants_ == num_extracted_constants
    ), "main() should have same number of arguments as before"
# Standard TVM test entry point: run all tests in this file directly.
if __name__ == "__main__":
    tvm.testing.main()
| 11,655 | 40.928058 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_cmsisnn/test_pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: Pooling"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import (
get_dtype_range,
generate_ref_data,
AOTTestModel,
compile_and_run,
)
from tvm.micro.testing.aot_test_utils import AOT_USMP_CORSTONE300_RUNNER
from .utils import (
make_module,
get_same_padding,
make_qnn_relu,
assert_partitioned_function,
assert_no_external_function,
create_test_runner,
)
def make_model(
    pool_op,
    shape=(1, 28, 28, 12),
    pool_size=(3, 3),
    strides=(2, 2),
    padding="VALID",
    dtype="int8",
    scale=1,
    zero_point=-33,
    relu_type="RELU",
    layout="NHWC",
    input_op=None,
):
    """Build a quantized pooling model with an optional fused activation.

    Starts from ``input_op`` when given, otherwise from a fresh input
    variable. For ``padding == "SAME"`` an explicit ``relay.nn.pad`` is
    inserted in front of the pooling operator. All parameters default to
    known good values.
    """
    op = input_op if input_op else relay.var("input", shape=shape, dtype=dtype)
    explicit_pad = (0, 0, 0, 0)
    if padding == "SAME":
        explicit_pad = get_same_padding((shape[1], shape[2]), pool_size, (1, 1), strides)
        op = relay.nn.pad(
            op,
            pad_width=[
                (0, 0),
                (explicit_pad[0], explicit_pad[2]),
                (explicit_pad[1], explicit_pad[3]),
                (0, 0),
            ],
            pad_value=zero_point,
            pad_mode="constant",
        )
    # avg_pool2d is computed on an int32 tensor; cast in before the pooling
    # call and back to the requested dtype afterwards.
    is_avg_pool = pool_op.__name__ == relay.nn.avg_pool2d.__name__
    if is_avg_pool:
        op = relay.cast(op, "int32")
    op = pool_op(
        op, pool_size=pool_size, strides=strides, padding=explicit_pad, ceil_mode=True, layout=layout
    )
    if is_avg_pool:
        op = relay.cast(op, dtype)
    return make_qnn_relu(op, relu_type, scale, zero_point, dtype)
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("dtype", ["int16", "int8"])
@pytest.mark.parametrize("in_shape", [(1, 28, 28, 12), (1, 64, 100, 4)])
@pytest.mark.parametrize(
    "pool_size, strides, padding", [((3, 3), (2, 2), "SAME"), ((2, 2), (1, 1), "VALID")]
)
@pytest.mark.parametrize("relu_type", ["NONE", "RELU"])
@pytest.mark.parametrize("pool_type", [relay.nn.max_pool2d, relay.nn.avg_pool2d])
@pytest.mark.parametrize("zero_point, scale", [(-34, 0.0256)])
@pytest.mark.parametrize(
    "compiler_cpu, cpu_flags", [("cortex-m55", "+nomve"), ("cortex-m55", ""), ("cortex-m7", "")]
)
def test_ops(
    dtype,
    in_shape,
    pool_size,
    strides,
    padding,
    relu_type,
    pool_type,
    zero_point,
    scale,
    compiler_cpu,
    cpu_flags,
):
    """Tests QNN pooling op for int8 and int16 pooling"""
    interface_api = "c"
    use_unpacked_api = True
    # Build the pooling model and partition it for CMSIS-NN.
    model = make_model(
        pool_op=pool_type,
        shape=in_shape,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        dtype=dtype,
        scale=scale,
        zero_point=zero_point,
        relu_type=relu_type,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)
    # validate the output
    in_min, in_max = get_dtype_range(dtype)
    np.random.seed(0)
    inputs = {
        "input": np.random.randint(in_min, high=in_max, size=in_shape, dtype=dtype),
    }
    # Compare against the reference interpreter output on the target runner.
    output_list = generate_ref_data(orig_mod["main"], inputs)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=None,
            output_tolerance=1,
        ),
        create_test_runner(compiler_cpu, cpu_flags),
        interface_api,
        use_unpacked_api,
    )
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize(
    "pool_size, strides, padding", [((3, 3), (2, 2), "SAME"), ((2, 2), (1, 1), "VALID")]
)
@pytest.mark.parametrize("relu_type", ["NONE", "RELU"])
def test_int8_pool_with_float32_input(
    pool_size,
    strides,
    padding,
    relu_type,
):
    """Tests QNN maxpool partitions with float32 input"""
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER
    in_shape = (1, 28, 28, 12)
    zero_point, scale = (-34, 0.0256)
    # Float32 front end: add + quantize produce the int8 feed for the pool.
    input_ = relay.var("input", shape=in_shape, dtype="float32")
    op = relay.op.add(input_, input_)
    op = relay.qnn.op.quantize(op, relay.const(scale), relay.const(zero_point), -1, "int8")
    model = make_model(
        pool_op=relay.nn.max_pool2d,
        shape=in_shape,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        scale=scale,
        zero_point=zero_point,
        relu_type=relu_type,
        input_op=op,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)
    # validate the output
    np.random.seed(0)
    inputs = {"input": np.random.uniform(0, 1, in_shape).astype("float32")}
    output_list = generate_ref_data(orig_mod["main"], inputs)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=None,
            output_tolerance=1,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("op", [relay.nn.avg_pool2d, relay.nn.max_pool2d])
def test_invalid_datatype(op):
    """Pooling on an unsupported dtype (int64) must not be partitioned."""
    relay_mod = make_module(make_model(pool_op=op, dtype="int64"))
    partitioned_mod = cmsisnn.partition_for_cmsisnn(relay_mod)
    # Nothing should have been offloaded to the external compiler.
    assert_no_external_function(partitioned_mod)
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("op", [relay.nn.avg_pool2d, relay.nn.max_pool2d])
def test_invalid_batch_size(op):
    """Pooling with a batch size other than 1 must not be partitioned."""
    batched_shape = (2, 28, 28, 12)
    relay_mod = make_module(make_model(pool_op=op, shape=batched_shape))
    partitioned_mod = cmsisnn.partition_for_cmsisnn(relay_mod)
    # Nothing should have been offloaded to the external compiler.
    assert_no_external_function(partitioned_mod)
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("op", [relay.nn.avg_pool2d, relay.nn.max_pool2d])
def test_invalid_layout(op):
    """Pooling with a non-NHWC layout must not be partitioned."""
    relay_mod = make_module(make_model(pool_op=op, layout="NCHW"))
    partitioned_mod = cmsisnn.partition_for_cmsisnn(relay_mod)
    # Nothing should have been offloaded to the external compiler.
    assert_no_external_function(partitioned_mod)
# Standard TVM test entry point: run all tests in this file directly.
if __name__ == "__main__":
    tvm.testing.main()
| 7,285 | 28.497976 | 96 | py |
tvm | tvm-main/tests/python/contrib/test_cmsisnn/test_binary_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: binary ops"""
import itertools
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import get_dtype_range, generate_ref_data, AOTTestModel, compile_and_run
from tvm.micro.testing.aot_test_utils import (
AOT_USMP_CORSTONE300_RUNNER,
)
from .utils import (
skip_if_no_reference_system,
make_module,
make_qnn_relu,
assert_partitioned_function,
assert_no_external_function,
create_test_runner,
)
def generate_tensor_constant():
    """Return a fixed-seed random (1, 16, 16, 3) int8 tensor as a Relay constant."""
    dtype = "int8"
    low, high = get_dtype_range(dtype)
    rng = np.random.default_rng(12321)
    data = rng.integers(low, high=high, size=(1, 16, 16, 3), dtype=dtype)
    return relay.const(tvm.nd.array(data), dtype)
def generate_scalar_constant():
    """Return the scalar int8 Relay constant -30."""
    return relay.const(-30, "int8")
def generate_variable(name, dtype="int8"):
    """Return a (1, 16, 16, 3) Relay variable with the given name and dtype."""
    shape = (1, 16, 16, 3)
    return relay.var(name, shape=shape, dtype=dtype)
def make_model(
    op,
    input_0,
    input_1,
    input_0_scale,
    input_0_zero_point,
    input_1_scale,
    input_1_zero_point,
    relu_type="NONE",
    out_scale=1.0 / 256,
    out_zero_point=-128,
):
    """Create a Relay Function / network model.

    Wraps the QNN binary operator ``op`` with constant scale / zero-point
    operands and an optional fused activation.
    """
    scales = (input_0_scale, input_1_scale, out_scale)
    zero_points = (input_0_zero_point, input_1_zero_point, out_zero_point)
    # Interleave the quantization parameters in the order the QNN binary
    # operator expects: scale0, zp0, scale1, zp1, out_scale, out_zp.
    quant_args = []
    for scale, zero_point in zip(scales, zero_points):
        quant_args.append(relay.const(scale, "float32"))
        quant_args.append(relay.const(zero_point, "int32"))
    binary_op = op(input_0, input_1, *quant_args)
    return make_qnn_relu(binary_op, relu_type, out_scale, out_zero_point, "int8")
@skip_if_no_reference_system
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("op", [relay.qnn.op.mul, relay.qnn.op.add])
@pytest.mark.parametrize("relu_type", ["RELU", "NONE"])
@pytest.mark.parametrize(
    [
        "input_0_scale",
        "input_0_zero_point",
        "input_1_scale",
        "input_1_zero_point",
    ],
    [[0.256, 33, 0.256, 33], [0.0128, -64, 0.0128, -64], [0.0128, -64, 0.256, 33]],
)
@pytest.mark.parametrize(
    "compiler_cpu, cpu_flags", [("cortex-m55", "+nomve"), ("cortex-m55", ""), ("cortex-m7", "")]
)
def test_op_int8(
    op,
    relu_type,
    input_0_scale,
    input_0_zero_point,
    input_1_scale,
    input_1_zero_point,
    compiler_cpu,
    cpu_flags,
):
    """Tests QNN binary operator for CMSIS-NN"""
    interface_api = "c"
    use_unpacked_api = True
    dtype = "int8"
    shape = [1, 16, 16, 3]
    # Binary op over two int8 variable operands with per-operand quantization.
    model = make_model(
        op,
        generate_variable("input_0"),
        generate_variable("input_1"),
        input_0_scale,
        input_0_zero_point,
        input_1_scale,
        input_1_zero_point,
        relu_type,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)
    # validate the output
    in_min, in_max = get_dtype_range(dtype)
    inputs = {
        "input_0": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype),
        "input_1": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype),
    }
    output_list = generate_ref_data(orig_mod["main"], inputs)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            output_tolerance=1,
        ),
        create_test_runner(compiler_cpu, cpu_flags),
        interface_api,
        use_unpacked_api,
    )
@skip_if_no_reference_system
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("op", [relay.qnn.op.mul, relay.qnn.op.add])
@pytest.mark.parametrize("relu_type", ["RELU", "NONE"])
@pytest.mark.parametrize(
    [
        "input_0_scale",
        "input_1_scale",
        "output_scale",
    ],
    [
        [0.256, 0.256, 0.256],
        [0.0128, 0.0128, 0.0128],
        [0.0128, 0.256, 0.256],
    ],
)
@pytest.mark.parametrize(
    "compiler_cpu, cpu_flags", [("cortex-m55", "+nomve"), ("cortex-m55", ""), ("cortex-m7", "")]
)
def test_op_int16(
    op,
    relu_type,
    input_0_scale,
    input_1_scale,
    output_scale,
    compiler_cpu,
    cpu_flags,
):
    """Tests QNN 16bit binary operators for CMSIS-NN"""
    interface_api = "c"
    use_unpacked_api = True
    dtype = "int16"
    shape = [1, 16, 16, 3]
    # All zero points are fixed to 0: the int16 kernels only partition for
    # zero-point-free quantization (see test_op_int16_cannot_partition).
    model = make_model(
        op,
        generate_variable("input_0", dtype),
        generate_variable("input_1", dtype),
        input_0_scale,
        0,
        input_1_scale,
        0,
        relu_type,
        output_scale,
        0,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
    assert_partitioned_function(orig_mod, cmsisnn_mod)
    # validate the output
    in_min, in_max = get_dtype_range(dtype)
    inputs = {
        "input_0": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype),
        "input_1": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype),
    }
    output_list = generate_ref_data(orig_mod["main"], inputs)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            output_tolerance=1,
        ),
        create_test_runner(compiler_cpu, cpu_flags),
        interface_api,
        use_unpacked_api,
    )
@skip_if_no_reference_system
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("op", [relay.qnn.op.mul, relay.qnn.op.add])
@pytest.mark.parametrize("relu_type", ["RELU", "NONE"])
@pytest.mark.parametrize(
    [
        "input_0_scale",
        "input_0_zero_point",
        "input_1_scale",
        "input_1_zero_point",
        "output_scale",
        "output_zero_point",
    ],
    [
        [0.256, 0, 0.256, 33, 0.256, 33],
        [0.0128, -64, 0.0128, 0, 0.0128, -64],
        [0.0128, -64, 0.256, 33, 0.256, 0],
    ],
)
def test_op_int16_cannot_partition(
    op,
    relu_type,
    input_0_scale,
    input_0_zero_point,
    input_1_scale,
    input_1_zero_point,
    output_scale,
    output_zero_point,
):
    """Tests QNN 16bit binary operators for CMSIS-NN in the edge case of
    non-zero zero points"""
    # Each parametrization has at least one non-zero zero point, which the
    # int16 CMSIS-NN kernels cannot handle.
    model = make_model(
        op,
        generate_variable("input_0", "int16"),
        generate_variable("input_1", "int16"),
        input_0_scale,
        input_0_zero_point,
        input_1_scale,
        input_1_zero_point,
        relu_type,
        output_scale,
        output_zero_point,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
    # arm_elementwise_(mul|add)_s16 does not support non-zero shifts in any
    # argument
    assert_no_external_function(cmsisnn_mod)
@skip_if_no_reference_system
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("op", [relay.qnn.op.mul, relay.qnn.op.add])
@pytest.mark.parametrize("relu_type", ["RELU", "NONE"])
def test_same_input_to_binary_op(op, relu_type):
    """Tests QNN binary operator for CMSIS-NN where both inputs are the same"""
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER
    dtype = "int8"
    shape = [1, 16, 16, 3]
    # The same variable is used for both operands of the binary op.
    input_ = generate_variable("input")
    input_scale = 0.256
    input_zero_point = 33
    model = make_model(
        op,
        input_,
        input_,
        input_scale,
        input_zero_point,
        input_scale,
        input_zero_point,
        relu_type,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)
    # Check if the number of internal function parameter is 1
    cmsisnn_global_func = cmsisnn_mod["tvmgen_default_cmsis_nn_main_0"]
    assert (
        isinstance(cmsisnn_global_func.body, tvm.relay.expr.Call)
        and len(cmsisnn_global_func.body.args) == 1
    ), "Composite function for the binary op should have only 1 parameter."
    # validate the output
    in_min, in_max = get_dtype_range(dtype)
    inputs = {
        "input": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype),
    }
    output_list = generate_ref_data(orig_mod["main"], inputs)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            output_tolerance=1,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
def parameterize_for_constant_inputs(test):
    """Generates parameters in such a way so that at least one of the inputs is a constant,
    both can't be variables, both can't be scalars.
    """
    op = [relay.qnn.op.mul, relay.qnn.op.add]
    input_0 = [generate_variable("input_0"), generate_tensor_constant(), generate_scalar_constant()]
    input_1 = [generate_variable("input_1"), generate_tensor_constant(), generate_scalar_constant()]
    all_combinations = itertools.product(op, input_0, input_1)
    # Drop combinations where (a) both operands are variables, or (b) both
    # operands are 0-d (scalar) constants.
    all_combinations = filter(
        lambda parameters: not (
            (
                isinstance(parameters[1], tvm.relay.expr.Var)
                and isinstance(parameters[2], tvm.relay.expr.Var)
            )
            or (
                isinstance(parameters[1], tvm.relay.expr.Constant)
                and isinstance(parameters[2], tvm.relay.expr.Constant)
                and parameters[1].data.numpy().ndim == 0
                and parameters[2].data.numpy().ndim == 0
            )
        ),
        all_combinations,
    )
    # Apply the surviving (op, input_0, input_1) triples as pytest params.
    return pytest.mark.parametrize(
        ["op", "input_0", "input_1"],
        all_combinations,
    )(test)
@skip_if_no_reference_system
@tvm.testing.requires_cmsisnn
@parameterize_for_constant_inputs
def test_constant_input_int8(op, input_0, input_1):
    """Tests binary ops where one of the operands is a constant"""
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER
    dtype = "int8"
    shape = [1, 16, 16, 3]
    input_0_scale = 0.256
    input_0_zero_point = 33
    input_1_scale = 0.128
    input_1_zero_point = -24
    model = make_model(
        op,
        input_0,
        input_1,
        input_0_scale,
        input_0_zero_point,
        input_1_scale,
        input_1_zero_point,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)
    # validate the output
    in_min, in_max = get_dtype_range(dtype)
    inputs = {}
    # Only variable operands need runtime data; constant operands are part
    # of the model itself.
    if isinstance(input_0, tvm.relay.expr.Var):
        inputs.update({"input_0": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype)})
    if isinstance(input_1, tvm.relay.expr.Var):
        inputs.update({"input_1": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype)})
    output_list = generate_ref_data(orig_mod["main"], inputs)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            output_tolerance=1,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@skip_if_no_reference_system
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("op", [relay.qnn.op.mul, relay.qnn.op.add])
def test_both_scalar_inputs_int8(
    op,
):
    """A binary op whose operands are both scalars must not be partitioned."""
    scale, zero_point = 0.256, 33
    model = make_model(
        op,
        generate_scalar_constant(),
        generate_scalar_constant(),
        scale,
        zero_point,
        scale,
        zero_point,
    )
    partitioned_mod = cmsisnn.partition_for_cmsisnn(make_module(model))
    # Nothing should have been offloaded to the external compiler.
    assert_no_external_function(partitioned_mod)
@skip_if_no_reference_system
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("op", [relay.qnn.op.mul, relay.qnn.op.add])
@pytest.mark.parametrize(["input_dtype"], [["uint8"], ["uint16"]])
def test_invalid_parameters(
    op,
    input_dtype,
):
    """Binary ops on unsigned dtypes must not be partitioned for CMSIS-NN."""
    scale, zero_point = 0.256, 33
    model = make_model(
        op,
        generate_variable("input_0", input_dtype),
        generate_variable("input_1", input_dtype),
        scale,
        zero_point,
        scale,
        zero_point,
    )
    partitioned_mod = cmsisnn.partition_for_cmsisnn(make_module(model))
    # Nothing should have been offloaded to the external compiler.
    assert_no_external_function(partitioned_mod)
# Standard TVM test entry point: run all tests in this file directly.
if __name__ == "__main__":
    tvm.testing.main()
| 13,423 | 27.38055 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_cmsisnn/test_fuse_pads.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: fuse_pads pass"""
import numpy as np
import pytest
import tvm
from tvm.testing.aot import get_dtype_range
from tvm import relay
from .utils import CheckForPadsWithinCompositeFunc
# Exposes the C++ passes registered under "relay.ext.cmsisnn.transform" as
# attributes of this module — this is how CMSISNNFusePads, used below, is
# brought into scope.
tvm._ffi._init_api("relay.ext.cmsisnn.transform", __name__)
def set_external_func_attr(func, compiler, ext_symbol):
    """Mark a Relay function as a partitioned, externally compiled function."""
    # Attribute order mirrors the original: Primitive, Compiler, global_symbol.
    attributes = {
        "Primitive": tvm.tir.IntImm("int32", 1),
        "Compiler": compiler,
        "global_symbol": ext_symbol,
    }
    for key, value in attributes.items():
        func = func.with_attr(key, value)
    return func
def set_composite_func_attr(func, name):
    """Attach the "Composite" pattern name to a Relay function."""
    return func.with_attr("Composite", name)
@pytest.mark.parametrize(
    "ifm_shape, pad_width, conv2d_padding, ofm_shape",
    [
        [(1, 25, 25, 12), ((0, 0), (0, 2), (1, 2), (0, 0)), (1, 1, 1, 1), (1, 26, 28, 2)],
        [(1, 64, 100, 4), ((0, 0), (1, 3), (1, 1), (0, 0)), (0, 0, 0, 0), (1, 64, 100, 2)],
        [(1, 55, 55, 3), ((0, 0), (2, 1), (3, 5), (0, 0)), (0, 0, 1, 1), (1, 57, 59, 2)],
    ],
)
def test_invalid_padding_for_fusion(ifm_shape, pad_width, conv2d_padding, ofm_shape):
    """Negative tests for pads preceding Conv2D that cannot be fused."""
    dtype = "int8"
    kernel_size = (3, 3)
    ofm_channels = 2
    # pad -> qnn.conv2d -> qnn.requantize inside a CMSIS-NN composite.
    local_input = relay.var("local_input", shape=ifm_shape, dtype=dtype)
    pad = relay.nn.pad(
        local_input,
        pad_width=pad_width, # ((), (top, bottom), (left, right), ())
        pad_value=10,
        pad_mode="constant",
    )
    rng = np.random.default_rng(12321)
    in_min, in_max = get_dtype_range(dtype)
    local_weight = tvm.nd.array(
        rng.integers(
            in_min,
            high=in_max,
            size=(ofm_channels, kernel_size[0], kernel_size[1], ifm_shape[3]),
            dtype=dtype,
        )
    )
    local_weight = relay.const(local_weight, dtype)
    conv2d = relay.qnn.op.conv2d(
        pad,
        local_weight,
        relay.const(1, "int32"),
        relay.const(1, "int32"),
        relay.const(1, "float32"),
        relay.const(1, "float32"),
        data_layout="NHWC",
        kernel_layout="OHWI",
        channels=ofm_channels,
        kernel_size=(3, 3),
        padding=conv2d_padding,
        out_dtype="int32",
    )
    requantize = relay.qnn.op.requantize(
        conv2d,
        relay.const(1, "float32"),
        relay.const(1, "int32"),
        relay.const(1, "float32"),
        relay.const(1, "int32"),
        axis=0,
        out_dtype=dtype,
    )
    local_func = relay.Function(relay.analysis.free_vars(requantize), requantize)
    local_func = set_composite_func_attr(local_func, "cmsis-nn.qnn_conv2d")
    mod = tvm.IRModule()
    ext_input = relay.var("ext_input", shape=ifm_shape, dtype=dtype)
    call_local_func = relay.Call(local_func, [ext_input])
    extern_func = relay.Function(relay.analysis.free_vars(call_local_func), call_local_func)
    extern_var = relay.GlobalVar("external_function")
    extern_func = set_external_func_attr(extern_func, "cmsis-nn", extern_var.name_hint)
    mod[extern_var] = extern_func
    main_input = relay.var("main_input", shape=ifm_shape, dtype=dtype)
    call_extern_func = relay.Call(extern_var, [main_input])
    main_func = relay.Function([main_input], call_extern_func, relay.TensorType(ofm_shape, dtype))
    main_var = relay.GlobalVar("main")
    mod[main_var] = main_func
    mod = relay.transform.InferType()(mod)
    # Pad widths above exceed what conv2d padding can absorb, so the pass
    # must reject the fusion with this diagnostic.
    error_regex = r"Difference on each side of a dimension should be either 0 or 1"
    with pytest.raises(tvm.TVMError, match=error_regex):
        mod = CMSISNNFusePads()(mod)
@pytest.mark.parametrize(
    "ifm_shape, pad_width, conv2d_padding, ofm_shape",
    [
        [(1, 25, 25, 12), ((0, 0), (0, 1), (1, 2), (0, 0)), (1, 1, 1, 1), (1, 26, 28, 2)],
        [(1, 64, 100, 4), ((0, 0), (1, 1), (1, 1), (0, 0)), (0, 0, 0, 0), (1, 64, 100, 2)],
        [(1, 55, 55, 3), ((0, 0), (2, 1), (3, 2), (0, 0)), (0, 0, 1, 1), (1, 57, 59, 2)],
    ],
)
def test_pad_conv2d_fusion_noncmsisnn_target(ifm_shape, pad_width, conv2d_padding, ofm_shape):
    """Tests the pads and conv2d fusion for non-cmsisnn targets.
    It is expected that pad will not be fused with Conv2D in this case.
    """
    dtype = "int8"
    kernel_size = (3, 3)
    ofm_channels = 2
    # pad -> qnn.conv2d -> qnn.requantize inside a composite tagged with a
    # NON-CMSIS-NN pattern/compiler name.
    local_input = relay.var("local_input", shape=ifm_shape, dtype=dtype)
    pad = relay.nn.pad(
        local_input,
        pad_width=pad_width, # ((), (top, bottom), (left, right), ())
        pad_value=10,
        pad_mode="constant",
    )
    rng = np.random.default_rng(12321)
    in_min, in_max = get_dtype_range(dtype)
    local_weight = tvm.nd.array(
        rng.integers(
            in_min,
            high=in_max,
            size=(ofm_channels, kernel_size[0], kernel_size[1], ifm_shape[3]),
            dtype=dtype,
        )
    )
    local_weight = relay.const(local_weight, dtype)
    conv2d = relay.qnn.op.conv2d(
        pad,
        local_weight,
        relay.const(1, "int32"),
        relay.const(1, "int32"),
        relay.const(1, "float32"),
        relay.const(1, "float32"),
        data_layout="NHWC",
        kernel_layout="OHWI",
        channels=ofm_channels,
        kernel_size=(3, 3),
        padding=conv2d_padding,
        out_dtype="int32",
    )
    requantize = relay.qnn.op.requantize(
        conv2d,
        relay.const(1, "float32"),
        relay.const(1, "int32"),
        relay.const(1, "float32"),
        relay.const(1, "int32"),
        axis=0,
        out_dtype=dtype,
    )
    local_func = relay.Function(relay.analysis.free_vars(requantize), requantize)
    local_func = set_composite_func_attr(local_func, "noncmsis-nn.qnn_conv2d")
    mod = tvm.IRModule()
    ext_input = relay.var("ext_input", shape=ifm_shape, dtype=dtype)
    call_local_func = relay.Call(local_func, [ext_input])
    extern_func = relay.Function(relay.analysis.free_vars(call_local_func), call_local_func)
    extern_var = relay.GlobalVar("external_function")
    extern_func = set_external_func_attr(extern_func, "noncmsis-nn", extern_var.name_hint)
    mod[extern_var] = extern_func
    main_input = relay.var("main_input", shape=ifm_shape, dtype=dtype)
    call_extern_func = relay.Call(extern_var, [main_input])
    main_func = relay.Function([main_input], call_extern_func, relay.TensorType(ofm_shape, dtype))
    main_var = relay.GlobalVar("main")
    mod[main_var] = main_func
    mod = relay.transform.InferType()(mod)
    mod = CMSISNNFusePads()(mod)
    # The pad must survive: the pass only fuses within CMSIS-NN functions.
    pad_verifier = CheckForPadsWithinCompositeFunc()
    pad_verifier.visit_function(mod[extern_var])
    pad_verifier.assert_pads_within_func()
@pytest.mark.parametrize(
    "ifm_shape, pad_width, conv2d_padding, ofm_shape",
    [
        [(1, 25, 25, 12), ((0, 0), (0, 1), (1, 2), (0, 0)), (1, 1, 1, 1), (1, 26, 28, 2)],
        [(1, 64, 100, 4), ((0, 0), (1, 1), (1, 1), (0, 0)), (0, 0, 0, 0), (1, 64, 100, 2)],
        [(1, 55, 55, 3), ((0, 0), (2, 1), (3, 2), (0, 0)), (0, 0, 1, 1), (1, 57, 59, 2)],
    ],
)
def test_pad_conv2d_fusion(ifm_shape, pad_width, conv2d_padding, ofm_shape):
    """Tests the pads and conv2d fusion."""
    dtype = "int8"
    kernel_size = (3, 3)
    ofm_channels = 2
    # pad -> qnn.conv2d -> qnn.requantize inside a CMSIS-NN composite.
    local_input = relay.var("local_input", shape=ifm_shape, dtype=dtype)
    pad = relay.nn.pad(
        local_input,
        pad_width=pad_width, # ((), (top, bottom), (left, right), ())
        pad_value=10,
        pad_mode="constant",
    )
    rng = np.random.default_rng(12321)
    kmin, kmax = get_dtype_range(dtype)
    local_weight = tvm.nd.array(
        rng.integers(
            kmin,
            high=kmax,
            size=(ofm_channels, kernel_size[0], kernel_size[1], ifm_shape[3]),
            dtype=dtype,
        )
    )
    local_weight = relay.const(local_weight, dtype)
    conv2d = relay.qnn.op.conv2d(
        pad,
        local_weight,
        relay.const(1, "int32"),
        relay.const(1, "int32"),
        relay.const(1, "float32"),
        relay.const(1, "float32"),
        data_layout="NHWC",
        kernel_layout="OHWI",
        channels=ofm_channels,
        kernel_size=(3, 3),
        padding=conv2d_padding,
        out_dtype="int32",
    )
    requantize = relay.qnn.op.requantize(
        conv2d,
        relay.const(1, "float32"),
        relay.const(1, "int32"),
        relay.const(1, "float32"),
        relay.const(1, "int32"),
        axis=0,
        out_dtype=dtype,
    )
    local_func = relay.Function(relay.analysis.free_vars(requantize), requantize)
    local_func = set_composite_func_attr(local_func, "cmsis-nn.qnn_conv2d")
    mod = tvm.IRModule()
    ext_input = relay.var("ext_input", shape=ifm_shape, dtype=dtype)
    call_local_func = relay.Call(local_func, [ext_input])
    extern_func = relay.Function(relay.analysis.free_vars(call_local_func), call_local_func)
    extern_var = relay.GlobalVar("external_function")
    extern_func = set_external_func_attr(extern_func, "cmsis-nn", extern_var.name_hint)
    mod[extern_var] = extern_func
    main_input = relay.var("main_input", shape=ifm_shape, dtype=dtype)
    call_extern_func = relay.Call(extern_var, [main_input])
    main_func = relay.Function([main_input], call_extern_func, relay.TensorType(ofm_shape, dtype))
    main_var = relay.GlobalVar("main")
    mod[main_var] = main_func
    mod = relay.transform.InferType()(mod)
    mod = CMSISNNFusePads()(mod)
    # After the pass no standalone pad should remain inside the function.
    pad_verifier = CheckForPadsWithinCompositeFunc()
    pad_verifier.visit_function(mod[extern_var])
    pad_verifier.assert_no_pads_within_func()
def test_without_preceding_pad():
    """Tests the pass FusePads when padding is not present before qnn.conv2d.

    The conv2d already carries its own ``padding`` attribute and is not
    preceded by a relay.nn.pad, so CMSISNNFusePads must leave the function
    free of standalone pad operators.
    """
    dtype = "int8"
    ifm_shape = (1, 56, 56, 64)
    ofm_shape = (1, 56, 56, 64)
    # qnn.conv2d -> qnn.requantize -> relu inside a CMSIS-NN composite,
    # with no preceding pad operator.
    local_input = relay.var("local_input", shape=ifm_shape, dtype=dtype)
    rng = np.random.default_rng(12321)
    kmin, kmax = get_dtype_range(dtype)
    local_weight = tvm.nd.array(
        rng.integers(
            kmin,
            high=kmax,
            size=(64, 3, 3, 64),
            dtype=dtype,
        )
    )
    local_weight = relay.const(local_weight, dtype)
    conv2d = relay.qnn.op.conv2d(
        local_input,
        local_weight,
        relay.const(1, "int32"),
        relay.const(1, "int32"),
        relay.const(1, "float32"),
        relay.const(1, "float32"),
        data_layout="NHWC",
        kernel_layout="OHWI",
        channels=64,
        kernel_size=(3, 3),
        padding=(1, 1, 1, 1),
        out_dtype="int32",
    )
    requantize = relay.qnn.op.requantize(
        conv2d,
        relay.const(1, "float32"),
        relay.const(1, "int32"),
        relay.const(1, "float32"),
        relay.const(1, "int32"),
        axis=0,
        out_dtype=dtype,
    )
    relu = relay.nn.relu(requantize)
    local_func = relay.Function(relay.analysis.free_vars(relu), relu)
    local_func = set_composite_func_attr(local_func, "cmsis-nn.qnn_conv2d")
    mod = tvm.IRModule()
    ext_input = relay.var("ext_input", shape=ifm_shape, dtype=dtype)
    call_local_func = relay.Call(local_func, [ext_input])
    extern_func = relay.Function(relay.analysis.free_vars(call_local_func), call_local_func)
    extern_var = relay.GlobalVar("external_function")
    extern_func = set_external_func_attr(extern_func, "cmsis-nn", extern_var.name_hint)
    mod[extern_var] = extern_func
    main_input = relay.var("main_input", shape=ifm_shape, dtype=dtype)
    call_extern_func = relay.Call(extern_var, [main_input])
    # Fix: the original built main_func twice in a row; the first, typeless
    # construction was dead code immediately overwritten, so it is removed.
    main_func = relay.Function([main_input], call_extern_func, relay.TensorType(ofm_shape, dtype))
    main_var = relay.GlobalVar("main")
    mod[main_var] = main_func
    mod = relay.transform.InferType()(mod)
    mod = CMSISNNFusePads()(mod)
    pad_verifier = CheckForPadsWithinCompositeFunc()
    pad_verifier.visit_function(mod[extern_var])
    pad_verifier.assert_no_pads_within_func()
| 12,653 | 35.678261 | 98 | py |
tvm | tvm-main/tests/python/contrib/test_cmsisnn/test_scalar_to_tensor_constant.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: scalar_to_tensor_constant pass"""
import numpy as np
import tvm
import tvm.testing
from tvm import relay
tvm._ffi._init_api("relay.ext.cmsisnn.transform", __name__)
def generate_variable(name, shape, dtype="int8"):
    """Return a fresh Relay variable with the requested name, shape and dtype."""
    var = relay.var(name, shape=shape, dtype=dtype)
    return var
def make_binary_op(
    op,
    input_0,
    input_1,
    input_0_scale,
    input_0_zero_point,
    input_1_scale,
    input_1_zero_point,
    out_scale=1.0 / 256,
    out_zero_point=-128,
):
    """Build a QNN binary op whose quantization parameters are Relay constants."""
    scales = [
        relay.const(value, "float32") for value in (input_0_scale, input_1_scale, out_scale)
    ]
    zero_points = [
        relay.const(value, "int32")
        for value in (input_0_zero_point, input_1_zero_point, out_zero_point)
    ]
    return op(
        input_0,
        input_1,
        scales[0],
        zero_points[0],
        scales[1],
        zero_points[1],
        scales[2],
        zero_points[2],
    )
class CheckFunctionsForConstants(tvm.relay.ExprVisitor):
    """Counts tensor (ndim > 0) constants that appear as call arguments."""

    def __init__(self):
        super().__init__()
        # Number of non-scalar constant arguments seen so far.
        self.num_constants_ = 0

    def visit_call(self, call):
        super().visit_call(call)
        self.num_constants_ += sum(
            1
            for arg in call.args
            if isinstance(arg, relay.Constant) and arg.data.numpy().ndim > 0
        )

    def check_num_constants(self):
        assert self.num_constants_ == 0, "Functions should not have constant arguments in Calls"
def set_external_func_attr(func, compiler, ext_symbol):
    """Mark `func` as an externally-compiled primitive for `compiler`."""
    attrs = {
        "Primitive": tvm.tir.IntImm("int32", 1),
        "Compiler": compiler,
        "global_symbol": ext_symbol,
    }
    for key, value in attrs.items():
        func = func.with_attr(key, value)
    return func
def set_composite_func_attr(func, name):
    """Tag `func` with the Composite pattern name used by the partitioner."""
    return func.with_attr("Composite", name)
@tvm.testing.requires_cmsisnn
def test_single_scalar_position_0():
    """Tests conversion to tensor constant when first operand is a scalar"""
    dtype = "int8"
    shape = (8, 8)
    in0 = generate_variable("operand0", None, dtype)
    in1 = generate_variable("operand1", shape, dtype)
    add_op = make_binary_op(
        relay.qnn.op.add,
        in0,
        in1,
        input_0_scale=0.0128,
        input_0_zero_point=32,
        input_1_scale=0.256,
        input_1_zero_point=-64,
    )

    # Composite function with a scalar in the first argument position.
    composite = relay.Function([in0, in1], add_op, relay.TensorType(shape, dtype))
    composite = set_composite_func_attr(composite, "cmsis-nn.qnn_add")

    scalar_arg = relay.expr.const(3, dtype)
    tensor_arg = relay.var("arg1", shape=shape, dtype=dtype)
    composite_call = relay.Call(composite, [scalar_arg, tensor_arg])
    ext_func = relay.Function([tensor_arg], composite_call, relay.TensorType(shape, dtype))

    main_in = relay.var("x", shape=shape, dtype=dtype)
    ext_var = relay.GlobalVar("external_function")
    ext_func = set_external_func_attr(ext_func, "cmsis-nn", ext_var.name_hint)
    ext_call = relay.Call(ext_var, [main_in])
    main_fn = relay.Function([main_in], ext_call, relay.TensorType(shape, dtype))

    mod = tvm.IRModule()
    mod[ext_var] = ext_func
    mod[relay.GlobalVar("main")] = main_fn

    mod = relay.transform.InferType()(mod)
    mod = ScalarToTensorConstants()(mod)
    mod = relay.transform.InferType()(mod)

    checker = CheckFunctionsForConstants()
    checker.visit_call(mod[ext_var].body)
    assert (
        checker.num_constants_ == 1
    ), "Scalar constant wasn't converted into tensor constant"
@tvm.testing.requires_cmsisnn
def test_single_scalar_position_1():
    """Tests conversion to tensor constant when second operand is a scalar"""
    dtype = "int8"
    shape = (8, 8)
    in0 = generate_variable("operand0", shape, dtype)
    in1 = generate_variable("operand1", None, dtype)
    add_op = make_binary_op(
        relay.qnn.op.add,
        in0,
        in1,
        input_0_scale=0.0128,
        input_0_zero_point=32,
        input_1_scale=0.256,
        input_1_zero_point=-64,
    )

    # Composite function with a scalar in the second argument position.
    composite = relay.Function([in0, in1], add_op, relay.TensorType(shape, dtype))
    composite = set_composite_func_attr(composite, "cmsis-nn.qnn_add")

    tensor_arg = relay.var("arg0", shape=shape, dtype=dtype)
    scalar_arg = relay.expr.const(3, dtype)
    composite_call = relay.Call(composite, [tensor_arg, scalar_arg])
    ext_func = relay.Function([tensor_arg], composite_call, relay.TensorType(shape, dtype))

    main_in = relay.var("x", shape=shape, dtype=dtype)
    ext_var = relay.GlobalVar("external_function")
    ext_func = set_external_func_attr(ext_func, "cmsis-nn", ext_var.name_hint)
    ext_call = relay.Call(ext_var, [main_in])
    main_fn = relay.Function([main_in], ext_call, relay.TensorType(shape, dtype))

    mod = tvm.IRModule()
    mod[ext_var] = ext_func
    mod[relay.GlobalVar("main")] = main_fn

    mod = relay.transform.InferType()(mod)
    mod = ScalarToTensorConstants()(mod)
    mod = relay.transform.InferType()(mod)

    checker = CheckFunctionsForConstants()
    checker.visit_call(mod[ext_var].body)
    assert (
        checker.num_constants_ == 1
    ), "Scalar constant wasn't converted into tensor constant"
@tvm.testing.requires_cmsisnn
def test_primary_operands_all_scalars():
    """Tests conversion to tensor constants all operands are scalars"""
    dtype = "int8"
    shape = None
    in0 = generate_variable("operand0", None, dtype)
    in1 = generate_variable("operand1", None, dtype)
    add_op = make_binary_op(
        relay.qnn.op.add,
        in0,
        in1,
        input_0_scale=0.0128,
        input_0_zero_point=32,
        input_1_scale=0.256,
        input_1_zero_point=-64,
    )

    composite = relay.Function([in0, in1], add_op, relay.TensorType(shape, dtype))
    composite = set_composite_func_attr(composite, "cmsis-nn.qnn_add")

    # Both primary operands are scalar constants.
    composite_call = relay.Call(
        composite, [relay.expr.const(7, dtype), relay.expr.const(3, dtype)]
    )
    ext_func = relay.Function([], composite_call, relay.TensorType(shape, dtype))
    ext_var = relay.GlobalVar("external_function")
    ext_func = set_external_func_attr(ext_func, "cmsis-nn", ext_var.name_hint)

    main_fn = relay.Function([], relay.Call(ext_var, []), relay.TensorType(shape, dtype))

    mod = tvm.IRModule()
    mod[ext_var] = ext_func
    mod[relay.GlobalVar("main")] = main_fn
    mod = relay.transform.InferType()(mod)

    mod = ScalarToTensorConstants()(mod)
    new_mod = relay.transform.InferType()(mod)
    # The pass must leave the function unchanged in this case.
    assert tvm.ir.structural_equal(mod[ext_var].body, new_mod[ext_var].body)
@tvm.testing.requires_cmsisnn
def test_all_primary_operands_tensor_constants():
    """Tests conversion to tensor constants all operands are tensors"""
    dtype = "int8"
    shape = (1, 3, 3, 32)
    in0 = generate_variable("operand0", shape, dtype)
    in1 = generate_variable("operand1", shape, dtype)
    add_op = make_binary_op(
        relay.qnn.op.add,
        in0,
        in1,
        input_0_scale=0.0128,
        input_0_zero_point=32,
        input_1_scale=0.256,
        input_1_zero_point=-64,
    )

    composite = relay.Function([in0, in1], add_op, relay.TensorType(shape, dtype))
    composite = set_composite_func_attr(composite, "cmsis-nn.qnn_add")

    rng = np.random.default_rng(12345)
    tensor_args = [
        relay.const(rng.integers(-128, high=127, size=shape, dtype=dtype)) for _ in range(2)
    ]
    composite_call = relay.Call(composite, tensor_args)
    ext_func = relay.Function([], composite_call, relay.TensorType(shape, dtype))
    ext_var = relay.GlobalVar("external_function")
    ext_func = set_external_func_attr(ext_func, "cmsis-nn", ext_var.name_hint)

    main_fn = relay.Function([], relay.Call(ext_var, []), relay.TensorType(shape, dtype))

    mod = tvm.IRModule()
    mod[ext_var] = ext_func
    mod[relay.GlobalVar("main")] = main_fn
    mod = relay.transform.InferType()(mod)

    mod = ScalarToTensorConstants()(mod)
    new_mod = relay.transform.InferType()(mod)
    # Tensor constants are already tensors, so the pass must be a no-op here.
    assert tvm.ir.structural_equal(mod[ext_var].body, new_mod[ext_var].body)
@tvm.testing.requires_cmsisnn
def test_duplicate_constant_arguments():
    """Tests the pass when repeating operands are arguments to the binary op"""
    dtype = "int8"
    shape = (1, 3, 3, 32)
    in0 = generate_variable("operand0", shape, dtype)
    in1 = generate_variable("operand1", shape, dtype)
    # Both primary operands of the binary op are the *same* variable.
    add_op = make_binary_op(
        relay.qnn.op.add,
        in0,
        in0,
        input_0_scale=0.0128,
        input_0_zero_point=32,
        input_1_scale=0.256,
        input_1_zero_point=-64,
    )

    composite = relay.Function([in0, in1], add_op, relay.TensorType(shape, dtype))
    composite = set_composite_func_attr(composite, "cmsis-nn.qnn_add")

    rng = np.random.default_rng(12345)
    tensor_arg = relay.const(rng.integers(-128, high=127, size=shape, dtype=dtype))
    # The same constant is used for both call arguments.
    composite_call = relay.Call(composite, [tensor_arg, tensor_arg])
    ext_func = relay.Function([], composite_call, relay.TensorType(shape, dtype))
    ext_var = relay.GlobalVar("external_function")
    ext_func = set_external_func_attr(ext_func, "cmsis-nn", ext_var.name_hint)

    main_fn = relay.Function([], relay.Call(ext_var, []), relay.TensorType(shape, dtype))

    mod = tvm.IRModule()
    mod[ext_var] = ext_func
    mod[relay.GlobalVar("main")] = main_fn
    mod = relay.transform.InferType()(mod)

    mod = ScalarToTensorConstants()(mod)
    new_mod = relay.transform.InferType()(mod)
    assert tvm.ir.structural_equal(mod[ext_var].body, new_mod[ext_var].body)
@tvm.testing.requires_cmsisnn
def test_non_cmsisnn_ext_func():
    """Non CMSISNN functions should not be altered."""

    def build_module():
        # Builds a module whose external function is marked with a
        # non-CMSIS-NN compiler ("foo"); the pass must leave it alone.
        in0 = relay.var("operand1", shape=None)
        in1 = relay.var("operand2", shape=None)
        local_func = relay.Function([in0, in1], in0 + in1, relay.TensorType((), "float32"))
        local_func = set_composite_func_attr(local_func, "cmsis-nn.qnn_add")

        local_call = relay.Call(
            local_func, [relay.expr.const(5, "float32"), relay.expr.const(3, "float32")]
        )
        ext_func = relay.Function([], local_call, relay.TensorType((), "float32"))
        ext_var = relay.GlobalVar("external_function")
        ext_func = set_external_func_attr(ext_func, "foo", ext_var.name_hint)

        main_fn = relay.Function([], relay.Call(ext_var, []), relay.TensorType((), "float32"))

        mod = tvm.IRModule()
        mod[ext_var] = ext_func
        mod[relay.GlobalVar("main")] = main_fn
        return relay.transform.InferType()(mod)

    expected = build_module()["external_function"].body
    actual = ScalarToTensorConstants()(build_module())["external_function"].body
    assert tvm.ir.structural_equal(expected, actual)
if __name__ == "__main__":
tvm.testing.main()
| 12,381 | 35.74184 | 96 | py |
tvm | tvm-main/tests/python/contrib/test_cmsisnn/test_softmax.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: Softmax"""
import itertools
import numpy as np
import pytest
import tvm.testing
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import get_dtype_range, AOTTestModel, compile_and_run, generate_ref_data
from .utils import (
skip_if_no_reference_system,
make_module,
assert_partitioned_function,
assert_no_external_function,
create_test_runner,
)
def make_model(
    shape, in_dtype, out_dtype, in_zero_point, in_scale, out_zero_point=-128, out_scale=1.0 / 256
):
    """Create a Relay Function / network model"""
    input_var = relay.var("in0", shape=shape, dtype=in_dtype)
    # dequantize -> float softmax -> requantize
    dequant = relay.qnn.op.dequantize(
        input_var,
        input_scale=relay.const(in_scale, "float32"),
        input_zero_point=relay.const(in_zero_point, "int32"),
    )
    softmax = relay.nn.softmax(dequant)
    return relay.qnn.op.quantize(
        softmax,
        output_scale=relay.const(out_scale, "float32"),
        output_zero_point=relay.const(out_zero_point, "int32"),
        out_dtype=out_dtype,
    )
@skip_if_no_reference_system
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize(["zero_point", "scale"], [[33, 0.256], [-64, 0.0128]])
@pytest.mark.parametrize(
    "compiler_cpu, cpu_flags", [("cortex-m55", "+nomve"), ("cortex-m55", ""), ("cortex-m7", "")]
)
def test_op_int8(zero_point, scale, compiler_cpu, cpu_flags):
    """Tests int8 QNN Softmax for CMSIS-NN"""
    interface_api = "c"
    use_unpacked_api = True

    dtype = "int8"
    shape = [1, 16, 16, 3]
    model = make_model(shape, dtype, dtype, zero_point, scale)
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)

    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)

    # validate the output
    in_min, in_max = get_dtype_range(dtype)
    np.random.seed(0)
    # np.random.randint's `high` bound is exclusive; use in_max + 1 so the
    # inputs can actually reach the dtype's maximum value (127 for int8).
    input_data = np.random.randint(in_min, high=in_max + 1, size=shape, dtype=dtype)
    inputs = {"in0": input_data}
    params = {}
    output_list = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(module=cmsisnn_mod, inputs=inputs, outputs=output_list, params=params),
        create_test_runner(compiler_cpu, cpu_flags),
        interface_api,
        use_unpacked_api,
    )
def parameterize_for_invalid_model(test):
    """Generates parameters for non int8 input and output of Softmax"""
    parameter_names = [
        "in_dtype",
        "out_dtype",
        "zero_point",
        "scale",
        "out_zero_point",
        "out_scale",
    ]
    combos = itertools.product(
        ["uint8", "int8"],  # in_dtype
        ["uint8", "int8"],  # out_dtype
        [-128, 64],  # zero_point
        [1.0 / 256, 0.2],  # scale
        [-128, 33],  # out_zero_point
        [1.0 / 256, 0.2],  # out_scale
    )

    def _is_valid_case(params):
        # The one combination CMSIS-NN does support; exclude it.
        return (
            params[0] == "int8"
            and params[1] == "int8"
            and params[4] == -128
            and params[5] == 1.0 / 256
        )

    invalid_combos = (combo for combo in combos if not _is_valid_case(combo))
    return pytest.mark.parametrize(parameter_names, invalid_combos)(test)
@parameterize_for_invalid_model
@tvm.testing.requires_cmsisnn
def test_invalid_parameters(in_dtype, out_dtype, zero_point, scale, out_zero_point, out_scale):
    """Tests for non int8 input and output of Softmax"""
    shape = [1, 16, 16, 3]
    model = make_model(shape, in_dtype, out_dtype, zero_point, scale, out_zero_point, out_scale)
    partitioned_mod = cmsisnn.partition_for_cmsisnn(make_module(model))
    # None of these configurations may be offloaded to CMSIS-NN.
    assert_no_external_function(partitioned_mod)
if __name__ == "__main__":
tvm.testing.main()
| 4,498 | 32.325926 | 97 | py |
tvm | tvm-main/tests/python/contrib/test_hexagon/test_thread_pool.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add hexagon thread pool test"""
import numpy as np
import tvm
import tvm.contrib.hexagon
import tvm.script
import tvm.testing
from tvm.contrib.hexagon.session import Session
from tvm.script import tir as T
from .infrastructure import get_hexagon_target
@tvm.script.ir_module
class ElemwiseSumIRModule:
    """IRModule definition for elementwise sum"""
    # Two PrimFuncs with identical math (C[i] = A[i] + B[i] over n float32
    # elements); they differ only in the loop kind so the tests can compare
    # serial vs. parallel execution on the Hexagon thread pool.
    # pylint: disable=no-self-argument,invalid-name,missing-function-docstring
    @T.prim_func
    def elemwise_sum_serial(a: T.handle, b: T.handle, c: T.handle, n: T.int32):
        T.func_attr({"global_symbol": "elemwise_sum_serial", "tir.noalias": True})
        A = T.match_buffer(a, (n,), dtype="float32")
        B = T.match_buffer(b, (n,), dtype="float32")
        C = T.match_buffer(c, (n,), dtype="float32")
        # Baseline: single-threaded loop.
        for i in T.serial(n):
            with T.block("C"):
                vi = T.axis.spatial(n, i)
                C[vi] = A[vi] + B[vi]
    @T.prim_func
    def elemwise_sum_parallel(a: T.handle, b: T.handle, c: T.handle, n: T.int32):
        T.func_attr({"global_symbol": "elemwise_sum_parallel", "tir.noalias": True})
        A = T.match_buffer(a, (n,), dtype="float32")
        B = T.match_buffer(b, (n,), dtype="float32")
        C = T.match_buffer(c, (n,), dtype="float32")
        # Same computation, but the loop is distributed across worker threads.
        for i in T.parallel(n):
            with T.block("C"):
                vi = T.axis.spatial(n, i)
                C[vi] = A[vi] + B[vi]
    # pylint: enable=no-self-argument,invalid-name,missing-function-docstring
def generate_add_test_data(hexagon_session: Session, n=128 * 1024):
    """Allocate two random float32 input vectors and a zeroed output on the device."""
    device = hexagon_session.device
    inputs = [
        tvm.nd.array(np.random.uniform(size=n).astype("float32"), device) for _ in range(2)
    ]
    output = tvm.nd.array(np.zeros(n, dtype="float32"), device)
    return (inputs[0], inputs[1], output, n)
def benchmark_func(mod, name, args, hexagon_session):
    """Return the mean runtime (seconds) of function `name` over 100 device runs."""
    (a, b, c, n) = args
    timer = mod.time_evaluator(name, hexagon_session.device, number=100)
    return timer(a, b, c, n).mean
@tvm.testing.requires_hexagon
def test_speedup(hexagon_session: Session, capsys):
    """Test speedup"""
    built = tvm.build(
        ElemwiseSumIRModule,
        target=get_hexagon_target("v68"),
    )
    loaded = hexagon_session.load_module(built)
    test_data = generate_add_test_data(hexagon_session)
    # Time both the parallel and serial variants on identical data.
    parallel_mean = benchmark_func(loaded, "elemwise_sum_parallel", test_data, hexagon_session)
    serial_mean = benchmark_func(loaded, "elemwise_sum_serial", test_data, hexagon_session)
    with capsys.disabled():
        print("... speedup of {:.2f}".format(serial_mean / parallel_mean), end=" ")
@tvm.testing.requires_hexagon
def test_elemwise_sum_parallel(hexagon_session: Session):
    """Test parallel elementwise sum"""
    built = tvm.build(
        ElemwiseSumIRModule,
        target=get_hexagon_target("v68"),
    )
    loaded = hexagon_session.load_module(built)

    (a, b, c, n) = generate_add_test_data(hexagon_session)
    loaded["elemwise_sum_parallel"](a, b, c, n)
    # The parallel kernel must produce the same result as numpy addition.
    tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
if __name__ == "__main__":
tvm.testing.main()
| 3,898 | 35.439252 | 89 | py |
tvm | tvm-main/tests/python/contrib/test_hexagon/test_parallel_hvx.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test parallelizing HVX workloads and compare them to single thread examples.
"""
import numpy as np
import tvm
from tvm.script import tir as T
from .infrastructure import get_hexagon_target
TEST_OUTPUT_TEMPLATE = (
"Test {} with {} operations... \n"
" -Single Thread: {} ms \n"
" -Parallel: {} ms\n -Speedup: {}x\n"
)
def get_vrmpy_shape_dtypes(operations):
    """Shapes/dtypes for vrmpy: two uint8 inputs of 128 lanes, one int32 output of 32 lanes."""
    input_spec = ((operations, 128), "uint8")
    return input_spec * 2 + ((operations, 32), "int32")
def get_vmpy_vadd_shape_dtype(operations):
    """Shapes/dtypes for vmpy/vadd: two uint8 inputs and one int16 output, all 128 lanes."""
    input_spec = ((operations, 128), "uint8")
    return input_spec * 2 + ((operations, 128), "int16")
def vmpy_expected_producer(shape, a, b):
    """Golden vmpy output: even lanes fill the low half of a row, odd lanes the high half."""
    a16 = a.astype("int16")
    b16 = b.astype("int16")
    expected = np.zeros(shape, dtype="int16")
    expected[:, :64] = a16[:, 0::2] * b16[:, 0::2]
    expected[:, 64:] = a16[:, 1::2] * b16[:, 1::2]
    return expected
def vadd_expected_producer(shape, a, b):
    """Golden vadd output: even lanes fill the low half of a row, odd lanes the high half."""
    a16 = a.astype("int16")
    b16 = b.astype("int16")
    expected = np.zeros(shape, dtype="int16")
    expected[:, :64] = a16[:, 0::2] + b16[:, 0::2]
    expected[:, 64:] = a16[:, 1::2] + b16[:, 1::2]
    return expected
def vrmpy_expected_producer(shape, a, b):
    """Golden vrmpy output: dot product of each group of four adjacent uint8 lanes."""
    rows = shape[0]
    # Group the 128 lanes into 32 groups of 4 and reduce each group.
    a_groups = a.astype("uint32").reshape(rows, 32, 4)
    b_groups = b.astype("uint32").reshape(rows, 32, 4)
    return (a_groups * b_groups).sum(axis=2).astype("int32")
def get_vmpy_operator(operations):
    """Generate vector multiply operator"""
    @T.prim_func
    def operator(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        a_buffer = T.match_buffer(a, [operations, 128], dtype="uint8")
        b_buffer = T.match_buffer(b, [operations, 128], dtype="uint8")
        c_buffer = T.match_buffer(c, [operations, 128], dtype="int16")
        for n in T.grid(operations):
            with T.block("c_buffer"):
                vn_ind = T.axis.remap("S", [n])
                # One HVX vmpy per row: multiply 128 uint8 lanes pairwise,
                # producing 128 int16 results in a single intrinsic call.
                c_buffer[vn_ind, T.ramp(0, 1, 128)] = T.call_llvm_intrin(
                    T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vmpybusv.128B"),
                    T.uint32(2),
                    T.reinterpret(a_buffer[vn_ind, T.ramp(0, 1, 128)], dtype="int32x32"),
                    T.reinterpret(b_buffer[vn_ind, T.ramp(0, 1, 128)], dtype="int32x32"),
                    dtype="int16x128",
                )
    return operator
def get_vadd_operator(operations):
    """Generate vadd operator."""
    @T.prim_func
    def operator(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        a_buffer = T.match_buffer(a, [operations, 128], dtype="uint8")
        b_buffer = T.match_buffer(b, [operations, 128], dtype="uint8")
        c_buffer = T.match_buffer(c, [operations, 128], dtype="int16")
        for n in T.grid(operations):
            with T.block("c_buffer"):
                vn_ind = T.axis.remap("S", [n])
                # One HVX vadd per row: add 128 uint8 lanes pairwise,
                # widening to 128 int16 results in a single intrinsic call.
                c_buffer[vn_ind, T.ramp(0, 1, 128)] = T.call_llvm_intrin(
                    T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vaddubh.128B"),
                    T.uint32(2),
                    T.reinterpret(a_buffer[vn_ind, T.ramp(0, 1, 128)], dtype="int32x32"),
                    T.reinterpret(b_buffer[vn_ind, T.ramp(0, 1, 128)], dtype="int32x32"),
                    dtype="int16x128",
                )
    return operator
def get_vrmpy_operator(operations):
    """Generate vrmpy operator."""
    @T.prim_func
    def operator(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        a_buffer = T.match_buffer(a, [operations, 128], dtype="uint8")
        b_buffer = T.match_buffer(b, [operations, 128], dtype="uint8")
        # vrmpy reduces each group of 4 uint8 lanes, so the output row has 32 int32 lanes.
        c_buffer = T.match_buffer(c, [operations, 32], dtype="int32")
        for n in T.grid(operations):
            with T.block("c_buffer"):
                vn_ind = T.axis.remap("S", [n])
                c_buffer[vn_ind, T.ramp(0, 1, 32)] = T.call_llvm_intrin(
                    T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vrmpyubv.128B"),
                    T.uint32(2),
                    T.reinterpret(a_buffer[vn_ind, T.ramp(0, 1, 128)], dtype="int32x32"),
                    T.reinterpret(b_buffer[vn_ind, T.ramp(0, 1, 128)], dtype="int32x32"),
                    dtype="int32x32",
                )
    return operator
def evaluate(hexagon_session, shape_dtypes, expected_output_producer, sch):
    """Build the schedule, run it on Hexagon, verify the output and return the mean runtime (ms)."""
    a_shape, a_dtype, b_shape, b_dtype, c_shape, c_dtype = shape_dtypes

    func_tir = tvm.build(sch.mod["main"], target=get_hexagon_target("v68"))
    module = hexagon_session.load_module(func_tir)

    a_host = np.random.randint(0, 16, a_shape, dtype=a_dtype)
    b_host = np.random.randint(0, 16, b_shape, dtype=b_dtype)
    c_host = np.zeros(c_shape, dtype=c_dtype)

    device = hexagon_session.device
    a_dev = tvm.runtime.ndarray.array(a_host, device=device)
    b_dev = tvm.runtime.ndarray.array(b_host, device=device)
    c_dev = tvm.runtime.ndarray.array(c_host, device=device)

    # These are reduced for CI but number=100 and repeat=10 does a good job of removing noise.
    timer = module.time_evaluator("__tvm_main__", device, number=1, repeat=1)
    runtime = timer(a_dev, b_dev, c_dev)

    tvm.testing.assert_allclose(
        c_dev.asnumpy(), expected_output_producer(c_shape, a_host, b_host)
    )

    return round(runtime.mean * 1000, 6)
class TestMatMulVec:
    """MatMul test class."""
    # Each tuple pairs an intrinsic name with its operator builder, its
    # shape/dtype descriptor and the numpy routine producing the golden output.
    (
        operation_name,
        operator_producer,
        shape_dtypes_producer,
        expected_output_producer,
    ) = tvm.testing.parameters(
        ("vrmpy", get_vrmpy_operator, get_vrmpy_shape_dtypes, vrmpy_expected_producer),
        ("vmpy", get_vmpy_operator, get_vmpy_vadd_shape_dtype, vmpy_expected_producer),
        ("vadd", get_vadd_operator, get_vmpy_vadd_shape_dtype, vadd_expected_producer),
    )
    # Experimentally best split factor but all multiples of 4 perform pretty well.
    # This is because there are 4 HVX units available on the device and pipelining
    # works best with parallels of the number of available HVX.
    split_factor = tvm.testing.parameter(4)
    # Removed most of these to speedup CI.
    operation_count = tvm.testing.parameter(
        128,
        # 256,
        # 512,
        # Single thread runs faster since L2 cache can handle the entire request quickly
        # 1024,
        # 2048,
        # Significant performance degredation once the inputs and outputs cannot all fit in L2
        # 4096,
        # 8192,
        # 16384,
    )
    @tvm.testing.requires_hexagon
    def test(
        self,
        hexagon_session,
        operation_count,
        operation_name,
        operator_producer,
        shape_dtypes_producer,
        expected_output_producer,
        split_factor,
    ):
        """Test function handler."""
        # Baseline: unmodified schedule, single thread.
        sch = tvm.tir.Schedule(operator_producer(operation_count))
        single_thread_runtime = evaluate(
            hexagon_session, shape_dtypes_producer(operation_count), expected_output_producer, sch
        )
        # Parallel variant: split the outer loop and parallelize its outer part.
        sch = tvm.tir.Schedule(operator_producer(operation_count))
        block = sch.get_block("c_buffer")
        b = sch.get_loops(block)
        b_output, _ = sch.split(b[0], factors=[split_factor, None])
        sch.parallel(b_output)
        parallel_runtime = evaluate(
            hexagon_session, shape_dtypes_producer(operation_count), expected_output_producer, sch
        )
        speedup = round(single_thread_runtime / parallel_runtime, 2)
        print(
            TEST_OUTPUT_TEMPLATE.format(
                operation_name, operation_count, single_thread_runtime, parallel_runtime, speedup
            )
        )
if __name__ == "__main__":
tvm.testing.main()
| 8,986 | 35.983539 | 98 | py |
tvm | tvm-main/tests/python/contrib/test_hexagon/test_vtcm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTCM Tests"""
import pytest
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from .infrastructure import get_hexagon_target
# Elementwise kernel: doubles each of the 8192 int8 elements of `buffer_a`
# into `buffer_c`; used as the workload for the VTCM caching tests below.
@T.prim_func
def scale_by_two(buffer_a: T.Buffer((8192,), "int8"), buffer_c: T.Buffer((8192,), "int8")):
    for i in T.serial(
        0,
        8192,
    ):
        with T.block("C"):
            buffer_c[i] = buffer_a[i] * T.int8(2)
def get_scale_by_two_schedule():
    """Schedule scale_by_two with the input cached through the global.vtcm scope."""
    mod = tvm.IRModule.from_expr(scale_by_two.with_attr("global_symbol", "main"))
    schedule = tir.Schedule(mod, debug_mask="all")
    compute_block = schedule.get_block("C")
    (flat_loop,) = schedule.get_loops(compute_block)
    outer_loop, _, _, _ = schedule.split(flat_loop, factors=[8, 4, 2, 128])
    # Stage the input read through VTCM, one outer iteration at a time.
    vtcm_cache = schedule.cache_read(compute_block, 0, storage_scope="global.vtcm")
    schedule.compute_at(vtcm_cache, outer_loop)
    return schedule
@tvm.testing.requires_hexagon
def test_vtcm_building():
    """Test building with vtcm mem scope"""
    schedule = get_scale_by_two_schedule()
    built = tvm.build(schedule.mod, target=get_hexagon_target("v68"))
    # The VTCM scope must survive into the generated assembly.
    assert "global.vtcm" in built.get_source("asm")
@tvm.testing.requires_hexagon
@pytest.mark.parametrize("vtcm_capacity,limited", [(8192, False), (1024, False), (128, True)])
def test_vtcm_limit(vtcm_capacity, limited):
    """Test building with vtcm mem scope limit"""
    sch = get_scale_by_two_schedule()

    def _build_fails(build):
        # True when `build()` aborts with a TVMError, i.e. the limiter fired.
        try:
            build()
        except tvm._ffi.base.TVMError:
            return True
        return False

    # Case 1: capacity supplied as a target argument.
    target = get_hexagon_target("v68", vtcm_capacity=vtcm_capacity)
    assert (
        _build_fails(lambda: tvm.build(sch.mod, target=target)) == limited
    ), "Case 1 - arg. VTCM memory allocation limiter does not work correctly "

    # Case 2: capacity taken from the ambient target scope.
    with target:
        assert (
            _build_fails(lambda: tvm.build(sch.mod)) == limited
        ), "Case 2 - with.VTCM memory allocation limiter does not work correctly "

    # Case 3: capacity configured through the PassContext.
    with tvm.transform.PassContext(config={"tir.vtcm_capacity": vtcm_capacity}):
        assert (
            _build_fails(lambda: tvm.build(sch.mod, target=get_hexagon_target("v68")))
            == limited
        ), "Case 3 - context. VTCM memory allocation limiter does not work correctly "
if __name__ == "__main__":
tvm.testing.main()
| 3,059 | 33.382022 | 94 | py |
tvm | tvm-main/tests/python/contrib/test_hexagon/test_parallel_scalar.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test parallelism for multiple different scalar workloads. """
import numpy as np
import tvm
from tvm.script import tir as T
from .infrastructure import get_hexagon_target
TEST_OUTPUT_TEMPLATE = (
"Test {} with {} operations... \n"
" -Single Thread: {} ms \n"
" -Parallel: {} ms\n -Speedup: {}x\n"
)
def get_add_operator(operations):
    """Generate add operator."""
    @T.prim_func
    def operator(a: T.handle, b: T.handle, c: T.handle) -> None:
        # Elementwise float64 addition: c[i] = a[i] + b[i] over `operations` scalars.
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        a_buffer = T.match_buffer(a, [operations], dtype="float64")
        b_buffer = T.match_buffer(b, [operations], dtype="float64")
        c_buffer = T.match_buffer(c, [operations], dtype="float64")
        for n in T.grid(operations):
            with T.block("c_buffer"):
                vn_ind = T.axis.remap("S", [n])
                c_buffer[vn_ind] = a_buffer[vn_ind] + b_buffer[vn_ind]
    return operator
def get_multiply_operator(operations):
    """Generate multiply operator."""
    @T.prim_func
    def operator(a: T.handle, b: T.handle, c: T.handle) -> None:
        # Elementwise float64 multiplication: c[i] = a[i] * b[i] over `operations` scalars.
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        a_buffer = T.match_buffer(a, [operations], dtype="float64")
        b_buffer = T.match_buffer(b, [operations], dtype="float64")
        c_buffer = T.match_buffer(c, [operations], dtype="float64")
        for n in T.grid(operations):
            with T.block("c_buffer"):
                vn_ind = T.axis.remap("S", [n])
                c_buffer[vn_ind] = a_buffer[vn_ind] * b_buffer[vn_ind]
    return operator
def get_sub_operator(operations):
    """Generate subtract operator."""
    @T.prim_func
    def operator(a: T.handle, b: T.handle, c: T.handle) -> None:
        # Elementwise float64 subtraction: c[i] = a[i] - b[i] over `operations` scalars.
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        a_buffer = T.match_buffer(a, [operations], dtype="float64")
        b_buffer = T.match_buffer(b, [operations], dtype="float64")
        c_buffer = T.match_buffer(c, [operations], dtype="float64")
        for n in T.grid(operations):
            with T.block("c_buffer"):
                vn_ind = T.axis.remap("S", [n])
                c_buffer[vn_ind] = a_buffer[vn_ind] - b_buffer[vn_ind]
    return operator
def evaluate(hexagon_session, operations, expected, sch):
    """Build the schedule, run it on Hexagon, verify against `expected` and return the mean runtime (ms)."""
    shape = operations
    dtype = "float64"

    func_tir = tvm.build(sch.mod["main"], target=get_hexagon_target("v68"))
    module = hexagon_session.load_module(func_tir)

    # np.random.random returns float64 by default, but make the cast explicit
    # to make it easier to switch when necessary.
    a_host = np.random.random(shape).astype(dtype)
    b_host = np.random.random(shape).astype(dtype)
    c_host = np.zeros(shape, dtype=dtype)

    device = hexagon_session.device
    a_dev = tvm.runtime.ndarray.array(a_host, device=device)
    b_dev = tvm.runtime.ndarray.array(b_host, device=device)
    c_dev = tvm.runtime.ndarray.array(c_host, device=device)

    # These are reduced for CI but number=100 and repeat=10 does a good job of removing noise.
    timer = module.time_evaluator("__tvm_main__", device, number=1, repeat=1)
    runtime = timer(a_dev, b_dev, c_dev)

    tvm.testing.assert_allclose(c_dev.asnumpy(), expected(a_host, b_host))
    return round(runtime.mean * 1000, 6)
class TestMatMulVec:
    """Benchmark single-threaded vs. parallel elementwise operators.

    NOTE(review): despite the class name, this exercises parametrized
    elementwise add/mul/sub operators, not matrix multiplication.
    """
    (operation_name, operator_producer, expected_output_producer,) = tvm.testing.parameters(
        ("add", get_add_operator, (lambda a, b: a + b)),
        ("mul", get_multiply_operator, (lambda a, b: a * b)),
        ("sub", get_sub_operator, (lambda a, b: a - b)),
    )
    # Removed most of these to speedup CI.
    operations = tvm.testing.parameter(
        128,
        # 256,
        # 512,
        # Single thread runs faster since L2 cache can handle the entire request quickly
        # 1024,
        # 2048,
        # Significant performance degredation once the inputs and outputs cannot all fit in L2
        # 4096,
        # 8192,
        # 16384,
    )
    # Outer split factor that determines the number of parallel chunks.
    split_factor = tvm.testing.parameter(4)
    @tvm.testing.requires_hexagon
    def test_add(
        self,
        hexagon_session,
        operation_name,
        operator_producer,
        expected_output_producer,
        operations,
        split_factor,
    ):
        """Time the parametrized operator single-threaded, then parallelized,
        and print the speedup (despite the name, runs add/mul/sub)."""
        # Baseline: run the operator with its default (single-threaded) schedule.
        sch = tvm.tir.Schedule(operator_producer(operations))
        single_thread_runtime = evaluate(hexagon_session, operations, expected_output_producer, sch)
        # Parallel variant: split the loop and parallelize the outer part.
        sch = tvm.tir.Schedule(operator_producer(operations))
        block = sch.get_block("c_buffer")
        b = sch.get_loops(block)
        b_output, _ = sch.split(b[0], factors=[split_factor, None])
        sch.parallel(b_output)
        parallel_runtime = evaluate(hexagon_session, operations, expected_output_producer, sch)
        speedup = round(single_thread_runtime / parallel_runtime, 2)
        print(
            TEST_OUTPUT_TEMPLATE.format(
                operation_name, operations, single_thread_runtime, parallel_runtime, speedup
            )
        )
# Allow invoking this test file directly (outside of pytest collection).
if __name__ == "__main__":
    tvm.testing.main()
| 6,026 | 33.637931 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_hexagon/conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Hexagon testing fixtures used to deduce testing argument
values from testing parameters """
# Disabling invalid-name check as the name is expected to be exactly this by pytest
# pylint: disable=invalid-name
pytest_plugins = [
"tvm.contrib.hexagon.pytest_plugin",
]
| 1,064 | 39.961538 | 83 | py |
tvm | tvm-main/tests/python/contrib/test_hexagon/test_launcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,missing-function-docstring,redefined-outer-name
""" Test rpc based launcher for hexagon """
import pytest
import numpy as np
import tvm.testing
from tvm import relay, te
from tvm.contrib.hexagon.session import Session
from tvm.relay.backend import Executor, Runtime
from tvm.contrib.hexagon.build import HexagonLauncherRPC
from tvm.contrib.hexagon.hexagon_profiler import HexagonProfiler
from .infrastructure import get_hexagon_target
@tvm.testing.requires_hexagon
def test_add(hexagon_session: Session):
    """Check a scalar-broadcast int8 add runs correctly on the Hexagon device."""
    elem_type = "int8"
    lhs = tvm.te.placeholder((2,), dtype=elem_type)
    rhs = tvm.te.placeholder((1,), dtype=elem_type)
    # C[i] = A[i] + B[0] — the single-element B is broadcast.
    out = tvm.te.compute(lhs.shape, lambda i: lhs[i] + rhs[0], name="C")
    schedule = tvm.te.create_schedule(out.op)
    built = tvm.build(
        schedule,
        [lhs, rhs, out],
        get_hexagon_target("v68"),
        name="add",
    )
    mod = hexagon_session.load_module(built)
    dev = hexagon_session.device
    # Upload the operands and sanity-check each round-trip.
    a_nd = tvm.nd.array(np.array([2, 3], dtype=elem_type), device=dev)
    assert (a_nd.numpy() == np.array([2, 3])).all()
    b_nd = tvm.nd.array(np.array([4], dtype=elem_type), device=dev)
    assert (b_nd.numpy() == np.array([4])).all()
    c_nd = tvm.nd.array(np.array([0, 0], dtype=elem_type), device=dev)
    assert (c_nd.numpy() == np.array([0, 0])).all()
    mod["add"](a_nd, b_nd, c_nd)
    assert (c_nd.numpy() == np.array([6, 7])).all()
@tvm.testing.requires_hexagon
def test_add_vtcm(hexagon_session: Session):
    """Run the scalar-broadcast int8 add with all operands resident in VTCM."""
    elem_type = "int8"
    lhs = tvm.te.placeholder((2,), dtype=elem_type)
    rhs = tvm.te.placeholder((1,), dtype=elem_type)
    # C[i] = A[i] + B[0] — the single-element B is broadcast.
    out = tvm.te.compute(lhs.shape, lambda i: lhs[i] + rhs[0], name="C")
    schedule = tvm.te.create_schedule(out.op)
    built = tvm.build(
        schedule,
        [lhs, rhs, out],
        get_hexagon_target("v68"),
        name="add",
    )
    mod = hexagon_session.load_module(built)
    dev = hexagon_session.device
    # Allocate every tensor in the "global.vtcm" scratchpad instead of DDR.
    a_nd = tvm.nd.empty(lhs.shape, lhs.dtype, dev, "global.vtcm")
    a_nd.copyfrom(np.array([2, 3]))
    b_nd = tvm.nd.empty(rhs.shape, rhs.dtype, dev, "global.vtcm")
    b_nd.copyfrom(np.array([4]))
    c_nd = tvm.nd.empty(out.shape, out.dtype, dev, "global.vtcm")
    c_nd.copyfrom(np.array([0, 0]))
    mod["add"](a_nd, b_nd, c_nd)
    assert (c_nd.numpy() == np.array([6, 7])).all()
class TestMatMul:
    """Compare a 32x32x32 float32 matmul on Hexagon against the LLVM host result."""
    size_m = tvm.testing.parameter(32)
    size_n = tvm.testing.parameter(32)
    size_k = tvm.testing.parameter(32)
    @tvm.testing.requires_hexagon
    def test_matmul(self, hexagon_session, size_m, size_n, size_k):
        """Build Z = X @ Y for Hexagon and verify against a CPU (LLVM) run."""
        placeholder_x = te.placeholder((size_m, size_k), dtype="float32")
        placeholder_y = te.placeholder((size_k, size_n), dtype="float32")
        reduce_k1 = te.reduce_axis((0, size_k), name="k1")
        compute_z = te.compute(
            (size_m, size_n),
            lambda i, j: te.sum(
                placeholder_x[i, reduce_k1] * placeholder_y[reduce_k1, j], axis=[reduce_k1]
            ),
        )
        schedule = te.create_schedule(compute_z.op)
        func = tvm.build(
            schedule,
            [placeholder_x, placeholder_y, compute_z],
            get_hexagon_target("v68"),
        )
        mod = hexagon_session.load_module(func)
        x_data = np.random.uniform(size=[i.value for i in placeholder_x.shape]).astype(
            placeholder_x.dtype
        )
        y_data = np.random.uniform(size=[i.value for i in placeholder_y.shape]).astype(
            placeholder_y.dtype
        )
        z_data = np.zeros([i.value for i in compute_z.shape], dtype=compute_z.dtype)
        x_array = tvm.nd.array(x_data, device=hexagon_session.device)
        y_array = tvm.nd.array(y_data, device=hexagon_session.device)
        z_array = tvm.nd.array(z_data, device=hexagon_session.device)
        mod(x_array, y_array, z_array)
        # Reference run: build the same schedule for host LLVM and execute on CPU.
        target_llvm = tvm.target.Target("llvm")
        mod = tvm.build(
            schedule,
            [placeholder_x, placeholder_y, compute_z],
            tvm.target.Target(target_llvm, host=target_llvm),
        )
        device = tvm.cpu(0)
        xtcpu = tvm.nd.array(x_data, device)
        ytcpu = tvm.nd.array(y_data, device)
        ztcpu = tvm.nd.array(z_data, device)
        mod(xtcpu, ytcpu, ztcpu)
        tvm.testing.assert_allclose(z_array.numpy(), ztcpu.numpy(), rtol=1e-4)
@tvm.testing.requires_hexagon
def test_graph_executor(hexagon_session: Session):
    """Run a single NHWC conv2d through the graph executor on Hexagon and
    verify against the LLVM host graph executor."""
    dtype = "float32"
    data = relay.var("data", relay.TensorType((1, 64, 64, 3), dtype))
    weight = relay.var("weight", relay.TensorType((5, 5, 3, 8), dtype))
    conv2d_op = relay.nn.conv2d(
        data,
        weight,
        padding=(2, 2),
        kernel_size=(5, 5),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="float32",
    )
    f = relay.Function([data, weight], conv2d_op)
    relay_mod = tvm.IRModule.from_expr(f)
    relay_mod = relay.transform.InferType()(relay_mod)
    runtime = Runtime("cpp")
    executor = Executor("graph")
    weight_in = np.random.rand(5, 5, 3, 8).astype(dtype=dtype)
    data_in = np.random.rand(1, 64, 64, 3).astype(dtype=dtype)
    params = {"weight": weight_in}
    inputs = {"data": data_in}
    # Build and run on the Hexagon device.
    with tvm.transform.PassContext(opt_level=3):
        lowered = tvm.relay.build(
            relay_mod,
            get_hexagon_target("v68"),
            runtime=runtime,
            executor=executor,
        )
    graph_mod = hexagon_session.get_executor_from_factory(lowered)
    graph_mod.set_input(**params)
    graph_mod.run(**inputs)
    hexagon_output = graph_mod.get_output(0).numpy()
    # Reference run: same module built for host LLVM, executed on CPU.
    target_llvm = tvm.target.Target("llvm")
    with tvm.transform.PassContext(opt_level=3):
        llvm_lowered = tvm.relay.build(
            relay_mod,
            tvm.target.Target(target_llvm, host=target_llvm),
            runtime=runtime,
            executor=executor,
        )
    llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
    llvm_graph_mod.set_input(**params)
    llvm_graph_mod.run(**inputs)
    expected_output = llvm_graph_mod.get_output(0).numpy()
    tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
@tvm.testing.requires_hexagon
def test_graph_executor_multiple_conv2d(hexagon_session: Session):
    """Test multiple conv2d nodes with graph_executor"""
    dtype = "float32"
    input_shape = (1, 8, 8, 3)
    w1_shape = (5, 5, 3, 1)
    w2_shape = (5, 5, 1, 3)
    data = relay.var("data", relay.TensorType(input_shape, dtype))
    weight1 = relay.var("weight1", relay.TensorType(w1_shape, dtype))
    weight2 = relay.var("weight2", relay.TensorType(w2_shape, dtype))
    # Two chained NHWC conv2d ops: conv2d_op2 consumes the output of conv2d_op1.
    conv2d_op1 = relay.nn.conv2d(
        data,
        weight1,
        padding=(2, 2),
        kernel_size=(5, 5),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="float32",
    )
    conv2d_op2 = relay.nn.conv2d(
        conv2d_op1,
        weight2,
        padding=(2, 2),
        kernel_size=(5, 5),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="float32",
    )
    f = relay.Function([data, weight1, weight2], conv2d_op2)
    relay_mod = tvm.IRModule.from_expr(f)
    relay_mod = relay.transform.InferType()(relay_mod)
    runtime = Runtime("cpp")
    executor = Executor("graph")
    with tvm.transform.PassContext(opt_level=3):
        lowered = tvm.relay.build(
            relay_mod,
            get_hexagon_target("v68"),
            runtime=runtime,
            executor=executor,
        )
    weight1_data = np.random.rand(w1_shape[0], w1_shape[1], w1_shape[2], w1_shape[3]).astype(
        dtype=dtype
    )
    weight2_data = np.random.rand(w2_shape[0], w2_shape[1], w2_shape[2], w2_shape[3]).astype(
        dtype=dtype
    )
    input_data = np.random.rand(
        input_shape[0], input_shape[1], input_shape[2], input_shape[3]
    ).astype(dtype=dtype)
    params = {"weight1": weight1_data, "weight2": weight2_data}
    inputs = {"data": input_data}
    # Run on the Hexagon device.
    graph_mod = hexagon_session.get_executor_from_factory(lowered)
    graph_mod.set_input(**params)
    graph_mod.run(**inputs)
    hexagon_output = graph_mod.get_output(0).numpy()
    # Reference run: same module built for host LLVM, executed on CPU.
    target_llvm = tvm.target.Target("llvm")
    with tvm.transform.PassContext(opt_level=3):
        llvm_lowered = tvm.relay.build(
            relay_mod,
            tvm.target.Target(target_llvm, host=target_llvm),
            runtime=runtime,
            executor=executor,
        )
    llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
    llvm_graph_mod.set_input(**params)
    llvm_graph_mod.run(**inputs)
    expected_output = llvm_graph_mod.get_output(0).numpy()
    tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
@tvm.testing.requires_hexagon
def test_aot_executor(hexagon_session: Session, aot_host_target, aot_target):
    """Run a single NHWC conv2d through the AOT executor on Hexagon and
    verify against the LLVM host graph executor."""
    dtype = "float32"
    input_shape = (1, 128, 128, 3)
    w_shape = (5, 5, 3, 8)
    data = relay.var("data", relay.TensorType(input_shape, dtype))
    weight = relay.var("weight", relay.TensorType(w_shape, dtype))
    y = relay.nn.conv2d(
        data,
        weight,
        padding=(2, 2),
        kernel_size=(5, 5),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="float32",
    )
    f = relay.Function([data, weight], y)
    relay_mod = tvm.IRModule.from_expr(f)
    relay_mod = relay.transform.InferType()(relay_mod)
    weight_data = np.random.rand(w_shape[0], w_shape[1], w_shape[2], w_shape[3]).astype(dtype=dtype)
    input_data = np.random.rand(
        input_shape[0], input_shape[1], input_shape[2], input_shape[3]
    ).astype(dtype=dtype)
    params = {"weight": weight_data}
    inputs = {"data": input_data}
    # AOT build for Hexagon; weights are bound at build time via `params`.
    with tvm.transform.PassContext(opt_level=3):
        lowered = tvm.relay.build(
            relay_mod,
            params=params,
            target=tvm.target.Target(aot_target, host=aot_host_target),
            runtime=Runtime("cpp"),
            executor=Executor("aot", {"unpacked-api": False, "interface-api": "packed"}),
        )
    aot_mod = hexagon_session.get_executor_from_factory(lowered)
    aot_mod.set_input(**inputs)
    aot_mod.run()
    hexagon_output = aot_mod.get_output(0).numpy()
    # Reference run: graph executor on host LLVM.
    target_llvm = tvm.target.Target("llvm")
    with tvm.transform.PassContext(opt_level=3):
        llvm_lowered = tvm.relay.build(
            relay_mod,
            tvm.target.Target(target_llvm, host=target_llvm),
            runtime=Runtime("cpp"),
            executor=Executor("graph"),
        )
    llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
    llvm_graph_mod.set_input(**params)
    llvm_graph_mod.run(**inputs)
    expected_output = llvm_graph_mod.get_output(0).numpy()
    tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
@tvm.testing.requires_hexagon
def test_aot_executor_multiple_conv2d(hexagon_session: Session, aot_host_target, aot_target):
    """Test multiple conv2d nodes with AOT executor"""
    dtype = "float32"
    input_shape = (1, 8, 8, 3)
    w1_shape = (5, 5, 3, 1)
    w2_shape = (5, 5, 1, 3)
    data = relay.var("data", relay.TensorType(input_shape, dtype))
    weight1 = relay.var("weight1", relay.TensorType(w1_shape, dtype))
    weight2 = relay.var("weight2", relay.TensorType(w2_shape, dtype))
    # Two chained NHWC conv2d ops: conv2d_op2 consumes the output of conv2d_op1.
    conv2d_op1 = relay.nn.conv2d(
        data,
        weight1,
        padding=(2, 2),
        kernel_size=(5, 5),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="float32",
    )
    conv2d_op2 = relay.nn.conv2d(
        conv2d_op1,
        weight2,
        padding=(2, 2),
        kernel_size=(5, 5),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="float32",
    )
    f = relay.Function([data, weight1, weight2], conv2d_op2)
    relay_mod = tvm.IRModule.from_expr(f)
    relay_mod = relay.transform.InferType()(relay_mod)
    weight1_data = np.random.rand(w1_shape[0], w1_shape[1], w1_shape[2], w1_shape[3]).astype(
        dtype=dtype
    )
    weight2_data = np.random.rand(w2_shape[0], w2_shape[1], w2_shape[2], w2_shape[3]).astype(
        dtype=dtype
    )
    input_data = np.random.rand(
        input_shape[0], input_shape[1], input_shape[2], input_shape[3]
    ).astype(dtype=dtype)
    params = {"weight1": weight1_data, "weight2": weight2_data}
    inputs = {"data": input_data}
    # AOT build for Hexagon; weights are bound at build time via `params`.
    with tvm.transform.PassContext(opt_level=3):
        lowered = tvm.relay.build(
            relay_mod,
            params=params,
            target=tvm.target.Target(aot_target, host=aot_host_target),
            runtime=Runtime("cpp"),
            executor=Executor("aot", {"unpacked-api": False, "interface-api": "packed"}),
        )
    aot_mod = hexagon_session.get_executor_from_factory(lowered)
    aot_mod.set_input(**inputs)
    aot_mod.run()
    hexagon_output = aot_mod.get_output(0).numpy()
    # Reference run: graph executor on host LLVM.
    target_llvm = tvm.target.Target("llvm")
    with tvm.transform.PassContext(opt_level=3):
        llvm_lowered = tvm.relay.build(
            relay_mod,
            tvm.target.Target(target_llvm, host=target_llvm),
            runtime=Runtime("cpp"),
            executor=Executor("graph"),
        )
    llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
    llvm_graph_mod.set_input(**params)
    llvm_graph_mod.run(**inputs)
    expected_output = llvm_graph_mod.get_output(0).numpy()
    tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
# Dtype combinations for the vrmpy tests below; the (int8, uint8) pair is
# skipped inside each test as unsupported.
data_dtype = tvm.testing.parameter("int8", "uint8")
weight_dtype = tvm.testing.parameter("int8", "uint8")
@tvm.testing.requires_hexagon
def test_conv2d_relay_vrmpy(hexagon_session, data_dtype, weight_dtype):
    """Check that a quantized conv2d + bias_add lowers to the HVX vrmpy
    instruction and matches the LLVM reference result."""
    if data_dtype == "int8" and weight_dtype == "uint8":
        pytest.skip("(i8, u8) input pair is not supported")
    def get_conv2d_nchw(d_shape, w_shape, padding, strides=(1, 1)):
        """Build an NCHW conv2d relay expression with int32 accumulation."""
        out_dtype = "int32"
        data = relay.var("data", shape=d_shape, dtype=data_dtype)
        weight = relay.var("weight", shape=w_shape, dtype=weight_dtype)
        out_channel = w_shape[0]
        return relay.nn.conv2d(
            data=data,
            weight=weight,
            kernel_size=w_shape[2:],
            channels=out_channel,
            padding=padding,
            strides=strides,
            out_dtype=out_dtype,
        )
    target = get_hexagon_target("v68")
    I, O, H, W = 64, 256, 56, 56
    kH = kW = 3
    padding = (1, 1)
    strides = (1, 1)
    data_shape = (1, I, H, W)
    weight_shape = (O, I, kH, kW)
    bias_shape = (weight_shape[0],)
    bias = relay.var("bias", shape=bias_shape, dtype="int32")
    conv2d = get_conv2d_nchw(
        data_shape,
        weight_shape,
        padding,
        strides=strides,
    )
    bias_add = relay.nn.bias_add(conv2d, bias)
    mod = tvm.IRModule.from_expr(bias_add)
    # Random inputs drawn from the full range of the parametrized dtype.
    if data_dtype == "uint8":
        data_np = np.random.uniform(0, 255, size=data_shape).astype("uint8")
    else:
        data_np = np.random.uniform(-128, 127, size=data_shape).astype("int8")
    if weight_dtype == "uint8":
        weight_np = np.random.uniform(0, 255, size=weight_shape).astype("uint8")
    else:
        weight_np = np.random.uniform(-128, 127, size=weight_shape).astype("int8")
    bias_np = np.random.randint(low=-127, high=128, size=bias_shape).astype("int32")
    params = {"weight": weight_np, "bias": bias_np}
    # Reference result computed on the host with LLVM.
    ref = (
        relay.create_executor("graph", mod=mod, device=tvm.cpu(0), target="llvm")
        .evaluate()(*[data_np, weight_np, bias_np])
        .numpy()
    )
    with tvm.transform.PassContext(
        opt_level=3,
    ):
        executor = relay.backend.Executor("graph", {"link-params": True})
        lib = relay.build(mod, target=target, params=params, executor=executor)
    # The generated assembly must actually use the vrmpy instruction.
    asm = lib.lib.get_source("asm")
    assert "vrmpy" in asm
    rt_mod = hexagon_session.get_executor_from_factory(lib)
    rt_mod.set_input("data", data_np)
    rt_mod.run()
    out = rt_mod.get_output(0).numpy()
    np.testing.assert_equal(out, ref)
@tvm.testing.requires_hexagon
def test_dense_relay_vrmpy(hexagon_session, data_dtype, weight_dtype):
    """Check that a quantized dense + bias_add lowers to the HVX vrmpy
    instruction and matches a NumPy reference result."""
    if data_dtype == "int8" and weight_dtype == "uint8":
        pytest.skip("(i8, u8) input pair is not supported")
    target = get_hexagon_target("v68")
    M = 128
    N = 1000
    K = 2048
    data_shape = (M, K)
    weight_shape = (N, K)
    data = relay.var("data", shape=data_shape, dtype=data_dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=weight_dtype)
    dense = relay.nn.dense(data, weight, out_dtype="int32")
    # Random inputs drawn from the full range of the parametrized dtype.
    if data_dtype == "uint8":
        data_np = np.random.uniform(0, 255, size=data_shape).astype("uint8")
    else:
        data_np = np.random.uniform(-128, 127, size=data_shape).astype("int8")
    if weight_dtype == "uint8":
        weight_np = np.random.uniform(0, 255, size=weight_shape).astype("uint8")
    else:
        weight_np = np.random.uniform(-128, 127, size=weight_shape).astype("int8")
    bias_np = np.random.uniform(1, 10, size=(weight_shape[0],)).astype("int32")
    params = {"weight": weight_np, "bias": bias_np}
    bias = relay.var("bias", shape=(weight_shape[0],), dtype="int32")
    bias_add = relay.nn.bias_add(dense, bias)
    mod = tvm.IRModule.from_expr(bias_add)
    with tvm.transform.PassContext(
        opt_level=3,
    ):
        executor = relay.backend.Executor("graph", {"link-params": True})
        lib = relay.build(mod, target=target, params=params, executor=executor)
    # The generated assembly must actually use the vrmpy instruction.
    asm = lib.lib.get_source("asm")
    assert "vrmpy" in asm
    rt_mod = hexagon_session.get_executor_from_factory(lib)
    rt_mod.set_input("data", data_np)
    rt_mod.run()
    out = rt_mod.get_output(0).numpy()
    # NumPy reference: widen to int32 before the matmul to avoid overflow.
    ref = np.dot(data_np.astype("int32"), weight_np.transpose().astype("int32"))
    ref += bias_np
    np.testing.assert_equal(out, ref)
@tvm.testing.requires_hexagon
def test_lwp(
    hexagon_server_process,
    hexagon_launcher: HexagonLauncherRPC,
    hexagon_session: Session,
    hexagon_debug,
):
    """Run a conv2d with lightweight profiling (LWP) instrumentation enabled,
    collect the profile output, and verify the result against host LLVM."""
    dtype = "float32"
    data = relay.var("data", relay.TensorType((1, 64, 64, 3), dtype))
    weight = relay.var("weight", relay.TensorType((5, 5, 3, 8), dtype))
    y = relay.nn.conv2d(
        data,
        weight,
        padding=(2, 2),
        kernel_size=(5, 5),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="float32",
    )
    f = relay.Function([data, weight], y)
    relay_mod = tvm.IRModule.from_expr(f)
    relay_mod = relay.transform.InferType()(relay_mod)
    target_hexagon = tvm.target.hexagon("v68")
    runtime = Runtime("cpp")
    executor = Executor("graph")
    weight_in = np.random.rand(5, 5, 3, 8).astype(dtype=dtype)
    data_in = np.random.rand(1, 64, 64, 3).astype(dtype=dtype)
    params = {"weight": weight_in}
    inputs = {"data": data_in}
    # "tir.instrument_lwp" turns on lightweight-profiling instrumentation.
    with tvm.transform.PassContext(opt_level=3, config={"tir.instrument_lwp": True}):
        lowered = tvm.relay.build(
            relay_mod,
            tvm.target.Target(target_hexagon, host=target_hexagon),
            runtime=runtime,
            executor=executor,
        )
    # Create HexagonProfiler object
    dso_binary = "test_binary.so"
    profiler = HexagonProfiler(dso_binary, lowered, hexagon_server_process, hexagon_debug)
    graph_mod = hexagon_session.get_executor_from_factory(lowered)
    graph_mod.set_input(**params)
    graph_mod.run(**inputs)
    hexagon_output = graph_mod.get_output(0).numpy()
    # Get lightweight profiling output as a CSV file
    profiler.get_profile_output(hexagon_launcher, hexagon_session)
    # Reference run: same module built for host LLVM, executed on CPU.
    target_llvm = tvm.target.Target("llvm")
    with tvm.transform.PassContext(opt_level=3):
        llvm_lowered = tvm.relay.build(
            relay_mod,
            tvm.target.Target(target_llvm, host=target_llvm),
            runtime=runtime,
            executor=executor,
        )
    llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
    llvm_graph_mod.set_input(weight=weight_in)
    llvm_graph_mod.run(data=data_in)
    expected_output = llvm_graph_mod.get_output(0).numpy()
    tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
@tvm.testing.requires_hexagon
def test_lwp_multiple_conv2d(
    hexagon_server_process,
    hexagon_launcher: HexagonLauncherRPC,
    hexagon_session: Session,
    hexagon_debug,
):
    """Run two chained conv2d ops with lightweight profiling (LWP)
    instrumentation enabled and verify the result against host LLVM."""
    dtype = "float32"
    input_shape = (1, 8, 8, 3)
    w1_shape = (5, 5, 3, 1)
    w2_shape = (5, 5, 1, 3)
    data = relay.var("data", relay.TensorType(input_shape, dtype))
    weight1 = relay.var("weight1", relay.TensorType(w1_shape, dtype))
    weight2 = relay.var("weight2", relay.TensorType(w2_shape, dtype))
    # Two chained NHWC conv2d ops: y2 consumes the output of y1.
    y1 = relay.nn.conv2d(
        data,
        weight1,
        padding=(2, 2),
        kernel_size=(5, 5),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="float32",
    )
    y2 = relay.nn.conv2d(
        y1,
        weight2,
        padding=(2, 2),
        kernel_size=(5, 5),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="float32",
    )
    f = relay.Function([data, weight1, weight2], y2)
    relay_mod = tvm.IRModule.from_expr(f)
    relay_mod = relay.transform.InferType()(relay_mod)
    target_hexagon = tvm.target.hexagon("v68")
    runtime = Runtime("cpp")
    executor = Executor("graph")
    weight1_data = np.random.rand(w1_shape[0], w1_shape[1], w1_shape[2], w1_shape[3]).astype(
        dtype=dtype
    )
    weight2_data = np.random.rand(w2_shape[0], w2_shape[1], w2_shape[2], w2_shape[3]).astype(
        dtype=dtype
    )
    input_data = np.random.rand(
        input_shape[0], input_shape[1], input_shape[2], input_shape[3]
    ).astype(dtype=dtype)
    params = {"weight1": weight1_data, "weight2": weight2_data}
    inputs = {"data": input_data}
    # "tir.instrument_lwp" turns on lightweight-profiling instrumentation.
    with tvm.transform.PassContext(opt_level=3, config={"tir.instrument_lwp": True}):
        lowered = tvm.relay.build(
            relay_mod,
            tvm.target.Target(target_hexagon, host=target_hexagon),
            runtime=runtime,
            executor=executor,
        )
    # Create HexagonProfiler object
    dso_binary = "test_binary.so"
    profiler = HexagonProfiler(dso_binary, lowered, hexagon_server_process, hexagon_debug)
    graph_mod = hexagon_session.get_executor_from_factory(lowered)
    graph_mod.set_input(**params)
    graph_mod.run(**inputs)
    hexagon_output = graph_mod.get_output(0).numpy()
    # Get lightweight profiling output as a CSV file
    profiler.get_profile_output(hexagon_launcher, hexagon_session)
    # Reference run: same module built for host LLVM, executed on CPU.
    target_llvm = tvm.target.Target("llvm")
    with tvm.transform.PassContext(opt_level=3):
        llvm_lowered = tvm.relay.build(
            relay_mod,
            tvm.target.Target(target_llvm, host=target_llvm),
            runtime=runtime,
            executor=executor,
        )
    llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
    llvm_graph_mod.set_input(**params)
    llvm_graph_mod.run(**inputs)
    expected_output = llvm_graph_mod.get_output(0).numpy()
    tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
# Allow invoking this test file directly (outside of pytest collection).
if __name__ == "__main__":
    tvm.testing.main()
| 24,684 | 32.861454 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_hexagon/test_parallel_hvx_load_vtcm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test different strategies for loading data into vtcm before running HVX workloads. """
import numpy as np
import tvm
from tvm.script import tir as T
from .infrastructure import get_hexagon_target
TEST_OUTPUT_TEMPLATE = (
"Test with {} MB of data to load... \n"
" -No VTCM: {} Gops \n -Basic VTCM: {} Gops \n"
" -Vectorized: {} Gops\n -Vectorized and"
" Parallelized: {} Gops\n -Preallocated and Vectorized: {} Gops\n"
" -Preallocated, Vectorized, and Parallelized: {} Gops\n"
" -Single DMA: {} Gops\n -Preloaded: {} Gops\n"
)
def apply_parallel_unroll_vectorize(sch, blocks, outer_split, unroll_split, vector_split):
    """Fuse each block's two loops, then split into four levels and mark
    the outermost parallel, the third unrolled, and the innermost vectorized."""
    for blk in blocks:
        outer_loop, inner_loop = sch.get_loops(blk)
        fused = sch.fuse(outer_loop, inner_loop)
        par_loop, _, unroll_loop, vec_loop = sch.split(
            fused, factors=[outer_split, None, unroll_split, vector_split]
        )
        sch.vectorize(vec_loop)
        sch.unroll(unroll_loop)
        sch.parallel(par_loop)
    return sch
def apply_unroll_vectorize(sch, blocks, unroll_split, vector_split):
    """Fuse each block's two loops, then split so the middle loop is
    unrolled and the innermost loop is vectorized."""
    for blk in blocks:
        first_loop, second_loop = sch.get_loops(blk)
        fused = sch.fuse(first_loop, second_loop)
        _, unroll_loop, vec_loop = sch.split(fused, factors=[None, unroll_split, vector_split])
        sch.vectorize(vec_loop)
        sch.unroll(unroll_loop)
    return sch
def apply_vrmpy_parallelization(sch):
    """Split the outer loop of the "c_buffer" block by 4 and parallelize it."""
    loops = sch.get_loops(sch.get_block("c_buffer"))
    outer_loop, _ = sch.split(loops[0], factors=[4, None])
    sch.parallel(outer_loop)
    return sch
def apply_vtcm_cache_read_write(sch):
    """Stage both inputs and the output of the "c_buffer" block through VTCM."""
    compute = sch.get_block("c_buffer")
    for read_index in (0, 1):
        sch.cache_read(compute, read_index, "global.vtcm")
    sch.cache_write(compute, 0, "global.vtcm")
    return sch
def vrmpy(operations):
    """Build a PrimFunc applying the HVX vrmpy intrinsic row-by-row.

    Each of the `operations` rows multiplies 128 uint8 values from `a` and
    `b` (llvm.hexagon.V6.vrmpyubv.128B) producing 32 int32 accumulators
    in `c`.
    """
    @T.prim_func
    def operator(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        a_buffer = T.match_buffer(a, [operations, 128], dtype="uint8", align=128)
        b_buffer = T.match_buffer(b, [operations, 128], dtype="uint8", align=128)
        c_buffer = T.match_buffer(c, [operations, 32], dtype="int32", align=128)
        for n in T.grid(operations):
            with T.block("c_buffer"):
                vn_ind = T.axis.remap("S", [n])
                # Reinterpret each 128-byte row as int32x32 lanes for the intrinsic.
                c_buffer[vn_ind, T.ramp(0, 1, 32)] = T.call_llvm_intrin(
                    T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vrmpyubv.128B"),
                    T.uint32(2),
                    T.reinterpret(a_buffer[vn_ind, T.ramp(0, 1, 128)], dtype="int32x32"),
                    T.reinterpret(b_buffer[vn_ind, T.ramp(0, 1, 128)], dtype="int32x32"),
                    dtype="int32x32",
                )
    return operator
def preloaded_vrmpy(operations):
    """Build a vrmpy PrimFunc whose buffers are already resident in VTCM.

    Unlike `vrmpy`, all three buffers are declared flat (1-D) with
    scope="global.vtcm", so the caller is responsible for placing the data
    in VTCM before invocation — no copies happen inside the function.
    """
    @T.prim_func
    def operator(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        a_buffer = T.match_buffer(
            a,
            [T.cast(operations, "int32") * 128],
            dtype="uint8",
            align=128,
            scope="global.vtcm",
        )
        b_buffer = T.match_buffer(
            b,
            [T.cast(operations, "int32") * 128],
            dtype="uint8",
            align=128,
            scope="global.vtcm",
        )
        c_buffer = T.match_buffer(
            c, [T.cast(operations, "int32") * 32], dtype="int32", align=128, scope="global.vtcm"
        )
        for n in T.grid(operations):
            with T.block("c_buffer"):
                vn_ind = T.axis.remap("S", [n])
                # Row n occupies bytes [n*128, n*128+128) of the flat inputs and
                # int32 lanes [n*32, n*32+32) of the flat output.
                c_buffer[T.ramp(T.cast(vn_ind, "int32") * 32, 1, 32)] = T.call_llvm_intrin(
                    T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vrmpyubv.128B"),
                    T.uint32(2),
                    T.reinterpret(
                        a_buffer[T.ramp(T.cast(vn_ind, "int32") * 128, 1, 128)], dtype="int32x32"
                    ),
                    T.reinterpret(
                        b_buffer[T.ramp(T.cast(vn_ind, "int32") * 128, 1, 128)], dtype="int32x32"
                    ),
                    dtype="int32x32",
                )
    return operator
def preallocated_vrmpy(operations):
    """Build a vrmpy PrimFunc that copies into preallocated VTCM buffers.

    Takes both the DDR buffers (a, b, c) and caller-preallocated VTCM
    buffers (a_v, b_v, c_v): copies a/b into VTCM element-by-element,
    computes vrmpy there, then copies the result back out to c.
    """
    size = operations * 128
    out_size = operations * 32
    @T.prim_func
    def operator(
        a: T.handle, b: T.handle, c: T.handle, a_v: T.handle, b_v: T.handle, c_v: T.handle
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        a_buffer = T.match_buffer(a, [operations, 128], dtype="uint8", align=128, scope="global")
        b_buffer = T.match_buffer(b, [operations, 128], dtype="uint8", align=128, scope="global")
        c_buffer = T.match_buffer(c, [operations, 32], dtype="int32", align=128, scope="global")
        a_global_vtcm = T.match_buffer(a_v, [size], dtype="uint8", align=128, scope="global.vtcm")
        b_global_vtcm = T.match_buffer(b_v, [size], dtype="uint8", align=128, scope="global.vtcm")
        c_global_vtcm = T.match_buffer(
            c_v, [out_size], dtype="int32", align=128, scope="global.vtcm"
        )
        # Stage inputs DDR -> VTCM.
        for n, i in T.grid(operations, 128):
            with T.block("a_buffer_global.vtcm"):
                vn_ind, vi_index = T.axis.remap("SS", [n, i])
                a_global_vtcm[vn_ind * 128 + vi_index] = a_buffer[vn_ind, vi_index]
        for n, i in T.grid(operations, 128):
            with T.block("b_buffer_global.vtcm"):
                vn_ind, vi_index = T.axis.remap("SS", [n, i])
                b_global_vtcm[vn_ind * 128 + vi_index] = b_buffer[vn_ind, vi_index]
        # Compute vrmpy entirely in VTCM.
        for n in T.grid(operations):
            with T.block("c_buffer"):
                vn_ind = T.axis.remap("S", [n])
                c_global_vtcm[T.ramp(T.cast(vn_ind, "int32") * 32, 1, 32)] = T.call_llvm_intrin(
                    T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vrmpyubv.128B"),
                    T.uint32(2),
                    T.reinterpret(
                        a_global_vtcm[T.ramp(T.cast(vn_ind, "int32") * 128, 1, 128)],
                        dtype="int32x32",
                    ),
                    T.reinterpret(
                        b_global_vtcm[T.ramp(T.cast(vn_ind, "int32") * 128, 1, 128)],
                        dtype="int32x32",
                    ),
                    dtype="int32x32",
                )
        # Copy the result VTCM -> DDR.
        for n, i in T.grid(operations, 32):
            with T.block("c_buffer_global.vtcm"):
                vn_ind, vi_index = T.axis.remap("SS", [n, i])
                c_buffer[vn_ind, vi_index] = c_global_vtcm[vn_ind * 32 + vi_index]
    return operator
def preallocated_single_dma_vrmpy(operations):
    """Generate preallocated single DMA VRMPY operator.

    Unlike the loop-based staging variants, each operand is moved between DDR
    and VTCM with a single ``dma_copy_dltensor`` packed call, so the transfer
    cost is one DMA per buffer instead of one store per element.
    """
    size = operations * 128  # total uint8 elements (== bytes) per input
    out_size = operations * 32  # total int32 elements in the output

    @T.prim_func
    def operator(
        a: T.handle,
        b: T.handle,
        c: T.handle,
        a_v: T.handle,
        b_v: T.handle,
        c_v: T.handle,
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # DDR-resident operands/result.
        a_buffer = T.match_buffer(a, [operations, 128], dtype="uint8", align=128, scope="global")
        b_buffer = T.match_buffer(b, [operations, 128], dtype="uint8", align=128, scope="global")
        c_buffer = T.match_buffer(c, [operations, 32], dtype="int32", align=128, scope="global")
        # Caller-preallocated VTCM staging buffers (flat 1-D views).
        a_global_vtcm = T.match_buffer(a_v, [size], dtype="uint8", align=128, scope="global.vtcm")
        b_global_vtcm = T.match_buffer(b_v, [size], dtype="uint8", align=128, scope="global.vtcm")
        c_global_vtcm = T.match_buffer(
            c_v, [out_size], dtype="int32", align=128, scope="global.vtcm"
        )
        # Single DMA transfer of the whole `a` operand DDR -> VTCM.
        T.evaluate(
            T.tvm_call_packed(
                "device_api.hexagon.dma_copy_dltensor",
                T.tvm_stack_make_array(
                    a_global_vtcm.data,
                    T.tvm_stack_make_shape(size, dtype="handle"),
                    0,
                    1,
                    a_global_vtcm.dtype,
                    0,
                    dtype="handle",
                ),
                T.tvm_stack_make_array(
                    a_buffer.data,
                    T.tvm_stack_make_shape(size, dtype="handle"),
                    0,
                    1,
                    a_buffer.dtype,
                    0,
                    dtype="handle",
                ),
                T.cast(size, dtype="int"),
                True,  # bypass cache
                dtype="int32",
            )
        )
        # Single DMA transfer of the whole `b` operand DDR -> VTCM.
        T.evaluate(
            T.tvm_call_packed(
                "device_api.hexagon.dma_copy_dltensor",
                T.tvm_stack_make_array(
                    b_global_vtcm.data,
                    T.tvm_stack_make_shape(size, dtype="handle"),
                    0,
                    1,
                    b_global_vtcm.dtype,
                    0,
                    dtype="handle",
                ),
                T.tvm_stack_make_array(
                    b_buffer.data,
                    T.tvm_stack_make_shape(size, dtype="handle"),
                    0,
                    1,
                    b_buffer.dtype,
                    0,
                    dtype="handle",
                ),
                T.cast(size, dtype="int"),
                True,  # bypass cache
                dtype="int32",
            )
        )
        # One 128-byte VRMPY per operation, reading/writing VTCM directly.
        for n in T.grid(operations):
            with T.block("c_buffer"):
                vn_ind = T.axis.remap("S", [n])
                c_global_vtcm[T.ramp(T.cast(vn_ind, "int32") * 32, 1, 32)] = T.call_llvm_intrin(
                    T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vrmpyubv.128B"),
                    T.uint32(2),
                    T.reinterpret(
                        a_global_vtcm[T.ramp(T.cast(vn_ind, "int32") * 128, 1, 128)],
                        dtype="int32x32",
                    ),
                    T.reinterpret(
                        b_global_vtcm[T.ramp(T.cast(vn_ind, "int32") * 128, 1, 128)],
                        dtype="int32x32",
                    ),
                    dtype="int32x32",
                )
        # Single DMA transfer of the result VTCM -> DDR.
        # NOTE(review): this call reuses `size` (not `out_size`) for the shape
        # and length arguments. The output holds out_size int32 values, whose
        # byte count happens to equal `size` (operations*32*4 == operations*128),
        # so this is correct only if the API expects a byte count — confirm
        # against the dma_copy_dltensor implementation.
        T.evaluate(
            T.tvm_call_packed(
                "device_api.hexagon.dma_copy_dltensor",
                T.tvm_stack_make_array(
                    c_buffer.data,
                    T.tvm_stack_make_shape(size, dtype="handle"),
                    0,
                    1,
                    c_buffer.dtype,
                    0,
                    dtype="handle",
                ),
                T.tvm_stack_make_array(
                    c_global_vtcm.data,
                    T.tvm_stack_make_shape(size, dtype="handle"),
                    0,
                    1,
                    c_global_vtcm.dtype,
                    0,
                    dtype="handle",
                ),
                T.cast(size, dtype="int"),
                True,  # bypass cache
                dtype="int32",
            )
        )

    return operator
def evaluate_result(operations, tag, time, result, expected_output):
    """Print a throughput summary for one timed run and verify its output.

    ``time`` is a time_evaluator result exposing a ``mean`` in seconds.
    Raises if ``result`` does not match ``expected_output``.
    """
    elapsed = time.mean
    # Total traffic: two 128-byte inputs plus a 128-byte-equivalent output per op.
    transfer_mb = round(3 * operations * 128 / 1e6, 2)
    mean_ms = round(elapsed * 1000, 6)
    gops = round(operations * 128 * 3 / elapsed / 1e9, 3)
    print(f"\ntest_{transfer_mb}MB_{tag} took {mean_ms} ms @ GOPS: {gops}")
    tvm.testing.assert_allclose(result, expected_output)
def setup_and_run(hexagon_session, sch, a, b, c, operations, mem_scope="global"):
    """Build the schedule for Hexagon, run it once, and return (GOPS, output).

    The three numpy arrays are uploaded into ``mem_scope`` memory on the
    device; the third array receives the result.
    """
    built = tvm.build(sch.mod["main"], target=get_hexagon_target("v69"))
    module = hexagon_session.load_module(built)
    dev = hexagon_session.device

    device_arrays = [
        tvm.runtime.ndarray.array(host, device=dev, mem_scope=mem_scope) for host in (a, b, c)
    ]

    # These are reduced for CI but number=100 and repeat=10 does a good job of removing noise.
    timer = module.time_evaluator("__tvm_main__", dev, number=1, repeat=1)
    elapsed = timer(*device_arrays)

    gops = round(operations * 128 * 3 / elapsed.mean / 1e9, 4)
    return gops, device_arrays[2].asnumpy()
def setup_and_run_preallocated(hexagon_session, sch, a, b, c, operations):
    """Build and run a schedule whose VTCM staging buffers are preallocated.

    Besides the usual global-memory copies of ``a``/``b``/``c``, three
    zero-filled flat buffers are allocated directly in VTCM and passed as the
    extra operator arguments. Returns (GOPS, output array).
    """
    built = tvm.build(sch.mod["main"], target=get_hexagon_target("v69"))
    module = hexagon_session.load_module(built)
    dev = hexagon_session.device

    # Device-side copies of the operands in ordinary global memory.
    a_hex = tvm.runtime.ndarray.array(a, device=dev, mem_scope="global")
    b_hex = tvm.runtime.ndarray.array(b, device=dev, mem_scope="global")
    c_hex = tvm.runtime.ndarray.array(c, device=dev, mem_scope="global")

    # Flat, zero-initialized staging buffers placed directly in VTCM.
    a_vtcm_hex = tvm.runtime.ndarray.array(
        np.zeros(a.size, dtype="uint8"), device=dev, mem_scope="global.vtcm"
    )
    b_vtcm_hex = tvm.runtime.ndarray.array(
        np.zeros(b.size, dtype="uint8"), device=dev, mem_scope="global.vtcm"
    )
    c_vtcm_hex = tvm.runtime.ndarray.array(
        np.zeros(c.size, dtype="int32"), device=dev, mem_scope="global.vtcm"
    )

    # These are reduced for CI but number=100 and repeat=10 does a good job of removing noise.
    timer = module.time_evaluator("__tvm_main__", dev, number=1, repeat=1)
    elapsed = timer(a_hex, b_hex, c_hex, a_vtcm_hex, b_vtcm_hex, c_vtcm_hex)

    gops = round(operations * 128 * 3 / elapsed.mean / 1e9, 4)
    return gops, c_hex.asnumpy()
class TestMatMulVec:
    """MatMul (VRMPY) test class comparing VTCM data-movement strategies."""

    # Removed most of these to speedup CI.
    operations = tvm.testing.parameter(
        1024,
        # 2048,
        # 4096,
        # 5 * 2048, # 3.93MB of total transfer
        # 16384, #Only works on 8Gen1 HDK's
        # 5 * 4096, # 7.86MB of total transfer. Only works on 8Gen1 HDK's
    )

    # Experimentally best configurations for the memcopy
    outer_split = tvm.testing.parameter(4)
    unroll_split = tvm.testing.parameter(8)
    vector_split = tvm.testing.parameter(64)
    c_vector_split = tvm.testing.parameter(16)
    c_vector_split_unallocated = tvm.testing.parameter(8)

    @tvm.testing.fixture
    def input_a(self, operations):
        # Random uint8 operand in [0, 16), shape (operations, 128).
        return np.random.randint(0, 16, (operations, 128), dtype="uint8")

    @tvm.testing.fixture
    def input_b(self, operations):
        # Random uint8 operand in [0, 16), shape (operations, 128).
        return np.random.randint(0, 16, (operations, 128), dtype="uint8")

    @tvm.testing.fixture
    def input_c(self, operations):
        # Zero-initialized int32 output, shape (operations, 32).
        return np.zeros((operations, 32), dtype="int32")

    @tvm.testing.fixture
    def expected_output(self, operations, input_a, input_b, input_c):
        # Reference VRMPY result: each of the 32 output lanes is the dot
        # product of 4 consecutive uint8 pairs from `input_a` and `input_b`.
        expected_output = np.zeros(input_c.shape, dtype="int32")
        for n in range(operations):
            for i in range(32):
                for r_ind in range(4):  # pylint: disable=unused-variable
                    expected_output[n, i] = expected_output[n, i] + np.uint32(
                        input_a[n, i * 4 + r_ind]
                    ) * np.uint32(input_b[n, i * 4 + r_ind])
        return expected_output

    @tvm.testing.requires_hexagon
    def test_loading_vtcm_for_vrmpy(
        self,
        hexagon_session,
        operations,
        input_a,
        input_b,
        input_c,
        expected_output,
        outer_split,
        unroll_split,
        vector_split,
        c_vector_split,
        c_vector_split_unallocated,
    ):
        """Load VTCM for VRMPY operator test.

        Builds and times the same VRMPY workload under eight data-movement
        strategies (no VTCM, scalar/vectorized/parallel staging, preallocated
        buffers, single DMA, fully preloaded) and checks each result against
        the reference, then prints a timing comparison.
        """
        # Run parallel vrmpy without loading to VTCM.
        sch = tvm.tir.Schedule(vrmpy(operations))
        sch = apply_vrmpy_parallelization(sch)
        base_runtime, result = setup_and_run(
            hexagon_session, sch, input_a, input_b, input_c, operations
        )
        tvm.testing.assert_allclose(result, expected_output)

        # Run parallel vrmpy with basic memory loads to VTCM.
        sch = tvm.tir.Schedule(vrmpy(operations))
        sch = apply_vtcm_cache_read_write(sch)
        sch = apply_vrmpy_parallelization(sch)
        basic_load_runtime, result = setup_and_run(
            hexagon_session, sch, input_a, input_b, input_c, operations
        )
        tvm.testing.assert_allclose(result, expected_output)

        # Run parallel vrmpy with vectorized memory loads to VTCM.
        sch = tvm.tir.Schedule(vrmpy(operations))
        sch = apply_vtcm_cache_read_write(sch)
        sch = apply_vrmpy_parallelization(sch)
        sch = apply_unroll_vectorize(
            sch,
            [sch.get_block("a_buffer_global.vtcm"), sch.get_block("b_buffer_global.vtcm")],
            unroll_split,
            vector_split,
        )
        sch = apply_unroll_vectorize(
            sch, [sch.get_block("c_buffer_global.vtcm")], unroll_split, c_vector_split_unallocated
        )
        vectorized_runtime, result = setup_and_run(
            hexagon_session, sch, input_a, input_b, input_c, operations
        )
        tvm.testing.assert_allclose(result, expected_output)

        # Run parallel vrmpy with vectorized and parallelized memory loads to VTCM.
        sch = tvm.tir.Schedule(vrmpy(operations))
        sch = apply_vtcm_cache_read_write(sch)
        sch = apply_vrmpy_parallelization(sch)
        sch = apply_parallel_unroll_vectorize(
            sch,
            [sch.get_block("a_buffer_global.vtcm"), sch.get_block("b_buffer_global.vtcm")],
            outer_split,
            unroll_split,
            vector_split,
        )
        sch = apply_parallel_unroll_vectorize(
            sch,
            [sch.get_block("c_buffer_global.vtcm")],
            outer_split,
            unroll_split,
            c_vector_split_unallocated,
        )
        vectorized_parallelized_runtime, result = setup_and_run(
            hexagon_session, sch, input_a, input_b, input_c, operations
        )
        tvm.testing.assert_allclose(result, expected_output)

        # Run parallel vrmpy with preallocated and vectorized memory loads to VTCM.
        sch = tvm.tir.Schedule(preallocated_vrmpy(operations))
        sch = apply_vrmpy_parallelization(sch)
        sch = apply_unroll_vectorize(
            sch,
            [sch.get_block("a_buffer_global.vtcm"), sch.get_block("b_buffer_global.vtcm")],
            unroll_split,
            vector_split,
        )
        sch = apply_unroll_vectorize(
            sch, [sch.get_block("c_buffer_global.vtcm")], unroll_split, c_vector_split
        )
        preallocated_vectorized_runtime, result = setup_and_run_preallocated(
            hexagon_session, sch, input_a, input_b, input_c, operations
        )
        # Preallocated variants return a flat result; restore the 2-D shape.
        result = result.reshape((operations, 32))
        tvm.testing.assert_allclose(result, expected_output)

        # Run parallel vrmpy with preallocated, vectorized, and parallelized memory loads to VTCM.
        sch = tvm.tir.Schedule(preallocated_vrmpy(operations))
        sch = apply_vrmpy_parallelization(sch)
        sch = apply_parallel_unroll_vectorize(
            sch,
            [sch.get_block("a_buffer_global.vtcm"), sch.get_block("b_buffer_global.vtcm")],
            outer_split,
            unroll_split,
            vector_split,
        )
        sch = apply_parallel_unroll_vectorize(
            sch, [sch.get_block("c_buffer_global.vtcm")], outer_split, unroll_split, c_vector_split
        )
        prealloc_vector_parallelized, result = setup_and_run_preallocated(
            hexagon_session, sch, input_a, input_b, input_c, operations
        )
        result = result.reshape((operations, 32))
        tvm.testing.assert_allclose(result, expected_output)

        # Run parallel vrmpy with preallocated single dma memory load to VTCM.
        sch = tvm.tir.Schedule(preallocated_single_dma_vrmpy(operations))
        sch = apply_vrmpy_parallelization(sch)
        single_dma_runtime, result = setup_and_run_preallocated(
            hexagon_session, sch, input_a, input_b, input_c, operations
        )
        result = result.reshape((operations, 32))
        tvm.testing.assert_allclose(result, expected_output)

        # Run parallel vrmpy with data preloaded in VTCM.
        sch = tvm.tir.Schedule(preloaded_vrmpy(operations))
        sch = apply_vrmpy_parallelization(sch)
        # The preloaded operator takes flat 1-D buffers living in VTCM.
        input_a = input_a.reshape(operations * 128)
        input_b = input_b.reshape(operations * 128)
        input_c = input_c.reshape(operations * 32)
        preloaded_runtime, result = setup_and_run(
            hexagon_session, sch, input_a, input_b, input_c, operations, "global.vtcm"
        )
        result = result.reshape((operations, 32))
        tvm.testing.assert_allclose(result, expected_output)

        transfer_mb = round(3 * operations * 128 / 1e6, 2)
        print(
            TEST_OUTPUT_TEMPLATE.format(
                transfer_mb,
                base_runtime,
                basic_load_runtime,
                vectorized_runtime,
                vectorized_parallelized_runtime,
                preallocated_vectorized_runtime,
                prealloc_vector_parallelized,
                single_dma_runtime,
                preloaded_runtime,
            )
        )
# Allow running this test file directly via TVM's pytest-based test runner.
if __name__ == "__main__":
    tvm.testing.main()
| 22,415 | 38.464789 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_hexagon/test_fixed_point_conversion.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test float to fixed-point conversion. We do it by constructing a numpy array with the
wide range of floating-point values. These values are converted into the
fixed-point value using topi.hexagon.utils.get_fixed_point_value. Then, these values are
converted back into float using scale_factor provided by the function. These converted
floating point values are then compared against the original values, and an assertion is
raised if they happen to be outside of the expected tolerance.
"""
import math
import struct
import numpy as np
from tvm.topi.hexagon import utils
class TestFixedPointConversion:
    """Fixed-point conversion test class."""

    def test_fixed_point_conversion(self):
        """Round-trip floats through get_fixed_point_value and bound the error."""
        # Sample several magnitude bands, from tiny fractions up to values with
        # the largest possible IEEE-754 single-precision exponent (actual exp
        # value = 127, stored exp value = 254) and near-denormal values.
        sample_ranges = [
            (0.00001, 0.0002, 10),
            (0.001, 0.02, 10),
            (1, 20, 10),
            (900, 1000, 10),
            (1e9, 1e10, 10),
            (2.4e38, 2.5e38, 1),  # largest-exponent band
            (1.4e-34, 1.7e-34, 1),  # very small floating-point values
        ]
        float_arr = np.concatenate(
            [np.random.uniform(low, high, size=(count)) for low, high, count in sample_ranges]
        )
        for value in float_arr:
            fixed_val, rsh = utils.get_fixed_point_value(value, "int16")
            # rsh is log2 of the scale factor. Rebuild the scale as a float by
            # writing the biased exponent straight into the IEEE-754 exponent
            # bits, which handles both negative and positive rsh.
            exponent_bits = ((rsh + 127) & 0xFF) << 23
            packed = struct.pack("I", exponent_bits)
            (scale,) = struct.unpack("f", packed)
            assert math.isclose(value, fixed_val / scale, rel_tol=1e-2)
# Allow running this test file directly via TVM's pytest-based test runner.
if __name__ == "__main__":
    # Bug fix: this module only does `from tvm.topi.hexagon import utils`, so
    # the bare name `tvm` is never bound and `tvm.testing.main()` raised
    # NameError when the file was executed as a script. Import it locally here
    # (script entry point only) before calling the runner.
    import tvm.testing

    tvm.testing.main()
| 2,952 | 42.426471 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_hexagon/test_usmp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""USMP tests"""
import numpy as np
import pytest
import tvm.testing
from tvm import relay
from tvm.contrib.hexagon.session import Session
from tvm.relay.backend import Executor, Runtime
from tvm.testing.usmp import is_tvm_backendallocworkspace_calls
@pytest.mark.parametrize("usmp_enabled", [False, True])
@tvm.testing.requires_hexagon
def test_conv2d(hexagon_session: Session, aot_host_target, aot_target, usmp_enabled):
    """Try conv2d on AOT target with usmp_enabled and check for TVMBackendAllocWorkspace calls"""
    dtype = "float32"
    input_shape = (1, 8, 8, 3)
    w1_shape = (5, 5, 3, 1)
    w2_shape = (5, 5, 1, 3)

    # Build a small two-layer NHWC convolution network in Relay.
    data = relay.var("data", relay.TensorType(input_shape, dtype))
    weight1 = relay.var("weight1", relay.TensorType(w1_shape, dtype))
    weight2 = relay.var("weight2", relay.TensorType(w2_shape, dtype))
    conv_attrs = dict(
        padding=(2, 2),
        kernel_size=(5, 5),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="float32",
    )
    conv1 = relay.nn.conv2d(data, weight1, **conv_attrs)
    conv2 = relay.nn.conv2d(conv1, weight2, **conv_attrs)
    relay_mod = tvm.IRModule.from_expr(relay.Function([data, weight1, weight2], conv2))
    relay_mod = relay.transform.InferType()(relay_mod)

    # Random parameters and input.
    weight1_data = np.random.rand(*w1_shape).astype(dtype=dtype)
    weight2_data = np.random.rand(*w2_shape).astype(dtype=dtype)
    input_data = np.random.rand(*input_shape).astype(dtype=dtype)
    params = {"weight1": weight1_data, "weight2": weight2_data}
    inputs = {"data": input_data}

    # AOT-compile for Hexagon with USMP toggled by the test parameter.
    with tvm.transform.PassContext(opt_level=3, config={"tir.usmp.enable": usmp_enabled}):
        lowered = tvm.relay.build(
            relay_mod,
            params=params,
            target=tvm.target.Target(aot_target, host=aot_host_target),
            runtime=Runtime("cpp"),
            executor=Executor("aot", {"unpacked-api": False, "interface-api": "packed"}),
        )

    # When USMP is enabled, no TVMBackendAllocWorkspace calls should remain.
    assert is_tvm_backendallocworkspace_calls(lowered.lib) != usmp_enabled

    # Run on the Hexagon device.
    aot_mod = hexagon_session.get_executor_from_factory(lowered)
    aot_mod.set_input(**inputs)
    aot_mod.run()
    hexagon_output = aot_mod.get_output(0).numpy()

    # Reference run on host LLVM for comparison.
    target_llvm = tvm.target.Target("llvm")
    with tvm.transform.PassContext(opt_level=3):
        llvm_lowered = tvm.relay.build(
            relay_mod,
            tvm.target.Target(target_llvm, host=target_llvm),
            runtime=Runtime("cpp"),
            executor=Executor("aot"),
        )
    llvm_mod = tvm.runtime.executor.AotModule(llvm_lowered["default"](tvm.cpu(0)))
    llvm_mod.set_input(**params)
    llvm_mod.run(**inputs)
    expected_output = llvm_mod.get_output(0).numpy()

    tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
# Allow running this test file directly via TVM's pytest-based test runner.
if __name__ == "__main__":
    tvm.testing.main()
| 3,934 | 34.772727 | 97 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.