repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
tvm
tvm-main/apps/topi_recipe/conv/test_conv_int8_intel.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable-msg=too-many-arguments, too-many-locals, assignment-from-no-return
""" Conv Int8 functional and performance testing"""
import sys
import logging
import numpy as np
import tvm
from tvm import te
from tvm import topi

logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOGGER = logging.getLogger("test_conv_int8_intel")
LOGGER.disabled = False

# All the WORKLOADS from Resnet except first layer
# Workload is ['height', 'width', 'in_filter', 'out_filter',
# 'hkernel', 'wkernel', 'hpad', 'wpad', 'hstride', 'wstride'])
WORKLOADS = [
    (56, 56, 64, 64, 3, 3, 1, 1, 1, 1),
    (56, 56, 64, 64, 1, 1, 0, 0, 1, 1),
    (56, 56, 64, 128, 3, 3, 1, 1, 2, 2),
    (56, 56, 64, 128, 1, 1, 0, 0, 2, 2),
    (28, 28, 128, 128, 3, 3, 1, 1, 1, 1),
    (28, 28, 128, 256, 3, 3, 1, 1, 2, 2),
    (28, 28, 128, 256, 1, 1, 0, 0, 2, 2),
    (14, 14, 256, 256, 3, 3, 1, 1, 1, 1),
    (14, 14, 256, 512, 3, 3, 1, 1, 2, 2),
    (14, 14, 256, 512, 1, 1, 0, 0, 2, 2),
    (7, 7, 512, 512, 3, 3, 1, 1, 1, 1),
    (56, 56, 64, 256, 1, 1, 0, 0, 1, 1),
    (56, 56, 256, 64, 1, 1, 0, 0, 1, 1),
    (56, 56, 256, 128, 1, 1, 0, 0, 2, 2),
    (28, 28, 128, 512, 1, 1, 0, 0, 1, 1),
    (56, 56, 256, 512, 1, 1, 0, 0, 2, 2),
    (28, 28, 512, 128, 1, 1, 0, 0, 1, 1),
    (28, 28, 512, 256, 1, 1, 0, 0, 2, 2),
    (14, 14, 256, 1024, 1, 1, 0, 0, 1, 1),
    (28, 28, 512, 1024, 1, 1, 0, 0, 2, 2),
    (14, 14, 1024, 256, 1, 1, 0, 0, 1, 1),
    (14, 14, 1024, 512, 1, 1, 0, 0, 2, 2),
    (7, 7, 512, 2048, 1, 1, 0, 0, 1, 1),
    (14, 14, 1024, 2048, 1, 1, 0, 0, 2, 2),
    (7, 7, 2048, 512, 1, 1, 0, 0, 1, 1),
]

TARGET_NAME = "llvm -mcpu=skylake-avx512"
NUM_VEC_LANES = 16
DEV = tvm.device(TARGET_NAME, 0)


def get_shape(
    im_height, im_width, in_filter, out_filter, k_h, k_w, hpad, wpad, hstride, wstride, out_dtype
):
    """Find the NCHWc-layout shapes of the data, kernel and output tensors.

    Returns a tuple ``(data_shape, kernel_shape, o_shape)``.

    Raises
    ------
    ValueError
        If ``out_dtype`` is neither ``"int32"`` nor ``"float32"`` (previously
        this fell through and crashed later with an UnboundLocalError).
    """
    ## Find shapes
    data_shape = (1, in_filter // NUM_VEC_LANES, im_height, im_width, NUM_VEC_LANES)

    if out_dtype == "int32":
        # int8 kernels carry an extra innermost factor of 4 for the VNNI-style
        # 4-way dot product accumulation.
        kernel_shape = (
            out_filter // NUM_VEC_LANES,
            in_filter // NUM_VEC_LANES,
            k_h,
            k_w,
            NUM_VEC_LANES // 4,
            NUM_VEC_LANES,
            4,
        )
    elif out_dtype == "float32":
        kernel_shape = (
            out_filter // NUM_VEC_LANES,
            in_filter // NUM_VEC_LANES,
            k_h,
            k_w,
            NUM_VEC_LANES,
            NUM_VEC_LANES,
        )
    else:
        raise ValueError("unsupported out_dtype: %s" % out_dtype)

    out_height = (im_height + 2 * hpad - k_h) // hstride + 1
    out_width = (im_width + 2 * wpad - k_w) // wstride + 1
    o_shape = (1, out_filter // NUM_VEC_LANES, out_height, out_width, NUM_VEC_LANES)
    return (data_shape, kernel_shape, o_shape)


def run_inference(
    data_dtype,
    kernel_dtype,
    out_dtype,
    im_height,
    im_width,
    in_filter,
    out_filter,
    k_h,
    k_w,
    hpad,
    wpad,
    hstride,
    wstride,
):
    """Run the inference and check the functional correctness between
    compute and schedule outputs.  Returns the mean runtime (seconds) of
    the scheduled kernel."""
    (data_shape, kernel_shape, o_shape) = get_shape(
        im_height,
        im_width,
        in_filter,
        out_filter,
        k_h,
        k_w,
        hpad,
        wpad,
        hstride,
        wstride,
        out_dtype,
    )

    # Create TVM placeholders
    data = te.placeholder(data_shape, name="data", dtype=data_dtype)
    kernel = te.placeholder(kernel_shape, name="kernel", dtype=kernel_dtype)

    # Create the numpy arrays to be used for executing conv models
    if data_dtype == "float32":
        data_array = tvm.nd.array(np.random.rand(*data_shape).astype(dtype=data_dtype), DEV)
        kernel_array = tvm.nd.array(np.random.rand(*kernel_shape).astype(dtype=kernel_dtype), DEV)
    else:
        # BUGFIX: the int8 path previously omitted DEV, unlike the float path.
        data_array = tvm.nd.array(np.random.randint(100, size=data_shape).astype(data_dtype), DEV)
        kernel_array = tvm.nd.array(
            np.random.randint(100, size=kernel_shape).astype(kernel_dtype), DEV
        )

    # c_orig will be used for declaration output
    # c_sch will be used for scheduled computation output
    c_orig = tvm.nd.array(np.zeros(o_shape, dtype=out_dtype), DEV)
    c_sch = tvm.nd.array(np.zeros(o_shape, dtype=out_dtype), DEV)

    with tvm.target.Target(TARGET_NAME):
        conv = topi.nn.conv2d_NCHWc(
            data,
            kernel,
            stride=hstride,
            padding=hpad,
            dilation=(1, 1),
            layout="NCHWc",
            out_layout="NCHWc",
            out_dtype=out_dtype,
        )
        out = topi.nn.relu(conv)
        sch = te.create_schedule(out.op)
        func = tvm.build(sch, [data, kernel, out], target=TARGET_NAME, name="out")
        func(data_array, kernel_array, c_orig)
        LOGGER.debug(tvm.lower(sch, [data, kernel], simple_mode=True))

        # Generate and run the optimized schedule
        sconv = topi.generic.nn.schedule_conv2d_NCHWc(outs=[out])
        func = tvm.build(sconv, [data, kernel, out], target=TARGET_NAME, name="conv")
        func(data_array, kernel_array, c_sch)

        # Functional check
        if data_dtype == "uint8":
            np.testing.assert_equal(c_orig.numpy(), c_sch.numpy())
        else:
            assert np.allclose(c_orig.numpy(), c_sch.numpy())

        evaluator = func.time_evaluator(func.entry_name, DEV, number=1000)
        LOGGER.debug(tvm.lower(sconv, [data, kernel], simple_mode=True))
        return evaluator(data_array, kernel_array, c_sch).mean


if __name__ == "__main__":
    LOGGER.info("Workload, Kernel_size, FP32_time, INT8_time, Speedup")
    SPEEDUP_ARRAY = []
    for i, wkl in enumerate(WORKLOADS):
        fp32_time = run_inference("float32", "float32", "float32", *wkl)
        int8_time = run_inference("uint8", "int8", "int32", *wkl)
        kernel_h = wkl[4]
        kernel_w = wkl[5]
        # Lazy %-style logging args instead of eager string concatenation.
        LOGGER.info(
            "Workload#%d, %dx%d, %s, %s, %s",
            i,
            kernel_h,
            kernel_w,
            fp32_time,
            int8_time,
            fp32_time / int8_time,
        )
        SPEEDUP_ARRAY.append(fp32_time / int8_time)
    LOGGER.info("Average speedup --> %s", sum(SPEEDUP_ARRAY) / float(len(SPEEDUP_ARRAY)))
7,070
32.832536
99
py
tvm
tvm-main/apps/topi_recipe/gemm/cuda_gemm_square.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do square matrix multiplication."""
import tvm
from tvm import te
import os
from tvm.contrib import nvcc
from tvm.contrib import spirv
import numpy as np
import tvm.testing

TASK = "gemm"
USE_MANUAL_CODE = False


def test_gemm():
    """Build a tiled square GEMM schedule and check it on every GPU backend."""
    # ---- compute declaration -------------------------------------------
    nn = 2048
    n = te.var("n")
    n = tvm.runtime.convert(nn)
    m, l = n, n
    A = te.placeholder((l, n), name="A")
    B = te.placeholder((l, m), name="B")
    k = te.reduce_axis((0, l), name="k")
    # C[ii, jj] = sum_k A[k, jj] * B[k, ii]  (checked below via np.dot(b.T, a))
    C = te.compute((m, n), lambda ii, jj: te.sum(A[k, jj] * B[k, ii], axis=k), name="C")

    # ---- schedule ------------------------------------------------------
    s = te.create_schedule(C.op)
    AA = s.cache_read(A, "shared", [C])
    BB = s.cache_read(B, "shared", [C])
    AL = s.cache_read(AA, "local", [C])
    BL = s.cache_read(BB, "local", [C])
    CC = s.cache_write(C, "local")

    scale = 8
    num_thread = 8
    block_factor = scale * num_thread
    block_x = te.thread_axis("blockIdx.x")
    thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
    block_y = te.thread_axis("blockIdx.y")
    thread_y = te.thread_axis((0, num_thread), "threadIdx.y")
    thread_xz = te.thread_axis((0, 2), "vthread", name="vx")
    thread_yz = te.thread_axis((0, 2), "vthread", name="vy")

    # Tile the output into thread blocks.
    by, yi = s[C].split(C.op.axis[0], factor=block_factor)
    bx, xi = s[C].split(C.op.axis[1], factor=block_factor)
    s[C].bind(by, block_y)
    s[C].bind(bx, block_x)
    s[C].reorder(by, bx, yi, xi)

    # Split the intra-block tile over virtual threads and real threads.
    tyz, yi = s[C].split(yi, nparts=2)
    ty, yi = s[C].split(yi, nparts=num_thread)
    txz, xi = s[C].split(xi, nparts=2)
    tx, xi = s[C].split(xi, nparts=num_thread)
    s[C].bind(tyz, thread_yz)
    s[C].bind(txz, thread_xz)
    s[C].bind(ty, thread_y)
    s[C].bind(tx, thread_x)
    s[C].reorder(tyz, txz, ty, tx, yi, xi)
    s[CC].compute_at(s[C], tx)

    # Split the reduction axis and stage operands through shared/local memory.
    yo, xo = CC.op.axis
    ko, ki = s[CC].split(k, factor=8)
    kt, ki = s[CC].split(ki, factor=1)
    s[CC].reorder(ko, kt, ki, yo, xo)
    s[AA].compute_at(s[CC], ko)
    s[BB].compute_at(s[CC], ko)
    s[CC].unroll(kt)
    s[AL].compute_at(s[CC], kt)
    s[BL].compute_at(s[CC], kt)

    # Cooperative, vectorized load of A into shared memory.
    ty, xi = s[AA].split(s[AA].op.axis[0], nparts=num_thread)
    _, xi = s[AA].split(s[AA].op.axis[1], factor=num_thread * 4)
    tx, xi = s[AA].split(xi, nparts=num_thread)
    s[AA].bind(ty, thread_y)
    s[AA].bind(tx, thread_x)
    s[AA].vectorize(xi)

    # Cooperative, vectorized load of B into shared memory.
    ty, xi = s[BB].split(s[BB].op.axis[0], nparts=num_thread)
    _, xi = s[BB].split(s[BB].op.axis[1], factor=num_thread * 4)
    tx, xi = s[BB].split(xi, nparts=num_thread)
    s[BB].bind(ty, thread_y)
    s[BB].bind(tx, thread_x)
    s[BB].vectorize(xi)

    s[AA].double_buffer()
    s[BB].double_buffer()

    # ---- correctness check on one device -------------------------------
    def check_device(device):
        dev = tvm.device(device, 0)
        if not dev.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Device %s" % device)
        f = tvm.build(s, [A, B, C], device)

        # Launch the kernel on random inputs and compare with NumPy.
        n, m, l = nn, nn, nn
        a_np = np.random.uniform(size=(n, l)).astype(A.dtype)
        b_np = np.random.uniform(size=(m, l)).astype(B.dtype)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), dev)
        for _ in range(2):
            f(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), np.dot(b_np.T, a_np), rtol=1e-5)

        num_flops = 2 * nn * nn * nn
        num_runs = 10
        timer_f = f.time_evaluator(f.entry_name, dev, number=num_runs)
        t = timer_f(a, b, c).mean
        GFLOPS = num_flops / (t * 1e3) / 1e6
        print("average time cost of %d runs = %g ms, %g GFLOPS." % (num_runs, t * 1e3, GFLOPS))

    for device in ["cuda", "opencl", "rocm", "nvptx", "vulkan"]:
        with tvm.transform.PassContext(
            config={"tir.UnrollLoop": {"auto_max_step": 128, "explicit_unroll": device != "cuda"}}
        ):
            check_device(device)


if __name__ == "__main__":
    test_gemm()
4,813
34.397059
98
py
tvm
tvm-main/apps/topi_recipe/gemm/android_gemm_square.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do square matrix multiplication on Android Phone."""
import tvm
from tvm import te
import os

from tvm import rpc
from tvm.contrib import utils, ndk
import numpy as np

# Set to be address of tvm proxy.
proxy_host = os.environ["TVM_ANDROID_RPC_PROXY_HOST"]
proxy_port = 9090
key = "android"

# Change target configuration.
# Run `adb shell cat /proc/cpuinfo` to find the arch.
arch = "arm64"
target = "llvm -mtriple=%s-linux-android" % arch


def ngflops(N):
    """Number of giga floating-point operations of an NxN GEMM."""
    return 2.0 * float(N * N * N) / (10**9)


dtype = "float32"


def evaluate(func, dev, N, times):
    """Time `func` on random NxN inputs and verify against NumPy."""
    a_np = np.random.uniform(size=(N, N)).astype(dtype)
    b_np = np.random.uniform(size=(N, N)).astype(dtype)
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(b_np, dev)
    c = tvm.nd.array(np.zeros((N, N), dtype=dtype), dev)

    time_f = func.time_evaluator(func.entry_name, dev, number=times)
    cost = time_f(a, b, c).mean
    gf = ngflops(N) / cost
    print("%g secs/op, %g GFLOPS" % (cost, gf))
    np.testing.assert_almost_equal(c.numpy(), a_np.dot(b_np), decimal=2)


def test_gemm_gpu(N, times, bn, num_block, num_thread):
    """Build, deploy over RPC and benchmark a packed GEMM on the phone GPU."""
    assert bn <= N
    assert num_thread * num_thread * 16 <= N
    assert num_block * num_block * 2 <= N
    A = te.placeholder((N, N), name="A")
    B = te.placeholder((N, N), name="Btmp")
    k = te.reduce_axis((0, N), name="k")

    # BUGFIX: use integer (floor) division for shapes and indices.  Under
    # Python 3 the original `N / bn` / `jj / bn` produce float results.
    packedB = te.compute((N, N // bn, bn), lambda x, y, z: B[x, y * bn + z], name="B")

    C = te.compute(
        (N, N), lambda ii, jj: te.sum(A[ii, k] * packedB[k, jj // bn, jj % bn], axis=k), name="C"
    )

    s = te.create_schedule(C.op)
    CC = s.cache_write(C, "local")

    block_x = te.thread_axis("blockIdx.x")
    block_y = te.thread_axis("blockIdx.y")
    thread_x = te.thread_axis("threadIdx.x")
    thread_y = te.thread_axis("threadIdx.y")

    thread_xz = te.thread_axis((0, 2), "vthread", name="vx")
    thread_yz = te.thread_axis((0, 2), "vthread", name="vy")

    # Schedule for the packing stage: spread rows/cols over threads,
    # vectorize the innermost packed lanes.
    pby, pbi = s[packedB].split(packedB.op.axis[0], nparts=num_thread)
    pbx, pbj = s[packedB].split(packedB.op.axis[1], nparts=num_thread)
    s[packedB].bind(pby, thread_y)
    s[packedB].bind(pbx, thread_x)
    pbz, pbk = s[packedB].split(packedB.op.axis[2], factor=8)
    s[packedB].vectorize(pbk)

    # Tile the output over blocks, virtual threads and threads.
    by, yi = s[C].split(C.op.axis[0], nparts=num_block)
    bx, xi = s[C].split(C.op.axis[1], nparts=num_thread)

    s[C].bind(by, block_y)
    s[C].bind(bx, thread_y)
    s[C].reorder(by, bx, yi, xi)

    tyz, yi = s[C].split(yi, nparts=2)
    ty, yi = s[C].split(yi, nparts=num_block)
    txz, xi = s[C].split(xi, nparts=2)
    tx, xi = s[C].split(xi, nparts=num_thread)

    s[C].reorder(tyz, txz, ty, tx, yi, xi)
    s[C].bind(tyz, thread_yz)
    s[C].bind(txz, thread_xz)

    s[C].bind(ty, block_x)
    s[C].bind(tx, thread_x)

    xyi, xxi = s[C].split(xi, factor=8)
    s[C].reorder(tyz, txz, ty, tx, yi, xyi, xxi)
    s[C].vectorize(xxi)

    s[CC].compute_at(s[C], yi)
    yo, xo = CC.op.axis
    s[CC].reorder(k, yo, xo)
    xo, xi = s[CC].split(xo, factor=8)
    s[CC].vectorize(xi)

    ko, ki = s[CC].split(k, factor=2)
    s[CC].unroll(ki)

    print(tvm.lower(s, [A, B, C], simple_mode=True))

    f = tvm.build(s, [A, B, C], tvm.target.Target("opencl", host=target), name="gemm_gpu")
    temp = utils.tempdir()
    path_dso = temp.relpath("gemm_gpu.so")
    f.export_library(path_dso, ndk.create_shared)

    # connect to the proxy
    remote = rpc.connect(proxy_host, proxy_port, key=key)
    dev = remote.cl(0)
    remote.upload(path_dso)
    f = remote.load_module("gemm_gpu.so")

    evaluate(f, dev, N, times)


if __name__ == "__main__":
    test_gemm_gpu(1024, times=5, bn=8, num_block=2, num_thread=8)
4,450
31.021583
96
py
tvm
tvm-main/apps/topi_recipe/gemm/gemm_int8.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"Example code to perform int8 GEMM"
import logging
import sys
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.topi.cuda.tensor_intrin import dp4a

DO_TUNING = True
PRETUNED_INDEX = 75333

intrin_dp4a = dp4a("local", "local", "local")


@autotvm.template
def gemm_int8(n, m, l):
    """Tunable int8 GEMM template: C[n, m] = sum_k A[n, k] * B[m, k] in int32."""
    A = te.placeholder((n, l), name="A", dtype="int8")
    B = te.placeholder((m, l), name="B", dtype="int8")

    k = te.reduce_axis((0, l), name="k")
    C = te.compute(
        (n, m),
        lambda i, j: te.sum(A[i, k].astype("int32") * B[j, k].astype("int32"), axis=k),
        name="C",
    )

    cfg = autotvm.get_config()
    s = te.create_schedule(C.op)
    y, x = C.op.axis

    # Stage operands through shared and local memory.
    AA = s.cache_read(A, "shared", [C])
    BB = s.cache_read(B, "shared", [C])
    AL = s.cache_read(AA, "local", [C])
    BL = s.cache_read(BB, "local", [C])
    CC = s.cache_write(C, "local")

    # Reduction split: the innermost factor must be 4 so the dp4a intrinsic
    # can consume groups of four int8 multiplies.
    k = CC.op.reduce_axis[0]
    cfg.define_split(
        "tile_k",
        cfg.axis(k),
        num_outputs=3,
        filter=lambda entity: entity.size[2] == 4 and entity.size[0] * 2 >= entity.size[1],
    )
    ko, kt, ki = cfg["tile_k"].apply(s, CC, k)
    s[CC].tensorize(ki, intrin_dp4a)

    block_x = te.thread_axis("blockIdx.x")
    block_y = te.thread_axis("blockIdx.y")
    thread_x = te.thread_axis("threadIdx.x")
    thread_y = te.thread_axis("threadIdx.y")

    def block_size_filter(entity):
        # NOTE(review): `size[0] * 2 >= size[1] * 2` is equivalent to
        # `size[0] >= size[1]`; the doubled factors look like a leftover —
        # confirm against the tile_k filter before changing.
        return (
            entity.size[0] * 2 >= entity.size[1] * 2
            and entity.size[1] <= 16
            and entity.size[3] <= 4
        )

    cfg.define_split("tile_y", cfg.axis(y), num_outputs=4, filter=block_size_filter)
    cfg.define_split("tile_x", cfg.axis(x), num_outputs=4, filter=block_size_filter)
    by, tyz, ty, yi = cfg["tile_y"].apply(s, C, y)
    bx, txz, tx, xi = cfg["tile_x"].apply(s, C, x)

    s[C].bind(by, block_y)
    s[C].bind(bx, block_x)
    s[C].bind(tyz, te.thread_axis("vthread"))
    s[C].bind(txz, te.thread_axis("vthread"))
    s[C].bind(ty, thread_y)
    s[C].bind(tx, thread_x)
    s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)

    s[CC].compute_at(s[C], tx)

    yo, xo = CC.op.axis
    s[CC].reorder(ko, kt, yo, xo, ki)
    s[CC].unroll(kt)

    # Vectorized, double-buffered register loads.
    for stage in [AL, BL]:
        s[stage].compute_at(s[CC], kt)
        _, xi = s[stage].split(stage.op.axis[1], factor=4)
        s[stage].vectorize(xi)
        s[stage].double_buffer()

    # Cooperative shared-memory loads with a tunable storage alignment.
    cfg.define_knob("storage_align", [16, 48])
    for stage in [AA, BB]:
        s[stage].storage_align(s[stage].op.axis[0], cfg["storage_align"].val, 0)
        s[stage].compute_at(s[CC], ko)

        fused = s[stage].fuse(*s[stage].op.axis)
        ty, tx = s[stage].split(fused, nparts=cfg["tile_y"].size[2])
        tx, xi = s[stage].split(tx, nparts=cfg["tile_x"].size[2])
        _, xi = s[stage].split(xi, factor=16)

        s[stage].bind(ty, thread_y)
        s[stage].bind(tx, thread_x)
        s[stage].vectorize(xi)

    cfg.define_knob("auto_unroll_max_step", [512, 1500])
    s[C].pragma(by, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[C].pragma(by, "unroll_explicit", False)

    cfg.add_flop(n * m * l * 2)
    return s, [A, B, C]


if __name__ == "__main__":
    N = 2048
    n = m = l = N

    logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
    task = autotvm.task.create(gemm_int8, args=(n, m, l), target="cuda")
    print(task.config_space)

    measure_option = autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4),
    )

    log_name = "gemm_int8.log"
    if DO_TUNING:
        tuner = autotvm.tuner.XGBTuner(task)
        tuner.tune(
            n_trial=1000,
            measure_option=measure_option,
            callbacks=[autotvm.callback.log_to_file(log_name)],
        )

        dispatch_context = autotvm.apply_history_best(log_name)
        best_config = dispatch_context.query(task.target, task.workload)
        print("\nBest config:")
        print(best_config)
    else:
        config = task.config_space.get(PRETUNED_INDEX)
        dispatch_context = autotvm.task.ApplyConfig(config)
        print("Using pretuned config:")
        print(config)

    with dispatch_context:
        with tvm.target.Target("cuda"):
            s, arg_bufs = gemm_int8(n, m, l)
            f = tvm.build(s, arg_bufs, "cuda", name="gemm_int8")

    dev = tvm.device("cuda", 0)

    # NOTE(review): randint's `high` bound is exclusive, so values span
    # [-128, 126]; if the full int8 range was intended, high should be 128.
    a_np = np.random.randint(size=(n, l), low=-128, high=127, dtype="int8")
    b_np = np.random.randint(size=(m, l), low=-128, high=127, dtype="int8")

    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(b_np, dev)
    c = tvm.nd.array(np.zeros((n, m), dtype="int32"), dev)
    f(a, b, c)

    tvm.testing.assert_allclose(
        c.numpy(), np.dot(a_np.astype("int32"), b_np.T.astype("int32")), rtol=1e-5
    )

    num_ops = 2 * l * m * n
    num_runs = 1000
    timer_f = f.time_evaluator(f.entry_name, dev, number=num_runs)
    t = timer_f(a, b, c).mean
    GOPS = num_ops / (t * 1e3) / 1e6
    print("average time cost of %d runs = %g ms, %g GOPS." % (num_runs, t * 1e3, GOPS))
5,879
31.131148
91
py
tvm
tvm-main/apps/pt_tvmdsoop/tests/test_torch_graph_module.py
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for torch module"""
import tempfile
import os
import logging
import torch
import numpy as np
import tvm
import tvm.testing
from tvm import te, relay
import tvm.contrib.torch
from tvm.contrib import graph_runtime

# Artifacts produced by the relay export step, in the order init() expects.
TVM_ASSETS = ["mod.so", "graph.json", "params"]


def test_use_pt_graph_module():
    """main test function"""

    def build_export_graph(device):
        """Build `exp(x + y)` with relay and export lib/graph/params to a tempdir."""
        x = relay.var("x", shape=(10, 5))
        y = relay.var("y", shape=(1, 5))
        z = relay.exp(relay.add(x, y))
        func = relay.Function([x, y], z)

        x_data = np.random.rand(10, 5).astype("float32")
        y_data = np.random.rand(1, 5).astype("float32")
        params = {"y": y_data}

        pt_device = torch.device(device)
        if pt_device.type == "cuda":
            target = "cuda"
            ctx = tvm.cuda(pt_device.index)
        else:
            target = "llvm"
            ctx = tvm.cpu(0)

        graph, lib, params = relay.build(tvm.IRModule.from_expr(func), target=target, params=params)

        # Sanity-check the graph runtime output against NumPy before exporting.
        mod = graph_runtime.create(graph, lib, device=ctx)
        mod.set_input(**params)
        mod.set_input(x=x_data)
        mod.run()
        res = mod.get_output(0).asnumpy()
        ref_res = np.exp(y_data + x_data)
        tvm.testing.assert_allclose(res, ref_res, atol=1e-5, rtol=1e-5)

        # export to tempdir
        export_dir = tempfile.mkdtemp("tvm_export")
        lib.export_library(os.path.join(export_dir, TVM_ASSETS[0]))
        with open(os.path.join(export_dir, TVM_ASSETS[1]), "w") as fout:
            fout.write(graph)
        with open(os.path.join(export_dir, TVM_ASSETS[2]), "wb") as fout:
            fout.write(relay.save_param_dict(params))

        return export_dir

    def test_pt_run(device, trace=True, to_device=None):
        """test add lib with Pytorch wrapper"""
        print("\n############## Test on device:", device, "#################")
        export_dir = build_export_graph(device)
        engine = tvm.contrib.torch.GraphModule(num_inputs=2, num_outputs=1).to(device)

        x = np.random.rand(10, 5).astype("float32")
        y = np.random.rand(1, 5).astype("float32")
        expect = np.exp(y + x)

        def get_inputs_by_device(device):
            # Keep inputs on CPU or move them to the requested CUDA device.
            inps = [torch.Tensor(x), torch.Tensor(y)]
            if device == "cpu":
                return inps
            device_type, device_id = device.split(":")
            assert device_type == "cuda"
            return [inp.cuda(int(device_id)) for inp in inps]

        assets = [os.path.join(export_dir, i) for i in TVM_ASSETS]
        engine.init((x.shape, y.shape), *assets)

        outputs = engine.forward(get_inputs_by_device(device))
        tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)

        if trace:
            print("\n################ Test trace and load #################")
            scripted = torch.jit.script(engine)
            scripted_dir = tempfile.mkdtemp("scripted")
            scripted_path = os.path.join(scripted_dir, "model.pt")
            scripted.save(scripted_path)
            loaded = torch.jit.load(scripted_path)
            outputs = loaded.forward(get_inputs_by_device(device))
            tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
            del scripted
            del loaded

        if to_device:
            print(
                "\n################ Test move from [{}] to [{}] #################".format(
                    device, to_device
                )
            )
            engine = engine.to(to_device)
            outputs = engine.forward(get_inputs_by_device(to_device))
            tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
        del engine

    test_pt_run(device="cuda:0", trace=True, to_device="cuda:1")
    test_pt_run(device="cpu", trace=True)


if __name__ == "__main__":
    test_use_pt_graph_module()
4,793
35.876923
100
py
tvm
tvm-main/apps/pt_tvmdsoop/tests/test_optimize_torch.py
# pylint: disable=missing-class-docstring
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for tvm torch module"""
import tempfile

import torch
from torch.utils import benchmark
from torchvision.models import resnet18

import tvm
import tvm.testing
from tvm.contrib.torch import optimize_torch
from tvm.meta_schedule import TuneConfig


def test_matmul_tuning_relay():
    """Tune a plain matmul through optimize_torch and compare with torch."""

    def matmul(x, w):
        return torch.matmul(x, w)

    x = torch.randn(15, 20)
    w = torch.randn(20, 30)
    example_inputs = (x, w)

    rt_mod = optimize_torch(matmul, example_inputs)
    torch_answer = torch.matmul(x, w).numpy()
    tvm_answer = rt_mod(x, w).numpy()

    tvm.testing.assert_allclose(torch_answer, tvm_answer, atol=1e-5, rtol=1e-5)


class InnerModel(torch.nn.Module):
    # Small conv+relu submodule used to exercise nested-module handling.
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(1, 20, 5)

    def forward(self, x):
        return torch.nn.functional.relu(self.conv(x))


class SimpleModel(torch.nn.Module):
    # Wraps InnerModel to produce a two-level module hierarchy.
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(20, 20, 5)
        self.relu = InnerModel()

    def forward(self, x):
        x = self.relu(x)
        return torch.nn.functional.relu(self.conv(x))


def test_nested_module():
    """optimize_torch must handle modules that contain submodules."""
    simple_module = SimpleModel()
    example_input = torch.randn(20, 1, 10, 10)
    optimized_module = optimize_torch(simple_module, example_input)
    ret1 = simple_module(example_input).detach().numpy()
    ret2 = optimized_module(example_input).detach().numpy()
    tvm.testing.assert_allclose(ret1, ret2, atol=1e-5, rtol=1e-5)


def test_save_load_function():
    """An optimized function must round-trip through torch.save/torch.load."""

    def foo(x):
        return 2 * x + 1

    example_input = torch.rand(3)
    opt_foo = optimize_torch(foo, example_input)
    ret1 = opt_foo(example_input)
    with tempfile.NamedTemporaryFile(suffix=".pt") as tmp:
        torch.save(opt_foo, tmp.name)
        loaded_mod = torch.load(tmp.name)
        ret2 = loaded_mod(example_input)
    tvm.testing.assert_allclose(ret1.numpy(), ret2.numpy(), atol=1e-5, rtol=1e-5)


class MyResNet18(torch.nn.Module):
    # ResNet-18 whose backbone is compiled by optimize_torch.
    def __init__(self, config, target=None):
        super(MyResNet18, self).__init__()
        self.means = torch.nn.Parameter(
            torch.tensor([103.939, 116.779, 123.68]).resize_(1, 3, 1, 1)
        ).cuda()
        self.resnet = optimize_torch(resnet18(), [torch.rand(1, 3, 224, 224)], config, target)

    def forward(self, input):
        return self.resnet(input - self.means)


class JitModule(torch.nn.Module):
    # TorchScript-optimized baseline for the benchmark comparison.
    def __init__(self):
        super(JitModule, self).__init__()
        self.means = torch.nn.Parameter(
            torch.tensor([103.939, 116.779, 123.68]).resize_(1, 3, 1, 1)
        ).cuda()
        self.resnet = torch.jit.optimize_for_inference(torch.jit.script(resnet18().cuda().eval()))

    def forward(self, input):
        return self.resnet(input - self.means)


# default config for testing
config = TuneConfig(
    strategy="evolutionary",
    num_trials_per_iter=4,
    max_trials_per_task=8,
    max_trials_global=16,
)

if torch.cuda.is_available():
    target_cuda = "nvidia/geforce-rtx-3070"
    meta_module_resnet18 = MyResNet18(config, target_cuda)
    jit_module_resnet18 = JitModule()


def compare_optimize_resnet18_to_torchscript():
    """Benchmark the tuned ResNet-18 against the TorchScript one."""
    results = []
    for i in range(20):
        test_input = torch.rand(1, 3, 224, 224).half().cuda()
        sub_label = f"[test {i}]"
        results.append(
            benchmark.Timer(
                stmt="meta_module_resnet18(test_input)",
                setup="from __main__ import meta_module_resnet18",
                globals={"test_input": test_input},
                sub_label=sub_label,
                description="tuning by meta",
            ).blocked_autorange()
        )
        results.append(
            benchmark.Timer(
                stmt="jit_module_resnet18(test_input)",
                setup="from __main__ import jit_module_resnet18",
                globals={"test_input": test_input},
                sub_label=sub_label,
                description="tuning by jit",
            ).blocked_autorange()
        )
    compare = benchmark.Compare(results)
    compare.print()


if __name__ == "__main__":
    test_matmul_tuning_relay()
    test_nested_module()
    test_save_load_function()
    if torch.cuda.is_available():
        compare_optimize_resnet18_to_torchscript()
5,138
30.722222
98
py
tvm
tvm-main/apps/pt_tvmdsoop/tests/test_as_torch.py
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for tvm torch module"""
import tempfile

import numpy as np

import torch
import torch.nn

import tvm
from tvm.target.target import Target
import tvm.testing
from tvm.contrib.torch import as_torch
from tvm.script import tir as T


@as_torch
def matmul(M: int, N: int, K: int, dtype: str):
    """Parameterized TVMScript matmul: C[M, N] = A[M, K] @ B[N, K]^T."""

    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, [M, K], dtype=dtype)
        B = T.match_buffer(b, [N, K], dtype=dtype)
        C = T.match_buffer(c, [M, N], dtype=dtype)
        for i, j, k in T.grid(M, N, K):
            with T.block():
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = T.float32(0)
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]

    return main


@as_torch
@tvm.script.ir_module
class ModuleGPU:
    # Elementwise +1 over 8 floats, bound to CUDA blocks/threads.
    @T.prim_func
    def main(A: T.Buffer(8, "float32"), B: T.Buffer(8, "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        for i_0 in T.thread_binding(2, thread="blockIdx.x"):
            for i_2 in T.thread_binding(2, thread="threadIdx.x"):
                for i_1 in T.serial(2):
                    with T.block("B"):
                        vi = T.axis.spatial(8, i_0 * 4 + i_1 * 2 + i_2)
                        T.reads(A[vi])
                        T.writes(B[vi])
                        B[vi] = A[vi] + T.float32(1)


@as_torch
@T.prim_func
def func_with_part_access_region(a: T.handle, b: T.handle, c: T.handle) -> None:
    # Two stages with only partially-declared access regions (reads-only /
    # writes-only), exercising region inference.
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])

    with T.block():
        for i, j in T.grid(128, 128):
            with T.block("s1"):
                vi, vj = T.axis.remap("SS", [i, j])
                T.reads(A[vi, vj])
                B[vi, vj] = A[vi, vj] + T.float32(1)
        for i, j in T.grid(128, 128):
            with T.block("s2"):
                vi, vj = T.axis.remap("SS", [i, j])
                T.writes(C[vi, vj])
                C[vi, vj] = B[vi, vj] + T.float32(1)


@as_torch
@tvm.script.ir_module
class MyModule:
    @T.prim_func
    def main(a: T.handle, b: T.handle):
        # We exchange data between function by handles, which are similar to pointer.
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # Create buffer from handles.
        A = T.match_buffer(a, (8,), dtype="float32")
        B = T.match_buffer(b, (8,), dtype="float32")
        for i in range(8):
            # A block is an abstraction for computation.
            with T.block("B"):
                # Define a spatial block iterator and bind it to value i.
                vi = T.axis.spatial(8, i)
                B[vi] = A[vi] + 1.0


@as_torch
@T.prim_func
def loop_split(a: T.handle, b: T.handle) -> None:
    # Row-sum of a 128x128 matrix with the reduction split over threadIdx.x.
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i, ko in T.grid(128, 4):
        for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("B"):
                vi = T.axis.S(128, i)
                vk = T.axis.R(128, ko * 32 + ki)
                T.reads([B[vi], A[vi, vk]])
                T.writes([B[vi]])
                with T.init():
                    B[vi] = T.float32(0)
                B[vi] = B[vi] + A[vi, vk]


@as_torch
def elementwise_with_root(M: int, N: int, dtype: str):
    """Parameterized two-stage elementwise +1 pipeline under a root block."""

    @T.prim_func
    def f(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, [M, N])
        B = T.match_buffer(b, [M, N])
        C = T.match_buffer(c, [M, N])

        with T.block():
            for i, j in T.grid(M, N):
                with T.block("s1"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A[vi, vj] + T.float32(1)
            for i, j in T.grid(M, N):
                with T.block("s2"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    C[vi, vj] = B[vi, vj] + T.float32(1)

    return f


class MinuesOnes(torch.nn.Module):
    # Runs MyModule (out-of-place +1 into the last arg), then subtracts one,
    # so the net result equals the original input.
    def __init__(self):
        super(MinuesOnes, self).__init__()
        self.engine = MyModule

    def forward(self, *input):
        self.engine.forward(*input)
        return input[-1] - 1


def test_tvmscript_torch_matmul():
    """Compare the TVMScript matmul against numpy on random 128x128 inputs."""
    s1 = np.random.rand(128, 128).astype("float32")
    s2 = np.random.rand(128, 128).astype("float32")
    s3 = np.random.rand(128, 128).astype("float32")

    q1 = torch.from_numpy(s1)
    q2 = torch.from_numpy(s2)
    q3 = torch.from_numpy(s3)

    numpy_result = np.matmul(s1, np.transpose(s2))

    nn_module = matmul(128, 128, 128, "float32")

    nn_module(q1, q2, q3)

    tvm.testing.assert_allclose(q3.numpy(), numpy_result, atol=1e-5, rtol=1e-5)


def test_tvmscript_torch_decorator():
    """MyModule applied directly as a torch callable."""
    q1 = torch.arange(8).type(torch.float32)
    q2 = torch.zeros((8,), dtype=torch.float32)

    MyModule(q1, q2)

    tvm.testing.assert_allclose(q2.numpy(), (q1 + 1).numpy(), atol=1e-5, rtol=1e-5)


def test_tvmscript_torch_gpu():
    """ModuleGPU must survive torch.save/torch.load and run on CUDA tensors."""
    cuda0 = torch.device("cuda:0")
    q1 = torch.arange(8, device=cuda0).type(torch.float32)
    q2 = torch.zeros((8,), dtype=torch.float32, device=cuda0)

    with tempfile.NamedTemporaryFile(suffix=".pt") as tmp:
        torch.save(ModuleGPU, tmp.name)
        loaded_mod = torch.load(tmp.name)
        loaded_mod(q1, q2)

    tvm.testing.assert_allclose(q2.cpu().numpy(), (q1 + 1).cpu().numpy(), atol=1e-5, rtol=1e-5)


def test_torch_with_tvmscript():
    """A TVMScript-backed engine embedded inside a regular nn.Module."""
    ref_result = np.arange(8).astype("float32")

    q1 = torch.arange(8).type(torch.float32)
    q2 = torch.zeros((8,), dtype=torch.float32)

    nn_module = MinuesOnes()

    ret = nn_module.forward(q1, q2)

    tvm.testing.assert_allclose(ret.numpy(), ref_result, atol=1e-5, rtol=1e-5)


def test_tvmscript_torch_func_with_part_access_region():
    """Tune and run the partial-access-region func; C should be A + 2."""
    a1 = torch.rand(128, 128)
    a2 = torch.zeros(128, 128)
    a3 = torch.zeros(128, 128)

    result = a1 + 2

    func_with_part_access_region.tune()
    func_with_part_access_region(a1, a2, a3)

    tvm.testing.assert_allclose(a3.numpy(), result.numpy(), atol=1e-5, rtol=1e-5)


def test_tvmscript_torch_loop_split():
    """Tune the split-reduction row-sum for CUDA and compare with torch.sum."""
    x = torch.rand(128, 128).cuda()
    y = torch.zeros(128).cuda()

    result = torch.sum(x.cpu(), dim=1).numpy()

    loop_split.tune(
        "nvidia/geforce-rtx-3070",
        max_trials_global=128,
        strategy="replay-trace",
    )
    loop_split(x, y)

    tvm.testing.assert_allclose(y.cpu().numpy(), result, atol=1e-5, rtol=1e-5)


def test_tvmscript_torch_elementwise_with_root():
    """Tune and run the parameterized two-stage pipeline; C should be A + 2."""
    a1 = torch.rand(128, 128)
    a2 = torch.zeros(128, 128)
    a3 = torch.zeros(128, 128)

    result = a1 + 2

    func = elementwise_with_root(128, 128, "float32")
    func.tune(
        max_trials_global=128,
        strategy="replay-trace",
    )
    func(a1, a2, a3)

    tvm.testing.assert_allclose(a3.numpy(), result.numpy(), atol=1e-5, rtol=1e-5)


if __name__ == "__main__":
    test_tvmscript_torch_matmul()
    test_tvmscript_torch_decorator()
    test_tvmscript_torch_gpu()
    test_torch_with_tvmscript()
    test_tvmscript_torch_func_with_part_access_region()
    test_tvmscript_torch_loop_split()
    test_tvmscript_torch_elementwise_with_root()
8,027
29.758621
95
py
tvm
tvm-main/apps/pt_tvmdsoop/tests/test_torch_compile_cpu.py
#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test script for torch module""" import torch import time import tvm from tvm.contrib.torch import compile class Model(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x: torch.Tensor): return x * x model = Model() x = torch.rand([1, 3, 224, 224]) model_jit = torch.jit.trace(model, x) print(model_jit.graph) print("run torchscript...") for i in range(20): t = time.time() model_jit(x) print(time.time() - t) option = { "input_infos": [ ("x", (1, 3, 224, 224)), ], "default_dtype": "float16", "export_dir": "pytorch_compiled", "num_outputs": 1, "tuning_n_trials": 1, # set zero to skip tuning "tuning_log_file": "tuning.log", "target": "llvm", "device": tvm.cpu(), } pytorch_tvm_module = compile(model_jit, option) torch.jit.script(pytorch_tvm_module).save("model_tvm.pt") print("Run PyTorch...") for i in range(20): t = time.time() outputs = pytorch_tvm_module.forward([x.cpu()]) print(1000 * (time.time() - t)) print(outputs[0].shape)
1,877
26.217391
62
py
tvm
tvm-main/apps/pt_tvmdsoop/tests/test_torch_vm_module.py
#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test script for torch vm module""" import tempfile import os import logging import torch import numpy as np import tvm from tvm.contrib.torch.pytorch_tvm import TVM_ASSETS import tvm.testing from tvm import te, relay import tvm.contrib.torch from tvm.contrib import graph_runtime TVM_ASSETS = ["mod.so", "code.ro"] def test_use_pt_vm_module(): """main test function""" def build_export_vm(device): """relay build & export graph""" x = relay.var("x", shape=(10, 5)) y = relay.var("y", shape=(1, 5)) z = relay.add(x, y) z = relay.exp(z) func = relay.Function([x, y], z) x_data = np.random.rand(10, 5).astype("float32") y_data = np.random.rand(1, 5).astype("float32") pt_device = torch.device(device) if pt_device.type == "cuda": target = "cuda" ctx = tvm.cuda(pt_device.index) else: target = "llvm" ctx = tvm.cpu(0) exe = relay.vm.compile(tvm.IRModule.from_expr(func), target=target, params={}) code, lib = exe.save() export_dir = tempfile.mkdtemp("tvm_export") # export to tempdir lib.export_library(os.path.join(export_dir, TVM_ASSETS[0])) with open(os.path.join(export_dir, TVM_ASSETS[1]), "wb") as fout: fout.write(code) vm = tvm.runtime.vm.VirtualMachine(exe, ctx) res = vm.run(x_data, 
y_data) ref_res = np.exp(y_data + x_data) tvm.testing.assert_allclose(res.numpy(), ref_res, atol=1e-5, rtol=1e-5) return export_dir def test_pt_run(device, trace=True, to_device=None, inp_on_cuda=False): """test add lib with Pytorch wrapper""" print("\n############## Test on device:", device, "#################") export_dir = build_export_vm(device) engine = tvm.contrib.torch.VMModule(num_inputs=2, num_outputs=1).to(device) x = np.random.rand(10, 5).astype("float32") y = np.random.rand(1, 5).astype("float32") expect = np.exp(y + x) def get_inputs_by_device(device): inps = [torch.Tensor(x), torch.Tensor(y)] if device == "cpu": return inps else: device_type, device_id = device.split(":") assert device_type == "cuda" return [inp.cuda(int(device_id)) for inp in inps] assets = [os.path.join(export_dir, i) for i in TVM_ASSETS] engine.init((x.shape, y.shape), *assets) outputs = engine.forward(get_inputs_by_device(device)) tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5) if trace: print("\n################ Test trace and load #################") scripted = torch.jit.script(engine) scripted_dir = tempfile.mkdtemp("scripted") scripted_path = os.path.join(scripted_dir, "model.pt") scripted.save(scripted_path) loaded = torch.jit.load(scripted_path) outputs = loaded.forward(get_inputs_by_device(device)) tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5) del scripted del loaded if to_device: print( "\n################ Test move from [{}] to [{}] #################".format( device, to_device ) ) engine = engine.to(to_device) outputs = engine.forward(get_inputs_by_device(to_device)) tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5) del engine test_pt_run(device="cuda:0", trace=True, to_device="cuda:1", inp_on_cuda=True) test_pt_run(device="cpu", trace=True, inp_on_cuda=False) if __name__ == "__main__": test_use_pt_vm_module()
4,647
36.788618
90
py
tvm
tvm-main/apps/pt_tvmdsoop/tests/test_boolean_tensor.py
#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test script for boolean tensor support""" import tempfile import torch import tvm import tvm.testing from tvm.contrib.torch import as_torch, optimize_torch from tvm.script import tir as T def negate(x): return x.logical_not() def sum_up_tensor(x): return x.size(dim=0) - torch.sum(x.int()) def tensor_boolean_operation(x): arr1 = (x + 0.3).floor().bool() arr2 = (~((x + 0.7).int().bool())).bool() ret = ((arr1 & arr2).byte() + 0.5).half() return ~(ret.bool()) def test_bool_tensor_negate(): input = torch.ones(1, dtype=torch.bool) optimized_negate = optimize_torch( negate, input, ) with tempfile.NamedTemporaryFile(suffix=".pt") as tmp: torch.save(optimized_negate, tmp.name) loaded_mod = torch.load(tmp.name) output = loaded_mod(negate(input)) tvm.testing.assert_allclose(input.numpy(), output.numpy(), atol=1e-5, rtol=1e-5) def test_sum_up_tensor(): x = torch.randint(0, 2, (16,)) y = x.bool() optimized_func = optimize_torch( sum_up_tensor, (y,), ) ret1 = (x[x == 0]).size(dim=0) ret2 = optimized_func(y).numpy() tvm.testing.assert_allclose(ret1, ret2, atol=1e-5, rtol=1e-5) def test_tensor_boolean_operation(): input = torch.rand(200) model = optimize_torch( tensor_boolean_operation, 
input, ) ret1 = tensor_boolean_operation(input) ret2 = model(input) tvm.testing.assert_allclose(ret1, ret2, atol=1e-5, rtol=1e-5) @as_torch @T.prim_func def negate_tvmscript( X: T.Buffer((8, 8), "bool"), Y: T.Buffer((8, 8), "float32"), Z: T.Buffer((8, 8), "bool"), U: T.Buffer((8, 8), "float32"), ) -> None: for i, j in T.grid(8, 8): with T.block(): if Y[i, j] > 0.0: Z[i, j] = X[i, j] U[i, j] = Y[i, j] else: Z[i, j] = not X[i, j] U[i, j] = 0.0 - Y[i, j] def negate_vanila(x, y): z = torch.zeros(8, 8).bool() for i in range(8): for j in range(8): if y[i, j] > 0: z[i, j] = x[i, j] else: z[i, j] = ~x[i, j] return z def test_tvmscript_torch_decorator(): q1 = (torch.rand(8, 8) + 0.5).int().bool() q2 = torch.rand(8, 8) - 0.5 q3 = torch.zeros(8, 8).bool() q4 = torch.zeros(8, 8) std1 = negate_vanila(q1, q2) std2 = torch.abs(q2) negate_tvmscript(q1, q2, q3, q4) tvm.testing.assert_allclose(std1.numpy(), q3.numpy(), atol=1e-5, rtol=1e-5) tvm.testing.assert_allclose(std2.numpy(), q4.numpy(), atol=1e-5, rtol=1e-5) if __name__ == "__main__": test_tvmscript_torch_decorator() test_bool_tensor_negate() test_sum_up_tensor() test_tensor_boolean_operation()
3,590
26.623077
84
py
tvm
tvm-main/apps/pt_tvmdsoop/tests/test_torch_script.py
#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test script for torch module""" import os import torch import time import numpy as np import tvm import tvm.testing import tempfile from tvm.contrib.torch import PyTorchTVMModule, compile class Model(torch.nn.Module): def forward(self, x, y): return torch.matmul(x, y.softmax(1)) model = Model() model.cuda().half() x = torch.rand([1280, 2464, 4]).cuda().half() y = torch.rand([1280, 4, 1]).cuda().half() for i in range(20): t = time.time() o = model(x, y) torch.cuda.synchronize() print(1000 * (time.time() - t)) print(o.shape) model_jit = torch.jit.script(model) print(model_jit.graph) input_shapes = [("x", list(x.shape)), ("y", list(y.shape))] dtype = "float16" export_dir = tempfile.mkdtemp("pytorch_compiled") print("tmp export_dir:", export_dir) mod = PyTorchTVMModule() print("Converting...") mod.from_pytorch(model_jit, input_shapes, dtype) log_file = os.path.join(export_dir, "tuning.log") if not os.path.exists(log_file): print("Tuning...") mod.tune_tvm(log_file=log_file, n_trial=20) print("Building...") tvm_mod = mod.build_tvm(export_dir) pytorch_mod = mod.build_pytorch_module(num_inputs=2, num_outputs=1) ## Or you can load from a prebuilt tvm module # mod = PyTorchTVMModule() # 
tvm_mod = mod.load_tvm(export_dir) # pytorch_mod = mod.build_pytorch_module(num_inputs=2, num_outputs=1, input_infos=input_shapes) print("Run TVM...") tvm_x = tvm.nd.array(x.cpu().numpy().astype(dtype), device=tvm.gpu(0)) tvm_y = tvm.nd.array(y.cpu().numpy().astype(dtype), device=tvm.gpu(0)) for i in range(20): t = time.time() tvm_mod.run(x=tvm_x, y=tvm_y) print(1000 * (time.time() - t)) tvm_output = tvm_mod.get_output(0) print(tvm_output.shape) print("Run PyTorch...") for i in range(20): t = time.time() outputs = pytorch_mod.forward([x, y]) torch.cuda.synchronize() print(1000 * (time.time() - t)) print(outputs[0].shape) class EnsembleModel(torch.nn.Module): def __init__(self): super().__init__() self.layer = torch.jit.script(pytorch_mod) def forward(self, x, y, z) -> torch.Tensor: if x > 1: out = self.layer(y, z)[0] else: out = torch.ones([1280, 2464, 1]) return out print("Exporting...") scripted = torch.jit.script(EnsembleModel()) print(scripted.graph) scripted_path = os.path.join(export_dir, "model_tvm.pt") scripted.save(scripted_path) # print(o == outputs[0]) # print(o - outputs[0])
3,270
26.957265
95
py
tvm
tvm-main/apps/pt_tvmdsoop/tests/test_trace_tvm_module.py
#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test script for torch module""" import torch import time import tvm from tvm.contrib.torch import compile, TraceTvmModule, pytorch_tvm class Model(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x: torch.Tensor, y: torch.Tensor): return x * y model = Model() x = torch.rand([1, 2, 3]) y = torch.rand([1, 2, 3]) model_jit = torch.jit.script(model) option = { "input_infos": [("x", (1, 2, 3)), ("y", (1, 2, 3))], "default_dtype": "float32", "export_dir": "pytorch_compiled", "num_outputs": 1, "tuning_n_trials": 0, # set zero to skip tuning "tuning_log_file": "tuning.log", "target": "llvm", "device": tvm.cpu(), } # use TraceTvmModule to convert List[Tensor] input/output # to tuple of Tensors pytorch_tvm_module = compile(model_jit, option) scripted = torch.jit.script(pytorch_tvm_module) traced = torch.jit.trace(TraceTvmModule(scripted), (x, y)) res_traced = traced.forward(x, y) res_expected = pytorch_tvm_module.forward([x, y])[0] tvm.testing.assert_allclose(res_traced, res_expected)
1,892
31.084746
66
py
tvm
tvm-main/apps/pt_tvmdsoop/tests/test_torch_compile_gpu.py
#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test script for torch module""" import torch import time from torchvision.models import resnet50 import tvm from tvm.contrib.torch import compile model = resnet50().half().cuda() x = torch.rand([1, 3, 224, 224]).half().cuda() model_jit = torch.jit.trace(model, x) print(model_jit.graph) print("run torchscript...") for i in range(20): t = time.time() model_jit(x) torch.cuda.synchronize() print(time.time() - t) option = { "input_infos": [ ("x", (1, 3, 224, 224)), ], "default_dtype": "float16", "export_dir": "pytorch_compiled", "num_outputs": 1, "tuning_n_trials": 1, # set zero to skip tuning "tuning_log_file": "tuning.log", "target": "cuda", "device": tvm.cuda(0), } pytorch_tvm_module = compile(model_jit, option) torch.jit.script(pytorch_tvm_module).save("model_tvm.pt") print("Run PyTorch...") for i in range(20): t = time.time() outputs = pytorch_tvm_module.forward([x]) torch.cuda.synchronize() print(1000 * (time.time() - t)) print(outputs[0].shape)
1,857
28.03125
62
py
tvm
tvm-main/apps/benchmark/arm_cpu_imagenet_bench.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Benchmark script for ImageNet models on ARM CPU. see README.md for the usage and results of this script. """ import argparse import numpy as np import tvm from tvm import te from tvm.contrib.utils import tempdir import tvm.contrib.graph_executor as runtime from tvm import relay from util import get_network, print_progress def evaluate_network(network, target, target_host, repeat): # connect to remote device tracker = tvm.rpc.connect_tracker(args.host, args.port) remote = tracker.request(args.rpc_key) print_progress(network) net, params, input_shape, output_shape = get_network(network, batch_size=1) print_progress("%-20s building..." % network) with tvm.transform.PassContext(opt_level=3): lib = relay.build(net, target=tvm.target.Target(target, host=target_host), params=params) tmp = tempdir() if "android" in str(target): from tvm.contrib import ndk filename = "%s.so" % network lib.export_library(tmp.relpath(filename), ndk.create_shared) else: filename = "%s.tar" % network lib.export_library(tmp.relpath(filename)) # upload library and params print_progress("%-20s uploading..." 
% network) dev = remote.device(str(target), 0) remote.upload(tmp.relpath(filename)) rlib = remote.load_module(filename) module = runtime.GraphModule(rlib["default"](dev)) data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype)) module.set_input("data", data_tvm) # evaluate print_progress("%-20s evaluating..." % network) ftimer = module.module.time_evaluator("run", dev, number=1, repeat=repeat) prof_res = np.array(ftimer().results) * 1000 # multiply 1000 for converting to millisecond print( "%-20s %-19s (%s)" % (network, "%.2f ms" % np.mean(prof_res), "%.2f ms" % np.std(prof_res)) ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--network", type=str, choices=[ "resnet-18", "resnet-34", "resnet-50", "vgg-16", "vgg-19", "densenet-121", "inception_v3", "mobilenet", "squeezenet_v1.0", "squeezenet_v1.1", ], help="The name of neural network", ) parser.add_argument( "--model", type=str, choices=["rk3399", "mate10", "mate10pro", "p20", "p20pro", "pixel2", "rasp3b", "pynq"], default="rk3399", help="The model of the test device. If your device is not listed in " "the choices list, pick the most similar one as argument.", ) parser.add_argument("--host", type=str, default="127.0.0.1") parser.add_argument("--port", type=int, default=9190) parser.add_argument("--rpc-key", type=str, required=True) parser.add_argument("--repeat", type=int, default=10) args = parser.parse_args() dtype = "float32" if args.network is None: networks = ["squeezenet_v1.1", "mobilenet", "resnet-18", "vgg-16"] else: networks = [args.network] target = tvm.target.arm_cpu(model=args.model) target_host = None print("--------------------------------------------------") print("%-20s %-20s" % ("Network Name", "Mean Inference Time (std dev)")) print("--------------------------------------------------") for network in networks: evaluate_network(network, target, target_host, args.repeat)
4,285
34.131148
99
py
tvm
tvm-main/apps/benchmark/mobile_gpu_imagenet_bench.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Benchmark script for ImageNet models on mobile GPU. see README.md for the usage and results of this script. """ import argparse import numpy as np import tvm from tvm import te from tvm.contrib.utils import tempdir import tvm.contrib.graph_executor as runtime from tvm import relay from util import get_network, print_progress def evaluate_network(network, target, target_host, dtype, repeat): # connect to remote device tracker = tvm.rpc.connect_tracker(args.host, args.port) remote = tracker.request(args.rpc_key) print_progress(network) net, params, input_shape, output_shape = get_network(network, batch_size=1, dtype=dtype) print_progress("%-20s building..." % network) with tvm.transform.PassContext(opt_level=3): lib = relay.build(net, target=tvm.target.Target(target, host=target_host), params=params) tmp = tempdir() if "android" in str(target) or "android" in str(target_host): from tvm.contrib import ndk filename = "%s.so" % network lib.export_library(tmp.relpath(filename), ndk.create_shared) else: filename = "%s.tar" % network lib.export_library(tmp.relpath(filename)) # upload library and params print_progress("%-20s uploading..." 
% network) dev = remote.device(str(target), 0) remote.upload(tmp.relpath(filename)) rlib = remote.load_module(filename) module = runtime.GraphModule(rlib["default"](dev)) data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype)) module.set_input("data", data_tvm) # evaluate print_progress("%-20s evaluating..." % network) ftimer = module.module.time_evaluator("run", dev, number=1, repeat=repeat) prof_res = np.array(ftimer().results) * 1000 # multiply 1000 for converting to millisecond print( "%-20s %-19s (%s)" % (network, "%.2f ms" % np.mean(prof_res), "%.2f ms" % np.std(prof_res)) ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--network", type=str, choices=[ "resnet-18", "resnet-34", "resnet-50", "vgg-16", "vgg-19", "densenet-121", "inception_v3", "mobilenet", "squeezenet_v1.0", "squeezenet_v1.1", ], help="The name of neural network", ) parser.add_argument( "--model", type=str, choices=["rk3399"], default="rk3399", help="The model of the test device. If your device is not listed in " "the choices list, pick the most similar one as argument.", ) parser.add_argument("--host", type=str, default="127.0.0.1") parser.add_argument("--port", type=int, default=9190) parser.add_argument("--rpc-key", type=str, required=True) parser.add_argument("--repeat", type=int, default=30) parser.add_argument("--dtype", type=str, default="float32") args = parser.parse_args() if args.network is None: networks = ["squeezenet_v1.1", "mobilenet", "resnet-18", "vgg-16"] else: networks = [args.network] target = tvm.target.mali(model=args.model) target_host = tvm.target.arm_cpu(model=args.model) print("--------------------------------------------------") print("%-20s %-20s" % ("Network Name", "Mean Inference Time (std dev)")) print("--------------------------------------------------") for network in networks: evaluate_network(network, target, target_host, args.dtype, args.repeat)
4,356
34.713115
99
py
tvm
tvm-main/apps/benchmark/gpu_imagenet_bench.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Benchmark script for ImageNet models on GPU. see README.md for the usage and results of this script. """ import argparse import threading import numpy as np import tvm from tvm import te import tvm.contrib.graph_executor as runtime from tvm import relay from util import get_network def benchmark(network, target): net, params, input_shape, output_shape = get_network(network, batch_size=1) with tvm.transform.PassContext(opt_level=3): lib = relay.build(net, target=target, params=params) # create runtime dev = tvm.device(str(target), 0) module = runtime.GraphModule(lib["default"](dev)) data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype)) module.set_input("data", data_tvm) # evaluate ftimer = module.module.time_evaluator("run", dev, number=1, repeat=args.repeat) prof_res = np.array(ftimer().results) * 1000 # multiply 1000 for converting to millisecond print( "%-20s %-19s (%s)" % (network, "%.2f ms" % np.mean(prof_res), "%.2f ms" % np.std(prof_res)) ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--network", type=str, choices=[ "resnet-18", "resnet-34", "resnet-50", "vgg-16", "vgg-19", "densenet-121", "inception_v3", "mobilenet", 
"squeezenet_v1.0", "squeezenet_v1.1", ], help="The name of neural network", ) parser.add_argument( "--device", type=str, choices=["amd_apu"], default="amd_apu", help="The name of the test device. If your device is not listed in " "the choices list, pick the most similar one as argument.", ) parser.add_argument( "--model", type=str, choices=["1080ti", "titanx", "tx2", "gfx900", "v1000"], default="1080ti", help="The model of the test device. If your device is not listed in " "the choices list, pick the most similar one as argument.", ) parser.add_argument("--repeat", type=int, default=600) parser.add_argument( "--target", type=str, choices=["cuda", "opencl", "rocm", "nvptx", "metal", "vulkan"], default="cuda", help="The tvm compilation target", ) parser.add_argument("--thread", type=int, default=1, help="The number of threads to be run.") args = parser.parse_args() dtype = "float32" if args.network is None: networks = ["resnet-50", "mobilenet", "vgg-19", "inception_v3"] else: networks = [args.network] target = tvm.target.Target("%s -device=%s -model=%s" % (args.target, args.device, args.model)) print("--------------------------------------------------") print("%-20s %-20s" % ("Network Name", "Mean Inference Time (std dev)")) print("--------------------------------------------------") for network in networks: if args.thread == 1: benchmark(network, target) else: threads = list() for n in range(args.thread): thread = threading.Thread( target=benchmark, args=([network, target]), name="thread%d" % n ) threads.append(thread) for thread in threads: thread.start() for thread in threads: thread.join()
4,260
32.551181
99
py
tvm
tvm-main/apps/benchmark/util.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Utility for benchmark""" import sys from tvm import relay from tvm.relay import testing def get_network(name, batch_size, dtype="float32"): """Get the symbol definition and random weight of a network Parameters ---------- name: str The name of the network, can be 'resnet-18', 'resnet-50', 'vgg-16', 'inception_v3', 'mobilenet', ... 
batch_size: int batch size dtype: str Data type Returns ------- net: tvm.IRModule The relay function of network definition params: dict The random parameters for benchmark input_shape: tuple The shape of input tensor output_shape: tuple The shape of output tensor """ input_shape = (batch_size, 3, 224, 224) output_shape = (batch_size, 1000) if name == "mobilenet": net, params = testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype) elif name == "inception_v3": input_shape = (batch_size, 3, 299, 299) net, params = testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype) elif "resnet" in name: n_layer = int(name.split("-")[1]) net, params = testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif "vgg" in name: n_layer = int(name.split("-")[1]) net, params = testing.vgg.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif "densenet" in name: n_layer = int(name.split("-")[1]) net, params = testing.densenet.get_workload( densenet_size=n_layer, batch_size=batch_size, dtype=dtype ) elif "squeezenet" in name: version = name.split("_v")[1] net, params = testing.squeezenet.get_workload( batch_size=batch_size, version=version, dtype=dtype ) elif name == "mxnet": # an example for mxnet model from mxnet.gluon.model_zoo.vision import get_model block = get_model("resnet18_v1", pretrained=True) net, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype) net = net["main"] net = relay.Function( net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs ) net = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) return net, params, input_shape, output_shape def print_progress(msg): """print progress message Parameters ---------- msg: str The message to print """ sys.stdout.write(msg + "\r") sys.stdout.flush()
3,448
32.813725
108
py
tvm
tvm-main/apps/benchmark/adreno/adreno_gpu_bench_texture.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Benchmark script for various models on Adreno GPU. """ import argparse import numpy as np import os import sys import tvm from tvm import te from tvm.relay import testing from tvm.contrib.utils import tempdir import tvm.contrib.graph_executor as runtime from tvm import relay from tvm import autotvm from tvm.contrib import utils, ndk def get_network(name, batch_size, dtype="float32"): """Get the symbol definition and random weight of a network Parameters ---------- name: str The name of the network, can be 'resnet-18', 'resnet-50', 'vgg-16', 'inception_v3', 'mobilenet', ... 
batch_size: int batch size dtype: str Data type Returns ------- net: tvm.IRModule The relay function of network definition params: dict The random parameters for benchmark input_shape: tuple The shape of input tensor output_shape: tuple The shape of output tensor """ input_shape = (batch_size, 3, 224, 224) output_shape = (batch_size, 1000) if name == "mobilenet": net, params = testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype) elif name == "inception_v3": input_shape = (batch_size, 3, 299, 299) net, params = testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype) elif "resnet" in name: n_layer = int(name.split("-")[1]) net, params = testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif "vgg" in name: n_layer = int(name.split("-")[1]) net, params = testing.vgg.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif "densenet" in name: n_layer = int(name.split("-")[1]) net, params = testing.densenet.get_workload( densenet_size=n_layer, batch_size=batch_size, dtype=dtype ) elif "squeezenet" in name: version = name.split("_v")[1] net, params = testing.squeezenet.get_workload( batch_size=batch_size, version=version, dtype=dtype ) elif name == "mxnet": # an example for mxnet model from mxnet.gluon.model_zoo.vision import get_model block = get_model("resnet18_v1", pretrained=True) net, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype) net = net["main"] net = relay.Function( net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs ) net = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) return net, params, input_shape, output_shape def print_progress(msg): """print progress message Parameters ---------- msg: str The message to print """ sys.stdout.write(msg + "\r") sys.stdout.flush() def tune_tasks( tasks, measure_option, tuner="xgb", n_trial=1024, early_stopping=None, log_filename="tuning.log", ): from 
tvm.autotvm.tuner import XGBTuner tmp_log_file = log_filename + ".tmp" for i, tsk in enumerate(reversed(tasks)): print("Task: ", tsk) prefix = "[Task %2d/%2d] " % (i + 1, len(tasks)) # create tuner if tuner == "xgb": tuner_obj = XGBTuner(tsk, loss_type="reg") elif tuner == "xgb_knob": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="knob") elif tuner == "xgb_itervar": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="itervar") elif tuner == "xgb_curve": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="curve") elif tuner == "xgb_rank": tuner_obj = XGBTuner(tsk, loss_type="rank") elif tuner == "xgb_rank_knob": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob") elif tuner == "xgb_rank_itervar": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="itervar") elif tuner == "xgb_rank_curve": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="curve") elif tuner == "xgb_rank_binary": tuner_obj = XGBTuner(tsk, loss_type="rank-binary") elif tuner == "xgb_rank_binary_knob": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="knob") elif tuner == "xgb_rank_binary_itervar": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="itervar") elif tuner == "xgb_rank_binary_curve": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="curve") elif tuner == "ga": tuner_obj = GATuner(tsk, pop_size=50) elif tuner == "random": tuner_obj = RandomTuner(tsk) elif tuner == "gridsearch": tuner_obj = GridSearchTuner(tsk) else: raise ValueError("Invalid tuner: " + tuner) tsk_trial = min(n_trial, len(tsk.config_space)) tuner_obj.tune( n_trial=tsk_trial, early_stopping=early_stopping, measure_option=measure_option, callbacks=[ autotvm.callback.progress_bar(tsk_trial, prefix=prefix), autotvm.callback.log_to_file(tmp_log_file), ], ) autotvm.record.pick_best(tmp_log_file, log_filename) def evaluate_network(network, target, target_host, dtype, repeat): print_progress(network) net, params, input_shape, output_shape = 
get_network(network, batch_size=1, dtype=dtype) # Auto Tuning tune_log = "adreno-" + network + "-" + dtype + ".log" tuning_options = { "log_filename": tune_log, "early_stopping": None, "measure_option": autotvm.measure_option( builder=autotvm.LocalBuilder(build_func=ndk.create_shared, timeout=15), runner=autotvm.RPCRunner( args.rpc_key, host=args.host, port=args.port, number=3, timeout=600, ), ), } if args.tune: tasks = autotvm.task.extract_from_program( net, target=target, target_host=target_host, params=params ) tune_tasks(tasks, **tuning_options) print_progress("%-20s building..." % network) # Build the tuning log if os.path.exists(tune_log): with autotvm.apply_history_best(tune_log): with tvm.transform.PassContext(opt_level=3): lib = relay.build( net, target=tvm.target.Target(target, host=target_host), params=params ) else: with tvm.transform.PassContext(opt_level=3): lib = relay.build( net, target=tvm.target.Target(target, host=target_host), params=params ) tmp = tempdir() filename = "%s.so" % network lib.export_library(tmp.relpath(filename), ndk.create_shared) # upload library and params print_progress("%-20s uploading..." % network) # connect to remote device tracker = tvm.rpc.connect_tracker(args.host, args.port) remote = tracker.request(args.rpc_key) dev = remote.device(str(target), 0) remote.upload(tmp.relpath(filename)) rlib = remote.load_module(filename) module = runtime.GraphModule(rlib["default"](dev)) data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype)) module.set_input("data", data_tvm) # evaluate print_progress("%-20s evaluating..." 
% network) ftimer = module.module.time_evaluator("run", dev, number=1, repeat=repeat) prof_res = np.array(ftimer().results) * 1000 # multiply 1000 for converting to millisecond print( "%-20s %-19s (%s)" % (network + "-" + dtype, "%.2f ms" % np.mean(prof_res), "%.2f ms" % np.std(prof_res)) ) return (np.mean(prof_res), np.std(prof_res)) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--network", type=str, choices=[ "resnet-18", "resnet-34", "resnet-50", "vgg-16", "vgg-19", "densenet-121", "inception_v3", "mobilenet", "squeezenet_v1.0", "squeezenet_v1.1", ], help="The name of neural network", ) parser.add_argument("--host", type=str, default="127.0.0.1") parser.add_argument("--port", type=int, default=9190) parser.add_argument("--rpc-key", type=str, default="android") parser.add_argument("--repeat", type=int, default=30) parser.add_argument("--tune", type=bool, default=False) args = parser.parse_args() if args.network is None: networks = [ "resnet-18", "resnet-34", "resnet-50", "vgg-16", "vgg-19", "densenet-121", "inception_v3", "mobilenet", "squeezenet_v1.0", "squeezenet_v1.1", ] else: networks = [args.network] target = "opencl -device=adreno" target_host = "llvm -mtriple=arm64-linux-android" print("--------------------------------------------------") print("%-20s %-20s" % ("Network Name", "Mean Inference Time (std dev)")) print("--------------------------------------------------") results = {} for network in networks: ftime = evaluate_network(network, target, target_host, "float32", args.repeat) results[network + "-float32"] = ftime ftime = evaluate_network(network, target, target_host, "float16", args.repeat) results[network + "-float16"] = ftime print("----------------------------------------------------------------------") print("%-30s %-30s" % ("Network Name", "Mean Inference Time (std dev)")) print("----------------------------------------------------------------------") for key, val in results.items(): print("%-30s %-30s 
(%s)" % (key, "%.2f ms" % val[0], "%.2f ms" % val[1]))
10,884
33.776358
108
py
tvm
tvm-main/apps/benchmark/adreno/adreno_gpu_bench_clml.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Benchmark script for various models on Adreno GPU. """ import argparse import numpy as np import os import sys import tvm from tvm import te from tvm.relay import testing from tvm.contrib.utils import tempdir from tvm.relay.op.contrib import clml import tvm.contrib.graph_executor as runtime from tvm import relay from tvm import autotvm from tvm.contrib import utils, ndk def get_network(name, batch_size, dtype="float32"): """Get the symbol definition and random weight of a network Parameters ---------- name: str The name of the network, can be 'resnet-18', 'resnet-50', 'vgg-16', 'inception_v3', 'mobilenet', ... 
batch_size: int batch size dtype: str Data type Returns ------- net: tvm.IRModule The relay function of network definition params: dict The random parameters for benchmark input_shape: tuple The shape of input tensor output_shape: tuple The shape of output tensor """ input_shape = (batch_size, 3, 224, 224) output_shape = (batch_size, 1000) if name == "mobilenet": net, params = testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype) elif name == "inception_v3": input_shape = (batch_size, 3, 299, 299) net, params = testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype) elif "resnet" in name: n_layer = int(name.split("-")[1]) net, params = testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif "vgg" in name: n_layer = int(name.split("-")[1]) net, params = testing.vgg.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif "densenet" in name: n_layer = int(name.split("-")[1]) net, params = testing.densenet.get_workload( densenet_size=n_layer, batch_size=batch_size, dtype=dtype ) elif "squeezenet" in name: version = name.split("_v")[1] net, params = testing.squeezenet.get_workload( batch_size=batch_size, version=version, dtype=dtype ) elif name == "mxnet": # an example for mxnet model from mxnet.gluon.model_zoo.vision import get_model block = get_model("resnet18_v1", pretrained=True) net, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype) net = net["main"] net = relay.Function( net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs ) net = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) return net, params, input_shape, output_shape def print_progress(msg): """print progress message Parameters ---------- msg: str The message to print """ sys.stdout.write(msg + "\r") sys.stdout.flush() def tune_tasks( tasks, measure_option, tuner="xgb", n_trial=1024, early_stopping=None, log_filename="tuning.log", ): from 
tvm.autotvm.tuner import XGBTuner tmp_log_file = log_filename + ".tmp" for i, tsk in enumerate(reversed(tasks)): print("Task: ", tsk) prefix = "[Task %2d/%2d] " % (i + 1, len(tasks)) # create tuner if tuner == "xgb": tuner_obj = XGBTuner(tsk, loss_type="reg") elif tuner == "xgb_knob": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="knob") elif tuner == "xgb_itervar": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="itervar") elif tuner == "xgb_curve": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="curve") elif tuner == "xgb_rank": tuner_obj = XGBTuner(tsk, loss_type="rank") elif tuner == "xgb_rank_knob": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob") elif tuner == "xgb_rank_itervar": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="itervar") elif tuner == "xgb_rank_curve": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="curve") elif tuner == "xgb_rank_binary": tuner_obj = XGBTuner(tsk, loss_type="rank-binary") elif tuner == "xgb_rank_binary_knob": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="knob") elif tuner == "xgb_rank_binary_itervar": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="itervar") elif tuner == "xgb_rank_binary_curve": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="curve") elif tuner == "ga": tuner_obj = GATuner(tsk, pop_size=50) elif tuner == "random": tuner_obj = RandomTuner(tsk) elif tuner == "gridsearch": tuner_obj = GridSearchTuner(tsk) else: raise ValueError("Invalid tuner: " + tuner) tsk_trial = min(n_trial, len(tsk.config_space)) tuner_obj.tune( n_trial=tsk_trial, early_stopping=early_stopping, measure_option=measure_option, callbacks=[ autotvm.callback.progress_bar(tsk_trial, prefix=prefix), autotvm.callback.log_to_file(tmp_log_file), ], ) autotvm.record.pick_best(tmp_log_file, log_filename) def evaluate_network(network, target, target_host, dtype, repeat): print_progress(network) net, params, input_shape, output_shape = 
get_network(network, batch_size=1, dtype=dtype) # Auto Tuning tune_log = "adreno-" + network + "-" + dtype + ".log" tuning_options = { "log_filename": tune_log, "early_stopping": None, "measure_option": autotvm.measure_option( builder=autotvm.LocalBuilder(build_func=ndk.create_shared, timeout=15), runner=autotvm.RPCRunner( args.rpc_key, host=args.host, port=args.port, number=3, timeout=600, ), ), } if args.tune: tasks = autotvm.task.extract_from_program( net, target=target, target_host=target_host, params=params ) tune_tasks(tasks, **tuning_options) print_progress("%-20s building..." % network) # Build the tuning log if os.path.exists(tune_log): with autotvm.apply_history_best(tune_log): with tvm.transform.PassContext(opt_level=3): net = clml.partition_for_clml(net, params) lib = relay.build( net, target=tvm.target.Target(target, host=target_host), params=params ) else: with tvm.transform.PassContext(opt_level=3): net = clml.partition_for_clml(net, params) lib = relay.build( net, target=tvm.target.Target(target, host=target_host), params=params ) tmp = tempdir() filename = "%s.so" % network lib.export_library(tmp.relpath(filename), ndk.create_shared) # upload library and params print_progress("%-20s uploading..." % network) # connect to remote device tracker = tvm.rpc.connect_tracker(args.host, args.port) remote = tracker.request(args.rpc_key) dev = remote.device(str(target), 0) remote.upload(tmp.relpath(filename)) rlib = remote.load_module(filename) module = runtime.GraphModule(rlib["default"](dev)) data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype)) module.set_input("data", data_tvm) # evaluate print_progress("%-20s evaluating..." 
% network) ftimer = module.module.time_evaluator("run", dev, number=1, repeat=repeat) prof_res = np.array(ftimer().results) * 1000 # multiply 1000 for converting to millisecond print( "%-20s %-19s (%s)" % (network + "-" + dtype, "%.2f ms" % np.mean(prof_res), "%.2f ms" % np.std(prof_res)) ) return (np.mean(prof_res), np.std(prof_res)) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--network", type=str, choices=[ "resnet-18", "resnet-34", "resnet-50", "vgg-16", "vgg-19", "densenet-121", "inception_v3", "mobilenet", "squeezenet_v1.0", "squeezenet_v1.1", ], help="The name of neural network", ) parser.add_argument("--host", type=str, default="127.0.0.1") parser.add_argument("--port", type=int, default=9190) parser.add_argument("--rpc-key", type=str, default="android") parser.add_argument("--repeat", type=int, default=30) parser.add_argument("--tune", type=bool, default=False) args = parser.parse_args() if args.network is None: networks = [ "resnet-18", "resnet-34", "resnet-50", # "vgg-16", # "vgg-19", "densenet-121", "inception_v3", "mobilenet", "squeezenet_v1.0", "squeezenet_v1.1", ] else: networks = [args.network] target = "opencl" target_host = "llvm -mtriple=arm64-linux-android" print("--------------------------------------------------") print("%-20s %-20s" % ("Network Name", "Mean Inference Time (std dev)")) print("--------------------------------------------------") results = {} for network in networks: ftime = evaluate_network(network, target, target_host, "float32", args.repeat) results[network + "-float32"] = ftime ftime = evaluate_network(network, target, target_host, "float16", args.repeat) results[network + "-float16"] = ftime print("----------------------------------------------------------------------") print("%-30s %-30s" % ("Network Name", "Mean Inference Time (std dev)")) print("----------------------------------------------------------------------") for key, val in results.items(): print("%-30s %-30s (%s)" % (key, 
"%.2f ms" % val[0], "%.2f ms" % val[1]))
11,026
33.785489
108
py
tvm
tvm-main/apps/howto_deploy/python_deploy.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # brief Example code on load and run TVM module.s # file python_deploy.py import tvm from tvm import te import numpy as np def verify(mod, fname): # Get the function from the module f = mod.get_function(fname) # Use tvm.nd.array to convert numpy ndarray to tvm # NDArray type, so that function can be invoked normally N = 10 x = tvm.nd.array(np.arange(N, dtype=np.float32)) y = tvm.nd.array(np.zeros(N, dtype=np.float32)) # Invoke the function f(x, y) np_x = x.numpy() np_y = y.numpy() # Verify correctness of function assert np.all([xi + 1 == yi for xi, yi in zip(np_x, np_y)]) print("Finish verification...") if __name__ == "__main__": # The normal dynamic loading method for deployment mod_dylib = tvm.runtime.load_module("lib/test_addone_dll.so") print("Verify dynamic loading from test_addone_dll.so") verify(mod_dylib, "addone") # There might be methods to use the system lib way in # python, but dynamic loading is good enough for now.
1,814
35.3
65
py
tvm
tvm-main/apps/howto_deploy/prepare_test_libs.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Script to prepare test_addone.so""" import tvm import numpy as np from tvm import te from tvm import relay import os def prepare_test_libs(base_path): n = te.var("n") A = te.placeholder((n,), name="A") B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B") s = te.create_schedule(B.op) # Compile library as dynamic library fadd_dylib = tvm.build(s, [A, B], "llvm", name="addone") dylib_path = os.path.join(base_path, "test_addone_dll.so") fadd_dylib.export_library(dylib_path) # Compile library in system library mode fadd_syslib = tvm.build(s, [A, B], "llvm", name="addonesys") syslib_path = os.path.join(base_path, "test_addone_sys.o") fadd_syslib.save(syslib_path) def prepare_graph_lib(base_path): x = relay.var("x", shape=(2, 2), dtype="float32") y = relay.var("y", shape=(2, 2), dtype="float32") params = {"y": np.ones((2, 2), dtype="float32")} mod = tvm.IRModule.from_expr(relay.Function([x, y], x + y)) # build a module compiled_lib = relay.build(mod, tvm.target.Target("llvm"), params=params) # export it as a shared library # If you are running cross compilation, you can also consider export # to tar and invoke host compiler later. 
dylib_path = os.path.join(base_path, "test_relay_add.so") compiled_lib.export_library(dylib_path) if __name__ == "__main__": curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) prepare_test_libs(os.path.join(curr_path, "lib")) prepare_graph_lib(os.path.join(curr_path, "lib"))
2,346
38.779661
78
py
tvm
tvm-main/apps/android_rpc/tests/android_rpc_test.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Testcode for Android RPC. To use it, start an RPC tracker with "python -m tvm.exec.rpc_tracker". Use the tracker's address and port when configuring the RPC app. Use "android" as the key if you wish to avoid modifying this script. """ import tvm from tvm import te import os from tvm import rpc from tvm.contrib import utils, ndk import numpy as np # Set to be address of tvm proxy. tracker_host = os.environ["TVM_TRACKER_HOST"] tracker_port = int(os.environ["TVM_TRACKER_PORT"]) key = "android" # Change target configuration. # Run `adb shell cat /proc/cpuinfo` to find the arch. 
arch = "arm64" target = "llvm -mtriple=%s-linux-android" % arch # whether enable to execute test on OpenCL target test_opencl = False # whether enable to execute test on Vulkan target test_vulkan = False def test_rpc_module(): # graph n = tvm.runtime.convert(1024) A = te.placeholder((n,), name="A") B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B") a_np = np.random.uniform(size=1024).astype(A.dtype) temp = utils.tempdir() # Establish remote connection with target hardware tracker = rpc.connect_tracker(tracker_host, tracker_port) remote = tracker.request(key, priority=0, session_timeout=60) # Compile the Graph for CPU target s = te.create_schedule(B.op) xo, xi = s[B].split(B.op.axis[0], factor=64) s[B].parallel(xi) s[B].pragma(xo, "parallel_launch_point") s[B].pragma(xi, "parallel_barrier_when_finish") f = tvm.build(s, [A, B], target, name="myadd_cpu") path_dso_cpu = temp.relpath("cpu_lib.so") f.export_library(path_dso_cpu, ndk.create_shared) # Execute the portable graph on cpu target print("Run CPU test ...") dev = remote.cpu(0) remote.upload(path_dso_cpu) f2 = remote.load_module("cpu_lib.so") a = tvm.nd.array(a_np, dev) b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev) time_f = f2.time_evaluator(f2.entry_name, dev, number=10) cost = time_f(a, b).mean print("%g secs/op\n" % cost) np.testing.assert_equal(b.numpy(), a.numpy() + 1) # Compile the Graph for OpenCL target if test_opencl: s = te.create_schedule(B.op) xo, xi = s[B].split(B.op.axis[0], factor=64) s[B].bind(xi, te.thread_axis("threadIdx.x")) s[B].bind(xo, te.thread_axis("blockIdx.x")) # Build the dynamic lib. 
# If we don't want to do metal and only use cpu, just set target to be target f = tvm.build(s, [A, B], tvm.target.Target("opencl", host=target), name="myadd") path_dso_cl = temp.relpath("dev_lib_cl.so") f.export_library(path_dso_cl, ndk.create_shared) print("Run GPU(OpenCL Flavor) test ...") dev = remote.cl(0) remote.upload(path_dso_cl) f1 = remote.load_module("dev_lib_cl.so") a = tvm.nd.array(a_np, dev) b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev) time_f = f1.time_evaluator(f1.entry_name, dev, number=10) cost = time_f(a, b).mean print("%g secs/op\n" % cost) np.testing.assert_equal(b.numpy(), a.numpy() + 1) # Compile the Graph for Vulkan target if test_vulkan: s = te.create_schedule(B.op) xo, xi = s[B].split(B.op.axis[0], factor=64) s[B].bind(xi, te.thread_axis("threadIdx.x")) s[B].bind(xo, te.thread_axis("blockIdx.x")) # Build the dynamic lib. # If we don't want to do metal and only use cpu, just set target to be target f = tvm.build(s, [A, B], tvm.target.Target("vulkan", host=target), name="myadd") path_dso_vulkan = temp.relpath("dev_lib_vulkan.so") f.export_library(path_dso_vulkan, ndk.create_shared) print("Run GPU(Vulkan Flavor) test ...") dev = remote.vulkan(0) remote.upload(path_dso_vulkan) f1 = remote.load_module("dev_lib_vulkan.so") a = tvm.nd.array(a_np, dev) b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev) time_f = f1.time_evaluator(f1.entry_name, dev, number=10) cost = time_f(a, b).mean print("%g secs/op\n" % cost) np.testing.assert_equal(b.numpy(), a.numpy() + 1) if __name__ == "__main__": test_rpc_module()
4,985
37.353846
88
py
tvm
tvm-main/apps/uma/uma_cli.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ UMA Command Line Interface (CLI) Tool to create code skeletons for an easy integration of new AI hardware accelerators/libraries into TVM using UMA """ import argparse import os import shutil import sys import pathlib from inflection import camelize, underscore def _parse_args(): parser = argparse.ArgumentParser(description="UMA Interface command line interface") parser.add_argument( "--add_hardware", type=str, required=True, ) parser.add_argument( "--tutorial", type=str, ) args = parser.parse_args() return args def replace_template_name( files: list, template_name: str, add_hw_name: str, template_source: str = "_template" ) -> None: """ Replace names in template skeleton code by new name """ for f in files: with open(f) as read_file: data = read_file.read() for case in [underscore, camelize]: data = data.replace(case(template_name), case(add_hw_name)) data = data.replace(template_source, underscore(add_hw_name)) with open(f, "w") as write_file: write_file.write(data) def main(): """ UMA Command Line Interface (CLI) """ args = _parse_args() add_hw_name = args.add_hardware uma_template_path = pathlib.Path(os.getcwd(), "_template").absolute() add_hw_path = os.path.join(uma_template_path.parent, 
add_hw_name) if os.path.exists(add_hw_path): print( f"Hardware with name {add_hw_name} already exists in UMA file structure: {add_hw_path}" ) sys.exit(-1) else: os.mkdir(add_hw_path) uma_files = ["backend.py", "codegen.py", "passes.py", "patterns.py", "run.py", "strategies.py"] if args.tutorial == "vanilla": uma_files.append("conv2dnchw.cc") source_files = [os.path.join(uma_template_path, f) for f in uma_files] destination_files = [os.path.join(add_hw_path, f) for f in uma_files] for src, dst in zip(source_files, destination_files): shutil.copyfile(src, dst) template_name = "my_ai_hw" replace_template_name(destination_files, template_name, add_hw_name) print(f"Success: added {add_hw_name} to {add_hw_path}") if __name__ == "__main__": main()
3,069
30.010101
99
py
tvm
tvm-main/apps/uma/_template/codegen.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """UMA codegen for the my_ai_hw accelerator""" import tvm import pathlib def gen_includes() -> str: topdir = pathlib.Path(__file__).parent.absolute() includes = "" includes += f'#include "{topdir}/conv2dnchw.cc"' return includes
1,034
34.689655
62
py
tvm
tvm-main/apps/uma/_template/patterns.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Relay graph patterns for the my_ai_hw accelerator""" from tvm.relay.dataflow_pattern import is_op, wildcard def conv2d_pattern(): pattern = is_op("nn.conv2d")(wildcard(), wildcard()) pattern = pattern.has_attr({"strides": [1, 1], "groups": 1}) return pattern def dense_pattern(): pattern = is_op("nn.dense")(wildcard(), wildcard()) return pattern
1,160
36.451613
64
py
tvm
tvm-main/apps/uma/_template/strategies.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Strategies for the my_ai_hw accelerator""" # Example how to integrate a custom conv1d strategy: # @relay.op.strategy.override_native_generic_func("custom_conv1d_strategy") # def custom_conv1d_strategy(attrs, inputs, out_type, target): # strategy = _op.OpStrategy() # strategy.add_implementation( # wrap_compute_conv1d(custom_conv1d_compute), # wrap_topi_schedule(custom_conv1d_schedule), # name="custom_conv1d.generic", # return strategy # # For further details see: # - github.com/apache/tvm-rfcs/blob/main/rfcs/0060_UMA_Unified_Modular_Accelerator_Interface.md # - $TVM_HOME/python/tvm/relay/op/strategy/x86.py
1,440
41.382353
95
py
tvm
tvm-main/apps/uma/_template/run.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from tvm.micro.testing.aot_test_utils import AOT_DEFAULT_RUNNER import tvm from tvm import relay from backend import MyAiHwBackend from tvm.relay import transform from collections import OrderedDict import numpy as np from tvm.testing.aot import ( AOTTestModel as AOTModel, AOTTestRunner as AOTRunner, generate_ref_data, compile_and_run, ) def create_conv2d(groups=1, runner=AOT_DEFAULT_RUNNER, weight_shape=32): dtype = "float32" ishape = (1, 32, 14, 14) wshape = (32, weight_shape, 3, 3) pass_config = {"tir.usmp.enable": True} runner = AOTRunner( makefile=runner.makefile, prologue=runner.prologue, epilogue=runner.epilogue, includes=runner.includes, parameters=runner.parameters, pass_config=pass_config, ) data0 = relay.var("data", shape=ishape, dtype=dtype) weight0 = relay.var("weight", shape=wshape, dtype=dtype) out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=groups) main_f = relay.Function([data0, weight0], out) mod = tvm.IRModule() mod["main"] = main_f mod = transform.InferType()(mod) i_data = np.random.uniform(0, 1, ishape).astype(dtype) w1_data = np.random.uniform(0, 1, wshape).astype(dtype) inputs = OrderedDict([("data", i_data), ("weight", w1_data)]) output_list = 
generate_ref_data(mod, inputs) return mod, inputs, output_list, runner def main(): mod, inputs, output_list, runner = create_conv2d() uma_backend = MyAiHwBackend() uma_backend.register() mod = uma_backend.partition(mod) target = tvm.target.Target("my_ai_hw", host=tvm.target.Target("c")) export_directory = tvm.contrib.utils.tempdir(keep_for_debug=True).path print(f"Generated files are in {export_directory}") compile_and_run( AOTModel(module=mod, inputs=inputs, outputs=output_list), runner, interface_api="c", use_unpacked_api=True, target=target, test_dir=str(export_directory), ) if __name__ == "__main__": main()
2,857
33.433735
92
py
tvm
tvm-main/apps/uma/_template/backend.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """UMA backend for the my_ai_hw accelerator""" from passes import MyAiHwConv2dPass from tvm.relay.backend.contrib.uma.api.utils import PassPhase from tvm.relay.backend.contrib.uma.backend import UMABackend from codegen import gen_includes from patterns import conv2d_pattern class MyAiHwBackend(UMABackend): """UMA backend for the MyAiHw accelerator.""" def __init__(self): super().__init__() # Target configuration self._register_target_attr("dimension") # Relay Pattern registration self._register_pattern("conv2d", conv2d_pattern()) # Relay to TIR function registration self._register_tir_pass(PassPhase.TIR_PHASE_0, MyAiHwConv2dPass()) # TIR to runtime function registration self._register_codegen(fmt="c", includes=gen_includes) @property def target_name(self): return "my_ai_hw"
1,674
35.413043
74
py
tvm
tvm-main/apps/uma/_template/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Template files for UMA tutorial """
828
35.043478
62
py
tvm
tvm-main/apps/uma/_template/passes.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Transform passes for the my_ai_hw accelerator""" import tvm from tvm import tir from tvm.relay.backend.contrib.uma.api.utils import add_llvm_to_block @tvm.tir.transform.prim_func_pass(opt_level=2) class MyAiHwConv2dPass: _EXTERNAL_FUNCTION_NAME = "my_ai_hw_conv2dnchw" _TVM_BLOCK_MATCH_NAME = "conv2d_nchw" def transform_function( self, func: tvm.tir.PrimFunc, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext ) -> tvm.tir.PrimFunc: return self._my_ai_hw_conv2d_pass(func, mod, ctx) @classmethod def _my_ai_hw_conv2d_pass(cls, func, mod, ctx): _loops = dict() _handles = [] _entry_node = None def _has_block(name: str, func: tvm.tir.PrimFunc) -> bool: """ Determine of a tir.block with `name` exists in `func` """ def _hb(op): if isinstance(op, tvm.tir.Block): _found_blocks.append(op.name_hint) _found_blocks = [] tvm.tir.stmt_functor.post_order_visit(func.body, _hb) return name in _found_blocks def _detect_and_replace_conv2d( func: tvm.tir.PrimFunc, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext ) -> tvm.tir.PrimFunc: def _replace_conv2d(op): if op == _entry_node: irb = tvm.tir.ir_builder.create() # Collection of buffer address buffers = [b[1].data for b in _handles] # extraction of loop 
offsets for k, v in _loops.items(): assert v.min.value == 0 offset_order = ["co", "w", "h", "ci", "kh", "kw"] offsets = [_loops[i].extent.value for i in offset_order] args = buffers + offsets irb.emit(tir_call(irb, True, cls._EXTERNAL_FUNCTION_NAME, *args)) irb_result = irb.get() return irb_result elif isinstance(op, tvm.tir.SeqStmt): # Remove that pad block of TOPI's conv2DNCHW by only returning the 2nd statement return op.seq[1] return op sch = tir.Schedule(func) if _has_block(cls._TVM_BLOCK_MATCH_NAME, func): conv2d_block = sch.get_block(cls._TVM_BLOCK_MATCH_NAME) rv_loops = sch.get_loops(conv2d_block) assert len(rv_loops) == 7 loops = dict( n=rv_loops[0], co=rv_loops[1], h=rv_loops[2], w=rv_loops[3], ci=rv_loops[4], kh=rv_loops[5], kw=rv_loops[6], ) _entry_node = sch.get(rv_loops[1]) _loops = {k: sch.get(v) for k, v in loops.items()} _handles = func.buffer_map.items() x = tvm.tir.stmt_functor.ir_transform( func.body, None, _replace_conv2d, ["tir.For", "tir.SeqStmt"] ) return func.with_body(x) else: return func r = _detect_and_replace_conv2d(func, mod, ctx) return r def tir_call(ib: tvm.tir.ir_builder, extern: bool, name: str, *args): """ ib: ir_builder extern: bool True --> tvm.tir.call_extern False --> tvm.tir.call_packed name: str function name *args: arguments for function call """ def buf_from_array(ib, arr, dtype): # Allocate enough memory to store the whole array var = ib.allocate("int32", (len(arr),), scope="global") for i, v in enumerate(arr): var[i] = v # Declare a buffer, which is basically a view on the chunk of memory that we allocated buf = tvm.tir.decl_buffer((len(arr),), dtype, data=var, scope="global") return buf if extern: args = [i.data if isinstance(i, tvm.tir.Buffer) else i for i in args] return tvm.tir.call_extern("int32", name, *args) else: args = [ buf_from_array(ib, i, "int32") if isinstance(i, (tuple, list, tvm.ir.container.Array)) else i for i in args ] return tvm.tir.call_packed(name, *args)
5,159
36.664234
100
py
tvm
tvm-main/src/runtime/crt/host/microtvm_api_server.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import fcntl import os import os.path import pathlib import select import shutil import subprocess import tarfile import time import re from tvm.micro.project_api import server PROJECT_DIR = pathlib.Path(os.path.dirname(__file__) or os.path.getcwd()) MODEL_LIBRARY_FORMAT_RELPATH = "model.tar" IS_TEMPLATE = not os.path.exists(os.path.join(PROJECT_DIR, MODEL_LIBRARY_FORMAT_RELPATH)) # Used this size to pass most CRT tests in TVM. 
WORKSPACE_SIZE_BYTES = 2 * 1024 * 1024 CMAKEFILE_FILENAME = "CMakeLists.txt" # The build target given to make BUILD_TARGET = "build/main" class Handler(server.ProjectAPIHandler): BUILD_TARGET = "build/main" def __init__(self): super(Handler, self).__init__() self._proc = None def server_info_query(self, tvm_version): return server.ServerInfo( platform_name="host", is_template=IS_TEMPLATE, model_library_format_path="" if IS_TEMPLATE else PROJECT_DIR / MODEL_LIBRARY_FORMAT_RELPATH, project_options=[ server.ProjectOption( "verbose", optional=["build"], type="bool", default=False, help="Run make with verbose output", ), server.ProjectOption( "workspace_size_bytes", optional=["generate_project"], type="int", default=WORKSPACE_SIZE_BYTES, help="Sets the value of TVM_WORKSPACE_SIZE_BYTES.", ), ], ) # These files and directories will be recursively copied into generated projects from the CRT. CRT_COPY_ITEMS = ("include", "CMakeLists.txt", "src") def _populate_cmake( self, cmakefile_template_path: pathlib.Path, cmakefile_path: pathlib.Path, memory_size: int, verbose: bool, ): """Generate CMakeList file from template.""" regex = re.compile(r"([A-Z_]+) := (<[A-Z_]+>)") with open(cmakefile_path, "w") as cmakefile_f: with open(cmakefile_template_path, "r") as cmakefile_template_f: for line in cmakefile_template_f: cmakefile_f.write(line) cmakefile_f.write( f"target_compile_definitions(main PUBLIC -DTVM_WORKSPACE_SIZE_BYTES={memory_size})\n" ) if verbose: cmakefile_f.write(f"set(CMAKE_VERBOSE_MAKEFILE TRUE)\n") def generate_project(self, model_library_format_path, standalone_crt_dir, project_dir, options): # Make project directory. project_dir.mkdir(parents=True) current_dir = pathlib.Path(__file__).parent.absolute() # Copy ourselves to the generated project. TVM may perform further build steps on the generated project # by launching the copy. 
shutil.copy2(__file__, project_dir / os.path.basename(__file__)) # Place Model Library Format tarball in the special location, which this script uses to decide # whether it's being invoked in a template or generated project. project_model_library_format_path = project_dir / MODEL_LIBRARY_FORMAT_RELPATH shutil.copy2(model_library_format_path, project_model_library_format_path) # Extract Model Library Format tarball.into <project_dir>/model. extract_path = project_dir / project_model_library_format_path.stem with tarfile.TarFile(project_model_library_format_path) as tf: os.makedirs(extract_path) tf.extractall(path=extract_path) # Populate CRT. crt_path = project_dir / "crt" os.mkdir(crt_path) for item in self.CRT_COPY_ITEMS: src_path = standalone_crt_dir / item dst_path = crt_path / item if os.path.isdir(src_path): shutil.copytree(src_path, dst_path) else: shutil.copy2(src_path, dst_path) # Populate CMake file self._populate_cmake( current_dir / f"{CMAKEFILE_FILENAME}.template", project_dir / CMAKEFILE_FILENAME, options.get("workspace_size_bytes", WORKSPACE_SIZE_BYTES), options.get("verbose"), ) # Populate crt-config.h crt_config_dir = project_dir / "crt_config" crt_config_dir.mkdir() shutil.copy2( current_dir / "crt_config" / "crt_config.h", crt_config_dir / "crt_config.h", ) # Populate src/ src_dir = project_dir / "src" src_dir.mkdir() shutil.copy2( current_dir / "src" / "main.cc", src_dir / "main.cc", ) shutil.copy2( current_dir / "src" / "platform.cc", src_dir / "platform.cc", ) def build(self, options): build_dir = PROJECT_DIR / "build" build_dir.mkdir() subprocess.check_call(["cmake", ".."], cwd=build_dir) subprocess.check_call(["make"], cwd=build_dir) def flash(self, options): pass # Flashing does nothing on host. 
def _set_nonblock(self, fd): flag = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK) new_flag = fcntl.fcntl(fd, fcntl.F_GETFL) assert (new_flag & os.O_NONBLOCK) != 0, "Cannot set file descriptor {fd} to non-blocking" def open_transport(self, options): self._proc = subprocess.Popen( [self.BUILD_TARGET], stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=0 ) self._set_nonblock(self._proc.stdin.fileno()) self._set_nonblock(self._proc.stdout.fileno()) return server.TransportTimeouts( session_start_retry_timeout_sec=0, session_start_timeout_sec=0, session_established_timeout_sec=0, ) def close_transport(self): if self._proc is not None: proc = self._proc self._proc = None proc.terminate() proc.wait() def _await_ready(self, rlist, wlist, timeout_sec=None, end_time=None): if timeout_sec is None and end_time is not None: timeout_sec = max(0, end_time - time.monotonic()) rlist, wlist, xlist = select.select(rlist, wlist, rlist + wlist, timeout_sec) if not rlist and not wlist and not xlist: raise server.IoTimeoutError() return True def read_transport(self, n, timeout_sec): if self._proc is None: raise server.TransportClosedError() fd = self._proc.stdout.fileno() end_time = None if timeout_sec is None else time.monotonic() + timeout_sec try: self._await_ready([fd], [], end_time=end_time) to_return = os.read(fd, n) except BrokenPipeError: to_return = 0 if not to_return: self.close_transport() raise server.TransportClosedError() return to_return def write_transport(self, data, timeout_sec): if self._proc is None: raise server.TransportClosedError() fd = self._proc.stdin.fileno() end_time = None if timeout_sec is None else time.monotonic() + timeout_sec data_len = len(data) while data: self._await_ready([], [fd], end_time=end_time) try: num_written = os.write(fd, data) except BrokenPipeError: num_written = 0 if not num_written: self.close_transport() raise server.TransportClosedError() data = data[num_written:] if __name__ == "__main__": 
server.main(Handler())
8,606
33.428
111
py
tvm
tvm-main/gallery/tutorial/tensor_ir_blitz_course.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tir_blitz: Blitz Course to TensorIR ======================== **Author**: `Siyuan Feng <https://github.com/Hzfengsy>`_ TensorIR is a domain specific language for deep learning programs serving two broad purposes: - An implementation for transforming and optimizing programs on various hardware backends. - An abstraction for automatic _tensorized_ program optimization. """ # sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True # sphinx_gallery_end_ignore import tvm from tvm.ir.module import IRModule from tvm.script import tir as T import numpy as np ################################################################################################ # IRModule # -------- # An IRModule is the central data structure in TVM, which contains deep learning programs. # It is the basic object of interest of IR transformation and model building. # # .. image:: https://raw.githubusercontent.com/tlc-pack/web-data/main/images/design/tvm_life_of_irmodule.png # :align: center # :width: 85% # # This is the life cycle of an IRModule, which can be created from TVMScript. TensorIR schedule # primitives and passes are two major ways to transform an IRModule. 
Also, a sequence of # transformations on an IRModule is acceptable. Note that we can print an IRModule at **ANY** stage # to TVMScript. After all transformations and optimizations are complete, we can build the IRModule # to a runnable module to deploy on target devices. # # Based on the design of TensorIR and IRModule, we are able to create a new programming method: # # 1. Write a program by TVMScript in a python-AST based syntax. # # 2. Transform and optimize a program with python api. # # 3. Interactively inspect and try the performance with an imperative style transformation API. ################################################################################################ # Create an IRModule # ------------------ # IRModule can be created by writing TVMScript, which is a round-trippable syntax for TVM IR. # # Different than creating a computational expression by Tensor Expression # (:ref:`tutorial-tensor-expr-get-started`), TensorIR allow users to program through TVMScript, # a language embedded in python AST. The new method makes it possible to write complex programs # and further schedule and optimize it. # # Following is a simple example for vector addition. # @tvm.script.ir_module class MyModule: @T.prim_func def main(a: T.handle, b: T.handle): # We exchange data between function by handles, which are similar to pointer. T.func_attr({"global_symbol": "main", "tir.noalias": True}) # Create buffer from handles. A = T.match_buffer(a, (8,), dtype="float32") B = T.match_buffer(b, (8,), dtype="float32") for i in range(8): # A block is an abstraction for computation. with T.block("B"): # Define a spatial block iterator and bind it to value i. vi = T.axis.spatial(8, i) B[vi] = A[vi] + 1.0 ir_module = MyModule print(type(ir_module)) print(ir_module.script()) ################################################################################################ # Besides, we can also use tensor expression DSL to write simple operators, and convert them # to an IRModule. 
# from tvm import te A = te.placeholder((8,), dtype="float32", name="A") B = te.compute((8,), lambda *i: A(*i) + 1.0, name="B") func = te.create_prim_func([A, B]) ir_module_from_te = IRModule({"main": func}) print(ir_module_from_te.script()) ################################################################################################ # Build and Run an IRModule # ------------------------- # We can build the IRModule into a runnable module with specific target backends. # mod = tvm.build(ir_module, target="llvm") # The module for CPU backends. print(type(mod)) ################################################################################################ # Prepare the input array and output array, then run the module. # a = tvm.nd.array(np.arange(8).astype("float32")) b = tvm.nd.array(np.zeros((8,)).astype("float32")) mod(a, b) print(a) print(b) ################################################################################################ # Transform an IRModule # --------------------- # The IRModule is the central data structure for program optimization, which can be transformed # by :code:`Schedule`. # A schedule contains multiple primitive methods to interactively transform the program. # Each primitive transforms the program in certain ways to bring additional performance optimizations. # # .. image:: https://raw.githubusercontent.com/tlc-pack/web-data/main/images/design/tvm_tensor_ir_opt_flow.png # :align: center # :width: 100% # # The image above is a typical workflow for optimizing a tensor program. First, we need to create a # schedule on the initial IRModule created from either TVMScript or Tensor Expression. Then, a # sequence of schedule primitives will help to improve the performance. And at last, we can lower # and build it into a runnable module. # # Here we just demonstrate a very simple transformation. First we create schedule on the input `ir_module`. 
sch = tvm.tir.Schedule(ir_module) print(type(sch)) ################################################################################################ # Tile the loop into 3 loops and print the result. # Get block by its name block_b = sch.get_block("B") # Get loops surrounding the block (i,) = sch.get_loops(block_b) # Tile the loop nesting. i_0, i_1, i_2 = sch.split(i, factors=[2, 2, 2]) print(sch.mod.script()) ################################################################################################ # We can also reorder the loops. Now we move loop `i_2` to outside of `i_1`. sch.reorder(i_0, i_2, i_1) print(sch.mod.script()) ################################################################################################ # Transform to a GPU program # ~~~~~~~~~~~~~~~~~~~~~~~~~~ # If we want to deploy models on GPUs, thread binding is necessary. Fortunately, we can # also use primitives and do incrementally transformation. # sch.bind(i_0, "blockIdx.x") sch.bind(i_2, "threadIdx.x") print(sch.mod.script()) ################################################################################################ # After binding the threads, now build the IRModule with :code:`cuda` backends. ctx = tvm.cuda(0) cuda_mod = tvm.build(sch.mod, target="cuda") cuda_a = tvm.nd.array(np.arange(8).astype("float32"), ctx) cuda_b = tvm.nd.array(np.zeros((8,)).astype("float32"), ctx) cuda_mod(cuda_a, cuda_b) print(cuda_a) print(cuda_b)
7,471
37.317949
110
py
tvm
tvm-main/gallery/tutorial/uma.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-uma: Making your Hardware Accelerator TVM-ready with UMA =================================================== **Authors**: `Michael J. Klaiber <https://github.com/MichaelJKlaiber>`_, `Christoph Gerum <https://github.com/cgerum>`_, `Paul Palomero Bernardo <https://github.com/PaulPalomeroBernardo/>`_ """ ###################################################################### # This is an introductory tutorial to the **Universal Modular Accelerator Interface** (UMA). # UMA provides an easy-to-use API to integrate new hardware accelerators into TVM. # # This tutorial gives you step-by-step guidance how to use UMA to # make your hardware accelerator TVM-ready. # While there is no one-fits-all solution for this problem, UMA targets to provide a stable and Python-only # API to integrate a number of hardware accelerator classes into TVM. # # # In this tutorial you will get to know the UMA API in three use cases of increasing complexity. # In these use case the three mock-accelerators # **Vanilla**, **Strawberry** and **Chocolate** are introduced and # integrated into TVM using UMA. 
# ###################################################################### # Vanilla # ------------- # **Vanilla** is a simple accelerator consisting of a MAC array and has no internal memory. # It is can ONLY process Conv2D layers, all other layers are executed on a CPU, that also orchestrates **Vanilla**. # Both the CPU and Vanilla use a shared memory. # ###################################################################### # .. image:: https://raw.githubusercontent.com/tlc-pack/web-data/main/images/tutorial/uma_vanilla_block_diagram.png # :width: 100% # :alt: A block diagram of Vanilla # ###################################################################### # **Vanilla** has a C interface ``vanilla_conv2dnchw(...)``` for carrying out a Conv2D operation (including same-padding), # that accepts pointers to input feature map, weights and result, # as well as the dimensions of `Conv2D`: `oc`, `iw`, `ih`, `ic`, `kh`, `kw`. # # .. code-block:: c++ # # int vanilla_conv2dnchw(float* ifmap, float* weights, float* result, int oc, int iw, int ih, int ic, int kh, int kw); ################################################################################ # The script `uma_cli` creates code skeletons with API-calls into the UMA-API for new accelerators. # # For **Vanilla** we use it as follows: (``--tutorial vanilla`` adds all the additional files required for this part of the tutorial) # # .. code-block:: bash # # pip install inflection # cd $TVM_HOME/apps/uma # python uma_cli.py --add_hardware vanilla_accelerator --tutorial vanilla # ################################################################################ # uma_cli.py generates these files in the directory ``vanilla_accelerator`` which we are going to revisit. # # .. 
code-block:: bash # # backend.py # codegen.py # conv2dnchw.cc # passes.py # patterns.py # run.py # strategies.py ################################################################################ # Vanilla backend # # The generated backend for vanilla is found in `vanilla_accelerator/backend.py`: ###################################################################### # # .. code-block:: python # # class VanillaAcceleratorBackend(UMABackend): # """UMA backend for VanillaAccelerator.""" # # def __init__(self): # super().__init__() # # self._register_pattern("conv2d", conv2d_pattern()) # self._register_tir_pass(PassPhase.TIR_PHASE_0, VanillaAcceleratorConv2DPass()) # self._register_codegen(fmt="c", includes=gen_includes) # # @property # def target_name(self): # return "vanilla_accelerator" ################################################################################ # Define offloaded patterns # # To specify that `Conv2D` is offloaded to **Vanilla**, it is described as Relay dataflow pattern # (`DFPattern <https://tvm.apache.org/docs/reference/langref/relay_pattern.html>`_) in `vanilla_accelerator/patterns.py` ################################################################################ # # .. code-block:: python # # def conv2d_pattern(): # pattern = is_op("nn.conv2d")(wildcard(), wildcard()) # pattern = pattern.has_attr({"strides": [1, 1]}) # return pattern ################################################################################ # To map **Conv2D** operations from the input graph to **Vanilla**'s # low level function call ``vanilla_conv2dnchw(...)``, the TIR pass # *VanillaAcceleratorConv2DPass* (that will be discussed later in this tutorial) # is registered in `VanillaAcceleratorBackend`. 
################################################################################ # Codegen ################################################################################ # The file ``vanilla_accelerator/codegen.py`` defines static C-code that is added to the # resulting C-Code generated by TVMś C-Codegen in ``gen_includes``. # Here C-code is added to include **Vanilla**'s low level library``vanilla_conv2dnchw()``. # # .. code-block:: python # # def gen_includes() -> str: # topdir = pathlib.Path(__file__).parent.absolute() # # includes = "" # includes += f'#include "{topdir}/conv2dnchw.cc"' # return includes ################################################################################ # As shown above in `VanillaAcceleratorBackend` it is registered to UMA with # the `self._register_codegen` # # .. code-block:: python # # self._register_codegen(fmt="c", includes=gen_includes) ########################################################### # Building the Neural Network and run it on Vanilla # # To demonstrate UMA's functionality, we will generate C code for a single Conv2D layer and run it on # the Vanilla accelerator. # The file ``vanilla_accelerator/run.py`` provides a demo running a Conv2D layer # making use of Vanilla's C-API. # # # .. 
code-block:: python # # def main(): # mod, inputs, output_list, runner = create_conv2d() # # uma_backend = VanillaAcceleratorBackend() # uma_backend.register() # mod = uma_backend.partition(mod) # target = tvm.target.Target("vanilla_accelerator", host=tvm.target.Target("c")) # # export_directory = tvm.contrib.utils.tempdir(keep_for_debug=True).path # print(f"Generated files are in {export_directory}") # compile_and_run( # AOTModel(module=mod, inputs=inputs, outputs=output_list), # runner, # interface_api="c", # use_unpacked_api=True, # target=target, # test_dir=str(export_directory), # ) # # # main() ############################################################ # By running ``vanilla_accelerator/run.py`` the output files are generated in the model library format (MLF). # ########################################################### # Output: # # .. code-block:: bash # # Generated files are in /tmp/tvm-debug-mode-tempdirs/2022-07-13T13-26-22___x5u76h0p/00000 ########################################################### # Let's examine the generated files: # # # Output: # # .. code-block:: bash # # cd /tmp/tvm-debug-mode-tempdirs/2022-07-13T13-26-22___x5u76h0p/00000 # cd build/ # ls -1 # # codegen # lib.tar # metadata.json # parameters # runtime # src ########################################################### # To evaluate the generated C code go to ``codegen/host/src/default_lib2.c`` # # .. code-block:: bash # # cd codegen/host/src/ # ls -1 # # default_lib0.c # default_lib1.c # default_lib2.c # ########################################################### # In `default_lib2.c` you can now see that the generated code calls # into Vanilla's C-API and executes a Conv2D layer: # # .. 
code-block:: c++ # # TVM_DLL int32_t tvmgen_default_vanilla_accelerator_main_0(float* placeholder, float* placeholder1, float* conv2d_nchw, uint8_t* global_workspace_1_var) { # vanilla_accelerator_conv2dnchw(placeholder, placeholder1, conv2d_nchw, 32, 14, 14, 32, 3, 3); # return 0; # } # ########################################################### # Strawberry # --------------- # Coming soon ... ########################################################### # Chocolate # -------------- # Coming soon ... # ###################################################################### # Request for Community Input # ----------------------------- # If this tutorial **did not** fit to your accelerator, lease add your requirements to the UMA thread in # the TVM discuss forum: `Link <https://discuss.tvm.apache.org/t/rfc-uma-universal-modular-accelerator-interface/12039>`_. # We are eager to extend this tutorial to provide guidance on making further classes of AI hardware # accelerators TVM-ready using the UMA interface. # ###################################################################### # References # ----------- # [UMA-RFC] `UMA: Universal Modular Accelerator Interface <https://github.com/apache/tvm-rfcs/blob/main/rfcs/0060_UMA_Unified_Modular_Accelerator_Interface.md>`_, # TVM RFC, June 2022. # # [DFPattern] `Pattern Matching in Relay <https://tvm.apache.org/docs/reference/langref/relay_pattern.html>`_ #
10,060
34.055749
162
py
tvm
tvm-main/gallery/tutorial/tvmc_command_line_driver.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compiling and Optimizing a Model with TVMC ========================================== **Authors**: `Leandro Nunes <https://github.com/leandron>`_, `Matthew Barrett <https://github.com/mbaret>`_, `Chris Hoge <https://github.com/hogepodge>`_ In this section, we will work with TVMC, the TVM command line driver. TVMC is a tool that exposes TVM features such as auto-tuning, compiling, profiling and execution of models through a command line interface. Upon completion of this section, we will have used TVMC to accomplish the following tasks: * Compile a pre-trained ResNet-50 v2 model for the TVM runtime. * Run a real image through the compiled model, and interpret the output and model performance. * Tune the model on a CPU using TVM. * Re-compile an optimized model using the tuning data collected by TVM. * Run the image through the optimized model, and compare the output and model performance. The goal of this section is to give you an overview of TVM and TVMC's capabilities, and set the stage for understanding how TVM works. """ ################################################################################ # Using TVMC # ---------- # # TVMC is a Python application, part of the TVM Python package. 
# When you install TVM using a Python package, you will get TVMC as # as a command line application called ``tvmc``. The location of this command # will vary depending on your platform and installation method. # # Alternatively, if you have TVM as a Python module on your # ``$PYTHONPATH``, you can access the command line driver functionality # via the executable python module, ``python -m tvm.driver.tvmc``. # # For simplicity, this tutorial will mention TVMC command line using # ``tvmc <options>``, but the same results can be obtained with # ``python -m tvm.driver.tvmc <options>``. # # You can check the help page using: # # .. code-block:: bash # # tvmc --help # # The main features of TVM available to ``tvmc`` are from subcommands # ``compile``, and ``run``, and ``tune``. To read about specific options under # a given subcommand, use ``tvmc <subcommand> --help``. We will cover each of # these commands in this tutorial, but first we need to download a pre-trained # model to work with. # ################################################################################ # Obtaining the Model # ------------------- # # For this tutorial, we will be working with ResNet-50 v2. ResNet-50 is a # convolutional neural network that is 50 layers deep and designed to classify # images. The model we will be using has been pre-trained on more than a # million images with 1000 different classifications. The network has an input # image size of 224x224. If you are interested exploring more of how the # ResNet-50 model is structured, we recommend downloading `Netron # <https://netron.app>`_, a freely available ML model viewer. # # For this tutorial we will be using the model in ONNX format. # # .. code-block:: bash # # wget https://github.com/onnx/models/raw/b9a54e89508f101a1611cd64f4ef56b9cb62c7cf/vision/classification/resnet/model/resnet50-v2-7.onnx # ################################################################################ # .. 
admonition:: Supported model formats # # TVMC supports models created with Keras, ONNX, TensorFlow, TFLite # and Torch. Use the option ``--model-format`` if you need to # explicitly provide the model format you are using. See ``tvmc # compile --help`` for more information. # ################################################################################ # .. admonition:: Adding ONNX Support to TVM # # TVM relies on the ONNX python library being available on your system. You can # install ONNX using the command ``pip3 install --user onnx onnxoptimizer``. You # may remove the ``--user`` option if you have root access and want to install # ONNX globally. The ``onnxoptimizer`` dependency is optional, and is only used # for ``onnx>=1.9``. # ################################################################################ # Compiling an ONNX Model to the TVM Runtime # ------------------------------------------ # # Once we've downloaded the ResNet-50 model, the next step is to compile it. To # accomplish that, we are going to use ``tvmc compile``. The output we get from # the compilation process is a TAR package of the model compiled to a dynamic # library for our target platform. We can run that model on our target device # using the TVM runtime. # # .. code-block:: bash # # # This may take several minutes depending on your machine # tvmc compile \ # --target "llvm" \ # --input-shapes "data:[1,3,224,224]" \ # --output resnet50-v2-7-tvm.tar \ # resnet50-v2-7.onnx # # Let's take a look at the files that ``tvmc compile`` creates in the module: # # .. code-block:: bash # # mkdir model # tar -xvf resnet50-v2-7-tvm.tar -C model # ls model # # You will see three files listed. # # * ``mod.so`` is the model, represented as a C++ library, that can be loaded # by the TVM runtime. # * ``mod.json`` is a text representation of the TVM Relay computation graph. # * ``mod.params`` is a file containing the parameters for the pre-trained # model. 
# # This module can be directly loaded by your application, and the model can be # run via the TVM runtime APIs. ################################################################################ # .. admonition:: Defining the Correct Target # # Specifying the correct target (option ``--target``) can have a huge # impact on the performance of the compiled module, as it can take # advantage of hardware features available on the target. For more # information, please refer to :ref:`Auto-tuning a convolutional network for # x86 CPU <tune_relay_x86>`. We recommend identifying which CPU you are # running, along with optional features, and set the target appropriately. ################################################################################ # Running the Model from The Compiled Module with TVMC # ---------------------------------------------------- # # Now that we've compiled the model to this module, we can use the TVM runtime # to make predictions with it. TVMC has the TVM runtime built in to it, # allowing you to run compiled TVM models. To use TVMC to run the model and # make predictions, we need two things: # # - The compiled module, which we just produced. # - Valid input to the model to make predictions on. # # Each model is particular when it comes to expected tensor shapes, formats and # data types. For this reason, most models require some pre and # post-processing, to ensure the input is valid and to interpret the output. # TVMC has adopted NumPy's ``.npz`` format for both input and output data. This # is a well-supported NumPy format to serialize multiple arrays into a file. # # As input for this tutorial, we will use the image of a cat, but you can feel # free to substitute this image for any of your choosing. # # .. 
image:: https://s3.amazonaws.com/model-server/inputs/kitten.jpg # :height: 224px # :width: 224px # :align: center ################################################################################ # Input pre-processing # ~~~~~~~~~~~~~~~~~~~~ # # For our ResNet-50 v2 model, the input is expected to be in ImageNet format. # Here is an example of a script to pre-process an image for ResNet-50 v2. # # You will need to have a supported version of the Python Image Library # installed. You can use ``pip3 install --user pillow`` to satisfy this # requirement for the script. # # .. code-block:: python # :caption: preprocess.py # :name: preprocess.py # # #!python ./preprocess.py # from tvm.contrib.download import download_testdata # from PIL import Image # import numpy as np # # img_url = "https://s3.amazonaws.com/model-server/inputs/kitten.jpg" # img_path = download_testdata(img_url, "imagenet_cat.png", module="data") # # # Resize it to 224x224 # resized_image = Image.open(img_path).resize((224, 224)) # img_data = np.asarray(resized_image).astype("float32") # # # ONNX expects NCHW input, so convert the array # img_data = np.transpose(img_data, (2, 0, 1)) # # # Normalize according to ImageNet # imagenet_mean = np.array([0.485, 0.456, 0.406]) # imagenet_stddev = np.array([0.229, 0.224, 0.225]) # norm_img_data = np.zeros(img_data.shape).astype("float32") # for i in range(img_data.shape[0]): # norm_img_data[i, :, :] = (img_data[i, :, :] / 255 - imagenet_mean[i]) / imagenet_stddev[i] # # # Add batch dimension # img_data = np.expand_dims(norm_img_data, axis=0) # # # Save to .npz (outputs imagenet_cat.npz) # np.savez("imagenet_cat", data=img_data) # ################################################################################ # Running the Compiled Module # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # With both the model and input data in hand, we can now run TVMC to make a # prediction: # # .. 
code-block:: bash # # tvmc run \ # --inputs imagenet_cat.npz \ # --output predictions.npz \ # resnet50-v2-7-tvm.tar # # Recall that the ``.tar`` model file includes a C++ library, a description of # the Relay model, and the parameters for the model. TVMC includes the TVM # runtime, which can load the model and make predictions against input. When # running the above command, TVMC outputs a new file, ``predictions.npz``, that # contains the model output tensors in NumPy format. # # In this example, we are running the model on the same machine that we used # for compilation. In some cases we might want to run it remotely via an RPC # Tracker. To read more about these options please check ``tvmc run --help``. ################################################################################ # Output Post-Processing # ~~~~~~~~~~~~~~~~~~~~~~ # # As previously mentioned, each model will have its own particular way of # providing output tensors. # # In our case, we need to run some post-processing to render the outputs from # ResNet-50 v2 into a more human-readable form, using the lookup-table provided # for the model. # # The script below shows an example of the post-processing to extract labels # from the output of our compiled module. # # .. 
code-block:: python # :caption: postprocess.py # :name: postprocess.py # # #!python ./postprocess.py # import os.path # import numpy as np # # from scipy.special import softmax # # from tvm.contrib.download import download_testdata # # # Download a list of labels # labels_url = "https://s3.amazonaws.com/onnx-model-zoo/synset.txt" # labels_path = download_testdata(labels_url, "synset.txt", module="data") # # with open(labels_path, "r") as f: # labels = [l.rstrip() for l in f] # # output_file = "predictions.npz" # # # Open the output and read the output tensor # if os.path.exists(output_file): # with np.load(output_file) as data: # scores = softmax(data["output_0"]) # scores = np.squeeze(scores) # ranks = np.argsort(scores)[::-1] # # for rank in ranks[0:5]: # print("class='%s' with probability=%f" % (labels[rank], scores[rank])) # # Running this script should produce the following output: # # .. code-block:: bash # # python postprocess.py # # class='n02123045 tabby, tabby cat' with probability=0.610553 # # class='n02123159 tiger cat' with probability=0.367179 # # class='n02124075 Egyptian cat' with probability=0.019365 # # class='n02129604 tiger, Panthera tigris' with probability=0.001273 # # class='n04040759 radiator' with probability=0.000261 # # Try replacing the cat image with other images, and see what sort of # predictions the ResNet model makes. ################################################################################ # Automatically Tuning the ResNet Model # ------------------------------------- # # The previous model was compiled to work on the TVM runtime, but did not # include any platform specific optimization. In this section, we will show you # how to build an optimized model using TVMC to target your working platform. # # In some cases, we might not get the expected performance when running # inferences using our compiled module. 
In cases like this, we can make use of # the auto-tuner, to find a better configuration for our model and get a boost # in performance. Tuning in TVM refers to the process by which a model is # optimized to run faster on a given target. This differs from training or # fine-tuning in that it does not affect the accuracy of the model, but only # the runtime performance. As part of the tuning process, TVM will try running # many different operator implementation variants to see which perform best. # The results of these runs are stored in a tuning records file, which is # ultimately the output of the ``tune`` subcommand. # # In the simplest form, tuning requires you to provide three things: # # - the target specification of the device you intend to run this model on # - the path to an output file in which the tuning records will be stored, and # finally # - a path to the model to be tuned. # # The example below demonstrates how that works in practice: # # .. code-block:: bash # # # The default search algorithm requires xgboost, see below for further # # details on tuning search algorithms # pip install xgboost # # tvmc tune \ # --target "llvm" \ # --output resnet50-v2-7-autotuner_records.json \ # resnet50-v2-7.onnx # # In this example, you will see better results if you indicate a more specific # target for the ``--target`` flag. For example, on an Intel i7 processor you # could use ``--target llvm -mcpu=skylake``. For this tuning example, we are # tuning locally on the CPU using LLVM as the compiler for the specified # achitecture. # # TVMC will perform a search against the parameter space for the model, trying # out different configurations for operators and choosing the one that runs # fastest on your platform. Although this is a guided search based on the CPU # and model operations, it can still take several hours to complete the search. 
# The output of this search will be saved to the # ``resnet50-v2-7-autotuner_records.json`` file, which will later be used to # compile an optimized model. # # .. admonition:: Defining the Tuning Search Algorithm # # By default this search is guided using an ``XGBoost Grid`` algorithm. # Depending on your model complexity and amount of time avilable, you might # want to choose a different algorithm. A full list is available by # consulting ``tvmc tune --help``. # # The output will look something like this for a consumer-level Skylake CPU: # # .. code-block:: bash # # tvmc tune \ # --target "llvm -mcpu=broadwell" \ # --output resnet50-v2-7-autotuner_records.json \ # resnet50-v2-7.onnx # # [Task 1/24] Current/Best: 9.65/ 23.16 GFLOPS | Progress: (60/1000) | 130.74 s Done. # # [Task 1/24] Current/Best: 3.56/ 23.16 GFLOPS | Progress: (192/1000) | 381.32 s Done. # # [Task 2/24] Current/Best: 13.13/ 58.61 GFLOPS | Progress: (960/1000) | 1190.59 s Done. # # [Task 3/24] Current/Best: 31.93/ 59.52 GFLOPS | Progress: (800/1000) | 727.85 s Done. # # [Task 4/24] Current/Best: 16.42/ 57.80 GFLOPS | Progress: (960/1000) | 559.74 s Done. # # [Task 5/24] Current/Best: 12.42/ 57.92 GFLOPS | Progress: (800/1000) | 766.63 s Done. # # [Task 6/24] Current/Best: 20.66/ 59.25 GFLOPS | Progress: (1000/1000) | 673.61 s Done. # # [Task 7/24] Current/Best: 15.48/ 59.60 GFLOPS | Progress: (1000/1000) | 953.04 s Done. # # [Task 8/24] Current/Best: 31.97/ 59.33 GFLOPS | Progress: (972/1000) | 559.57 s Done. # # [Task 9/24] Current/Best: 34.14/ 60.09 GFLOPS | Progress: (1000/1000) | 479.32 s Done. # # [Task 10/24] Current/Best: 12.53/ 58.97 GFLOPS | Progress: (972/1000) | 642.34 s Done. # # [Task 11/24] Current/Best: 30.94/ 58.47 GFLOPS | Progress: (1000/1000) | 648.26 s Done. # # [Task 12/24] Current/Best: 23.66/ 58.63 GFLOPS | Progress: (1000/1000) | 851.59 s Done. # # [Task 13/24] Current/Best: 25.44/ 59.76 GFLOPS | Progress: (1000/1000) | 534.58 s Done. 
# # [Task 14/24] Current/Best: 26.83/ 58.51 GFLOPS | Progress: (1000/1000) | 491.67 s Done. # # [Task 15/24] Current/Best: 33.64/ 58.55 GFLOPS | Progress: (1000/1000) | 529.85 s Done. # # [Task 16/24] Current/Best: 14.93/ 57.94 GFLOPS | Progress: (1000/1000) | 645.55 s Done. # # [Task 17/24] Current/Best: 28.70/ 58.19 GFLOPS | Progress: (1000/1000) | 756.88 s Done. # # [Task 18/24] Current/Best: 19.01/ 60.43 GFLOPS | Progress: (980/1000) | 514.69 s Done. # # [Task 19/24] Current/Best: 14.61/ 57.30 GFLOPS | Progress: (1000/1000) | 614.44 s Done. # # [Task 20/24] Current/Best: 10.47/ 57.68 GFLOPS | Progress: (980/1000) | 479.80 s Done. # # [Task 21/24] Current/Best: 34.37/ 58.28 GFLOPS | Progress: (308/1000) | 225.37 s Done. # # [Task 22/24] Current/Best: 15.75/ 57.71 GFLOPS | Progress: (1000/1000) | 1024.05 s Done. # # [Task 23/24] Current/Best: 23.23/ 58.92 GFLOPS | Progress: (1000/1000) | 999.34 s Done. # # [Task 24/24] Current/Best: 17.27/ 55.25 GFLOPS | Progress: (1000/1000) | 1428.74 s Done. # # Tuning sessions can take a long time, so ``tvmc tune`` offers many options to customize your tuning # process, in terms of number of repetitions (``--repeat`` and ``--number``, for example), the tuning # algorithm to be used, and so on. Check ``tvmc tune --help`` for more information. # # In some situations it might be a good idea, to only tune specific tasks (i.e. the most relevant ones) # to waste less time tuning simpler workworloads. The flag `--task` offers versatile options to limt # the tasks used for tuning, e.g. `--task 20,22` or `--task 16-`. All available tasks can be printed # using `--task list`. # ################################################################################ # Compiling an Optimized Model with Tuning Data # ---------------------------------------------- # # As an output of the tuning process above, we obtained the tuning records # stored in ``resnet50-v2-7-autotuner_records.json``. 
This file can be used in # two ways: # # - As input to further tuning (via ``tvmc tune --tuning-records``). # - As input to the compiler # # The compiler will use the results to generate high performance code for the # model on your specified target. To do that we can use ``tvmc compile # --tuning-records``. Check ``tvmc compile --help`` for more information. # # Now that tuning data for the model has been collected, we can re-compile the # model using optimized operators to speed up our computations. # # .. code-block:: bash # # tvmc compile \ # --target "llvm" \ # --tuning-records resnet50-v2-7-autotuner_records.json \ # --output resnet50-v2-7-tvm_autotuned.tar \ # resnet50-v2-7.onnx # # Verify that the optimized model runs and produces the same results: # # .. code-block:: bash # # tvmc run \ # --inputs imagenet_cat.npz \ # --output predictions.npz \ # resnet50-v2-7-tvm_autotuned.tar # # python postprocess.py # # Verifying that the predictions are the same: # # .. code-block:: bash # # # class='n02123045 tabby, tabby cat' with probability=0.610550 # # class='n02123159 tiger cat' with probability=0.367181 # # class='n02124075 Egyptian cat' with probability=0.019365 # # class='n02129604 tiger, Panthera tigris' with probability=0.001273 # # class='n04040759 radiator' with probability=0.000261 ################################################################################ # Comparing the Tuned and Untuned Models # -------------------------------------- # # TVMC gives you tools for basic performance benchmarking between the models. # You can specify a number of repetitions and that TVMC report on the model run # time (independent of runtime startup). We can get a rough idea of how much # tuning has improved the model performance. For example, on a test Intel i7 # system, we see that the tuned model runs 47% faster than the untuned model: # # .. 
code-block:: bash # # tvmc run \ # --inputs imagenet_cat.npz \ # --output predictions.npz \ # --print-time \ # --repeat 100 \ # resnet50-v2-7-tvm_autotuned.tar # # # Execution time summary: # # mean (ms) max (ms) min (ms) std (ms) # # 92.19 115.73 89.85 3.15 # # tvmc run \ # --inputs imagenet_cat.npz \ # --output predictions.npz \ # --print-time \ # --repeat 100 \ # resnet50-v2-7-tvm.tar # # # Execution time summary: # # mean (ms) max (ms) min (ms) std (ms) # # 193.32 219.97 185.04 7.11 # ################################################################################ # Final Remarks # ------------- # # In this tutorial, we presented TVMC, a command line driver for TVM. We # demonstrated how to compile, run, and tune a model. We also discussed the # need for pre and post-processing of inputs and outputs. After the tuning # process, we demonstrated how to compare the performance of the unoptimized # and optimize models. # # Here we presented a simple example using ResNet-50 v2 locally. However, TVMC # supports many more features including cross-compilation, remote execution and # profiling/benchmarking. # # To see what other options are available, please have a look at ``tvmc # --help``. # # In the `next tutorial <tvmc_python>`, we introduce the Python interface to TVM, # and in the tutorial after that, # `Compiling and Optimizing a Model with the Python Interface <autotvm_relay_x86>`, # we will cover the same compilation and optimization steps using the Python # interface.
22,558
41.887833
138
py
tvm
tvm-main/gallery/tutorial/tensor_expr_get_started.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-tensor-expr-get-started: Working with Operators Using Tensor Expression ============================================== **Author**: `Tianqi Chen <https://tqchen.github.io>`_ In this tutorial we will turn our attention to how TVM works with Tensor Expression (TE) to define tensor computations and apply loop optimizations. TE describes tensor computations in a pure functional language (that is each expression has no side effects). When viewed in context of the TVM as a whole, Relay describes a computation as a set of operators, and each of these operators can be represented as a TE expression where each TE expression takes input tensors and produces an output tensor. This is an introductory tutorial to the Tensor Expression language in TVM. TVM uses a domain specific tensor expression for efficient kernel construction. We will demonstrate the basic workflow with two examples of using the tensor expression language. The first example introduces TE and scheduling with vector addition. The second expands on these concepts with a step-by-step optimization of a matrix multiplication with TE. 
This matrix multiplication example will serve as the comparative basis for future tutorials covering more advanced features of TVM. """ ################################################################################ # Example 1: Writing and Scheduling Vector Addition in TE for CPU # --------------------------------------------------------------- # # Let's look at an example in Python in which we will implement a TE for # vector addition, followed by a schedule targeted towards a CPU. # We begin by initializing a TVM environment. import tvm import tvm.testing from tvm import te import numpy as np ################################################################################ # You will get better performance if you can identify the CPU you are targeting # and specify it. If you're using LLVM, you can get this information from the # command ``llc --version`` to get the CPU type, and you can check # ``/proc/cpuinfo`` for additional extensions that your processor might # support. For example, you can use ``llvm -mcpu=skylake-avx512`` for CPUs with # AVX-512 instructions. tgt = tvm.target.Target(target="llvm", host="llvm") ################################################################################ # Describing the Vector Computation # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # We describe a vector addition computation. TVM adopts tensor semantics, with # each intermediate result represented as a multi-dimensional array. The user # needs to describe the computation rule that generates the tensors. We first # define a symbolic variable ``n`` to represent the shape. We then define two # placeholder Tensors, ``A`` and ``B``, with given shape ``(n,)``. We then # describe the result tensor ``C``, with a ``compute`` operation. The # ``compute`` defines a computation, with the output conforming to the # specified tensor shape and the computation to be performed at each position # in the tensor defined by the lambda function. 
Note that while ``n`` is a # variable, it defines a consistent shape between the ``A``, ``B`` and ``C`` # tensors. Remember, no actual computation happens during this phase, as we # are only declaring how the computation should be done. n = te.var("n") A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") C = te.compute(A.shape, lambda i: A[i] + B[i], name="C") ################################################################################ # .. admonition:: Lambda Functions # # The second argument to the ``te.compute`` method is the function that # performs the computation. In this example, we're using an anonymous function, # also known as a ``lambda`` function, to define the computation, in this case # addition on the ``i``\th element of ``A`` and ``B``. ################################################################################ # Create a Default Schedule for the Computation # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # While the above lines describe the computation rule, we can compute ``C`` in # many different ways to fit different devices. For a tensor with multiple # axes, you can choose which axis to iterate over first, or computations can be # split across different threads. TVM requires that the user to provide a # schedule, which is a description of how the computation should be performed. # Scheduling operations within TE can change loop orders, split computations # across different threads, and group blocks of data together, amongst other # operations. An important concept behind schedules is that they only describe # how the computation is performed, so different schedules for the same TE will # produce the same result. # # TVM allows you to create a naive schedule that will compute ``C`` in by # iterating in row major order. # # .. 
code-block:: c # # for (int i = 0; i < n; ++i) { # C[i] = A[i] + B[i]; # } s = te.create_schedule(C.op) ###################################################################### # Compile and Evaluate the Default Schedule # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # With the TE expression and a schedule, we can produce runnable code for our # target language and architecture, in this case LLVM and a CPU. We provide # TVM with the schedule, a list of the TE expressions that are in the schedule, # the target and host, and the name of the function we are producing. The result # of the output is a type-erased function that can be called directly from Python. # # In the following line, we use ``tvm.build`` to create a function. The build # function takes the schedule, the desired signature of the function (including # the inputs and outputs) as well as target language we want to compile to. fadd = tvm.build(s, [A, B, C], tgt, name="myadd") ################################################################################ # Let's run the function, and compare the output to the same computation in # numpy. The compiled TVM function exposes a concise C API that can be invoked # from any language. We begin by creating a device, which is a device (CPU in this # example) that TVM can compile the schedule to. In this case the device is an # LLVM CPU target. We can then initialize the tensors in our device and # perform the custom addition operation. To verify that the computation is # correct, we can compare the result of the output of the c tensor to the same # computation performed by numpy. 
dev = tvm.device(tgt.kind.name, 0) n = 1024 a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) ################################################################################ # To get a comparison of how fast this version is compared to numpy, create a # helper function to run a profile of the TVM generated code. import timeit np_repeat = 100 np_running_time = timeit.timeit( setup="import numpy\n" "n = 32768\n" 'dtype = "float32"\n' "a = numpy.random.rand(n, 1).astype(dtype)\n" "b = numpy.random.rand(n, 1).astype(dtype)\n", stmt="answer = a + b", number=np_repeat, ) print("Numpy running time: %f" % (np_running_time / np_repeat)) def evaluate_addition(func, target, optimization, log): dev = tvm.device(target.kind.name, 0) n = 32768 a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) evaluator = func.time_evaluator(func.entry_name, dev, number=10) mean_time = evaluator(a, b, c).mean print("%s: %f" % (optimization, mean_time)) log.append((optimization, mean_time)) log = [("numpy", np_running_time / np_repeat)] evaluate_addition(fadd, tgt, "naive", log=log) ################################################################################ # Updating the Schedule to Use Parallelism # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Now that we've illustrated the fundamentals of TE, let's go deeper into what # schedules do, and how they can be used to optimize tensor expressions for # different architectures. A schedule is a series of steps that are applied to # an expression to transform it in a number of different ways. 
When a schedule # is applied to an expression in TE, the inputs and outputs remain the same, # but when compiled the implementation of the expression can change. This # tensor addition, in the default schedule, is run serially but is easy to # parallelize across all of the processor threads. We can apply the parallel # schedule operation to our computation. s[C].parallel(C.op.axis[0]) ################################################################################ # The ``tvm.lower`` command will generate the Intermediate Representation (IR) # of the TE, with the corresponding schedule. By lowering the expression as we # apply different schedule operations, we can see the effect of scheduling on # the ordering of the computation. We use the flag ``simple_mode=True`` to # return a readable C-style statement. print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # It's now possible for TVM to run these blocks on independent threads. Let's # compile and run this new schedule with the parallel operation applied: fadd_parallel = tvm.build(s, [A, B, C], tgt, name="myadd_parallel") fadd_parallel(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) evaluate_addition(fadd_parallel, tgt, "parallel", log=log) ################################################################################ # Updating the Schedule to Use Vectorization # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Modern CPUs also have the ability to perform SIMD operations on floating # point values, and we can apply another schedule to our computation expression # to take advantage of this. Accomplishing this requires multiple steps: first # we have to split the schedule into inner and outer loops using the split # scheduling primitive. 
The inner loops can use vectorization to use SIMD # instructions using the vectorize scheduling primitive, then the outer loops # can be parallelized using the parallel scheduling primitive. Choose the split # factor to be the number of threads on your CPU. # Recreate the schedule, since we modified it with the parallel operation in # the previous example n = te.var("n") A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") C = te.compute(A.shape, lambda i: A[i] + B[i], name="C") s = te.create_schedule(C.op) # This factor should be chosen to match the number of threads appropriate for # your CPU. This will vary depending on architecture, but a good rule is # setting this factor to equal the number of available CPU cores. factor = 4 outer, inner = s[C].split(C.op.axis[0], factor=factor) s[C].parallel(outer) s[C].vectorize(inner) fadd_vector = tvm.build(s, [A, B, C], tgt, name="myadd_parallel") evaluate_addition(fadd_vector, tgt, "vector", log=log) print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # Comparing the Different Schedules # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # We can now compare the different schedules baseline = log[0][1] print("%s\t%s\t%s" % ("Operator".rjust(20), "Timing".rjust(20), "Performance".rjust(20))) for result in log: print( "%s\t%s\t%s" % (result[0].rjust(20), str(result[1]).rjust(20), str(result[1] / baseline).rjust(20)) ) ################################################################################ # .. admonition:: Code Specialization # # As you may have noticed, the declarations of ``A``, ``B`` and ``C`` all # take the same shape argument, ``n``. TVM will take advantage of this to # pass only a single shape argument to the kernel, as you will find in the # printed device code. This is one form of specialization. # # On the host side, TVM will automatically generate check code that checks # the constraints in the parameters. 
So if you pass arrays with different # shapes into fadd, an error will be raised. # # We can do more specializations. For example, we can write :code:`n = # tvm.runtime.convert(1024)` instead of :code:`n = te.var("n")`, in the # computation declaration. The generated function will only take vectors with # length 1024. ################################################################################ # We've defined, scheduled, and compiled a vector addition operator, which we # were then able to execute on the TVM runtime. We can save the operator as a # library, which we can then load later using the TVM runtime. ################################################################################ # Targeting Vector Addition for GPUs (Optional) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # TVM is capable of targeting multiple architectures. In the next example, we # will target compilation of the vector addition to GPUs. # If you want to run this code, change ``run_cuda = True`` # Note that by default this example is not run in the docs CI. run_cuda = False if run_cuda: # Change this target to the correct backend for you gpu. For example: cuda (NVIDIA GPUs), # rocm (Radeon GPUS), OpenCL (opencl). tgt_gpu = tvm.target.Target(target="cuda", host="llvm") # Recreate the schedule n = te.var("n") A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") C = te.compute(A.shape, lambda i: A[i] + B[i], name="C") print(type(C)) s = te.create_schedule(C.op) bx, tx = s[C].split(C.op.axis[0], factor=64) ################################################################################ # Finally we must bind the iteration axis bx and tx to threads in the GPU # compute grid. The naive schedule is not valid for GPUs, and these are # specific constructs that allow us to generate code that runs on a GPU. 
s[C].bind(bx, te.thread_axis("blockIdx.x")) s[C].bind(tx, te.thread_axis("threadIdx.x")) ###################################################################### # Compilation # ----------- # After we have finished specifying the schedule, we can compile it # into a TVM function. By default TVM compiles into a type-erased # function that can be directly called from the python side. # # In the following line, we use tvm.build to create a function. # The build function takes the schedule, the desired signature of the # function (including the inputs and outputs) as well as target language # we want to compile to. # # The result of compilation fadd is a GPU device function (if GPU is # involved) as well as a host wrapper that calls into the GPU # function. fadd is the generated host wrapper function, it contains # a reference to the generated device function internally. fadd = tvm.build(s, [A, B, C], target=tgt_gpu, name="myadd") ################################################################################ # The compiled TVM function exposes a concise C API that can be invoked from # any language. # # We provide a minimal array API in python to aid quick testing and prototyping. # The array API is based on the `DLPack <https://github.com/dmlc/dlpack>`_ standard. # # - We first create a GPU device. # - Then tvm.nd.array copies the data to the GPU. # - ``fadd`` runs the actual computation # - ``numpy()`` copies the GPU array back to the CPU (so we can verify correctness). # # Note that copying the data to and from the memory on the GPU is a required step. 
dev = tvm.device(tgt_gpu.kind.name, 0) n = 1024 a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) ################################################################################ # Inspect the Generated GPU Code # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # You can inspect the generated code in TVM. The result of tvm.build is a TVM # Module. fadd is the host module that contains the host wrapper, it also # contains a device module for the CUDA (GPU) function. # # The following code fetches the device module and prints the content code. if ( tgt_gpu.kind.name == "cuda" or tgt_gpu.kind.name == "rocm" or tgt_gpu.kind.name.startswith("opencl") ): dev_module = fadd.imported_modules[0] print("-----GPU code-----") print(dev_module.get_source()) else: print(fadd.get_source()) ################################################################################ # Saving and Loading Compiled Modules # ----------------------------------- # Besides runtime compilation, we can save the compiled modules into a file and # load them back later. # # The following code first performs the following steps: # # - It saves the compiled host module into an object file. # - Then it saves the device module into a ptx file. 
# - cc.create_shared calls a compiler (gcc) to create a shared library from tvm.contrib import cc from tvm.contrib import utils temp = utils.tempdir() fadd.save(temp.relpath("myadd.o")) if tgt.kind.name == "cuda": fadd.imported_modules[0].save(temp.relpath("myadd.ptx")) if tgt.kind.name == "rocm": fadd.imported_modules[0].save(temp.relpath("myadd.hsaco")) if tgt.kind.name.startswith("opencl"): fadd.imported_modules[0].save(temp.relpath("myadd.cl")) cc.create_shared(temp.relpath("myadd.so"), [temp.relpath("myadd.o")]) print(temp.listdir()) ################################################################################ # .. admonition:: Module Storage Format # # The CPU (host) module is directly saved as a shared library (.so). There # can be multiple customized formats of the device code. In our example, the # device code is stored in ptx, as well as a meta data json file. They can be # loaded and linked separately via import. ################################################################################ # Load Compiled Module # ~~~~~~~~~~~~~~~~~~~~ # We can load the compiled module from the file system and run the code. The # following code loads the host and device module separately and links them # together. We can verify that the newly loaded function works. 
fadd1 = tvm.runtime.load_module(temp.relpath("myadd.so")) if tgt.kind.name == "cuda": fadd1_dev = tvm.runtime.load_module(temp.relpath("myadd.ptx")) fadd1.import_module(fadd1_dev) if tgt.kind.name == "rocm": fadd1_dev = tvm.runtime.load_module(temp.relpath("myadd.hsaco")) fadd1.import_module(fadd1_dev) if tgt.kind.name.startswith("opencl"): fadd1_dev = tvm.runtime.load_module(temp.relpath("myadd.cl")) fadd1.import_module(fadd1_dev) fadd1(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) ################################################################################ # Pack Everything into One Library # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # In the above example, we store the device and host code separately. TVM also # supports export everything as one shared library. Under the hood, we pack # the device modules into binary blobs and link them together with the host # code. Currently we support packing of Metal, OpenCL and CUDA modules. fadd.export_library(temp.relpath("myadd_pack.so")) fadd2 = tvm.runtime.load_module(temp.relpath("myadd_pack.so")) fadd2(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) ################################################################################ # .. admonition:: Runtime API and Thread-Safety # # The compiled modules of TVM do not depend on the TVM compiler. Instead, # they only depend on a minimum runtime library. The TVM runtime library # wraps the device drivers and provides thread-safe and device agnostic calls # into the compiled functions. # # This means that you can call the compiled TVM functions from any thread, on # any GPUs, provided that you have compiled the code for that GPU. ################################################################################ # Generate OpenCL Code # -------------------- # TVM provides code generation features into multiple backends. We can also # generate OpenCL code or LLVM code that runs on CPU backends. 
# # The following code blocks generate OpenCL code, creates array on an OpenCL # device, and verifies the correctness of the code. if tgt.kind.name.startswith("opencl"): fadd_cl = tvm.build(s, [A, B, C], tgt, name="myadd") print("------opencl code------") print(fadd_cl.imported_modules[0].get_source()) dev = tvm.cl(0) n = 1024 a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd_cl(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) ################################################################################ # .. admonition:: TE Scheduling Primitives # # TVM includes a number of different scheduling primitives: # # - split: splits a specified axis into two axises by the defined factor. # - tile: tiles will split a computation across two axes by the defined factors. # - fuse: fuses two consecutive axises of one computation. # - reorder: can reorder the axises of a computation into a defined order. # - bind: can bind a computation to a specific thread, useful in GPU programming. # - compute_at: by default, TVM will compute tensors at the outermost level # of the function, or the root, by default. compute_at specifies that one # tensor should be computed at the first axis of computation for another # operator. # - compute_inline: when marked inline, a computation will be expanded then # inserted into the address where the tensor is required. # - compute_root: moves a computation to the outermost layer, or root, of the # function. This means that stage of the computation will be fully computed # before it moves on to the next stage. # # A complete description of these primitives can be found in the # :ref:`Schedule Primitives <schedule_primitives>` docs page. 
################################################################################ # Example 2: Manually Optimizing Matrix Multiplication with TE # ------------------------------------------------------------ # # Now we will consider a second, more advanced example, demonstrating how with # just 18 lines of python code TVM speeds up a common matrix multiplication operation by 18x. # # **Matrix multiplication is a compute intensive operation. There are # two important optimizations for good CPU performance:** # # 1. Increase the cache hit rate of memory access. Both complex # numerical computation and hot-spot memory access can be # accelerated by a high cache hit rate. This requires us to # transform the origin memory access pattern to a pattern that fits # the cache policy. # # 2. SIMD (Single instruction multi-data), also known as the vector # processing unit. On each cycle instead of processing a single # value, SIMD can process a small batch of data. This requires us # to transform the data access pattern in the loop body in uniform # pattern so that the LLVM backend can lower it to SIMD. # # The techniques used in this tutorial are a subset of tricks mentioned in this # `repository <https://github.com/flame/how-to-optimize-gemm>`_. Some of them # have been applied by TVM abstraction automatically, but some of them cannot # be automatically applied due to TVM constraints. ################################################################################ # Preparation and Performance Baseline # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # We begin by collecting performance data on the `numpy` implementation of # matrix multiplication. import tvm import tvm.testing from tvm import te import numpy # The size of the matrix # (M, K) x (K, N) # You are free to try out different shapes, sometimes TVM optimization outperforms numpy with MKL. 
M = 1024 K = 1024 N = 1024 # The default tensor data type in tvm dtype = "float32" # You will want to adjust the target to match any CPU vector extensions you # might have. For example, if you're using using Intel AVX2 (Advanced Vector # Extensions) ISA for SIMD, you can get the best performance by changing the # following line to ``llvm -mcpu=core-avx2``, or specific type of CPU you use. # Recall that you're using llvm, you can get this information from the command # ``llc --version`` to get the CPU type, and you can check ``/proc/cpuinfo`` # for additional extensions that your processor might support. target = tvm.target.Target(target="llvm", host="llvm") dev = tvm.device(target.kind.name, 0) # Random generated tensor for testing a = tvm.nd.array(numpy.random.rand(M, K).astype(dtype), dev) b = tvm.nd.array(numpy.random.rand(K, N).astype(dtype), dev) # Repeatedly perform a matrix multiplication to get a performance baseline # for the default numpy implementation np_repeat = 100 np_running_time = timeit.timeit( setup="import numpy\n" "M = " + str(M) + "\n" "K = " + str(K) + "\n" "N = " + str(N) + "\n" 'dtype = "float32"\n' "a = numpy.random.rand(M, K).astype(dtype)\n" "b = numpy.random.rand(K, N).astype(dtype)\n", stmt="answer = numpy.dot(a, b)", number=np_repeat, ) print("Numpy running time: %f" % (np_running_time / np_repeat)) answer = numpy.dot(a.numpy(), b.numpy()) ################################################################################ # Now we write a basic matrix multiplication using TVM TE and verify that it # produces the same results as the numpy implementation. We also write a # function that will help us measure the performance of the schedule # optimizations. 
# TVM Matrix Multiplication using TE k = te.reduce_axis((0, K), "k") A = te.placeholder((M, K), name="A") B = te.placeholder((K, N), name="B") C = te.compute((M, N), lambda x, y: te.sum(A[x, k] * B[k, y], axis=k), name="C") # Default schedule s = te.create_schedule(C.op) func = tvm.build(s, [A, B, C], target=target, name="mmult") c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) def evaluate_operation(s, vars, target, name, optimization, log): func = tvm.build(s, [A, B, C], target=target, name="mmult") assert func c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) evaluator = func.time_evaluator(func.entry_name, dev, number=10) mean_time = evaluator(a, b, c).mean print("%s: %f" % (optimization, mean_time)) log.append((optimization, mean_time)) log = [] evaluate_operation(s, [A, B, C], target=target, name="mmult", optimization="none", log=log) ################################################################################ # Let's take a look at the intermediate representation of the operator and # default schedule using the TVM lower function. Note how the implementation is # essentially a naive implementation of a matrix multiplication, using three # nested loops over the indices of the A and B matrices. print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # Optimization 1: Blocking # ~~~~~~~~~~~~~~~~~~~~~~~~ # # A important trick to enhance the cache hit rate is blocking, where you # structure memory access such that the inside a block is a small neighborhood # that has high memory locality. In this tutorial, we pick a block factor of # 32. This will result in a block that will fill a 32 * 32 * sizeof(float) area # of memory. This corresponds to a cache size of 4KB, in relation to a # reference cache size of 32 KB for L1 cache. 
# # We begin by creating a default schedule for the ``C`` operation, then apply a # ``tile`` scheduling primitive to it with the specified block factor, with the # scheduling primitive returning the resulting loop order from outermost to # innermost, as a vector ``[x_outer, y_outer, x_inner, y_inner]``. We then get # the reduction axis for output of the operation, and perform a split operation # on it using a factor of 4. This factor doesn't directly impact the blocking # optimization we're working on right now, but will be useful later when we # apply vectorization. # # Now that the operation has been blocked, we can reorder the computation to # put the reduction operation into the outermost loop of the computation, # helping to guarantee that the blocked data remains in cache. This completes # the schedule, and we can build and test the performance compared to the naive # schedule. bn = 32 # Blocking by loop tiling xo, yo, xi, yi = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn) (k,) = s[C].op.reduce_axis ko, ki = s[C].split(k, factor=4) # Hoist reduction domain outside the blocking loop s[C].reorder(xo, yo, ko, ki, xi, yi) evaluate_operation(s, [A, B, C], target=target, name="mmult", optimization="blocking", log=log) ################################################################################ # By reordering the computation to take advantage of caching, you should see a # significant improvement in the performance of the computation. Now, print the # internal representation and compare it to the original: print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # Optimization 2: Vectorization # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Another important optimization trick is vectorization. When the memory access # pattern is uniform, the compiler can detect this pattern and pass the # continuous memory to the SIMD vector processor. 
In TVM, we can use the # ``vectorize`` interface to hint the compiler this pattern, taking advantage # of this hardware feature. # # In this tutorial, we chose to vectorize the inner loop row data since it is # already cache friendly from our previous optimizations. # Apply the vectorization optimization s[C].vectorize(yi) evaluate_operation(s, [A, B, C], target=target, name="mmult", optimization="vectorization", log=log) # The generalized IR after vectorization print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # Optimization 3: Loop Permutation # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # If we look at the above IR, we can see the inner loop row data is vectorized # and B is transformed into PackedB (this is evident by the `(float32x32*)B2` # portion of the inner loop). The traversal of PackedB is sequential now. So we # will look at the access pattern of A. In current schedule, A is accessed # column by column which is not cache friendly. If we change the nested loop # order of `ki` and inner axes `xi`, the access pattern for A matrix will be # more cache friendly. s = te.create_schedule(C.op) xo, yo, xi, yi = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn) (k,) = s[C].op.reduce_axis ko, ki = s[C].split(k, factor=4) # re-ordering s[C].reorder(xo, yo, ko, xi, ki, yi) s[C].vectorize(yi) evaluate_operation( s, [A, B, C], target=target, name="mmult", optimization="loop permutation", log=log ) # Again, print the new generalized IR print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # Optimization 4: Array Packing # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Another important trick is array packing. This trick is to reorder the # storage dimension of the array to convert the continuous access pattern on # certain dimension to a sequential pattern after flattening. # # .. 
image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/array-packing.png # :align: center # # Just as it is shown in the figure above, after blocking the computations, we # can observe the array access pattern of B (after flattening), which is # regular but discontinuous. We expect that after some transformation we can # get a continuous access pattern. By reordering a ``[16][16]`` array to a # ``[16/4][16][4]`` array the access pattern of B will be sequential when # grabbing the corresponding value from the packed array. # # To accomplish this, we are going to have to start with a new default # schedule, taking into account the new packing of B. It's worth taking a # moment to comment on this: TE is a powerful and expressive language for # writing optimized operators, but it often requires some knowledge of the # underlying algorithm, data structures, and hardware target that you are # writing for. Later in the tutorial, we will discuss some of the options for # letting TVM take that burden. Regardless, let's move on with the new # optimized schedule. # We have to re-write the algorithm slightly. packedB = te.compute((N / bn, K, bn), lambda x, y, z: B[y, x * bn + z], name="packedB") C = te.compute( (M, N), lambda x, y: te.sum(A[x, k] * packedB[y // bn, k, tvm.tir.indexmod(y, bn)], axis=k), name="C", ) s = te.create_schedule(C.op) xo, yo, xi, yi = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn) (k,) = s[C].op.reduce_axis ko, ki = s[C].split(k, factor=4) s[C].reorder(xo, yo, ko, xi, ki, yi) s[C].vectorize(yi) x, y, z = s[packedB].op.axis s[packedB].vectorize(z) s[packedB].parallel(x) evaluate_operation(s, [A, B, C], target=target, name="mmult", optimization="array packing", log=log) # Here is the generated IR after array packing. 
print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # Optimization 5: Optimizing Block Writing Through Caching # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Up to this point all of our optimizations have focused on efficiently # accessing and computing the data from the `A` and `B` matrices to compute the # `C` matrix. After the blocking optimization, the operator will write result # to `C` block by block, and the access pattern is not sequential. We can # address this by using a sequential cache array, using a combination of # `cache_write`, `compute_at`, and `unroll`to hold the block results and write # to `C` when all the block results are ready. s = te.create_schedule(C.op) # Allocate write cache CC = s.cache_write(C, "global") xo, yo, xi, yi = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn) # Write cache is computed at yo s[CC].compute_at(s[C], yo) # New inner axes xc, yc = s[CC].op.axis (k,) = s[CC].op.reduce_axis ko, ki = s[CC].split(k, factor=4) s[CC].reorder(ko, xc, ki, yc) s[CC].unroll(ki) s[CC].vectorize(yc) x, y, z = s[packedB].op.axis s[packedB].vectorize(z) s[packedB].parallel(x) evaluate_operation(s, [A, B, C], target=target, name="mmult", optimization="block caching", log=log) # Here is the generated IR after write cache blocking. print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # Optimization 6: Parallelization # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # So far, our computation is only designed to use a single core. Nearly all # modern processors have multiple cores, and computation can benefit from # running computations in parallel. The final optimization is to take advantage # of thread-level parallelization. 
# parallel s[C].parallel(xo) x, y, z = s[packedB].op.axis s[packedB].vectorize(z) s[packedB].parallel(x) evaluate_operation( s, [A, B, C], target=target, name="mmult", optimization="parallelization", log=log ) # Here is the generated IR after parallelization. print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # Summary of Matrix Multiplication Example # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # After applying the above simple optimizations with only 18 lines of code, our # generated code can begin to approach the performance of `numpy` with the Math # Kernel Library (MKL). Since we've been logging the performance as we've been # working, we can compare the results. baseline = log[0][1] print("%s\t%s\t%s" % ("Operator".rjust(20), "Timing".rjust(20), "Performance".rjust(20))) for result in log: print( "%s\t%s\t%s" % (result[0].rjust(20), str(result[1]).rjust(20), str(result[1] / baseline).rjust(20)) ) ################################################################################ # Note that the outputs on the web page reflect the running times on a # non-exclusive Docker container, and should be considered unreliable. It is # highly encouraged to run the tutorial by yourself to observe the performance # gain achieved by TVM, and to carefully work through each example to # understand the iterative improvements that are made to the matrix # multiplication operation. ################################################################################ # Final Notes and Summary # ----------------------- # As mentioned earlier, how to apply optimizations using TE and scheduling # primitives can require some knowledge of the underlying architecture and # algorithms. However, TE was designed to act as a foundation for more complex # algorithms that can search the potential optimization. 
With the knowledge you # have from this introduction to TE, we can now begin to explore how TVM can # automate the schedule optimization process. # # This tutorial provided a walk-through of TVM Tensor Expression (TE) workflow # using a vector add and a matrix multiplication examples. The general workflow # is # # - Describe your computation via a series of operations. # - Describe how we want to compute use schedule primitives. # - Compile to the target function we want. # - Optionally, save the function to be loaded later. # # Upcoming tutorials expand on the matrix multiplication example, and show how # you can build generic templates of the matrix multiplication and other # operations with tunable parameters that allows you to automatically optimize # the computation for specific platforms.
39,410
42.5
100
py
tvm
tvm-main/gallery/tutorial/autotvm_matmul_x86.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-autotvm-matmul-x86: Optimizing Operators with Schedule Templates and AutoTVM ======================================================== **Authors**: `Lianmin Zheng <https://github.com/merrymercy>`_, `Chris Hoge <https://github.com/hogepodge>`_ In this tutorial, we show how the TVM Tensor Expression (TE) language can be used to write schedule templates that can be searched by AutoTVM to find the optimal schedule. This process is called Auto-Tuning, which helps automate the process of optimizing tensor computation. This tutorial builds on the previous :doc:`tutorial on how to write a matrix multiplication using TE <tensor_expr_get_started>`. There are two steps in auto-tuning. - The first step is defining a search space. - The second step is running a search algorithm to explore through this space. In this tutorial, you can learn how to perform these two steps in TVM. The whole workflow is illustrated by a matrix multiplication example. .. note:: Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. 
"""

################################################################################
# Install dependencies
# --------------------
# To use autotvm package in TVM, we need to install some extra dependencies.
#
# .. code-block:: bash
#
#   pip3 install --user psutil xgboost cloudpickle
#
# To make TVM run faster in tuning, it is recommended to use cython as FFI of
# TVM. In the root directory of TVM, execute:
#
# .. code-block:: bash
#
#   pip3 install --user cython
#   sudo make cython3
#
# Now return to python code. Begin by importing the required packages.

import logging
import sys

import numpy as np
import tvm
from tvm import te
import tvm.testing

# the module is called `autotvm`
from tvm import autotvm

################################################################################
# Basic Matrix Multiplication with TE
# -----------------------------------
# Recall the basic implementation of matrix multiplication using TE. We write
# it down here with a few changes. We will wrap the multiplication in a python
# function definition. For simplicity, we will focus our attention on a split
# optimization, using a fixed value that defines the block size of the
# reordering.


def matmul_basic(N, L, M, dtype):
    """Return a fixed (non-tunable) matmul schedule and its tensors.

    Computes C = A @ B for an (N, L) x (L, M) product of `dtype` tensors,
    splits both output axes by a hard-coded factor of 8, and returns the
    schedule together with the ``[A, B, C]`` argument list expected by
    ``tvm.build``.
    """
    A = te.placeholder((N, L), name="A", dtype=dtype)
    B = te.placeholder((L, M), name="B", dtype=dtype)

    k = te.reduce_axis((0, L), name="k")
    C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C")

    s = te.create_schedule(C.op)

    # schedule
    y, x = s[C].op.axis
    k = s[C].op.reduce_axis[0]

    # Hard-coded tiling factor of 8; the AutoTVM template below turns this
    # constant into a tunable knob.
    yo, yi = s[C].split(y, 8)
    xo, xi = s[C].split(x, 8)

    s[C].reorder(yo, xo, k, yi, xi)

    return s, [A, B, C]

################################################################################
# Matrix Multiplication with AutoTVM
# ----------------------------------
# In the previous schedule code, we use a constant "8" as the tiling factor.
# However, it might not be the best one because the best tiling factor depends
# on real hardware environment and input shape.
# # If you want the schedule code to be portable across a wider range of input # shapes and target hardware, it is better to define a set of candidate values # and pick the best one according to the measurement results on target # hardware. # # In autotvm, we can define a tunable parameter, or a "knob" for such kind of # value. ################################################################################ # A Basic Matrix Multiplication Template # -------------------------------------- # We begin with an example of how to create a tunable parameter set for the # block size of the `split` scheduling operation. # Matmul V1: List candidate values @autotvm.template("tutorial/matmul_v1") # 1. use a decorator def matmul_v1(N, L, M, dtype): A = te.placeholder((N, L), name="A", dtype=dtype) B = te.placeholder((L, M), name="B", dtype=dtype) k = te.reduce_axis((0, L), name="k") C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C") s = te.create_schedule(C.op) # schedule y, x = s[C].op.axis k = s[C].op.reduce_axis[0] # 2. get the config object cfg = autotvm.get_config() # 3. define search space cfg.define_knob("tile_y", [1, 2, 4, 8, 16]) cfg.define_knob("tile_x", [1, 2, 4, 8, 16]) # 4. schedule according to config yo, yi = s[C].split(y, cfg["tile_y"].val) xo, xi = s[C].split(x, cfg["tile_x"].val) s[C].reorder(yo, xo, k, yi, xi) return s, [A, B, C] ################################################################################ # Here we make four modifications to the previous schedule code and get a # tunable "template". We can explain the modifications one by one. # # 1. Use a decorator to mark this function as a simple template. # 2. Get a config object: You can regard this :code:`cfg` as an argument of # this function but we obtain it in a different way. With this argument, this # function is no longer a deterministic schedule. Instead, we can pass # different configurations to this function and get different schedules. 
A # function that uses a configuration object like this is called a "template". # # To make the template function more compact, we can do two things to define # the parameter search space within a single function. # # 1. Define a search space across a set values. This is done by making # :code:`cfg` a :any:`ConfigSpace` object. It will collect all of the # tunable knobs in this function and build a search space from it. # 2. Schedule according to an entity in this space. This is done by making # :code:`cfg` a :any:`ConfigEntity` object. When it is a # :any:`ConfigEntity`, it will ignore all space definition API (namely, # :code:`cfg.define_XXXXX(...)`). Instead, it will store deterministic # values for all tunable knobs, and we schedule according to these values. # # During auto-tuning, we will first call this template with a # :any:`ConfigSpace` object to build the search space. Then we call this # template with different :any:`ConfigEntity` in the built space to get # different schedules. Finally we will measure the code generated by # different schedules and pick the best one. # # 3. Define two tunable knobs. The first one is :code:`tile_y` with 5 possible # values. The second one is :code:`tile_x` with a same list of possible values. # These two knobs are independent, so they span a search space with size 25 = # 5x5. # 4. The configuration knobs are passed to the :code:`split` schedule # operation, allowing us to schedule according to the 5x5 deterministic values # we previously defined in :code:`cfg`. ################################################################################ # A Matrix Multiplication Template with the Advanced Parameter API # ---------------------------------------------------------------- # In the previous template, we manually listed all of the possible values for a # knob. This is the lowest level API to define the space, and gives an explicit # enumeration of the parameter space to search. 
However, we also provide # another set of APIs that can make the definition of the search space easier # and smarter. Where possible, we recommend you use this higher-level API # # In the following example, we use :any:`ConfigSpace.define_split` to define a # split knob. It will enumerate all the possible ways to split an axis and # construct the space. # # We also have :any:`ConfigSpace.define_reorder` for reorder knob and # :any:`ConfigSpace.define_annotate` for annotation like unroll, vectorization, # thread binding. When the high level API cannot meet your requirements, you # can always fall back to using the low level API. @autotvm.template("tutorial/matmul") def matmul(N, L, M, dtype): A = te.placeholder((N, L), name="A", dtype=dtype) B = te.placeholder((L, M), name="B", dtype=dtype) k = te.reduce_axis((0, L), name="k") C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C") s = te.create_schedule(C.op) # schedule y, x = s[C].op.axis k = s[C].op.reduce_axis[0] ##### define space begin ##### cfg = autotvm.get_config() cfg.define_split("tile_y", y, num_outputs=2) cfg.define_split("tile_x", x, num_outputs=2) ##### define space end ##### # schedule according to config yo, yi = cfg["tile_y"].apply(s, C, y) xo, xi = cfg["tile_x"].apply(s, C, x) s[C].reorder(yo, xo, k, yi, xi) return s, [A, B, C] ################################################################################ # .. admonition:: More Explanation on :code:`cfg.define_split` # # In this template, :code:`cfg.define_split("tile_y", y, num_outputs=2)` will # enumerate all possible combinations that can split axis y into two axes with # factors of the length of y. For example, if the length of y is 32 and we # want to split it into two axes using factors of 32, then there are 6 # possible values for (length of outer axis, length of inner axis) pair, # namely (32, 1), (16, 2), (8, 4), (4, 8), (2, 16) or (1, 32). These are all 6 # possible values of `tile_y`. 
# # During scheduling, :code:`cfg["tile_y"]` is a :code:`SplitEntity` object. # We stores the lengths of outer axes and inner axes in # :code:`cfg['tile_y'].size` (a tuple with two elements). In this template, # we apply it by using :code:`yo, yi = cfg['tile_y'].apply(s, C, y)`. # Actually, this is equivalent to :code:`yo, yi = s[C].split(y, # cfg["tile_y"].size[1])` or :code:`yo, yi = s[C].split(y, # nparts=cfg['tile_y"].size[0])` # # The advantage of using cfg.apply API is that it makes multi-level splits # (that is, when num_outputs >= 3) easier. ################################################################################ # Step 2: Use AutoTVM to Optimize the Matrix Multiplication # --------------------------------------------------------- # In Step 1, we wrote a matrix multiplication template that allowed us to # parameterize the block size used in the `split` schedule. We can now conduct # a search over this parameter space. The next step is to pick a tuner to guide # the exploration of this space. # # Auto-tuners in TVM # ~~~~~~~~~~~~~~~~~~ # The job for a tuner can be described by following pseudo code # # .. code-block:: c # # ct = 0 # while ct < max_number_of_trials: # propose a batch of configs # measure this batch of configs on real hardware and get results # ct += batch_size # # When proposing the next batch of configs, the tuner can take different # strategies. Some of the tuner strategies provided by TVM include: # # * :any:`tvm.autotvm.tuner.RandomTuner`: Enumerate the space in a random order # * :any:`tvm.autotvm.tuner.GridSearchTuner`: Enumerate the space in a grid search order # * :any:`tvm.autotvm.tuner.GATuner`: Using genetic algorithm to search through the space # * :any:`tvm.autotvm.tuner.XGBTuner`: Uses a model based method. Train a XGBoost model to # predict the speed of lowered IR and pick the next batch according to the # prediction. # # You can choose the tuner according to the size of your space, your time # budget and other factors. 
For example, if your space is very small (less # than 1000), a grid-search tuner or a random tuner is good enough. If your # space is at the level of 10^9 (this is the space size of a conv2d operator on # CUDA GPU), XGBoostTuner can explore more efficiently and find better configs. ################################################################################ # Begin tuning # ~~~~~~~~~~~~ # Here we continue our matrix multiplication example. First we create a tuning # task. We can also inspect the initialized search space. In this case, for a # 512x512 square matrix multiplication, the space size is 10x10=100 Note that # the task and search space are independent of the tuner picked. N, L, M = 512, 512, 512 task = autotvm.task.create("tutorial/matmul", args=(N, L, M, "float32"), target="llvm") print(task.config_space) ################################################################################ # Then we need to define how to measure the generated code and pick a tuner. # Since our space is small, a random tuner is just okay. # # We only make 10 trials in this tutorial for demonstration. In practice, you # can do more trials according to your time budget. We will log the tuning # results into a log file. This file can be used to choose the best # configuration discovered by the tuner later. # logging config (for printing tuning log to the screen) logging.getLogger("autotvm").setLevel(logging.DEBUG) logging.getLogger("autotvm").addHandler(logging.StreamHandler(sys.stdout)) ################################################################################ # There are two steps for measuring a config: build and run. By default, we use # all CPU cores to compile program. We then measure them sequentially. To help # reduce variance, we take 5 measurements and average them. 
measure_option = autotvm.measure_option(builder="local", runner=autotvm.LocalRunner(number=5)) # Begin tuning with RandomTuner, log records to file `matmul.log` # You can use alternatives like XGBTuner. tuner = autotvm.tuner.RandomTuner(task) tuner.tune( n_trial=10, measure_option=measure_option, callbacks=[autotvm.callback.log_to_file("matmul.log")], ) ################################################################################ # With tuning completed, we can choose the configuration from the log file that # has the best measured performance and compile the schedule with the # corresponding parameters. We also do a quick verification that the schedule is # producing correct answers. We can call the function :code:`matmul` directly # under the :any:`autotvm.apply_history_best` context. When we call this # function, it will query the dispatch context with its argument and get the # best config with the same argument. # apply history best from log file with autotvm.apply_history_best("matmul.log"): with tvm.target.Target("llvm"): s, arg_bufs = matmul(N, L, M, "float32") func = tvm.build(s, arg_bufs) # check correctness a_np = np.random.uniform(size=(N, L)).astype(np.float32) b_np = np.random.uniform(size=(L, M)).astype(np.float32) c_np = a_np.dot(b_np) c_tvm = tvm.nd.empty(c_np.shape) func(tvm.nd.array(a_np), tvm.nd.array(b_np), c_tvm) tvm.testing.assert_allclose(c_np, c_tvm.numpy(), rtol=1e-4) ################################################################################ # Final Notes and Summary # ----------------------- # In this tutorial, we have shown how to build operator templates that allow # TVM to search a parameter space and choose optimized schedule configurations. # To gain a deeper understanding of how this works, we recommend expanding on # this example by adding new search parameters to the schedule based on # schedule operations demonstrated in the :ref: `Getting Started With Tensor # Expressions <tensor_expr_get_started>_` tutorial. 
In the upcoming sections, we # will demonstrate the AutoScheduler, a method for TVM to optimize common # operators without the need for the user to provide a user-defined template.
16,251
41.881266
94
py
tvm
tvm-main/gallery/tutorial/install.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Installing TVM ============== **Authors**: `Jocelyn Shiue <https://github.com/>`_, `Chris Hoge <https://github.com/hogepodge>`_ Depending on your needs and your working environment, there are a few different methods for installing TVM. These include: * Installing from source * Installing from third-party binary package. """ ################################################################################ # Installing From Source # ---------------------- # Installing from source is the recommended method for installing TVM. It will # allow you to enable specific features such as GPU support, microcontroller # support (microTVM), and a debugging runtime, and other features. You will also # want to install from source if you want to actively contribute to the TVM # project. The full instructions are on the :ref:`Install TVM From Source # <install-from-source>` page. ################################################################################ # Installing From Binary Packages # -------------------------------- # You may install convenient third party binary package distributions to # quickly try things out. TLCPack is a third party volunteer community that # builds binary packages from TVM source. 
It offers a support matrix with # instructions to install on different platforms, with different features. # Check out `TLCPack <https://tlcpack.ai>`_ to learn more. Note that the # third party binary packages could contain additional licensing terms for # the hardware drivers that are bundled with it.
2,314
44.392157
80
py
tvm
tvm-main/gallery/tutorial/cross_compilation_and_rpc.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-cross-compilation-and-rpc: Cross Compilation and RPC ========================= **Author**: `Ziheng Jiang <https://github.com/ZihengJiang/>`_, `Lianmin Zheng <https://github.com/merrymercy/>`_ This tutorial introduces cross compilation and remote device execution with RPC in TVM. With cross compilation and RPC, you can **compile a program on your local machine then run it on the remote device**. It is useful when the remote device resource are limited, like Raspberry Pi and mobile platforms. In this tutorial, we will use the Raspberry Pi for a CPU example and the Firefly-RK3399 for an OpenCL example. """ ###################################################################### # Build TVM Runtime on Device # --------------------------- # # The first step is to build the TVM runtime on the remote device. # # .. note:: # # All instructions in both this section and the next section should be # executed on the target device, e.g. Raspberry Pi. We assume the target # is running Linux. # # Since we do compilation on the local machine, the remote device is only used # for running the generated code. We only need to build the TVM runtime on # the remote device. # # .. 
code-block:: bash # # git clone --recursive https://github.com/apache/tvm tvm # cd tvm # make runtime -j2 # # After building the runtime successfully, we need to set environment variables # in :code:`~/.bashrc` file. We can edit :code:`~/.bashrc` # using :code:`vi ~/.bashrc` and add the line below (Assuming your TVM # directory is in :code:`~/tvm`): # # .. code-block:: bash # # export PYTHONPATH=$PYTHONPATH:~/tvm/python # # To update the environment variables, execute :code:`source ~/.bashrc`. ###################################################################### # Set Up RPC Server on Device # --------------------------- # To start an RPC server, run the following command on your remote device # (Which is Raspberry Pi in this example). # # .. code-block:: bash # # python -m tvm.exec.rpc_server --host 0.0.0.0 --port=9090 # # If you see the line below, it means the RPC server started # successfully on your device. # # .. code-block:: bash # # INFO:root:RPCServer: bind to 0.0.0.0:9090 # ###################################################################### # Declare and Cross Compile Kernel on Local Machine # ------------------------------------------------- # # .. note:: # # Now we go back to the local machine, which has a full TVM installed # (with LLVM). # # Here we will declare a simple kernel on the local machine: import numpy as np import tvm from tvm import te from tvm import rpc from tvm.contrib import utils n = tvm.runtime.convert(1024) A = te.placeholder((n,), name="A") B = te.compute((n,), lambda i: A[i] + 1.0, name="B") s = te.create_schedule(B.op) ###################################################################### # Then we cross compile the kernel. # The target should be 'llvm -mtriple=armv7l-linux-gnueabihf' for # Raspberry Pi 3B, but we use 'llvm' here to make this tutorial runnable # on our webpage building server. See the detailed note in the following block. 
local_demo = True if local_demo: target = "llvm" else: target = "llvm -mtriple=armv7l-linux-gnueabihf" func = tvm.build(s, [A, B], target=target, name="add_one") # save the lib at a local temp folder temp = utils.tempdir() path = temp.relpath("lib.tar") func.export_library(path) ###################################################################### # .. note:: # # To run this tutorial with a real remote device, change :code:`local_demo` # to False and replace :code:`target` in :code:`build` with the appropriate # target triple for your device. The target triple which might be # different for different devices. For example, it is # :code:`'llvm -mtriple=armv7l-linux-gnueabihf'` for Raspberry Pi 3B and # :code:`'llvm -mtriple=aarch64-linux-gnu'` for RK3399. # # Usually, you can query the target by running :code:`gcc -v` on your # device, and looking for the line starting with :code:`Target:` # (Though it may still be a loose configuration.) # # Besides :code:`-mtriple`, you can also set other compilation options # like: # # * -mcpu=<cpuname> # Specify a specific chip in the current architecture to generate code for. By default this is inferred from the target triple and autodetected to the current architecture. # * -mattr=a1,+a2,-a3,... # Override or control specific attributes of the target, such as whether SIMD operations are enabled or not. The default set of attributes is set by the current CPU. # To get the list of available attributes, you can do: # # .. code-block:: bash # # llc -mtriple=<your device target triple> -mattr=help # # These options are consistent with `llc <http://llvm.org/docs/CommandGuide/llc.html>`_. # It is recommended to set target triple and feature set to contain specific # feature available, so we can take full advantage of the features of the # board. # You can find more details about cross compilation attributes from # `LLVM guide of cross compilation <https://clang.llvm.org/docs/CrossCompilation.html>`_. 
###################################################################### # Run CPU Kernel Remotely by RPC # ------------------------------ # We show how to run the generated CPU kernel on the remote device. # First we obtain an RPC session from remote device. if local_demo: remote = rpc.LocalSession() else: # The following is my environment, change this to the IP address of your target device host = "10.77.1.162" port = 9090 remote = rpc.connect(host, port) ###################################################################### # Upload the lib to the remote device, then invoke a device local # compiler to relink them. Now `func` is a remote module object. remote.upload(path) func = remote.load_module("lib.tar") # create arrays on the remote device dev = remote.cpu() a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev) # the function will run on the remote device func(a, b) np.testing.assert_equal(b.numpy(), a.numpy() + 1) ###################################################################### # When you want to evaluate the performance of the kernel on the remote # device, it is important to avoid the overhead of network. # :code:`time_evaluator` will returns a remote function that runs the # function over number times, measures the cost per run on the remote # device and returns the measured cost. Network overhead is excluded. time_f = func.time_evaluator(func.entry_name, dev, number=10) cost = time_f(a, b).mean print("%g secs/op" % cost) ######################################################################### # Run OpenCL Kernel Remotely by RPC # --------------------------------- # For remote OpenCL devices, the workflow is almost the same as above. # You can define the kernel, upload files, and run via RPC. # # .. note:: # # Raspberry Pi does not support OpenCL, the following code is tested on # Firefly-RK3399. 
You may follow this `tutorial <https://gist.github.com/mli/585aed2cec0b5178b1a510f9f236afa2>`_ # to setup the OS and OpenCL driver for RK3399. # # Also we need to build the runtime with OpenCL enabled on rk3399 board. In the TVM # root directory, execute # # .. code-block:: bash # # cp cmake/config.cmake . # sed -i "s/USE_OPENCL OFF/USE_OPENCL ON/" config.cmake # make runtime -j4 # # The following function shows how we run an OpenCL kernel remotely def run_opencl(): # NOTE: This is the setting for my rk3399 board. You need to modify # them according to your environment. opencl_device_host = "10.77.1.145" opencl_device_port = 9090 target = tvm.target.Target("opencl", host="llvm -mtriple=aarch64-linux-gnu") # create schedule for the above "add one" compute declaration s = te.create_schedule(B.op) xo, xi = s[B].split(B.op.axis[0], factor=32) s[B].bind(xo, te.thread_axis("blockIdx.x")) s[B].bind(xi, te.thread_axis("threadIdx.x")) func = tvm.build(s, [A, B], target=target) remote = rpc.connect(opencl_device_host, opencl_device_port) # export and upload path = temp.relpath("lib_cl.tar") func.export_library(path) remote.upload(path) func = remote.load_module("lib_cl.tar") # run dev = remote.cl() a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev) func(a, b) np.testing.assert_equal(b.numpy(), a.numpy() + 1) print("OpenCL test passed!") ###################################################################### # Summary # ------- # This tutorial provides a walk through of cross compilation and RPC # features in TVM. # # - Set up an RPC server on the remote device. # - Set up the target device configuration to cross compile the kernels on the # local machine. # - Upload and run the kernels remotely via the RPC API.
9,851
35.898876
178
py
tvm
tvm-main/gallery/tutorial/autotvm_relay_x86.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compiling and Optimizing a Model with the Python Interface (AutoTVM) ==================================================================== **Author**: `Chris Hoge <https://github.com/hogepodge>`_ In the `TVMC Tutorial <tvmc_command_line_driver>`_, we covered how to compile, run, and tune a pre-trained vision model, ResNet-50 v2 using the command line interface for TVM, TVMC. TVM is more that just a command-line tool though, it is an optimizing framework with APIs available for a number of different languages that gives you tremendous flexibility in working with machine learning models. In this tutorial we will cover the same ground we did with TVMC, but show how it is done with the Python API. Upon completion of this section, we will have used the Python API for TVM to accomplish the following tasks: * Compile a pre-trained ResNet-50 v2 model for the TVM runtime. * Run a real image through the compiled model, and interpret the output and model performance. * Tune the model that model on a CPU using TVM. * Re-compile an optimized model using the tuning data collected by TVM. * Run the image through the optimized model, and compare the output and model performance. 
The goal of this section is to give you an overview of TVM's capabilites and how to use them through the Python API. """ ################################################################################ # TVM is a deep learning compiler framework, with a number of different modules # available for working with deep learning models and operators. In this # tutorial we will work through how to load, compile, and optimize a model # using the Python API. # # We begin by importing a number of dependencies, including ``onnx`` for # loading and converting the model, helper utilities for downloading test data, # the Python Image Library for working with the image data, ``numpy`` for pre # and post-processing of the image data, the TVM Relay framework, and the TVM # Graph Executor. import onnx from tvm.contrib.download import download_testdata from PIL import Image import numpy as np import tvm.relay as relay import tvm from tvm.contrib import graph_executor ################################################################################ # Downloading and Loading the ONNX Model # -------------------------------------- # # For this tutorial, we will be working with ResNet-50 v2. ResNet-50 is a # convolutional neural network that is 50 layers deep and designed to classify # images. The model we will be using has been pre-trained on more than a # million images with 1000 different classifications. The network has an input # image size of 224x224. If you are interested exploring more of how the # ResNet-50 model is structured, we recommend downloading # `Netron <https://netron.app>`_, a freely available ML model viewer. # # TVM provides a helper library to download pre-trained models. By providing a # model URL, file name, and model type through the module, TVM will download # the model and save it to disk. For the instance of an ONNX model, you can # then load it into memory using the ONNX runtime. # # .. 
admonition:: Working with Other Model Formats # # TVM supports many popular model formats. A list can be found in the # :ref:`Compile Deep Learning Models <tutorial-frontend>` section of the TVM # Documentation. model_url = ( "https://github.com/onnx/models/raw/main/" "vision/classification/resnet/model/" "resnet50-v2-7.onnx" ) model_path = download_testdata(model_url, "resnet50-v2-7.onnx", module="onnx") onnx_model = onnx.load(model_path) # Seed numpy's RNG to get consistent results np.random.seed(0) ################################################################################ # Downloading, Preprocessing, and Loading the Test Image # ------------------------------------------------------ # # Each model is particular when it comes to expected tensor shapes, formats and # data types. For this reason, most models require some pre and # post-processing, to ensure the input is valid and to interpret the output. # TVMC has adopted NumPy's ``.npz`` format for both input and output data. # # As input for this tutorial, we will use the image of a cat, but you can feel # free to substitute this image for any of your choosing. # # .. image:: https://s3.amazonaws.com/model-server/inputs/kitten.jpg # :height: 224px # :width: 224px # :align: center # # Download the image data, then convert it to a numpy array to use as an input to the model. 
img_url = "https://s3.amazonaws.com/model-server/inputs/kitten.jpg" img_path = download_testdata(img_url, "imagenet_cat.png", module="data") # Resize it to 224x224 resized_image = Image.open(img_path).resize((224, 224)) img_data = np.asarray(resized_image).astype("float32") # Our input image is in HWC layout while ONNX expects CHW input, so convert the array img_data = np.transpose(img_data, (2, 0, 1)) # Normalize according to the ImageNet input specification imagenet_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1)) imagenet_stddev = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1)) norm_img_data = (img_data / 255 - imagenet_mean) / imagenet_stddev # Add the batch dimension, as we are expecting 4-dimensional input: NCHW. img_data = np.expand_dims(norm_img_data, axis=0) ############################################################################### # Compile the Model With Relay # ---------------------------- # # The next step is to compile the ResNet model. We begin by importing the model # to relay using the `from_onnx` importer. We then build the model, with # standard optimizations, into a TVM library. Finally, we create a TVM graph # runtime module from the library. target = "llvm" ###################################################################### # .. admonition:: Defining the Correct Target # # Specifying the correct target can have a huge impact on the performance of # the compiled module, as it can take advantage of hardware features # available on the target. For more information, please refer to # :ref:`Auto-tuning a convolutional network for x86 CPU <tune_relay_x86>`. # We recommend identifying which CPU you are running, along with optional # features, and set the target appropriately. For example, for some # processors ``target = "llvm -mcpu=skylake"``, or ``target = "llvm # -mcpu=skylake-avx512"`` for processors with the AVX-512 vector instruction # set. # # The input name may vary across model types. 
You can use a tool # like Netron to check input names input_name = "data" shape_dict = {input_name: img_data.shape} mod, params = relay.frontend.from_onnx(onnx_model, shape_dict) with tvm.transform.PassContext(opt_level=3): lib = relay.build(mod, target=target, params=params) dev = tvm.device(str(target), 0) module = graph_executor.GraphModule(lib["default"](dev)) ###################################################################### # Execute on the TVM Runtime # -------------------------- # Now that we've compiled the model, we can use the TVM runtime to make # predictions with it. To use TVM to run the model and make predictions, we # need two things: # # - The compiled model, which we just produced. # - Valid input to the model to make predictions on. dtype = "float32" module.set_input(input_name, img_data) module.run() output_shape = (1, 1000) tvm_output = module.get_output(0, tvm.nd.empty(output_shape)).numpy() ################################################################################ # Collect Basic Performance Data # ------------------------------ # We want to collect some basic performance data associated with this # unoptimized model and compare it to a tuned model later. To help account for # CPU noise, we run the computation in multiple batches in multiple # repetitions, then gather some basis statistics on the mean, median, and # standard deviation. import timeit timing_number = 10 timing_repeat = 10 unoptimized = ( np.array(timeit.Timer(lambda: module.run()).repeat(repeat=timing_repeat, number=timing_number)) * 1000 / timing_number ) unoptimized = { "mean": np.mean(unoptimized), "median": np.median(unoptimized), "std": np.std(unoptimized), } print(unoptimized) ################################################################################ # Postprocess the output # ---------------------- # # As previously mentioned, each model will have its own particular way of # providing output tensors. 
# # In our case, we need to run some post-processing to render the outputs from # ResNet-50 v2 into a more human-readable form, using the lookup-table provided # for the model. from scipy.special import softmax # Download a list of labels labels_url = "https://s3.amazonaws.com/onnx-model-zoo/synset.txt" labels_path = download_testdata(labels_url, "synset.txt", module="data") with open(labels_path, "r") as f: labels = [l.rstrip() for l in f] # Open the output and read the output tensor scores = softmax(tvm_output) scores = np.squeeze(scores) ranks = np.argsort(scores)[::-1] for rank in ranks[0:5]: print("class='%s' with probability=%f" % (labels[rank], scores[rank])) ################################################################################ # This should produce the following output: # # .. code-block:: bash # # # class='n02123045 tabby, tabby cat' with probability=0.610553 # # class='n02123159 tiger cat' with probability=0.367179 # # class='n02124075 Egyptian cat' with probability=0.019365 # # class='n02129604 tiger, Panthera tigris' with probability=0.001273 # # class='n04040759 radiator' with probability=0.000261 ################################################################################ # Tune the model # -------------- # The previous model was compiled to work on the TVM runtime, but did not # include any platform specific optimization. In this section, we will show you # how to build an optimized model using TVM to target your working platform. # # In some cases, we might not get the expected performance when running # inferences using our compiled module. In cases like this, we can make use of # the auto-tuner, to find a better configuration for our model and get a boost # in performance. Tuning in TVM refers to the process by which a model is # optimized to run faster on a given target. This differs from training or # fine-tuning in that it does not affect the accuracy of the model, but only # the runtime performance. 
As part of the tuning process, TVM will try running # many different operator implementation variants to see which perform best. # The results of these runs are stored in a tuning records file. # # In the simplest form, tuning requires you to provide three things: # # - the target specification of the device you intend to run this model on # - the path to an output file in which the tuning records will be stored # - a path to the model to be tuned. # import tvm.auto_scheduler as auto_scheduler from tvm.autotvm.tuner import XGBTuner from tvm import autotvm ################################################################################ # Set up some basic parameters for the runner. The runner takes compiled code # that is generated with a specific set of parameters and measures the # performance of it. ``number`` specifies the number of different # configurations that we will test, while ``repeat`` specifies how many # measurements we will take of each configuration. ``min_repeat_ms`` is a value # that specifies how long need to run configuration test. If the number of # repeats falls under this time, it will be increased. This option is necessary # for accurate tuning on GPUs, and is not required for CPU tuning. Setting this # value to 0 disables it. The ``timeout`` places an upper limit on how long to # run training code for each tested configuration. number = 10 repeat = 1 min_repeat_ms = 0 # since we're tuning on a CPU, can be set to 0 timeout = 10 # in seconds # create a TVM runner runner = autotvm.LocalRunner( number=number, repeat=repeat, timeout=timeout, min_repeat_ms=min_repeat_ms, enable_cpu_cache_flush=True, ) ################################################################################ # Create a simple structure for holding tuning options. We use an XGBoost # algorithim for guiding the search. For a production job, you will want to set # the number of trials to be larger than the value of 20 used here. For CPU we # recommend 1500, for GPU 3000-4000. 
The number of trials required can depend # on the particular model and processor, so it's worth spending some time # evaluating performance across a range of values to find the best balance # between tuning time and model optimization. Because running tuning is time # intensive we set number of trials to 10, but do not recommend a value this # small. The ``early_stopping`` parameter is the minimum number of trails to # run before a condition that stops the search early can be applied. The # measure option indicates where trial code will be built, and where it will be # run. In this case, we're using the ``LocalRunner`` we just created and a # ``LocalBuilder``. The ``tuning_records`` option specifies a file to write # the tuning data to. tuning_option = { "tuner": "xgb", "trials": 20, "early_stopping": 100, "measure_option": autotvm.measure_option( builder=autotvm.LocalBuilder(build_func="default"), runner=runner ), "tuning_records": "resnet-50-v2-autotuning.json", } ################################################################################ # .. admonition:: Defining the Tuning Search Algorithm # # By default this search is guided using an `XGBoost Grid` algorithm. # Depending on your model complexity and amount of time available, you might # want to choose a different algorithm. ################################################################################ # .. admonition:: Setting Tuning Parameters # # In this example, in the interest of time, we set the number of trials and # early stopping to 20 and 100. You will likely see more performance improvements if # you set these values to be higher but this comes at the expense of time # spent tuning. The number of trials required for convergence will vary # depending on the specifics of the model and the target platform. # begin by extracting the tasks from the onnx model tasks = autotvm.task.extract_from_program(mod["main"], target=target, params=params) # Tune the extracted tasks sequentially. 
for i, task in enumerate(tasks): prefix = "[Task %2d/%2d] " % (i + 1, len(tasks)) # choose tuner tuner = "xgb" # create tuner if tuner == "xgb": tuner_obj = XGBTuner(task, loss_type="reg") elif tuner == "xgb_knob": tuner_obj = XGBTuner(task, loss_type="reg", feature_type="knob") elif tuner == "xgb_itervar": tuner_obj = XGBTuner(task, loss_type="reg", feature_type="itervar") elif tuner == "xgb_curve": tuner_obj = XGBTuner(task, loss_type="reg", feature_type="curve") elif tuner == "xgb_rank": tuner_obj = XGBTuner(task, loss_type="rank") elif tuner == "xgb_rank_knob": tuner_obj = XGBTuner(task, loss_type="rank", feature_type="knob") elif tuner == "xgb_rank_itervar": tuner_obj = XGBTuner(task, loss_type="rank", feature_type="itervar") elif tuner == "xgb_rank_curve": tuner_obj = XGBTuner(task, loss_type="rank", feature_type="curve") elif tuner == "xgb_rank_binary": tuner_obj = XGBTuner(task, loss_type="rank-binary") elif tuner == "xgb_rank_binary_knob": tuner_obj = XGBTuner(task, loss_type="rank-binary", feature_type="knob") elif tuner == "xgb_rank_binary_itervar": tuner_obj = XGBTuner(task, loss_type="rank-binary", feature_type="itervar") elif tuner == "xgb_rank_binary_curve": tuner_obj = XGBTuner(task, loss_type="rank-binary", feature_type="curve") elif tuner == "ga": tuner_obj = GATuner(task, pop_size=50) elif tuner == "random": tuner_obj = RandomTuner(task) elif tuner == "gridsearch": tuner_obj = GridSearchTuner(task) else: raise ValueError("Invalid tuner: " + tuner) tuner_obj.tune( n_trial=min(tuning_option["trials"], len(task.config_space)), early_stopping=tuning_option["early_stopping"], measure_option=tuning_option["measure_option"], callbacks=[ autotvm.callback.progress_bar(tuning_option["trials"], prefix=prefix), autotvm.callback.log_to_file(tuning_option["tuning_records"]), ], ) ################################################################################ # The output from this tuning process will look something like this: # # .. 
code-block:: bash # # # [Task 1/24] Current/Best: 10.71/ 21.08 GFLOPS | Progress: (60/1000) | 111.77 s Done. # # [Task 1/24] Current/Best: 9.32/ 24.18 GFLOPS | Progress: (192/1000) | 365.02 s Done. # # [Task 2/24] Current/Best: 22.39/ 177.59 GFLOPS | Progress: (960/1000) | 976.17 s Done. # # [Task 3/24] Current/Best: 32.03/ 153.34 GFLOPS | Progress: (800/1000) | 776.84 s Done. # # [Task 4/24] Current/Best: 11.96/ 156.49 GFLOPS | Progress: (960/1000) | 632.26 s Done. # # [Task 5/24] Current/Best: 23.75/ 130.78 GFLOPS | Progress: (800/1000) | 739.29 s Done. # # [Task 6/24] Current/Best: 38.29/ 198.31 GFLOPS | Progress: (1000/1000) | 624.51 s Done. # # [Task 7/24] Current/Best: 4.31/ 210.78 GFLOPS | Progress: (1000/1000) | 701.03 s Done. # # [Task 8/24] Current/Best: 50.25/ 185.35 GFLOPS | Progress: (972/1000) | 538.55 s Done. # # [Task 9/24] Current/Best: 50.19/ 194.42 GFLOPS | Progress: (1000/1000) | 487.30 s Done. # # [Task 10/24] Current/Best: 12.90/ 172.60 GFLOPS | Progress: (972/1000) | 607.32 s Done. # # [Task 11/24] Current/Best: 62.71/ 203.46 GFLOPS | Progress: (1000/1000) | 581.92 s Done. # # [Task 12/24] Current/Best: 36.79/ 224.71 GFLOPS | Progress: (1000/1000) | 675.13 s Done. # # [Task 13/24] Current/Best: 7.76/ 219.72 GFLOPS | Progress: (1000/1000) | 519.06 s Done. # # [Task 14/24] Current/Best: 12.26/ 202.42 GFLOPS | Progress: (1000/1000) | 514.30 s Done. # # [Task 15/24] Current/Best: 31.59/ 197.61 GFLOPS | Progress: (1000/1000) | 558.54 s Done. # # [Task 16/24] Current/Best: 31.63/ 206.08 GFLOPS | Progress: (1000/1000) | 708.36 s Done. # # [Task 17/24] Current/Best: 41.18/ 204.45 GFLOPS | Progress: (1000/1000) | 736.08 s Done. # # [Task 18/24] Current/Best: 15.85/ 222.38 GFLOPS | Progress: (980/1000) | 516.73 s Done. # # [Task 19/24] Current/Best: 15.78/ 203.41 GFLOPS | Progress: (1000/1000) | 587.13 s Done. # # [Task 20/24] Current/Best: 30.47/ 205.92 GFLOPS | Progress: (980/1000) | 471.00 s Done. 
# # [Task 21/24] Current/Best: 46.91/ 227.99 GFLOPS | Progress: (308/1000) | 219.18 s Done. # # [Task 22/24] Current/Best: 13.33/ 207.66 GFLOPS | Progress: (1000/1000) | 761.74 s Done. # # [Task 23/24] Current/Best: 53.29/ 192.98 GFLOPS | Progress: (1000/1000) | 799.90 s Done. # # [Task 24/24] Current/Best: 25.03/ 146.14 GFLOPS | Progress: (1000/1000) | 1112.55 s Done. ################################################################################ # Compiling an Optimized Model with Tuning Data # ---------------------------------------------- # # As an output of the tuning process above, we obtained the tuning records # stored in ``resnet-50-v2-autotuning.json``. The compiler will use the results to # generate high performance code for the model on your specified target. # # Now that tuning data for the model has been collected, we can re-compile the # model using optimized operators to speed up our computations. with autotvm.apply_history_best(tuning_option["tuning_records"]): with tvm.transform.PassContext(opt_level=3, config={}): lib = relay.build(mod, target=target, params=params) dev = tvm.device(str(target), 0) module = graph_executor.GraphModule(lib["default"](dev)) ################################################################################ # Verify that the optimized model runs and produces the same results: dtype = "float32" module.set_input(input_name, img_data) module.run() output_shape = (1, 1000) tvm_output = module.get_output(0, tvm.nd.empty(output_shape)).numpy() scores = softmax(tvm_output) scores = np.squeeze(scores) ranks = np.argsort(scores)[::-1] for rank in ranks[0:5]: print("class='%s' with probability=%f" % (labels[rank], scores[rank])) ################################################################################ # Verifying that the predictions are the same: # # .. 
code-block:: bash # # # class='n02123045 tabby, tabby cat' with probability=0.610550 # # class='n02123159 tiger cat' with probability=0.367181 # # class='n02124075 Egyptian cat' with probability=0.019365 # # class='n02129604 tiger, Panthera tigris' with probability=0.001273 # # class='n04040759 radiator' with probability=0.000261 ################################################################################ # Comparing the Tuned and Untuned Models # -------------------------------------- # We want to collect some basic performance data associated with this optimized # model to compare it to the unoptimized model. Depending on your underlying # hardware, number of iterations, and other factors, you should see a performance # improvement in comparing the optimized model to the unoptimized model. import timeit timing_number = 10 timing_repeat = 10 optimized = ( np.array(timeit.Timer(lambda: module.run()).repeat(repeat=timing_repeat, number=timing_number)) * 1000 / timing_number ) optimized = {"mean": np.mean(optimized), "median": np.median(optimized), "std": np.std(optimized)} print("optimized: %s" % (optimized)) print("unoptimized: %s" % (unoptimized)) ################################################################################ # Final Remarks # ------------- # # In this tutorial, we gave a short example of how to use the TVM Python API # to compile, run, and tune a model. We also discussed the need for pre and # post-processing of inputs and outputs. After the tuning process, we # demonstrated how to compare the performance of the unoptimized and optimize # models. # # Here we presented a simple example using ResNet-50 v2 locally. However, TVM # supports many more features including cross-compilation, remote execution and # profiling/benchmarking.
23,341
44.148936
99
py
tvm
tvm-main/gallery/tutorial/relay_quick_start.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-relay-quick-start: Quick Start Tutorial for Compiling Deep Learning Models ======================================================= **Author**: `Yao Wang <https://github.com/kevinthesun>`_, `Truman Tian <https://github.com/SiNZeRo>`_ This example shows how to build a neural network with Relay python frontend and generates a runtime library for Nvidia GPU with TVM. Notice that you need to build TVM with cuda and llvm enabled. """ ###################################################################### # Overview for Supported Hardware Backend of TVM # ---------------------------------------------- # The image below shows hardware backend currently supported by TVM: # # .. image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/tvm_support_list.png # :align: center # # In this tutorial, we'll choose cuda and llvm as target backends. # To begin with, let's import Relay and TVM. 
# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True # sphinx_gallery_end_ignore import numpy as np from tvm import relay from tvm.relay import testing import tvm from tvm import te from tvm.contrib import graph_executor import tvm.testing ###################################################################### # Define Neural Network in Relay # ------------------------------ # First, let's define a neural network with relay python frontend. # For simplicity, we'll use pre-defined resnet-18 network in Relay. # Parameters are initialized with Xavier initializer. # Relay also supports other model formats such as MXNet, CoreML, ONNX and # Tensorflow. # # In this tutorial, we assume we will do inference on our device and # the batch size is set to be 1. Input images are RGB color images of # size 224 * 224. We can call the # :py:meth:`tvm.relay.expr.TupleWrapper.astext()` to show the network # structure. batch_size = 1 num_class = 1000 image_shape = (3, 224, 224) data_shape = (batch_size,) + image_shape out_shape = (batch_size, num_class) mod, params = relay.testing.resnet.get_workload( num_layers=18, batch_size=batch_size, image_shape=image_shape ) # set show_meta_data=True if you want to show meta data print(mod.astext(show_meta_data=False)) ###################################################################### # Compilation # ----------- # Next step is to compile the model using the Relay/TVM pipeline. # Users can specify the optimization level of the compilation. # Currently this value can be 0 to 3. The optimization passes include # operator fusion, pre-computation, layout transformation and so on. # # :py:func:`relay.build` returns three components: the execution graph in # json format, the TVM module library of compiled functions specifically # for this graph on the target hardware, and the parameter blobs of # the model. 
During the compilation, Relay does the graph-level # optimization while TVM does the tensor-level optimization, resulting # in an optimized runtime module for model serving. # # We'll first compile for Nvidia GPU. Behind the scene, :py:func:`relay.build` # first does a number of graph-level optimizations, e.g. pruning, fusing, etc., # then registers the operators (i.e. the nodes of the optimized graphs) to # TVM implementations to generate a `tvm.module`. # To generate the module library, TVM will first transfer the high level IR # into the lower intrinsic IR of the specified target backend, which is CUDA # in this example. Then the machine code will be generated as the module library. opt_level = 3 target = tvm.target.cuda() with tvm.transform.PassContext(opt_level=opt_level): lib = relay.build(mod, target, params=params) ##################################################################### # Run the generate library # ------------------------ # Now we can create graph executor and run the module on Nvidia GPU. # create random input dev = tvm.cuda() data = np.random.uniform(-1, 1, size=data_shape).astype("float32") # create module module = graph_executor.GraphModule(lib["default"](dev)) # set input and parameters module.set_input("data", data) # run module.run() # get output out = module.get_output(0, tvm.nd.empty(out_shape)).numpy() # Print first 10 elements of output print(out.flatten()[0:10]) ###################################################################### # Save and Load Compiled Module # ----------------------------- # We can also save the graph, lib and parameters into files and load them # back in deploy environment. #################################################### # save the graph, lib and params into separate files from tvm.contrib import utils temp = utils.tempdir() path_lib = temp.relpath("deploy_lib.tar") lib.export_library(path_lib) print(temp.listdir()) #################################################### # load the module back. 
loaded_lib = tvm.runtime.load_module(path_lib) input_data = tvm.nd.array(data) module = graph_executor.GraphModule(loaded_lib["default"](dev)) module.run(data=input_data) out_deploy = module.get_output(0).numpy() # Print first 10 elements of output print(out_deploy.flatten()[0:10]) # check whether the output from deployed module is consistent with original one tvm.testing.assert_allclose(out_deploy, out, atol=1e-5)
6,007
36.786164
101
py
tvm
tvm-main/gallery/tutorial/tvmc_python.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Getting Starting using TVMC Python: a high-level API for TVM ============================================================= **Author**: `Jocelyn Shiue <https://github.com/CircleSpin>`_ Hi! Here we explain the scripting tool designed for the complete TVM beginner. 🙂 Before we get started let's get an example model if you don't already have one. Follow the steps to download a resnet model via the terminal: .. code-block:: python mkdir myscripts cd myscripts wget https://github.com/onnx/models/raw/b9a54e89508f101a1611cd64f4ef56b9cb62c7cf/vision/classification/resnet/model/resnet50-v2-7.onnx mv resnet50-v2-7.onnx my_model.onnx touch tvmcpythonintro.py Let's start editing the python file in your favorite text editor. """ ################################################################################ # Step 0: Imports # ~~~~~~~~~~~~~~~ # # .. code-block:: python # # from tvm.driver import tvmc # # ################################################################################ # Step 1: Load a model # ~~~~~~~~~~~~~~~~~~~~ # # Let's import our model into tvmc. This step converts a machine learning model from # a supported framework into TVM's high level graph representation language called Relay. 
# This is to have a unified starting point for all models in tvm. The frameworks we currently # support are: Keras, ONNX, Tensorflow, TFLite, and PyTorch. # # .. code-block:: python # # model = tvmc.load('my_model.onnx') #Step 1: Load # # If you'd like to see the Relay, you can run: # ``model.summary()`` # # All frameworks support overwriting the input shapes with a shape_dict argument. # For most frameworks this is optional, but for Pytorch this is necessary as # TVM cannot automatically search for it. # # .. code-block:: python # # #model = tvmc.load('my_model.onnx', shape_dict={'input1' : [1, 2, 3, 4], 'input2' : [1, 2, 3, 4]}) #Step 1: Load + shape_dict # # A suggested way to see the model's input/shape_dict is via `netron <https://netron.app/>`_. After opening the model, # click the first node to see the name(s) and shape(s) in the inputs section. ################################################################################ # Step 2: Compile # ~~~~~~~~~~~~~~~ # # Now that our model is in Relay, our next step is to compile it to a desired # hardware to run on. We refer to this hardware as a target. This compilation process # translates the model from Relay into a lower-level language that the # target machine can understand. # # In order to compile a model a tvm.target string is required. # To learn more about tvm.targets and their options look at the `documentation <https://tvm.apache.org/docs/api/python/target.html>`_. # Some examples include: # # 1. cuda (Nvidia GPU) # 2. llvm (CPU) # 3. llvm -mcpu=cascadelake (Intel CPU) # # .. code-block:: python # # package = tvmc.compile(model, target="llvm") #Step 2: Compile # # # The compilation step returns a package. # ################################################################################ # Step 3: Run # ~~~~~~~~~~~ # # The compiled package can now be run on the hardware target. The device # input options are: CPU, Cuda, CL, Metal, and Vulkan. # # .. 
code-block:: python # # result = tvmc.run(package, device="cpu") #Step 3: Run # # And you can print the results: # ``print(result)`` # ################################################################################ # Step 1.5: Tune [Optional & Recommended] # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Run speed can further be improved by tuning. This optional step uses # machine learning to look at each operation within a model (a function) and # tries to find a faster way to run it. We do this through a cost model, and # benchmarking possible schedules. # # The target is the same as compile. # # .. code-block:: python # # tvmc.tune(model, target="llvm") #Step 1.5: Optional Tune # # The terminal output should look like: # # .. code-block:: python # # [Task 1/13] Current/Best: 82.00/ 106.29 GFLOPS | Progress: (48/769) | 18.56 s # [Task 1/13] Current/Best: 54.47/ 113.50 GFLOPS | Progress: (240/769) | 85.36 s # ..... # # There may be UserWarnings that can be ignored. # This should make the end result faster, but it can take hours to tune. # # See the section 'Saving the Tuning Results' below. Be sure to pass the tuning # results into compile if you want the results to apply. # # .. code-block:: python # # #tvmc.compile(model, target="llvm", tuning_records = "records.log") #Step 2: Compile ################################################################################ # Save and then start the process in the terminal: # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # .. code-block:: python # # python my_tvmc_script.py # # Note: Your fans may become very active # ################################################################################ # Example results: # ~~~~~~~~~~~~~~~~ # # .. 
code-block:: python # # Time elapsed for training: 18.99 s # Execution time summary: # mean (ms) max (ms) min (ms) std (ms) # 25.24 26.12 24.89 0.38 # # # Output Names: # ['output_0'] # ################################################################################ # Additional TVMC Functionalities # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ################################################################################ # Saving the model # ~~~~~~~~~~~~~~~~ # # To make things faster for later, after loading the model (Step 1) save the Relay version. # The model will then appear where you saved it for later in the coverted syntax. # # .. code-block:: python # # model = tvmc.load('my_model.onnx') #Step 1: Load # model.save(desired_model_path) # # ################################################################################ # Saving the package # ~~~~~~~~~~~~~~~~~~ # # After the model has been compiled (Step 2) the package also is also saveable. # # .. code-block:: python # # tvmc.compile(model, target="llvm", package_path="whatever") #Step 2: Compile # # new_package = tvmc.TVMCPackage(package_path="whatever") # result = tvmc.run(new_package, device="cpu") #Step 3: Run # # ################################################################################ # Using Autoscheduler # ~~~~~~~~~~~~~~~~~~~ # # Use the next generation of tvm to enable potentially faster run speed results. # The search space of the schedules is automatically generated unlike # previously where they needed to be hand written. (Learn more: # `1 <https://tvm.apache.org/2021/03/03/intro-auto-scheduler>`_, # `2 <https://arxiv.org/abs/2006.06762>`_) # # .. code-block:: python # # tvmc.tune(model, target="llvm", enable_autoscheduler = True) # # ################################################################################ # Saving the tuning results # ~~~~~~~~~~~~~~~~~~~~~~~~~ # # The tuning results can be saved in a file for later reuse. # # Method 1: # .. 
code-block:: python # # log_file = "hello.json" # # # Run tuning # tvmc.tune(model, target="llvm", tuning_records=log_file) # # ... # # # Later run tuning and reuse tuning results # tvmc.tune(model, target="llvm", prior_records=log_file) # # Method 2: # .. code-block:: python # # # Run tuning # tuning_records = tvmc.tune(model, target="llvm") # # ... # # # Later run tuning and reuse tuning results # tvmc.tune(model, target="llvm", prior_records=tuning_records) # ################################################################################ # Tuning a more complex model: # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # If you notice T's printing that look like ``.........T.T..T..T..T.T.T.T.T.T.`` # increase the searching time frame: # # .. code-block:: python # # tvmc.tune(model,trials=10000,timeout=10,) # ################################################################################ # Compiling a model for a remote device: # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # A remote procedural call (RPC) is useful when you would like to compile for hardware # that is not on your local machine. The tvmc methods support this. # To set up the RPC server take a look at the 'Set up RPC Server on Device' # section in this `document <https://tvm.apache.org/docs/tutorials/get_started/cross_compilation_and_rpc.html>`_. # # Within the TVMC Script include the following and adjust accordingly: # # .. code-block:: python # # tvmc.tune( # model, # target=target, # Compilation target as string // Device to compile for # target_host=target_host, # Host processor # hostname=host_ip_address, # The IP address of an RPC tracker, used when benchmarking remotely. # port=port_number, # The port of the RPC tracker to connect to. Defaults to 9090. # rpc_key=your_key, # The RPC tracker key of the target device. Required when rpc_tracker is provided # ) #
9,741
32.136054
139
py
tvm
tvm-main/gallery/tutorial/auto_scheduler_matmul_x86.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Optimizing Operators with Auto-scheduling ========================================= **Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, \ `Chengfan Jia <https://github.com/jcf94/>`_ In this tutorial, we will show how TVM's Auto Scheduling feature can find optimal schedules without the need for writing a custom template. Different from the template-based :doc:`AutoTVM <autotvm_matmul_x86>` which relies on manual templates to define the search space, the auto-scheduler does not require any templates. Users only need to write the computation declaration without any schedule commands or templates. The auto-scheduler can automatically generate a large search space and find a good schedule in the space. We use matrix multiplication as an example in this tutorial. .. note:: Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. 
""" import numpy as np import tvm from tvm import te, auto_scheduler ################################################################################ # Defining the Matrix Multiplication # ---------------------------------- # To start, we define a matrix multiplication with a bias addition. Note that # this uses standard operations available in TVMs Tensor Expression language. # The major difference is the use of the :any:`register_workload` decorator at the top # of the function definition. The function should return a list of # input/output tensors. From these tensors, the auto-scheduler can get the # whole computational graph. @auto_scheduler.register_workload # Note the auto_scheduler decorator def matmul_add(N, L, M, dtype): A = te.placeholder((N, L), name="A", dtype=dtype) B = te.placeholder((L, M), name="B", dtype=dtype) C = te.placeholder((N, M), name="C", dtype=dtype) k = te.reduce_axis((0, L), name="k") matmul = te.compute( (N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="matmul", attrs={"layout_free_placeholders": [B]}, # enable automatic layout transform for tensor B ) out = te.compute((N, M), lambda i, j: matmul[i, j] + C[i, j], name="out") return [A, B, C, out] ################################################################################ # Create the search task # ---------------------- # With the function defined, we can now create the task for the auto_scheduler # to search against. We specify the particular parameters for this matrix # multiplication, in this case a multiplication of two square matrices of size # 1024x1024. We then create a search task with N=L=M=1024 and dtype="float32" # # .. admonition:: Improve performance with custom targets # # In order for TVM to take full advantage of specific hardware platforms, # you will want to manually specify your CPU capabilities. 
For example: # # - replace ``llvm`` below with ``llvm -mcpu=core-avx2`` to enable AVX2 # - replace ``llvm`` below with ``llvm -mcpu=skylake-avx512`` to enable AVX-512 target = tvm.target.Target("llvm") N = L = M = 1024 task = tvm.auto_scheduler.SearchTask(func=matmul_add, args=(N, L, M, "float32"), target=target) # Inspect the computational graph print("Computational DAG:") print(task.compute_dag) ################################################################################ # Set Parameters for Auto-Scheduler # --------------------------------- # Next, we set parameters for the auto-scheduler. # # * :code:`num_measure_trials` is the number of measurement trials we can use # during the search. We only make 10 trials in this tutorial for a fast # demonstration. In practice, 1000 is a good value for the search to converge. # You can do more trials according to your time budget. # * In addition, we use :any:`RecordToFile <auto_scheduler.RecordToFile>` to log measurement records into a # file ``matmul.json``. The measurement records can be used to query the history # best, resume the search, and do more analyses later. # * see :any:`TuningOptions <auto_scheduler.TuningOptions>` for more parameters log_file = "matmul.json" tune_option = auto_scheduler.TuningOptions( num_measure_trials=10, measure_callbacks=[auto_scheduler.RecordToFile(log_file)], verbose=2, ) ################################################################################ # Run the search # -------------- # Now we get all inputs ready. Pretty simple, isn't it? We can kick off the # search and let the auto-scheduler do its magic. After some measurement # trials, we can load the best schedule from the log file and apply it. 
# Run auto-tuning (search) task.tune(tune_option) # Apply the best schedule sch, args = task.apply_best(log_file) ################################################################################ # Inspecting the Optimized Schedule # --------------------------------- # We can lower the schedule to see the IR after auto-scheduling. The # auto-scheduler correctly performs optimizations including multi-level tiling, # layout transformation, parallelization, vectorization, unrolling, and # operator fusion. print("Lowered TIR:") print(tvm.lower(sch, args, simple_mode=True)) ################################################################################ # Check correctness and evaluate performance # ------------------------------------------ # We build the binary and check its correctness and performance. func = tvm.build(sch, args, target) a_np = np.random.uniform(size=(N, L)).astype(np.float32) b_np = np.random.uniform(size=(L, M)).astype(np.float32) c_np = np.random.uniform(size=(N, M)).astype(np.float32) out_np = a_np.dot(b_np) + c_np dev = tvm.cpu() a_tvm = tvm.nd.array(a_np, device=dev) b_tvm = tvm.nd.array(b_np, device=dev) c_tvm = tvm.nd.array(c_np, device=dev) out_tvm = tvm.nd.empty(out_np.shape, device=dev) func(a_tvm, b_tvm, c_tvm, out_tvm) # Check results np.testing.assert_allclose(out_np, out_tvm.numpy(), rtol=1e-3) # Evaluate execution time. evaluator = func.time_evaluator(func.entry_name, dev, min_repeat_ms=500) print( "Execution time of this operator: %.3f ms" % (np.median(evaluator(a_tvm, b_tvm, c_tvm, out_tvm).results) * 1000) ) ################################################################################ # Using the record file # --------------------- # During the search, all measurement records are logged into the record file # ``matmul.json```. The measurement records can be used to re-apply search # results, resume the search, and perform other analyses. 
# # Here is an example where we load the best schedule from a file, and print the # equivalent python schedule API. This can be used for debugging and learning # the behavior of the auto-scheduler. print("Equivalent python schedule:") print(task.print_best(log_file)) ################################################################################ # A more complicated example is to resume the search. In this case, we need to # create the search policy and cost model by ourselves and resume the status of # search policy and cost model with the log file. In the example below we # resume the status and do more 5 trials. def resume_search(task, log_file): print("Resume search:") cost_model = auto_scheduler.XGBModel() cost_model.update_from_file(log_file) search_policy = auto_scheduler.SketchPolicy( task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(log_file)] ) tune_option = auto_scheduler.TuningOptions( num_measure_trials=5, measure_callbacks=[auto_scheduler.RecordToFile(log_file)] ) task.tune(tune_option, search_policy=search_policy) resume_search(task, log_file) ################################################################################ # Final Notes and Summary # ----------------------- # In this tutorial, we have shown how to use the TVM Auto-Scheduler to # automatically optimize a matrix multiplication, without the need to specify a # search template. It ends a series of examples that starts from the Tensor # Expression (TE) language that demonstrates how TVM can optimize computational # operations.
9,011
40.722222
107
py
tvm
tvm-main/gallery/tutorial/intro_topi.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-topi: Introduction to TOPI ==================== **Author**: `Ehsan M. Kermani <https://github.com/ehsanmok>`_ This is an introductory tutorial to TVM Operator Inventory (TOPI). TOPI provides numpy-style generic operations and schedules with higher abstractions than TVM. In this tutorial, we will see how TOPI can save us from writing boilerplate code in TVM. 
""" # sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True # sphinx_gallery_end_ignore import tvm import tvm.testing from tvm import te from tvm import topi import numpy as np ###################################################################### # Basic example # ------------- # Let's revisit the sum of rows operation (equivalent to :code:`B = numpy.sum(A, axis=1)`') \ # To compute the sum of rows of a two dimensional TVM tensor A, we should # specify the symbolic operation as well as schedule as follows # n = te.var("n") m = te.var("m") A = te.placeholder((n, m), name="A") k = te.reduce_axis((0, m), "k") B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B") s = te.create_schedule(B.op) ###################################################################### # and to examine the IR code in human readable format, we can do # print(tvm.lower(s, [A], simple_mode=True)) ###################################################################### # However, for such a common operation we had to define the reduce axis ourselves as well as explicit computation with # :code:`te.compute`. Imagine for more complicated operations how much details we need to provide. # Fortunately, we can replace those two lines with simple :code:`topi.sum` much like :code:`numpy.sum` # C = topi.sum(A, axis=1) ts = te.create_schedule(C.op) print(tvm.lower(ts, [A], simple_mode=True)) ###################################################################### # Numpy-style operator overloading # -------------------------------- # We can add two tensors using :code:`topi.broadcast_add` that have correct (broadcastable with specific) shapes. # Even shorter, TOPI provides operator overloading for such common operations. 
For example, # x, y = 100, 10 a = te.placeholder((x, y, y), name="a") b = te.placeholder((y, y), name="b") c = a + b # same as topi.broadcast_add d = a * b # same as topi.broadcast_mul ###################################################################### # Overloaded with the same syntax, TOPI handles broadcasting a primitive (`int`, `float`) to a tensor :code:`d - 3.14`. ###################################################################### # Generic schedules and fusing operations # --------------------------------------- # Up to now, we have seen an example of how TOPI can save us from writing explicit computations in lower level API. # But it doesn't stop here. Still we did the scheduling as before. TOPI also provides higher level # scheduling recipes depending on a given context. For example, for CUDA, # we can schedule the following series of operations ending with :code:`topi.sum` using only # :code:`topi.generic.schedule_reduce` # e = topi.elemwise_sum([c, d]) f = e / 2.0 g = topi.sum(f) with tvm.target.cuda(): sg = topi.cuda.schedule_reduce(g) print(tvm.lower(sg, [a, b], simple_mode=True)) ###################################################################### # As you can see, scheduled stages of computation have been accumulated and we can examine them by # print(sg.stages) ###################################################################### # We can test the correctness by comparing with :code:`numpy` result as follows # func = tvm.build(sg, [a, b, g], "cuda") dev = tvm.cuda(0) a_np = np.random.uniform(size=(x, y, y)).astype(a.dtype) b_np = np.random.uniform(size=(y, y)).astype(b.dtype) g_np = np.sum(np.add(a_np + b_np, a_np * b_np) / 2.0) a_nd = tvm.nd.array(a_np, dev) b_nd = tvm.nd.array(b_np, dev) g_nd = tvm.nd.array(np.zeros(g_np.shape, dtype=g_np.dtype), dev) func(a_nd, b_nd, g_nd) tvm.testing.assert_allclose(g_nd.numpy(), g_np, rtol=1e-5) ###################################################################### # TOPI also provides common neural 
nets operations such as _softmax_ with optimized schedule # tarray = te.placeholder((512, 512), name="tarray") softmax_topi = topi.nn.softmax(tarray) with tvm.target.Target("cuda"): sst = topi.cuda.schedule_softmax(softmax_topi) print(tvm.lower(sst, [tarray], simple_mode=True)) ###################################################################### # Fusing convolutions # ------------------- # We can fuse :code:`topi.nn.conv2d` and :code:`topi.nn.relu` together. # # .. note:: # # TOPI functions are all generic functions. They have different implementations # for different backends to optimize for performance. # For each backend, it is necessary to call them under a target scope for both # compute declaration and schedule. TVM will choose the right function to call with # the target information. data = te.placeholder((1, 3, 224, 224)) kernel = te.placeholder((10, 3, 5, 5)) with tvm.target.Target("cuda"): conv = topi.cuda.conv2d_nchw(data, kernel, 1, 2, 1) out = topi.nn.relu(conv) sconv = topi.cuda.schedule_conv2d_nchw([out]) print(tvm.lower(sconv, [data, kernel], simple_mode=True)) ###################################################################### # Summary # ------- # In this tutorial, we have seen # # - How to use TOPI API for common operations with numpy-style operators. # - How TOPI facilitates generic schedules and operator fusion for a context, to generate optimized kernel codes.
6,364
40.331169
119
py
tvm
tvm-main/gallery/tutorial/introduction.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Introduction ============ **Authors**: `Jocelyn Shiue <https://github.com/>`_, `Chris Hoge <https://github.com/hogepodge>`_, `Lianmin Zheng <https://github.com/merrymercy>`_ Apache TVM is an open source machine learning compiler framework for CPUs, GPUs, and machine learning accelerators. It aims to enable machine learning engineers to optimize and run computations efficiently on any hardware backend. The purpose of this tutorial is to take a guided tour through all of the major features of TVM by defining and demonstrating key concepts. A new user should be able to work through the tutorial from start to finish and be able to operate TVM for automatic model optimization, while having a basic understanding of the TVM architecture and how it works. Contents -------- #. :doc:`Introduction <introduction>` #. :doc:`Installing TVM <install>` #. :doc:`Compiling and Optimizing a Model with the Command Line Interface <tvmc_command_line_driver>` #. :doc:`Compiling and Optimizing a Model with the Python Interface <autotvm_relay_x86>` #. :doc:`Working with Operators Using Tensor Expression <tensor_expr_get_started>` #. :doc:`Optimizing Operators with Templates and AutoTVM <autotvm_matmul_x86>` #. 
:doc:`Optimizing Operators with Template-free AutoScheduler <auto_scheduler_matmul_x86>` #. :doc:`Cross Compilation and Remote Procedure Calls (RPC) <cross_compilation_and_rpc>` #. :doc:`Compiling Deep Learning Models for GPUs <relay_quick_start>` """ ################################################################################ # An Overview of TVM and Model Optimization # ========================================= # # The diagram below illustrates the steps a machine model takes as it is # transformed with the TVM optimizing compiler framework. # # .. image:: https://raw.githubusercontent.com/apache/tvm-site/main/images/tutorial/overview.png # :width: 100% # :alt: A High Level View of TVM # # 1. Import the model from a framework like *Tensorflow*, *PyTorch*, or *Onnx*. # The importer layer is where TVM can ingest models from other frameworks, like # Tensorflow, PyTorch, or ONNX. The level of support that TVM offers for each # frontend varies as we are constantly improving the open source project. If # you're having issues importing your model into TVM, you may want to try # converting it to ONNX. # # 2. Translate to *Relay*, TVM's high-level model language. # A model that has been imported into TVM is represented in Relay. Relay is a # functional language and intermediate representation (IR) for neural networks. # It has support for: # # - Traditional data flow-style representations # - Functional-style scoping, let-binding which makes it a fully featured # differentiable language # - Ability to allow the user to mix the two programming styles # # Relay applies graph-level optimization passes to optimize the model. # # 3. Lower to *Tensor Expression* (TE) representation. Lowering is when a # higher-level representation is transformed into a lower-level # representation. After applying the high-level optimizations, Relay # runs FuseOps pass to partition the model into many small subgraphs and lowers # the subgraphs to TE representation. 
Tensor Expression (TE) is a # domain-specific language for describing tensor computations. # TE also provides several *schedule* primitives to specify low-level loop # optimizations, such as tiling, vectorization, parallelization, # unrolling, and fusion. # To aid in the process of converting Relay representation into TE representation, # TVM includes a Tensor Operator Inventory (TOPI) that has pre-defined # templates of common tensor operators (e.g., conv2d, transpose). # # 4. Search for the best schedule using the auto-tuning module *AutoTVM* or *AutoScheduler*. # A schedule specifies the low-level loop optimizations for an operator or # subgraph defined in TE. Auto-tuning modules search for the best schedule # and compare them with cost models and on-device measurements. # There are two auto-tuning modules in TVM. # # - **AutoTVM**: A template-based auto-tuning module. It runs search algorithms # to find the best values for the tunable knobs in a user-defined template. # For common operators, their templates are already provided in TOPI. # - **AutoScheduler (a.k.a. Ansor)**: A template-free auto-tuning module. # It does not require pre-defined schedule templates. Instead, it generates # the search space automatically by analyzing the computation definition. # It then searches for the best schedule in the generated search space. # # 5. Choose the optimal configurations for model compilation. After tuning, the # auto-tuning module generates tuning records in JSON format. This step # picks the best schedule for each subgraph. # # 6. Lower to Tensor Intermediate Representation (TIR), TVM's low-level # intermediate representation. After selecting the optimal configurations # based on the tuning step, each TE subgraph is lowered to TIR and be # optimized by low-level optimization passes. Next, the optimized TIR is # lowered to the target compiler of the hardware platform. 
# This is the final code generation phase to produce an optimized model # that can be deployed into production. TVM supports several different # compiler backends including: # # - LLVM, which can target arbitrary microprocessor architecture including # standard x86 and ARM processors, AMDGPU and NVPTX code generation, and any # other platform supported by LLVM. # - Specialized compilers, such as NVCC, NVIDIA's compiler. # - Embedded and specialized targets, which are implemented through TVM's # Bring Your Own Codegen (BYOC) framework. # # 7. Compile down to machine code. At the end of this process, the # compiler-specific generated code can be lowered to machine code. # # TVM can compile models down to a linkable object module, which can then be # run with a lightweight TVM runtime that provides C APIs to dynamically # load the model, and entry points for other languages such as Python and # Rust. TVM can also build a bundled deployment in which the runtime is # combined with the model in a single package. # # The remainder of the tutorial will cover these aspects of TVM in more detail.
7,168
51.713235
101
py
tvm
tvm-main/gallery/how_to/work_with_microtvm/micro_autotune.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-micro-autotune: 6. Model Tuning with microTVM ============================= **Authors**: `Andrew Reusch <https://github.com/areusch>`_, `Mehrdad Hessar <https://github.com/mehrdadh>`_ This tutorial explains how to autotune a model using the C runtime. """ ###################################################################### # # .. include:: ../../../../gallery/how_to/work_with_microtvm/install_dependencies.rst # # You can skip the following section (installing Zephyr) if the following flag is False. # Installing Zephyr takes ~20 min. import os use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW")) ###################################################################### # # .. include:: ../../../../gallery/how_to/work_with_microtvm/install_zephyr.rst # ###################################################################### # Import Python dependencies # ------------------------------- # import json import numpy as np import pathlib import tvm from tvm.relay.backend import Runtime import tvm.micro.testing #################### # Defining the model #################### # # To begin with, define a model in Relay to be executed on-device. 
Then create an IRModule from relay model and # fill parameters with random numbers. # data_shape = (1, 3, 10, 10) weight_shape = (6, 3, 5, 5) data = tvm.relay.var("data", tvm.relay.TensorType(data_shape, "float32")) weight = tvm.relay.var("weight", tvm.relay.TensorType(weight_shape, "float32")) y = tvm.relay.nn.conv2d( data, weight, padding=(2, 2), kernel_size=(5, 5), kernel_layout="OIHW", out_dtype="float32", ) f = tvm.relay.Function([data, weight], y) relay_mod = tvm.IRModule.from_expr(f) relay_mod = tvm.relay.transform.InferType()(relay_mod) weight_sample = np.random.rand( weight_shape[0], weight_shape[1], weight_shape[2], weight_shape[3] ).astype("float32") params = {"weight": weight_sample} ####################### # Defining the target ####################### # Now we define the TVM target that describes the execution environment. This looks very similar # to target definitions from other microTVM tutorials. Alongside this we pick the C Runtime to code # generate our model against. # # When running on physical hardware, choose a target and a board that # describe the hardware. There are multiple hardware targets that could be selected from # PLATFORM list in this tutorial. You can chose the platform by passing --platform argument when running # this tutorial. # RUNTIME = Runtime("crt", {"system-lib": True}) TARGET = tvm.micro.testing.get_target("crt") # Compiling for physical hardware # -------------------------------------------------------------------------- # When running on physical hardware, choose a TARGET and a BOARD that describe the hardware. The # STM32L4R5ZI Nucleo target and board is chosen in the example below. if use_physical_hw: BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_l4r5zi") SERIAL = os.getenv("TVM_MICRO_SERIAL", default=None) TARGET = tvm.micro.testing.get_target("zephyr", BOARD) ######################### # Extracting tuning tasks ######################### # Not all operators in the Relay program printed above can be tuned. 
Some are so trivial that only # a single implementation is defined; others don't make sense as tuning tasks. Using # `extract_from_program`, you can produce a list of tunable tasks. # # Because task extraction involves running the compiler, we first configure the compiler's # transformation passes; we'll apply the same configuration later on during autotuning. # pass_context = tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}) with pass_context: tasks = tvm.autotvm.task.extract_from_program(relay_mod["main"], {}, TARGET) assert len(tasks) > 0 ###################### # Configuring microTVM ###################### # Before autotuning, we need to define a module loader and then pass that to # a `tvm.autotvm.LocalBuilder`. Then we create a `tvm.autotvm.LocalRunner` and use # both builder and runner to generates multiple measurements for auto tunner. # # In this tutorial, we have the option to use x86 host as an example or use different targets # from Zephyr RTOS. If you choose pass `--platform=host` to this tutorial it will uses x86. You can # choose other options by choosing from `PLATFORM` list. 
# module_loader = tvm.micro.AutoTvmModuleLoader( template_project_dir=pathlib.Path(tvm.micro.get_microtvm_template_projects("crt")), project_options={"verbose": False}, ) builder = tvm.autotvm.LocalBuilder( n_parallel=1, build_kwargs={"build_option": {"tir.disable_vectorize": True}}, do_fork=True, build_func=tvm.micro.autotvm_build_func, runtime=RUNTIME, ) runner = tvm.autotvm.LocalRunner(number=1, repeat=1, timeout=100, module_loader=module_loader) measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner) # Compiling for physical hardware if use_physical_hw: module_loader = tvm.micro.AutoTvmModuleLoader( template_project_dir=pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")), project_options={ "board": BOARD, "verbose": False, "project_type": "host_driven", "serial_number": SERIAL, }, ) builder = tvm.autotvm.LocalBuilder( n_parallel=1, build_kwargs={"build_option": {"tir.disable_vectorize": True}}, do_fork=False, build_func=tvm.micro.autotvm_build_func, runtime=RUNTIME, ) runner = tvm.autotvm.LocalRunner(number=1, repeat=1, timeout=100, module_loader=module_loader) measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner) ########################## # Run Autotuning ########################## # Now we can run autotuning separately on each extracted task on microTVM device. # autotune_log_file = pathlib.Path("microtvm_autotune.log.txt") if os.path.exists(autotune_log_file): os.remove(autotune_log_file) num_trials = 10 for task in tasks: tuner = tvm.autotvm.tuner.GATuner(task) tuner.tune( n_trial=num_trials, measure_option=measure_option, callbacks=[ tvm.autotvm.callback.log_to_file(str(autotune_log_file)), tvm.autotvm.callback.progress_bar(num_trials, si_prefix="M"), ], si_prefix="M", ) ############################ # Timing the untuned program ############################ # For comparison, let's compile and run the graph without imposing any autotuning schedules. 
TVM # will select a randomly-tuned implementation for each operator, which should not perform as well as # the tuned operator. # with pass_context: lowered = tvm.relay.build(relay_mod, target=TARGET, runtime=RUNTIME, params=params) temp_dir = tvm.contrib.utils.tempdir() project = tvm.micro.generate_project( str(tvm.micro.get_microtvm_template_projects("crt")), lowered, temp_dir / "project", {"verbose": False}, ) # Compiling for physical hardware if use_physical_hw: temp_dir = tvm.contrib.utils.tempdir() project = tvm.micro.generate_project( str(tvm.micro.get_microtvm_template_projects("zephyr")), lowered, temp_dir / "project", { "board": BOARD, "verbose": False, "project_type": "host_driven", "serial_number": SERIAL, "config_main_stack_size": 4096, }, ) project.build() project.flash() with tvm.micro.Session(project.transport()) as session: debug_module = tvm.micro.create_local_debug_executor( lowered.get_graph_json(), session.get_system_lib(), session.device ) debug_module.set_input(**lowered.get_params()) print("########## Build without Autotuning ##########") debug_module.run() del debug_module ########################## # Timing the tuned program ########################## # Once autotuning completes, you can time execution of the entire program using the Debug Runtime: with tvm.autotvm.apply_history_best(str(autotune_log_file)): with pass_context: lowered_tuned = tvm.relay.build(relay_mod, target=TARGET, runtime=RUNTIME, params=params) temp_dir = tvm.contrib.utils.tempdir() project = tvm.micro.generate_project( str(tvm.micro.get_microtvm_template_projects("crt")), lowered_tuned, temp_dir / "project", {"verbose": False}, ) # Compiling for physical hardware if use_physical_hw: temp_dir = tvm.contrib.utils.tempdir() project = tvm.micro.generate_project( str(tvm.micro.get_microtvm_template_projects("zephyr")), lowered_tuned, temp_dir / "project", { "board": BOARD, "verbose": False, "project_type": "host_driven", "serial_number": SERIAL, "config_main_stack_size": 
4096, }, ) project.build() project.flash() with tvm.micro.Session(project.transport()) as session: debug_module = tvm.micro.create_local_debug_executor( lowered_tuned.get_graph_json(), session.get_system_lib(), session.device ) debug_module.set_input(**lowered_tuned.get_params()) print("########## Build with Autotuning ##########") debug_module.run() del debug_module
10,068
33.132203
111
py
tvm
tvm-main/gallery/how_to/work_with_microtvm/micro_mlperftiny.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-micro-mlperftiny: 8. Creating Your MLPerfTiny Submission with microTVM ==================================================== **Authors**: `Mehrdad Hessar <https://github.com/mehrdadh>`_ This tutorial is showcasing building an MLPerfTiny submission using microTVM. This tutorial shows the steps to import a TFLite model from MLPerfTiny benchmark models, compile it with TVM and generate a Zephyr project which can be flashed to a Zephyr supported board to benchmark the model using EEMBC runner. """ ###################################################################### # # .. include:: ../../../../gallery/how_to/work_with_microtvm/install_dependencies.rst # import os import pathlib import tarfile import tempfile import shutil ###################################################################### # # .. include:: ../../../../gallery/how_to/work_with_microtvm/install_zephyr.rst # ###################################################################### # # **Note:** Install CMSIS-NN only if you are interested to generate this submission # using CMSIS-NN code generator. # ###################################################################### # # .. 
include:: ../../../../gallery/how_to/work_with_microtvm/install_cmsis.rst # ###################################################################### # Import Python dependencies # ------------------------------- # import tensorflow as tf import numpy as np import tvm from tvm import relay from tvm.relay.backend import Executor, Runtime from tvm.contrib.download import download_testdata from tvm.micro import export_model_library_format import tvm.micro.testing from tvm.micro.testing.utils import ( create_header_file, mlf_extract_workspace_size_bytes, ) ###################################################################### # Import Visual Wake Word Model # -------------------------------------------------------------------- # # To begin with, download and import the Visual Wake Word (VWW) TFLite model from MLPerfTiny. # This model is originally from `MLPerf Tiny repository <https://github.com/mlcommons/tiny>`_. # We also capture metadata information from the TFLite model such as input/output name, # quantization parameters, etc. which will be used in following steps. # # We use indexing for various models to build the submission. The indices are defined as follows: # To build another model, you need to update the model URL, the short name and index number. # # * Keyword Spotting(KWS) 1 # * Visual Wake Word(VWW) 2 # * Anomaly Detection(AD) 3 # * Image Classification(IC) 4 # # If you would like to build the submission with CMSIS-NN, modify USE_CMSIS environment variable. # # .. 
code-block:: bash # # export USE_CMSIS=1 # MODEL_URL = "https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/visual_wake_words/trained_models/vww_96_int8.tflite" MODEL_PATH = download_testdata(MODEL_URL, "vww_96_int8.tflite", module="model") MODEL_SHORT_NAME = "VWW" MODEL_INDEX = 2 USE_CMSIS = os.environ.get("TVM_USE_CMSIS", False) tflite_model_buf = open(MODEL_PATH, "rb").read() try: import tflite tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0) except AttributeError: import tflite.Model tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0) interpreter = tf.lite.Interpreter(model_path=str(MODEL_PATH)) interpreter.allocate_tensors() input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() input_name = input_details[0]["name"] input_shape = tuple(input_details[0]["shape"]) input_dtype = np.dtype(input_details[0]["dtype"]).name output_name = output_details[0]["name"] output_shape = tuple(output_details[0]["shape"]) output_dtype = np.dtype(output_details[0]["dtype"]).name # We extract quantization information from TFLite model. # This is required for all models except Anomaly Detection, # because for other models we send quantized data to interpreter # from host, however, for AD model we send floating data and quantization # happens on the microcontroller. if MODEL_SHORT_NAME != "AD": quant_output_scale = output_details[0]["quantization_parameters"]["scales"][0] quant_output_zero_point = output_details[0]["quantization_parameters"]["zero_points"][0] relay_mod, params = relay.frontend.from_tflite( tflite_model, shape_dict={input_name: input_shape}, dtype_dict={input_name: input_dtype} ) ###################################################################### # Defining Target, Runtime and Executor # -------------------------------------------------------------------- # # Now we need to define the target, runtime and executor to compile this model. 
In this tutorial, # we use Ahead-of-Time (AoT) compilation and we build a standalone project. This is different # than using AoT with host-driven mode where the target would communicate with host using host-driven # AoT executor to run inference. # # Use the C runtime (crt) RUNTIME = Runtime("crt") # Use the AoT executor with `unpacked-api=True` and `interface-api=c`. `interface-api=c` forces # the compiler to generate C type function APIs and `unpacked-api=True` forces the compiler # to generate minimal unpacked format inputs which reduces the stack memory usage on calling # inference layers of the model. EXECUTOR = Executor( "aot", {"unpacked-api": True, "interface-api": "c", "workspace-byte-alignment": 8}, ) # Select a Zephyr board BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_l4r5zi") # Get the the full target description using the BOARD TARGET = tvm.micro.testing.get_target("zephyr", BOARD) ###################################################################### # Compile the model and export model library format # -------------------------------------------------------------------- # # Now, we compile the model for the target. Then, we generate model # library format for the compiled model. We also need to calculate the # workspace size that is required for the compiled model. 
# # config = {"tir.disable_vectorize": True} if USE_CMSIS: from tvm.relay.op.contrib import cmsisnn config["relay.ext.cmsisnn.options"] = {"mcpu": TARGET.mcpu} relay_mod = cmsisnn.partition_for_cmsisnn(relay_mod, params, mcpu=TARGET.mcpu) with tvm.transform.PassContext(opt_level=3, config=config): module = tvm.relay.build( relay_mod, target=TARGET, params=params, runtime=RUNTIME, executor=EXECUTOR ) temp_dir = tvm.contrib.utils.tempdir() model_tar_path = temp_dir / "model.tar" export_model_library_format(module, model_tar_path) workspace_size = mlf_extract_workspace_size_bytes(model_tar_path) ###################################################################### # Generate input/output header files # -------------------------------------------------------------------- # # To create a microTVM standalone project with AoT, we need to generate # input and output header files. These header files are used to connect # the input and output API from generated code to the rest of the # standalone project. For this specific submission, we only need to generate # output header file since the input API call is handled differently. # extra_tar_dir = tvm.contrib.utils.tempdir() extra_tar_file = extra_tar_dir / "extra.tar" with tarfile.open(extra_tar_file, "w:gz") as tf: create_header_file( "output_data", np.zeros( shape=output_shape, dtype=output_dtype, ), "include/tvm", tf, ) ###################################################################### # Create the project, build and prepare the project tar file # -------------------------------------------------------------------- # # Now that we have the compiled model as a model library format, # we can generate the full project using Zephyr template project. First, # we prepare the project options, then build the project. Finally, we # cleanup the temporary files and move the submission project to the # current working directory which could be downloaded and used on # your development kit. 
# input_total_size = 1 for i in range(len(input_shape)): input_total_size *= input_shape[i] template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) project_options = { "extra_files_tar": str(extra_tar_file), "project_type": "mlperftiny", "board": BOARD, "compile_definitions": [ f"-DWORKSPACE_SIZE={workspace_size + 512}", # Memory workspace size, 512 is a temporary offset # since the memory calculation is not accurate. f"-DTARGET_MODEL={MODEL_INDEX}", # Sets the model index for project compilation. f"-DTH_MODEL_VERSION=EE_MODEL_VERSION_{MODEL_SHORT_NAME}01", # Sets model version. This is required by MLPerfTiny API. f"-DMAX_DB_INPUT_SIZE={input_total_size}", # Max size of the input data array. ], } if MODEL_SHORT_NAME != "AD": project_options["compile_definitions"].append(f"-DOUT_QUANT_SCALE={quant_output_scale}") project_options["compile_definitions"].append(f"-DOUT_QUANT_ZERO={quant_output_zero_point}") if USE_CMSIS: project_options["compile_definitions"].append(f"-DCOMPILE_WITH_CMSISNN=1") # Note: You might need to adjust this based on the board that you are using. 
project_options["config_main_stack_size"] = 4000 if USE_CMSIS: project_options["cmsis_path"] = os.environ.get("CMSIS_PATH", "/content/cmsis") generated_project_dir = temp_dir / "project" project = tvm.micro.project.generate_project_from_mlf( template_project_path, generated_project_dir, model_tar_path, project_options ) project.build() # Cleanup the build directory and extra artifacts shutil.rmtree(generated_project_dir / "build") (generated_project_dir / "model.tar").unlink() project_tar_path = pathlib.Path(os.getcwd()) / "project.tar" with tarfile.open(project_tar_path, "w:tar") as tar: tar.add(generated_project_dir, arcname=os.path.basename("project")) print(f"The generated project is located here: {project_tar_path}") ###################################################################### # Use this project with your board # -------------------------------------------------------------------- # # Now that we have the generated project, you can use this project locally # to flash your board and prepare it for EEMBC runner software. # To do this follow these steps: # # .. code-block:: bash # # tar -xf project.tar # cd project # mkdir build # cmake .. # make -j2 # west flash # # Now you can connect your board to EEMBC runner using this # `instructions <https://github.com/eembc/energyrunner>`_ # and benchmark this model on your board. #
11,521
36.777049
163
py
tvm
tvm-main/gallery/how_to/work_with_microtvm/micro_ethosu.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-micro-ethosu: 7. Running TVM on bare metal Arm(R) Cortex(R)-M55 CPU and Ethos(TM)-U55 NPU with CMSIS-NN ========================================================================================= **Author**: `Grant Watson <https://github.com/grant-arm>`_ This section contains an example of how to use TVM to run a model on an Arm(R) Cortex(R)-M55 CPU and Ethos(TM)-U55 NPU with CMSIS-NN, using bare metal. The Cortex(R)-M55 is a small, low-power CPU designed for use in embedded devices. CMSIS-NN is a collection of kernels optimized for Arm(R) Cortex(R)-M CPUs. The Ethos(TM)-U55 is a microNPU, specifically designed to accelerate ML inference in resource-constrained embedded devices. In order to run the demo application without having access to a Cortex(R)-M55 and Ethos(TM)-U55 development board, we will be running our sample application on a Fixed Virtual Platform (FVP). The FVP based on Arm(R) Corstone(TM)-300 software, models a hardware system containing a Cortex(R)-M55 and Ethos(TM)-U55. It provides a programmer's view that is suitable for software development. 
In this tutorial, we will be compiling a MobileNet v1 model and instructing TVM to offload operators to the Ethos(TM)-U55 where possible. """ ################################################################################ # Obtaining TVM # ------------- # # To obtain TVM for you platform, please visit https://tlcpack.ai/ and follow the # instructions. Once TVM has been installed correctly, you should have access to # ``tvmc`` from the command line. # # Typing ``tvmc`` on the command line should display the following: # # .. code-block:: text # # usage: tvmc [-h] [-v] [--version] {tune,compile,run} ... # # TVM compiler driver # # optional arguments: # -h, --help show this help message and exit # -v, --verbose increase verbosity # --version print the version and exit # # commands: # {tune,compile,run} # tune auto-tune a model # compile compile a model. # run run a compiled module # # TVMC - TVM driver command-line interface # ################################################################################ # Installing additional python dependencies # ----------------------------------------- # # In order to run the demo, you will need some additional python packages. # These can be installed by using the requirements.txt file below: # # .. code-block:: text # :caption: requirements.txt # :name: requirements.txt # # attrs==21.2.0 # cloudpickle==2.0.0 # decorator==5.1.0 # ethos-u-vela==3.8.0 # flatbuffers==2.0.7 # lxml==4.6.3 # nose==1.3.7 # numpy==1.19.5 # Pillow==8.3.2 # psutil==5.8.0 # scipy==1.5.4 # tflite==2.4.0 # tornado==6.1 # # These packages can be installed by running the following from the command line: # # .. code-block:: bash # # pip install -r requirements.txt # ################################################################################ # Obtaining the Model # ------------------- # # For this tutorial, we will be working with MobileNet v1. 
# MobileNet v1 is a convolutional neural network designed to classify images, # that has been optimized for edge devices. The model we will be using has been # pre-trained to classify images into one of 1001 different categories. # The network has an input image size of 224x224 so any input images will need # to be resized to those dimensions before being used. # # For this tutorial we will be using the model in Tflite format. # # .. code-block:: bash # # mkdir -p ./build # cd build # wget https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz # gunzip mobilenet_v1_1.0_224_quant.tgz # tar xvf mobilenet_v1_1.0_224_quant.tar # ###################################################################################### # Compiling the model for Arm(R) Cortex(R)-M55 CPU and Ethos(TM)-U55 NPU with CMSIS-NN # ------------------------------------------------------------------------------------ # # Once we've downloaded the MobileNet v1 model, the next step is to compile it. # To accomplish that, we are going to use ``tvmc compile``. The output we get from # the compilation process is a TAR package of the model compiled to the Model # Library Format (MLF) for our target platform. We will be able to run that model # on our target device using the TVM runtime. # # .. code-block:: bash # # tvmc compile --target=ethos-u,cmsis-nn,c \ # --target-ethos-u-accelerator_config=ethos-u55-256 \ # --target-cmsis-nn-mcpu=cortex-m55 \ # --target-c-mcpu=cortex-m55 \ # --runtime=crt \ # --executor=aot \ # --executor-aot-interface-api=c \ # --executor-aot-unpacked-api=1 \ # --pass-config tir.usmp.enable=1 \ # --pass-config tir.usmp.algorithm=hill_climb \ # --pass-config tir.disable_storage_rewrite=1 \ # --pass-config tir.disable_vectorize=1 \ # ./mobilenet_v1_1.0_224_quant.tflite \ # --output-format=mlf # ################################################################################ # .. 
note:: Explanation of tvmc compile arguments: # # * ``--target=ethos-u,cmsis-nn,c`` : offload operators to the microNPU where possible, falling back to CMSIS-NN and finally generated C code where an operator is not supported on the microNPU.. # # * ``--target-ethos-u-accelerator_config=ethos-u55-256`` : specifies the microNPU configuration # # * ``--target-c-mcpu=cortex-m55`` : Cross-compile for the Cortex(R)-M55. # # * ``--runtime=crt`` : Generate glue code to allow operators to work with C runtime. # # * ``--executor=aot`` : Use Ahead Of Time compiltaion instead of the Graph Executor. # # * ``--executor-aot-interface-api=c`` : Generate a C-style interface with structures designed for integrating into C apps at the boundary. # # * ``--executor-aot-unpacked-api=1`` : Use the unpacked API internally. # # * ``--pass-config tir.usmp.enable=1`` : Enable Unified Static Memory Planning # # * ``--pass-config tir.usmp.algorithm=hill_climb`` : Use the hill-climb algorithm for USMP # # * ``--pass-config tir.disable_storage_rewrite=1`` : Disable storage rewrite # # * ``--pass-config tir.disable_vectorize=1`` : Disable vectorize since there are no standard vectorized types in C. # # * ``./mobilenet_v1_1.0_224_quant.tflite`` : The TFLite model that is being compiled. # # * ``--output-format=mlf`` : Output should be generated in the Model Library Format. # ################################################################################ # .. note:: If you don't want to make use of the microNPU and want to offload # operators to CMSIS-NN only: # # * Use ``--target=cmsis-nn,c`` in place of ``--target=ethos-u,cmsis-nn,c`` # # * Remove the microNPU config parameter ``--target-ethos-u-accelerator_config=ethos-u55-256`` # ################################################################################ # Extracting the generated code into the current directory # -------------------------------------------------------- # # .. 
code-block:: bash # # tar xvf module.tar # ################################################################################ # Getting ImageNet labels # ----------------------- # # When running MobileNet v1 on an image, the result is an index in the range 0 to # 1000. In order to make our application a little more user friendly, instead of # just displaying the category index, we will display the associated label. We # will download these image labels into a text file now and use a python script # to include them in our C application later. # # .. code-block:: bash # # curl -sS https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/lite/java/demo/app/src/main/assets/labels_mobilenet_quant_v1_224.txt \ # -o ./labels_mobilenet_quant_v1_224.txt # ################################################################################ # Getting the input image # ----------------------- # # As input for this tutorial, we will use the image of a cat, but you can # substitute an image of your choosing. # # .. image:: https://s3.amazonaws.com/model-server/inputs/kitten.jpg # :height: 224px # :width: 224px # :align: center # # We download the image into the build directory and we will use a python script # in the next step to convert the image into an array of bytes in a C header file. # # .. code-block:: bash # # curl -sS https://s3.amazonaws.com/model-server/inputs/kitten.jpg -o ./kitten.jpg # ################################################################################ # Pre-processing the image # ------------------------ # # The following script will create 2 C header files in the src directory: # # * ``inputs.h`` - The image supplied as an argument to the script will be converted # to an array of integers for input to our MobileNet v1 model. # * ``outputs.h`` - An integer array of zeroes will reserve 1001 integer values # for the output of inference. # # .. 
code-block:: python # :caption: convert_image.py # :name: convert_image.py # # #!python ./convert_image.py # import os # import pathlib # import re # import sys # from PIL import Image # import numpy as np # # # def create_header_file(name, section, tensor_name, tensor_data, output_path): # """ # This function generates a header file containing the data from the numpy array provided. # """ # file_path = pathlib.Path(f"{output_path}/" + name).resolve() # # Create header file with npy_data as a C array # raw_path = file_path.with_suffix(".h").resolve() # with open(raw_path, "w") as header_file: # header_file.write( # "#include <tvmgen_default.h>\n" # + f"const size_t {tensor_name}_len = {tensor_data.size};\n" # + f'uint8_t {tensor_name}[] __attribute__((section("{section}"), aligned(16))) = "' # ) # data_hexstr = tensor_data.tobytes().hex() # for i in range(0, len(data_hexstr), 2): # header_file.write(f"\\x{data_hexstr[i:i+2]}") # header_file.write('";\n\n') # # # def create_headers(image_name): # """ # This function generates C header files for the input and output arrays required to run inferences # """ # img_path = os.path.join("./", f"{image_name}") # # # Resize image to 224x224 # resized_image = Image.open(img_path).resize((224, 224)) # img_data = np.asarray(resized_image).astype("float32") # # # Convert input to NCHW # img_data = np.transpose(img_data, (2, 0, 1)) # # # Create input header file # input_data = img_data.astype(np.uint8) # create_header_file("inputs", "ethosu_scratch", "input", input_data, "./include") # # Create output header file # output_data = np.zeros([1001], np.uint8) # create_header_file( # "outputs", # "output_data_sec", # "output", # output_data, # "./include", # ) # # # if __name__ == "__main__": # create_headers(sys.argv[1]) # # Run the script from the command line: # # .. 
code-block:: bash # # python convert_image.py ./kitten.jpg ################################################################################ # Pre-processing the labels # ------------------------- # # The following script will create a ``labels.h`` header file in the src directory. # The labels.txt file that we downloaded previously will be turned # into an array of strings. This array will be used to display the label that # our image has been classified as. # # .. code-block:: python # :caption: convert_labels.py # :name: convert_labels.py # # #!python ./convert_labels.py # import os # import pathlib # import sys # # # def create_labels_header(labels_file, section, output_path): # """ # This function generates a header file containing the ImageNet labels as an array of strings # """ # labels_path = pathlib.Path(labels_file).resolve() # file_path = pathlib.Path(f"{output_path}/labels.h").resolve() # # with open(labels_path) as f: # labels = f.readlines() # # with open(file_path, "w") as header_file: # header_file.write(f'char* labels[] __attribute__((section("{section}"), aligned(16))) = {{') # # for _, label in enumerate(labels): # header_file.write(f'"{label.rstrip()}",') # # header_file.write("};\n") # # # if __name__ == "__main__": # create_labels_header(sys.argv[1], "ethosu_scratch", "./include") # # Run the script from the command line: # # .. code-block:: bash # # python convert_labels.py ################################################################################ # Writing the demo application # ---------------------------- # # The following C application will run a single inference of the MobileNet v1 # model on the image that we downloaded and converted to an array of integers # previously. Since the model was compiled with a target of "ethos-u ...", # operators supported by the Ethos(TM)-U55 NPU will be offloaded for acceleration. 
# Once the application is built and run, our test image should be correctly # classied as a "tabby" and the result should be displayed on the console. # This file should be placed in ``./src`` # # .. code-block:: c # :caption: demo.c # :name: demo.c # # #include <stdio.h> # #include <tvm_runtime.h> # # #include "ethosu_mod.h" # #include "uart_stdout.h" # # // Header files generated by convert_image.py and convert_labels.py # #include "inputs.h" # #include "labels.h" # #include "outputs.h" # # int abs(int v) { return v * ((v > 0) - (v < 0)); } # # int main(int argc, char** argv) { # UartStdOutInit(); # printf("Starting Demo\n"); # EthosuInit(); # # printf("Allocating memory\n"); # StackMemoryManager_Init(&app_workspace, g_aot_memory, WORKSPACE_SIZE); # # printf("Running inference\n"); # struct tvmgen_default_outputs outputs = { # .output = output, # }; # struct tvmgen_default_inputs inputs = { # .input = input, # }; # struct ethosu_driver* driver = ethosu_reserve_driver(); # struct tvmgen_default_devices devices = { # .ethos_u = driver, # }; # tvmgen_default_run(&inputs, &outputs, &devices); # ethosu_release_driver(driver); # # // Calculate index of max value # uint8_t max_value = 0; # int32_t max_index = -1; # for (unsigned int i = 0; i < output_len; ++i) { # if (output[i] > max_value) { # max_value = output[i]; # max_index = i; # } # } # printf("The image has been classified as '%s'\n", labels[max_index]); # # // The FVP will shut down when it receives "EXITTHESIM" on the UART # printf("EXITTHESIM\n"); # while (1 == 1) # ; # return 0; # } # # # In addition, you will need these header files from github in your ``./include`` directory: # # `include files <https://github.com/apache/tvm/tree/main/apps/microtvm/ethosu/include>`_ ################################################################################ # .. 
note:: # # If you'd like to use FreeRTOS for task scheduling and queues, a sample application can be found here # `demo_freertos.c <https://github.com/apache/tvm/blob/main/apps/microtvm/ethosu/src/demo_freertos.c>` ################################################################################ # Creating the linker script # -------------------------- # # We need to create a linker script that will be used when we build our application # in the following section. The linker script tells the linker where everything # should be placed in memory. The corstone300.ld linker script below should be # placed in your working directory. # # An example linker script for the FVP can be found here # `corstone300.ld <https://github.com/apache/tvm/blob/main/apps/microtvm/ethosu/corstone300.ld>`_ ################################################################################ # .. note:: # # The code generated by TVM will place the model weights and the Arm(R) # Ethos(TM)-U55 command stream in a section named ``ethosu_scratch``. # For a model the size of MobileNet v1, the weights and command stream will not # fit into the limited SRAM available. For this reason it's important that the # linker script places the ``ethosu_scratch`` section into DRAM (DDR). ################################################################################ # .. note:: # # Before building and running the application, you will need to update your # PATH environment variable to include the path to cmake 3.19.5 and the FVP. # For example if you've installed these in ``/opt/arm`` , then you would do # the following: # # ``export PATH=/opt/arm/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4:/opt/arm/cmake/bin:$PATH`` # ################################################################################ # Building the demo application using make # ---------------------------------------- # # We can now build the demo application using make. 
The Makefile should be placed # in your working directory before running ``make`` on the command line: # # An example Makefile can be found here: # `Makefile <https://github.com/apache/tvm/blob/main/apps/microtvm/ethosu/Makefile>`_ ################################################################################ # .. note:: # # If you're using FreeRTOS, the Makefile builds it from the specified FREERTOS_PATH: # ``make FREERTOS_PATH=<FreeRTOS directory>`` # ################################################################################ # Running the demo application # ---------------------------- # # Finally, we can run our demo appliction on the Fixed Virtual Platform (FVP), # by using the following command: # # .. code-block:: bash # # FVP_Corstone_SSE-300_Ethos-U55 -C cpu0.CFGDTCMSZ=15 \ # -C cpu0.CFGITCMSZ=15 -C mps3_board.uart0.out_file=\"-\" -C mps3_board.uart0.shutdown_tag=\"EXITTHESIM\" \ # -C mps3_board.visualisation.disable-visualisation=1 -C mps3_board.telnetterminal0.start_telnet=0 \ # -C mps3_board.telnetterminal1.start_telnet=0 -C mps3_board.telnetterminal2.start_telnet=0 -C mps3_board.telnetterminal5.start_telnet=0 \ # -C ethosu.extra_args="--fast" \ # -C ethosu.num_macs=256 ./build/demo # # You should see the following output displayed in your console window: # # .. code-block:: text # # telnetterminal0: Listening for serial connection on port 5000 # telnetterminal1: Listening for serial connection on port 5001 # telnetterminal2: Listening for serial connection on port 5002 # telnetterminal5: Listening for serial connection on port 5003 # # Ethos-U rev dedfa618 --- Jan 12 2021 23:03:55 # (C) COPYRIGHT 2019-2021 Arm Limited # ALL RIGHTS RESERVED # # Starting Demo # ethosu_init. base_address=0x48102000, fast_memory=0x0, fast_memory_size=0, secure=1, privileged=1 # ethosu_register_driver: New NPU driver at address 0x20000de8 is registered. 
# CMD=0x00000000 # Soft reset NPU # Allocating memory # Running inference # ethosu_find_and_reserve_driver - Driver 0x20000de8 reserved. # ethosu_invoke # CMD=0x00000004 # QCONFIG=0x00000002 # REGIONCFG0=0x00000003 # REGIONCFG1=0x00000003 # REGIONCFG2=0x00000013 # REGIONCFG3=0x00000053 # REGIONCFG4=0x00000153 # REGIONCFG5=0x00000553 # REGIONCFG6=0x00001553 # REGIONCFG7=0x00005553 # AXI_LIMIT0=0x0f1f0000 # AXI_LIMIT1=0x0f1f0000 # AXI_LIMIT2=0x0f1f0000 # AXI_LIMIT3=0x0f1f0000 # ethosu_invoke OPTIMIZER_CONFIG # handle_optimizer_config: # Optimizer release nbr: 0 patch: 1 # Optimizer config cmd_stream_version: 0 macs_per_cc: 8 shram_size: 48 custom_dma: 0 # Optimizer config Ethos-U version: 1.0.6 # Ethos-U config cmd_stream_version: 0 macs_per_cc: 8 shram_size: 48 custom_dma: 0 # Ethos-U version: 1.0.6 # ethosu_invoke NOP # ethosu_invoke NOP # ethosu_invoke NOP # ethosu_invoke COMMAND_STREAM # handle_command_stream: cmd_stream=0x61025be0, cms_length 1181 # QBASE=0x0000000061025be0, QSIZE=4724, base_pointer_offset=0x00000000 # BASEP0=0x0000000061026e60 # BASEP1=0x0000000060002f10 # BASEP2=0x0000000060002f10 # BASEP3=0x0000000061000fb0 # BASEP4=0x0000000060000fb0 # CMD=0x000Interrupt. status=0xffff0022, qread=4724 # CMD=0x00000006 # 00006 # CMD=0x0000000c # ethosu_release_driver - Driver 0x20000de8 released # The image has been classified as 'tabby' # EXITTHESIM # Info: /OSCI/SystemC: Simulation stopped by user. # # You should see near the end of the output that the image has been correctly # classified as 'tabby'.
22,079
37.4
196
py
tvm
tvm-main/gallery/how_to/work_with_microtvm/micro_custom_ide.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-micro-ide: 9. Bring microTVM to your own development environment ====================================================== **Author**: `Mohamad Katanbaf <https://github.com/mkatanbaf>`_ This tutorial describes the steps required to integrate a model compiled with microTVM into a custom development environment. We use `STM32CubeIDE <https://www.st.com/en/development-tools/stm32cubeide.html>`_, as the target IDE in this tutorial, but we do not rely on any specific feature of this IDE and integrating microTVM in other IDEs would be similar. We also use the Visual Wake Word (VWW) model from MLPerf Tiny and the nucleo_l4r5zi board here, but the same steps can be used for any other model or target MCU. If you want to use another target MCU with the vww model, we recommend a cortex-M4 or cortex-M7 device with ~512 KB and ~256 KB of Flash and RAM respectively. Here is a brief overview of the steps that we would take in this tutorial. 1. 
We start by importing the model, compiling it using TVM and generating the `Model Library Format <https://tvm.apache.org/docs/arch/model_library_format.html>`_ (MLF) tar-file that includes the generated code for the model as well as all the required TVM dependencies. 2. We also add two sample images in binary format (one person and one not-person sample) to the .tar file for evaluating the model. 3. Next we use the stmCubeMX to generate the initialization code for the project in stmCube IDE. 4. After that, we include our MLF file and the required CMSIS libraries in the project and build it. 5. Finally, we flash the device and evaluate the model performance on our sample images. Let's Begin. """ ###################################################################### # Install microTVM Python dependencies # ------------------------------------ # # TVM does not include a package for Python serial communication, so # we must install one before using microTVM. We will also need TFLite # to load models, and Pillow to prepare the sample images. # # .. code-block:: bash # # %%shell # pip install pyserial==3.5 tflite==2.1 Pillow==9.0 typing_extensions # ###################################################################### # Import Python dependencies # --------------------------- # # If you want to run this script locally, check out `TVM Online Documentation <https://tvm.apache.org/docs/install/index.html>`_ for instructions to install TVM. 
# import os import numpy as np import pathlib import json from PIL import Image import tarfile import tvm from tvm import relay from tvm.relay.backend import Executor, Runtime from tvm.contrib.download import download_testdata from tvm.micro import export_model_library_format from tvm.relay.op.contrib import cmsisnn from tvm.micro.testing.utils import create_header_file ###################################################################### # Import the TFLite model # ------------------------ # # To begin with, download and import a Visual Wake Word TFLite model. This model takes in a 96x96x3 RGB image and determines whether a person is present in the image or not. # This model is originally from `MLPerf Tiny repository <https://github.com/mlcommons/tiny>`_. # To test this model, we use two samples from `COCO 2014 Train images <https://cocodataset.org/>`_. # MODEL_URL = "https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/visual_wake_words/trained_models/vww_96_int8.tflite" MODEL_NAME = "vww_96_int8.tflite" MODEL_PATH = download_testdata(MODEL_URL, MODEL_NAME, module="model") tflite_model_buf = open(MODEL_PATH, "rb").read() try: import tflite tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0) except AttributeError: import tflite.Model tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0) input_shape = (1, 96, 96, 3) INPUT_NAME = "input_1_int8" relay_mod, params = relay.frontend.from_tflite( tflite_model, shape_dict={INPUT_NAME: input_shape}, dtype_dict={INPUT_NAME: "int8"} ) ###################################################################### # Generate the Model Library Format file # ----------------------------------------- # # First we define the target, runtime and executor. Then we compile the model for the target device and # finally we export the generated code and all the required dependencies in a single file. 
# # We can use TVM native schedules or rely on the CMSIS-NN kernels using TVM Bring-Your-Own-Code (BYOC) capability. USE_CMSIS_NN = True # USMP (Unified Static Memory Planning) performs memory planning of all tensors holistically to achieve best memory utilization DISABLE_USMP = False # Use the C runtime (crt) RUNTIME = Runtime("crt") # We define the target by passing the board name to `tvm.target.target.micro`. # If your board is not included in the supported models, you can define the target such as: # TARGET = tvm.target.Target("c -keys=arm_cpu,cpu -mcpu=cortex-m4") TARGET = tvm.target.target.micro("stm32l4r5zi") # Use the AOT executor rather than graph or vm executors. Use unpacked API and C calling style. EXECUTOR = tvm.relay.backend.Executor( "aot", {"unpacked-api": True, "interface-api": "c", "workspace-byte-alignment": 8} ) # Now, we set the compilation configurations and compile the model for the target: config = {"tir.disable_vectorize": True} if USE_CMSIS_NN: config["relay.ext.cmsisnn.options"] = {"mcpu": TARGET.mcpu} if DISABLE_USMP: config["tir.usmp.enable"] = False with tvm.transform.PassContext(opt_level=3, config=config): if USE_CMSIS_NN: # When we are using CMSIS-NN, TVM searches for patterns in the # relay graph that it can offload to the CMSIS-NN kernels. relay_mod = cmsisnn.partition_for_cmsisnn(relay_mod, params, mcpu=TARGET.mcpu) lowered = tvm.relay.build( relay_mod, target=TARGET, params=params, runtime=RUNTIME, executor=EXECUTOR ) parameter_size = len(tvm.runtime.save_param_dict(lowered.get_params())) print(f"Model parameter size: {parameter_size}") # We need to pick a directory where our file will be saved. # If running on Google Colab, we'll save everything in ``/root/tutorial`` (aka ``~/tutorial``) # but you'll probably want to store it elsewhere if running locally. 
BUILD_DIR = pathlib.Path("/root/tutorial") # sphinx_gallery_start_ignore BUILD_DIR = pathlib.Path(os.getcwd()) / "tutorial" # sphinx_gallery_end_ignore BUILD_DIR.mkdir(exist_ok=True) # Now, we export the model into a tar file: TAR_PATH = pathlib.Path(BUILD_DIR) / "model.tar" export_model_library_format(lowered, TAR_PATH) ###################################################################### # Add sample images to the MLF files # ----------------------------------- # Finally, we downlaod two sample images (one person and one not-person), convert them to binary format and store them in two header files. # with tarfile.open(TAR_PATH, mode="a") as tar_file: SAMPLES_DIR = "samples" SAMPLE_PERSON_URL = ( "https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/vww_sample_person.jpg" ) SAMPLE_NOT_PERSON_URL = "https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/vww_sample_not_person.jpg" SAMPLE_PERSON_PATH = download_testdata(SAMPLE_PERSON_URL, "person.jpg", module=SAMPLES_DIR) img = Image.open(SAMPLE_PERSON_PATH) create_header_file("sample_person", np.asarray(img), SAMPLES_DIR, tar_file) SAMPLE_NOT_PERSON_PATH = download_testdata( SAMPLE_NOT_PERSON_URL, "not_person.jpg", module=SAMPLES_DIR ) img = Image.open(SAMPLE_NOT_PERSON_PATH) create_header_file("sample_not_person", np.asarray(img), SAMPLES_DIR, tar_file) ###################################################################### # At this point you have all you need to take the compiled model to your IDE and evaluate it. Inside the MLF file (model.tar), you should find the following file hierearchy: # # .. code-block:: # # /root # ├── codegen # ├── parameters # ├── runtime # ├── samples # ├── src # ├── templates # ├── metadata.json # # * The codegen folder includes the C code TVM generated for your model. # * The runtime folder includes all the TVM dependencies that the target needs to compile the generated C code. 
# * The samples folder includes the two generated sample files for evaluating the model. # * The src folder includes the relay module describing the model. # * The templates folder includes two template files that you might need to edit based on your platform. # * The metadata.json file includes information about the model, its layers and memory requirement. # ###################################################################### # Generate the project in your IDE # ----------------------------------- # # The next step is to create a project for our target device. We use STM32CubeIDE, you can download it `here <https://www.st.com/en/development-tools/stm32cubeide.html>`_. # We are using version 1.11.0 in this tutorial. Once you install STM32CubeIDE follow these steps to create a project: # # #. select File -> New -> STM32Project. The target selection Window appears. # # #. Navigate to the "Board Selector" tab, type in the board name "nucleo-l4r5zi" in the "Commercial Part Number" text box. Select the board from the list of boards that appear on the right side of the screen and click "Next". # # #. Type in your project name (for example microtvm_vww_demo). We are using the default options. (Target Language: C, Binary Type: Executable, Project Type: STM32Cube). Click "Finish". # # #. A text box will appear asking if you want to "Initialize all the peripherals with their default mode?". click "Yes". This will generate the project and open the device configuration tool where you can use the GUI to setup the peripherals. By default the USB, USART3 and LPUART1 are enabled, as well as a few GPIOs. # # #. We will use LPUART1 to send data to the host pc. From the connectivity section, select the LPUART1 and set the "Baud Rate" to 115200 and the "Word Length" to 8. Save the changes and click "Yes" to regenerate the initialization code. This should regenerate the code and open your main.c file. 
You can also find main.c from the Project Explorer panel on the left, under microtvm_vww_demo -> Core -> Src. # # #. For sanity check, copy the code below and paste it in the "Infinite loop (aka. While (1) ) section of the main function. # # * Note: Make sure to write your code inside the sections marked by USER CODE BEGIN <...> and USER CODE END <...>. The code outside these sections get erased if you regenerate the initialization code. # # .. code-block:: c # # HAL_GPIO_TogglePin(LD2_GPIO_Port, LD2_Pin); # HAL_UART_Transmit(&hlpuart1, "Hello World.\r\n", 14, 100); # HAL_Delay(1000); # # #. From the menu bar, select Project -> Build (or right click on project name and select Build). This should build the project and generate the .elf file. Select Run -> Run to download the binary on your MCU. If the "Edit Configuration" window opens, just click "OK". # # #. Open the terminal console on your host machine. On Mac you can simply use the "screen <usb_device> 115200" command, e.g. "screen tty.usbmodemXXXX 115200". An LED should blink on the board and the string "Hello World." should print out on your terminal console every second. Press "Control-a k" to exit screen. # ###################################################################### # Import the model to the generated project # ------------------------------------------ # # To integrate the compiled model into the generated project, follow these steps: # # #. Extract the tar file and include it in the project # # * Open the project Properties. (by right clicking on the project name and selecting "Properties" or by selecting Project -> Properties from the menu bar). # * Select C/C++ General -> Paths and Symbols. Select the Source Location tab. # * If you extracted the model inside the project folder, click "Add Folder" and select the "model" folder. (You might need to right click on the project name and select "Refresh" before it appears.) 
# * If you extracted the model file somewhere else, click on the "Link Folder" button, check the box for "Link to folder in the file system" in the window that appears, click "Browse" and select the model folder. # # #. If you used CMSIS-NN in compiling the model, you need to include the CMSIS-NN source files in your project too. # # * Download or clone the files from the `CMSIS-NN repository <https://github.com/ARM-software/CMSIS-NN>`_, and follow the above steps to include the CMSIS-NN folder in the project. # # #. Open the project properties. In C/C++ Build -> Settings: add the following folders to the list of Include Paths for MCU GCC Compiler (and MCU G++ Compiler if you have a C++ project) by clicking on the "+" button, selecting "Workspace" and navigating to each of the following folders: # # * model/runtime/include # * model/codegen/host/include # * model/samples # * CMSIS-NN/Include # # #. Copy crt_config.h.template from model/templates to the Core/Inc folder, and rename it to crt_config.h. # # #. Copy platform.c.template from model/templates to the Core/Src folder, and rename it to platform.c. # * This file includes functions for managing the memory that you might need to edit based on your platform. # * define "TVM_WORKSPACE_SIZE_BYTES" in platform.c. if you are using USMP, a small value (for example 1024 Bytes) is enough. # * if you are not using usmp, checkout "workspace_size_bytes" field in metadata.json for an estimate of the required memory. # # #. Exclude the following folders from build (right click on the folder name, select Resource Configuration → Exclude from build). Check Debug and Release configurations. # # * CMSIS_NN/Tests # # #. Download the CMSIS drivers from `CMSIS Version 5 repository <https://github.com/ARM-software/CMSIS_5>`_. # # * In your Project directory, delete the Drivers/CMSIS/Include folder (which is an older version of the CMSIS drivers) and copy the CMSIS/Core/Include from the one you downloaded in its place. # # #. 
Edit the main.c file: # # * Include following header files: # # .. code-block:: c # # #include <stdio.h> # #include <string.h> # #include <stdarg.h> # #include "tvmgen_default.h" # #include "sample_person.h" # #include "sample_not_person.h" # # * Copy the following code into the main function right before the infinite loop. It sets the input and output to the model. # # .. code-block:: c # # TVMPlatformInitialize(); # signed char output[2]; # struct tvmgen_default_inputs inputs = { # .input_1_int8 = (void*)&sample_person, # }; # struct tvmgen_default_outputs outputs = { # .Identity_int8 = (void*)&output, # }; # char msg[] = "Evaluating VWW model using microTVM:\r\n"; # HAL_UART_Transmit(&hlpuart1, msg, strlen(msg), 100); # uint8_t sample = 0; # uint32_t timer_val; # char buf[50]; # uint16_t buf_len; # # * Copy the following code inside the infinite loop to run inference on both images and print the result on the console: # # .. code-block:: c # # if (sample == 0) # inputs.input_1_int8 = (void*)&sample_person; # else # inputs.input_1_int8 = (void*)&sample_not_person; # # timer_val = HAL_GetTick(); # tvmgen_default_run(&inputs, &outputs); # timer_val = HAL_GetTick() - timer_val; # if (output[0] > output[1]) # buf_len = sprintf(buf, "Person not detected, inference time = %lu ms\r\n", timer_val); # else # buf_len = sprintf(buf, "Person detected, inference time = %lu ms\r\n", timer_val); # HAL_UART_Transmit(&hlpuart1, buf, buf_len, 100); # # sample++; # if (sample == 2) # sample = 0; # # # * Define the TVMLogf function in main, to receive TVM runtime errors on serial console. # # .. code-block:: c # # void TVMLogf(const char* msg, ...) { # char buffer[128]; # int size; # va_list args; # va_start(args, msg); # size = TVMPlatformFormatMessage(buffer, 128, msg, args); # va_end(args); # HAL_UART_Transmit(&hlpuart1, buffer, size, 100); # } # # #. 
In project properties, C/C++ Build -> Settings, MCU GCC Compiler -> Optimization, set the Optimization level to "Optimize more (-O2)" ###################################################################### # Evaluate the model # ------------------- # # Now, select Run -> Run from the menu bar to flash the MCU and run the project. # You should see the LED blinking and the inference result printing on the console. #
17,719
47.950276
406
py
tvm
tvm-main/gallery/how_to/work_with_microtvm/micro_pytorch.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-micro-pytorch: 4. microTVM PyTorch Tutorial ============================ **Authors**: `Mehrdad Hessar <https://github.com/mehrdadh>`_ This tutorial is showcasing microTVM host-driven AoT compilation with a PyTorch model. This tutorial can be executed on a x86 CPU using C runtime (CRT). **Note:** This tutorial only runs on x86 CPU using CRT and does not run on Zephyr since the model would not fit on our current supported Zephyr boards. """ ###################################################################### # # .. include:: ../../../../gallery/how_to/work_with_microtvm/install_dependencies.rst # import pathlib import torch import torchvision from torchvision import transforms import numpy as np from PIL import Image import tvm from tvm import relay from tvm.contrib.download import download_testdata from tvm.relay.backend import Executor import tvm.micro.testing ################################## # Load a pre-trained PyTorch model # -------------------------------- # # To begin with, load pre-trained MobileNetV2 from torchvision. Then, # download a cat image and preprocess it to use as the model input. 
# model = torchvision.models.quantization.mobilenet_v2(weights="DEFAULT", quantize=True) model = model.eval() input_shape = [1, 3, 224, 224] input_data = torch.randn(input_shape) scripted_model = torch.jit.trace(model, input_data).eval() img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true" img_path = download_testdata(img_url, "cat.png", module="data") img = Image.open(img_path).resize((224, 224)) # Preprocess the image and convert to tensor my_preprocess = transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ] ) img = my_preprocess(img) img = np.expand_dims(img, 0) input_name = "input0" shape_list = [(input_name, input_shape)] relay_mod, params = relay.frontend.from_pytorch(scripted_model, shape_list) ##################################### # Define Target, Runtime and Executor # ----------------------------------- # # In this tutorial we use AOT host-driven executor. To compile the model # for an emulated embedded environment on an x86 machine we use C runtime (CRT) # and we use `host` micro target. Using this setup, TVM compiles the model # for C runtime which can run on a x86 CPU machine with the same flow that # would run on a physical microcontroller. # CRT Uses the main() from `src/runtime/crt/host/main.cc` # To use physical hardware, replace `board` with another physical micro target, e.g. `nrf5340dk_nrf5340_cpuapp` # or `mps2_an521` and change the platform type to Zephyr. # See more target examples in :ref:`Training Vision Models for microTVM on Arduino <tutorial-micro-train-arduino>` # and :ref:`microTVM TFLite Tutorial<tutorial_micro_tflite>`. # target = tvm.micro.testing.get_target(platform="crt", board=None) # Use the C runtime (crt) and enable static linking by setting system-lib to True runtime = tvm.relay.backend.Runtime("crt", {"system-lib": True}) # Use the AOT executor rather than graph or vm executors. 
Don't use unpacked API or C calling style. executor = Executor("aot") #################### # Compile the model # ------------------ # # Now, we compile the model for the target: # with tvm.transform.PassContext( opt_level=3, config={"tir.disable_vectorize": True}, ): module = tvm.relay.build( relay_mod, target=target, runtime=runtime, executor=executor, params=params ) ########################### # Create a microTVM project # ------------------------- # # Now that we have the compiled model as an IRModule, we need to create a firmware project # to use the compiled model with microTVM. To do this, we use Project API. # template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("crt")) project_options = {"verbose": False, "workspace_size_bytes": 6 * 1024 * 1024} temp_dir = tvm.contrib.utils.tempdir() / "project" project = tvm.micro.generate_project( str(template_project_path), module, temp_dir, project_options, ) #################################### # Build, flash and execute the model # ---------------------------------- # Next, we build the microTVM project and flash it. Flash step is specific to # physical microcontroller and it is skipped if it is simulating a microcontroller # via the host `main.cc`` or if a Zephyr emulated board is selected as the target. # project.build() project.flash() input_data = {input_name: tvm.nd.array(img.astype("float32"))} with tvm.micro.Session(project.transport()) as session: aot_executor = tvm.runtime.executor.aot_executor.AotModule(session.create_aot_executor()) aot_executor.set_input(**input_data) aot_executor.run() result = aot_executor.get_output(0).numpy() ##################### # Look up synset name # ------------------- # Look up prediction top 1 index in 1000 class synset. 
# synset_url = ( "https://raw.githubusercontent.com/Cadene/" "pretrained-models.pytorch/master/data/" "imagenet_synsets.txt" ) synset_name = "imagenet_synsets.txt" synset_path = download_testdata(synset_url, synset_name, module="data") with open(synset_path) as f: synsets = f.readlines() synsets = [x.strip() for x in synsets] splits = [line.split(" ") for line in synsets] key_to_classname = {spl[0]: " ".join(spl[1:]) for spl in splits} class_url = ( "https://raw.githubusercontent.com/Cadene/" "pretrained-models.pytorch/master/data/" "imagenet_classes.txt" ) class_path = download_testdata(class_url, "imagenet_classes.txt", module="data") with open(class_path) as f: class_id_to_key = f.readlines() class_id_to_key = [x.strip() for x in class_id_to_key] # Get top-1 result for TVM top1_tvm = np.argmax(result) tvm_class_key = class_id_to_key[top1_tvm] # Convert input to PyTorch variable and get PyTorch result for comparison with torch.no_grad(): torch_img = torch.from_numpy(img) output = model(torch_img) # Get top-1 result for PyTorch top1_torch = np.argmax(output.numpy()) torch_class_key = class_id_to_key[top1_torch] print("Relay top-1 id: {}, class name: {}".format(top1_tvm, key_to_classname[tvm_class_key])) print("Torch top-1 id: {}, class name: {}".format(top1_torch, key_to_classname[torch_class_key]))
7,190
33.572115
114
py
tvm
tvm-main/gallery/how_to/work_with_microtvm/micro_tflite.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial_micro_tflite: 2. microTVM TFLite Tutorial =========================== **Author**: `Tom Gall <https://github.com/tom-gall>`_ This tutorial is an introduction to working with microTVM and a TFLite model with Relay. """ ###################################################################### # # .. include:: ../../../../gallery/how_to/work_with_microtvm/install_dependencies.rst # import os # By default, this tutorial runs on x86 CPU using TVM's C runtime. If you would like # to run on real Zephyr hardware, you must export the `TVM_MICRO_USE_HW` environment # variable. Otherwise (if you are using the C runtime), you can skip installing # Zephyr. It takes ~20 minutes to install Zephyr. use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW")) ###################################################################### # # .. 
include:: ../../../../gallery/how_to/work_with_microtvm/install_zephyr.rst # ###################################################################### # Import Python dependencies # ------------------------------- # import json import tarfile import pathlib import tempfile import numpy as np import tvm import tvm.micro import tvm.micro.testing from tvm import relay import tvm.contrib.utils from tvm.micro import export_model_library_format from tvm.contrib.download import download_testdata model_url = ( "https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/model/sine_model.tflite" ) model_file = "sine_model.tflite" model_path = download_testdata(model_url, model_file, module="data") tflite_model_buf = open(model_path, "rb").read() ###################################################################### # Using the buffer, transform into a tflite model python object try: import tflite tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0) except AttributeError: import tflite.Model tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0) ###################################################################### # Print out the version of the model version = tflite_model.Version() print("Model Version: " + str(version)) ###################################################################### # Parse the python model object to convert it into a relay module # and weights. # It is important to note that the input tensor name must match what # is contained in the model. # # If you are unsure what that might be, this can be discovered by using # the ``visualize.py`` script within the Tensorflow project. # See `How do I inspect a .tflite file? 
<https://www.tensorflow.org/lite/guide/faq>`_ input_tensor = "dense_4_input" input_shape = (1,) input_dtype = "float32" mod, params = relay.frontend.from_tflite( tflite_model, shape_dict={input_tensor: input_shape}, dtype_dict={input_tensor: input_dtype} ) ###################################################################### # Defining the target # ------------------- # # Now we create a build config for relay, turning off two options and then calling relay.build which # will result in a C source file for the selected TARGET. When running on a simulated target of the # same architecture as the host (where this Python script is executed) choose "crt" below for the # TARGET, the C Runtime as the RUNTIME and a proper board/VM to run it (Zephyr will create the right # QEMU VM based on BOARD. In the example below the x86 arch is selected and a x86 VM is picked up accordingly: # RUNTIME = tvm.relay.backend.Runtime("crt", {"system-lib": True}) TARGET = tvm.micro.testing.get_target("crt") # When running on physical hardware, choose a TARGET and a BOARD that describe the hardware. The # STM32L4R5ZI Nucleo target and board is chosen in the example below. You could change the testing # board by simply exporting `TVM_MICRO_BOARD` variable with a different Zephyr supported board. if use_physical_hw: BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_l4r5zi") SERIAL = os.getenv("TVM_MICRO_SERIAL", default=None) TARGET = tvm.micro.testing.get_target("zephyr", BOARD) # For some boards, Zephyr runs them emulated by default, using QEMU. For example, below is the # TARGET and BOARD used to build a microTVM firmware for the mps2-an521 board. # # `mps2_an521 = "mps2_an521"` # `TARGET = tvm.micro.testing.get_target("zephyr", BOARD)` ###################################################################### # Now, compile the model for the target. If you do not specify Executor, # by default it uses GraphExecutor. 
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): module = relay.build(mod, target=TARGET, runtime=RUNTIME, params=params) ###################################################################### # Inspecting the compilation output # --------------------------------- # # The compilation process has produced some C code implementing the operators in this graph. We # can inspect it by printing the CSourceModule contents (for the purposes of this tutorial, let's # just print the first 10 lines): # c_source_module = module.get_lib().imported_modules[0] assert c_source_module.type_key == "c", "tutorial is broken" c_source_code = c_source_module.get_source() first_few_lines = c_source_code.split("\n")[:10] assert any( l.startswith("TVM_DLL int32_t tvmgen_default_") for l in first_few_lines ), f"tutorial is broken: {first_few_lines!r}" print("\n".join(first_few_lines)) ###################################################################### # Compiling the generated code # ---------------------------- # # Now we need to incorporate the generated C code into a project that allows us to run inference on the # device. The simplest way to do this is to integrate it yourself, using microTVM's standard output format # model library format. This is a tarball with a standard layout. # Get a temporary path where we can store the tarball (since this is running as a tutorial). temp_dir = tvm.contrib.utils.tempdir() model_tar_path = temp_dir / "model.tar" export_model_library_format(module, model_tar_path) with tarfile.open(model_tar_path, "r:*") as tar_f: print("\n".join(f" - {m.name}" for m in tar_f.getmembers())) # TVM also provides a standard way for embedded platforms to automatically generate a standalone # project, compile and flash it to a target, and communicate with it using the standard TVM RPC # protocol. The Model Library Format serves as the model input to this process. 
When embedded # platforms provide such an integration, they can be used directly by TVM for both host-driven # inference and autotuning . This integration is provided by the # `microTVM Project API` <https://github.com/apache/tvm-rfcs/blob/main/rfcs/0008-microtvm-project-api.md>_, # # Embedded platforms need to provide a Template Project containing a microTVM API Server (typically, # this lives in a file ``microtvm_api_server.py`` in the root directory). Let's use the example ``host`` # project in this tutorial, which simulates the device using a POSIX subprocess and pipes: template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("crt")) project_options = {} # You can use options to provide platform-specific options through TVM. # For physical hardware, you can try out the Zephyr platform by using a different template project # and options: if use_physical_hw: template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) project_options = { "project_type": "host_driven", "board": BOARD, "serial_number": SERIAL, "config_main_stack_size": 4096, "zephyr_base": os.getenv("ZEPHYR_BASE", default="/content/zephyrproject/zephyr"), } # Create a temporary directory temp_dir = tvm.contrib.utils.tempdir() generated_project_dir = temp_dir / "generated-project" generated_project = tvm.micro.generate_project( template_project_path, module, generated_project_dir, project_options ) # Build and flash the project generated_project.build() generated_project.flash() ###################################################################### # Next, establish a session with the simulated device and run the # computation. The `with session` line would typically flash an attached # microcontroller, but in this tutorial, it simply launches a subprocess # to stand in for an attached microcontroller. 
with tvm.micro.Session(transport_context_manager=generated_project.transport()) as session: graph_mod = tvm.micro.create_local_graph_executor( module.get_graph_json(), session.get_system_lib(), session.device ) # Set the model parameters using the lowered parameters produced by `relay.build`. graph_mod.set_input(**module.get_params()) # The model consumes a single float32 value and returns a predicted sine value. To pass the # input value we construct a tvm.nd.array object with a single contrived number as input. For # this model values of 0 to 2Pi are acceptable. graph_mod.set_input(input_tensor, tvm.nd.array(np.array([0.5], dtype="float32"))) graph_mod.run() tvm_output = graph_mod.get_output(0).numpy() print("result is: " + str(tvm_output))
9,974
40.390041
110
py
tvm
tvm-main/gallery/how_to/work_with_microtvm/micro_train.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-micro-train-arduino: 5. Training Vision Models for microTVM on Arduino ================================================= **Author**: `Gavin Uberti <https://github.com/guberti>`_ This tutorial shows how MobileNetV1 models can be trained to fit on embedded devices, and how those models can be deployed to Arduino using TVM. """ ###################################################################### # Motivation # ---------- # When building IOT devices, we often want them to **see and understand** the world around them. # This can take many forms, but often times a device will want to know if a certain **kind of # object** is in its field of vision. # # For example, a security camera might look for **people**, so it can decide whether to save a video # to memory. A traffic light might look for **cars**, so it can judge which lights should change # first. Or a forest camera might look for a **kind of animal**, so they can estimate how large # the animal population is. 
# # To make these devices affordable, we would like them to need only a low-cost processor like the # `nRF52840 <https://www.nordicsemi.com/Products/nRF52840>`_ (costing five dollars each on Mouser) or the `RP2040 <https://www.raspberrypi.com/products/rp2040/>`_ (just $1.45 each!). # # These devices have very little memory (~250 KB RAM), meaning that no conventional edge AI # vision model (like MobileNet or EfficientNet) will be able to run. In this tutorial, we will # show how these models can be modified to work around this requirement. Then, we will use TVM # to compile and deploy it for an Arduino that uses one of these processors. # # Installing the Prerequisites # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # This tutorial will use TensorFlow to train the model - a widely used machine learning library # created by Google. TensorFlow is a very low-level library, however, so we will the Keras # interface to talk to TensorFlow. We will also use TensorFlow Lite to perform quantization on # our model, as TensorFlow by itself does not support this. # # Once we have our generated model, we will use TVM to compile and test it. To avoid having to # build from source, we'll install ``tlcpack`` - a community build of TVM. Lastly, we'll also # install ``imagemagick`` and ``curl`` to preprocess data: # # .. code-block:: bash # # %%shell # pip install -q tensorflow tflite # pip install -q tlcpack-nightly -f https://tlcpack.ai/wheels # apt-get -qq install imagemagick curl # # # Install Arduino CLI and library for Nano 33 BLE # curl -fsSL https://raw.githubusercontent.com/arduino/arduino-cli/master/install.sh | sh # /content/bin/arduino-cli core update-index # /content/bin/arduino-cli core install arduino:mbed_nano # # Using the GPU # ^^^^^^^^^^^^^ # # This tutorial demonstrates training a neural network, which is requires a lot of computing power # and will go much faster if you have a GPU. 
If you are viewing this tutorial on Google Colab, you # can enable a GPU by going to **Runtime->Change runtime type** and selecting "GPU" as our hardware # accelerator. If you are running locally, you can `follow TensorFlow's guide <https://www.tensorflow.org/guide/gpu>`_ instead. # # We can test our GPU installation with the following code: import tensorflow as tf if not tf.test.gpu_device_name(): print("No GPU was detected!") print("Model training will take much longer (~30 minutes instead of ~5)") else: print("GPU detected - you're good to go.") ###################################################################### # Choosing Our Work Dir # ^^^^^^^^^^^^^^^^^^^^^ # We need to pick a directory where our image datasets, trained model, and eventual Arduino sketch # will all live. If running on Google Colab, we'll save everything in ``/root`` (aka ``~``) but you'll # probably want to store it elsewhere if running locally. Note that this variable only affects Python # scripts - you'll have to adjust the Bash commands too. import os FOLDER = "/root" # sphinx_gallery_start_ignore import tempfile FOLDER = tempfile.mkdtemp() # sphinx_gallery_end_ignore ###################################################################### # Downloading the Data # -------------------- # Convolutional neural networks usually learn by looking at many images, along with labels telling # the network what those images are. To get these images, we'll need a publicly available dataset # with thousands of images of all sorts of objects and labels of what's in each image. We'll also # need a bunch of images that **aren't** of cars, as we're trying to distinguish these two classes. # # In this tutorial, we'll create a model to detect if an image contains a **car**, but you can use # whatever category you like! Just change the source URL below to one containing images of another # type of object. 
# # To get our car images, we'll be downloading the `Stanford Cars dataset <http://ai.stanford.edu/~jkrause/cars/car_dataset.html>`_, # which contains 16,185 full color images of cars. We'll also need images of random things that # aren't cars, so we'll use the `COCO 2017 <https://cocodataset.org/#home>`_ validation set (it's # smaller, and thus faster to download than the full training set. Training on the full data set # would yield better results). Note that there are some cars in the COCO 2017 data set, but it's # a small enough fraction not to matter - just keep in mind that this will drive down our percieved # accuracy slightly. # # We could use the TensorFlow dataloader utilities, but we'll instead do it manually to make sure # it's easy to change the datasets being used. We'll end up with the following file hierarchy: # # .. code-block:: # # /root # ├── images # │ ├── object # │ │ ├── 000001.jpg # │ │ │ ... # │ │ └── 016185.jpg # │ ├── object.tgz # │ ├── random # │ │ ├── 000000000139.jpg # │ │ │ ... # │ │ └── 000000581781.jpg # │ └── random.zip # # We should also note that Stanford cars has 8k images, while the COCO 2017 validation set is 5k # images - it is not a 50/50 split! If we wanted to, we could weight these classes differently # during training to correct for this, but training will still work if we ignore it. It should # take about **2 minutes** to download the Stanford Cars, while COCO 2017 validation will take # **1 minute**. 
import os import shutil import urllib.request # Download datasets os.makedirs(f"{FOLDER}/downloads") os.makedirs(f"{FOLDER}/images") urllib.request.urlretrieve( "https://data.deepai.org/stanfordcars.zip", f"{FOLDER}/downloads/target.zip" ) urllib.request.urlretrieve( "http://images.cocodataset.org/zips/val2017.zip", f"{FOLDER}/downloads/random.zip" ) # Extract them and rename their folders shutil.unpack_archive(f"{FOLDER}/downloads/target.zip", f"{FOLDER}/downloads") shutil.unpack_archive(f"{FOLDER}/downloads/random.zip", f"{FOLDER}/downloads") shutil.move(f"{FOLDER}/downloads/cars_train/cars_train", f"{FOLDER}/images/target") shutil.move(f"{FOLDER}/downloads/val2017", f"{FOLDER}/images/random") ###################################################################### # Loading the Data # ---------------- # Currently, our data is stored on-disk as JPG files of various sizes. To train with it, we'll have # to load the images into memory, resize them to be 64x64, and convert them to raw, uncompressed # data. Keras's ``image_dataset_from_directory`` will take care of most of this, though it loads # images such that each pixel value is a float from 0 to 255. # # We'll also need to load labels, though Keras will help with this. From our subdirectory structure, # it knows the images in ``/objects`` are one class, and those in ``/random`` another. Setting # ``label_mode='categorical'`` tells Keras to convert these into **categorical labels** - a 2x1 vector # that's either ``[1, 0]`` for an object of our target class, or ``[0, 1]`` vector for anything else. # We'll also set ``shuffle=True`` to randomize the order of our examples. # # We will also **batch** the data - grouping samples into clumps to make our training go faster. # Setting ``batch_size = 32`` is a decent number. # # Lastly, in machine learning we generally want our inputs to be small numbers. 
We'll thus use a # ``Rescaling`` layer to change our images such that each pixel is a float between ``0.0`` and ``1.0``, # instead of ``0`` to ``255``. We need to be careful not to rescale our categorical labels though, so # we'll use a ``lambda`` function. IMAGE_SIZE = (64, 64, 3) unscaled_dataset = tf.keras.utils.image_dataset_from_directory( f"{FOLDER}/images", batch_size=32, shuffle=True, label_mode="categorical", image_size=IMAGE_SIZE[0:2], ) rescale = tf.keras.layers.Rescaling(scale=1.0 / 255) full_dataset = unscaled_dataset.map(lambda im, lbl: (rescale(im), lbl)) ###################################################################### # What's Inside Our Dataset? # ^^^^^^^^^^^^^^^^^^^^^^^^^^ # Before giving this data set to our neural network, we ought to give it a quick visual inspection. # Does the data look properly transformed? Do the labels seem appropriate? And what's our ratio of # objects to other stuff? We can display some examples from our datasets using ``matplotlib``: import matplotlib.pyplot as plt num_target_class = len(os.listdir(f"{FOLDER}/images/target/")) num_random_class = len(os.listdir(f"{FOLDER}/images/random/")) print(f"{FOLDER}/images/target contains {num_target_class} images") print(f"{FOLDER}/images/random contains {num_random_class} images") # Show some samples and their labels SAMPLES_TO_SHOW = 10 plt.figure(figsize=(20, 10)) for i, (image, label) in enumerate(unscaled_dataset.unbatch()): if i >= SAMPLES_TO_SHOW: break ax = plt.subplot(1, SAMPLES_TO_SHOW, i + 1) plt.imshow(image.numpy().astype("uint8")) plt.title(list(label.numpy())) plt.axis("off") ###################################################################### # Validating our Accuracy # ^^^^^^^^^^^^^^^^^^^^^^^ # While developing our model, we'll often want to check how accurate it is (e.g. to see if it # improves during training). How do we do this? We could just train it on *all* of the data, and # then ask it to classify that same data. 
However, our model could cheat by just memorizing all of # the samples, which would make it *appear* to have very high accuracy, but perform very badly in # reality. In practice, this "memorizing" is called **overfitting**. # # To prevent this, we will set aside some of the data (we'll use 20%) as a **validation set**. Our # model will never be trained on validation data - we'll only use it to check our model's accuracy. num_batches = len(full_dataset) train_dataset = full_dataset.take(int(num_batches * 0.8)) validation_dataset = full_dataset.skip(len(train_dataset)) ###################################################################### # Loading the Data # ---------------- # In the past decade, `convolutional neural networks <https://en.wikipedia.org/wiki/Convolutional_neural_network>`_ have been widely # adopted for image classification tasks. State-of-the-art models like `EfficientNet V2 <https://arxiv.org/abs/2104.00298>`_ are able # to perform image classification better than even humans! Unfortunately, these models have tens of # millions of parameters, and thus won't fit on cheap security camera computers. # # Our applications generally don't need perfect accuracy - 90% is good enough. We can thus use the # older and smaller MobileNet V1 architecture. But this *still* won't be small enough - by default, # MobileNet V1 with 224x224 inputs and alpha 1.0 takes ~50 MB to just **store**. To reduce the size # of the model, there are three knobs we can turn. First, we can reduce the size of the input images # from 224x224 to 96x96 or 64x64, and Keras makes it easy to do this. We can also reduce the **alpha** # of the model, from 1.0 to 0.25, which downscales the width of the network (and the number of # filters) by a factor of four. And if we were really strapped for space, we could reduce the # number of **channels** by making our model take grayscale images instead of RGB ones. # # In this tutorial, we will use an RGB 64x64 input image and alpha 0.25. 
This is not quite # ideal, but it allows the finished model to fit in 192 KB of RAM, while still letting us perform # transfer learning using the official TensorFlow source models (if we used alpha <0.25 or a # grayscale input, we wouldn't be able to do this). # # What is Transfer Learning? # ^^^^^^^^^^^^^^^^^^^^^^^^^^ # Deep learning has `dominated image classification <https://paperswithcode.com/sota/image-classification-on-imagenet>`_ for a long time, # but training neural networks takes a lot of time. When a neural network is trained "from scratch", # its parameters start out randomly initialized, forcing it to learn very slowly how to tell images # apart. # # With transfer learning, we instead start with a neural network that's **already** good at a # specific task. In this example, that task is classifying images from `the ImageNet database <https://www.image-net.org/>`_. This # means the network already has some object detection capabilities, and is likely closer to what you # want then a random model would be. # # This works especially well with image processing neural networks like MobileNet. In practice, it # turns out the convolutional layers of the model (i.e. the first 90% of the layers) are used for # identifying low-level features like lines and shapes - only the last few fully connected layers # are used to determine how those shapes make up the objects the network is trying to detect. # # We can take advantage of this by starting training with a MobileNet model that was trained on # ImageNet, and already knows how to identify those lines and shapes. We can then just remove the # last few layers from this pretrained model, and add our own final layers. We'll then train this # conglomerate model for a few epochs on our cars vs non-cars dataset, to adjust the first layers # and train from scratch the last layers. This process of training an already-partially-trained # model is called *fine-tuning*. 
# # Source MobileNets for transfer learning have been `pretrained by the TensorFlow folks <https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md>`_, so we # can just download the one closest to what we want (the 128x128 input model with 0.25 depth scale). os.makedirs(f"{FOLDER}/models") WEIGHTS_PATH = f"{FOLDER}/models/mobilenet_2_5_128_tf.h5" urllib.request.urlretrieve( "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/mobilenet_2_5_128_tf.h5", WEIGHTS_PATH, ) pretrained = tf.keras.applications.MobileNet( input_shape=IMAGE_SIZE, weights=WEIGHTS_PATH, alpha=0.25 ) ###################################################################### # Modifying Our Network # ^^^^^^^^^^^^^^^^^^^^^ # As mentioned above, our pretrained model is designed to classify the 1,000 ImageNet categories, # but we want to convert it to classify cars. Since only the bottom few layers are task-specific, # we'll **cut off the last five layers** of our original model. In their place we'll build our own # "tail" to the model by performing respape, dropout, flatten, and softmax operations. model = tf.keras.models.Sequential() model.add(tf.keras.layers.InputLayer(input_shape=IMAGE_SIZE)) model.add(tf.keras.Model(inputs=pretrained.inputs, outputs=pretrained.layers[-5].output)) model.add(tf.keras.layers.Reshape((-1,))) model.add(tf.keras.layers.Dropout(0.1)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(2, activation="softmax")) ###################################################################### # Fine Tuning Our Network # ^^^^^^^^^^^^^^^^^^^^^^^ # When training neural networks, we must set a parameter called the **learning rate** that controls # how fast our network learns. It must be set carefully - too slow, and our network will take # forever to train; too fast, and our network won't be able to learn some fine details. 
Generally # for Adam (the optimizer we're using), ``0.001`` is a pretty good learning rate (and is what's # recommended in the `original paper <https://arxiv.org/abs/1412.6980>`_). However, in this case # ``0.0005`` seems to work a little better. # # We'll also pass the validation set from earlier to ``model.fit``. This will evaluate how good our # model is each time we train it, and let us track how our model is improving. Once training is # finished, the model should have a validation accuracy around ``0.98`` (meaning it was right 98% of # the time on our validation set). model.compile( optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005), loss="categorical_crossentropy", metrics=["accuracy"], ) model.fit(train_dataset, validation_data=validation_dataset, epochs=3, verbose=2) ###################################################################### # Quantization # ------------ # We've done a decent job of reducing our model's size so far - changing the input dimension, # along with removing the bottom layers reduced the model to just 219k parameters. However, each of # these parameters is a ``float32`` that takes four bytes, so our model will take up almost one MB! # # Additionally, it might be the case that our hardware doesn't have built-in support for floating # point numbers. While most high-memory Arduinos (like the Nano 33 BLE) do have hardware support, # some others (like the Arduino Due) do not. On any boards *without* dedicated hardware support, # floating point multiplication will be extremely slow. # # To address both issues we will **quantize** the model - representing the weights as eight bit # integers. It's more complex than just rounding, though - to get the best performance, TensorFlow # tracks how each neuron in our model activates, so we can figure out how most accurately simulate # the neuron's original activations with integer operations. 
# # We will help TensorFlow do this by creating a representative dataset - a subset of the original # that is used for tracking how those neurons activate. We'll then pass this into a ``TFLiteConverter`` # (Keras itself does not have quantization support) with an ``Optimize`` flag to tell TFLite to perform # the conversion. By default, TFLite keeps the inputs and outputs of our model as floats, so we must # explicitly tell it to avoid this behavior. def representative_dataset(): for image_batch, label_batch in full_dataset.take(10): yield [image_batch] converter = tf.lite.TFLiteConverter.from_keras_model(model) converter.optimizations = [tf.lite.Optimize.DEFAULT] converter.representative_dataset = representative_dataset converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] converter.inference_input_type = tf.uint8 converter.inference_output_type = tf.uint8 quantized_model = converter.convert() ###################################################################### # Download the Model if Desired # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # We've now got a finished model that you can use locally or in other tutorials (try autotuning # this model or viewing it on `https://netron.app/ <https://netron.app/>`_). But before we do # those things, we'll have to write it to a file (``quantized.tflite``). If you're running this # tutorial on Google Colab, you'll have to uncomment the last two lines to download the file # after writing it. QUANTIZED_MODEL_PATH = f"{FOLDER}/models/quantized.tflite" with open(QUANTIZED_MODEL_PATH, "wb") as f: f.write(quantized_model) # from google.colab import files # files.download(QUANTIZED_MODEL_PATH) ###################################################################### # Compiling With TVM For Arduino # ------------------------------ # TensorFlow has a built-in framework for deploying to microcontrollers - `TFLite Micro <https://www.tensorflow.org/lite/microcontrollers>`_. 
However, # it's poorly supported by development boards and does not support autotuning. We will use Apache # TVM instead. # # TVM can be used either with its command line interface (``tvmc``) or with its Python interface. The # Python interface is fully-featured and more stable, so we'll use it here. # # TVM is an optimizing compiler, and optimizations to our model are performed in stages via # **intermediate representations**. The first of these is `Relay <https://arxiv.org/abs/1810.00952>`_ a high-level intermediate # representation emphasizing portability. The conversion from ``.tflite`` to Relay is done without any # knowledge of our "end goal" - the fact we intend to run this model on an Arduino. # # Choosing an Arduino Board # ^^^^^^^^^^^^^^^^^^^^^^^^^ # Next, we'll have to decide exactly which Arduino board to use. The Arduino sketch that we # ultimately generate should be compatible with any board, but knowing which board we are using in # advance allows TVM to adjust its compilation strategy to get better performance. # # There is one catch - we need enough **memory** (flash and RAM) to be able to run our model. We # won't ever be able to run a complex vision model like a MobileNet on an Arduino Uno - that board # only has 2 kB of RAM and 32 kB of flash! Our model has ~200,000 parameters, so there is just no # way it could fit. # # For this tutorial, we will use the Nano 33 BLE, which has 1 MB of flash memory and 256 KB of RAM. # However, any other Arduino with those specs or better should also work. # # Generating our project # ^^^^^^^^^^^^^^^^^^^^^^ # Next, we'll compile the model to TVM's MLF (model library format) intermediate representation, # which consists of C/C++ code and is designed for autotuning. To improve performance, we'll tell # TVM that we're compiling for the ``nrf52840`` microprocessor (the one the Nano 33 BLE uses). 
We'll # also tell it to use the C runtime (abbreviated ``crt``) and to use ahead-of-time memory allocation # (abbreviated ``aot``, which helps reduce the model's memory footprint). Lastly, we will disable # vectorization with ``"tir.disable_vectorize": True``, as C has no native vectorized types. # # Once we have set these configuration parameters, we will call ``tvm.relay.build`` to compile our # Relay model into the MLF intermediate representation. From here, we just need to call # ``tvm.micro.generate_project`` and pass in the Arduino template project to finish compilation. import shutil import tvm import tvm.micro.testing # Method to load model is different in TFLite 1 vs 2 try: # TFLite 2.1 and above import tflite tflite_model = tflite.Model.GetRootAsModel(quantized_model, 0) except AttributeError: # Fall back to TFLite 1.14 method import tflite.Model tflite_model = tflite.Model.Model.GetRootAsModel(quantized_model, 0) # Convert to the Relay intermediate representation mod, params = tvm.relay.frontend.from_tflite(tflite_model) # Set configuration flags to improve performance target = tvm.micro.testing.get_target("zephyr", "nrf5340dk_nrf5340_cpuapp") runtime = tvm.relay.backend.Runtime("crt") executor = tvm.relay.backend.Executor("aot", {"unpacked-api": True}) # Convert to the MLF intermediate representation with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): mod = tvm.relay.build(mod, target, runtime=runtime, executor=executor, params=params) # Generate an Arduino project from the MLF intermediate representation shutil.rmtree(f"{FOLDER}/models/project", ignore_errors=True) arduino_project = tvm.micro.generate_project( tvm.micro.get_microtvm_template_projects("arduino"), mod, f"{FOLDER}/models/project", { "board": "nano33ble", "arduino_cli_cmd": "/content/bin/arduino-cli", "project_type": "example_project", }, ) ###################################################################### # Testing our Arduino Project # 
--------------------------- # Consider the following two 224x224 images from the author's camera roll - one of a car, one not. # We will test our Arduino project by loading both of these images and executing the compiled model # on them. # # .. image:: https://raw.githubusercontent.com/tlc-pack/web-data/main/testdata/microTVM/data/model_train_images_combined.png # :align: center # :height: 200px # :width: 600px # # Currently, these are 224x224 PNG images we can download from Imgur. Before we can feed in these # images, we'll need to resize and convert them to raw data, which can be done with ``imagemagick``. # # It's also challenging to load raw data onto an Arduino, as only C/CPP files (and similar) are # compiled. We can work around this by embedding our raw data in a hard-coded C array with the # built-in utility ``bin2c`` that will output a file like below: # # .. code-block:: c # # static const unsigned char CAR_IMAGE[] = { # 0x22,0x23,0x14,0x22, # ... # 0x07,0x0e,0x08,0x08 # }; # # We can do both of these things with a few lines of Bash code: # # .. code-block:: bash # # %%shell # mkdir -p ~/tests # curl "https://i.imgur.com/JBbEhxN.png" -o ~/tests/car_224.png # convert ~/tests/car_224.png -resize 64 ~/tests/car_64.png # stream ~/tests/car_64.png ~/tests/car.raw # bin2c -c -st ~/tests/car.raw --name CAR_IMAGE > ~/models/project/car.c # # curl "https://i.imgur.com/wkh7Dx2.png" -o ~/tests/catan_224.png # convert ~/tests/catan_224.png -resize 64 ~/tests/catan_64.png # stream ~/tests/catan_64.png ~/tests/catan.raw # bin2c -c -st ~/tests/catan.raw --name CATAN_IMAGE > ~/models/project/catan.c ###################################################################### # Writing our Arduino Script # -------------------------- # We now need a little bit of Arduino code to read the two binary arrays we just generated, run the # model on them, and log the output to the serial monitor. This file will replace ``arduino_sketch.ino`` # as the main file of our sketch. 
You'll have to copy this code in manually.. # # .. code-block:: c # # %%writefile /root/models/project.ino # #include "src/model.h" # #include "car.c" # #include "catan.c" # # void setup() { # Serial.begin(9600); # TVMInitialize(); # } # # void loop() { # uint8_t result_data[2]; # Serial.println("Car results:"); # TVMExecute(const_cast<uint8_t*>(CAR_IMAGE), result_data); # Serial.print(result_data[0]); Serial.print(", "); # Serial.print(result_data[1]); Serial.println(); # # Serial.println("Other object results:"); # TVMExecute(const_cast<uint8_t*>(CATAN_IMAGE), result_data); # Serial.print(result_data[0]); Serial.print(", "); # Serial.print(result_data[1]); Serial.println(); # # delay(1000); # } # # Compiling Our Code # ^^^^^^^^^^^^^^^^^^ # Now that our project has been generated, TVM's job is mostly done! We can still call # ``arduino_project.build()`` and ``arduino_project.upload()``, but these just use ``arduino-cli``'s # compile and flash commands underneath. We could also begin autotuning our model, but that's a # subject for a different tutorial. To finish up, we'll verify no compiler errors are thrown # by our project: shutil.rmtree(f"{FOLDER}/models/project/build", ignore_errors=True) # sphinx_gallery_start_ignore from unittest.mock import MagicMock arduino_project = MagicMock() # sphinx_gallery_end_ignore arduino_project.build() print("Compilation succeeded!") ###################################################################### # Uploading to Our Device # ----------------------- # The very last step is uploading our sketch to an Arduino to make sure our code works properly. # Unfortunately, we can't do that from Google Colab, so we'll have to download our sketch. This is # simple enough to do - we'll just turn our project into a `.zip` archive, and call `files.download`. # If you're running on Google Colab, you'll have to uncomment the last two lines to download the file # after writing it. 
ZIP_FOLDER = f"{FOLDER}/models/project" shutil.make_archive(ZIP_FOLDER, "zip", ZIP_FOLDER) # from google.colab import files # files.download(f"{FOLDER}/models/project.zip") # sphinx_gallery_start_ignore # Run a few unit tests to make sure the Python code worked # Ensure transfer learn model was correctly assembled assert len(model.layers) == 5 assert model.count_params() == 219058 # Only 219,058 of these are trainable assert len(quantized_model) >= 250000 # Quantized model will be 250 KB - 350 KB assert len(quantized_model) <= 350000 # Exact value depends on quantization # Assert .tflite and .zip files were written to disk assert os.path.isfile(f"{FOLDER}/models/quantized.tflite") assert os.path.isfile(f"{FOLDER}/models/project.zip") # Assert MLF file was correctly generated assert mod.executor.name == "aot" # Remove the temporary folder we generated at the beginning shutil.rmtree(FOLDER) # sphinx_gallery_end_ignore ###################################################################### # From here, we'll need to open it in the Arduino IDE. You'll have to download the IDE as well as # the SDK for whichever board you are using. For certain boards like the Sony SPRESENSE, you may # have to change settings to control how much memory you want the board to use. # # Expected Results # ^^^^^^^^^^^^^^^^ # If all works as expected, you should see the following output on a Serial monitor: # # .. code-block:: # # Car results: # 255, 0 # Other object results: # 0, 255 # # The first number represents the model's confidence that the object **is** a car and ranges from # 0-255. The second number represents the model's confidence that the object **is not** a car and # is also 0-255. These results mean the model is very sure that the first image is a car, and the # second image is not (which is correct). Hence, our model is working! # # Summary # ------- # In this tutorial, we used transfer learning to quickly train an image recognition model to # identify cars. 
We modified its input dimensions and last few layers to make it better at this, # and to make it faster and smaller. We then quantified the model and compiled it using TVM to # create an Arduino sketch. Lastly, we tested the model using two static images to prove it works # as intended. # # Next Steps # ^^^^^^^^^^ # From here, we could modify the model to read live images from the camera - we have another # Arduino tutorial for how to do that `on GitHub <https://github.com/guberti/tvm-arduino-demos/tree/master/examples/person_detection>`_. Alternatively, we could also # `use TVM's autotuning capabilities <https://tvm.apache.org/docs/how_to/work_with_microtvm/micro_autotune.html>`_ to dramatically improve the model's performance. #
31,816
48.40528
182
py
tvm
tvm-main/gallery/how_to/work_with_microtvm/micro_aot.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-micro-aot: 3. microTVM Ahead-of-Time (AOT) Compilation =========================================== **Authors**: `Mehrdad Hessar <https://github.com/mehrdadh>`_, `Alan MacDonald <https://github.com/alanmacd>`_ This tutorial is showcasing microTVM host-driven AoT compilation with a TFLite model. AoTExecutor reduces the overhead of parsing graph at runtime compared to GraphExecutor. Also, we can have better memory management using ahead of time compilation. This tutorial can be executed on a x86 CPU using C runtime (CRT) or on Zephyr platform on a microcontroller/board supported by Zephyr. """ ###################################################################### # # .. include:: ../../../../gallery/how_to/work_with_microtvm/install_dependencies.rst # import os # By default, this tutorial runs on x86 CPU using TVM's C runtime. If you would like # to run on real Zephyr hardware, you must export the `TVM_MICRO_USE_HW` environment # variable. Otherwise (if you are using the C runtime), you can skip installing # Zephyr. It takes ~20 minutes to install Zephyr. use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW")) ###################################################################### # # .. 
include:: ../../../../gallery/how_to/work_with_microtvm/install_zephyr.rst # ###################################################################### # Import Python dependencies # ------------------------------- # import numpy as np import pathlib import json import tvm from tvm import relay import tvm.micro.testing from tvm.relay.backend import Executor, Runtime from tvm.contrib.download import download_testdata ###################################################################### # Import a TFLite model # --------------------- # # To begin with, download and import a Keyword Spotting TFLite model. # This model is originally from `MLPerf Tiny repository <https://github.com/mlcommons/tiny>`_. # To test this model, we use samples from `KWS dataset provided by Google <https://ai.googleblog.com/2017/08/launching-speech-commands-dataset.html>`_. # # **Note:** By default this tutorial runs on x86 CPU using CRT, if you would like to run on Zephyr platform # you need to export `TVM_MICRO_USE_HW` environment variable. 
# MODEL_URL = "https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/keyword_spotting/trained_models/kws_ref_model.tflite" MODEL_PATH = download_testdata(MODEL_URL, "kws_ref_model.tflite", module="model") SAMPLE_URL = "https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy" SAMPLE_PATH = download_testdata(SAMPLE_URL, "keyword_spotting_int8_6.pyc.npy", module="data") tflite_model_buf = open(MODEL_PATH, "rb").read() try: import tflite tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0) except AttributeError: import tflite.Model tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0) input_shape = (1, 49, 10, 1) INPUT_NAME = "input_1" relay_mod, params = relay.frontend.from_tflite( tflite_model, shape_dict={INPUT_NAME: input_shape}, dtype_dict={INPUT_NAME: "int8"} ) ###################################################################### # Defining the target # ------------------- # # Now we need to define the target, runtime and executor. In this tutorial, we focused on # using AOT host driven executor. We use the host micro target which is for running a model # on x86 CPU using CRT runtime or running a model with Zephyr platform on qemu_x86 simulator # board. In the case of a physical microcontroller, we get the target model for the physical # board (E.g. nucleo_l4r5zi) and change `BOARD` to supported Zephyr board. # # Use the C runtime (crt) and enable static linking by setting system-lib to True RUNTIME = Runtime("crt", {"system-lib": True}) # Simulate a microcontroller on the host machine. Uses the main() from `src/runtime/crt/host/main.cc`. # To use physical hardware, replace "host" with something matching your hardware. TARGET = tvm.micro.testing.get_target("crt") # Use the AOT executor rather than graph or vm executors. Don't use unpacked API or C calling style. 
EXECUTOR = Executor("aot") if use_physical_hw: BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_l4r5zi") SERIAL = os.getenv("TVM_MICRO_SERIAL", default=None) TARGET = tvm.micro.testing.get_target("zephyr", BOARD) ###################################################################### # Compile the model # ----------------- # # Now, we compile the model for the target: # with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): module = tvm.relay.build( relay_mod, target=TARGET, params=params, runtime=RUNTIME, executor=EXECUTOR ) ###################################################################### # Create a microTVM project # ------------------------- # # Now that we have the compiled model as an IRModule, we need to create a firmware project # to use the compiled model with microTVM. To do this, we use Project API. We have defined # CRT and Zephyr microTVM template projects which are used for x86 CPU and Zephyr boards # respectively. # template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("crt")) project_options = {} # You can use options to provide platform-specific options through TVM. if use_physical_hw: template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) project_options = { "project_type": "host_driven", "board": BOARD, "serial_number": SERIAL, "config_main_stack_size": 4096, "zephyr_base": os.getenv("ZEPHYR_BASE", default="/content/zephyrproject/zephyr"), } temp_dir = tvm.contrib.utils.tempdir() generated_project_dir = temp_dir / "project" project = tvm.micro.generate_project( template_project_path, module, generated_project_dir, project_options ) ###################################################################### # Build, flash and execute the model # ---------------------------------- # Next, we build the microTVM project and flash it. 
Flash step is specific to
# physical microcontrollers and it is skipped if it is simulating a microcontroller
# via the host main.cc or if a Zephyr emulated board is selected as the target.
# Next, we define the labels for the model output and execute the model with a
# sample with expected value of 6 (label: left).
#
project.build()
project.flash()

# Class labels for the keyword-spotting model, ordered by output index
# (so `labels[np.argmax(result)]` maps the argmax back to a word).
labels = [
    "_silence_",
    "_unknown_",
    "yes",
    "no",
    "up",
    "down",
    "left",
    "right",
    "on",
    "off",
    "stop",
    "go",
]
# Open a host-driven session over the project transport, feed the pre-processed
# sample (loaded from SAMPLE_PATH) into the model's input, run inference on the
# device/simulator, and read back the output scores.
with tvm.micro.Session(project.transport()) as session:
    aot_executor = tvm.runtime.executor.aot_executor.AotModule(session.create_aot_executor())
    sample = np.load(SAMPLE_PATH)
    aot_executor.get_input(INPUT_NAME).copyfrom(sample)
    aot_executor.run()
    result = aot_executor.get_output(0).numpy()
    print(f"Label is `{labels[np.argmax(result)]}` with index `{np.argmax(result)}`")
7,856
39.086735
164
py
tvm
tvm-main/gallery/how_to/work_with_pytorch/using_as_torch.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Wrap Your TVMScript as PyTorch Module ===================================== **Author**: `Yaoda Zhou <https://github.com/juda>`_ This article is a tutorial on wrapping the TVMScript code as the PyTorch module. Using the decorator `as_torch`, users can wrap TVMScript code into a PyTorch nn.Module naturally. To follow the tutorial, PyTorch should be installed: .. code-block:: bash %%shell pip install torch """ # Import PyTorch, as well as necessary libraries import torch import torch.nn.functional as F import torch.utils.benchmark as benchmark import tvm from tvm.contrib.torch import as_torch from tvm.script import tir as T ###################################################################### # Write your own PyTorch operator by TVMScript # -------------------------------------------- # PyTorch is a very popular machine learning framework which contains # optimized implementations of most commonly used operators. # Nevertheless, sometimes you might want to write your own operators in PyTorch. # In that case, the performance of such custom operators might not be satisfactory for your needs. # # For example, suppose that we are going to define a 1-d depthwise convolution operator. 
# Assume the number of in_channel and out_channel are both 70, # the width is 80 and the kernel size is 20, # then the 1-d depthwise conv could be written in PyTorch in one line: in_channel = 70 out_channel = 70 width = 80 kernel_size = 20 def torch_depthwise(inputs, filters): return F.conv1d(inputs, filters.view(out_channel, 1, kernel_size), groups=out_channel) # We can run this function as: inputs = torch.randn(in_channel, width) filters = torch.randn(out_channel, kernel_size) ret_torch = torch_depthwise(inputs, filters) # The `torch_depthwise` function, in a plain Python code, could be written as: def vanilla_depthwise(input, weight): ret = torch.zeros(out_channel, width - kernel_size + 1) for j in range(out_channel): for i in range(width - kernel_size + 1): for k in range(kernel_size): ret[j, i] += weight[j, k] * input[j, i + k] return ret # Then, we plan to optimize the `depthwise` function by leveraging the power of TVM. # TVM community proposes an embedded Domain Specific Language in Python called TVMScript, # which serves as the high-level frontend for TVM's Tensor IR. # The depthwise 1D convolution code above can be translated to TVMScript as follows. # We provide an `as_torch` decorator, which converts the TVMScript code to PyTorch's nn.Module automatically. @as_torch @T.prim_func def tvm_depthwise( A: T.Buffer((70, 80), "float32"), B: T.Buffer((70, 20), "float32"), C: T.Buffer((70, 61), "float32"), ) -> None: for j, i, k in T.grid(70, 61, 20): with T.block(): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vj, vi] = T.float32(0) C[vj, vi] += B[vj, vk] * A[vj, vi + vk] # We can build the TVMScript code by calling the `tune` method in default setting. # Without providing extra information, the model will be tuned for CPU. 
tvm_depthwise.tune() # We can print out the tuned TVMScript code to see how the program is transformed, as print(tvm_depthwise.script()) # We can verify that the two outputs are the same: ret_tvm = torch.zeros(out_channel, width - kernel_size + 1) tvm_depthwise(inputs, filters, ret_tvm) testing.assert_allclose(ret_torch.cpu().numpy(), ret_tvm.cpu().numpy(), atol=1e-5, rtol=1e-5) ###################################################################### # Benchmark # --------- results = [] for i in range(5): inputs = torch.randn(out_channel, width) filters = torch.randn(out_channel, kernel_size) res = torch.zeros(out_channel, width - kernel_size + 1) sub_label = f"[test {i}]" results.append( benchmark.Timer( stmt="tvm_depthwise(inputs, filters, res)", setup="from __main__ import tvm_depthwise", globals={"inputs": inputs, "filters": filters, "res": res}, sub_label=sub_label, description="TVMScript", ).blocked_autorange() ) results.append( benchmark.Timer( stmt="torch_depthwise(inputs, filters)", setup="from __main__ import torch_depthwise", globals={ "inputs": inputs, "filters": filters, }, sub_label=sub_label, description="PyTorch", ).blocked_autorange() ) compare = benchmark.Compare(results) compare.print() # In author's environment, the average inference time of `tvm_depthwise` is 120.0 us, # while the average inference time of `torch_depthwise` is 196.0 us (PyTorch version is 1.11.0), # showing the speedup of around 38%.
5,600
33.574074
109
py
tvm
tvm-main/gallery/how_to/work_with_pytorch/using_optimized_torch.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compile PyTorch Models ====================== **Author**: `Yaoda Zhou <https://github.com/juda>`_ This article is a tutorial to optimize PyTorch models by using decorator `optimize_torch`. To follow this tutorial, PyTorch, as well as TorchVision, should be installed: .. 
code-block:: bash %%shell pip install torch pip install torchvision """ # Import PyTorch # sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True # sphinx_gallery_end_ignore import torch import torch.nn as nn import torch.nn.functional as F # Import library for profiling import torch.utils.benchmark as benchmark from torchvision.models import resnet18 # Import `optimize_torch` function from tvm.contrib.torch import optimize_torch from tvm.meta_schedule import TuneConfig ###################################################################### # Define a simple module written by PyTorch # ----------------------------------------- class SimpleModel(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 20, 5) self.conv2 = nn.Conv2d(20, 20, 5) def forward(self, x): x = F.relu(self.conv1(x)) return F.relu(self.conv2(x)) ###################################################################### # Optimize SimpleModel by TVM MetaSchedule # ---------------------------------------- # We provide the `optimize_torch` function, which has the similar usage as `torch.jit.trace`. # The PyTorch model to optimize, along with its example input, are provided by users. # The PyTorch module will be tuned by TVM for the target hardware. # Without providing extra information, the model will be tuned for CPU. simple_model = SimpleModel() example_input = torch.randn(20, 1, 10, 10) model_optimized_by_tvm = optimize_torch(simple_model, example_input) ###################################################################### # Save/Load module # ---------------- # We can save and load our tuned module like the standard `nn.Module`. # Let us run our tuned module. ret1 = model_optimized_by_tvm(example_input) torch.save(model_optimized_by_tvm, "model_optimized.pt") model_loaded = torch.load("model_optimized.pt") # We load the module and run it again. 
ret2 = model_loaded(example_input) # We will show 2 results: # (1) we can safely load and save model by showing the result of model # after save and load operations is still the same as original one; # (2) the model we optimize returns the same result as the original PyTorch model. ret3 = simple_model(example_input) testing.assert_allclose(ret1.detach().numpy(), ret2.detach().numpy(), atol=1e-5, rtol=1e-5) testing.assert_allclose(ret1.detach().numpy(), ret3.detach().numpy(), atol=1e-5, rtol=1e-5) ###################################################################### # Optimize resnet18 # ----------------- # In the following, we will show that our approach is able to # accelerate common models, such as resnet18. # We will tune our model for the GPU. target_cuda = "nvidia/geforce-rtx-3070" # For PyTorch users, the code could be written as usual, except for # applying "optimize_torch" function on the resnet18 model. resnet18_tvm = optimize_torch( resnet18().cuda().eval(), [torch.rand(1, 3, 224, 224).cuda()], target=target_cuda ) # TorchScript also provides a built-in "optimize_for_inference" function to accelerate the inference. 
resnet18_torch = torch.jit.optimize_for_inference(torch.jit.script(resnet18().cuda().eval())) ###################################################################### # Compare the performance between two approaches # ---------------------------------------------- results = [] for i in range(5): test_input = torch.rand(1, 3, 224, 224).cuda() sub_label = f"[test {i}]" results.append( benchmark.Timer( stmt="resnet18_tvm(test_input)", setup="from __main__ import resnet18_tvm", globals={"test_input": test_input}, sub_label=sub_label, description="tuning by meta", ).blocked_autorange() ) results.append( benchmark.Timer( stmt="resnet18_torch(test_input)", setup="from __main__ import resnet18_torch", globals={"test_input": test_input}, sub_label=sub_label, description="tuning by jit", ).blocked_autorange() ) compare = benchmark.Compare(results) compare.print() # In author's environment, the average inference time of `resnet18_tvm` is 620.0 us, # while the average inference time of `resnet18_torch` is 980.0 us (PyTorch version is 1.11.0), # showing the speedup of around 38%.
5,431
34.272727
101
py
tvm
tvm-main/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Auto-tuning a Convolutional Network for Mobile GPU ================================================== **Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, `Eddie Yan <https://github.com/eqy>`_ Auto-tuning for a specific device is critical for getting the best performance. This is a tutorial about how to tune a whole convolutional network. The operator implementation for Mobile GPU in TVM is written in template form. The template has many tunable knobs (tile factor, vectorization, unrolling, etc). We will tune all convolution, depthwise convolution and dense operators in the neural network. After tuning, we produce a log file which stores the best knob values for all required operators. When the TVM compiler compiles these operators, it will query this log file to get the best knob values. We also released pre-tuned parameters for some arm devices. You can go to `Mobile GPU Benchmark <https://github.com/apache/tvm/wiki/Benchmark#mobile-gpu>`_ to see the results. Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. 
""" ###################################################################### # Install dependencies # -------------------- # To use the autotvm package in tvm, we need to install some extra dependencies. # (change "3" to "2" if you use python2): # # .. code-block:: bash # # pip3 install --user psutil xgboost tornado cloudpickle # # To make TVM run faster during tuning, it is recommended to use cython # as FFI of tvm. In the root directory of tvm, execute # (change "3" to "2" if you use python2): # # .. code-block:: bash # # pip3 install --user cython # sudo make cython3 # # Now return to python code. Import packages. import os import numpy as np import tvm from tvm import relay, autotvm import tvm.relay.testing from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner from tvm.contrib.utils import tempdir import tvm.contrib.graph_executor as runtime ################################################################# # Define network # -------------- # First we need to define the network in relay frontend API. # We can load some pre-defined network from :code:`relay.testing`. # We can also load models from MXNet, ONNX and TensorFlow. 
def get_network(name, batch_size): """Get the symbol definition and random weight of a network""" input_shape = (batch_size, 3, 224, 224) output_shape = (batch_size, 1000) if "resnet" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif "vgg" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.vgg.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif name == "mobilenet": mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype) elif name == "squeezenet_v1.1": mod, params = relay.testing.squeezenet.get_workload( batch_size=batch_size, version="1.1", dtype=dtype ) elif name == "inception_v3": input_shape = (batch_size, 3, 299, 299) mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype) elif name == "mxnet": # an example for mxnet model from mxnet.gluon.model_zoo.vision import get_model block = get_model("resnet18_v1", pretrained=True) mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype) net = mod["main"] net = relay.Function( net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs ) mod = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) return mod, params, input_shape, output_shape ################################################################# # .. _tutorials-autotvm-start-rpc-tracker: ################################################################# # Start RPC Tracker # ----------------- # TVM uses RPC session to communicate with ARM boards. # During tuning, the tuner will send the generated code to the board and # measure the speed of code on the board. # # To scale up the tuning, TVM uses RPC Tracker to manage distributed devices. # The RPC Tracker is a centralized controller node. We can register all devices to # the tracker. 
For example, if we have 10 phones, we can register all of them # to the tracker, and run 10 measurements in parallel, accelerating the tuning process. # # To start an RPC tracker, run this command on the host machine. The tracker is # required during the whole tuning process, so we need to open a new terminal for # this command: # # .. code-block:: bash # # python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190 # # The expected output is # # .. code-block:: bash # # INFO:RPCTracker:bind to 0.0.0.0:9190 ################################################################# # Register Devices to RPC Tracker # ----------------------------------- # Now we can register our devices to the tracker. The first step is to # build the TVM runtime for the ARM devices. # # * For Linux: # Follow this section :ref:`build-tvm-runtime-on-device` to build # the TVM runtime on the device. Then register the device to tracker by # # .. code-block:: bash # # python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=rk3399 # # (replace :code:`[HOST_IP]` with the IP address of your host machine) # # * For Android: # Follow this `readme page <https://github.com/apache/tvm/tree/main/apps/android_rpc>`_ to # install TVM RPC APK on the android device. Make sure you can pass the android RPC test. # Then you have already registered your device. During tuning, you have to go to developer option # and enable "Keep screen awake during changing" and charge your phone to make it stable. # # After registering devices, we can confirm it by querying rpc_tracker # # .. code-block:: bash # # python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190 # # For example, if we have 2 Huawei mate10 pro, 11 Raspberry Pi 3B and 2 rk3399, # the output can be # # .. 
code-block:: bash # # Queue Status # ---------------------------------- # key total free pending # ---------------------------------- # mate10pro 2 2 0 # rk3399 2 2 0 # rpi3b 11 11 0 # ---------------------------------- # # You can register multiple devices to the tracker to accelerate the measurement in tuning. ########################################### # Set Tuning Options # ------------------ # Before tuning, we should apply some configurations. Here I use an RK3399 board # as example. In your setting, you should modify the target and device_key accordingly. # set :code:`use_android` to True if you use android phone. #### DEVICE CONFIG #### # Replace "aarch64-linux-gnu" with the correct target of your board. # This target host is used for cross compilation. You can query it by :code:`gcc -v` on your device. target = tvm.target.Target("opencl -device=mali", host="llvm -mtriple=aarch64-linux-gnu") # Also replace this with the device key in your tracker device_key = "rk3399" # Set this to True if you use android phone use_android = False #### TUNING OPTION #### network = "resnet-18" log_file = "%s.%s.log" % (device_key, network) dtype = "float32" tuning_option = { "log_filename": log_file, "tuner": "xgb", "n_trial": 1000, "early_stopping": 450, "measure_option": autotvm.measure_option( builder=autotvm.LocalBuilder(build_func="ndk" if use_android else "default"), runner=autotvm.RPCRunner( device_key, host="127.0.0.1", port=9190, number=10, timeout=5, ), ), } #################################################################### # # .. note:: How to set tuning options # # In general, the default values provided here work well. # If you have enough time budget, you can set :code:`n_trial`, :code:`early_stopping` larger, # which makes the tuning run longer. # If your device runs very slow or your conv2d operators have many GFLOPs, considering to # set timeout larger. 
# ################################################################### # Begin Tuning # ------------ # Now we can extract tuning tasks from the network and begin tuning. # Here, we provide a simple utility function to tune a list of tasks. # This function is just an initial implementation which tunes them in sequential order. # We will introduce a more sophisticated tuning scheduler in the future. # You can skip the implementation of this function for this tutorial. def tune_tasks( tasks, measure_option, tuner="xgb", n_trial=1000, early_stopping=None, log_filename="tuning.log", use_transfer_learning=True, ): # create tmp log file tmp_log_file = log_filename + ".tmp" if os.path.exists(tmp_log_file): os.remove(tmp_log_file) for i, tsk in enumerate(reversed(tasks)): prefix = "[Task %2d/%2d] " % (i + 1, len(tasks)) # create tuner if tuner == "xgb": tuner_obj = XGBTuner(tsk, loss_type="reg") elif tuner == "xgb_knob": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="knob") elif tuner == "xgb_itervar": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="itervar") elif tuner == "xgb_curve": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="curve") elif tuner == "xgb_rank": tuner_obj = XGBTuner(tsk, loss_type="rank") elif tuner == "xgb_rank_knob": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob") elif tuner == "xgb_rank_itervar": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="itervar") elif tuner == "xgb_rank_curve": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="curve") elif tuner == "xgb_rank_binary": tuner_obj = XGBTuner(tsk, loss_type="rank-binary") elif tuner == "xgb_rank_binary_knob": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="knob") elif tuner == "xgb_rank_binary_itervar": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="itervar") elif tuner == "xgb_rank_binary_curve": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="curve") elif tuner == "ga": tuner_obj = 
GATuner(tsk, pop_size=50) elif tuner == "random": tuner_obj = RandomTuner(tsk) elif tuner == "gridsearch": tuner_obj = GridSearchTuner(tsk) else: raise ValueError("Invalid tuner: " + tuner) if use_transfer_learning: if os.path.isfile(tmp_log_file): tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file)) # do tuning tsk_trial = min(n_trial, len(tsk.config_space)) tuner_obj.tune( n_trial=tsk_trial, early_stopping=early_stopping, measure_option=measure_option, callbacks=[ autotvm.callback.progress_bar(tsk_trial, prefix=prefix), autotvm.callback.log_to_file(tmp_log_file), ], ) # pick best records to a cache file autotvm.record.pick_best(tmp_log_file, log_filename) os.remove(tmp_log_file) ######################################################################## # Finally, we launch tuning jobs and evaluate the end-to-end performance. def tune_and_evaluate(tuning_opt): # extract workloads from relay program print("Extract tasks...") mod, params, input_shape, _ = get_network(network, batch_size=1) tasks = autotvm.task.extract_from_program( mod["main"], target=target, params=params, ops=(relay.op.get("nn.conv2d"),), ) # run tuning tasks print("Tuning...") tune_tasks(tasks, **tuning_opt) # compile kernels with history best records with autotvm.apply_history_best(log_file): print("Compile...") with tvm.transform.PassContext(opt_level=3): lib = relay.build_module.build(mod, target=target, params=params) # export library tmp = tempdir() if use_android: from tvm.contrib import ndk filename = "net.so" lib.export_library(tmp.relpath(filename), ndk.create_shared) else: filename = "net.tar" lib.export_library(tmp.relpath(filename)) # upload module to device print("Upload...") remote = autotvm.measure.request_remote(device_key, "127.0.0.1", 9190, timeout=10000) remote.upload(tmp.relpath(filename)) rlib = remote.load_module(filename) # upload parameters to device dev = remote.device(str(target), 0) module = runtime.GraphModule(rlib["default"](dev)) data_tvm = 
tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype)) module.set_input("data", data_tvm) # evaluate print("Evaluate inference time cost...") print(module.benchmark(dev, number=1, repeat=30)) # We do not run the tuning in our webpage server since it takes too long. # Uncomment the following line to run it by yourself. # tune_and_evaluate(tuning_option) ###################################################################### # Sample Output # ------------- # The tuning needs to compile many programs and extract feature from them. # So a high performance CPU is recommended. # One sample output is listed below. It takes about 3 hours on a 32T AMD Ryzen Threadripper. # # .. code-block:: bash # # Extract tasks... # Tuning... # [Task 1/17] Current/Best: 25.30/ 39.12 GFLOPS | Progress: (992/1000) | 751.22 s Done. # [Task 2/17] Current/Best: 40.70/ 45.50 GFLOPS | Progress: (736/1000) | 545.46 s Done. # [Task 3/17] Current/Best: 38.83/ 42.35 GFLOPS | Progress: (992/1000) | 1549.85 s Done. # [Task 4/17] Current/Best: 23.31/ 31.02 GFLOPS | Progress: (640/1000) | 1059.31 s Done. # [Task 5/17] Current/Best: 0.06/ 2.34 GFLOPS | Progress: (544/1000) | 305.45 s Done. # [Task 6/17] Current/Best: 10.97/ 17.20 GFLOPS | Progress: (992/1000) | 1050.00 s Done. # [Task 7/17] Current/Best: 8.98/ 10.94 GFLOPS | Progress: (928/1000) | 421.36 s Done. # [Task 8/17] Current/Best: 4.48/ 14.86 GFLOPS | Progress: (704/1000) | 582.60 s Done. # [Task 9/17] Current/Best: 10.30/ 25.99 GFLOPS | Progress: (864/1000) | 899.85 s Done. # [Task 10/17] Current/Best: 11.73/ 12.52 GFLOPS | Progress: (608/1000) | 304.85 s Done. # [Task 11/17] Current/Best: 15.26/ 18.68 GFLOPS | Progress: (800/1000) | 747.52 s Done. # [Task 12/17] Current/Best: 17.48/ 26.71 GFLOPS | Progress: (1000/1000) | 1166.40 s Done. # [Task 13/17] Current/Best: 0.96/ 11.43 GFLOPS | Progress: (960/1000) | 611.65 s Done. # [Task 14/17] Current/Best: 17.88/ 20.22 GFLOPS | Progress: (672/1000) | 670.29 s Done. 
# [Task 15/17] Current/Best: 11.62/ 13.98 GFLOPS | Progress: (736/1000) | 449.25 s Done. # [Task 16/17] Current/Best: 19.90/ 23.83 GFLOPS | Progress: (608/1000) | 708.64 s Done. # [Task 17/17] Current/Best: 17.98/ 22.75 GFLOPS | Progress: (736/1000) | 1122.60 s Done. # Compile... # Upload... # Evaluate inference time cost... # Mean inference time (std dev): 128.05 ms (7.74 ms) # ###################################################################### # # .. note:: **Experiencing Difficulties?** # # The auto tuning module is error-prone. If you always see " 0.00/ 0.00 GFLOPS", # then there must be something wrong. # # First, make sure you set the correct configuration of your device. # Then, you can print debug information by adding these lines in the beginning # of the script. It will print every measurement result, where you can find useful # error messages. # # .. code-block:: python # # import logging # logging.getLogger('autotvm').setLevel(logging.DEBUG) # # Finally, always feel free to ask our community for help on https://discuss.tvm.apache.org
17,315
38.534247
100
py
tvm
tvm-main/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Tuning High Performance Convolution on NVIDIA GPUs ========================================================================= **Author**: `Lianmin Zheng <https://github.com/merrymercy>`_ This is an advanced tutorial for writing high performance tunable template for NVIDIA GPU. By running auto-tuner on this template, we can outperform the vendor provided library CuDNN in many cases. Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. """ ###################################################################### # Install dependencies # -------------------- # To use autotvm package in tvm, we need to install some extra dependencies. # (change "3" to "2" if you use python2): # # .. code-block:: bash # # pip3 install --user psutil xgboost tornado cloudpickle # # To make TVM run faster in tuning, it is recommended to use cython # as FFI of tvm. In the root directory of tvm, execute # # .. code-block:: bash # # pip3 install --user cython # sudo make cython3 # # Now return to python code. Import packages. 
# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True # sphinx_gallery_end_ignore import logging import sys import numpy as np import tvm from tvm import te, topi, testing from tvm.topi.testing import conv2d_nchw_python import tvm.testing from tvm import autotvm ###################################################################### # Step 1: Define the search space # -------------------------------- # There are plenty of useful schedule primitives in tvm. You can also find # some tutorials that describe them in more details, such as # (1). :ref:`opt-conv-gpu` # (2). `Optimizing DepthwiseConv on NVIDIA GPU <https://tvm.apache.org/2017/08/22/Optimize-Deep-Learning-GPU-Operators-with-TVM-A-Depthwise-Convolution-Example>`_ # # However, their implementations are manually tuned for some special input # shapes. In this section, we build a large enough space to cover # the techniques used in these tutorials. Then we rely on the efficient auto-tuner # to search through this space and pick some good configurations. # # If you are familiar with writing cuda schedule, you can find the following # template is very general. Actually this template can be easily modified # to tune other operators such as depthwise convolution and GEMM. # In order to fully understand this template, you should be familiar with # the schedule primitives and auto tuning API. 
You can refer to the above # tutorials and :ref:`autotvm tutorial <tutorial-autotvm-matmul-x86>` # # It is worth noting that the search space for a conv2d operator # can be very large (at the level of 10^9 for some input shapes) # @autotvm.template("tutorial/conv2d_no_batching") def conv2d_no_batching(N, H, W, CO, CI, KH, KW, stride, padding): assert N == 1, "Only consider batch_size = 1 in this template" data = te.placeholder((N, CI, H, W), name="data") kernel = te.placeholder((CO, CI, KH, KW), name="kernel") conv = topi.nn.conv2d_nchw(data, kernel, stride, padding, dilation=1, out_dtype="float32") s = te.create_schedule([conv.op]) ##### space definition begin ##### n, f, y, x = s[conv].op.axis rc, ry, rx = s[conv].op.reduce_axis cfg = autotvm.get_config() cfg.define_split("tile_f", f, num_outputs=4) cfg.define_split("tile_y", y, num_outputs=4) cfg.define_split("tile_x", x, num_outputs=4) cfg.define_split("tile_rc", rc, num_outputs=3) cfg.define_split("tile_ry", ry, num_outputs=3) cfg.define_split("tile_rx", rx, num_outputs=3) cfg.define_knob("auto_unroll_max_step", [0, 512, 1500]) cfg.define_knob("unroll_explicit", [0, 1]) ##### space definition end ##### # inline padding pad_data = s[conv].op.input_tensors[0] s[pad_data].compute_inline() data, raw_data = pad_data, data output = conv OL = s.cache_write(conv, "local") # create cache stage AA = s.cache_read(data, "shared", [OL]) WW = s.cache_read(kernel, "shared", [OL]) AL = s.cache_read(AA, "local", [OL]) WL = s.cache_read(WW, "local", [OL]) # tile and bind spatial axes n, f, y, x = s[output].op.axis bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f) by, vy, ty, yi = cfg["tile_y"].apply(s, output, y) bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x) kernel_scope = n # this is the scope to attach global config inside this kernel s[output].bind(bf, te.thread_axis("blockIdx.z")) s[output].bind(by, te.thread_axis("blockIdx.y")) s[output].bind(bx, te.thread_axis("blockIdx.x")) s[output].bind(vf, 
te.thread_axis("vthread")) s[output].bind(vy, te.thread_axis("vthread")) s[output].bind(vx, te.thread_axis("vthread")) s[output].bind(tf, te.thread_axis("threadIdx.z")) s[output].bind(ty, te.thread_axis("threadIdx.y")) s[output].bind(tx, te.thread_axis("threadIdx.x")) s[output].reorder(n, bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi) s[OL].compute_at(s[output], tx) # tile reduction axes n, f, y, x = s[OL].op.axis rc, ry, rx = s[OL].op.reduce_axis rco, rcm, rci = cfg["tile_rc"].apply(s, OL, rc) ryo, rym, ryi = cfg["tile_rx"].apply(s, OL, ry) rxo, rxm, rxi = cfg["tile_ry"].apply(s, OL, rx) s[OL].reorder(rco, ryo, rxo, rcm, rym, rxm, rci, ryi, rxi, n, f, y, x) s[AA].compute_at(s[OL], rxo) s[WW].compute_at(s[OL], rxo) s[AL].compute_at(s[OL], rxm) s[WL].compute_at(s[OL], rxm) # cooperative fetching for load in [AA, WW]: n, f, y, x = s[load].op.axis fused = s[load].fuse(n, f, y, x) tz, fused = s[load].split(fused, nparts=cfg["tile_f"].size[2]) ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2]) tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2]) s[load].bind(tz, te.thread_axis("threadIdx.z")) s[load].bind(ty, te.thread_axis("threadIdx.y")) s[load].bind(tx, te.thread_axis("threadIdx.x")) # tune unroll s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val) s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val) return s, [raw_data, kernel, conv] ###################################################################### # Step 2: Search through the space # --------------------------------- # We pick the last layer on resnet as test case. # Since our space is very large, :code:`XGBoostTuner` is most suitable # for our case. Here we only do 20 trials for demonstration. 
# In practice, making 1000 trials usually can find some good kernels # for this template # logging config (for printing tuning log to screen) logging.getLogger("autotvm").setLevel(logging.DEBUG) logging.getLogger("autotvm").addHandler(logging.StreamHandler(sys.stdout)) # the last layer in resnet N, H, W, CO, CI, KH, KW, strides, padding = 1, 7, 7, 512, 512, 3, 3, (1, 1), (1, 1) task = autotvm.task.create( "tutorial/conv2d_no_batching", args=(N, H, W, CO, CI, KH, KW, strides, padding), target="cuda" ) print(task.config_space) # Use local gpu, measure 10 times for every config to reduce variance # The timeout of compiling a program is 10 seconds, the timeout for running is 4 seconds measure_option = autotvm.measure_option( builder=autotvm.LocalBuilder(), runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4), ) record_file = None # Begin tuning, log records to file `conv2d.log` # During tuning we will also try many invalid configs, so you are expected to # see many error reports. As long as you can see non-zero GFLOPS, it is okay. # We do not run the tuning in our webpage server since it takes too long. # Uncomment the following lines to run it by yourself. # tuner = autotvm.tuner.XGBTuner(task) # record_file = "conv2d.log" # tuner.tune( # n_trial=5, # measure_option=measure_option, # callbacks=[autotvm.callback.log_to_file(record_file)], # ) ######################################################################### # Finally we can inspect the best config from log file, check correctness, # and measure running time. 
# inspect the best config dispatch_context = autotvm.apply_history_best(record_file) best_config = dispatch_context.query(task.target, task.workload) print("\nBest config:") print(best_config) # apply history best from log file with autotvm.apply_history_best(record_file): with tvm.target.Target("cuda"): s, arg_bufs = conv2d_no_batching(N, H, W, CO, CI, KH, KW, strides, padding) func = tvm.build(s, arg_bufs) # check correctness a_np = np.random.uniform(size=(N, CI, H, W)).astype(np.float32) w_np = np.random.uniform(size=(CO, CI, KH, KW)).astype(np.float32) c_np = conv2d_nchw_python(a_np, w_np, strides, padding) dev = tvm.cuda() a_tvm = tvm.nd.array(a_np, device=dev) w_tvm = tvm.nd.array(w_np, device=dev) c_tvm = tvm.nd.empty(c_np.shape, device=dev) func(a_tvm, w_tvm, c_tvm) tvm.testing.assert_allclose(c_np, c_tvm.numpy(), rtol=1e-2) # Evaluate running time. Here we choose a large repeat number (400) to reduce the noise # and the overhead of kernel launch. You can also use nvprof to validate the result. evaluator = func.time_evaluator(func.entry_name, dev, number=400) print("Time cost of this operator: %f" % evaluator(a_tvm, w_tvm, c_tvm).mean)
10,170
39.043307
162
py
tvm
tvm-main/gallery/how_to/tune_with_autotvm/tune_relay_arm.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tune_relay_arm: Auto-tuning a Convolutional Network for ARM CPU =============================================== **Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, `Zhao Wu <https://github.com/FrozenGene>`_, `Eddie Yan <https://github.com/eqy>`_ Auto-tuning for a specific ARM device is critical for getting the best performance. This is a tutorial about how to tune a whole convolutional network. The operator implementation for ARM CPU in TVM is written in template form. The template has many tunable knobs (tile factor, vectorization, unrolling, etc). We will tune all convolution and depthwise convolution operators in the neural network. After tuning, we produce a log file which stores the best knob values for all required operators. When the TVM compiler compiles these operators, it will query this log file to get the best knob values. We also released pre-tuned parameters for some arm devices. You can go to `ARM CPU Benchmark <https://github.com/apache/tvm/wiki/Benchmark#arm-cpu>`_ to see the results. Note that this tutorial will not run on Windows or recent versions of macOS. 
To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. """ ###################################################################### # Install dependencies # -------------------- # To use the autotvm package in tvm, we need to install some extra dependencies. # (change "3" to "2" if you use python2): # # .. code-block:: bash # # pip3 install --user psutil xgboost tornado cloudpickle # # To make TVM run faster during tuning, it is recommended to use cython # as FFI of TVM. In the root directory of TVM, execute # (change "3" to "2" if you use python2): # # .. code-block:: bash # # pip3 install --user cython # sudo make cython3 # # Now return to python code. Import packages. import os import numpy as np import tvm from tvm import relay, autotvm import tvm.relay.testing from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner from tvm.contrib.utils import tempdir import tvm.contrib.graph_executor as runtime ################################################################# # Define network # -------------- # First we need to define the network in relay frontend API. # We can load some pre-defined network from :code:`relay.testing`. # We can also load models from MXNet, ONNX and TensorFlow. 
def get_network(name, batch_size): """Get the symbol definition and random weight of a network""" input_shape = (batch_size, 3, 224, 224) output_shape = (batch_size, 1000) if "resnet" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif "vgg" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.vgg.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif name == "mobilenet": mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size) elif name == "squeezenet_v1.1": mod, params = relay.testing.squeezenet.get_workload( batch_size=batch_size, version="1.1", dtype=dtype ) elif name == "inception_v3": input_shape = (batch_size, 3, 299, 299) mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype) elif name == "mxnet": # an example for mxnet model from mxnet.gluon.model_zoo.vision import get_model block = get_model("resnet18_v1", pretrained=True) mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype) net = mod["main"] net = relay.Function( net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs ) mod = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) return mod, params, input_shape, output_shape ################################################################# # Start RPC Tracker # ----------------- # TVM uses RPC session to communicate with ARM boards. # During tuning, the tuner will send the generated code to the board and # measure the speed of code on the board. # # To scale up the tuning, TVM uses RPC Tracker to manage distributed devices. # The RPC Tracker is a centralized controller node. We can register all devices to # the tracker. For example, if we have 10 phones, we can register all of them # to the tracker, and run 10 measurements in parallel, accelerating the tuning process. 
# # To start an RPC tracker, run this command on the host machine. The tracker is # required during the whole tuning process, so we need to open a new terminal for # this command: # # .. code-block:: bash # # python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190 # # The expected output is # # .. code-block:: bash # # INFO:RPCTracker:bind to 0.0.0.0:9190 ################################################################# # Register Devices to RPC Tracker # ----------------------------------- # Now we can register our devices to the tracker. The first step is to # build the TVM runtime for the ARM devices. # # * For Linux: # Follow this section :ref:`build-tvm-runtime-on-device` to build # the TVM runtime on the device. Then register the device to tracker by # # .. code-block:: bash # # python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=rk3399 # # (replace :code:`[HOST_IP]` with the IP address of your host machine) # # * For Android: # Follow this `readme page <https://github.com/apache/tvm/tree/main/apps/android_rpc>`_ to # install the TVM RPC APK on the android device. Make sure you can pass the android rpc test. # Then you have already registered your device. During tuning, you have to go to developer option # and enable "Keep screen awake during changing" and charge your phone to make it stable. # # After registering devices, we can confirm it by querying rpc_tracker # # .. code-block:: bash # # python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190 # # For example, if we have 2 Huawei mate10 pro, 11 Raspberry Pi 3B and 2 rk3399, # the output can be # # .. code-block:: bash # # Queue Status # ---------------------------------- # key total free pending # ---------------------------------- # mate10pro 2 2 0 # rk3399 2 2 0 # rpi3b 11 11 0 # ---------------------------------- # # You can register multiple devices to the tracker to accelerate the measurement in tuning. 
########################################### # Set Tuning Options # ------------------ # Before tuning, we should apply some configurations. Here I use an RK3399 board # as example. In your setting, you should modify the target and device_key accordingly. # set :code:`use_android` to True if you use android phone. #### DEVICE CONFIG #### # Replace "aarch64-linux-gnu" with the correct target of your board. # This target is used for cross compilation. You can query it by :code:`gcc -v` on your device. target = tvm.target.Target("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu") # Also replace this with the device key in your tracker device_key = "rk3399" # Set this to True if you use android phone use_android = False #### TUNING OPTION #### network = "resnet-18" log_file = "%s.%s.log" % (device_key, network) dtype = "float32" tuning_option = { "log_filename": log_file, "tuner": "xgb", "n_trial": 1500, "early_stopping": 800, "measure_option": autotvm.measure_option( builder=autotvm.LocalBuilder(build_func="ndk" if use_android else "default"), runner=autotvm.RPCRunner( device_key, host="127.0.0.1", port=9190, number=5, timeout=10, ), ), } #################################################################### # # .. note:: How to set tuning options # # In general, the default values provided here work well. # If you have enough time budget, you can set :code:`n_trial`, :code:`early_stopping` larger, # which makes the tuning run longer. # If your device runs very slow or your conv2d operators have many GFLOPs, considering to # set timeout larger. # # If your model has depthwise convolution, you could consider setting # :code:`try_spatial_pack_depthwise` be :code:`True`, which perform better than default # optimization in general. For example, on ARM CPU A53 2.0GHz, we find it could boost 1.6x # performance of depthwise convolution on Mobilenet V1 model. 
################################################################### # Begin Tuning # ------------ # Now we can extract tuning tasks from the network and begin tuning. # Here, we provide a simple utility function to tune a list of tasks. # This function is just an initial implementation which tunes them in sequential order. # We will introduce a more sophisticated tuning scheduler in the future. # You can skip the implementation of this function for this tutorial. def tune_tasks( tasks, measure_option, tuner="xgb", n_trial=1000, early_stopping=None, log_filename="tuning.log", use_transfer_learning=True, ): # create tmp log file tmp_log_file = log_filename + ".tmp" if os.path.exists(tmp_log_file): os.remove(tmp_log_file) for i, tsk in enumerate(reversed(tasks)): prefix = "[Task %2d/%2d] " % (i + 1, len(tasks)) # create tuner if tuner == "xgb": tuner_obj = XGBTuner(tsk, loss_type="reg") elif tuner == "xgb_knob": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="knob") elif tuner == "xgb_itervar": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="itervar") elif tuner == "xgb_curve": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="curve") elif tuner == "xgb_rank": tuner_obj = XGBTuner(tsk, loss_type="rank") elif tuner == "xgb_rank_knob": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob") elif tuner == "xgb_rank_itervar": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="itervar") elif tuner == "xgb_rank_curve": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="curve") elif tuner == "xgb_rank_binary": tuner_obj = XGBTuner(tsk, loss_type="rank-binary") elif tuner == "xgb_rank_binary_knob": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="knob") elif tuner == "xgb_rank_binary_itervar": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="itervar") elif tuner == "xgb_rank_binary_curve": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="curve") elif tuner == "ga": tuner_obj = 
GATuner(tsk, pop_size=50) elif tuner == "random": tuner_obj = RandomTuner(tsk) elif tuner == "gridsearch": tuner_obj = GridSearchTuner(tsk) else: raise ValueError("Invalid tuner: " + tuner) if use_transfer_learning: if os.path.isfile(tmp_log_file): tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file)) # process tuning tsk_trial = min(n_trial, len(tsk.config_space)) tuner_obj.tune( n_trial=tsk_trial, early_stopping=early_stopping, measure_option=measure_option, callbacks=[ autotvm.callback.progress_bar(tsk_trial, prefix=prefix), autotvm.callback.log_to_file(tmp_log_file), ], ) # pick best records to a cache file autotvm.record.pick_best(tmp_log_file, log_filename) os.remove(tmp_log_file) ######################################################################## # Finally, we launch tuning jobs and evaluate the end-to-end performance. def tune_and_evaluate(tuning_opt): # extract workloads from relay program print("Extract tasks...") mod, params, input_shape, _ = get_network(network, batch_size=1) tasks = autotvm.task.extract_from_program( mod["main"], target=target, params=params, ops=(relay.op.get("nn.conv2d"),) ) # run tuning tasks print("Tuning...") tune_tasks(tasks, **tuning_opt) # compile kernels with history best records with autotvm.apply_history_best(log_file): print("Compile...") with tvm.transform.PassContext(opt_level=3): lib = relay.build_module.build(mod, target=target, params=params) # export library tmp = tempdir() if use_android: from tvm.contrib import ndk filename = "net.so" lib.export_library(tmp.relpath(filename), ndk.create_shared) else: filename = "net.tar" lib.export_library(tmp.relpath(filename)) # upload module to device print("Upload...") remote = autotvm.measure.request_remote(device_key, "127.0.0.1", 9190, timeout=10000) remote.upload(tmp.relpath(filename)) rlib = remote.load_module(filename) # upload parameters to device dev = remote.device(str(target), 0) module = runtime.GraphModule(rlib["default"](dev)) data_tvm = 
tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype)) module.set_input("data", data_tvm) # evaluate print("Evaluate inference time cost...") print(module.benchmark(dev, number=1, repeat=10)) # We do not run the tuning in our webpage server since it takes too long. # Uncomment the following line to run it by yourself. # tune_and_evaluate(tuning_option) ###################################################################### # Sample Output # ------------- # The tuning needs to compile many programs and extract feature from them. # So a high performance CPU is recommended. # One sample output is listed below. # It takes about 2 hours on a 32T AMD Ryzen Threadripper. # # .. code-block:: bash # # Extract tasks... # Tuning... # [Task 1/12] Current/Best: 22.37/ 52.19 GFLOPS | Progress: (544/1000) | 406.59 s Done. # [Task 2/12] Current/Best: 6.51/ 18.77 GFLOPS | Progress: (608/1000) | 325.05 s Done. # [Task 3/12] Current/Best: 4.67/ 24.87 GFLOPS | Progress: (480/1000) | 372.31 s Done. # [Task 4/12] Current/Best: 11.35/ 46.83 GFLOPS | Progress: (736/1000) | 602.39 s Done. # [Task 5/12] Current/Best: 1.01/ 19.80 GFLOPS | Progress: (448/1000) | 262.16 s Done. # [Task 6/12] Current/Best: 2.47/ 23.76 GFLOPS | Progress: (672/1000) | 563.85 s Done. # [Task 7/12] Current/Best: 14.57/ 33.97 GFLOPS | Progress: (544/1000) | 465.15 s Done. # [Task 8/12] Current/Best: 1.13/ 17.65 GFLOPS | Progress: (576/1000) | 365.08 s Done. # [Task 9/12] Current/Best: 14.45/ 22.66 GFLOPS | Progress: (928/1000) | 724.25 s Done. # [Task 10/12] Current/Best: 3.22/ 15.36 GFLOPS | Progress: (864/1000) | 564.27 s Done. # [Task 11/12] Current/Best: 11.03/ 32.23 GFLOPS | Progress: (736/1000) | 635.15 s Done. # [Task 12/12] Current/Best: 8.00/ 21.65 GFLOPS | Progress: (1000/1000) | 1111.81 s Done. # Compile... # Upload... # Evaluate inference time cost... # Mean inference time (std dev): 162.59 ms (0.06 ms) ###################################################################### # # .. 
note:: **Experiencing Difficulties?** # # The auto tuning module is error-prone. If you always see " 0.00/ 0.00 GFLOPS", # then there must be something wrong. # # First, make sure you set the correct configuration of your device. # Then, you can print debug information by adding these lines in the beginning # of the script. It will print every measurement result, where you can find useful # error messages. # # .. code-block:: python # # import logging # logging.getLogger('autotvm').setLevel(logging.DEBUG) # # Finally, always feel free to ask our community for help on https://discuss.tvm.apache.org
17,042
38.269585
143
py
tvm
tvm-main/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Auto-tuning a Convolutional Network for NVIDIA GPU ================================================== **Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, `Eddie Yan <https://github.com/eqy/>`_ Auto-tuning for specific devices and workloads is critical for getting the best performance. This is a tutorial on how to tune a whole convolutional network for NVIDIA GPU. The operator implementation for NVIDIA GPU in TVM is written in template form. The template has many tunable knobs (tile factor, unrolling, etc). We will tune all convolution and depthwise convolution operators in the neural network. After tuning, we produce a log file which stores the best knob values for all required operators. When the TVM compiler compiles these operators, it will query this log file to get the best knob values. We also released pre-tuned parameters for some NVIDIA GPUs. You can go to `NVIDIA GPU Benchmark <https://github.com/apache/tvm/wiki/Benchmark#nvidia-gpu>`_ to see the results. Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. 
""" ###################################################################### # Install dependencies # -------------------- # To use the autotvm package in tvm, we need to install some extra dependencies. # (change "3" to "2" if you use python2): # # .. code-block:: bash # # pip3 install --user psutil xgboost tornado cloudpickle # # To make TVM run faster during tuning, it is recommended to use cython # as FFI of tvm. In the root directory of tvm, execute: # # .. code-block:: bash # # pip3 install --user cython # sudo make cython3 # # Now return to python code. Import packages. # sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True # sphinx_gallery_end_ignore import os import numpy as np import tvm from tvm import relay, autotvm import tvm.relay.testing from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner import tvm.contrib.graph_executor as runtime ################################################################# # Define Network # -------------- # First we need to define the network in relay frontend API. # We can load some pre-defined network from :code:`tvm.relay.testing`. # We can also load models from MXNet, ONNX and TensorFlow. 
def get_network(name, batch_size): """Get the symbol definition and random weight of a network""" input_shape = (batch_size, 3, 224, 224) output_shape = (batch_size, 1000) if "resnet" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif "vgg" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.vgg.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif name == "mobilenet": mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype) elif name == "squeezenet_v1.1": mod, params = relay.testing.squeezenet.get_workload( batch_size=batch_size, version="1.1", dtype=dtype ) elif name == "inception_v3": input_shape = (batch_size, 3, 299, 299) mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype) elif name == "mxnet": # an example for mxnet model from mxnet.gluon.model_zoo.vision import get_model block = get_model("resnet18_v1", pretrained=True) mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype) net = mod["main"] net = relay.Function( net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs ) mod = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) return mod, params, input_shape, output_shape ########################################### # Set Tuning Options # ------------------ # Before tuning, we apply some configurations. 
#### DEVICE CONFIG #### target = tvm.target.cuda() #### TUNING OPTION #### network = "resnet-18" log_file = "%s.log" % network dtype = "float32" tuning_option = { "log_filename": log_file, "tuner": "xgb", "n_trial": 2000, "early_stopping": 600, "measure_option": autotvm.measure_option( builder=autotvm.LocalBuilder(timeout=10), runner=autotvm.LocalRunner(number=20, repeat=3, timeout=4, min_repeat_ms=150), ), } #################################################################### # # .. note:: How to set tuning options # # In general, the default value provided here works well. # # If you have large time budget, you can set :code:`n_trial`, :code:`early_stopping` larger, # which makes the tuning runs longer. # # If you have multiple devices, you can use all of them for measurement to # accelerate the tuning process. (see the 'Scale up measurement` section below). # ################################################################### # Begin Tuning # ------------ # Now we can extract tuning tasks from the network and begin tuning. # Here, we provide a simple utility function to tune a list of tasks. # This function is just an initial implementation which tunes them in sequential order. # We will introduce a more sophisticated tuning scheduler in the future. # You can skip the implementation of this function for this tutorial. 
def tune_tasks( tasks, measure_option, tuner="xgb", n_trial=1000, early_stopping=None, log_filename="tuning.log", use_transfer_learning=True, ): # create tmp log file tmp_log_file = log_filename + ".tmp" if os.path.exists(tmp_log_file): os.remove(tmp_log_file) for i, tsk in enumerate(reversed(tasks)): prefix = "[Task %2d/%2d] " % (i + 1, len(tasks)) # create tuner if tuner == "xgb": tuner_obj = XGBTuner(tsk, loss_type="reg") elif tuner == "xgb_knob": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="knob") elif tuner == "xgb_itervar": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="itervar") elif tuner == "xgb_curve": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="curve") elif tuner == "xgb_rank": tuner_obj = XGBTuner(tsk, loss_type="rank") elif tuner == "xgb_rank_knob": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob") elif tuner == "xgb_rank_itervar": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="itervar") elif tuner == "xgb_rank_curve": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="curve") elif tuner == "xgb_rank_binary": tuner_obj = XGBTuner(tsk, loss_type="rank-binary") elif tuner == "xgb_rank_binary_knob": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="knob") elif tuner == "xgb_rank_binary_itervar": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="itervar") elif tuner == "xgb_rank_binary_curve": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="curve") elif tuner == "ga": tuner_obj = GATuner(tsk, pop_size=100) elif tuner == "random": tuner_obj = RandomTuner(tsk) elif tuner == "gridsearch": tuner_obj = GridSearchTuner(tsk) else: raise ValueError("Invalid tuner: " + tuner) if use_transfer_learning: if os.path.isfile(tmp_log_file): tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file)) # do tuning tsk_trial = min(n_trial, len(tsk.config_space)) tuner_obj.tune( n_trial=tsk_trial, early_stopping=early_stopping, 
measure_option=measure_option, callbacks=[ autotvm.callback.progress_bar(tsk_trial, prefix=prefix), autotvm.callback.log_to_file(tmp_log_file), ], ) # pick best records to a cache file autotvm.record.pick_best(tmp_log_file, log_filename) os.remove(tmp_log_file) ######################################################################## # Finally, we launch tuning jobs and evaluate the end-to-end performance. def tune_and_evaluate(tuning_opt): # extract workloads from relay program print("Extract tasks...") mod, params, input_shape, out_shape = get_network(network, batch_size=1) tasks = autotvm.task.extract_from_program( mod["main"], target=target, params=params, ops=(relay.op.get("nn.conv2d"),) ) # run tuning tasks print("Tuning...") tune_tasks(tasks, **tuning_opt) # compile kernels with history best records with autotvm.apply_history_best(log_file): print("Compile...") with tvm.transform.PassContext(opt_level=3): lib = relay.build_module.build(mod, target=target, params=params) # load parameters dev = tvm.device(str(target), 0) module = runtime.GraphModule(lib["default"](dev)) data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype)) module.set_input("data", data_tvm) # evaluate print("Evaluate inference time cost...") print(module.benchmark(dev, number=1, repeat=600)) # We do not run the tuning in our webpage server since it takes too long. # Uncomment the following line to run it by yourself. # tune_and_evaluate(tuning_option) ###################################################################### # Sample Output # ------------- # The tuning needs to compile many programs and extract feature from them. # So a high performance CPU is recommended. One sample output is listed below. # It takes about 4 hours to get the following output on a 32T AMD Ryzen Threadripper. # The tuning target is NVIDIA 1080 Ti. # (You can see some errors during compilation. If the tuning is not stuck, it is okay.) # # .. code-block:: bash # # Extract tasks... # Tuning... 
# [Task 1/12] Current/Best: 541.83/3570.66 GFLOPS | Progress: (960/2000) | 1001.31 s Done. # [Task 2/12] Current/Best: 0.56/ 803.33 GFLOPS | Progress: (704/2000) | 608.08 s Done. # [Task 3/12] Current/Best: 103.69/1141.25 GFLOPS | Progress: (768/2000) | 702.13 s Done. # [Task 4/12] Current/Best: 2905.03/3925.15 GFLOPS | Progress: (864/2000) | 745.94 sterminate called without an active exception # [Task 4/12] Current/Best: 2789.36/3925.15 GFLOPS | Progress: (1056/2000) | 929.40 s Done. # [Task 5/12] Current/Best: 89.06/1076.24 GFLOPS | Progress: (704/2000) | 601.73 s Done. # [Task 6/12] Current/Best: 40.39/2129.02 GFLOPS | Progress: (1088/2000) | 1125.76 s Done. # [Task 7/12] Current/Best: 4090.53/5007.02 GFLOPS | Progress: (800/2000) | 903.90 s Done. # [Task 8/12] Current/Best: 4.78/1272.28 GFLOPS | Progress: (768/2000) | 749.14 s Done. # [Task 9/12] Current/Best: 1391.45/2325.08 GFLOPS | Progress: (992/2000) | 1084.87 s Done. # [Task 10/12] Current/Best: 1995.44/2383.59 GFLOPS | Progress: (864/2000) | 862.60 s Done. # [Task 11/12] Current/Best: 4093.94/4899.80 GFLOPS | Progress: (224/2000) | 240.92 sterminate called without an active exception # [Task 11/12] Current/Best: 3487.98/4909.91 GFLOPS | Progress: (480/2000) | 534.96 sterminate called without an active exception # [Task 11/12] Current/Best: 4636.84/4912.17 GFLOPS | Progress: (1184/2000) | 1381.16 sterminate called without an active exception # [Task 11/12] Current/Best: 50.12/4912.17 GFLOPS | Progress: (1344/2000) | 1602.81 s Done. # [Task 12/12] Current/Best: 3581.31/4286.30 GFLOPS | Progress: (736/2000) | 943.52 s Done. # Compile... # Evaluate inference time cost... # Mean inference time (std dev): 1.07 ms (0.05 ms) # # As a reference baseline, the time cost of MXNet + TensorRT on resnet-18 is 1.30ms. So we are a little faster. ###################################################################### # # .. note:: **Experiencing Difficulties?** # # The auto tuning module is error-prone. 
If you always see " 0.00/ 0.00 GFLOPS", # then there must be something wrong. # # First, make sure you set the correct configuration of your device. # Then, you can print debug information by adding these lines in the beginning # of the script. It will print every measurement result, where you can find useful # error messages. # # .. code-block:: python # # import logging # logging.getLogger('autotvm').setLevel(logging.DEBUG) # # Finally, always feel free to ask our community for help on https://discuss.tvm.apache.org ################################################################# # .. _tutorials-autotvm-scale-up-rpc-tracker: ################################################################# # Scale up measurement by using multiple devices # ---------------------------------------------- # If you have multiple devices, you can use all of them for measurement. # TVM uses the RPC Tracker to manage distributed devices. # The RPC Tracker is a centralized controller node. We can register all devices to # the tracker. For example, if we have 10 GPU cards, we can register all of them # to the tracker, and run 10 measurements in parallel, accelerating the tuning process. # # To start an RPC tracker, run this command on the host machine. The tracker is # required during the whole tuning process, so we need to open a new terminal for # this command: # # .. code-block:: bash # # python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190 # # The expected output is # # .. code-block:: bash # # INFO:RPCTracker:bind to 0.0.0.0:9190 # # Then open another new terminal for the RPC server. We need to start one dedicated server # for each device. We use a string key to distinguish the types of devices. # You can pick a name you like. # (Note: For rocm backend, there are some internal errors with the compiler, # we need to add `--no-fork` to the argument list.) # # .. 
code-block:: bash # # python -m tvm.exec.rpc_server --tracker=127.0.0.1:9190 --key=1080ti # # After registering devices, we can confirm it by querying rpc_tracker # # .. code-block:: bash # # python -m tvm.exec.query_rpc_tracker --host=127.0.0.1 --port=9190 # # For example, if we have four 1080ti, two titanx and one gfx900, the output can be # # .. code-block:: bash # # Queue Status # ---------------------------------- # key total free pending # ---------------------------------- # 1080ti 4 4 0 # titanx 2 2 0 # gfx900 1 1 0 # ---------------------------------- # # Finally, we need to change the tuning option to use RPCRunner. Use the code below # to replace the corresponding part above. tuning_option = { "log_filename": log_file, "tuner": "xgb", "n_trial": 2000, "early_stopping": 600, "measure_option": autotvm.measure_option( builder=autotvm.LocalBuilder(timeout=10), runner=autotvm.RPCRunner( "1080ti", # change the device key to your key "127.0.0.1", 9190, number=20, repeat=3, timeout=4, min_repeat_ms=150, ), ), }
16,284
38.719512
135
py
tvm
tvm-main/gallery/how_to/tune_with_autotvm/tune_relay_x86.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tune_relay_x86: Auto-tuning a Convolutional Network for x86 CPU =============================================== **Author**: `Yao Wang <https://github.com/kevinthesun>`_, `Eddie Yan <https://github.com/eqy>`_ This is a tutorial about how to tune convolution neural network for x86 CPU. Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. """ import os import numpy as np import tvm from tvm import relay, autotvm from tvm.relay import testing from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner from tvm.autotvm.graph_tuner import DPTuner, PBQPTuner import tvm.contrib.graph_executor as runtime ################################################################# # Define network # -------------- # First we need to define the network in relay frontend API. # We can either load some pre-defined network from :code:`relay.testing` # or building :any:`relay.testing.resnet` with relay. # We can also load models from MXNet, ONNX and TensorFlow. # # In this tutorial, we choose resnet-18 as tuning example. 
def get_network(name, batch_size): """Get the symbol definition and random weight of a network""" input_shape = (batch_size, 3, 224, 224) output_shape = (batch_size, 1000) if "resnet" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif "vgg" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.vgg.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif name == "mobilenet": mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype) elif name == "squeezenet_v1.1": mod, params = relay.testing.squeezenet.get_workload( batch_size=batch_size, version="1.1", dtype=dtype ) elif name == "inception_v3": input_shape = (batch_size, 3, 299, 299) mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype) elif name == "mxnet": # an example for mxnet model from mxnet.gluon.model_zoo.vision import get_model block = get_model("resnet18_v1", pretrained=True) mod, params = relay.frontend.from_mxnet(block, shape={input_name: input_shape}, dtype=dtype) net = mod["main"] net = relay.Function( net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs ) mod = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) return mod, params, input_shape, output_shape # Replace "llvm" with the correct target of your CPU. # For example, for AWS EC2 c5 instance with Intel Xeon # Platinum 8000 series, the target should be "llvm -mcpu=skylake-avx512". # For AWS EC2 c4 instance with Intel Xeon E5-2666 v3, it should be # "llvm -mcpu=core-avx2". target = "llvm" batch_size = 1 dtype = "float32" model_name = "resnet-18" log_file = "%s.log" % model_name graph_opt_sch_file = "%s_graph_opt.log" % model_name # Set the input name of the graph # For ONNX models, it is typically "0". 
input_name = "data" # Set number of threads used for tuning based on the number of # physical CPU cores on your machine. num_threads = 1 os.environ["TVM_NUM_THREADS"] = str(num_threads) ################################################################# # Configure tensor tuning settings and create tasks # ------------------------------------------------- # To get better kernel execution performance on x86 CPU, # we need to change data layout of convolution kernel from # "NCHW" to "NCHWc". To deal with this situation, we define # conv2d_NCHWc operator in topi. We will tune this operator # instead of plain conv2d. # # We will use local mode for tuning configuration. RPC tracker # mode can be setup similarly to the approach in # :ref:`tune_relay_arm` tutorial. # # To perform a precise measurement, we should repeat the measurement several # times and use the average of results. In addition, we need to flush the cache # for the weight tensors between repeated measurements. This can make the measured # latency of one operator closer to its actual latency during end-to-end inference. tuning_option = { "log_filename": log_file, "tuner": "random", "early_stopping": None, "measure_option": autotvm.measure_option( builder=autotvm.LocalBuilder(), runner=autotvm.LocalRunner( number=1, repeat=10, min_repeat_ms=0, enable_cpu_cache_flush=True ), ), } # You can skip the implementation of this function for this tutorial. 
def tune_kernels( tasks, measure_option, tuner="gridsearch", early_stopping=None, log_filename="tuning.log" ): for i, task in enumerate(tasks): prefix = "[Task %2d/%2d] " % (i + 1, len(tasks)) # create tuner if tuner == "xgb": tuner_obj = XGBTuner(task, loss_type="reg") elif tuner == "xgb_knob": tuner_obj = XGBTuner(task, loss_type="reg", feature_type="knob") elif tuner == "xgb_itervar": tuner_obj = XGBTuner(task, loss_type="reg", feature_type="itervar") elif tuner == "xgb_curve": tuner_obj = XGBTuner(task, loss_type="reg", feature_type="curve") elif tuner == "xgb_rank": tuner_obj = XGBTuner(task, loss_type="rank") elif tuner == "xgb_rank_knob": tuner_obj = XGBTuner(task, loss_type="rank", feature_type="knob") elif tuner == "xgb_rank_itervar": tuner_obj = XGBTuner(task, loss_type="rank", feature_type="itervar") elif tuner == "xgb_rank_curve": tuner_obj = XGBTuner(task, loss_type="rank", feature_type="curve") elif tuner == "xgb_rank_binary": tuner_obj = XGBTuner(task, loss_type="rank-binary") elif tuner == "xgb_rank_binary_knob": tuner_obj = XGBTuner(task, loss_type="rank-binary", feature_type="knob") elif tuner == "xgb_rank_binary_itervar": tuner_obj = XGBTuner(task, loss_type="rank-binary", feature_type="itervar") elif tuner == "xgb_rank_binary_curve": tuner_obj = XGBTuner(task, loss_type="rank-binary", feature_type="curve") elif tuner == "ga": tuner_obj = GATuner(task, pop_size=50) elif tuner == "random": tuner_obj = RandomTuner(task) elif tuner == "gridsearch": tuner_obj = GridSearchTuner(task) else: raise ValueError("Invalid tuner: " + tuner) # do tuning n_trial = len(task.config_space) tuner_obj.tune( n_trial=n_trial, early_stopping=early_stopping, measure_option=measure_option, callbacks=[ autotvm.callback.progress_bar(n_trial, prefix=prefix), autotvm.callback.log_to_file(log_filename), ], ) # Use graph tuner to achieve graph level optimal schedules # Set use_DP=False if it takes too long to finish. 
def tune_graph(graph, dshape, records, opt_sch_file, use_DP=True): target_op = [ relay.op.get("nn.conv2d"), ] Tuner = DPTuner if use_DP else PBQPTuner executor = Tuner(graph, {input_name: dshape}, records, target_op, target) executor.benchmark_layout_transform(min_exec_num=2000) executor.run() executor.write_opt_sch2record_file(opt_sch_file) ######################################################################## # Finally, we launch tuning jobs and evaluate the end-to-end performance. def evaluate_performance(lib, data_shape): # upload parameters to device dev = tvm.cpu() data_tvm = tvm.nd.array((np.random.uniform(size=data_shape)).astype(dtype)) module = runtime.GraphModule(lib["default"](dev)) module.set_input(input_name, data_tvm) # evaluate print("Evaluate inference time cost...") print(module.benchmark(dev, number=100, repeat=3)) def tune_and_evaluate(tuning_opt): # extract workloads from relay program print("Extract tasks...") mod, params, data_shape, out_shape = get_network(model_name, batch_size) tasks = autotvm.task.extract_from_program( mod["main"], target=target, params=params, ops=(relay.op.get("nn.conv2d"),) ) # run tuning tasks tune_kernels(tasks, **tuning_opt) tune_graph(mod["main"], data_shape, log_file, graph_opt_sch_file) # compile kernels in default mode print("Evaluation of the network compiled in 'default' mode without auto tune:") with tvm.transform.PassContext(opt_level=3): print("Compile...") lib = relay.build(mod, target=target, params=params) evaluate_performance(lib, data_shape) # compile kernels in kernel tuned only mode print("\nEvaluation of the network been tuned on kernel level:") with autotvm.apply_history_best(log_file): print("Compile...") with tvm.transform.PassContext(opt_level=3): lib = relay.build(mod, target=target, params=params) evaluate_performance(lib, data_shape) # compile kernels with graph-level best records print("\nEvaluation of the network been tuned on graph level:") with 
autotvm.apply_graph_best(graph_opt_sch_file): print("Compile...") with tvm.transform.PassContext(opt_level=3): lib = relay.build_module.build(mod, target=target, params=params) evaluate_performance(lib, data_shape) # We do not run the tuning in our webpage server since it takes too long. # Uncomment the following line to run it by yourself. # tune_and_evaluate(tuning_option) ###################################################################### # Sample Output # ------------- # The tuning needs to compile many programs and extract feature from them. # So a high performance CPU is recommended. # One sample output is listed below. # # .. code-block:: bash # # Extract tasks... # Tuning... # [Task 1/12] Current/Best: 598.05/2497.63 GFLOPS | Progress: (252/252) | 1357.95 s Done. # [Task 2/12] Current/Best: 522.63/2279.24 GFLOPS | Progress: (784/784) | 3989.60 s Done. # [Task 3/12] Current/Best: 447.33/1927.69 GFLOPS | Progress: (784/784) | 3869.14 s Done. # [Task 4/12] Current/Best: 481.11/1912.34 GFLOPS | Progress: (672/672) | 3274.25 s Done. # [Task 5/12] Current/Best: 414.09/1598.45 GFLOPS | Progress: (672/672) | 2720.78 s Done. # [Task 6/12] Current/Best: 508.96/2273.20 GFLOPS | Progress: (768/768) | 3718.75 s Done. # [Task 7/12] Current/Best: 469.14/1955.79 GFLOPS | Progress: (576/576) | 2665.67 s Done. # [Task 8/12] Current/Best: 230.91/1658.97 GFLOPS | Progress: (576/576) | 2435.01 s Done. # [Task 9/12] Current/Best: 487.75/2295.19 GFLOPS | Progress: (648/648) | 3009.95 s Done. # [Task 10/12] Current/Best: 182.33/1734.45 GFLOPS | Progress: (360/360) | 1755.06 s Done. # [Task 11/12] Current/Best: 372.18/1745.15 GFLOPS | Progress: (360/360) | 1684.50 s Done. # [Task 12/12] Current/Best: 215.34/2271.11 GFLOPS | Progress: (400/400) | 2128.74 s Done. # INFO Start to benchmark layout transformation... # INFO Benchmarking layout transformation successful. # INFO Start to run dynamic programming algorithm... # INFO Start forward pass... # INFO Finished forward pass. 
# INFO Start backward pass... # INFO Finished backward pass... # INFO Finished DPExecutor run. # INFO Writing optimal schedules to resnet-18_graph_opt.log successfully. # # Evaluation of the network compiled in 'default' mode without auto tune: # Compile... # Evaluate inference time cost... # Mean inference time (std dev): 4.5 ms (0.03 ms) # # Evaluation of the network been tuned on kernel level: # Compile... # Evaluate inference time cost... # Mean inference time (std dev): 3.2 ms (0.03 ms) # # Evaluation of the network been tuned on graph level: # Compile... # Config for target=llvm -keys=cpu, workload=('dense_nopack.x86', ('TENSOR', (1, 512), 'float32'), ('TENSOR', (1000, 512), 'float32'), None, 'float32') is missing in ApplyGraphBest context. A fallback configuration is used, which may bring great performance regression. # Config for target=llvm -keys=cpu, workload=('dense_pack.x86', ('TENSOR', (1, 512), 'float32'), ('TENSOR', (1000, 512), 'float32'), None, 'float32') is missing in ApplyGraphBest context. A fallback configuration is used, which may bring great performance regression. # Evaluate inference time cost... # Mean inference time (std dev): 3.16 ms (0.03 ms)
13,571
41.149068
272
py
tvm
tvm-main/gallery/how_to/compile_models/from_onnx.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compile ONNX Models =================== **Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_ This article is an introductory tutorial to deploy ONNX models with Relay. To begin, install the ONNX package: .. code-block:: bash %%shell pip install onnx onnxoptimizer Alternatively, you can refer to official site: https://github.com/onnx/onnx """ import onnx import numpy as np import tvm from tvm import te import tvm.relay as relay from tvm.contrib.download import download_testdata ###################################################################### # Load pretrained ONNX model # --------------------------------------------- # The example super resolution model used here is exactly the same model in onnx tutorial # http://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html # we skip the pytorch model construction part, and download the saved onnx model model_url = "".join( [ "https://gist.github.com/zhreshold/", "bcda4716699ac97ea44f791c24310193/raw/", "93672b029103648953c4e5ad3ac3aadf346a4cdc/", "super_resolution_0.2.onnx", ] ) model_path = download_testdata(model_url, "super_resolution.onnx", module="onnx") # now you have super_resolution.onnx on disk onnx_model = 
onnx.load(model_path) ###################################################################### # Load a test image # --------------------------------------------- # A single cat dominates the examples! This model takes a single input image of size # 224x224 and outputs a scaled image that is 3x greater than the input along each # axis, a 672x672 image. Re-scale the cat image to fit this input shape then # convert to `YCbCr`. The super resolution model will then be applied to the # luminance (`Y`) channel. from PIL import Image img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true" img_path = download_testdata(img_url, "cat.png", module="data") img = Image.open(img_path).resize((224, 224)) img_ycbcr = img.convert("YCbCr") # convert to YCbCr img_y, img_cb, img_cr = img_ycbcr.split() x = np.array(img_y)[np.newaxis, np.newaxis, :, :] ###################################################################### # Compile the model with relay # --------------------------------------------- # Typically ONNX models mix model input values with parameter values, with # the input having the name `1`. This model dependent, and you should check # with the documentation for your model to determine the full input and # parameter name space. # # Passing in the shape dictionary to the `relay.frontend.from_onnx` method # tells relay which ONNX parameters are inputs, and which are parameters, and # provides a static definition of the input size. 
target = "llvm" input_name = "1" shape_dict = {input_name: x.shape} mod, params = relay.frontend.from_onnx(onnx_model, shape_dict) with tvm.transform.PassContext(opt_level=1): executor = relay.build_module.create_executor( "graph", mod, tvm.cpu(0), target, params ).evaluate() ###################################################################### # Execute on TVM # --------------------------------------------- dtype = "float32" tvm_output = executor(tvm.nd.array(x.astype(dtype))).numpy() ###################################################################### # Display results # --------------------------------------------- # We put input and output image neck to neck. The luminance channel, `Y` is the output # from the model. The chroma channels `Cb` and `Cr` are resized to match with a simple # bicubic algorithm. The image is then recombined and converted back to `RGB`. from matplotlib import pyplot as plt out_y = Image.fromarray(np.uint8((tvm_output[0, 0]).clip(0, 255)), mode="L") out_cb = img_cb.resize(out_y.size, Image.BICUBIC) out_cr = img_cr.resize(out_y.size, Image.BICUBIC) result = Image.merge("YCbCr", [out_y, out_cb, out_cr]).convert("RGB") canvas = np.full((672, 672 * 2, 3), 255) canvas[0:224, 0:224, :] = np.asarray(img) canvas[:, 672:, :] = np.asarray(result) plt.imshow(canvas.astype(np.uint8)) plt.show() ###################################################################### # Notes # --------------------------------------------- # By default, ONNX defines models in terms of dynamic shapes. The ONNX importer # retains that dynamism upon import, and the compiler attempts to convert the model # into a static shapes at compile time. If this fails, there may still be dynamic # operations in the model. Not all TVM kernels currently support dynamic shapes, # please file an issue on discuss.tvm.apache.org if you hit an error with dynamic kernels. # # This particular model was build using an older version of ONNX. 
During the import # phase ONNX importer will run the ONNX verifier, which may throw a `Mismatched attribute type` # warning. Because TVM supports a number of different ONNX versions, the Relay model # will still be valid.
5,709
40.985294
95
py
tvm
tvm-main/gallery/how_to/compile_models/from_darknet.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compile YOLO-V2 and YOLO-V3 in DarkNet Models ============================================= **Author**: `Siju Samuel <https://siju-samuel.github.io/>`_ This article is an introductory tutorial to deploy darknet models with TVM. All the required models and libraries will be downloaded from the internet by the script. This script runs the YOLO-V2 and YOLO-V3 Model with the bounding boxes Darknet parsing have dependancy with CFFI and CV2 library Please install CFFI and CV2 before executing this script .. 
code-block:: bash %%shell pip install cffi opencv-python """ # numpy and matplotlib import numpy as np import matplotlib.pyplot as plt import sys # tvm, relay import tvm from tvm import te from tvm import relay from ctypes import * from tvm.contrib.download import download_testdata from tvm.relay.testing.darknet import __darknetffi__ import tvm.relay.testing.yolo_detection import tvm.relay.testing.darknet ###################################################################### # Choose the model # ----------------------- # Models are: 'yolov2', 'yolov3' or 'yolov3-tiny' # Model name MODEL_NAME = "yolov3" ###################################################################### # Download required files # ----------------------- # Download cfg and weights file if first time. CFG_NAME = MODEL_NAME + ".cfg" WEIGHTS_NAME = MODEL_NAME + ".weights" REPO_URL = "https://github.com/dmlc/web-data/blob/main/darknet/" CFG_URL = REPO_URL + "cfg/" + CFG_NAME + "?raw=true" WEIGHTS_URL = "https://pjreddie.com/media/files/" + WEIGHTS_NAME cfg_path = download_testdata(CFG_URL, CFG_NAME, module="darknet") weights_path = download_testdata(WEIGHTS_URL, WEIGHTS_NAME, module="darknet") # Download and Load darknet library if sys.platform in ["linux", "linux2"]: DARKNET_LIB = "libdarknet2.0.so" DARKNET_URL = REPO_URL + "lib/" + DARKNET_LIB + "?raw=true" elif sys.platform == "darwin": DARKNET_LIB = "libdarknet_mac2.0.so" DARKNET_URL = REPO_URL + "lib_osx/" + DARKNET_LIB + "?raw=true" else: err = "Darknet lib is not supported on {} platform".format(sys.platform) raise NotImplementedError(err) lib_path = download_testdata(DARKNET_URL, DARKNET_LIB, module="darknet") DARKNET_LIB = __darknetffi__.dlopen(lib_path) net = DARKNET_LIB.load_network(cfg_path.encode("utf-8"), weights_path.encode("utf-8"), 0) dtype = "float32" batch_size = 1 data = np.empty([batch_size, net.c, net.h, net.w], dtype) shape_dict = {"data": data.shape} print("Converting darknet to relay functions...") mod, params = 
relay.frontend.from_darknet(net, dtype=dtype, shape=data.shape) ###################################################################### # Import the graph to Relay # ------------------------- # compile the model target = tvm.target.Target("llvm", host="llvm") dev = tvm.cpu(0) data = np.empty([batch_size, net.c, net.h, net.w], dtype) shape = {"data": data.shape} print("Compiling the model...") with tvm.transform.PassContext(opt_level=3): lib = relay.build(mod, target=target, params=params) [neth, netw] = shape["data"][2:] # Current image shape is 608x608 ###################################################################### # Load a test image # ----------------- test_image = "dog.jpg" print("Loading the test image...") img_url = REPO_URL + "data/" + test_image + "?raw=true" img_path = download_testdata(img_url, test_image, "data") data = tvm.relay.testing.darknet.load_image(img_path, netw, neth) ###################################################################### # Execute on TVM Runtime # ---------------------- # The process is no different from other examples. 
from tvm.contrib import graph_executor m = graph_executor.GraphModule(lib["default"](dev)) # set inputs m.set_input("data", tvm.nd.array(data.astype(dtype))) # execute print("Running the test image...") # detection # thresholds thresh = 0.5 nms_thresh = 0.45 m.run() # get outputs tvm_out = [] if MODEL_NAME == "yolov2": layer_out = {} layer_out["type"] = "Region" # Get the region layer attributes (n, out_c, out_h, out_w, classes, coords, background) layer_attr = m.get_output(2).numpy() layer_out["biases"] = m.get_output(1).numpy() out_shape = (layer_attr[0], layer_attr[1] // layer_attr[0], layer_attr[2], layer_attr[3]) layer_out["output"] = m.get_output(0).numpy().reshape(out_shape) layer_out["classes"] = layer_attr[4] layer_out["coords"] = layer_attr[5] layer_out["background"] = layer_attr[6] tvm_out.append(layer_out) elif MODEL_NAME == "yolov3": for i in range(3): layer_out = {} layer_out["type"] = "Yolo" # Get the yolo layer attributes (n, out_c, out_h, out_w, classes, total) layer_attr = m.get_output(i * 4 + 3).numpy() layer_out["biases"] = m.get_output(i * 4 + 2).numpy() layer_out["mask"] = m.get_output(i * 4 + 1).numpy() out_shape = (layer_attr[0], layer_attr[1] // layer_attr[0], layer_attr[2], layer_attr[3]) layer_out["output"] = m.get_output(i * 4).numpy().reshape(out_shape) layer_out["classes"] = layer_attr[4] tvm_out.append(layer_out) elif MODEL_NAME == "yolov3-tiny": for i in range(2): layer_out = {} layer_out["type"] = "Yolo" # Get the yolo layer attributes (n, out_c, out_h, out_w, classes, total) layer_attr = m.get_output(i * 4 + 3).numpy() layer_out["biases"] = m.get_output(i * 4 + 2).numpy() layer_out["mask"] = m.get_output(i * 4 + 1).numpy() out_shape = (layer_attr[0], layer_attr[1] // layer_attr[0], layer_attr[2], layer_attr[3]) layer_out["output"] = m.get_output(i * 4).numpy().reshape(out_shape) layer_out["classes"] = layer_attr[4] tvm_out.append(layer_out) thresh = 0.560 # do the detection and bring up the bounding boxes img = 
tvm.relay.testing.darknet.load_image_color(img_path) _, im_h, im_w = img.shape dets = tvm.relay.testing.yolo_detection.fill_network_boxes( (netw, neth), (im_w, im_h), thresh, 1, tvm_out ) last_layer = net.layers[net.n - 1] tvm.relay.testing.yolo_detection.do_nms_sort(dets, last_layer.classes, nms_thresh) coco_name = "coco.names" coco_url = REPO_URL + "data/" + coco_name + "?raw=true" font_name = "arial.ttf" font_url = REPO_URL + "data/" + font_name + "?raw=true" coco_path = download_testdata(coco_url, coco_name, module="data") font_path = download_testdata(font_url, font_name, module="data") with open(coco_path) as f: content = f.readlines() names = [x.strip() for x in content] tvm.relay.testing.yolo_detection.show_detections(img, dets, thresh, names, last_layer.classes) tvm.relay.testing.yolo_detection.draw_detections( font_path, img, dets, thresh, names, last_layer.classes ) plt.imshow(img.transpose(1, 2, 0)) plt.show()
7,570
36.112745
97
py
tvm
tvm-main/gallery/how_to/compile_models/from_pytorch.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compile PyTorch Models ====================== **Author**: `Alex Wong <https://github.com/alexwong/>`_ This article is an introductory tutorial to deploy PyTorch models with Relay. For us to begin, PyTorch should be installed. TorchVision is also required so we can use the model zoo. A quick solution is to install via pip: .. code-block:: bash %%shell pip install torch pip install torchvision or please refer to official site https://pytorch.org/get-started/locally/ PyTorch versions should be backwards compatible but should be used with the proper TorchVision version. Currently, TVM supports PyTorch 1.7 and 1.4. Other versions may be unstable. 
""" import tvm from tvm import relay import numpy as np from tvm.contrib.download import download_testdata # PyTorch imports import torch import torchvision ###################################################################### # Load a pretrained PyTorch model # ------------------------------- model_name = "resnet18" model = getattr(torchvision.models, model_name)(pretrained=True) model = model.eval() # We grab the TorchScripted model via tracing input_shape = [1, 3, 224, 224] input_data = torch.randn(input_shape) scripted_model = torch.jit.trace(model, input_data).eval() ###################################################################### # Load a test image # ----------------- # Classic cat example! from PIL import Image img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true" img_path = download_testdata(img_url, "cat.png", module="data") img = Image.open(img_path).resize((224, 224)) # Preprocess the image and convert to tensor from torchvision import transforms my_preprocess = transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ] ) img = my_preprocess(img) img = np.expand_dims(img, 0) ###################################################################### # Import the graph to Relay # ------------------------- # Convert PyTorch graph to Relay graph. The input name can be arbitrary. input_name = "input0" shape_list = [(input_name, img.shape)] mod, params = relay.frontend.from_pytorch(scripted_model, shape_list) ###################################################################### # Relay Build # ----------- # Compile the graph to llvm target with given input specification. 
target = tvm.target.Target("llvm", host="llvm") dev = tvm.cpu(0) with tvm.transform.PassContext(opt_level=3): lib = relay.build(mod, target=target, params=params) ###################################################################### # Execute the portable graph on TVM # --------------------------------- # Now we can try deploying the compiled model on target. from tvm.contrib import graph_executor dtype = "float32" m = graph_executor.GraphModule(lib["default"](dev)) # Set inputs m.set_input(input_name, tvm.nd.array(img.astype(dtype))) # Execute m.run() # Get outputs tvm_output = m.get_output(0) ##################################################################### # Look up synset name # ------------------- # Look up prediction top 1 index in 1000 class synset. synset_url = "".join( [ "https://raw.githubusercontent.com/Cadene/", "pretrained-models.pytorch/master/data/", "imagenet_synsets.txt", ] ) synset_name = "imagenet_synsets.txt" synset_path = download_testdata(synset_url, synset_name, module="data") with open(synset_path) as f: synsets = f.readlines() synsets = [x.strip() for x in synsets] splits = [line.split(" ") for line in synsets] key_to_classname = {spl[0]: " ".join(spl[1:]) for spl in splits} class_url = "".join( [ "https://raw.githubusercontent.com/Cadene/", "pretrained-models.pytorch/master/data/", "imagenet_classes.txt", ] ) class_name = "imagenet_classes.txt" class_path = download_testdata(class_url, class_name, module="data") with open(class_path) as f: class_id_to_key = f.readlines() class_id_to_key = [x.strip() for x in class_id_to_key] # Get top-1 result for TVM top1_tvm = np.argmax(tvm_output.numpy()[0]) tvm_class_key = class_id_to_key[top1_tvm] # Convert input to PyTorch variable and get PyTorch result for comparison with torch.no_grad(): torch_img = torch.from_numpy(img) output = model(torch_img) # Get top-1 result for PyTorch top1_torch = np.argmax(output.numpy()) torch_class_key = class_id_to_key[top1_torch] print("Relay top-1 id: {}, class 
name: {}".format(top1_tvm, key_to_classname[tvm_class_key])) print("Torch top-1 id: {}, class name: {}".format(top1_torch, key_to_classname[torch_class_key]))
5,523
31.116279
97
py
tvm
tvm-main/gallery/how_to/compile_models/from_tflite.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compile TFLite Models ===================== **Author**: `Zhao Wu <https://github.com/FrozenGene>`_ This article is an introductory tutorial to deploy TFLite models with Relay. To get started, TFLite package needs to be installed as prerequisite. .. code-block:: bash %%shell pip install tflite==2.1.0 or you could generate TFLite package yourself. The steps are the following: .. code-block:: bash # Get the flatc compiler. # Please refer to https://github.com/google/flatbuffers for details # and make sure it is properly installed. flatc --version # Get the TFLite schema. wget https://raw.githubusercontent.com/tensorflow/tensorflow/r1.13/tensorflow/lite/schema/schema.fbs # Generate TFLite package. flatc --python schema.fbs # Add current folder (which contains generated tflite module) to PYTHONPATH. export PYTHONPATH=${PYTHONPATH:+$PYTHONPATH:}$(pwd) Now please check if TFLite package is installed successfully, ``python -c "import tflite"`` Below you can find an example on how to compile TFLite model using TVM. 
""" ###################################################################### # Utils for downloading and extracting zip files # ---------------------------------------------- import os def extract(path): import tarfile if path.endswith("tgz") or path.endswith("gz"): dir_path = os.path.dirname(path) tar = tarfile.open(path) tar.extractall(path=dir_path) tar.close() else: raise RuntimeError("Could not decompress the file: " + path) ###################################################################### # Load pretrained TFLite model # ---------------------------- # Load mobilenet V1 TFLite model provided by Google from tvm.contrib.download import download_testdata model_url = "http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz" # Download model tar file and extract it to get mobilenet_v1_1.0_224.tflite model_path = download_testdata(model_url, "mobilenet_v1_1.0_224.tgz", module=["tf", "official"]) model_dir = os.path.dirname(model_path) extract(model_path) # Now we can open mobilenet_v1_1.0_224.tflite tflite_model_file = os.path.join(model_dir, "mobilenet_v1_1.0_224.tflite") tflite_model_buf = open(tflite_model_file, "rb").read() # Get TFLite model from buffer try: import tflite tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0) except AttributeError: import tflite.Model tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0) ###################################################################### # Load a test image # ----------------- # A single cat dominates the examples! 
from PIL import Image from matplotlib import pyplot as plt import numpy as np image_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true" image_path = download_testdata(image_url, "cat.png", module="data") resized_image = Image.open(image_path).resize((224, 224)) plt.imshow(resized_image) plt.show() image_data = np.asarray(resized_image).astype("float32") # Add a dimension to the image so that we have NHWC format layout image_data = np.expand_dims(image_data, axis=0) # Preprocess image as described here: # https://github.com/tensorflow/models/blob/edb6ed22a801665946c63d650ab9a0b23d98e1b1/research/slim/preprocessing/inception_preprocessing.py#L243 image_data[:, :, :, 0] = 2.0 / 255.0 * image_data[:, :, :, 0] - 1 image_data[:, :, :, 1] = 2.0 / 255.0 * image_data[:, :, :, 1] - 1 image_data[:, :, :, 2] = 2.0 / 255.0 * image_data[:, :, :, 2] - 1 print("input", image_data.shape) ###################################################################### # Compile the model with relay # ---------------------------- # TFLite input tensor name, shape and type input_tensor = "input" input_shape = (1, 224, 224, 3) input_dtype = "float32" # Parse TFLite model and convert it to a Relay module from tvm import relay, transform mod, params = relay.frontend.from_tflite( tflite_model, shape_dict={input_tensor: input_shape}, dtype_dict={input_tensor: input_dtype} ) # Build the module against to x86 CPU target = "llvm" with transform.PassContext(opt_level=3): lib = relay.build(mod, target, params=params) ###################################################################### # Execute on TVM # -------------- import tvm from tvm import te from tvm.contrib import graph_executor as runtime # Create a runtime executor module module = runtime.GraphModule(lib["default"](tvm.cpu())) # Feed input data module.set_input(input_tensor, tvm.nd.array(image_data)) # Run module.run() # Get output tvm_output = module.get_output(0).numpy() 
###################################################################### # Display results # --------------- # Load label file label_file_url = "".join( [ "https://raw.githubusercontent.com/", "tensorflow/tensorflow/master/tensorflow/lite/java/demo/", "app/src/main/assets/", "labels_mobilenet_quant_v1_224.txt", ] ) label_file = "labels_mobilenet_quant_v1_224.txt" label_path = download_testdata(label_file_url, label_file, module="data") # List of 1001 classes with open(label_path) as f: labels = f.readlines() # Convert result to 1D data predictions = np.squeeze(tvm_output) # Get top 1 prediction prediction = np.argmax(predictions) # Convert id to class name and show the result print("The image prediction result is: id " + str(prediction) + " name: " + labels[prediction])
6,309
31.525773
144
py
tvm
tvm-main/gallery/how_to/compile_models/from_tensorflow.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compile Tensorflow Models ========================= This article is an introductory tutorial to deploy tensorflow models with TVM. For us to begin with, tensorflow python module is required to be installed. .. code-block:: bash %%shell pip install tensorflow Please refer to https://www.tensorflow.org/install """ # tvm, relay import tvm from tvm import te from tvm import relay # os and numpy import numpy as np import os.path # Tensorflow imports import tensorflow as tf # Ask tensorflow to limit its GPU memory to what's actually needed # instead of gobbling everything that's available. # https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth # This way this tutorial is a little more friendly to sphinx-gallery. gpus = tf.config.list_physical_devices("GPU") if gpus: try: for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) print("tensorflow will use experimental.set_memory_growth(True)") except RuntimeError as e: print("experimental.set_memory_growth option is not available: {}".format(e)) try: tf_compat_v1 = tf.compat.v1 except ImportError: tf_compat_v1 = tf # Tensorflow utility functions import tvm.relay.testing.tf as tf_testing # Base location for model related files. 
repo_base = "https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/" # Test image img_name = "elephant-299.jpg" image_url = os.path.join(repo_base, img_name) ###################################################################### # Tutorials # --------- # Please refer docs/frontend/tensorflow.md for more details for various models # from tensorflow. model_name = "classify_image_graph_def-with_shapes.pb" model_url = os.path.join(repo_base, model_name) # Image label map map_proto = "imagenet_2012_challenge_label_map_proto.pbtxt" map_proto_url = os.path.join(repo_base, map_proto) # Human readable text for labels label_map = "imagenet_synset_to_human_label_map.txt" label_map_url = os.path.join(repo_base, label_map) # Target settings # Use these commented settings to build for cuda. # target = tvm.target.Target("cuda", host="llvm") # layout = "NCHW" # dev = tvm.cuda(0) target = tvm.target.Target("llvm", host="llvm") layout = None dev = tvm.cpu(0) ###################################################################### # Download required files # ----------------------- # Download files listed above. from tvm.contrib.download import download_testdata img_path = download_testdata(image_url, img_name, module="data") model_path = download_testdata(model_url, model_name, module=["tf", "InceptionV1"]) map_proto_path = download_testdata(map_proto_url, map_proto, module="data") label_path = download_testdata(label_map_url, label_map, module="data") ###################################################################### # Import model # ------------ # Creates tensorflow graph definition from protobuf file. with tf_compat_v1.gfile.GFile(model_path, "rb") as f: graph_def = tf_compat_v1.GraphDef() graph_def.ParseFromString(f.read()) graph = tf.import_graph_def(graph_def, name="") # Call the utility to import the graph definition into default graph. graph_def = tf_testing.ProcessGraphDefParam(graph_def) # Add shapes to the graph. 
with tf_compat_v1.Session() as sess: graph_def = tf_testing.AddShapesToGraphDef(sess, "softmax") ###################################################################### # Decode image # ------------ # .. note:: # # tensorflow frontend import doesn't support preprocessing ops like JpegDecode. # JpegDecode is bypassed (just return source node). # Hence we supply decoded frame to TVM instead. # from PIL import Image image = Image.open(img_path).resize((299, 299)) x = np.array(image) ###################################################################### # Import the graph to Relay # ------------------------- # Import tensorflow graph definition to relay frontend. # # Results: # sym: relay expr for given tensorflow protobuf. # params: params converted from tensorflow params (tensor protobuf). shape_dict = {"DecodeJpeg/contents": x.shape} dtype_dict = {"DecodeJpeg/contents": "uint8"} mod, params = relay.frontend.from_tensorflow(graph_def, layout=layout, shape=shape_dict) print("Tensorflow protobuf imported to relay frontend.") ###################################################################### # Relay Build # ----------- # Compile the graph to llvm target with given input specification. # # Results: # graph: Final graph after compilation. # params: final params after compilation. # lib: target library which can be deployed on target with TVM runtime. with tvm.transform.PassContext(opt_level=3): lib = relay.build(mod, target, params=params) ###################################################################### # Execute the portable graph on TVM # --------------------------------- # Now we can try deploying the compiled model on target. 
from tvm.contrib import graph_executor dtype = "uint8" m = graph_executor.GraphModule(lib["default"](dev)) # set inputs m.set_input("DecodeJpeg/contents", tvm.nd.array(x.astype(dtype))) # execute m.run() # get outputs tvm_output = m.get_output(0, tvm.nd.empty(((1, 1008)), "float32")) ###################################################################### # Process the output # ------------------ # Process the model output to human readable text for InceptionV1. predictions = tvm_output.numpy() predictions = np.squeeze(predictions) # Creates node ID --> English string lookup. node_lookup = tf_testing.NodeLookup(label_lookup_path=map_proto_path, uid_lookup_path=label_path) # Print top 5 predictions from TVM output. top_k = predictions.argsort()[-5:][::-1] for node_id in top_k: human_string = node_lookup.id_to_string(node_id) score = predictions[node_id] print("%s (score = %.5f)" % (human_string, score)) ###################################################################### # Inference on tensorflow # ----------------------- # Run the corresponding model on tensorflow def create_graph(): """Creates a graph from saved GraphDef file and returns a saver.""" # Creates graph from saved graph_def.pb. with tf_compat_v1.gfile.GFile(model_path, "rb") as f: graph_def = tf_compat_v1.GraphDef() graph_def.ParseFromString(f.read()) graph = tf.import_graph_def(graph_def, name="") # Call the utility to import the graph definition into default graph. graph_def = tf_testing.ProcessGraphDefParam(graph_def) def run_inference_on_image(image): """Runs inference on an image. Parameters ---------- image: String Image file name. Returns ------- Nothing """ if not tf_compat_v1.gfile.Exists(image): tf.logging.fatal("File does not exist %s", image) image_data = tf_compat_v1.gfile.GFile(image, "rb").read() # Creates graph from saved GraphDef. 
create_graph() with tf_compat_v1.Session() as sess: softmax_tensor = sess.graph.get_tensor_by_name("softmax:0") predictions = sess.run(softmax_tensor, {"DecodeJpeg/contents:0": image_data}) predictions = np.squeeze(predictions) # Creates node ID --> English string lookup. node_lookup = tf_testing.NodeLookup( label_lookup_path=map_proto_path, uid_lookup_path=label_path ) # Print top 5 predictions from tensorflow. top_k = predictions.argsort()[-5:][::-1] print("===== TENSORFLOW RESULTS =======") for node_id in top_k: human_string = node_lookup.id_to_string(node_id) score = predictions[node_id] print("%s (score = %.5f)" % (human_string, score)) run_inference_on_image(img_path)
8,575
32.24031
97
py
tvm
tvm-main/gallery/how_to/compile_models/from_mxnet.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-from-mxnet: Compile MXNet Models ==================== **Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_, \ `Kazutaka Morita <https://github.com/kazum>`_ This article is an introductory tutorial to deploy mxnet models with Relay. To begin, we must install `mxnet`: .. code-block:: bash %%shell pip install mxnet or please refer to official installation guide. https://mxnet.apache.org/versions/master/install/index.html """ # some standard imports # sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True # sphinx_gallery_end_ignore import mxnet as mx import tvm import tvm.relay as relay import numpy as np ###################################################################### # Download Resnet18 model from Gluon Model Zoo # --------------------------------------------- # In this section, we download a pretrained imagenet model and classify an image. 
from tvm.contrib.download import download_testdata from mxnet.gluon.model_zoo.vision import get_model from PIL import Image from matplotlib import pyplot as plt block = get_model("resnet18_v1", pretrained=True) img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true" img_name = "cat.png" synset_url = "".join( [ "https://gist.githubusercontent.com/zhreshold/", "4d0b62f3d01426887599d4f7ede23ee5/raw/", "596b27d23537e5a1b5751d2b0481ef172f58b539/", "imagenet1000_clsid_to_human.txt", ] ) synset_name = "imagenet1000_clsid_to_human.txt" img_path = download_testdata(img_url, "cat.png", module="data") synset_path = download_testdata(synset_url, synset_name, module="data") with open(synset_path) as f: synset = eval(f.read()) image = Image.open(img_path).resize((224, 224)) plt.imshow(image) plt.show() def transform_image(image): image = np.array(image) - np.array([123.0, 117.0, 104.0]) image /= np.array([58.395, 57.12, 57.375]) image = image.transpose((2, 0, 1)) image = image[np.newaxis, :] return image x = transform_image(image) print("x", x.shape) ###################################################################### # Compile the Graph # ----------------- # Now we would like to port the Gluon model to a portable computational graph. # It's as easy as several lines. 
# We support MXNet static graph(symbol) and HybridBlock in mxnet.gluon shape_dict = {"data": x.shape} mod, params = relay.frontend.from_mxnet(block, shape_dict) ## we want a probability so add a softmax operator func = mod["main"] func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs) ###################################################################### # now compile the graph target = "cuda" with tvm.transform.PassContext(opt_level=3): lib = relay.build(func, target, params=params) ###################################################################### # Execute the portable graph on TVM # --------------------------------- # Now, we would like to reproduce the same forward computation using TVM. from tvm.contrib import graph_executor dev = tvm.cuda(0) dtype = "float32" m = graph_executor.GraphModule(lib["default"](dev)) # set inputs m.set_input("data", tvm.nd.array(x.astype(dtype))) # execute m.run() # get outputs tvm_output = m.get_output(0) top1 = np.argmax(tvm_output.numpy()[0]) print("TVM prediction top-1:", top1, synset[top1]) ###################################################################### # Use MXNet symbol with pretrained weights # ---------------------------------------- # MXNet often use `arg_params` and `aux_params` to store network parameters # separately, here we show how to use these weights with existing API def block2symbol(block): data = mx.sym.Variable("data") sym = block(data) args = {} auxs = {} for k, v in block.collect_params().items(): args[k] = mx.nd.array(v.data().asnumpy()) return sym, args, auxs mx_sym, args, auxs = block2symbol(block) # usually we would save/load it as checkpoint mx.model.save_checkpoint("resnet18_v1", 0, mx_sym, args, auxs) # there are 'resnet18_v1-0000.params' and 'resnet18_v1-symbol.json' on disk ###################################################################### # for a normal mxnet model, we start from here mx_sym, args, auxs = 
mx.model.load_checkpoint("resnet18_v1", 0) # now we use the same API to get Relay computation graph mod, relay_params = relay.frontend.from_mxnet(mx_sym, shape_dict, arg_params=args, aux_params=auxs) # repeat the same steps to run this model using TVM
5,334
35.047297
110
py
tvm
tvm-main/gallery/how_to/compile_models/from_keras.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compile Keras Models ===================== **Author**: `Yuwei Hu <https://Huyuwei.github.io/>`_ This article is an introductory tutorial to deploy Keras models with Relay. For us to begin with, keras should be installed. Tensorflow is also required since it's used as the default backend of keras. A quick solution is to install via pip .. code-block:: bash %%shell pip install keras tensorflow or please refer to official site https://keras.io/#installation """ # sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True # sphinx_gallery_end_ignore import tvm from tvm import te import tvm.relay as relay from tvm.contrib.download import download_testdata import keras import tensorflow as tf import numpy as np ###################################################################### # Load pretrained keras model # ---------------------------- # We load a pretrained resnet-50 classification model provided by keras. 
if tuple(keras.__version__.split(".")) < ("2", "4", "0"): weights_url = "".join( [ "https://github.com/fchollet/deep-learning-models/releases/", "download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5", ] ) weights_file = "resnet50_keras_old.h5" else: weights_url = "".join( [ " https://storage.googleapis.com/tensorflow/keras-applications/", "resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5", ] ) weights_file = "resnet50_keras_new.h5" weights_path = download_testdata(weights_url, weights_file, module="keras") keras_resnet50 = tf.keras.applications.resnet50.ResNet50( include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000 ) keras_resnet50.load_weights(weights_path) ###################################################################### # Load a test image # ------------------ # A single cat dominates the examples! from PIL import Image from matplotlib import pyplot as plt from tensorflow.keras.applications.resnet50 import preprocess_input img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true" img_path = download_testdata(img_url, "cat.png", module="data") img = Image.open(img_path).resize((224, 224)) plt.imshow(img) plt.show() # input preprocess data = np.array(img)[np.newaxis, :].astype("float32") data = preprocess_input(data).transpose([0, 3, 1, 2]) print("input_1", data.shape) ###################################################################### # Compile the model with Relay # ---------------------------- # convert the keras model(NHWC layout) to Relay format(NCHW layout). shape_dict = {"input_1": data.shape} mod, params = relay.frontend.from_keras(keras_resnet50, shape_dict) # compile the model target = "cuda" dev = tvm.cuda(0) # TODO(mbs): opt_level=3 causes nn.contrib_conv2d_winograd_weight_transform # to end up in the module which fails memory validation on cuda most likely # due to a latent bug. Note that the pass context only has an effect within # evaluate() and is not captured by create_executor(). 
with tvm.transform.PassContext(opt_level=0): model = relay.build_module.create_executor("graph", mod, dev, target, params).evaluate() ###################################################################### # Execute on TVM # --------------- dtype = "float32" tvm_out = model(tvm.nd.array(data.astype(dtype))) top1_tvm = np.argmax(tvm_out.numpy()[0]) ##################################################################### # Look up synset name # ------------------- # Look up prediction top 1 index in 1000 class synset. synset_url = "".join( [ "https://gist.githubusercontent.com/zhreshold/", "4d0b62f3d01426887599d4f7ede23ee5/raw/", "596b27d23537e5a1b5751d2b0481ef172f58b539/", "imagenet1000_clsid_to_human.txt", ] ) synset_name = "imagenet1000_clsid_to_human.txt" synset_path = download_testdata(synset_url, synset_name, module="data") with open(synset_path) as f: synset = eval(f.read()) print("Relay top-1 id: {}, class name: {}".format(top1_tvm, synset[top1_tvm])) # confirm correctness with keras output keras_out = keras_resnet50.predict(data.transpose([0, 2, 3, 1])) top1_keras = np.argmax(keras_out) print("Keras top-1 id: {}, class name: {}".format(top1_keras, synset[top1_keras]))
5,079
34.774648
92
py
tvm
tvm-main/gallery/how_to/compile_models/from_coreml.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compile CoreML Models ===================== **Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_, \ `Kazutaka Morita <https://github.com/kazum>`_, \ `Zhao Wu <https://github.com/FrozenGene>`_ This article is an introductory tutorial to deploy CoreML models with Relay. To begin, we must install coremltools: .. 
code-block:: bash %%shell pip install coremltools or please refer to official site https://github.com/apple/coremltools """ import tvm from tvm import te import tvm.relay as relay from tvm.contrib.download import download_testdata import coremltools as cm import numpy as np from PIL import Image ###################################################################### # Load pretrained CoreML model # ---------------------------- # We will download and load a pretrained mobilenet classification network # provided by apple in this example model_url = "https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel" model_file = "mobilenet.mlmodel" model_path = download_testdata(model_url, model_file, module="coreml") # Now you have mobilenet.mlmodel on disk mlmodel = cm.models.MLModel(model_path) ###################################################################### # Load a test image # ------------------ # A single cat dominates the examples! img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true" img_path = download_testdata(img_url, "cat.png", module="data") img = Image.open(img_path).resize((224, 224)) # Mobilenet.mlmodel's input is BGR format img_bgr = np.array(img)[:, :, ::-1] x = np.transpose(img_bgr, (2, 0, 1))[np.newaxis, :] ###################################################################### # Compile the model on Relay # --------------------------- # We should be familiar with the process right now. 
target = "llvm" shape_dict = {"image": x.shape} # Parse CoreML model and convert into Relay computation graph mod, params = relay.frontend.from_coreml(mlmodel, shape_dict) with tvm.transform.PassContext(opt_level=3): lib = relay.build(mod, target, params=params) ###################################################################### # Execute on TVM # ------------------- # The process is no different from other example from tvm.contrib import graph_executor dev = tvm.cpu(0) dtype = "float32" m = graph_executor.GraphModule(lib["default"](dev)) # set inputs m.set_input("image", tvm.nd.array(x.astype(dtype))) # execute m.run() # get outputs tvm_output = m.get_output(0) top1 = np.argmax(tvm_output.numpy()[0]) ##################################################################### # Look up synset name # ------------------- # Look up prediction top 1 index in 1000 class synset. synset_url = "".join( [ "https://gist.githubusercontent.com/zhreshold/", "4d0b62f3d01426887599d4f7ede23ee5/raw/", "596b27d23537e5a1b5751d2b0481ef172f58b539/", "imagenet1000_clsid_to_human.txt", ] ) synset_name = "imagenet1000_clsid_to_human.txt" synset_path = download_testdata(synset_url, synset_name, module="data") with open(synset_path) as f: synset = eval(f.read()) # You should see the following result: Top-1 id 282 class name tiger cat print("Top-1 id", top1, "class name", synset[top1])
4,043
34.165217
85
py
tvm
tvm-main/gallery/how_to/compile_models/from_paddle.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compile PaddlePaddle Models =========================== **Author**: `Ziyuan Ma <https://github.com/ZiyuanMa/>`_ This article is an introductory tutorial to deploy PaddlePaddle models with Relay. To begin, we'll install PaddlePaddle>=2.1.3: .. code-block:: bash %%shell pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple For more details, refer to the official install instructions at: https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html """ import tarfile import paddle import numpy as np import tvm from tvm import relay from tvm.contrib.download import download_testdata ###################################################################### # Load pretrained ResNet50 model # --------------------------------------------- # We load a pretrained ResNet50 provided by PaddlePaddle. 
url = "https://bj.bcebos.com/x2paddle/models/paddle_resnet50.tar" model_path = download_testdata(url, "paddle_resnet50.tar", module="model") with tarfile.open(model_path) as tar: names = tar.getnames() for name in names: tar.extract(name, "./") model = paddle.jit.load("./paddle_resnet50/model") ###################################################################### # Load a test image # --------------------------------------------- # A single cat dominates the examples! from PIL import Image import paddle.vision.transforms as T transforms = T.Compose( [ T.Resize((256, 256)), T.CenterCrop(224), T.ToTensor(), T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ] ) img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true" img_path = download_testdata(img_url, "cat.png", module="data") img = Image.open(img_path).resize((224, 224)) img = transforms(img) img = np.expand_dims(img, axis=0) ###################################################################### # Compile the model with relay # --------------------------------------------- target = "llvm" shape_dict = {"inputs": img.shape} mod, params = relay.frontend.from_paddle(model, shape_dict) with tvm.transform.PassContext(opt_level=3): executor = relay.build_module.create_executor( "graph", mod, tvm.cpu(0), target, params ).evaluate() ###################################################################### # Execute on TVM # --------------------------------------------- dtype = "float32" tvm_output = executor(tvm.nd.array(img.astype(dtype))).numpy() ###################################################################### # Look up synset name # --------------------------------------------- # Look up prediction top 1 index in 1000 class synset. 
synset_url = "".join( [ "https://gist.githubusercontent.com/zhreshold/", "4d0b62f3d01426887599d4f7ede23ee5/raw/", "596b27d23537e5a1b5751d2b0481ef172f58b539/", "imagenet1000_clsid_to_human.txt", ] ) synset_name = "imagenet1000_clsid_to_human.txt" synset_path = download_testdata(synset_url, synset_name, module="data") with open(synset_path) as f: synset = f.readlines() top1 = np.argmax(tvm_output[0]) print(f"TVM prediction top-1 id: {top1}, class name: {synset[top1]}")
4,003
32.647059
102
py
tvm
tvm-main/gallery/how_to/compile_models/from_oneflow.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compile OneFlow Models ====================== **Author**: `Xiaoyu Zhang <https://github.com/BBuf/>`_ This article is an introductory tutorial to deploy OneFlow models with Relay. For us to begin with, OneFlow package should be installed. A quick solution is to install via pip .. code-block:: bash %%shell pip install flowvision==0.1.0 pip install -f https://release.oneflow.info oneflow==0.7.0+cpu or please refer to official site: https://github.com/Oneflow-Inc/oneflow Currently, TVM supports OneFlow 0.7.0. Other versions may be unstable. 
""" # sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True # sphinx_gallery_end_ignore import os, math from matplotlib import pyplot as plt import numpy as np from PIL import Image # oneflow imports import flowvision import oneflow as flow import oneflow.nn as nn import tvm from tvm import relay from tvm.contrib.download import download_testdata ###################################################################### # Load a pretrained OneFlow model and save model # ---------------------------------------------- model_name = "resnet18" model = getattr(flowvision.models, model_name)(pretrained=True) model = model.eval() model_dir = "resnet18_model" if not os.path.exists(model_dir): flow.save(model.state_dict(), model_dir) ###################################################################### # Load a test image # ----------------- # Classic cat example! from PIL import Image img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true" img_path = download_testdata(img_url, "cat.png", module="data") img = Image.open(img_path).resize((224, 224)) # Preprocess the image and convert to tensor from flowvision import transforms my_preprocess = transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ] ) img = my_preprocess(img) img = np.expand_dims(img.numpy(), 0) ###################################################################### # Import the graph to Relay # ------------------------- # Convert OneFlow graph to Relay graph. The input name can be arbitrary. 
class Graph(flow.nn.Graph): def __init__(self, module): super().__init__() self.m = module def build(self, x): out = self.m(x) return out graph = Graph(model) _ = graph._compile(flow.randn(1, 3, 224, 224)) mod, params = relay.frontend.from_oneflow(graph, model_dir) ###################################################################### # Relay Build # ----------- # Compile the graph to llvm target with given input specification. target = tvm.target.Target("llvm", host="llvm") dev = tvm.cpu(0) with tvm.transform.PassContext(opt_level=3): lib = relay.build(mod, target=target, params=params) ###################################################################### # Execute the portable graph on TVM # --------------------------------- # Now we can try deploying the compiled model on target. target = "cuda" with tvm.transform.PassContext(opt_level=10): intrp = relay.build_module.create_executor("graph", mod, tvm.cuda(0), target) print(type(img)) print(img.shape) tvm_output = intrp.evaluate()(tvm.nd.array(img.astype("float32")), **params) ##################################################################### # Look up synset name # ------------------- # Look up prediction top 1 index in 1000 class synset. 
synset_url = "".join( [ "https://raw.githubusercontent.com/Cadene/", "pretrained-models.pytorch/master/data/", "imagenet_synsets.txt", ] ) synset_name = "imagenet_synsets.txt" synset_path = download_testdata(synset_url, synset_name, module="data") with open(synset_path) as f: synsets = f.readlines() synsets = [x.strip() for x in synsets] splits = [line.split(" ") for line in synsets] key_to_classname = {spl[0]: " ".join(spl[1:]) for spl in splits} class_url = "".join( [ "https://raw.githubusercontent.com/Cadene/", "pretrained-models.pytorch/master/data/", "imagenet_classes.txt", ] ) class_name = "imagenet_classes.txt" class_path = download_testdata(class_url, class_name, module="data") with open(class_path) as f: class_id_to_key = f.readlines() class_id_to_key = [x.strip() for x in class_id_to_key] # Get top-1 result for TVM top1_tvm = np.argmax(tvm_output.numpy()[0]) tvm_class_key = class_id_to_key[top1_tvm] # Convert input to OneFlow variable and get OneFlow result for comparison with flow.no_grad(): torch_img = flow.from_numpy(img) output = model(torch_img) # Get top-1 result for OneFlow top_oneflow = np.argmax(output.numpy()) oneflow_class_key = class_id_to_key[top_oneflow] print("Relay top-1 id: {}, class name: {}".format(top1_tvm, key_to_classname[tvm_class_key])) print( "OneFlow top-1 id: {}, class name: {}".format(top_oneflow, key_to_classname[oneflow_class_key]) )
5,796
30.677596
99
py
tvm
tvm-main/gallery/how_to/optimize_operators/opt_conv_tensorcore.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _opt-conv-tensorcore: How to optimize convolution using TensorCores ============================================= **Author**: `Siyuan Feng <https://github.com/Hzfengsy>`_ In this tutorial, we will demonstrate how to write a high performance convolution schedule using TensorCores in TVM. In this example, we assume the input to convolution has a large batch. We strongly recommend covering the :ref:`opt-conv-gpu` tutorial first. """ ################################################################ # TensorCore Introduction # ----------------------- # Each Tensor Core provides a 4x4x4 matrix processing array that operates # :code:`D = A * B + C`, where A, B, C and D are 4x4 matrices as Figure shows. # The matrix multiplication inputs A and B are FP16 matrices, while the accumulation # matrices C and D may be FP16 or FP32 matrices. # # However, CUDA programmers can only use warp-level primitive # :code:`wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag)` to perform # 16x16x16 half-precision matrix multiplication on tensor cores. Before invoking # the matrix multiplication, programmers must load data from memory into registers # with primitive :code:`wmma::load_matrix_sync`, explicitly. 
The NVCC compiler translates # that primitive into multiple memory load instructions. At run time, every thread loads # 16 elements from matrix A and 16 elements from B. ################################################################ # Preparation and Algorithm # ------------------------- # We use the fixed size for input tensors with 256 channels and 14 x 14 dimensions. # The batch size is 256. Convolution filters contain 512 filters of size 3 x 3. # We use stride size 1 and padding size 1 for the convolution. In the example, we use # NHWCnc memory layout.The following code defines the convolution algorithm in TVM. # sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True # sphinx_gallery_end_ignore import tvm from tvm import te import numpy as np from tvm.contrib import nvcc # The sizes of inputs and filters batch_size = 256 height = 14 width = 14 in_channels = 256 out_channels = 512 kernel_h = 3 kernel_w = 3 pad_h = 1 pad_w = 1 stride_h = 1 stride_w = 1 # TensorCore shape block_size = 16 assert batch_size % block_size == 0 assert in_channels % block_size == 0 assert out_channels % block_size == 0 # Input feature map: (N, H, W, IC, n, ic) data_shape = ( batch_size // block_size, height, width, in_channels // block_size, block_size, block_size, ) # Kernel: (H, W, IC, OC, ic, oc) kernel_shape = ( kernel_h, kernel_w, in_channels // block_size, out_channels // block_size, block_size, block_size, ) # Output feature map: (N, H, W, OC, n, oc) output_shape = ( batch_size // block_size, height, width, out_channels // block_size, block_size, block_size, ) # Reduction axes kh = te.reduce_axis((0, kernel_h), name="kh") kw = te.reduce_axis((0, kernel_w), name="kw") ic = te.reduce_axis((0, in_channels // block_size), name="ic") ii = te.reduce_axis((0, block_size), name="ii") # Algorithm A = te.placeholder(data_shape, name="A", dtype="float16") W = te.placeholder(kernel_shape, name="W", dtype="float16") Apad = te.compute( ( batch_size // block_size, height + 2 * 
pad_h, width + 2 * pad_w, in_channels // block_size, block_size, block_size, ), lambda n, h, w, i, nn, ii: tvm.tir.if_then_else( tvm.tir.all(h >= pad_h, h - pad_h < height, w >= pad_w, w - pad_w < width), A[n, h - pad_h, w - pad_w, i, nn, ii], tvm.tir.const(0.0, "float16"), ), name="Apad", ) Conv = te.compute( output_shape, lambda n, h, w, o, nn, oo: te.sum( Apad[n, h * stride_h + kh, w * stride_w + kw, ic, nn, ii].astype("float32") * W[kh, kw, ic, o, ii, oo].astype("float32"), axis=[ic, kh, kw, ii], ), name="Conv", ) s = te.create_schedule(Conv.op) s[Apad].compute_inline() ############################################################################### # Memory Scope # ------------ # In traditional GPU schedule, we have global, shared and local memory scope. # To support TensorCores, we add another three special memory scope: :code:`wmma.matrix_a`, # :code:`wmma.matrix_b` and :code:`wmma.accumulator`. On hardware, all fragments scope # stores at the on-chip registers level, the same place with local memory. # Designate the memory hierarchy AS = s.cache_read(Apad, "shared", [Conv]) WS = s.cache_read(W, "shared", [Conv]) AF = s.cache_read(AS, "wmma.matrix_a", [Conv]) WF = s.cache_read(WS, "wmma.matrix_b", [Conv]) ConvF = s.cache_write(Conv, "wmma.accumulator") ############################################################################### # Define Tensor Intrinsic # ----------------------- # In fact, TensorCore is a special hardware operation. So, we can just use tensorize # to replace a unit of computation with the TensorCore instruction. The first thing is # that we need to define tensor intrinsic. # # There are four basic operation in TensorCore: :code:`fill_fragment`, :code:`load_matrix`, # :code:`mma_sync` and :code:`store_matrix`. Since :code:`fill_fragment` and :code:`mma_sync` # are both used in matrix multiplication, so we can just write following three intrinsics. 
def intrin_wmma_load_matrix(scope): n = 16 A = te.placeholder((n, n), name="A", dtype="float16") BA = tvm.tir.decl_buffer(A.shape, A.dtype, scope="shared", data_alignment=32, offset_factor=256) C = te.compute((n, n), lambda i, j: A[i, j], name="C") BC = tvm.tir.decl_buffer(C.shape, C.dtype, scope=scope, data_alignment=32, offset_factor=256) def intrin_func(ins, outs): ib = tvm.tir.ir_builder.create() BA = ins[0] BC = outs[0] ib.emit( tvm.tir.call_intrin( "handle", "tir.tvm_load_matrix_sync", BC.data, n, n, n, BC.elem_offset // 256, BA.access_ptr("r"), n, "row_major", ) ) return ib.get() return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC}) def intrin_wmma_gemm(): n = 16 A = te.placeholder((n, n), name="A", dtype="float16") B = te.placeholder((n, n), name="B", dtype="float16") k = te.reduce_axis((0, n), name="k") C = te.compute( (n, n), lambda ii, jj: te.sum(A[ii, k].astype("float") * B[k, jj].astype("float"), axis=k), name="C", ) BA = tvm.tir.decl_buffer( A.shape, A.dtype, name="BA", scope="wmma.matrix_a", data_alignment=32, offset_factor=256 ) BB = tvm.tir.decl_buffer( B.shape, B.dtype, name="BB", scope="wmma.matrix_b", data_alignment=32, offset_factor=256 ) BC = tvm.tir.decl_buffer( C.shape, C.dtype, name="BC", scope="wmma.accumulator", data_alignment=32, offset_factor=256 ) def intrin_func(ins, outs): BA, BB = ins (BC,) = outs def init(): ib = tvm.tir.ir_builder.create() ib.emit( tvm.tir.call_intrin( "handle", "tir.tvm_fill_fragment", BC.data, n, n, n, BC.elem_offset // 256, 0.0 ) ) return ib.get() def update(): ib = tvm.tir.ir_builder.create() ib.emit( tvm.tir.call_intrin( "handle", "tir.tvm_mma_sync", BC.data, BC.elem_offset // 256, BA.data, BA.elem_offset // 256, BB.data, BB.elem_offset // 256, BC.data, BC.elem_offset // 256, ) ) return ib.get() return update(), init(), update() return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, B: BB, C: BC}) def intrin_wmma_store_matrix(): n = 16 A = te.placeholder((n, n), name="A", 
dtype="float32") BA = tvm.tir.decl_buffer( A.shape, A.dtype, scope="wmma.accumulator", data_alignment=32, offset_factor=256 ) C = te.compute((n, n), lambda i, j: A[i, j], name="C") BC = tvm.tir.decl_buffer(C.shape, C.dtype, scope="global", data_alignment=32, offset_factor=256) def intrin_func(ins, outs): ib = tvm.tir.ir_builder.create() BA = ins[0] BC = outs[0] ib.emit( tvm.tir.call_intrin( "handle", "tir.tvm_store_matrix_sync", BA.data, n, n, n, BA.elem_offset // 256, BC.access_ptr("w"), n, "row_major", ) ) return ib.get() return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC}) ############################################################################### # Scheduling the Computation # -------------------------- # To use TensorCores in TVM, we must schedule the computation into specific structure # to match the tensor intrinsic. The same as traditional GPU programs, we can also use # shared memory to boost the speed. If you have any questions about blocking and shared # memory, please refer :ref:`opt-conv-gpu`. # # In this example, each block contains 2x4 warps, and each warp calls 4x2 TensorCore # instructions. Thus, the output shape of each warp is 64x32 and each block outputs # 128x128 titles. Due to the limit of shared memory space, we only load 2 blocks (2x128x128 tiles) # one time. # # .. note:: # # *Warp-level Operation* # # Note that all TensorCore instructions are warp-level instructions, which means all 32 threads # in a warp should do this instruction simultaneously. Making threadIdx.x extent=32 is one of the # easiest way to solve this. Then We can bind threadIdx.x to any loops except those contain # TensorCore intrinsics directly or indirectly. Also note that it is not the unique solution. # The only thing we should do is to make sure all threads in a warp can call TensorCore at the same time. 
# Define tiling sizes block_row_warps = 4 block_col_warps = 2 warp_row_tiles = 2 warp_col_tiles = 4 warp_size = 32 chunk = 2 block_x = te.thread_axis("blockIdx.x") block_y = te.thread_axis("blockIdx.y") block_z = te.thread_axis("blockIdx.z") thread_x = te.thread_axis("threadIdx.x") thread_y = te.thread_axis("threadIdx.y") thread_z = te.thread_axis("threadIdx.z") nc, hc, wc, oc, nnc, ooc = Conv.op.axis block_k = s[Conv].fuse(hc, wc) s[Conv].bind(block_k, block_z) nc, nci = s[Conv].split(nc, factor=warp_row_tiles) block_i, nc = s[Conv].split(nc, factor=block_row_warps) oc, oci = s[Conv].split(oc, factor=warp_col_tiles) block_j, oc = s[Conv].split(oc, factor=block_col_warps) s[Conv].reorder(block_k, block_i, block_j, nc, oc, nci, oci, nnc, ooc) s[Conv].bind(block_i, block_x) s[Conv].bind(block_j, block_y) s[Conv].bind(nc, thread_y) s[Conv].bind(oc, thread_z) # Schedule local computation s[ConvF].compute_at(s[Conv], oc) n, h, w, o, nnf, oof = ConvF.op.axis ko, ki = s[ConvF].split(ic, factor=chunk) s[ConvF].reorder(ko, kh, ki, kw, n, o, nnf, oof, ii) # Move intermediate computation into each output compute tile s[AF].compute_at(s[ConvF], kw) s[WF].compute_at(s[ConvF], kw) # Schedule for A's share memory s[AS].compute_at(s[ConvF], kh) n, h, w, i, nn, ii = AS.op.axis tx, xo = s[AS].split(n, nparts=block_row_warps) ty, yo = s[AS].split(xo, nparts=block_col_warps) t = s[AS].fuse(nn, ii) to, ti = s[AS].split(t, factor=warp_size) s[AS].bind(tx, thread_y) s[AS].bind(ty, thread_z) s[AS].bind(ti, thread_x) # Schedule for W's share memory s[WS].compute_at(s[ConvF], kh) kh, kw, ic, o, ii, oo = WS.op.axis tx, xo = s[WS].split(o, nparts=block_row_warps) ty, yo = s[WS].split(xo, nparts=block_col_warps) t = s[WS].fuse(ii, oo) to, ti = s[WS].split(t, nparts=warp_size) s[WS].bind(tx, thread_y) s[WS].bind(ty, thread_z) s[WS].bind(to, thread_x) s[WS].vectorize(ti) print(tvm.lower(s, [A, W, Conv], simple_mode=True)) 
############################################################################### # Lowering Computation to Intrinsics # ---------------------------------- # The last phase is to lower the computation loops down to TensorCore hardware intrinsics # by mapping the 2D convolution to tensor intrinsics s[AF].tensorize(AF.op.axis[-2], intrin_wmma_load_matrix("wmma.matrix_a")) s[WF].tensorize(WF.op.axis[-2], intrin_wmma_load_matrix("wmma.matrix_b")) s[Conv].tensorize(nnc, intrin_wmma_store_matrix()) s[ConvF].tensorize(nnf, intrin_wmma_gemm()) print(tvm.lower(s, [A, W, Conv], simple_mode=True)) ############################################################################### # Generate CUDA Kernel # -------------------- # Finally we use TVM to generate and compile the CUDA kernel, and evaluate the latency of convolution. # Since TensorCores are only supported in NVIDIA GPU with Compute Capability 7.0 or higher, it may not # be able to run on our build server dev = tvm.cuda(0) if nvcc.have_tensorcore(dev.compute_version): with tvm.transform.PassContext(config={"tir.UnrollLoop": {"auto_max_step": 16}}): func = tvm.build(s, [A, W, Conv], "cuda") a_np = np.random.uniform(size=data_shape).astype(A.dtype) w_np = np.random.uniform(size=kernel_shape).astype(W.dtype) a = tvm.nd.array(a_np, dev) w = tvm.nd.array(w_np, dev) c = tvm.nd.array(np.zeros(output_shape, dtype=Conv.dtype), dev) evaluator = func.time_evaluator(func.entry_name, dev, number=10) print("conv2d with tensor core: %f ms" % (evaluator(a, w, c).mean * 1e3)) ############################################################################### # Summary # ------- # This tutorial demonstrates how TVM scheduling primitives can be used to # call TensorCores on specific GPUs.
14,750
34.544578
107
py
tvm
tvm-main/gallery/how_to/optimize_operators/opt_gemm.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _opt-gemm: How to optimize GEMM on CPU =========================== **Author**: `Jian Weng <https://github.com/were>`_, \ `Ruofei Yu <https://github.com/yuruofeifei>`_ (TL;DR) TVM provides abstract interfaces which allows users to depict an algorithm and the algorithm's implementing organization (the so-called schedule) separately. Typically, writing algorithm in high-performance schedule breaks the algorithm's readability and modularity. Also, trying various seemingly promising schedules is time-consuming. With the help of TVM, we can try these schedules efficiently to enhance the performance. In this tutorial, we will demonstrate how to use TVM to optimize square matrix multiplication and achieve 200 times faster than baseline by simply adding 18 extra lines of code. There are two important optimizations on intense computation applications executed on CPU: 1. Increase the cache hit rate of memory access. Both complex numerical computation and hot-spot memory access can be accelerated from high cache hit rate. This requires us to transform the origin memory access pattern to the pattern fits the cache policy. 2. SIMD (Single instruction multi-data), or we call it vector processing unit. 
Every time, a small batch of data, rather than a single grid, will be processed. This requires us to transform the data access pattern in the loop body in uniform pattern so that the LLVM backend can lower it to SIMD. Actually, all the methodologies used in this tutorial is a subset of tricks mentioned in this `repo <https://github.com/flame/how-to-optimize-gemm>`_. Some of them have been applied by TVM abstraction automatically, but some of them cannot be simply applied due to TVM constraints. All the experiment results mentioned below, are executed on 2015's 15' MacBook equipped with Intel i7-4770HQ CPU. The cache line size should be 64 bytes for all the x86 CPUs. """ ################################################################################################ # Preparation and Baseline # ------------------------ # In this tutorial, we will demo how to use TVM to optimize matrix multiplication. # Before actually demonstrating, we first define these variables. # Then we write a baseline implementation, the simplest way to write a matrix multiplication in TVM. import tvm import tvm.testing from tvm import te import numpy import timeit # The size of the matrix # (M, K) x (K, N) # You are free to try out different shapes, sometimes TVM optimization outperforms numpy with MKL. 
M = 1024 K = 1024 N = 1024 # The default tensor type in tvm dtype = "float32" # using Intel AVX2(Advanced Vector Extensions) ISA for SIMD # To get the best performance, please change the following line # to llvm -mcpu=core-avx2, or specific type of CPU you use target = "llvm" dev = tvm.device(target, 0) # Random generated tensor for testing a = tvm.nd.array(numpy.random.rand(M, K).astype(dtype), dev) b = tvm.nd.array(numpy.random.rand(K, N).astype(dtype), dev) np_repeat = 100 np_runing_time = timeit.timeit( setup="import numpy\n" "M = " + str(M) + "\n" "K = " + str(K) + "\n" "N = " + str(N) + "\n" 'dtype = "float32"\n' "a = numpy.random.rand(M, K).astype(dtype)\n" "b = numpy.random.rand(K, N).astype(dtype)\n", stmt="answer = numpy.dot(a, b)", number=np_repeat, ) print("Numpy running time: %f" % (np_runing_time / np_repeat)) answer = numpy.dot(a.numpy(), b.numpy()) # Algorithm k = te.reduce_axis((0, K), "k") A = te.placeholder((M, K), name="A") B = te.placeholder((K, N), name="B") C = te.compute((M, N), lambda m, n: te.sum(A[m, k] * B[k, n], axis=k), name="C") # Default schedule s = te.create_schedule(C.op) func = tvm.build(s, [A, B, C], target=target, name="mmult") assert func c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) evaluator = func.time_evaluator(func.entry_name, dev, number=1) print("Baseline: %f" % evaluator(a, b, c).mean) ################################################################################################ # In TVM, we can always inspect lower level IR to debug or optimize our schedule. # Here is the generated IR using our baseline schedule. print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################################ # Blocking # -------- # A important trick to enhance the cache hit rate is blocking --- data chunk will be computed # block by block. 
The memory access inside the block is a small neighbourhood which is with high # memory locality. In this tutorial, I picked up 32 as the blocking factor. So the block will # fill 32 * 32 * sizeof(float) which is 4KB in the cache whose total size is 32KB (L1 data cache) bn = 32 kfactor = 4 s = te.create_schedule(C.op) # Blocking by loop tiling mo, no, mi, ni = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn) (kaxis,) = s[C].op.reduce_axis ko, ki = s[C].split(kaxis, factor=kfactor) # Hoist reduction domain outside the blocking loop s[C].reorder(mo, no, ko, ki, mi, ni) func = tvm.build(s, [A, B, C], target=target, name="mmult") assert func c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) # By simply tiling the loop 32x32, and hoisting ko, ki outside the blocking loops, # we can see big speedup compared with the baseline. evaluator = func.time_evaluator(func.entry_name, dev, number=10) print("Opt1: %f" % evaluator(a, b, c).mean) ################################################################################################ # Here is the generated IR after blocking. print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################################### # Vectorization # ------------- # Another important trick is vectorization. When the memory access pattern is uniform, # the compiler can detect this pattern and pass the continuous memory to vector processor. In TVM, # we can use `vectorize` interface to hint the compiler this pattern, so that we can accelerate it # vastly. # # In this tutorial, we chose to vectorize the inner loop row data since it is cache friendly. 
s = te.create_schedule(C.op) mo, no, mi, ni = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn) (kaxis,) = s[C].op.reduce_axis ko, ki = s[C].split(kaxis, factor=kfactor) s[C].reorder(mo, no, ko, ki, mi, ni) # Vectorization s[C].vectorize(ni) func = tvm.build(s, [A, B, C], target=target, name="mmult") assert func c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) evaluator = func.time_evaluator(func.entry_name, dev, number=10) print("Opt2: %f" % evaluator(a, b, c).mean) ################################################################################################ # Here is the generated IR after vectorization. print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################################### # Loop Permutation # ---------------- # If we look at the above IR, we can see the inner loop row data is vectorized for both B and C. # Next we will look at the access pattern of A. In current schedule, A is accessed column by column # which is not cache friendly. If we change the nested loop order of ki and inner axes mi, # the access pattern for A matrix is more cache friendly. s = te.create_schedule(C.op) mo, no, mi, ni = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn) (kaxis,) = s[C].op.reduce_axis ko, ki = s[C].split(kaxis, factor=kfactor) # re-ordering s[C].reorder(mo, no, ko, mi, ki, ni) s[C].vectorize(ni) func = tvm.build(s, [A, B, C], target=target, name="mmult") assert func c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) evaluator = func.time_evaluator(func.entry_name, dev, number=10) print("Opt3: %f" % evaluator(a, b, c).mean) ################################################################################################ # Here is the generated IR after loop permutation. 
print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################################### # Array Packing # ------------- # Another important trick is array packing. The trick is to reorder the storage of a multi- # dimensional array so that it is accessed sequentially after it is flattened and stored in one- # dimensional memory. # # .. image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/array-packing.png # :align: center # # NOTE: This figure is a general illustration of how array packing works. ################################################################################################### # We can use array packing to address the access pattern for B. Observe the array access pattern of # B after flattening which is not sequential as we iterate over the K dimension. We can reorder B # with dimensions [K][N] so that it has dimensions [N/bn][K][bn] where bn is the blocking factor and # also the vector size for B in the inner loop. This reorder splits N into two dimensions --- # bigN (N/bn) and littleN (bn) --- and the new dimensions [N/bn][K][bn] match the indexing of B # from outer to inner loops (no, ko, ki, ni) resulting in a sequential access pattern for B after # flattening. # We have to re-write the algorithm slightly. 
packedB = te.compute( (N / bn, K, bn), lambda bigN, k, littleN: B[k, bigN * bn + littleN], name="packedB" ) C = te.compute( (M, N), lambda m, n: te.sum(A[m, k] * packedB[n // bn, k, tvm.tir.indexmod(n, bn)], axis=k), name="C", ) s = te.create_schedule(C.op) mo, no, mi, ni = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn) (kaxis,) = s[C].op.reduce_axis ko, ki = s[C].split(kaxis, factor=kfactor) s[C].reorder(mo, no, ko, mi, ki, ni) s[C].vectorize(ni) bigN, _, littleN = s[packedB].op.axis s[packedB].vectorize(littleN) s[packedB].parallel(bigN) func = tvm.build(s, [A, B, C], target=target, name="mmult") assert func c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) evaluator = func.time_evaluator(func.entry_name, dev, number=10) print("Opt4: %f" % evaluator(a, b, c).mean) ################################################################################################ # Here is the generated IR after array packing. print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################################ # Write cache for blocks # ---------------------- # After blocking, the program will write result to C block by block, the access pattern # is not sequential. So we can use a sequential cache array to hold the block results and # write to C when all the block results are ready. 
# s = te.create_schedule(C.op) # Allocate write cache CC = s.cache_write(C, "global") mo, no, mi, ni = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn) # Write cache is computed at no s[CC].compute_at(s[C], no) # New inner axes mc, nc = s[CC].op.axis (kaxis,) = s[CC].op.reduce_axis ko, ki = s[CC].split(kaxis, factor=kfactor) s[CC].reorder(ko, mc, ki, nc) s[CC].vectorize(nc) # TODO: Add separate optimization step to discuss loop unrolling # unrolling is a loop optimization strategy which can reduce branch # prediction failures and increases the chance of concurrent execution # unroll kfactor loops s[CC].unroll(ki) bigN, _, littleN = s[packedB].op.axis s[packedB].vectorize(littleN) s[packedB].parallel(bigN) func = tvm.build(s, [A, B, C], target=target, name="mmult") assert func c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) evaluator = func.time_evaluator(func.entry_name, dev, number=10) print("Opt5: %f" % evaluator(a, b, c).mean) ################################################################################################ # Here is the generated IR after blocking. print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################################### # Parallel # -------- # Furthermore, we can also utilize multi-core processors to do the thread-level parallelization. 
s = te.create_schedule(C.op) CC = s.cache_write(C, "global") mo, no, mi, ni = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn) s[CC].compute_at(s[C], no) mc, nc = s[CC].op.axis (kaxis,) = s[CC].op.reduce_axis ko, ki = s[CC].split(kaxis, factor=kfactor) s[CC].reorder(ko, mc, ki, nc) s[CC].vectorize(nc) s[CC].unroll(ki) # parallel s[C].parallel(mo) bigN, _, littleN = s[packedB].op.axis s[packedB].vectorize(littleN) s[packedB].parallel(bigN) func = tvm.build(s, [A, B, C], target=target, name="mmult") assert func c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) evaluator = func.time_evaluator(func.entry_name, dev, number=50) opt6_time = evaluator(a, b, c).mean print("Opt6: %f" % opt6_time) ################################################################################################ # Here is the generated IR after parallelization. print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################################### ################################################################################################## # Summary # ------- # After applying the above simple optimizations with only 18 lines of code, # our generated code can achieve 60% of the `numpy` performance with MKL. # Note that the outputs on the web page reflect the running times on a non-exclusive # Docker container, thereby they are *unreliable*. It is highly encouraged to run the # tutorial by yourself to observe the performance gain achieved by TVM.
14,849
36.594937
100
py
tvm
tvm-main/gallery/how_to/optimize_operators/opt_conv_cuda.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _opt-conv-gpu: How to optimize convolution on GPU ================================== **Author**: `Haichen Shen <https://homes.cs.washington.edu/~haichen/>`_ In this tutorial, we will demonstrate how to write a high performance convolution implementation in TVM. We use square size input tensors and filters as an example, and assume the input to convolution has a large batch. In this example, we use a different layout to store the data in order to achieve better data locality. The buffer layout is HWCN, which stands for height, width, channel, batch. """ ################################################################ # Preparation and Algorithm # ------------------------- # # We use the fixed size for input tensors with 256 channels and 14 x 14 # dimensions. The batch size is 256. Convolution filters contain 512 filters # of size 3 x 3. We use stride size 1 and padding size 1 for the # convolution. The following code defines the convolution algorithm in TVM. 
# # sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True # sphinx_gallery_end_ignore import numpy as np import tvm from tvm import te # The sizes of inputs and filters batch = 256 in_channel = 256 out_channel = 512 in_size = 14 kernel = 3 pad = 1 stride = 1 # Algorithm A = te.placeholder((in_size, in_size, in_channel, batch), name="A") W = te.placeholder((kernel, kernel, in_channel, out_channel), name="W") out_size = (in_size - kernel + 2 * pad) // stride + 1 # Pad input Apad = te.compute( (in_size + 2 * pad, in_size + 2 * pad, in_channel, batch), lambda yy, xx, cc, nn: tvm.tir.if_then_else( tvm.tir.all(yy >= pad, yy - pad < in_size, xx >= pad, xx - pad < in_size), A[yy - pad, xx - pad, cc, nn], tvm.tir.const(0.0, "float32"), ), name="Apad", ) # Create reduction variables rc = te.reduce_axis((0, in_channel), name="rc") ry = te.reduce_axis((0, kernel), name="ry") rx = te.reduce_axis((0, kernel), name="rx") # Compute the convolution B = te.compute( (out_size, out_size, out_channel, batch), lambda yy, xx, ff, nn: te.sum( Apad[yy * stride + ry, xx * stride + rx, rc, nn] * W[ry, rx, rc, ff], axis=[ry, rx, rc] ), name="B", ) ############################################################################### # Memory Hierarchy # ---------------- # # We first specify the memory hierarchy for buffers. The figure below shows the # GPU memory hierarchy. One important difference from CPU memory hierarchy is # that GPU provides a cache buffer called shared memory, which is managed by # programmers. Thus how to maximize the data reuse in the shared memory is # critical to achieve high performance in GPU kernels. # # .. image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/gpu_memory_hierarchy.png # :align: center # :height: 319px # :width: 271px # # In this example, we load both Apad and W into buffer AA and WW, which are # stored in the shared memory. These buffers will be later shared by all # threads within the same thread block to compute the convolution. 
Each thread # then loads its own part from shared buffer into their local registers, AL and # WL. BL is a local cache of output B, which is also stored in the thread local # registers. # # Designate the memory hierarchy s = te.create_schedule(B.op) s[Apad].compute_inline() # compute Apad inline AA = s.cache_read(Apad, "shared", [B]) WW = s.cache_read(W, "shared", [B]) AL = s.cache_read(AA, "local", [B]) WL = s.cache_read(WW, "local", [B]) BL = s.cache_write(B, "local") ############################################################################### # Blocking # -------- # # The following code splits the workload into thread blocks and individual # threads. We follow the blocking scheme in the matrix multiply. As shown in the # figure below, given a pixel coordinate (y, x), a thread block is responsible # for computing a region of block_factor x block_factor (64 x 64) for output # channels and batch. Due to the limit of shared memory space, we only load step # x block_factor (8 x 64) data from Apad and B each time to buffers in the # shared memory. # # .. 
image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/conv_gpu_blocking.png # :align: center # :height: 308px # :width: 317px # # tile consts tile = 8 num_thread = 8 block_factor = tile * num_thread step = 8 vthread = 2 # Get the GPU thread indices block_x = te.thread_axis("blockIdx.x") block_y = te.thread_axis("blockIdx.y") block_z = te.thread_axis("blockIdx.z") thread_x = te.thread_axis((0, num_thread), "threadIdx.x") thread_y = te.thread_axis((0, num_thread), "threadIdx.y") thread_xz = te.thread_axis((0, vthread), "vthread", name="vx") thread_yz = te.thread_axis((0, vthread), "vthread", name="vy") # Split the workloads hi, wi, fi, ni = s[B].op.axis bz = s[B].fuse(hi, wi) by, fi = s[B].split(fi, factor=block_factor) bx, ni = s[B].split(ni, factor=block_factor) # Bind the iteration variables to GPU thread indices s[B].bind(bz, block_z) s[B].bind(by, block_y) s[B].bind(bx, block_x) ############################################################################### # Virtual Thread Split # -------------------- # # We further split the workload from a thread block to individual threads. To # avoid *memory bank conflict*, we use virtual thread to split the area into 4 # parts, and then tile into 8x8 grids. Therefore, shown in the figure below, # each thread computes 4 strided grids, where size of each grid is 4 x 4. # # .. 
image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/conv_gpu_vthread.png # :align: center # :height: 188px # :width: 268px # tyz, fi = s[B].split(fi, nparts=vthread) # virtual thread split txz, ni = s[B].split(ni, nparts=vthread) # virtual thread split ty, fi = s[B].split(fi, nparts=num_thread) tx, ni = s[B].split(ni, nparts=num_thread) s[B].reorder(bz, by, bx, tyz, txz, ty, tx, fi, ni) s[B].bind(tyz, thread_yz) s[B].bind(txz, thread_xz) s[B].bind(ty, thread_y) s[B].bind(tx, thread_x) ############################################################################### # Cooperative Fetching # -------------------- # # As mentioned before, each time step we need to transfer step x block_factor # data from GPU global memory to shared memory. In order to reduce the memory # transfer per thread, the following code lets threads in the same thread block # coopertively fetch dependent data from global memory. # # Schedule BL local write s[BL].compute_at(s[B], tx) yi, xi, fi, ni = s[BL].op.axis ry, rx, rc = s[BL].op.reduce_axis rco, rci = s[BL].split(rc, factor=step) s[BL].reorder(rco, ry, rx, rci, fi, ni) # Attach computation to iteration variables s[AA].compute_at(s[BL], rx) s[WW].compute_at(s[BL], rx) s[AL].compute_at(s[BL], rci) s[WL].compute_at(s[BL], rci) # Schedule for A's shared memory load yi, xi, ci, ni = s[AA].op.axis ty, ci = s[AA].split(ci, nparts=num_thread) tx, ni = s[AA].split(ni, nparts=num_thread) _, ni = s[AA].split(ni, factor=4) s[AA].reorder(ty, tx, yi, xi, ci, ni) s[AA].bind(ty, thread_y) s[AA].bind(tx, thread_x) s[AA].vectorize(ni) # vectorize memory load # Schedule for W's shared memory load yi, xi, ci, fi = s[WW].op.axis ty, ci = s[WW].split(ci, nparts=num_thread) tx, fi = s[WW].split(fi, nparts=num_thread) _, fi = s[WW].split(fi, factor=4) s[WW].reorder(ty, tx, yi, xi, ci, fi) s[WW].bind(ty, thread_y) s[WW].bind(tx, thread_x) s[WW].vectorize(fi) # vectorize memory load 
############################################################################### # Generate CUDA Kernel # -------------------- # # Finally we use TVM to generate and compile the CUDA kernel, and evaluate the # latency of convolution. # func = tvm.build(s, [A, W, B], "cuda") dev = tvm.cuda(0) a_np = np.random.uniform(size=(in_size, in_size, in_channel, batch)).astype(A.dtype) w_np = np.random.uniform(size=(kernel, kernel, in_channel, out_channel)).astype(W.dtype) a = tvm.nd.array(a_np, dev) w = tvm.nd.array(w_np, dev) b = tvm.nd.array(np.zeros((out_size, out_size, out_channel, batch), dtype=B.dtype), dev) func(a, w, b) evaluator = func.time_evaluator(func.entry_name, dev, number=1) print("Convolution: %f ms" % (evaluator(a, w, b).mean * 1e3))
8,997
34.565217
95
py
tvm
tvm-main/gallery/how_to/tune_with_autoscheduler/tune_network_x86.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Auto-scheduling a Neural Network for x86 CPU ============================================ **Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, \ `Chengfan Jia <https://github.com/jcf94/>`_ Auto-tuning for specific devices and workloads is critical for getting the best performance. This is a tutorial on how to tune a whole neural network for x86 CPU with the auto-scheduler. To auto-tune a neural network, we partition the network into small subgraphs and tune them independently. Each subgraph is treated as one search task. A task scheduler slices the time and dynamically allocates time resources to these tasks. The task scheduler predicts the impact of each task on the end-to-end execution time and prioritizes the one that can reduce the execution time the most. For each subgraph, we use the compute declaration in :code:`tvm/python/topi` to get the computational DAG in the tensor expression form. We then use the auto-scheduler to construct a search space of this DAG and search for good schedules (low-level optimizations). 
Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on manual templates to define the search space, the auto-scheduler does not require any schedule templates. In other words, the auto-scheduler only uses the compute declarations in :code:`tvm/python/topi` and does not use existing schedule templates. Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. """ import numpy as np import tvm from tvm import relay, auto_scheduler from tvm.relay import data_dep_optimization as ddo import tvm.relay.testing from tvm.contrib import graph_executor ################################################################# # Define a Network # ---------------- # First, we need to define the network with relay frontend API. # We can load some pre-defined network from :code:`tvm.relay.testing`. # We can also load models from MXNet, ONNX, PyTorch, and TensorFlow # (see :ref:`front end tutorials<tutorial-frontend>`). # # For convolutional neural networks, although auto-scheduler can work correctly # with any layout, we found the best performance is typically achieved with NHWC layout. # We also implemented more optimizations for NHWC layout with the auto-scheduler. # So it is recommended to convert your models to NHWC layout to use the auto-scheduler. # You can use :ref:`ConvertLayout <convert-layout-usage>` pass to do the layout conversion in TVM. 
def get_network(name, batch_size, layout="NHWC", dtype="float32", use_sparse=False): """Get the symbol definition and random weight of a network""" # auto-scheduler prefers NHWC layout if layout == "NHWC": image_shape = (224, 224, 3) elif layout == "NCHW": image_shape = (3, 224, 224) else: raise ValueError("Invalid layout: " + layout) input_shape = (batch_size,) + image_shape output_shape = (batch_size, 1000) if name.startswith("resnet-"): n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape, ) elif name.startswith("resnet3d-"): n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape, ) elif name == "mobilenet": mod, params = relay.testing.mobilenet.get_workload( batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape ) elif name == "squeezenet_v1.1": assert layout == "NCHW", "squeezenet_v1.1 only supports NCHW layout" mod, params = relay.testing.squeezenet.get_workload( version="1.1", batch_size=batch_size, dtype=dtype, image_shape=image_shape, ) elif name == "inception_v3": input_shape = (batch_size, 3, 299, 299) if layout == "NCHW" else (batch_size, 299, 299, 3) mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype) elif name == "mxnet": # an example for mxnet model from mxnet.gluon.model_zoo.vision import get_model assert layout == "NCHW" block = get_model("resnet50_v1", pretrained=True) mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype) net = mod["main"] net = relay.Function( net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs ) mod = tvm.IRModule.from_expr(net) elif name == "mlp": mod, params = relay.testing.mlp.get_workload( batch_size=batch_size, dtype=dtype, image_shape=image_shape, num_classes=1000 ) 
else: raise ValueError("Network not found.") if use_sparse: from tvm.topi.sparse.utils import convert_model_dense_to_sparse mod, params = convert_model_dense_to_sparse(mod, params, bs_r=4, random_params=True) return mod, params, input_shape, output_shape # Define the neural network and compilation target. # If the target machine supports avx512 instructions, replace the # "llvm -mcpu=core-avx2" with "llvm -mcpu=skylake-avx512" network = "resnet-50" use_sparse = False batch_size = 1 layout = "NHWC" target = tvm.target.Target("llvm -mcpu=core-avx2") dtype = "float32" log_file = "%s-%s-B%d-%s.json" % (network, layout, batch_size, target.kind.name) ################################################################# # Extract Search Tasks # -------------------- # Next, we extract the search tasks and their weights from a network. # The weight of a task is the number of appearances of the task's subgraph # in the whole network. # By using the weight, we can approximate the end-to-end latency of the network # as :code:`sum(latency[t] * weight[t])`, where :code:`latency[t]` is the # latency of a task and :code:`weight[t]` is the weight of the task. # The task scheduler will just optimize this objective. # Extract tasks from the network print("Get model...") mod, params, input_shape, output_shape = get_network( network, batch_size, layout, dtype=dtype, use_sparse=use_sparse, ) print("Extract tasks...") tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target) for idx, task in enumerate(tasks): print("========== Task %d (workload key: %s) ==========" % (idx, task.workload_key)) print(task.compute_dag) ################################################################# # Begin Tuning # ------------ # Now, we set some options for tuning and launch the search tasks # # * :code:`num_measure_trials` is the number of measurement trials we can use during the tuning. # You can set it to a small number (e.g., 200) for a fast demonstrative run. 
# In practice, we recommend setting it around :code:`800 * len(tasks)`, # which is typically enough for the search to converge. # For example, there are 29 tasks in resnet-50, so we can set it as 20000. # You can adjust this parameter according to your time budget. # * In addition, we use :code:`RecordToFile` to dump measurement records into a log file, # The measurement records can be used to query the history best, resume the search, # and do more analyses later. # * see :any:`auto_scheduler.TuningOptions`, # :any:`auto_scheduler.LocalRunner` for more parameters. # def run_tuning(): print("Begin tuning...") tuner = auto_scheduler.TaskScheduler(tasks, task_weights) tune_option = auto_scheduler.TuningOptions( num_measure_trials=200, # change this to 20000 to achieve the best performance runner=auto_scheduler.LocalRunner(repeat=10, enable_cpu_cache_flush=True), measure_callbacks=[auto_scheduler.RecordToFile(log_file)], ) if use_sparse: from tvm.topi.sparse.utils import sparse_sketch_rules search_policy = [ auto_scheduler.SketchPolicy( task, program_cost_model=auto_scheduler.XGBModel(), init_search_callbacks=sparse_sketch_rules(), ) for task in tasks ] tuner.tune(tune_option, search_policy=search_policy) else: tuner.tune(tune_option) # We do not run the tuning in our webpage server since it takes too long. # Uncomment the following line to run it by yourself. # run_tuning() ###################################################################### # .. note:: Explain the printed information during tuning # # During the tuning, a lot of information will be printed on the console. # They are used for debugging purposes. The most important info is the output # of the task scheduler. The following table is a sample output. # # .. 
code-block:: c # # ---------------------------------------------------------------------- # ------------------------------ [ Task Scheduler ] # ---------------------------------------------------------------------- # | ID | Latency (ms) | Speed (GFLOPS) | Trials | # ------------------------------------------------- # | 0 | 0.010 | 0.40 | 64 | # | 1 | 0.087 | 47.19 | 64 | # | 2 | 0.008 | -0.00 | 64 | # | 3 | 0.177 | 582.07 | 64 | # | 4 | 0.268 | 862.37 | 256 | # | 5 | 0.166 | 621.13 | 128 | # | 6 | 0.170 | 605.10 | 128 | # | 7 | 0.128 | 403.20 | 64 | # | 8 | 0.189 | 545.71 | 64 | # | 9 | 0.231 | 1001.01 | 448 | # | 10 | 0.155 | 664.80 | 256 | # | 11 | 0.155 | 662.86 | 256 | # | 12 | 0.119 | 434.08 | 64 | # | 13 | 0.199 | 522.13 | 64 | # | 14 | 0.235 | 986.56 | 320 | # | 15 | 0.149 | 689.13 | 128 | # | 16 | 0.155 | 664.80 | 192 | # | 17 | 0.151 | 340.64 | 64 | # | 18 | 0.176 | 597.55 | 128 | # | 19 | 0.220 | 1054.37 | 192 | # | 20 | 0.150 | 686.01 | 128 | # | 21 | 0.159 | 650.88 | 128 | # | 22 | 0.073 | 358.19 | 64 | # | 23 | 0.031 | 70.63 | 64 | # | 24 | 0.251 | 947.73 | 128 | # | 25 | 0.157 | 652.47 | 128 | # | 26 | 0.215 | 954.84 | 128 | # | 27 | 0.237 | 868.92 | 128 | # | 28 | 0.266 | 774.06 | 128 | # ------------------------------------------------- # Estimated total latency: 10.016 ms Trials: 3992 Used time : 1131 s Next ID: 15 # # This table lists the latency and (estimated) speed of all tasks. # It also lists the allocation of measurement trials for all tasks. # The last line prints the total weighted latency of these tasks, # which can be a rough estimation of the end-to-end execution time # of the network. # The last line also prints the total number of measurement trials, # total time spent on auto-tuning and the id of the next task to tune. # # There will also be some "tvm::Error"s errors, because the # auto-scheduler will try some invalid schedules. 
# You can safely ignore them if the tuning can continue, because these # errors are isolated from the main process. # ###################################################################### # .. note:: Terminate the tuning earlier # # You can terminate the tuning earlier by forcibly killing this process. # As long as you get at least one valid schedule for each task in the log file, # you should be able to do the compilation (the secion below). # ################################################################# # Compile and Evaluate # -------------------- # After auto-tuning, we can compile the network with the best schedules we found. # All measurement records are dumped into the log file during auto-tuning, # so we can read the log file and load the best schedules. # Compile with the history best print("Compile...") with auto_scheduler.ApplyHistoryBest(log_file): with tvm.transform.PassContext(opt_level=3, config={"relay.backend.use_auto_scheduler": True}): lib = relay.build(mod, target=target, params=params) # Create graph executor dev = tvm.device(str(target), 0) module = graph_executor.GraphModule(lib["default"](dev)) data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype)) module.set_input("data", data_tvm) # Evaluate print("Evaluate inference time cost...") print(module.benchmark(dev, repeat=3, min_repeat_ms=500)) ################################################################# # Other Tips # ---------- # 1. During the tuning, the auto-scheduler needs to compile many programs and # extract feature from them. This part is CPU-intensive, # so a high-performance CPU with many cores is recommended for faster search. # 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill -i log.json` # to distill the large log file and only save the best useful records. # 3. You can resume a search from the previous log file. 
You just need to # add a new argument :code:`load_log_file` when creating the task scheduler # in function :code:`run_tuning`. Say, # :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)` # 4. If you have multiple target CPUs, you can use all of them for measurements to # parallelize the measurements. Check this :ref:`section <tutorials-autotvm-scale-up-rpc-tracker>` # to learn how to use the RPC Tracker and RPC Server. # To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions` # with :any:`auto_scheduler.RPCRunner`.
15,076
42.575145
101
py
tvm
tvm-main/gallery/how_to/tune_with_autoscheduler/tune_network_cuda.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Auto-scheduling a Neural Network for NVIDIA GPU =============================================== **Author**: `Lianmin Zheng <https://github.com/merrymercy>`_ Auto-tuning for specific devices and workloads is critical for getting the best performance. This is a tutorial on how to tune a whole neural network for NVIDIA GPU with the auto-scheduler. To auto-tune a neural network, we partition the network into small subgraphs and tune them independently. Each subgraph is treated as one search task. A task scheduler slices the time and dynamically allocates time resources to these tasks. The task scheduler predicts the impact of each task on the end-to-end execution time and prioritizes the one that can reduce the execution time the most. For each subgraph, we use the compute declaration in :code:`tvm/python/topi` to get the computational DAG in the tensor expression form. We then use the auto-scheduler to construct a search space of this DAG and search for good schedules (low-level optimizations). Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on manual templates to define the search space, the auto-scheduler does not require any schedule templates. 
In other words, the auto-scheduler only uses the compute declarations in :code:`tvm/python/topi` and does not use existing schedule templates. Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. """ import numpy as np import tvm from tvm import relay, auto_scheduler import tvm.relay.testing from tvm.contrib import graph_executor ################################################################# # Define a Network # ---------------- # First, we need to define the network with relay frontend API. # We can load some pre-defined network from :code:`tvm.relay.testing`. # We can also load models from MXNet, ONNX, PyTorch, and TensorFlow # (see :ref:`front end tutorials<tutorial-frontend>`). # # For convolutional neural networks, although auto-scheduler can work correctly # with any layout, we found the best performance is typically achieved with NHWC layout. # We also implemented more optimizations for NHWC layout with the auto-scheduler. # So it is recommended to convert your models to NHWC layout to use the auto-scheduler. # You can use :ref:`ConvertLayout <convert-layout-usage>` pass to do the layout conversion in TVM. 
def get_network(name, batch_size, layout="NHWC", dtype="float32"): """Get the symbol definition and random weight of a network""" # auto-scheduler prefers NHWC layout if layout == "NHWC": image_shape = (224, 224, 3) elif layout == "NCHW": image_shape = (3, 224, 224) else: raise ValueError("Invalid layout: " + layout) input_shape = (batch_size,) + image_shape output_shape = (batch_size, 1000) if name.startswith("resnet-"): n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape, ) elif name.startswith("resnet3d-"): n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape, ) elif name == "mobilenet": mod, params = relay.testing.mobilenet.get_workload( batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape ) elif name == "squeezenet_v1.1": assert layout == "NCHW", "squeezenet_v1.1 only supports NCHW layout" mod, params = relay.testing.squeezenet.get_workload( version="1.1", batch_size=batch_size, dtype=dtype, image_shape=image_shape, ) elif name == "inception_v3": input_shape = (batch_size, 3, 299, 299) if layout == "NCHW" else (batch_size, 299, 299, 3) mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype) elif name == "mxnet": # an example for mxnet model from mxnet.gluon.model_zoo.vision import get_model assert layout == "NCHW" block = get_model("resnet18_v1", pretrained=True) mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype) net = mod["main"] net = relay.Function( net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs ) mod = tvm.IRModule.from_expr(net) return mod, params, input_shape, output_shape # Define the neural network and compilation target network = "resnet-18" batch_size = 1 layout = "NHWC" target = 
tvm.target.Target("cuda") dtype = "float32" log_file = "%s-%s-B%d-%s.json" % (network, layout, batch_size, target.kind.name) ################################################################# # Extract Search Tasks # -------------------- # Next, we extract the search tasks and their weights from a network. # The weight of a task is the number of appearances of the task's subgraph # in the whole network. # By using the weight, we can approximate the end-to-end latency of the network # as :code:`sum(latency[t] * weight[t])`, where :code:`latency[t]` is the # latency of a task and :code:`weight[t]` is the weight of the task. # The task scheduler will just optimize this objective. # Extract tasks from the network print("Extract tasks...") mod, params, input_shape, output_shape = get_network(network, batch_size, layout, dtype=dtype) tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target) for idx, task in enumerate(tasks): print("========== Task %d (workload key: %s) ==========" % (idx, task.workload_key)) print(task.compute_dag) ################################################################# # Begin Tuning # ------------ # Now, we set some options for tuning and launch the search tasks # # * :code:`measure_ctx` launches a different process for measurement to # provide isolation. It can protect the main process from GPU crashes # during measurement and avoid other runtime conflicts. # * :code:`min_repeat_ms` defines the minimum duration of one "repeat" in every measurement. # This can warmup the GPU, which is necessary to get accurate measurement results. # Typically, we recommend a value >= 300 ms. # * :code:`num_measure_trials` is the number of measurement trials we can use during the tuning. # You can set it to a small number (e.g., 200) for a fast demonstrative run. # In practice, we recommend setting it around :code:`900 * len(tasks)`, # which is typically enough for the search to converge. 
# For example, there are 24 tasks in resnet-18, so we can set it as 20000. # You can adjust this parameter according to your time budget. # * In addition, we use :code:`RecordToFile` to dump measurement records into a log file, # The measurement records can be used to query the history best, resume the search, # and do more analyses later. # * see :any:`auto_scheduler.TuningOptions`, # :any:`auto_scheduler.LocalRPCMeasureContext` for more parameters. # def run_tuning(): print("Begin tuning...") measure_ctx = auto_scheduler.LocalRPCMeasureContext(repeat=1, min_repeat_ms=300, timeout=10) tuner = auto_scheduler.TaskScheduler(tasks, task_weights) tune_option = auto_scheduler.TuningOptions( num_measure_trials=200, # change this to 20000 to achieve the best performance runner=measure_ctx.runner, measure_callbacks=[auto_scheduler.RecordToFile(log_file)], ) tuner.tune(tune_option) # We do not run the tuning in our webpage server since it takes too long. # Uncomment the following line to run it by yourself. # run_tuning() ###################################################################### # .. note:: Explain the printed information during tuning # # During the tuning, a lot of information will be printed on the console. # They are used for debugging purposes. The most important info is the output # of the task scheduler. The following table is a sample output. # # .. 
code-block:: c # # ---------------------------------------------------------------------- # ------------------------------ [ Task Scheduler ] # ---------------------------------------------------------------------- # | ID | Latency (ms) | Speed (GFLOPS) | Trials | # ------------------------------------------------- # | 0 | 0.005 | 0.88 | 64 | # | 1 | 0.010 | 99.10 | 64 | # | 2 | 0.006 | 0.00 | 64 | # | 3 | 0.145 | 979.78 | 384 | # | 4 | 0.130 | 1097.02 | 384 | # | 5 | 0.143 | 992.69 | 384 | # | 6 | 0.076 | 1526.86 | 192 | # | 7 | 0.115 | 999.44 | 320 | # | 8 | 0.079 | 1449.39 | 320 | # | 9 | 0.122 | 938.73 | 384 | # | 10 | 0.063 | 1832.98 | 192 | # | 11 | 0.072 | 1763.62 | 256 | # | 12 | 0.062 | 2036.40 | 192 | # | 13 | 0.068 | 1874.44 | 192 | # | 14 | 0.049 | 2346.50 | 128 | # | 15 | 0.076 | 1694.31 | 256 | # | 16 | 0.067 | 1933.30 | 448 | # | 17 | 0.076 | 1680.90 | 256 | # | 18 | 0.022 | 98.43 | 64 | # | 19 | 0.076 | 3112.55 | 192 | # | 20 | 0.013 | 2026.44 | 64 | # | 21 | 0.011 | 1136.69 | 64 | # | 22 | 0.013 | 992.47 | 64 | # | 23 | 0.020 | 627.56 | 64 | # ------------------------------------------------- # Estimated total latency: 1.587 ms Trials: 4992 Used time : 13296 s Next ID: 3 # # This table lists the latency and (estimated) speed of all tasks. # It also lists the allocation of measurement trials for all tasks. # The last line prints the total weighted latency of these tasks, # which can be a rough estimation of the end-to-end execution time # of the network. # The last line also prints the total number of measurement trials, # total time spent on auto-tuning and the id of the next task to tune. # # There will also be some "tvm::Error"s and CUDA errors, because the # auto-scheduler will try some invalid schedules. # You can safely ignore them if the tuning can continue, because these # errors are isolated from the main process. # ###################################################################### # .. 
note:: Terminate the tuning earlier # # You can terminate the tuning earlier by forcibly killing this process. # As long as you get at least one valid schedule for each task in the log file, # you should be able to do the compilation (the secion below). # ################################################################# # Compile and Evaluate # -------------------- # After auto-tuning, we can compile the network with the best schedules we found. # All measurement records are dumped into the log file during auto-tuning, # so we can read the log file and load the best schedules. # Compile with the history best print("Compile...") with auto_scheduler.ApplyHistoryBest(log_file): with tvm.transform.PassContext(opt_level=3, config={"relay.backend.use_auto_scheduler": True}): lib = relay.build(mod, target=target, params=params) # Create graph executor dev = tvm.device(str(target), 0) module = graph_executor.GraphModule(lib["default"](dev)) data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype)) module.set_input("data", data_tvm) # Evaluate print("Evaluate inference time cost...") print(module.benchmark(dev, repeat=3, min_repeat_ms=500)) ################################################################# # Other Tips # ---------- # 1. During the tuning, the auto-scheduler needs to compile many programs and # extract feature from them. This part is CPU-intensive, # so a high-performance CPU with many cores is recommended for faster search. # 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill -i log.json` # to distill the large log file and only save the best useful records. # 3. You can resume a search from the previous log file. You just need to # add a new argument :code:`load_log_file` when creating the task scheduler # in function :code:`run_tuning`. Say, # :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)` # 4. 
If you have multiple target GPUs, you can use all of them for measurements to # parallelize the measurements. Check this :ref:`section <tutorials-autotvm-scale-up-rpc-tracker>` # to learn how to use the RPC Tracker and RPC Server. # To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions` # with :any:`auto_scheduler.RPCRunner`.
14,090
44.163462
101
py
tvm
tvm-main/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _auto-scheduler-conv-gpu: Auto-scheduling a Convolution Layer for GPU =========================================== **Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, \ `Chengfan Jia <https://github.com/jcf94/>`_ This is a tutorial on how to use the auto-scheduler for GPUs. Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on manual templates to define the search space, the auto-scheduler does not require any templates. Users only need to write the computation declaration without any schedule commands or templates. The auto-scheduler can automatically generate a large search space and find a good schedule in the space. We use a convolution layer as an example in this tutorial. Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. 
""" # sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True # sphinx_gallery_end_ignore import os import numpy as np import tvm from tvm import te, auto_scheduler, topi from tvm.topi.testing import conv2d_nchw_python ###################################################################### # Define the computation # ^^^^^^^^^^^^^^^^^^^^^^ # To begin with, let us define the computation of a convolution layer. # The function should return the list of input/output tensors. # From these tensors, the auto-scheduler can get the whole computational graph. @auto_scheduler.register_workload def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding): data = te.placeholder((N, CI, H, W), name="data") kernel = te.placeholder((CO, CI, KH, KW), name="kernel") bias = te.placeholder((1, CO, 1, 1), name="bias") conv = topi.nn.conv2d_nchw(data, kernel, stride, padding, dilation=1, out_dtype="float32") out = topi.nn.relu(conv + bias) return [data, kernel, bias, out] ###################################################################### # Create the search task # ^^^^^^^^^^^^^^^^^^^^^^ # We then create a search task for the last convolution layer in the resnet. target = tvm.target.Target("cuda") # Use the last layer in ResNet-50 N, H, W, CO, CI, KH, KW, strides, padding = 1, 7, 7, 512, 512, 3, 3, (1, 1), (1, 1) task = auto_scheduler.SearchTask( func=conv2d_layer, args=(N, H, W, CO, CI, KH, KW, strides, padding), target=target ) # Inspect the computational graph print("Computational DAG:") print(task.compute_dag) ###################################################################### # Next, we set parameters for the auto-scheduler. These parameters # mainly specify how we do the measurement during the search. # # * :code:`measure_ctx` launches a different process for measurement to # provide isolation. It can protect the main process from GPU crashes # during measurement and avoid other runtime conflicts. 
# * :code:`min_repeat_ms` defines the minimum duration of one "repeat" in every measurement. # This can warmup the GPU, which is necessary to get accurate measurement results. # Typically, we recommend a value >= 300 ms. # * :code:`num_measure_trials` is the number of measurement trials we can use during the search. # We only make 10 trials in this tutorial for a fast demonstration. In practice, 1000 is a # good value for the search to converge. You can do more trials according to your time budget. # * In addition, we use :code:`RecordToFile` to dump measurement records into a file `conv2d.json`. # The measurement records can be used to query the history best, resume the search, # and do more analyses later. # * see :any:`auto_scheduler.TuningOptions`, # :any:`auto_scheduler.LocalRPCMeasureContext` for more parameters. log_file = "conv2d.json" measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300) tune_option = auto_scheduler.TuningOptions( num_measure_trials=10, # change this to 1000 to achieve the best performance runner=measure_ctx.runner, measure_callbacks=[auto_scheduler.RecordToFile(log_file)], verbose=2, ) ###################################################################### # Run the search # ^^^^^^^^^^^^^^ # Now we get all inputs ready. Pretty simple, isn't it? # We can kick off the search and let the auto-scheduler do its magic. # After some measurement trials, we can load the best schedule from the log # file and apply it. # Run auto-tuning (search) # We do not run the tuning in our webpage server since it takes too long. # Uncomment the following line to run it by yourself. # task.tune(tune_option) # Apply the best schedule sch, args = task.apply_best(log_file) # Kill the measurement process del measure_ctx ###################################################################### # We can lower the schedule to see the IR after auto-scheduling. 
# The auto-scheduler correctly performs optimizations including multi-level tiling, # cooperative fetching, unrolling and operator fusion. print("Lowered TIR:") print(tvm.lower(sch, args, simple_mode=True)) ###################################################################### # Check correctness and evaluate performance # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # We build the binary and check its correctness and performance. func = tvm.build(sch, args, target) # Check correctness data_np = np.random.uniform(size=(N, CI, H, W)).astype(np.float32) weight_np = np.random.uniform(size=(CO, CI, KH, KW)).astype(np.float32) bias_np = np.random.uniform(size=(1, CO, 1, 1)).astype(np.float32) conv_np = conv2d_nchw_python(data_np, weight_np, strides, padding) out_np = np.maximum(conv_np + bias_np, 0.0) dev = tvm.cuda() data_tvm = tvm.nd.array(data_np, device=dev) weight_tvm = tvm.nd.array(weight_np, device=dev) bias_tvm = tvm.nd.array(bias_np, device=dev) out_tvm = tvm.nd.empty(out_np.shape, device=dev) func(data_tvm, weight_tvm, bias_tvm, out_tvm) # Check results np.testing.assert_allclose(out_np, out_tvm.numpy(), rtol=1e-3) # Evaluate execution time evaluator = func.time_evaluator(func.entry_name, dev, min_repeat_ms=500) print( "Execution time of this operator: %.3f ms" % (np.median(evaluator(data_tvm, weight_tvm, bias_tvm, out_tvm).results) * 1000) ) ###################################################################### # Using the record file # ^^^^^^^^^^^^^^^^^^^^^ # During the search, all measurement records are dumped into the record # file "conv2d.json". The measurement records can be used to re-apply search results, # resume the search, and perform other analyses. ###################################################################### # Here is an example where we load the best schedule from a file, # print the equivalent python schedule API and CUDA source code. # They can be used for debugging and learning the behavior of the auto-scheduler. 
print("Equivalent python schedule:") print(task.print_best(log_file, print_mode="schedule")) print("CUDA source code:") print(task.print_best(log_file, print_mode="cuda")) ###################################################################### # A more complicated example is to resume the search. # In this case, we need to create the search policy and cost model by ourselves # and resume the status of search policy and cost model with the log file. # In the example below we resume the status and do more 5 trials. def resume_search(task, log_file): print("Resume search:") cost_model = auto_scheduler.XGBModel() cost_model.update_from_file(log_file) search_policy = auto_scheduler.SketchPolicy( task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(log_file)] ) measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300) tune_option = auto_scheduler.TuningOptions( num_measure_trials=5, runner=measure_ctx.runner, measure_callbacks=[auto_scheduler.RecordToFile(log_file)], ) task.tune(tune_option, search_policy=search_policy) # Kill the measurement process del measure_ctx # We do not run the tuning in our webpage server since it takes too long. # Uncomment the following line to run it by yourself. # resume_search(task, log_file)
8,991
40.437788
99
py
tvm
tvm-main/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Auto-scheduling a Neural Network for ARM CPU ============================================= **Author**: `Thierry Moreau <https://github.com/tmoreau89>`_, \ `Lianmin Zheng <https://github.com/merrymercy>`_, \ `Chengfan Jia <https://github.com/jcf94/>`_ Auto-tuning for specific devices and workloads is critical for getting the best performance. This is a tutorial on how to tune a whole neural network for ARM CPU with the auto-scheduler via RPC. To auto-tune a neural network, we partition the network into small subgraphs and tune them independently. Each subgraph is treated as one search task. A task scheduler slices the time and dynamically allocates time resources to these tasks. The task scheduler predicts the impact of each task on the end-to-end execution time and prioritizes the one that can reduce the execution time the most. For each subgraph, we use the compute declaration in :code:`tvm/python/topi` to get the computational DAG in the tensor expression form. We then use the auto-scheduler to construct a search space of this DAG and search for good schedules (low-level optimizations). 
Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on manual templates to define the search space, the auto-scheduler does not require any schedule templates. In other words, the auto-scheduler only uses the compute declarations in :code:`tvm/python/topi` and does not use existing schedule templates. Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. """ import numpy as np import os import tvm from tvm import relay, auto_scheduler from tvm.relay import data_dep_optimization as ddo import tvm.relay.testing from tvm.contrib import graph_executor from tvm.contrib.utils import tempdir ################################################################# # Define a Network # ---------------- # First, we need to define the network with relay frontend API. # We can load some pre-defined network from :code:`tvm.relay.testing`. # We can also load models from MXNet, ONNX, PyTorch, and TensorFlow # (see :ref:`front end tutorials<tutorial-frontend>`). # # For convolutional neural networks, although auto-scheduler can work correctly # with any layout, we found the best performance is typically achieved with NHWC layout. # We also implemented more optimizations for NHWC layout with the auto-scheduler. # So it is recommended to convert your models to NHWC layout to use the auto-scheduler. # You can use :ref:`ConvertLayout <convert-layout-usage>` pass to do the layout conversion in TVM. 
def get_network(name, batch_size, layout="NHWC", dtype="float32", use_sparse=False): """Get the symbol definition and random weight of a network""" # auto-scheduler prefers NHWC layout if layout == "NHWC": image_shape = (224, 224, 3) elif layout == "NCHW": image_shape = (3, 224, 224) else: raise ValueError("Invalid layout: " + layout) input_shape = (batch_size,) + image_shape output_shape = (batch_size, 1000) if name.startswith("resnet-"): n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape, ) elif name.startswith("resnet3d-"): n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape, ) elif name == "mobilenet": mod, params = relay.testing.mobilenet.get_workload( batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape ) elif name == "squeezenet_v1.1": assert layout == "NCHW", "squeezenet_v1.1 only supports NCHW layout" mod, params = relay.testing.squeezenet.get_workload( version="1.1", batch_size=batch_size, dtype=dtype, image_shape=image_shape, ) elif name == "inception_v3": input_shape = (batch_size, 3, 299, 299) if layout == "NCHW" else (batch_size, 299, 299, 3) mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype) elif name == "mxnet": # an example for mxnet model from mxnet.gluon.model_zoo.vision import get_model assert layout == "NCHW" block = get_model("resnet50_v1", pretrained=True) mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype) net = mod["main"] net = relay.Function( net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs ) mod = tvm.IRModule.from_expr(net) elif name == "mlp": mod, params = relay.testing.mlp.get_workload( batch_size=batch_size, dtype=dtype, image_shape=image_shape, num_classes=1000 ) 
else: raise ValueError("Network not found.") if use_sparse: from tvm.topi.sparse.utils import convert_model_dense_to_sparse mod, params = convert_model_dense_to_sparse(mod, params, random_params=True) return mod, params, input_shape, output_shape ################################################################# # Start RPC Tracker # ----------------- # TVM uses RPC session to communicate with ARM boards. # During tuning, the tuner will send the generated code to the board and # measure the speed of code on the board. # # To scale up the tuning, TVM uses RPC Tracker to manage distributed devices. # The RPC Tracker is a centralized controller node. We can register all devices to # the tracker. For example, if we have 10 phones, we can register all of them # to the tracker, and run 10 measurements in parallel, accelerating the tuning process. # # To start an RPC tracker, run this command on the host machine. The tracker is # required during the whole tuning process, so we need to open a new terminal for # this command: # # .. code-block:: bash # # python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190 # # The expected output is # # .. code-block:: bash # # INFO:RPCTracker:bind to 0.0.0.0:9190 ################################################################# # Register Devices to RPC Tracker # ----------------------------------- # Now we can register our devices to the tracker. The first step is to # build the TVM runtime for the ARM devices. # # * For Linux: # Follow this section :ref:`build-tvm-runtime-on-device` to build # the TVM runtime on the device. Then register the device to tracker by # # .. code-block:: bash # # python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=rasp4b-64 # # (replace :code:`[HOST_IP]` with the IP address of your host machine) # # * For Android: # Follow this `readme page <https://github.com/apache/tvm/tree/main/apps/android_rpc>`_ to # install the TVM RPC APK on the android device. Make sure you can pass the android rpc test. 
# Then you have already registered your device. During tuning, you have to go to developer option # and enable "Keep screen awake during changing" and charge your phone to make it stable. # # After registering devices, we can confirm it by querying rpc_tracker # # .. code-block:: bash # # python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190 # # For example, if we have 2 Huawei mate10 pro, 11 Raspberry Pi 4B with 64bit OS, and 2 rk3399, # the output can be # # .. code-block:: bash # # Queue Status # ---------------------------------- # key total free pending # ---------------------------------- # mate10pro 2 2 0 # rk3399 2 2 0 # rasp4b-64 11 11 0 # ---------------------------------- # # You can register multiple devices to the tracker to accelerate the measurement in tuning. ########################################### # Set Tuning Options # ------------------ # Before tuning, we should apply some configurations. Here I use a Raspberry Pi 4b 4GB board # as example with a 64bit OS (Ubuntu 20.04). In your setting, you should modify the target # and device_key accordingly. # set :code:`use_ndk` to True if you use android phone. #### DEVICE CONFIG #### # Replace "aarch64-linux-gnu" with the correct target of your board. # This target is used for cross compilation. You can query it by :code:`gcc -v` on your device. # FIXME(tmoreau89, merrymercy): We leave '-device=arm_cpu' out of the target string # because we're sharing x86 op strategy. 
target = tvm.target.Target("llvm -mtriple=aarch64-linux-gnu -mattr=+neon") # Also replace this with the device key, rpc host and rpc port in your tracker device_key = "rasp4b-64" rpc_host = "127.0.0.1" rpc_port = 9190 # Set this to True if you use ndk tools for cross compiling # And also set the environment variable below to point to the cross compiler use_ndk = False # os.environ["TVM_NDK_CC"] = "/usr/bin/aarch64-linux-gnu-g++" #### TUNING OPTION #### network = "mobilenet" use_sparse = False batch_size = 1 layout = "NHWC" dtype = "float32" log_file = "%s-%s-B%d-%s.json" % (network, layout, batch_size, target.kind.name) ################################################################# # Extract Search Tasks # -------------------- # Next, we extract the search tasks and their weights from a network. # The weight of a task is the number of appearances of the task's subgraph # in the whole network. # By using the weight, we can approximate the end-to-end latency of the network # as :code:`sum(latency[t] * weight[t])`, where :code:`latency[t]` is the # latency of a task and :code:`weight[t]` is the weight of the task. # The task scheduler will just optimize this objective. # Extract tasks from the network print("Get model...") mod, params, input_shape, output_shape = get_network( network, batch_size, layout, dtype=dtype, use_sparse=use_sparse ) print("Extract tasks...") tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target) for idx, task in enumerate(tasks): print("========== Task %d (workload key: %s) ==========" % (idx, task.workload_key)) print(task.compute_dag) ################################################################# # Tuning and Evaluation # --------------------- # Now, we set some options for tuning and launch the search tasks # # * :code:`num_measure_trials` is the number of measurement trials we can use during the tuning. # You can set it to a small number (e.g., 200) for a fast demonstrative run. 
# In practice, we recommend setting it around :code:`800 * len(tasks)`, # which is typically enough for the search to converge. # For example, there are 29 tasks in resnet-50, so we can set it as 20000. # You can adjust this parameter according to your time budget. # * In addition, we use :code:`RecordToFile` to dump measurement records into a log file, # The measurement records can be used to query the history best, resume the search, # and do more analyses later. # * see :any:`auto_scheduler.TuningOptions`, # :any:`auto_scheduler.LocalRunner` for more parameters. # # After auto-tuning, we can compile the network with the best schedules we found. # All measurement records are dumped into the log file during auto-tuning, # so we can read the log file and load the best schedules. def tune_and_evaluate(): print("Begin tuning...") tuner = auto_scheduler.TaskScheduler(tasks, task_weights) tune_option = auto_scheduler.TuningOptions( num_measure_trials=200, # change this to 20000 to achieve the best performance builder=auto_scheduler.LocalBuilder(build_func="ndk" if use_ndk else "default"), runner=auto_scheduler.RPCRunner( device_key, host=rpc_host, port=rpc_port, timeout=30, repeat=1, min_repeat_ms=200, enable_cpu_cache_flush=True, ), measure_callbacks=[auto_scheduler.RecordToFile(log_file)], ) tuner.tune(tune_option) # Compile with the history best print("Compile...") with auto_scheduler.ApplyHistoryBest(log_file): with tvm.transform.PassContext( opt_level=3, config={"relay.backend.use_auto_scheduler": True} ): lib = relay.build(mod, target=target, params=params) # Export library tmp = tempdir() if use_ndk: from tvm.contrib import ndk filename = "net.so" lib.export_library(tmp.relpath(filename), ndk.create_shared) else: filename = "net.tar" lib.export_library(tmp.relpath(filename)) # Upload module to device print("Upload...") remote = auto_scheduler.utils.request_remote(device_key, rpc_host, rpc_port, timeout=10000) remote.upload(tmp.relpath(filename)) rlib = 
remote.load_module(filename) # Create graph executor dev = remote.cpu() module = graph_executor.GraphModule(rlib["default"](dev)) data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype)) module.set_input("data", data_tvm) # Evaluate print("Evaluate inference time cost...") print(module.benchmark(dev, repeat=3, min_repeat_ms=500)) # We do not run the tuning in our webpage server since the server doesn't have a Raspberry Pi, # or device tracker running. # Uncomment the following line to run it by yourself. # tune_and_evaluate() ###################################################################### # .. note:: Explaining the printed information during tuning # # During the tuning, a lot of information will be printed on the console. # They are used for debugging purposes. The most important info is the output # of the task scheduler. The following table is a sample output. # # .. code-block:: c # # ---------------------------------------------------------------------- # ------------------------------ [ Task Scheduler ] # ---------------------------------------------------------------------- # | ID | Latency (ms) | Speed (GFLOPS) | Trials | # ------------------------------------------------- # | 0 | 0.013 | 0.31 | 64 | # | 1 | 0.845 | 2.43 | 448 | # | 2 | 0.046 | -0.00 | 64 | # | 3 | 4.194 | 24.53 | 2112 | # | 4 | 0.109 | 9.21 | 64 | # | 5 | 1.759 | 29.27 | 896 | # | 6 | 0.083 | 6.01 | 64 | # | 7 | 3.084 | 33.38 | 7680 | # | 8 | 0.136 | 14.78 | 384 | # | 9 | 1.349 | 38.23 | 768 | # | 10 | 0.133 | 7.55 | 128 | # | 11 | 2.747 | 37.56 | 1536 | # | 12 | 0.338 | 11.87 | 192 | # | 13 | 1.295 | 40.00 | 704 | # | 14 | 0.482 | 4.16 | 256 | # | 15 | 2.686 | 38.56 | 1344 | # | 16 | 0.884 | 9.08 | 448 | # | 17 | 1.332 | 39.18 | 704 | # | 18 | 1.045 | 3.84 | 576 | # | 19 | 1.391 | 38.09 | 704 | # | 20 | 0.777 | 10.34 | 448 | # | 21 | 0.739 | 30.97 | 448 | # ------------------------------------------------- # Estimated total latency: 38.347 ms Trials: 19992 Used 
time : 19260 s Next ID: 3 # # This table lists the latency and (estimated) speed of all tasks. # It also lists the allocation of measurement trials for all tasks. # The last line prints the total weighted latency of these tasks, # which can be a rough estimation of the end-to-end execution time # of the network. # The last line also prints the total number of measurement trials, # total time spent on auto-tuning and the id of the next task to tune. # # There will also be some "dmlc::Error"s errors, because the # auto-scheduler will try some invalid schedules. # You can safely ignore them if the tuning can continue, because these # errors are isolated from the main process. # ###################################################################### # .. note:: Terminate the tuning earlier # # You can terminate the tuning earlier by forcibly killing this process. # As long as you get at least one valid schedule for each task in the log file, # you should be able to do the compilation (the secion below). # ################################################################# # Other Tips # ---------- # 1. During the tuning, the auto-scheduler needs to compile many programs and # extract feature from them. This part is CPU-intensive, # so a high-performance CPU with many cores is recommended for faster search. # 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill -i log.json` # to distill the large log file and only save the best useful records. # 3. You can resume a search from the previous log file. You just need to # add a new argument :code:`load_log_file` when creating the task scheduler # in function :code:`run_tuning`. Say, # :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)` # 4. If you have multiple target CPUs, you can use all of them for measurements to # parallelize the measurements. 
Check this :ref:`section <tutorials-autotvm-scale-up-rpc-tracker>` # to learn how to use the RPC Tracker and RPC Server. # To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions` # with :any:`auto_scheduler.RPCRunner`.
18,708
41.424036
101
py
tvm
tvm-main/gallery/how_to/tune_with_autoscheduler/tune_sparse_x86.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Auto-scheduling Sparse Matrix Multiplication on CPU with Custom Sketch Rule =========================================================================== **Author**: `Chengfan Jia <https://github.com/jcf94/>`_ This is a tutorial on how to use the auto-scheduler to tune a sparse matrix multiplication for CPUs. Auto-scheduler is designed to explore the schedule with best performance for a given computation declaration automatically. While sometimes, we may have a demand to try some special ops which may not been well-supported by auto-scheduler's default sketch rules and result in poor performance. Fortunately, auto-scheduler currently allows user to provide a CustomSketch to cover these cases. We use sparse matrix multiplication as an example in this tutorial to demonstrate how to implement and plug a custom sketch rule to the auto-scheduler's search policy. Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. 
""" import os import numpy as np import tvm import tvm.testing from tvm import te, auto_scheduler, runtime, topi from tvm.auto_scheduler import _ffi_api from tvm.topi.utils import get_const_tuple from tvm.topi.sparse.utils import random_bsr_matrix ###################################################################### # Define the computation # ^^^^^^^^^^^^^^^^^^^^^^ # To begin with, let us define the computation of a sparse matmul with several relu and bias add. # The function should return the list of input/output tensors. # From these tensors, the auto-scheduler can get the whole computational graph. @auto_scheduler.register_workload def sparse_dense(M, N, K, w_data_shape, w_indices_shape, w_indptr_shape, dtype): X = te.placeholder(shape=(M, K), dtype=dtype) W_data = te.placeholder(shape=w_data_shape, dtype=dtype) W_indices = te.placeholder(shape=w_indices_shape, dtype="int32") W_indptr = te.placeholder(shape=w_indptr_shape, dtype="int32") B = te.placeholder(shape=(M, N), dtype=dtype) out = topi.nn.sparse_dense(topi.nn.relu(X), W_data, W_indices, W_indptr) out = te.compute((M, N), lambda i, j: out[i, j] + B[i, j], name="BiasAdd") out = topi.nn.relu(out) return [X, W_data, W_indices, W_indptr, B, out] ###################################################################### # Special step for sparse workload # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # During schedule tuning, auto-scheduler will use random inputs to measure the performance of a # generated schedule. While we cannot directly use a random array as the input of a sparse op, for # the "indices" and "indptr" array are meaningful for the computation. # # To solve this problem, we register these as special buffers, and load them when process program # measuring. # See the `tvm.auto_scheduler.measure.py` for more details. 
# Define the basic shapes of this sparse computation M = 128 K = 256 N = 512 BS_R = 16 BS_C = 1 density = 0.6 # Generate the test data with numpy X_np = np.random.randn(M, K).astype("float32") X_np = np.maximum(np.zeros((M, K), dtype="float32"), X_np) # Relu W_sp_np = random_bsr_matrix(N, K, BS_R, BS_C, density=density, dtype="float32") W_np = W_sp_np.todense() Y_np = X_np @ W_np.T # Process the matrix multiplication B_np = np.random.randn(M, N).astype("float32") Y_np = Y_np + B_np # Bias add Y_np = np.maximum(np.zeros((M, N), dtype="float32"), Y_np) # Relu ###################################################################### # Create the search task # ^^^^^^^^^^^^^^^^^^^^^^ # We then create a search task with M=N=K=512 and dtype="float32" # If your machine supports avx instructions, you can # # - replace "llvm" below with "llvm -mcpu=core-avx2" to enable AVX2 # - replace "llvm" below with "llvm -mcpu=skylake-avx512" to enable AVX-512 target = tvm.target.Target("llvm") # Register the sparse data to task inputs prefix = "sparse_dense_bsr_%d_%d_%d_%d_%d_%d_" % ( N, K, BS_R, BS_C, W_sp_np.indices.shape[0], W_sp_np.indptr.shape[0], ) task = tvm.auto_scheduler.SearchTask( func=sparse_dense, args=(M, N, K, W_sp_np.data.shape, W_sp_np.indices.shape, W_sp_np.indptr.shape, "float32"), target=target, task_inputs={ prefix + "W_data": runtime.ndarray.array(W_sp_np.data), prefix + "W_indices": runtime.ndarray.array(W_sp_np.indices), prefix + "W_indptr": runtime.ndarray.array(W_sp_np.indptr), }, task_inputs_save_to_file=True, ) # Inspect the computational graph print("Computational DAG:") print(task.compute_dag) ###################################################################### # Write the custom sketch for sparse dense op # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # Before tuning, we will need to define the CustomSketchRule for the sparse dense op. # # CustomSketchRule consists of two parts: the condition function and the apply function. 
# # - condition function: describe when to apply this sketch rule. For example, we can only apply # the rule to the sparse ops by matching their name and tag. # - apply function: describe how to generate the initial sketch. You can implement it using # auto-scheduler provided loop state APIs. def meet_condition_func(search_policy, state, stage_id): state = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag) if state.stages[stage_id].op.tag in [ "sparse_dense_sp_rhs_bsrmm", "sparse_dense_sp_rhs_bsrmm_block", ]: return auto_scheduler.PreloadCustomSketchRule.APPLY_AND_SKIP_REST else: return auto_scheduler.PreloadCustomSketchRule.PASS def apply_func(search_policy, state, stage_id): ret = [] s0 = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag) if s0.stages[stage_id].op.tag == "sparse_dense_sp_rhs_bsrmm_block": return [s0.state_object, stage_id - 1] sparse_dense = s0.stages[stage_id].op sparse_dense_block = s0.stages[stage_id - 1].op assert sparse_dense.tag == "sparse_dense_sp_rhs_bsrmm" assert sparse_dense_block.tag == "sparse_dense_sp_rhs_bsrmm_block" # Set the default consumer of compute block consumer = sparse_dense # If sparse dense has a single elementwise consumer # We can compute inline the sparse_dense output stage consumers = _ffi_api.SearchPolicyUtilsGetConsumers( search_policy.search_task, s0.state_object, stage_id ) if len(consumers) == 1: consumer_id = int(consumers.items()[0][0]) if _ffi_api.SearchPolicyUtilsIsElementwiseMatch( search_policy.search_task, s0.state_object, stage_id, consumer_id ): consumer = s0.stages[consumer_id].op s0.compute_inline(sparse_dense) i, nb_j, j, row_offset, c = s0[sparse_dense_block].iters m, n = s0[consumer].iters i0, i1, i2 = s0.split(sparse_dense_block, i, [None, None]) m0, m1 = s0.follow_split(consumer, m, len(s0.transform_steps) - 1, 1) j0, j1 = s0.split(sparse_dense_block, nb_j, [None]) n0, n1 = s0.follow_split(consumer, n, len(s0.transform_steps) - 1, 1) 
s0.reorder(sparse_dense_block, [i0, j0, i1, j1, row_offset, i2, j, c]) s0.reorder(consumer, [m0, n0, m1, n1]) s0.compute_at(sparse_dense_block, consumer, n0) ret.append([s0.state_object, stage_id - 2]) return ret ###################################################################### # Next, we set parameters for the auto-scheduler with the custom sketch plugged in. # # * :code:`num_measure_trials` is the number of measurement trials we can use during the search. # We only make 10 trials in this tutorial for a fast demonstration. In practice, 1000 is a # good value for the search to converge. You can do more trials according to your time budget. # * In addition, we use :code:`RecordToFile` to dump measurement records into a file # `sparse_dense.json`. # The measurement records can be used to query the history best, resume the search, # and do more analyses later. # * see :any:`auto_scheduler.TuningOptions` for more parameters # * Here, we need to create a :code:`auto_scheduler.SketchPolicy` object, and add the custom sketch # rule as a `init_search_callbacks`. log_file = "sparse_dense.json" tune_option = auto_scheduler.TuningOptions( num_measure_trials=10, measure_callbacks=[auto_scheduler.RecordToFile(log_file)], verbose=2, ) search_policy = auto_scheduler.SketchPolicy( task, program_cost_model=auto_scheduler.XGBModel(), init_search_callbacks=[ auto_scheduler.PreloadCustomSketchRule(meet_condition_func, apply_func, "SparseDense") ], ) ###################################################################### # Run the search # ^^^^^^^^^^^^^^ # Now we get all inputs ready. # We can kick off the search and let the auto-scheduler do its magic. # After some measurement trials, we can load the best schedule from the log # file and apply it. 
def tune_and_evaluate(tune_option, search_policy): # Run auto-tuning (search) task.tune(tune_option, search_policy) # Apply the best schedule sch, args = task.apply_best(log_file) # We can lower the schedule to see the IR after auto-scheduling. # The auto-scheduler correctly performs optimizations including multi-level tiling, # layout transformation, parallelization, vectorization, unrolling, and operator fusion. print("Lowered TIR:") print(tvm.lower(sch, args, simple_mode=True)) # Check correctness and evaluate performance # We build the binary and check its correctness and performance. func = tvm.build(sch, args, target) dev = tvm.cpu() X_tvm = tvm.nd.array(X_np, device=dev) W_data_tvm = tvm.nd.array(W_sp_np.data, device=dev) W_indices_tvm = tvm.nd.array(W_sp_np.indices, device=dev) W_indptr_tvm = tvm.nd.array(W_sp_np.indptr, device=dev) B_tvm = tvm.nd.array(B_np, device=dev) Y_tvm = tvm.nd.empty(Y_np.shape, device=dev) func(X_tvm, W_data_tvm, W_indices_tvm, W_indptr_tvm, B_tvm, Y_tvm) # Check results tvm.testing.assert_allclose(Y_np, Y_tvm.numpy(), atol=1e-4, rtol=1e-4) # Evaluate execution time. evaluator = func.time_evaluator(func.entry_name, dev, min_repeat_ms=500) print( "Execution time of this operator: %.3f ms" % ( np.median( evaluator(X_tvm, W_data_tvm, W_indices_tvm, W_indptr_tvm, B_tvm, Y_tvm).results ) * 1000 ) ) # Notice: We do not run the tuning in our webpage server since it takes too long. # Uncomment the following line to run it by yourself. # tune_and_evaluate(tune_option, search_policy) ###################################################################### # .. note:: Tuning result example # # .. 
code-block:: c # # ---------------------------------------------------------------------- # Lowered TIR: # primfn(placeholder_5: handle, placeholder_6: handle, placeholder_7: handle, placeholder_8: handle, placeholder_9: handle, compute_1: handle) -> () # attr = {"global_symbol": "main", "tir.noalias": True} # buffers = {placeholder_2: Buffer(placeholder_10: Pointer(float32), float32, [9831, 16, 1], []), # placeholder_4: Buffer(placeholder_11: Pointer(int32), int32, [33], []), # placeholder_3: Buffer(placeholder_12: Pointer(float32), float32, [512, 512], []), # compute: Buffer(compute_2: Pointer(float32), float32, [512, 512], []), # placeholder_1: Buffer(placeholder_13: Pointer(float32), float32, [512, 512], []), # placeholder: Buffer(placeholder_14: Pointer(int32), int32, [9831], [])} # buffer_map = {placeholder_7: placeholder, placeholder_9: placeholder_1, placeholder_6: placeholder_2, compute_1: compute, placeholder_5: placeholder_3, placeholder_8: placeholder_4} { # for (i0.outer.i1.outer.fused: int32, 0, 1024) "parallel" { # attr [compute_3: Pointer(float32)] "storage_scope" = "global"; # allocate(compute_3, float32, [256]) { # for (nb_j.inner: int32, 0, 2) { # for (i.inner.init: int32, 0, 8) { # for (j.init: int32, 0, 16) { # compute_3[(((i.inner.init*32) + (nb_j.inner*16)) + j.init)] = 0f32 # } # } # for (elem_idx: int32, 0, ((int32*)placeholder_11[(((floormod(i0.outer.i1.outer.fused, 16)*2) + nb_j.inner) + 1)] - (int32*)placeholder_11[((floormod(i0.outer.i1.outer.fused, 16)*2) + nb_j.inner)])) { # for (i.inner: int32, 0, 8) { # for (j: int32, 0, 16) { # compute_3[(((i.inner*32) + (nb_j.inner*16)) + j)] = ((float32*)compute_3[(((i.inner*32) + (nb_j.inner*16)) + j)] + ((float32*)placeholder_10[((((int32*)placeholder_11[((floormod(i0.outer.i1.outer.fused, 16)*2) + nb_j.inner)]*16) + (elem_idx*16)) + j)]*max((float32*)placeholder_12[(((floordiv(i0.outer.i1.outer.fused, 16)*4096) + (i.inner*512)) + 
(int32*)placeholder_14[((int32*)placeholder_11[((floormod(i0.outer.i1.outer.fused, 16)*2) + nb_j.inner)] + elem_idx)])], 0f32))) # } # } # } # } # for (i0.inner: int32, 0, 8) { # compute_2[ramp((((floordiv(i0.outer.i1.outer.fused, 16)*4096) + (i0.inner*512)) + (floormod(i0.outer.i1.outer.fused, 16)*32)), 1, 32)] = max(((float32x32*)compute_3[ramp((i0.inner*32), 1, 32)] + (float32x32*)placeholder_13[ramp((((floordiv(i0.outer.i1.outer.fused, 16)*4096) + (i0.inner*512)) + (floormod(i0.outer.i1.outer.fused, 16)*32)), 1, 32)]), broadcast(0f32, 32)) # } # } # } # }
14,490
42.779456
494
py
tvm
tvm-main/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Auto-scheduling a Neural Network for mali GPU ============================================= **Author**: `Zhao Wu <https://github.com/FrozenGene>`_ Auto-tuning for specific devices and workloads is critical for getting the best performance. This is a tutorial on how to tune a whole neural network for mali GPU with the auto-scheduler. To auto-tune a neural network, we partition the network into small subgraphs and tune them independently. Each subgraph is treated as one search task. A task scheduler slices the time and dynamically allocates time resources to these tasks. The task scheduler predicts the impact of each task on the end-to-end execution time and prioritizes the one that can reduce the execution time the most. For each subgraph, we use the compute declaration in :code:`tvm/python/topi` to get the computational DAG in the tensor expression form. We then use the auto-scheduler to construct a search space of this DAG and search for good schedules (low-level optimizations). Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on manual templates to define the search space, the auto-scheduler does not require any schedule templates. 
In other words, the auto-scheduler only uses the compute declarations in :code:`tvm/python/topi` and does not use existing schedule templates. Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. """ import numpy as np import tvm from tvm import relay, auto_scheduler import tvm.relay.testing from tvm.contrib import graph_executor import os ################################################################# # Define a Network # ---------------- # First, we need to define the network with relay frontend API. # We can load some pre-defined network from :code:`tvm.relay.testing`. # We can also load models from MXNet, ONNX, PyTorch, and TensorFlow # (see :ref:`front end tutorials<tutorial-frontend>`). # # For convolutional neural networks, although auto-scheduler can work correctly # with any layout, we found the best performance is typically achieved with NHWC layout. # We also implemented more optimizations for NHWC layout with the auto-scheduler. # So it is recommended to convert your models to NHWC layout to use the auto-scheduler. # You can use :ref:`ConvertLayout <convert-layout-usage>` pass to do the layout conversion in TVM. 
def get_network(name, batch_size, layout="NHWC", dtype="float32"): """Get the symbol definition and random weight of a network""" # auto-scheduler prefers NHWC layout if layout == "NHWC": image_shape = (224, 224, 3) elif layout == "NCHW": image_shape = (3, 224, 224) else: raise ValueError("Invalid layout: " + layout) input_shape = (batch_size,) + image_shape output_shape = (batch_size, 1000) if name.startswith("resnet-"): n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape, ) elif name.startswith("resnet3d-"): n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape, ) elif name == "mobilenet": mod, params = relay.testing.mobilenet.get_workload( batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape ) elif name == "squeezenet_v1.1": assert layout == "NCHW", "squeezenet_v1.1 only supports NCHW layout" mod, params = relay.testing.squeezenet.get_workload( version="1.1", batch_size=batch_size, dtype=dtype, image_shape=image_shape, ) elif name == "inception_v3": input_shape = (batch_size, 3, 299, 299) if layout == "NCHW" else (batch_size, 299, 299, 3) mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype) elif name == "mxnet": # an example for mxnet model from mxnet.gluon.model_zoo.vision import get_model assert layout == "NCHW" block = get_model("resnet50_v1", pretrained=True) mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype) net = mod["main"] net = relay.Function( net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs ) mod = tvm.IRModule.from_expr(net) return mod, params, input_shape, output_shape # Define the neural network and compilation target. 
network = "mobilenet" batch_size = 1 layout = "NHWC" # Set this to True if you use ndk tools for cross compiling use_ndk = True # Path to cross compiler os.environ["TVM_NDK_CC"] = "/usr/bin/aarch64-linux-gnu-g++" target = tvm.target.Target("opencl -device=mali", host="llvm -mtriple=aarch64-linux-gnu") dtype = "float32" log_file = "%s-%s-B%d-%s.json" % (network, layout, batch_size, target.kind.name) ################################################################# # Start an RPC Tracker and Register Devices to the Tracker # -------------------------------------------------------- # Please refer to the "Start RPC Tracker" and "Register Devices to RPC Tracker" setions # in this :ref:`tutorial <tutorials-autotvm-start-rpc-tracker>` to start an RPC tracker # and register devices to the tracker. # Replace this with the device key in your tracker device_key = "rk3399" ################################################################# # Extract Search Tasks # -------------------- # Next, we extract the search tasks and their weights from a network. # The weight of a task is the number of appearances of the task's subgraph # in the whole network. # By using the weight, we can approximate the end-to-end latency of the network # as :code:`sum(latency[t] * weight[t])`, where :code:`latency[t]` is the # latency of a task and :code:`weight[t]` is the weight of the task. # The task scheduler will just optimize this objective. # Extract tasks from the network print("Extract tasks...") mod, params, input_shape, output_shape = get_network(network, batch_size, layout, dtype=dtype) tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target) for idx, task in enumerate(tasks): print("========== Task %d (workload key: %s) ==========" % (idx, task.workload_key)) print(task.compute_dag) ###################################################################### # .. note:: How to get the hardware parameters from remote device # # .. 
code-block:: python
#
#     from tvm.auto_scheduler.utils import request_remote
#     remote = request_remote(device_key, "127.0.0.1", 9190)
#     dev = remote.cl()
#     max_shared_memory_per_block = dev.max_shared_memory_per_block
#     # There is no explicit local memory limitation
#     # so we can use INT32_MAX to disable the check on local_memory.
#     max_local_memory_per_block = 2147483647  # INT32_MAX
#     max_threads_per_block = dev.max_threads_per_block
#     max_vthread_extent = int(dev.warp_size / 4) if int(dev.warp_size / 4) > 1 else dev.warp_size
#     warp_size = dev.warp_size
#     hardware_params = auto_scheduler.HardwareParams(-1, 16, 64,
#         max_shared_memory_per_block, max_local_memory_per_block,
#         max_threads_per_block, max_vthread_extent, warp_size)
#
#   Now you could pass it to search task and tune
#
#   .. code-block:: python
#
#     tasks, task_weights = auto_scheduler.extract_tasks(
#         mod["main"], params, target, hardware_params = hardware_params
#     )
#
#


def tune_and_evaluate():
    """Tune all extracted tasks, compile the network with the tuning log,
    then upload and benchmark it on the remote mali device."""
    print("Begin tuning...")
    scheduler = auto_scheduler.TaskScheduler(tasks, task_weights)
    options = auto_scheduler.TuningOptions(
        num_measure_trials=200,  # change this to 20000 to achieve the best performance
        builder=auto_scheduler.LocalBuilder(build_func="ndk" if use_ndk else "default"),
        runner=auto_scheduler.RPCRunner(
            device_key, host="127.0.0.1", port=9190, repeat=3, timeout=50
        ),
        measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
    )
    scheduler.tune(options)

    # Compile the whole network with the best schedules found during tuning.
    print("Compile...")
    with auto_scheduler.ApplyHistoryBest(log_file):
        with tvm.transform.PassContext(
            opt_level=3, config={"relay.backend.use_auto_scheduler": True}
        ):
            lib = relay.build(mod, target, params=params)

    # Create graph executor on the remote device fetched from the RPC tracker.
    print("=============== Request Remote ===============")
    from tvm.auto_scheduler.utils import request_remote

    remote = request_remote(device_key, "127.0.0.1", 9190)
    dev = remote.cl()

    # Export the compiled library, ship it to the device, and load it back.
    from tvm.contrib import utils, ndk

    tmp_dir = utils.tempdir()
    lib_name = "deploy_lib.so"
    lib_path = tmp_dir.relpath(lib_name)
    lib.export_library(lib_path, ndk.create_shared)
    remote.upload(lib_path)
    loaded_lib = remote.load_module(lib_name)
    module = graph_executor.GraphModule(loaded_lib["default"](dev))

    # Feed a random input of the expected shape.
    data = (np.random.uniform(size=input_shape)).astype(dtype)
    data_tvm = tvm.nd.array(data)
    module.set_input("data", data_tvm)

    # Evaluate
    print("Evaluate inference time cost...")
    print(module.benchmark(dev, repeat=3, min_repeat_ms=500))


# We do not run the tuning in our webpage server since server doesn't have mali gpu.
# Uncomment the following line to run it by yourself.

# tune_and_evaluate()


######################################################################
# .. note:: Explain the printed information during tuning
#
#   During the tuning, a lot of information will be printed on the console.
#   They are used for debugging purposes. The most important info is the output
#   of the task scheduler. The following table is a sample output.
#
# ..
code-block:: c # # ---------------------------------------------------------------------- # ------------------------------ [ Task Scheduler ] # ---------------------------------------------------------------------- # | ID | Latency (ms) | Speed (GFLOPS) | Trials | # ------------------------------------------------- # | 0 | 0.010 | 0.40 | 64 | # | 1 | 0.087 | 47.19 | 64 | # | 2 | 0.008 | -0.00 | 64 | # | 3 | 0.177 | 582.07 | 64 | # | 4 | 0.268 | 862.37 | 256 | # | 5 | 0.166 | 621.13 | 128 | # | 6 | 0.170 | 605.10 | 128 | # | 7 | 0.128 | 403.20 | 64 | # | 8 | 0.189 | 545.71 | 64 | # | 9 | 0.231 | 1001.01 | 448 | # | 10 | 0.155 | 664.80 | 256 | # | 11 | 0.155 | 662.86 | 256 | # | 12 | 0.119 | 434.08 | 64 | # | 13 | 0.199 | 522.13 | 64 | # | 14 | 0.235 | 986.56 | 320 | # | 15 | 0.149 | 689.13 | 128 | # | 16 | 0.155 | 664.80 | 192 | # | 17 | 0.151 | 340.64 | 64 | # | 18 | 0.176 | 597.55 | 128 | # | 19 | 0.220 | 1054.37 | 192 | # | 20 | 0.150 | 686.01 | 128 | # | 21 | 0.159 | 650.88 | 128 | # | 22 | 0.073 | 358.19 | 64 | # | 23 | 0.031 | 70.63 | 64 | # | 24 | 0.251 | 947.73 | 128 | # | 25 | 0.157 | 652.47 | 128 | # | 26 | 0.215 | 954.84 | 128 | # | 27 | 0.237 | 868.92 | 128 | # | 28 | 0.266 | 774.06 | 128 | # ------------------------------------------------- # Estimated total latency: 10.016 ms Trials: 3992 Used time : 1131 s Next ID: 15 # # This table lists the latency and (estimated) speed of all tasks. # It also lists the allocation of measurement trials for all tasks. # The last line prints the total weighted latency of these tasks, # which can be a rough estimation of the end-to-end execution time # of the network. # The last line also prints the total number of measurement trials, # total time spent on auto-tuning and the id of the next task to tune. # # There will also be some "tvm::Error"s errors, because the # auto-scheduler will try some invalid schedules. 
# You can safely ignore them if the tuning can continue, because these
# errors are isolated from the main process.
#

######################################################################
# .. note:: Terminate the tuning earlier
#
#   You can terminate the tuning earlier by forcibly killing this process.
#   As long as you get at least one valid schedule for each task in the log file,
#   you should be able to do the compilation (the section below).
#

#################################################################
# Other Tips
# ----------
# 1. During the tuning, the auto-scheduler needs to compile many programs and
#    extract features from them. This part is CPU-intensive,
#    so a high-performance CPU with many cores is recommended for faster search.
# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill -i log.json`
#    to distill the large log file and only save the best useful records.
# 3. You can resume a search from the previous log file. You just need to
#    add a new argument :code:`load_log_file` when creating the task scheduler
#    in function :code:`run_tuning`. Say,
#    :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`
# 4. If you have multiple target GPUs, you can use all of them for measurements to
#    parallelize the measurements. Check this :ref:`section <tutorials-autotvm-scale-up-rpc-tracker>`
#    to learn how to use the RPC Tracker and RPC Server.
#    To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`
#    with :any:`auto_scheduler.RPCRunner`.
16,209
43.903047
110
py
tvm
tvm-main/gallery/how_to/work_with_relay/using_external_lib.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Using External Libraries in Relay ================================= **Author**: `Masahiro Masuda <https://github.com/masahi>`_, `Truman Tian <https://github.com/SiNZeRo>`_ This is a short tutorial on how to use external libraries such as cuDNN, or cuBLAS with Relay. Relay uses TVM internally to generate target specific code. For example, with cuda backend TVM generates cuda kernels for all layers in the user provided network. But sometimes it is also helpful to incorporate external libraries developed by various vendors into Relay. Luckily, TVM has a mechanism to transparently call into these libraries. For Relay users, all we need to do is just to set a target string appropriately. Before we can use external libraries from Relay, your TVM needs to be built with libraries you want to use. For example, to use cuDNN, USE_CUDNN option in `cmake/config.cmake` needs to be enabled, and cuDNN include and library directories need to be specified if necessary. To begin with, we import Relay and TVM. 
""" import tvm from tvm import te import numpy as np from tvm.contrib import graph_executor as runtime from tvm import relay from tvm.relay import testing import tvm.testing ###################################################################### # Create a simple network # ----------------------- # Let's create a very simple network for demonstration. # It consists of convolution, batch normalization, and ReLU activation. out_channels = 16 batch_size = 1 data = relay.var("data", relay.TensorType((batch_size, 3, 224, 224), "float32")) weight = relay.var("weight") bn_gamma = relay.var("bn_gamma") bn_beta = relay.var("bn_beta") bn_mmean = relay.var("bn_mean") bn_mvar = relay.var("bn_var") simple_net = relay.nn.conv2d( data=data, weight=weight, kernel_size=(3, 3), channels=out_channels, padding=(1, 1) ) simple_net = relay.nn.batch_norm(simple_net, bn_gamma, bn_beta, bn_mmean, bn_mvar)[0] simple_net = relay.nn.relu(simple_net) simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net) data_shape = (batch_size, 3, 224, 224) net, params = testing.create_workload(simple_net) ###################################################################### # Build and run with cuda backend # ------------------------------- # We build and run this network with cuda backend, as usual. # By setting the logging level to DEBUG, the result of Relay graph compilation will be dumped as pseudo code. 
import logging

# DEBUG level makes relay.build dump the fused TVM IR below.
logging.basicConfig(level=logging.DEBUG)  # to dump TVM IR after fusion

target = "cuda"
lib = relay.build_module.build(net, target, params=params)

dev = tvm.device(target, 0)
data = np.random.uniform(-1, 1, size=data_shape).astype("float32")
module = runtime.GraphModule(lib["default"](dev))
module.set_input("data", data)
module.run()
out_shape = (batch_size, out_channels, 224, 224)
out = module.get_output(0, tvm.nd.empty(out_shape))
# Host copy of the cuda result; presumably compared against other backends
# (e.g. cuDNN) later in the tutorial -- confirm against the rest of the file.
out_cuda = out.numpy()

######################################################################
# The generated pseudo code should look something like below.
# Note how bias add, batch normalization, and ReLU activation are fused into the convolution kernel.
# TVM generates a single, fused kernel from this representation.
#
# .. code-block:: text
#
#    produce tensor {
#      // attr [iter_var(blockIdx.z, , blockIdx.z)] thread_extent = 1
#      // attr [compute] storage_scope = "local"
#      allocate compute[float32 * 32]
#      // attr [pad_temp.shared] storage_scope = "shared"
#      allocate pad_temp.shared[float32 * 180]
#      // attr [placeholder.shared] storage_scope = "shared"
#      allocate placeholder.shared[float32 * 144]
#      // attr [iter_var(blockIdx.y, , blockIdx.y)] thread_extent = 28
#      // attr [iter_var(blockIdx.x, , blockIdx.x)] thread_extent = 14
#      // attr [iter_var(threadIdx.z, , threadIdx.z)] thread_extent = 4
#      // attr [iter_var(threadIdx.y, , threadIdx.y)] thread_extent = 1
#      // attr [iter_var(threadIdx.x, , threadIdx.x)] thread_extent = 16
#      produce compute {
#        compute[0] = 0.000000f
#        compute[1] = 0.000000f
#        compute[2] = 0.000000f
#        compute[3] = 0.000000f
#        compute[4] = 0.000000f
#        compute[5] = 0.000000f
#        compute[6] = 0.000000f
#        compute[7] = 0.000000f
#        compute[8] = 0.000000f
#        compute[9] = 0.000000f
#        compute[10] = 0.000000f
#        compute[11] = 0.000000f
#        compute[12] = 0.000000f
#        compute[13] = 0.000000f
#        compute[14] = 0.000000f
#        compute[15] = 0.000000f
#        compute[16] = 0.000000f
#        compute[17] = 0.000000f
#        compute[18] = 0.000000f
#
compute[19] = 0.000000f # compute[20] = 0.000000f # compute[21] = 0.000000f # compute[22] = 0.000000f # compute[23] = 0.000000f # compute[24] = 0.000000f # compute[25] = 0.000000f # compute[26] = 0.000000f # compute[27] = 0.000000f # compute[28] = 0.000000f # compute[29] = 0.000000f # compute[30] = 0.000000f # compute[31] = 0.000000f # for (rc.outer, 0, 3) { # produce pad_temp.shared { # // attr [iter_var(threadIdx.z, , threadIdx.z)] thread_extent = 4 # // attr [iter_var(threadIdx.y, , threadIdx.y)] thread_extent = 1 # // attr [iter_var(threadIdx.x, , threadIdx.x)] thread_extent = 16 # if (likely(((threadIdx.z*15) < (60 - threadIdx.x)))) { # if (likely((threadIdx.x < 15))) { # pad_temp.shared[(((((threadIdx.z*15) + threadIdx.x)/60)*180) + ((((((threadIdx.z*15) + threadIdx.x)/6) % 10)*18) + ((((threadIdx.z*3) + threadIdx.x)*3) % 18)))] = tvm_if_then_else((((((1 - ((((threadIdx.z*15) + threadIdx.x)/6) % 10)) <= (blockIdx.y*8)) && ((blockIdx.y*8) < (225 - ((((threadIdx.z*15) + threadIdx.x)/6) % 10)))) && ((1 - ((((threadIdx.z*3) + threadIdx.x)*3) % 18)) <= (blockIdx.x*16))) && ((blockIdx.x*16) < (225 - ((((threadIdx.z*3) + threadIdx.x)*3) % 18)))), placeholder[((((((((blockIdx.y*112) + blockIdx.x) + (rc.outer*3136)) + ((((threadIdx.z*15) + threadIdx.x)/60)*9408))*16) + ((((threadIdx.z*3) + threadIdx.x)*3) % 18)) + (((((threadIdx.z*15) + threadIdx.x)/6) % 10)*224)) + -225)], 0.000000f) # pad_temp.shared[(((((((threadIdx.z*15) + threadIdx.x)*3) + 1)/180)*180) + ((((((((threadIdx.z*15) + threadIdx.x)*3) + 1)/18) % 10)*18) + (((((threadIdx.z*3) + threadIdx.x)*3) + 1) % 18)))] = tvm_if_then_else((((((1 - ((((((threadIdx.z*15) + threadIdx.x)*3) + 1)/18) % 10)) <= (blockIdx.y*8)) && ((blockIdx.y*8) < (225 - ((((((threadIdx.z*15) + threadIdx.x)*3) + 1)/18) % 10)))) && ((1 - (((((threadIdx.z*3) + threadIdx.x)*3) + 1) % 18)) <= (blockIdx.x*16))) && ((blockIdx.x*16) < (225 - (((((threadIdx.z*3) + threadIdx.x)*3) + 1) % 18)))), placeholder[((((((((blockIdx.y*112) + blockIdx.x) + 
(rc.outer*3136)) + ((((((threadIdx.z*15) + threadIdx.x)*3) + 1)/180)*9408))*16) + (((((threadIdx.z*3) + threadIdx.x)*3) + 1) % 18)) + (((((((threadIdx.z*15) + threadIdx.x)*3) + 1)/18) % 10)*224)) + -225)], 0.000000f) # pad_temp.shared[(((((((threadIdx.z*15) + threadIdx.x)*3) + 2)/180)*180) + ((((((((threadIdx.z*15) + threadIdx.x)*3) + 2)/18) % 10)*18) + (((((threadIdx.z*3) + threadIdx.x)*3) + 2) % 18)))] = tvm_if_then_else((((((1 - ((((((threadIdx.z*15) + threadIdx.x)*3) + 2)/18) % 10)) <= (blockIdx.y*8)) && ((blockIdx.y*8) < (225 - ((((((threadIdx.z*15) + threadIdx.x)*3) + 2)/18) % 10)))) && ((1 - (((((threadIdx.z*3) + threadIdx.x)*3) + 2) % 18)) <= (blockIdx.x*16))) && ((blockIdx.x*16) < (225 - (((((threadIdx.z*3) + threadIdx.x)*3) + 2) % 18)))), placeholder[((((((((blockIdx.y*112) + blockIdx.x) + (rc.outer*3136)) + ((((((threadIdx.z*15) + threadIdx.x)*3) + 2)/180)*9408))*16) + (((((threadIdx.z*3) + threadIdx.x)*3) + 2) % 18)) + (((((((threadIdx.z*15) + threadIdx.x)*3) + 2)/18) % 10)*224)) + -225)], 0.000000f) # } # } # } # produce placeholder.shared { # // attr [iter_var(threadIdx.z, , threadIdx.z)] thread_extent = 4 # // attr [iter_var(threadIdx.y, , threadIdx.y)] thread_extent = 1 # // attr [iter_var(threadIdx.x, , threadIdx.x)] thread_extent = 16 # if (likely(((threadIdx.z*4) < (16 - (threadIdx.x/3))))) { # if (likely(((threadIdx.z*12) < (48 - threadIdx.x)))) { # if (likely((threadIdx.x < 12))) { # placeholder.shared[(((((threadIdx.z*4) + (threadIdx.x/3))*3) + (threadIdx.x % 3))*3)] = placeholder[(((((rc.outer + (threadIdx.z*12)) + ((threadIdx.x/3)*3))*3) + (threadIdx.x % 3))*3)] # placeholder.shared[((((((threadIdx.z*4) + (threadIdx.x/3))*3) + (threadIdx.x % 3))*3) + 1)] = placeholder[((((((rc.outer + (threadIdx.z*12)) + ((threadIdx.x/3)*3))*3) + (threadIdx.x % 3))*3) + 1)] # placeholder.shared[((((((threadIdx.z*4) + (threadIdx.x/3))*3) + (threadIdx.x % 3))*3) + 2)] = placeholder[((((((rc.outer + (threadIdx.z*12)) + ((threadIdx.x/3)*3))*3) + (threadIdx.x % 
3))*3) + 2)] # } # } # } # } # compute[0] = (compute[0] + (pad_temp.shared[threadIdx.x]*placeholder.shared[(threadIdx.z*36)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[(threadIdx.z*36)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[(threadIdx.z*36)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[(threadIdx.z*36)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[(threadIdx.z*36)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[(threadIdx.z*36)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[(threadIdx.z*36)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[(threadIdx.z*36)])) # compute[8] = (compute[8] + (pad_temp.shared[threadIdx.x]*placeholder.shared[((threadIdx.z*36) + 9)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[((threadIdx.z*36) + 9)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 9)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 9)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 9)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 9)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 9)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 9)])) # compute[16] = (compute[16] + (pad_temp.shared[threadIdx.x]*placeholder.shared[((threadIdx.z*36) + 18)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[((threadIdx.z*36) + 18)])) # compute[18] = 
(compute[18] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 18)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 18)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 18)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 18)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 18)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 18)])) # compute[24] = (compute[24] + (pad_temp.shared[threadIdx.x]*placeholder.shared[((threadIdx.z*36) + 27)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[((threadIdx.z*36) + 27)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 27)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 27)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 27)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 27)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 27)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 27)])) # compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 1)]*placeholder.shared[((threadIdx.z*36) + 1)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 19)]*placeholder.shared[((threadIdx.z*36) + 1)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 1)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 
55)]*placeholder.shared[((threadIdx.z*36) + 1)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 1)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 1)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 1)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 1)])) # compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 1)]*placeholder.shared[((threadIdx.z*36) + 10)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 19)]*placeholder.shared[((threadIdx.z*36) + 10)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 10)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 10)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 10)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 10)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 10)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 10)])) # compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 1)]*placeholder.shared[((threadIdx.z*36) + 19)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 19)]*placeholder.shared[((threadIdx.z*36) + 19)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 19)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 19)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 19)])) # compute[21] = 
(compute[21] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 19)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 19)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 19)])) # compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 1)]*placeholder.shared[((threadIdx.z*36) + 28)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 19)]*placeholder.shared[((threadIdx.z*36) + 28)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 28)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 28)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 28)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 28)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 28)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 28)])) # compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 2)]*placeholder.shared[((threadIdx.z*36) + 2)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 20)]*placeholder.shared[((threadIdx.z*36) + 2)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 2)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 2)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 2)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 2)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 
110)]*placeholder.shared[((threadIdx.z*36) + 2)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 2)])) # compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 2)]*placeholder.shared[((threadIdx.z*36) + 11)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 20)]*placeholder.shared[((threadIdx.z*36) + 11)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 11)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 11)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 11)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 11)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 11)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 11)])) # compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 2)]*placeholder.shared[((threadIdx.z*36) + 20)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 20)]*placeholder.shared[((threadIdx.z*36) + 20)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 20)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 20)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 20)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 20)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 20)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 20)])) # compute[24] 
= (compute[24] + (pad_temp.shared[(threadIdx.x + 2)]*placeholder.shared[((threadIdx.z*36) + 29)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 20)]*placeholder.shared[((threadIdx.z*36) + 29)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 29)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 29)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 29)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 29)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 29)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 29)])) # compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[((threadIdx.z*36) + 3)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 3)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 3)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 3)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 3)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 3)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 3)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 144)]*placeholder.shared[((threadIdx.z*36) + 3)])) # compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[((threadIdx.z*36) + 12)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 
36)]*placeholder.shared[((threadIdx.z*36) + 12)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 12)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 12)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 12)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 12)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 12)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 144)]*placeholder.shared[((threadIdx.z*36) + 12)])) # compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[((threadIdx.z*36) + 21)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 21)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 21)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 21)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 21)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 21)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 21)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 144)]*placeholder.shared[((threadIdx.z*36) + 21)])) # compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[((threadIdx.z*36) + 30)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 30)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 30)])) # 
compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 30)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 30)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 30)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 30)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 144)]*placeholder.shared[((threadIdx.z*36) + 30)])) # compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 19)]*placeholder.shared[((threadIdx.z*36) + 4)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 4)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 4)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 4)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 4)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 4)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 4)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 145)]*placeholder.shared[((threadIdx.z*36) + 4)])) # compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 19)]*placeholder.shared[((threadIdx.z*36) + 13)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 13)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 13)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 13)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 
91)]*placeholder.shared[((threadIdx.z*36) + 13)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 13)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 13)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 145)]*placeholder.shared[((threadIdx.z*36) + 13)])) # compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 19)]*placeholder.shared[((threadIdx.z*36) + 22)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 22)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 22)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 22)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 22)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 22)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 22)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 145)]*placeholder.shared[((threadIdx.z*36) + 22)])) # compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 19)]*placeholder.shared[((threadIdx.z*36) + 31)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 31)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 31)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 31)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 31)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 31)])) # 
compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 31)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 145)]*placeholder.shared[((threadIdx.z*36) + 31)])) # compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 20)]*placeholder.shared[((threadIdx.z*36) + 5)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 5)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 5)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 5)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 5)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 5)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 5)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 146)]*placeholder.shared[((threadIdx.z*36) + 5)])) # compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 20)]*placeholder.shared[((threadIdx.z*36) + 14)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 14)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 14)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 14)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 14)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 14)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 14)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 
146)]*placeholder.shared[((threadIdx.z*36) + 14)])) # compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 20)]*placeholder.shared[((threadIdx.z*36) + 23)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 23)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 23)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 23)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 23)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 23)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 23)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 146)]*placeholder.shared[((threadIdx.z*36) + 23)])) # compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 20)]*placeholder.shared[((threadIdx.z*36) + 32)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 32)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 32)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 32)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 32)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 32)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 32)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 146)]*placeholder.shared[((threadIdx.z*36) + 32)])) # compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 6)])) # 
compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 6)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 6)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 6)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 6)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 6)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 144)]*placeholder.shared[((threadIdx.z*36) + 6)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 162)]*placeholder.shared[((threadIdx.z*36) + 6)])) # compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 15)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 15)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 15)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 15)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 15)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 15)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 144)]*placeholder.shared[((threadIdx.z*36) + 15)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 162)]*placeholder.shared[((threadIdx.z*36) + 15)])) # compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 24)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 24)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 
72)]*placeholder.shared[((threadIdx.z*36) + 24)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 24)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 24)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 24)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 144)]*placeholder.shared[((threadIdx.z*36) + 24)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 162)]*placeholder.shared[((threadIdx.z*36) + 24)])) # compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 33)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 33)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 33)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 33)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 33)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 33)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 144)]*placeholder.shared[((threadIdx.z*36) + 33)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 162)]*placeholder.shared[((threadIdx.z*36) + 33)])) # compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 7)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 7)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 7)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 7)])) # compute[4] = 
(compute[4] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 7)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 7)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 145)]*placeholder.shared[((threadIdx.z*36) + 7)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 163)]*placeholder.shared[((threadIdx.z*36) + 7)])) # compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 16)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 16)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 16)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 16)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 16)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 16)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 145)]*placeholder.shared[((threadIdx.z*36) + 16)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 163)]*placeholder.shared[((threadIdx.z*36) + 16)])) # compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 25)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 25)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 25)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 25)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 25)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 
127)]*placeholder.shared[((threadIdx.z*36) + 25)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 145)]*placeholder.shared[((threadIdx.z*36) + 25)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 163)]*placeholder.shared[((threadIdx.z*36) + 25)])) # compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 34)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 34)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 34)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 34)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 34)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 34)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 145)]*placeholder.shared[((threadIdx.z*36) + 34)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 163)]*placeholder.shared[((threadIdx.z*36) + 34)])) # compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 8)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 8)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 8)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 8)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 8)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 8)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 146)]*placeholder.shared[((threadIdx.z*36) + 8)])) # compute[7] = 
(compute[7] + (pad_temp.shared[(threadIdx.x + 164)]*placeholder.shared[((threadIdx.z*36) + 8)])) # compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 17)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 17)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 17)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 17)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 17)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 17)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 146)]*placeholder.shared[((threadIdx.z*36) + 17)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 164)]*placeholder.shared[((threadIdx.z*36) + 17)])) # compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 26)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 26)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 26)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 26)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 26)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 26)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 146)]*placeholder.shared[((threadIdx.z*36) + 26)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 164)]*placeholder.shared[((threadIdx.z*36) + 26)])) # compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 
38)]*placeholder.shared[((threadIdx.z*36) + 35)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 35)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 35)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 35)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 35)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 35)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 146)]*placeholder.shared[((threadIdx.z*36) + 35)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 164)]*placeholder.shared[((threadIdx.z*36) + 35)])) # } # } # tensor[(((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x)] = max(((compute[0]*placeholder[(threadIdx.z*4)]) + placeholder[(threadIdx.z*4)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 224)] = max(((compute[1]*placeholder[(threadIdx.z*4)]) + placeholder[(threadIdx.z*4)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 448)] = max(((compute[2]*placeholder[(threadIdx.z*4)]) + placeholder[(threadIdx.z*4)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 672)] = max(((compute[3]*placeholder[(threadIdx.z*4)]) + placeholder[(threadIdx.z*4)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 896)] = max(((compute[4]*placeholder[(threadIdx.z*4)]) + placeholder[(threadIdx.z*4)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 1120)] = max(((compute[5]*placeholder[(threadIdx.z*4)]) + placeholder[(threadIdx.z*4)]), 0.000000f) # 
tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 1344)] = max(((compute[6]*placeholder[(threadIdx.z*4)]) + placeholder[(threadIdx.z*4)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 1568)] = max(((compute[7]*placeholder[(threadIdx.z*4)]) + placeholder[(threadIdx.z*4)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 50176)] = max(((compute[8]*placeholder[((threadIdx.z*4) + 1)]) + placeholder[((threadIdx.z*4) + 1)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 50400)] = max(((compute[9]*placeholder[((threadIdx.z*4) + 1)]) + placeholder[((threadIdx.z*4) + 1)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 50624)] = max(((compute[10]*placeholder[((threadIdx.z*4) + 1)]) + placeholder[((threadIdx.z*4) + 1)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 50848)] = max(((compute[11]*placeholder[((threadIdx.z*4) + 1)]) + placeholder[((threadIdx.z*4) + 1)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 51072)] = max(((compute[12]*placeholder[((threadIdx.z*4) + 1)]) + placeholder[((threadIdx.z*4) + 1)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 51296)] = max(((compute[13]*placeholder[((threadIdx.z*4) + 1)]) + placeholder[((threadIdx.z*4) + 1)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 51520)] = max(((compute[14]*placeholder[((threadIdx.z*4) + 1)]) + placeholder[((threadIdx.z*4) + 1)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 51744)] = max(((compute[15]*placeholder[((threadIdx.z*4) + 1)]) + placeholder[((threadIdx.z*4) + 1)]), 0.000000f) # 
tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 100352)] = max(((compute[16]*placeholder[((threadIdx.z*4) + 2)]) + placeholder[((threadIdx.z*4) + 2)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 100576)] = max(((compute[17]*placeholder[((threadIdx.z*4) + 2)]) + placeholder[((threadIdx.z*4) + 2)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 100800)] = max(((compute[18]*placeholder[((threadIdx.z*4) + 2)]) + placeholder[((threadIdx.z*4) + 2)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 101024)] = max(((compute[19]*placeholder[((threadIdx.z*4) + 2)]) + placeholder[((threadIdx.z*4) + 2)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 101248)] = max(((compute[20]*placeholder[((threadIdx.z*4) + 2)]) + placeholder[((threadIdx.z*4) + 2)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 101472)] = max(((compute[21]*placeholder[((threadIdx.z*4) + 2)]) + placeholder[((threadIdx.z*4) + 2)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 101696)] = max(((compute[22]*placeholder[((threadIdx.z*4) + 2)]) + placeholder[((threadIdx.z*4) + 2)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 101920)] = max(((compute[23]*placeholder[((threadIdx.z*4) + 2)]) + placeholder[((threadIdx.z*4) + 2)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 150528)] = max(((compute[24]*placeholder[((threadIdx.z*4) + 3)]) + placeholder[((threadIdx.z*4) + 3)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 150752)] = max(((compute[25]*placeholder[((threadIdx.z*4) + 3)]) + 
placeholder[((threadIdx.z*4) + 3)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 150976)] = max(((compute[26]*placeholder[((threadIdx.z*4) + 3)]) + placeholder[((threadIdx.z*4) + 3)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 151200)] = max(((compute[27]*placeholder[((threadIdx.z*4) + 3)]) + placeholder[((threadIdx.z*4) + 3)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 151424)] = max(((compute[28]*placeholder[((threadIdx.z*4) + 3)]) + placeholder[((threadIdx.z*4) + 3)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 151648)] = max(((compute[29]*placeholder[((threadIdx.z*4) + 3)]) + placeholder[((threadIdx.z*4) + 3)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 151872)] = max(((compute[30]*placeholder[((threadIdx.z*4) + 3)]) + placeholder[((threadIdx.z*4) + 3)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 152096)] = max(((compute[31]*placeholder[((threadIdx.z*4) + 3)]) + placeholder[((threadIdx.z*4) + 3)]), 0.000000f) # } ###################################################################### # Use cuDNN for a convolutional layer # ----------------------------------- # We can use cuDNN to replace convolution kernels with cuDNN ones. # To do that, all we need to do is to append the option " -libs=cudnn" to the target string. 
# NOTE(review): `simple_net`, `data_shape`, `batch_size`, and `out_channels` are
# defined earlier in this tutorial (outside this chunk) -- confirm against the
# full file.
net, params = testing.create_workload(simple_net)
# Appending "-libs=cudnn" to the target string makes Relay offload the
# convolution to the cuDNN library instead of a TVM-generated kernel.
target = "cuda -libs=cudnn"  # use cudnn for convolution
lib = relay.build_module.build(net, target, params=params)

dev = tvm.device(target, 0)
data = np.random.uniform(-1, 1, size=data_shape).astype("float32")
module = runtime.GraphModule(lib["default"](dev))
module.set_input("data", data)
module.run()
out_shape = (batch_size, out_channels, 224, 224)
out = module.get_output(0, tvm.nd.empty(out_shape))
# Keep the cuDNN result as a NumPy array so it can be compared with the
# pure-CUDA result later in the tutorial.
out_cudnn = out.numpy()

######################################################################
# Note that if you use cuDNN, Relay cannot fuse convolution with layers following it.
# This is because layer fusion happens at the level of TVM internal representation (IR).
# Relay treats external libraries as black box, so there is no way to fuse them with TVM IR.
#
# The pseudo code below shows that cuDNN convolution + bias add + batch norm + ReLU
# turned into two stages of computation, one for cuDNN call and the other for the
# rest of operations.
#
# ..
code-block:: text # # // attr [y] storage_scope = "global" # allocate y[float32 * 802816] # produce y { # // attr [0] extern_scope = 0 # tvm_call_packed("tvm.contrib.cudnn.conv2d.forward", 1, 0, 1, 1, 1, 1, 1, 1, 1, tvm_stack_make_array(placeholder, tvm_stack_make_shape(1, 3, 224, 224), 0, 4, 0.000000f, 0), tvm_stack_make_array(placeholder, tvm_stack_make_shape(16, 3, 3, 3), 0, 4, 0.000000f, 0), tvm_stack_make_array(y, tvm_stack_make_shape(1, 16, 224, 224), 0, 4, 0.000000f, 0)) # } # produce tensor { # // attr [iter_var(blockIdx.x, , blockIdx.x)] thread_extent = 256 # // attr [iter_var(threadIdx.x, , threadIdx.x)] thread_extent = 512 # for (ax0.ax1.fused.ax2.fused.ax3.fused.outer, 0, 7) { # if (likely(((blockIdx.x*512) < ((802816 - (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072)) - threadIdx.x)))) { # tensor[(((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/802816)*802816) + (((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/224) % 224)*224) + ((((blockIdx.x*64) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*32)) % 224))) + ((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/50176) % 16)*50176))] = max(((y[(((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/802816)*802816) + (((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/224) % 224)*224) + ((((blockIdx.x*64) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*32)) % 224))) + ((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/50176) % 16)*50176))]*placeholder[(((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/50176) % 16)]) + placeholder[(((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/50176) % 16)]), 0.000000f) # } # } # } ###################################################################### 
# Verify the result # ----------------- # We can check that the results of two runs match. tvm.testing.assert_allclose(out_cuda, out_cudnn, rtol=1e-5) ##################################################################### # Conclusion # ---------- # This tutorial covered the usage of cuDNN with Relay. # We also have support for cuBLAS. If cuBLAS is enabled, it will be used inside a fully connected layer (relay.dense). # To use cuBLAS, set a target string as "cuda -libs=cublas". # You can use both cuDNN and cuBLAS with "cuda -libs=cudnn,cublas". # # For ROCm backend, we have support for MIOpen and rocBLAS. # They can be enabled with target "rocm -libs=miopen,rocblas". # # Being able to use external libraries is great, but we need to keep in mind some cautions. # # First, the use of external libraries may restrict your usage of TVM and Relay. # For example, MIOpen only supports NCHW layout and fp32 data type at the moment, so you cannot use other layouts or data type in TVM. # # Second, and more importantly, external libraries restrict the possibility of operator fusion during graph compilation, as shown above. # TVM and Relay aim to achieve the best performance on a variety of hardwares, with joint operator level and graph level optimization. # To achieve this goal, we should continue developing better optimizations for TVM and Relay, while using external libraries as a nice way to fall back to existing implementation when necessary.
57,702
101.492007
1,115
py
tvm
tvm-main/gallery/how_to/work_with_relay/using_relay_viz.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=line-too-long """ Use Relay Visualizer to Visualize Relay ============================================================ **Author**: `Chi-Wei Wang <https://github.com/chiwwang>`_ Relay IR module can contain lots of operations. Although an individual operation is usually easy to understand, putting them together can cause a complicated, hard-to-read graph. Things can get even worse with optimization-passes coming into play. This utility visualizes an IR module as nodes and edges. It defines a set of interfaces including parser, plotter(renderer), graph, node, and edges. A default parser is provided. Users can implement their own renderers to render the graph. Here we use a renderer rendering graph in the text-form. It is a lightweight, AST-like visualizer, inspired by `clang ast-dump <https://clang.llvm.org/docs/IntroductionToTheClangAST.html>`_. We will introduce how to implement customized parsers and renderers through interface classes. To install dependencies, run: .. code-block:: bash %%shell pip install graphviz For more details, please refer to :py:mod:`tvm.contrib.relay_viz`. 
""" from typing import ( Dict, Union, Tuple, List, ) import tvm from tvm import relay from tvm.contrib import relay_viz from tvm.contrib.relay_viz.interface import ( VizEdge, VizNode, VizParser, ) from tvm.contrib.relay_viz.terminal import ( TermGraph, TermPlotter, TermVizParser, ) ###################################################################### # Define a Relay IR Module with multiple GlobalVar # ------------------------------------------------ # Let's build an example Relay IR Module containing multiple ``GlobalVar``. # We define an ``add`` function and call it in the main function. data = relay.var("data") bias = relay.var("bias") add_op = relay.add(data, bias) add_func = relay.Function([data, bias], add_op) add_gvar = relay.GlobalVar("AddFunc") input0 = relay.var("input0") input1 = relay.var("input1") input2 = relay.var("input2") add_01 = relay.Call(add_gvar, [input0, input1]) add_012 = relay.Call(add_gvar, [input2, add_01]) main_func = relay.Function([input0, input1, input2], add_012) main_gvar = relay.GlobalVar("main") mod = tvm.IRModule({main_gvar: main_func, add_gvar: add_func}) ###################################################################### # Render the graph with Relay Visualizer on the terminal # ------------------------------------------------------ # The terminal can show a Relay IR module in text similar to clang AST-dump. # We should see ``main`` and ``AddFunc`` function. ``AddFunc`` is called twice in the ``main`` function. viz = relay_viz.RelayVisualizer(mod) viz.render() ###################################################################### # Customize Parser for Interested Relay Types # ------------------------------------------- # Sometimes we want to emphasize interested information, or parse things differently for a specific usage. # It is possible to provide customized parsers as long as it obeys the interface. # Here demonstrate how to customize parsers for ``relay.var``. 
# We need to implement abstract interface :py:class:`tvm.contrib.relay_viz.interface.VizParser`. class YourAwesomeParser(VizParser): def __init__(self): self._delegate = TermVizParser() def get_node_edges( self, node: relay.Expr, relay_param: Dict[str, tvm.runtime.NDArray], node_to_id: Dict[relay.Expr, str], ) -> Tuple[Union[VizNode, None], List[VizEdge]]: if isinstance(node, relay.Var): node = VizNode(node_to_id[node], "AwesomeVar", f"name_hint {node.name_hint}") # no edge is introduced. So return an empty list. return node, [] # delegate other types to the other parser. return self._delegate.get_node_edges(node, relay_param, node_to_id) ###################################################################### # Pass the parser and an interested renderer to visualizer. # Here we just the terminal renderer. viz = relay_viz.RelayVisualizer(mod, {}, TermPlotter(), YourAwesomeParser()) viz.render() ###################################################################### # Customization around Graph and Plotter # ------------------------------------------- # Besides parsers, we can also customize graph and renderers by implementing # abstract class :py:class:`tvm.contrib.relay_viz.interface.VizGraph` and # :py:class:`tvm.contrib.relay_viz.interface.Plotter`. # Here we override the ``TermGraph`` defined in ``terminal.py`` for easier demo. # We add a hook duplicating above ``AwesomeVar``, and make ``TermPlotter`` use the new class. class AwesomeGraph(TermGraph): def node(self, viz_node): # add the node first super().node(viz_node) # if it's AwesomeVar, duplicate it. 
if viz_node.type_name == "AwesomeVar": duplicated_id = f"duplicated_{viz_node.identity}" duplicated_type = "double AwesomeVar" super().node(VizNode(duplicated_id, duplicated_type, "")) # connect the duplicated var to the original one super().edge(VizEdge(duplicated_id, viz_node.identity)) # override TermPlotter to use `AwesomeGraph` instead class AwesomePlotter(TermPlotter): def create_graph(self, name): self._name_to_graph[name] = AwesomeGraph(name) return self._name_to_graph[name] viz = relay_viz.RelayVisualizer(mod, {}, AwesomePlotter(), YourAwesomeParser()) viz.render() ###################################################################### # Summary # ------- # This tutorial demonstrates the usage of Relay Visualizer and customization. # The class :py:class:`tvm.contrib.relay_viz.RelayVisualizer` is composed of interfaces # defined in ``interface.py``. # # It is aimed for quick look-then-fix iterations. # The constructor arguments are intended to be simple, while the customization is still # possible through a set of interface classes. #
6,783
38.905882
133
py
tvm
tvm-main/gallery/how_to/work_with_relay/using_pipeline_executor.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Using Pipeline Executor in Relay ================================= **Author**: `Hua Jiang <https://github.com/huajsj>`_ This is a short tutorial on how to use "Pipeline Executor" with Relay. """ import tvm from tvm import te import numpy as np from tvm.contrib import graph_executor as runtime from tvm.relay.op.contrib.cutlass import partition_for_cutlass from tvm import relay from tvm.relay import testing import tvm.testing from tvm.contrib.cutlass import finalize_modules img_size = 8 ####################################################################### # Create a simple network, this network can be a pre-trained model too. # --------------------------------------------------------------------- # Let's create a very simple network for demonstration. # It consists of convolution, batch normalization, dense, and ReLU activation. 
def get_network(): out_channels = 16 batch_size = 1 data = relay.var("data", relay.TensorType((batch_size, 3, img_size, img_size), "float16")) dense_weight = relay.var( "dweight", relay.TensorType((batch_size, 16 * img_size * img_size), "float16") ) weight = relay.var("weight") bn_gamma = relay.var("bn_gamma") bn_beta = relay.var("bn_beta") bn_mmean = relay.var("bn_mean") bn_mvar = relay.var("bn_var") simple_net = relay.nn.conv2d( data=data, weight=weight, kernel_size=(3, 3), channels=out_channels, padding=(1, 1) ) simple_net = relay.nn.batch_norm(simple_net, bn_gamma, bn_beta, bn_mmean, bn_mvar)[0] simple_net = relay.nn.relu(simple_net) simple_net = relay.nn.batch_flatten(simple_net) simple_net = relay.nn.dense(simple_net, dense_weight) simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net) data_shape = (batch_size, 3, img_size, img_size) net, params = testing.create_workload(simple_net) return net, params, data_shape net, params, data_shape = get_network() ########################################### # Splitting the network into two subgraphs. # ----------------------------------------- # This function called 'graph_split' from a unit test is just an example. User can create a customized logic # to split the graph. import inspect import os tutorial_dir = os.path.dirname(inspect.getfile(lambda: None)) os.sys.path.append(os.path.join(tutorial_dir, "../../../tests/python/relay")) from test_pipeline_executor import graph_split ########################################### # Splitting the network into two subgraphs. split_config = [{"op_name": "nn.relu", "op_index": 0}] subgraphs = graph_split(net["main"], split_config, params) ########################################################### # The generated subgraphs should look something like below. 
""" #subgraphs[0]) def @main(%data: Tensor[(1, 3, img_size, img_size), float16]) { %0 = nn.conv2d(%data, meta[relay.Constant][0] /* ty=Tensor[(16, 3, 3, 3), float16] */, padding=[1, 1, 1, 1], channels=16, kernel_size=[3, 3]) /* ty=Tensor[(1, 16, img_size, img_size), float16] */; %1 = nn.batch_norm(%0, meta[relay.Constant][1] /* ty=Tensor[(16), float16] */, meta[relay.Constant][2] /* ty=Tensor[(16), float16]*/, meta[relay.Constant][3] /* ty=Tensor[(16), float16] */, meta[relay.Constant][4] /* ty=Tensor[(16), float16] */) /* ty=(Tensor[(1,16, img_size, img_size), float16], Tensor[(16), float16], Tensor[(16), float16]) */; %2 = %1.0; nn.relu(%2) /* ty=Tensor[(1, 16, img_size, img_size), float16] */ } #subgraphs[1] def @main(%data_n_0: Tensor[(1, 16, 8, 8), float16] /* ty=Tensor[(1, 16, 8, 8), float16] */) { %0 = nn.batch_flatten(%data_n_0) /* ty=Tensor[(1, 1024), float16] */; nn.dense(%0, meta[relay.Constant][0] /* ty=Tensor[(1, 1024), float16] */, units=None) /* ty=Tensor[(1, 1), float16] */ } """ ######################################### # Build the subgraph with cutlass target. # --------------------------------------- cutlass = tvm.target.Target( { "kind": "cutlass", "sm": int(tvm.target.Target("cuda").arch.split("_")[1]), "use_3xtf32": True, "split_k_slices": [1], "profile_all_alignments": False, "find_first_valid": True, "use_multiprocessing": True, "use_fast_math": False, "tmp_dir": "./tmp", }, host=tvm.target.Target("llvm"), ) def cutlass_build(mod, target, params=None, target_host=None, mod_name="default"): target = [target, cutlass] lib = relay.build_module.build( mod, target=target, params=params, target_host=target_host, mod_name=mod_name ) return lib ########################################################### # Run the two subgraphs in pipeline with pipeline executor. # --------------------------------------------------------- # Set 'USE_PIPELINE_EXECUTOR' as ON, and set USE_CUTLASS' as ON in cmake. 
from tvm.contrib import graph_executor, pipeline_executor, pipeline_executor_build ######################################### # Create subgraph pipeline configuration. # Associate a subgraph module with a target. # Use CUTLASS BYOC to build the second subgraph module. mod0, mod1 = subgraphs[0], subgraphs[1] # Use cutlass as the codegen. mod1 = partition_for_cutlass(mod1) ################################################# # Get the pipeline executor configuration object. pipe_config = pipeline_executor_build.PipelineConfig() ########################################################################### # Set the compile target of the subgraph module. pipe_config[mod0].target = "llvm" pipe_config[mod0].dev = tvm.cpu(0) ############################################################## # Set the compile target of the second subgraph module as cuda. pipe_config[mod1].target = "cuda" pipe_config[mod1].dev = tvm.device("cuda", 0) pipe_config[mod1].build_func = cutlass_build pipe_config[mod1].export_cc = "nvcc" # Create the pipeline by connecting the subgraph modules. # The global input will be forwarded to the input interface of the first module named mod0 pipe_config["input"]["data"].connect(pipe_config[mod0]["input"]["data"]) # The first output of mod0 will be forwarded to the input interface of mod1 pipe_config[mod0]["output"][0].connect(pipe_config[mod1]["input"]["data_n_0"]) # The first output of mod1 will be the first global output. pipe_config[mod1]["output"][0].connect(pipe_config["output"][0]) ###################################### # The pipeline configuration as below. """ print(pipe_config) Inputs |data: mod0:data output |output(0) : mod1.output(0) connections |mod0.output(0)-> mod1.data_n_0 """ ############################## # Build the pipeline executor. 
# ---------------------------- with tvm.transform.PassContext(opt_level=3): pipeline_mod_factory = pipeline_executor_build.build(pipe_config) ############################################### # Export the parameter configuration to a file. directory_path = tvm.contrib.utils.tempdir().temp_dir os.makedirs(directory_path, exist_ok=True) config_file_name = pipeline_mod_factory.export_library(directory_path) ################################################################ # Use the load function to create and initialize PipelineModule. # -------------------------------------------------------------- pipeline_module = pipeline_executor.PipelineModule.load_library(config_file_name) ############################ # Run the pipeline executor. # -------------------------- # Allocate input data. data = np.random.uniform(-1, 1, size=data_shape).astype("float16") pipeline_module.set_input("data", tvm.nd.array(data)) ########################################################################## # Run the two subgraph in the pipeline mode to get the output asynchronously # or synchronously. In the following example, it is synchronous. pipeline_module.run() outputs = pipeline_module.get_output() ###################################### # Use graph_executor for verification. # ------------------------------------ # Run these two subgraphs in sequence with graph_executor to get the output. 
target = "llvm" dev0 = tvm.device(target, 0) lib0 = relay.build_module.build(mod0, target, params=params) module0 = runtime.GraphModule(lib0["default"](dev0)) cuda = tvm.target.Target("cuda", host=tvm.target.Target("llvm")) lib1 = relay.build_module.build(mod1, [cuda, cutlass], params=params) lib1 = finalize_modules(lib1, "compile.so", "./tmp") dev1 = tvm.device("cuda", 0) module1 = runtime.GraphModule(lib1["default"](dev1)) module0.set_input("data", data) module0.run() out_shape = (1, 16, img_size, img_size) out = module0.get_output(0, tvm.nd.empty(out_shape, "float16")) module1.set_input("data_n_0", out) module1.run() out_shape = (1, 1) out = module1.get_output(0, tvm.nd.empty(out_shape, "float16")) #################### # Verify the result. tvm.testing.assert_allclose(outputs[0].numpy(), out.numpy())
9,609
40.244635
349
py
tvm
tvm-main/gallery/how_to/work_with_relay/build_gcn.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Building a Graph Convolutional Network ====================================== **Author**: `Yulun Yao <https://yulunyao.io/>`_, \ `Chien-Yu Lin <https://homes.cs.washington.edu/~cyulin/>`_ This article is an introductory tutorial to build a Graph Convolutional Network (GCN) with Relay. In this tutorial, we will run our GCN on Cora dataset to demonstrate. Cora dataset is a common benchmark for Graph Neural Networks (GNN) and frameworks that support GNN training and inference. We directly load the dataset from DGL library to do the apples to apples comparison against DGL. .. code-block:: bash %%shell pip install torch==2.0.0 pip install dgl==v1.0.0 Please refer to DGL doc for installation at https://docs.dgl.ai/install/index.html. Please refer to PyTorch guide for PyTorch installation at https://pytorch.org/get-started/locally/. """ ###################################################################### # Define GCN in DGL with PyTorch backend # -------------------------------------- # # DGL example: https://github.com/dmlc/dgl/tree/master/examples/pytorch/gcn # This part reuses the code from the above example. 
import torch import torch.nn as nn import torch.nn.functional as F import dgl import networkx as nx from dgl.nn.pytorch import GraphConv class GCN(nn.Module): def __init__(self, g, n_infeat, n_hidden, n_classes, n_layers, activation): super(GCN, self).__init__() self.g = g self.layers = nn.ModuleList() self.layers.append(GraphConv(n_infeat, n_hidden, activation=activation)) for i in range(n_layers - 1): self.layers.append(GraphConv(n_hidden, n_hidden, activation=activation)) self.layers.append(GraphConv(n_hidden, n_classes)) def forward(self, features): h = features for i, layer in enumerate(self.layers): # handle api changes for differnt DGL version if dgl.__version__ > "0.3": h = layer(self.g, h) else: h = layer(h, self.g) return h ###################################################################### # Define the functions to load dataset and evaluate accuracy # ---------------------------------------------------------- # You may substitute this part with your own dataset, here we load data from DGL from dgl.data import load_data from collections import namedtuple def evaluate(g, logits): label = g.ndata["label"] test_mask = g.ndata["test_mask"] pred = logits.argmax(axis=1) acc = (torch.Tensor(pred[test_mask]) == label[test_mask]).float().mean() return acc ###################################################################### # Load the data and set up model parameters # ----------------------------------------- """ Parameters ---------- num_layer: int number of hidden layers num_hidden: int number of the hidden units in the hidden layer infeat_dim: int dimension of the input features num_classes: int dimension of model output (Number of classes) """ dataset = dgl.data.CoraGraphDataset() dgl_g = dataset[0] num_layers = 1 num_hidden = 16 features = dgl_g.ndata["feat"] infeat_dim = features.shape[1] num_classes = dataset.num_classes ###################################################################### # Set up the DGL-PyTorch model and get the golden results # 
------------------------------------------------------- # # The weights are trained with https://github.com/dmlc/dgl/blob/master/examples/pytorch/gcn/train.py from tvm.contrib.download import download_testdata features = torch.FloatTensor(features) torch_model = GCN(dgl_g, infeat_dim, num_hidden, num_classes, num_layers, F.relu) # Download the pretrained weights model_url = "https://homes.cs.washington.edu/~cyulin/media/gnn_model/gcn_cora.torch" model_path = download_testdata(model_url, "gcn_cora.pickle", module="gcn_model") # Load the weights into the model torch_model.load_state_dict(torch.load(model_path)) ###################################################################### # Run the DGL model and test for accuracy # --------------------------------------- torch_model.eval() with torch.no_grad(): logits_torch = torch_model(features) print("Print the first five outputs from DGL-PyTorch execution\n", logits_torch[:5]) acc = evaluate(dgl_g, logits_torch.numpy()) print("Test accuracy of DGL results: {:.2%}".format(acc)) ###################################################################### # Define Graph Convolution Layer in Relay # --------------------------------------- # To run GCN on TVM, we first need to implement Graph Convolution Layer. # You may refer to https://github.com/dmlc/dgl/blob/master/python/dgl/nn/mxnet/conv/graphconv.py for a GraphConv Layer implemented in DGL with MXNet Backend # # The layer is defined with below operations, note that we apply two transposes to keep adjacency matrix on right hand side of sparse_dense operator, # this method is temporary and will be updated in next few weeks when we have sparse matrix transpose and support for left sparse operator. # # .. 
math:: # # \mbox{GraphConv}(A, H, W) = A * H * W # = ((H * W)^t * A^t)^t # = ((W^t * H^t) * A^t)^t from tvm import relay from tvm.contrib import graph_executor import tvm from tvm import te def GraphConv(layer_name, input_dim, output_dim, adj, input, norm=None, bias=True, activation=None): """ Parameters ---------- layer_name: str Name of layer input_dim: int Input dimension per node feature output_dim: int, Output dimension per node feature adj: namedtuple, Graph representation (Adjacency Matrix) in Sparse Format (`data`, `indices`, `indptr`), where `data` has shape [num_nonzeros], indices` has shape [num_nonzeros], `indptr` has shape [num_nodes + 1] input: relay.Expr, Input feature to current layer with shape [num_nodes, input_dim] norm: relay.Expr, Norm passed to this layer to normalize features before and after Convolution. bias: bool Set bias to True to add bias when doing GCN layer activation: <function relay.op.nn>, Activation function applies to the output. e.g. relay.nn.{relu, sigmoid, log_softmax, softmax, leaky_relu} Returns ---------- output: tvm.relay.Expr The Output Tensor for this layer [num_nodes, output_dim] """ if norm is not None: input = relay.multiply(input, norm) weight = relay.var(layer_name + ".weight", shape=(input_dim, output_dim)) weight_t = relay.transpose(weight) dense = relay.nn.dense(weight_t, input) output = relay.nn.sparse_dense(dense, adj) output_t = relay.transpose(output) if norm is not None: output_t = relay.multiply(output_t, norm) if bias is True: _bias = relay.var(layer_name + ".bias", shape=(output_dim, 1)) output_t = relay.nn.bias_add(output_t, _bias, axis=-1) if activation is not None: output_t = activation(output_t) return output_t ###################################################################### # Prepare the parameters needed in the GraphConv layers # ----------------------------------------------------- # import numpy as np import networkx as nx def prepare_params(g): params = {} params["infeats"] = 
g.ndata["feat"].numpy().astype("float32") # Generate adjacency matrix nx_graph = dgl.to_networkx(g) adjacency = nx.to_scipy_sparse_array(nx_graph) params["g_data"] = adjacency.data.astype("float32") params["indices"] = adjacency.indices.astype("int32") params["indptr"] = adjacency.indptr.astype("int32") # Normalization w.r.t. node degrees degs = [g.in_degrees(i) for i in range(g.number_of_nodes())] params["norm"] = np.power(degs, -0.5).astype("float32") params["norm"] = params["norm"].reshape((params["norm"].shape[0], 1)) return params params = prepare_params(dgl_g) # Check shape of features and the validity of adjacency matrix assert len(params["infeats"].shape) == 2 assert ( params["g_data"] is not None and params["indices"] is not None and params["indptr"] is not None ) assert params["infeats"].shape[0] == params["indptr"].shape[0] - 1 ###################################################################### # Put layers together # ------------------- # Define input features, norms, adjacency matrix in Relay infeats = relay.var("infeats", shape=features.shape) norm = relay.Constant(tvm.nd.array(params["norm"])) g_data = relay.Constant(tvm.nd.array(params["g_data"])) indices = relay.Constant(tvm.nd.array(params["indices"])) indptr = relay.Constant(tvm.nd.array(params["indptr"])) Adjacency = namedtuple("Adjacency", ["data", "indices", "indptr"]) adj = Adjacency(g_data, indices, indptr) # Construct the 2-layer GCN layers = [] layers.append( GraphConv( layer_name="layers.0", input_dim=infeat_dim, output_dim=num_hidden, adj=adj, input=infeats, norm=norm, activation=relay.nn.relu, ) ) layers.append( GraphConv( layer_name="layers.1", input_dim=num_hidden, output_dim=num_classes, adj=adj, input=layers[-1], norm=norm, activation=None, ) ) # Analyze free variables and generate Relay function output = layers[-1] ###################################################################### # Compile and run with TVM # ------------------------ # # Export the weights from PyTorch model 
to Python Dict model_params = {} for param_tensor in torch_model.state_dict(): model_params[param_tensor] = torch_model.state_dict()[param_tensor].numpy() for i in range(num_layers + 1): params["layers.%d.weight" % (i)] = model_params["layers.%d.weight" % (i)] params["layers.%d.bias" % (i)] = model_params["layers.%d.bias" % (i)] # Set the TVM build target target = "llvm" # Currently only support `llvm` as target func = relay.Function(relay.analysis.free_vars(output), output) func = relay.build_module.bind_params_by_name(func, params) mod = tvm.IRModule() mod["main"] = func # Build with Relay with tvm.transform.PassContext(opt_level=0): # Currently only support opt_level=0 lib = relay.build(mod, target, params=params) # Generate graph executor dev = tvm.device(target, 0) m = graph_executor.GraphModule(lib["default"](dev)) ###################################################################### # Run the TVM model, test for accuracy and verify with DGL # -------------------------------------------------------- m.run() logits_tvm = m.get_output(0).numpy() print("Print the first five outputs from TVM execution\n", logits_tvm[:5]) acc = evaluate(dgl_g, logits_tvm) print("Test accuracy of TVM results: {:.2%}".format(acc)) import tvm.testing # Verify the results with the DGL model tvm.testing.assert_allclose(logits_torch, logits_tvm, atol=1e-3)
11,740
33.031884
156
py
tvm
tvm-main/gallery/how_to/work_with_schedules/intrin_math.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Intrinsics and Math Functions ============================= **Author**: `Tianqi Chen <https://tqchen.github.io>`_ While TVM supports basic arithmetic operations. In many cases usually we will need more complicated builtin functions. For example :code:`exp` to take the exponential of the function. These functions are target system dependent and may have different names of different target platforms. In this tutorial, we will learn how we can invoke these target specific functions, and how we can unify the interface via TVM's intrinsic API. """ from __future__ import absolute_import, print_function import numpy as np import tvm from tvm import te from tvm.ir import register_op_attr, register_intrin_lowering ###################################################################### # Direct Declare Extern Math Call # ------------------------------- # The most straight-forward way to call target specific function is via # extern function call construct in tvm. # In the following example, we use :any:`tvm.tir.call_pure_extern` to call # :code:`__expf` function, which is only available under CUDA. 
# n = te.var("n") A = te.placeholder((n,), name="A") B = te.compute(A.shape, lambda i: tvm.tir.call_pure_extern("float32", "__expf", A[i]), name="B") s = te.create_schedule(B.op) num_thread = 64 bx, tx = s[B].split(B.op.axis[0], factor=num_thread) s[B].bind(bx, te.thread_axis("blockIdx.x")) s[B].bind(tx, te.thread_axis("threadIdx.x")) f = tvm.build(s, [A, B], "cuda", name="myexp") print(f.imported_modules[0].get_source()) ###################################################################### # Unified Intrinsic Call # ---------------------- # The above code verifies that direct external call can be used to # call into device specific functions. # However, the above way only works for CUDA target with float type. # Ideally, we want to write same code for any device and any data type. # # TVM intrinsic provides the user a mechanism to achieve this, and this # is the recommended way to solve the problem. # The following code use te.exp instead, which create an intrinsic call # :py::func:`tvm.te.exp` to do the exponential. # n = te.var("n") A = te.placeholder((n,), name="A") B = te.compute(A.shape, lambda i: te.exp(A[i]), name="B") s = te.create_schedule(B.op) num_thread = 64 bx, tx = s[B].split(B.op.axis[0], factor=num_thread) s[B].bind(bx, te.thread_axis("blockIdx.x")) s[B].bind(tx, te.thread_axis("threadIdx.x")) fcuda = tvm.build(s, [A, B], "cuda", name="myexp") print(fcuda.imported_modules[0].get_source()) ###################################################################### # We can find that the code works for both CUDA and opencl. # The same te.exp can also be used for float64 data types. # fopencl = tvm.build(s, [A, B], "opencl", name="myexp") print(fopencl.imported_modules[0].get_source()) ###################################################################### # Intrinsic Lowering Rule # ----------------------- # When :py:func:`tvm.te.exp` is called, TVM creates an intrinsic Call Expr. 
# TVM uses transformation rules to transform the intrinsic # call to device specific extern calls. # # TVM also allows user to customize the rules during runtime. # The following example customizes CUDA lowering rule for :code:`exp`. # def my_cuda_math_rule(op): """Customized CUDA intrinsic lowering rule""" assert isinstance(op, tvm.tir.Call) name = op.op.name assert name.startswith("tir.") dispatch_name = name[4:] if op.dtype == "float32": # call float function return tvm.tir.call_pure_extern("float32", "%sf" % dispatch_name, op.args[0]) elif op.dtype == "float64": # call double function return tvm.tir.call_pure_extern("float32", dispatch_name, op.args[0]) else: # cannot do translation, return self. return op register_intrin_lowering("tir.exp", target="cuda", f=my_cuda_math_rule, level=99) ###################################################################### # Register the rule to TVM with override option to override existing rule. # Notice the difference between the printed code from previous one: # our new rule uses math function :code:`expf` instead of # fast math version :code:`__expf`. # fcuda = tvm.build(s, [A, B], "cuda", name="myexp") print(fcuda.imported_modules[0].get_source()) ###################################################################### # Add Your Own Intrinsic # ---------------------- # If there is an intrinsic that is not provided by TVM. # User can easily add new intrinsic by using the intrinsic rule system. # The following example add an intrinsic :code:`mylog` to the system. 
# def mylog(x): """customized log intrinsic function""" return tvm.tir.call_intrin(x.dtype, "tir.mylog", x) def my_cuda_mylog_rule(op): """CUDA lowering rule for log""" if op.dtype == "float32": return tvm.tir.call_pure_extern("float32", "logf", op.args[0]) elif op.dtype == "float64": return tvm.tir.call_pure_extern("float64", "log", op.args[0]) else: return op # new op registration is triggered by registering an attribute of the op register_op_attr("tir.mylog", "TCallEffectKind", tvm.tir.CallEffectKind.Pure) register_intrin_lowering("tir.mylog", target="cuda", f=my_cuda_mylog_rule, level=99) n = te.var("n") A = te.placeholder((n,), name="A") B = te.compute(A.shape, lambda i: mylog(A[i]), name="B") s = te.create_schedule(B.op) num_thread = 64 bx, tx = s[B].split(B.op.axis[0], factor=num_thread) s[B].bind(bx, te.thread_axis("blockIdx.x")) s[B].bind(tx, te.thread_axis("threadIdx.x")) fcuda = tvm.build(s, [A, B], "cuda", name="mylog") print(fcuda.imported_modules[0].get_source()) ###################################################################### # Summary # ------- # - TVM can call extern target dependent math function. # - Use intrinsic to defined a unified interface for the functions. # - For more intrinsics available in tvm, take a look at :any:`tvm.tir` # - You can customize the intrinsic behavior by defining your own rules. #
6,848
38.362069
96
py
tvm
tvm-main/gallery/how_to/work_with_schedules/reduction.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Reduction ========= **Author**: `Tianqi Chen <https://tqchen.github.io>`_ This is an introduction material on how to do reduction in TVM. Associative reduction operators like sum/max/min are typical construction blocks of linear algebra operations. In this tutorial, we will demonstrate how to do reduction in TVM. """ from __future__ import absolute_import, print_function # sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True # sphinx_gallery_end_ignore import tvm import tvm.testing from tvm import te import numpy as np ###################################################################### # Describe Sum of Rows # -------------------- # Assume we want to compute sum of rows as our example. # In numpy semantics this can be written as :code:`B = numpy.sum(A, axis=1)` # # The following lines describe the row sum operation. # To create a reduction formula, we declare a reduction axis using # :any:`te.reduce_axis`. :any:`te.reduce_axis` takes in the range of reductions. # :any:`te.sum` takes in the expression to be reduced as well as the reduction # axis and compute the sum of value over all k in the declared range. # # The equivalent C code is as follows: # # .. 
code-block:: c # # for (int i = 0; i < n; ++i) { # B[i] = 0; # for (int k = 0; k < m; ++k) { # B[i] = B[i] + A[i][k]; # } # } # n = te.var("n") m = te.var("m") A = te.placeholder((n, m), name="A") k = te.reduce_axis((0, m), "k") B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B") ###################################################################### # Schedule the Reduction # ---------------------- # There are several ways to schedule a reduction. # Before doing anything, let us print out the IR code of default schedule. # s = te.create_schedule(B.op) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # You can find that the IR code is quite like the C code. # The reduction axis is similar to a normal axis, it can be splitted. # # In the following code we split both the row axis of B as well # axis by different factors. The result is a nested reduction. # ko, ki = s[B].split(B.op.reduce_axis[0], factor=16) xo, xi = s[B].split(B.op.axis[0], factor=32) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # If we are building a GPU kernel, we can bind the rows of B to GPU threads. s[B].bind(xo, te.thread_axis("blockIdx.x")) s[B].bind(xi, te.thread_axis("threadIdx.x")) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # Reduction Factoring and Parallelization # --------------------------------------- # One problem of building a reduction is that we cannot simply # parallelize over the reduction axis. We need to divide the computation # of the reduction, store the local reduction result in a temporal array # before doing a reduction over the temp array. # # The rfactor primitive does such rewrite of the computation. # In the following schedule, the result of B is written to a temporary # result B.rf. The factored dimension becomes the first dimension of B.rf. 
# s = te.create_schedule(B.op) ko, ki = s[B].split(B.op.reduce_axis[0], factor=16) BF = s.rfactor(B, ki) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # The scheduled operator of B also get rewritten to be sum over # the first axis of reduced result of B.f # print(s[B].op.body) ###################################################################### # Cross Thread Reduction # ---------------------- # We can now parallelize over the factored axis. # Here the reduction axis of B is marked to be a thread. # TVM allows reduction axis to be marked as thread if it is the only # axis in reduction and cross thread reduction is possible in the device. # # This is indeed the case after the factoring. # We can directly compute BF at the reduction axis as well. # The final generated kernel will divide the rows by blockIdx.x and threadIdx.y # columns by threadIdx.x and finally do a cross thread reduction over threadIdx.x # xo, xi = s[B].split(s[B].op.axis[0], factor=32) s[B].bind(xo, te.thread_axis("blockIdx.x")) s[B].bind(xi, te.thread_axis("threadIdx.y")) tx = te.thread_axis("threadIdx.x") s[B].bind(s[B].op.reduce_axis[0], tx) s[BF].compute_at(s[B], s[B].op.reduce_axis[0]) s[B].set_store_predicate(tx.var.equal(0)) fcuda = tvm.build(s, [A, B], "cuda") print(fcuda.imported_modules[0].get_source()) ###################################################################### # Verify the correctness of result kernel by comparing it to numpy. # nn = 128 dev = tvm.cuda(0) a = tvm.nd.array(np.random.uniform(size=(nn, nn)).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev) fcuda(a, b) tvm.testing.assert_allclose(b.numpy(), np.sum(a.numpy(), axis=1), rtol=1e-4) ###################################################################### # Describe Convolution via 2D Reduction # ------------------------------------- # In TVM, we can describe convolution via 2D reduction in a simple way. 
# Here is an example for 2D convolution with filter size = [3, 3] and strides = [1, 1]. # n = te.var("n") Input = te.placeholder((n, n), name="Input") Filter = te.placeholder((3, 3), name="Filter") di = te.reduce_axis((0, 3), name="di") dj = te.reduce_axis((0, 3), name="dj") Output = te.compute( (n - 2, n - 2), lambda i, j: te.sum(Input[i + di, j + dj] * Filter[di, dj], axis=[di, dj]), name="Output", ) s = te.create_schedule(Output.op) print(tvm.lower(s, [Input, Filter, Output], simple_mode=True)) ###################################################################### # .. _general-reduction: # # Define General Commutative Reduction Operation # ---------------------------------------------- # Besides the built-in reduction operations like :any:`te.sum`, # :any:`tvm.te.min` and :any:`tvm.te.max`, you can also define your # commutative reduction operation by :any:`te.comm_reducer`. # n = te.var("n") m = te.var("m") product = te.comm_reducer(lambda x, y: x * y, lambda t: tvm.tir.const(1, dtype=t), name="product") A = te.placeholder((n, m), name="A") k = te.reduce_axis((0, m), name="k") B = te.compute((n,), lambda i: product(A[i, k], axis=k), name="B") ###################################################################### # .. note:: # # Sometimes we would like to perform reduction that involves multiple # values like :code:`argmax`, which can be done by tuple inputs. # See :ref:`reduction-with-tuple-inputs` for more detail. ###################################################################### # Summary # ------- # This tutorial provides a walk through of reduction schedule. # # - Describe reduction with reduce_axis. # - Use rfactor to factor out axis if we need parallelism. # - Define new reduction operation by :any:`te.comm_reducer`
7,770
37.661692
98
py
tvm
tvm-main/gallery/how_to/work_with_schedules/scan.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Scan and Recurrent Kernel ========================= **Author**: `Tianqi Chen <https://tqchen.github.io>`_ This is an introduction material on how to do recurrent computing in TVM. Recurrent computing is a typical pattern in neural networks. """ from __future__ import absolute_import, print_function # sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True # sphinx_gallery_end_ignore import tvm import tvm.testing from tvm import te import numpy as np ###################################################################### # TVM supports a scan operator to describe symbolic loop. # The following scan op computes cumsum over columns of X. # # The scan is carried over the highest dimension of the tensor. # :code:`s_state` is a placeholder that describes the transition state of the scan. # :code:`s_init` describes how we can initialize the first k timesteps. # Here since s_init's first dimension is 1, it describes how we initialize # The state at first timestep. # # :code:`s_update` describes how to update the value at timestep t. The update # value can refer back to the values of previous timestep via state placeholder. 
# Note that while it is invalid to refer to :code:`s_state` at current or later timestep. # # The scan takes in state placeholder, initial value and update description. # It is also recommended(although not necessary) to list the inputs to the scan cell. # The result of the scan is a tensor, giving the result of :code:`s_state` after the # update over the time domain. # m = te.var("m") n = te.var("n") X = te.placeholder((m, n), name="X") s_state = te.placeholder((m, n)) s_init = te.compute((1, n), lambda _, i: X[0, i]) s_update = te.compute((m, n), lambda t, i: s_state[t - 1, i] + X[t, i]) s_scan = tvm.te.scan(s_init, s_update, s_state, inputs=[X]) ###################################################################### # Schedule the Scan Cell # ---------------------- # We can schedule the body of the scan by scheduling the update and # init part separately. Note that it is invalid to schedule the # first iteration dimension of the update part. # To split on the time iteration, user can schedule on scan_op.scan_axis instead. # s = te.create_schedule(s_scan.op) num_thread = 256 block_x = te.thread_axis("blockIdx.x") thread_x = te.thread_axis("threadIdx.x") xo, xi = s[s_init].split(s_init.op.axis[1], factor=num_thread) s[s_init].bind(xo, block_x) s[s_init].bind(xi, thread_x) xo, xi = s[s_update].split(s_update.op.axis[1], factor=num_thread) s[s_update].bind(xo, block_x) s[s_update].bind(xi, thread_x) print(tvm.lower(s, [X, s_scan], simple_mode=True)) ###################################################################### # Build and Verify # ---------------- # We can build the scan kernel like other TVM kernels, here we use # numpy to verify the correctness of the result. 
# fscan = tvm.build(s, [X, s_scan], "cuda", name="myscan") dev = tvm.cuda(0) n = 1024 m = 10 a_np = np.random.uniform(size=(m, n)).astype(s_scan.dtype) a = tvm.nd.array(a_np, dev) b = tvm.nd.array(np.zeros((m, n), dtype=s_scan.dtype), dev) fscan(a, b) tvm.testing.assert_allclose(b.numpy(), np.cumsum(a_np, axis=0)) ###################################################################### # Multi-Stage Scan Cell # --------------------- # In the above example we described the scan cell using one Tensor # computation stage in s_update. It is possible to use multiple # Tensor stages in the scan cell. # # The following lines demonstrate a scan with two stage operations # in the scan cell. # m = te.var("m") n = te.var("n") X = te.placeholder((m, n), name="X") s_state = te.placeholder((m, n)) s_init = te.compute((1, n), lambda _, i: X[0, i]) s_update_s1 = te.compute((m, n), lambda t, i: s_state[t - 1, i] * 2, name="s1") s_update_s2 = te.compute((m, n), lambda t, i: s_update_s1[t, i] + X[t, i], name="s2") s_scan = tvm.te.scan(s_init, s_update_s2, s_state, inputs=[X]) ###################################################################### # These intermediate tensors can also be scheduled normally. # To ensure correctness, TVM creates a group constraint to forbid # the body of scan to be compute_at locations outside the scan loop. # s = te.create_schedule(s_scan.op) xo, xi = s[s_update_s2].split(s_update_s2.op.axis[1], factor=32) s[s_update_s1].compute_at(s[s_update_s2], xo) print(tvm.lower(s, [X, s_scan], simple_mode=True)) ###################################################################### # Multiple States # --------------- # For complicated applications like RNN, we might need more than one # recurrent state. Scan support multiple recurrent states. # The following example demonstrates how we can build recurrence with two states. 
# m = te.var("m") n = te.var("n") l = te.var("l") X = te.placeholder((m, n), name="X") s_state1 = te.placeholder((m, n)) s_state2 = te.placeholder((m, l)) s_init1 = te.compute((1, n), lambda _, i: X[0, i]) s_init2 = te.compute((1, l), lambda _, i: 0.0) s_update1 = te.compute((m, n), lambda t, i: s_state1[t - 1, i] + X[t, i]) s_update2 = te.compute((m, l), lambda t, i: s_state2[t - 1, i] + s_state1[t - 1, 0]) s_scan1, s_scan2 = tvm.te.scan( [s_init1, s_init2], [s_update1, s_update2], [s_state1, s_state2], inputs=[X] ) s = te.create_schedule(s_scan1.op) print(tvm.lower(s, [X, s_scan1, s_scan2], simple_mode=True)) ###################################################################### # Summary # ------- # This tutorial provides a walk through of scan primitive. # # - Describe scan with init and update. # - Schedule the scan cells as normal schedule. # - For complicated workload, use multiple states and steps in scan cell.
6,429
39.440252
89
py
tvm
tvm-main/gallery/how_to/work_with_schedules/schedule_primitives.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _schedule_primitives: Schedule Primitives in TVM ========================== **Author**: `Ziheng Jiang <https://github.com/ZihengJiang>`_ TVM is a domain specific language for efficient kernel construction. In this tutorial, we will show you how to schedule the computation by various primitives provided by TVM. """ from __future__ import absolute_import, print_function import tvm from tvm import te import numpy as np ###################################################################### # # There often exist several methods to compute the same result, # however, different methods will result in different locality and # performance. So TVM asks user to provide how to execute the # computation called **Schedule**. # # A **Schedule** is a set of transformation of computation that # transforms the loop of computations in the program. # # declare some variables for use later n = te.var("n") m = te.var("m") ###################################################################### # A schedule can be created from a list of ops, by default the # schedule computes tensor in a serial manner in a row-major order. 
# declare a matrix element-wise multiply A = te.placeholder((m, n), name="A") B = te.placeholder((m, n), name="B") C = te.compute((m, n), lambda i, j: A[i, j] * B[i, j], name="C") s = te.create_schedule([C.op]) # lower will transform the computation from definition to the real # callable function. With argument `simple_mode=True`, it will # return you a readable C like statement, we use it here to print the # schedule result. print(tvm.lower(s, [A, B, C], simple_mode=True)) ###################################################################### # One schedule is composed by multiple stages, and one # **Stage** represents schedule for one operation. We provide various # methods to schedule every stage. ###################################################################### # split # ----- # :code:`split` can split a specified axis into two axes by # :code:`factor`. A = te.placeholder((m,), name="A") B = te.compute((m,), lambda i: A[i] * 2, name="B") s = te.create_schedule(B.op) xo, xi = s[B].split(B.op.axis[0], factor=32) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # You can also split a axis by :code:`nparts`, which splits the axis # contrary with :code:`factor`. A = te.placeholder((m,), name="A") B = te.compute((m,), lambda i: A[i], name="B") s = te.create_schedule(B.op) bx, tx = s[B].split(B.op.axis[0], nparts=32) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # tile # ---- # :code:`tile` help you execute the computation tile by tile over two # axes. 
A = te.placeholder((m, n), name="A") B = te.compute((m, n), lambda i, j: A[i, j], name="B") s = te.create_schedule(B.op) xo, yo, xi, yi = s[B].tile(B.op.axis[0], B.op.axis[1], x_factor=10, y_factor=5) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # fuse # ---- # :code:`fuse` can fuse two consecutive axes of one computation. A = te.placeholder((m, n), name="A") B = te.compute((m, n), lambda i, j: A[i, j], name="B") s = te.create_schedule(B.op) # tile to four axes first: (i.outer, j.outer, i.inner, j.inner) xo, yo, xi, yi = s[B].tile(B.op.axis[0], B.op.axis[1], x_factor=10, y_factor=5) # then fuse (i.inner, j.inner) into one axis: (i.inner.j.inner.fused) fused = s[B].fuse(xi, yi) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # reorder # ------- # :code:`reorder` can reorder the axes in the specified order. A = te.placeholder((m, n), name="A") B = te.compute((m, n), lambda i, j: A[i, j], name="B") s = te.create_schedule(B.op) # tile to four axes first: (i.outer, j.outer, i.inner, j.inner) xo, yo, xi, yi = s[B].tile(B.op.axis[0], B.op.axis[1], x_factor=10, y_factor=5) # then reorder the axes: (i.inner, j.outer, i.outer, j.inner) s[B].reorder(xi, yo, xo, yi) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # bind # ---- # :code:`bind` can bind a specified axis with a thread axis, often used # in gpu programming. 
A = te.placeholder((n,), name="A") B = te.compute(A.shape, lambda i: A[i] * 2, name="B") s = te.create_schedule(B.op) bx, tx = s[B].split(B.op.axis[0], factor=64) s[B].bind(bx, te.thread_axis("blockIdx.x")) s[B].bind(tx, te.thread_axis("threadIdx.x")) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # compute_at # ---------- # For a schedule that consists of multiple operators, TVM will compute # tensors at the root separately by default. A = te.placeholder((m,), name="A") B = te.compute((m,), lambda i: A[i] + 1, name="B") C = te.compute((m,), lambda i: B[i] * 2, name="C") s = te.create_schedule(C.op) print(tvm.lower(s, [A, B, C], simple_mode=True)) ###################################################################### # :code:`compute_at` can move computation of `B` into the first axis # of computation of `C`. A = te.placeholder((m,), name="A") B = te.compute((m,), lambda i: A[i] + 1, name="B") C = te.compute((m,), lambda i: B[i] * 2, name="C") s = te.create_schedule(C.op) s[B].compute_at(s[C], C.op.axis[0]) print(tvm.lower(s, [A, B, C], simple_mode=True)) ###################################################################### # compute_inline # -------------- # :code:`compute_inline` can mark one stage as inline, then the body of # computation will be expanded and inserted at the address where the # tensor is required. A = te.placeholder((m,), name="A") B = te.compute((m,), lambda i: A[i] + 1, name="B") C = te.compute((m,), lambda i: B[i] * 2, name="C") s = te.create_schedule(C.op) s[B].compute_inline() print(tvm.lower(s, [A, B, C], simple_mode=True)) ###################################################################### # compute_root # ------------ # :code:`compute_root` can move computation of one stage to the root. 
A = te.placeholder((m,), name="A") B = te.compute((m,), lambda i: A[i] + 1, name="B") C = te.compute((m,), lambda i: B[i] * 2, name="C") s = te.create_schedule(C.op) s[B].compute_at(s[C], C.op.axis[0]) s[B].compute_root() print(tvm.lower(s, [A, B, C], simple_mode=True)) ###################################################################### # Summary # ------- # This tutorial provides an introduction to schedule primitives in # tvm, which permits users schedule the computation easily and # flexibly. # # In order to get a good performance kernel implementation, the # general workflow often is: # # - Describe your computation via series of operations. # - Try to schedule the computation with primitives. # - Compile and run to see the performance difference. # - Adjust your schedule according the running result.
7,735
35.490566
79
py
tvm
tvm-main/gallery/how_to/work_with_schedules/extern_op.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ External Tensor Functions ========================= **Author**: `Tianqi Chen <https://tqchen.github.io>`_ While TVM supports transparent code generation, sometimes it is also helpful to incorporate manual written code into the pipeline. For example, we might want to use cuDNN for some of the convolution kernels and define the rest of the stages. TVM supports these black box function calls natively. Specifically, TVM support all the tensor functions that are DLPack compatible. Which means we can call any function with POD types(pointer, int, float) or pointer to DLTensor as argument. """ from __future__ import absolute_import, print_function import tvm from tvm import te import numpy as np from tvm.contrib import cblas import tvm.testing if not tvm.get_global_func("tvm.contrib.cblas.matmul", allow_missing=True): raise Exception("Not compiled with cblas support; can't build this tutorial") ###################################################################### # Use Extern Tensor Function # -------------------------- # In the example below, we use :any:`te.extern` to add an extern # array function call. In the extern call, we declare the shape # of output tensors. 
In the second argument we provide the list of inputs. # # User will need to provide a function describing how to compute the result. # The compute function takes list of symbolic placeholder for the inputs, # list of symbolic placeholder for the outputs and returns the executing statement. # # In this case we simply call a registered TVM function, which invokes a CBLAS call. # TVM does not control internal of the extern array function and treats it as black-box. # We can further mix schedulable TVM calls that add a bias term to the result. # n = 1024 l = 128 m = 235 bias = te.var("bias", dtype="float32") A = te.placeholder((n, l), name="A") B = te.placeholder((l, m), name="B") C = te.extern( (n, m), [A, B], lambda ins, outs: tvm.tir.call_packed( "tvm.contrib.cblas.matmul", ins[0], ins[1], outs[0], False, False ), name="C", ) D = te.compute(C.shape, lambda i, j: C[i, j] + bias, name="D") s = te.create_schedule(D.op) ###################################################################### # Verify the Result # ----------------- # We can verify that the result matches what we expected. # dev = tvm.cpu(0) f = tvm.build(s, [A, B, D, bias], "llvm") a = tvm.nd.array(np.random.uniform(size=(n, l)).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=(l, m)).astype(B.dtype), dev) d = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), dev) bb = 10.0 f(a, b, d, bb) tvm.testing.assert_allclose(d.numpy(), np.dot(a.numpy(), b.numpy()) + 10, rtol=1e-5) ###################################################################### # Extern Contrib Wrappers # ----------------------- # TVM also provide extern contrib wrappers to useful extern calls, # the following line is equivalent to the previous example. 
# from tvm.contrib import cblas C = cblas.matmul(A, B) D = te.compute(C.shape, lambda i, j: C[i, j] + bias, name="D") s = te.create_schedule(D.op) ###################################################################### # Hook Python Function as Extern # ------------------------------ # Since we can call into any PackedFunc in TVM. We can use the extern # function to callback into python. # # The following example registers a python function into TVM runtime system # and use it to complete one stage of the computation. # This makes TVM much more flexible. For example, we can insert front-end # callbacks to inspect the intermediate results or mix customized code # with TVM. # @tvm.register_func("tvm.contrib.my_tvm_addone") def my_tvm_addone(x, y): print("my_tvm_addone signatures: %s, %s" % (type(x), type(y))) tvm.nd.array(x.numpy() + 1).copyto(y) A = te.placeholder((n,), name="A") B = te.extern( A.shape, [A], lambda ins, outs: tvm.tir.call_packed("tvm.contrib.my_tvm_addone", ins[0], outs[0]), name="C", ) s = te.create_schedule(B.op) f = tvm.build(s, [A, B], "llvm") a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=(n,)).astype(B.dtype), dev) f(a, b) tvm.testing.assert_allclose(b.numpy(), a.numpy() + 1, rtol=1e-5) ###################################################################### # Summary # ------- # - TVM calls extern tensor function via :any:`te.extern` # - Use contrib wrappers for short sugars of extern tensor calls. # - We can hook front-end function as extern tensor callbacks. #
5,320
36.737589
88
py
tvm
tvm-main/gallery/how_to/work_with_schedules/tensorize.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorials-tensorize: Use Tensorize to Leverage Hardware Intrinsics ============================================= **Author**: `Yizhi Liu <https://github.com/yzhliu>`_ This is an introduction material on how to perform tensorization in TVM. By using schedule primitive :code:`tensorize`, people can replace a unit of computation with the corresponding intrinsics, making it easy to leverage handcrafted micro-kernels, as well as extend TVM to support new hardware architectures. The purpose of this tutorial is to show the functionality and usage of tensorize instead of providing an efficient solution. """ from __future__ import absolute_import, print_function import tvm from tvm import te import tvm.testing import numpy as np ###################################################################### # Define Matrix Multiplication # ---------------------------- # Take matrix multiplication as our example. # Matmul first multiply the corresponding elements between two matrix, # then accumulate across a certain axis. # The following lines describe the computation :code:`A * B^T` in TVM. 
# N, M, L = 1024, 512, 64 A = te.placeholder((N, L), name="A") B = te.placeholder((M, L), name="B") k = te.reduce_axis((0, L), name="k") C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[j, k], axis=k), name="C") s = te.create_schedule(C.op) print(tvm.lower(s, [A, B, C], simple_mode=True)) ###################################################################### # Schedule the Matmul # ------------------- # Now, suppose we have an accelerator that supports # matrix-vector multiplication (GEMV) as a hardware primitive, # which can take arbitrary size of reduce axis, # but another axis needs to be no larger than 16. # Thus we break down the matmul loops to make the innermost loops a (16x64) GEMV. # factor = 16 x, y = C.op.axis (z,) = C.op.reduce_axis yo, yi = s[C].split(y, factor=factor) s[C].reorder(x, yo, yi, z) print(tvm.lower(s, [A, B, C], simple_mode=True)) ###################################################################### # As showed in the IR printed above, # the inner loops :code:`j.inner` along with :code:`k` together form a computation of GEMV # - within the inner most two loops, the index :code:`i` is fixed, # the access to the matrix :code:`A` only varies by :code:`k`, # which makes the access pattern of :code:`A` a "vector". # In order to leverage our hypothetical hardware's GEMV instruction, # we can tensorize over :code:`j.inner`. # # Define GEMV Tensorization Intrinsic # ----------------------------------- # Before scheduling the tensorization, we need to first define the intrinsic function for GEMV. # It includes two parts, the first is a compute definition of GEMV. # TVM uses it to match the computing pattern in the original Matmul schedule. # The second is to specify how to execute GEMV on the device, # which is done in :code:`intrin_func` below. 
# def intrin_gemv(m, l): a = te.placeholder((l,), name="a") b = te.placeholder((m, l), name="b") k = te.reduce_axis((0, l), name="k") c = te.compute((m,), lambda i: te.sum(a[k] * b[i, k], axis=k), name="c") Ab = tvm.tir.decl_buffer(a.shape, a.dtype, name="A", offset_factor=1, strides=[1]) Bb = tvm.tir.decl_buffer(b.shape, b.dtype, name="B", offset_factor=1, strides=[te.var("s1"), 1]) Cb = tvm.tir.decl_buffer(c.shape, c.dtype, name="C", offset_factor=1, strides=[1]) def intrin_func(ins, outs): ib = tvm.tir.ir_builder.create() aa, bb = ins cc = outs[0] ib.emit( tvm.tir.call_extern( "int32", "gemv_update", cc.access_ptr("w"), aa.access_ptr("r"), bb.access_ptr("r"), m, l, bb.strides[0], ) ) return ib.get() return te.decl_tensor_intrin(c.op, intrin_func, binds={a: Ab, b: Bb, c: Cb}) ###################################################################### # Here :code:`te.decl_tensor_intrin` declares how to execute the computation :code:`c.op`. # Our implementation simply takes the inputs and outputs, # converts them to pointers and emit an external function call. # Note that tensorization requires user to specify :code:`offset_factor`, # with this information, TVM has knowledge of whether the data is aligned # between the start address of the original data structure # and the offset being passed to tensorize, # so that it has chance to optimize with vectorized loading. # We set the factor to 1 for simplification. # # Buffers are also declared for inputs and outputs, though this is not required, # we benefit from the extra information provided by buffers. For example, we pass # :code:`bb.strides[0]` as an argument to the external function :code:`gemv_update`. # For now :code:`bb.strides[0] == l`, # but later we will see how they can differ with more complicated schedules. # # Note that we use :code:`te.var("s1")` as the first stride dimension for :code:`B`. 
# If the strides can be inferred # - in this case, TVM knows tensor B is compact thus the strides are :code:`[L, 1]` - # such placeholder can be put to let TVM automatically bind the inferred value for us. # gemv = intrin_gemv(factor, L) s[C].tensorize(yi, gemv) print(tvm.lower(s, [A, B, C], simple_mode=True)) ###################################################################### # By tensorizing over :code:`yi`, the inner most two loops are # now replaced by the intrinsic function we defined before. # In order to build and run the module, let's define the external function :code:`gemv_update`, # it is a naive implementation of GEMV, just for demonstration. # def gemv_impl(): cc_code = """ extern "C" int gemv_update(float *cc, float *aa, float *bb, int m, int l, int stride) { for (int i = 0; i < m; ++i) { for (int j = 0; j < l; ++j) { cc[i] += aa[j] * bb[i * stride + j]; } } return 0; } """ from tvm.contrib import utils, clang temp = utils.tempdir() ll_path = temp.relpath("temp.ll") # Create LLVM ir from c source code ll_code = clang.create_llvm(cc_code, output=ll_path) return ll_code ###################################################################### # Now we leverage the pragma attribute :code:`import_llvm` to import llvm asm inline. # The importing needs to happen before the tensorized GEMV being executed. # s[C].pragma(x, "import_llvm", gemv_impl()) print(tvm.lower(s, [A, B, C], simple_mode=True)) ###################################################################### # Finally we compare the tensorize version with that :code:`numpy.dot` produces, # ensure our implementation is correct. 
# func = tvm.build(s, [A, B, C], target="llvm", name="gemv") from tvm.topi.utils import get_const_tuple dtype = A.dtype dev = tvm.device("cpu", 0) a = np.random.uniform(size=get_const_tuple(A.shape)).astype(dtype) b = np.random.uniform(size=get_const_tuple(B.shape)).astype(dtype) c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=dtype), dev) func(tvm.nd.array(a, dev), tvm.nd.array(b, dev), c) tvm.testing.assert_allclose(c.numpy(), np.dot(a, b.T), rtol=1e-3) ###################################################################### # Reduce-update for Tensorize # --------------------------- # So far you have learned the basic idea of tensorize, # now let's move one step forward to a more complicated case. # # Assume our accelerator could only multiply a vector by a square matrix, # in which the vector size needs to be no larger than 16. # Given such hardware constrain, now we need to split the reduce axis as following, # zo, zi = s[C].split(z, factor=factor) s[C].reorder(x, yo, zo, yi, zi) ###################################################################### # However, since the tensorize intrinsic now only covers a part of the reduce axis, # instead of using one "body" function, TVM requires a :code:`reduce_reset` function, # which will be invoked before the reduce for-loop, and a :code:`reduce_update` function, # which defines the "update" computing strategy. 
# def gemv_impl(): cc_code = """ extern "C" int gemv_update(float *cc, float *aa, float *bb, int m, int l, int stride) { for (int i = 0; i < m; ++i) { for (int j = 0; j < l; ++j) { cc[i] += aa[j] * bb[i * stride + j]; } } return 0; } extern "C" int gemv_reset(float *cc, int m) { for (int i = 0; i < m; ++i) { cc[i] = 0.0; } return 0; } """ from tvm.contrib import utils, clang temp = utils.tempdir() ll_path = temp.relpath("temp.ll") # Create LLVM ir from c source code ll_code = clang.create_llvm(cc_code, output=ll_path) return ll_code def intrin_gemv(m, l): a = te.placeholder((l,), name="a") b = te.placeholder((m, l), name="b") k = te.reduce_axis((0, l), name="k") c = te.compute((m,), lambda i: te.sum(a[k] * b[i, k], axis=k), name="c") Ab = tvm.tir.decl_buffer(a.shape, a.dtype, name="A", offset_factor=1, strides=[1]) Bb = tvm.tir.decl_buffer(b.shape, b.dtype, name="B", offset_factor=1, strides=[te.var("s1"), 1]) Cb = tvm.tir.decl_buffer(c.shape, c.dtype, name="C", offset_factor=1, strides=[1]) def intrin_func(ins, outs): aa, bb = ins cc = outs[0] def _body(): ib = tvm.tir.ir_builder.create() ib.emit( tvm.tir.call_extern( "int32", "gemv_update", cc.access_ptr("w"), aa.access_ptr("r"), bb.access_ptr("r"), m, l, bb.strides[0], ) ) return ib.get() def _reduce_reset(): ib = tvm.tir.ir_builder.create() ib.emit(tvm.tir.call_extern("int32", "gemv_reset", cc.access_ptr("w"), m)) return ib.get() def _reduce_update(): return _body() return _body(), _reduce_reset(), _reduce_update() return te.decl_tensor_intrin(c.op, intrin_func, binds={a: Ab, b: Bb, c: Cb}) ###################################################################### # Note that :code:`intrin_func` now returns a triplet: # :code:`(body, reduce_reset, reduce_update)`. # If tensorization includes all the reduce axes, function :code:`body()` will be invoked, # otherwise :code:`reduce_reset()` and :code:`reduce_update()` together will be used. 
# In our example :code:`body()` and :code:`reduce_update()` # share the same implementation, # while in other cases, hardware may have different instructions for these two functions. # Moreover, we can see now :code:`bb.strides[0]` is different from :code:`l` # due to the tiling. # # Tensorize for squared GEMV, build and check the results, # gemv = intrin_gemv(factor, factor) s[C].tensorize(yi, gemv) s[C].pragma(yo, "import_llvm", gemv_impl()) func = tvm.build(s, [A, B, C], target="llvm", name="gemv") a = np.random.uniform(size=get_const_tuple(A.shape)).astype(dtype) b = np.random.uniform(size=get_const_tuple(B.shape)).astype(dtype) c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=dtype), dev) func(tvm.nd.array(a, dev), tvm.nd.array(b, dev), c) tvm.testing.assert_allclose(c.numpy(), np.dot(a, b.T), rtol=1e-3) ###################################################################### # Summary # ------- # This tutorial demonstrates the usage of tensorize intrinsic in TVM. # Tensorize provides a way for users to get fully optimized schedule via micro-kernels. # For example, INT8 quantization on Intel CPUs uses tensorization # to invoke AVX instruction directly. # It also enables TVM to compile to ASICs - # checkout :ref:`vta-index` for details. # We also demonstrates how to use inline assembly importing, # which helps users inject asm easily into the schedule. #
12,789
38.96875
100
py
tvm
tvm-main/gallery/how_to/work_with_schedules/tuple_inputs.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compute and Reduce with Tuple Inputs ======================================= **Author**: `Ziheng Jiang <https://github.com/ZihengJiang>`_ Often we want to compute multiple outputs with the same shape within a single loop or perform reduction that involves multiple values like :code:`argmax`. These problems can be addressed by tuple inputs. In this tutorial, we will introduce the usage of tuple inputs in TVM. """ from __future__ import absolute_import, print_function import tvm from tvm import te import numpy as np ###################################################################### # Describe Batchwise Computation # ------------------------------ # For operators which have the same shape, we can put them together as # the inputs of :any:`te.compute`, if we want them to be scheduled # together in the next schedule procedure. 
# n = te.var("n") m = te.var("m") A0 = te.placeholder((m, n), name="A0") A1 = te.placeholder((m, n), name="A1") B0, B1 = te.compute((m, n), lambda i, j: (A0[i, j] + 2, A1[i, j] * 3), name="B") # The generated IR code would be: s = te.create_schedule(B0.op) print(tvm.lower(s, [A0, A1, B0, B1], simple_mode=True)) ###################################################################### # .. _reduction-with-tuple-inputs: # # Describe Reduction with Collaborative Inputs # -------------------------------------------- # Sometimes, we require multiple inputs to express some reduction # operators, and the inputs will collaborate together, e.g. :code:`argmax`. # In the reduction procedure, :code:`argmax` need to compare the value of # operands, also need to keep the index of operand. It can be expressed # with :py:func:`te.comm_reducer` as below: # x and y are the operands of reduction, both of them is a tuple of index # and value. def fcombine(x, y): lhs = tvm.tir.Select((x[1] >= y[1]), x[0], y[0]) rhs = tvm.tir.Select((x[1] >= y[1]), x[1], y[1]) return lhs, rhs # our identity element also need to be a tuple, so `fidentity` accepts # two types as inputs. def fidentity(t0, t1): return tvm.tir.const(-1, t0), tvm.te.min_value(t1) argmax = te.comm_reducer(fcombine, fidentity, name="argmax") # describe the reduction computation m = te.var("m") n = te.var("n") idx = te.placeholder((m, n), name="idx", dtype="int32") val = te.placeholder((m, n), name="val", dtype="int32") k = te.reduce_axis((0, n), "k") T0, T1 = te.compute((m,), lambda i: argmax((idx[i, k], val[i, k]), axis=k), name="T") # the generated IR code would be: s = te.create_schedule(T0.op) print(tvm.lower(s, [idx, val, T0, T1], simple_mode=True)) ###################################################################### # .. note:: # # For ones who are not familiar with reduction, please refer to # :ref:`general-reduction`. 
###################################################################### # Schedule Operation with Tuple Inputs # ------------------------------------ # It is worth mentioning that although you will get multiple outputs # with one batch operation, but they can only be scheduled together # in terms of operation. n = te.var("n") m = te.var("m") A0 = te.placeholder((m, n), name="A0") B0, B1 = te.compute((m, n), lambda i, j: (A0[i, j] + 2, A0[i, j] * 3), name="B") A1 = te.placeholder((m, n), name="A1") C = te.compute((m, n), lambda i, j: A1[i, j] + B0[i, j], name="C") s = te.create_schedule(C.op) s[B0].compute_at(s[C], C.op.axis[0]) # as you can see in the below generated IR code: print(tvm.lower(s, [A0, A1, C], simple_mode=True)) ###################################################################### # Summary # ------- # This tutorial introduces the usage of tuple inputs operation. # # - Describe normal batchwise computation. # - Describe reduction operation with tuple inputs. # - Notice that you can only schedule computation in terms of operation instead of tensor.
4,639
36.419355
90
py
tvm
tvm-main/gallery/how_to/work_with_schedules/tedd.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Use Tensor Expression Debug Display (TEDD) for Visualization ============================================================ **Author**: `Yongfeng Gu <https://github.com/yongfeng-nv>`_ This is an introduction about using TEDD to visualize tensor expressions. Tensor Expressions are scheduled with primitives. Although individual primitives are usually easy to understand, they become complicated quickly when you put them together. We have introduced an operational model of schedule primitives in Tensor Expression. * the interactions between different schedule primitives, * the impact of the schedule primitives on the final code generation. The operational model is based on a Dataflow Graph, a Schedule Tree and an IterVar Relationship Graph. Schedule primitives perform operations on these graphs. TEDD renders these three graphs from a given schedule. This tutorial demonstrates how to use TEDD and how to interpret the rendered graphs. 
""" import tvm from tvm import te from tvm import topi from tvm.contrib import tedd ###################################################################### # Define and Schedule Convolution with Bias and ReLU # -------------------------------------------------- # Let's build an example Tensor Expression for a convolution followed by Bias and ReLU. # We first connect conv2d, add, and relu TOPIs. Then, we create a TOPI generic schedule. # batch = 1 in_channel = 256 in_size = 32 num_filter = 256 kernel = 3 stride = 1 padding = "SAME" dilation = 1 A = te.placeholder((in_size, in_size, in_channel, batch), name="A") W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W") B = te.placeholder((1, num_filter, 1), name="bias") with tvm.target.Target("llvm"): t_conv = topi.nn.conv2d_hwcn(A, W, stride, padding, dilation) t_bias = topi.add(t_conv, B) t_relu = topi.nn.relu(t_bias) s = topi.generic.schedule_conv2d_hwcn([t_relu]) ###################################################################### # Render Graphs with TEDD # ----------------------- # We render graphs to see the computation # and how it is scheduled. # If you run the tutorial in a Jupyter notebook, you can use the following commented lines # to render SVG figures showing in notebook directly. # tedd.viz_dataflow_graph(s, dot_file_path="/tmp/dfg.dot") # tedd.viz_dataflow_graph(s, show_svg = True) ###################################################################### # .. image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/tedd_dfg.png # :align: center # # The first one is a dataflow graph. Every node represents a stage with name and memory # scope shown in the middle and inputs/outputs information on the sides. # Edges show nodes' dependency. # tedd.viz_schedule_tree(s, dot_file_path="/tmp/scheduletree.dot") # tedd.viz_schedule_tree(s, show_svg = True) ###################################################################### # We just rendered the schedule tree graph. 
You may notice an warning about ranges not # available. # The message also suggests to call normalize() to infer range information. We will # skip inspecting the first schedule tree and encourage you to compare the graphs before # and after normalize() for its impact. # s = s.normalize() tedd.viz_schedule_tree(s, dot_file_path="/tmp/scheduletree2.dot") # tedd.viz_schedule_tree(s, show_svg = True) ###################################################################### # .. image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/tedd_st.png # :align: center # # Now, let us take a close look at the second schedule tree. Every block under ROOT # represents a # stage. Stage name shows in the top row and compute shows in the bottom row. # The middle rows are for IterVars, the higher the outer, the lower the inner. # An IterVar row contains its index, name, type, and other optional information. # Let's use the W.shared stage as an example. The top row tells # its name, "W.shared", and memory scope, "Shared". Its compute is # :code:`W(ax0, ax1, ax2, ax3)`. # Its outer most loop IterVar is ax0.ax1.fused.ax2.fused.ax3.fused.outer, # indexed with 0, of kDataPar, bound to threadIdx.y, and with range(min=0, ext=8). # You can also tell # IterVar type with the index box color, shown in the legend. # # If a stage doesn't compute_at any other stage, it has an edge directly to the # ROOT node. Otherwise, it has an edge pointing to the IterVar it attaches to, # such as W.shared attaches to rx.outer in the middle compute stage. # ###################################################################### # .. note:: # # By definition, IterVars are internal nodes and computes are leaf nodes in # a schedule tree. The edges among IterVars and compute within one stage are # omitted, making every stage a block, for better readability. 
# tedd.viz_itervar_relationship_graph(s, dot_file_path="/tmp/itervar.dot") # tedd.viz_itervar_relationship_graph(s, show_svg = True) ###################################################################### # .. image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/tedd_itervar_rel.png # :align: center # # The last one is an IterVar Relationship Graph. Every subgraph represents a # stage and contains IterVar nodes and transformation nodes. For example, # W.shared has three split nodes and three fuse nodes. The rest are IterVar # nodes of the same format as the IterVar rows in Schedule Trees. Root # IterVars are those not driven by any transformation node, such as ax0; leaf # IterVars don't drive any transformation node and have non-negative indices, # such as ax0.ax1.fused.ax2.fused.ax3.fused.outer with index of 0. # ###################################################################### # Summary # ------- # This tutorial demonstrates the usage of TEDD. We use an example built # with TOPI to show the schedules under the hood. You can also use # it before and after any schedule primitive to inspect its effect. #
6,761
40.740741
90
py
tvm
tvm-main/gallery/how_to/extend_tvm/bring_your_own_datatypes.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Bring Your Own Datatypes to TVM =============================== **Authors**: `Gus Smith <https://github.com/gussmith23>`_, `Andrew Liu <https://github.com/hypercubestart>`_ In this tutorial, we will show you how to utilize the Bring Your Own Datatypes framework to use your own custom datatypes in TVM. Note that the Bring Your Own Datatypes framework currently only handles **software emulated versions of datatypes**. The framework does not support compiling for custom accelerator datatypes out-of-the-box. Datatype Libraries ------------------ The Bring Your Own Datatypes allows users to register their own datatype implementations alongside TVM's native datatypes (such as ``float``). In the wild, these datatype implementations often appear as libraries. For example: - `libposit <https://github.com/cjdelisle/libposit>`_, a posit library - `Stillwater Universal <https://github.com/stillwater-sc/universal>`_, a library with posits, fixed-point numbers, and other types - `SoftFloat <https://github.com/ucb-bar/berkeley-softfloat-3>`_, Berkeley's software implementation of IEEE 754 floating-point The Bring Your Own Datatypes enables users to plug these datatype implementations into TVM! 
In this section, we will use an example library we have already implemented, located at ``3rdparty/byodt/myfloat.cc``. This datatype, which we dubbed "myfloat", is really just a IEE-754 float under-the-hood, but it serves a useful example to show that any datatype can be used in the BYODT framework. Setup ----- Since we do not use any 3rdparty library, there is no setup needed. If you would like to try this with your own datatype library, first bring the library's functions into the process space with ``CDLL``: .. code-block:: python ctypes.CDLL('my-datatype-lib.so', ctypes.RTLD_GLOBAL) """ ###################### # A Simple TVM Program # -------------------- # # We'll begin by writing a simple program in TVM; afterwards, we will re-write it to use custom datatypes. import tvm from tvm import relay # Our basic program: Z = X + Y x = relay.var("x", shape=(3,), dtype="float32") y = relay.var("y", shape=(3,), dtype="float32") z = x + y program = relay.Function([x, y], z) module = tvm.IRModule.from_expr(program) ###################################################################### # Now, we create random inputs to feed into this program using numpy: import numpy as np np.random.seed(23) # for reproducibility x_input = np.random.rand(3).astype("float32") y_input = np.random.rand(3).astype("float32") print("x: {}".format(x_input)) print("y: {}".format(y_input)) ###################################################################### # Finally, we're ready to run the program: z_output = relay.create_executor(mod=module).evaluate()(x_input, y_input) print("z: {}".format(z_output)) ###################################################################### # Adding Custom Datatypes # ----------------------- # Now, we will do the same, but we will use a custom datatype for our intermediate computation. # # We use the same input variables ``x`` and ``y`` as above, but before adding ``x + y``, we first cast both ``x`` and ``y`` to a custom datatype via the ``relay.cast(...)`` call. 
# # Note how we specify the custom datatype: we indicate it using the special ``custom[...]`` syntax. # Additionally, note the "32" after the datatype: this is the bitwidth of the custom datatype. This tells TVM that each instance of ``myfloat`` is 32 bits wide. try: with tvm.transform.PassContext(config={"tir.disable_vectorize": True}): x_myfloat = relay.cast(x, dtype="custom[myfloat]32") y_myfloat = relay.cast(y, dtype="custom[myfloat]32") z_myfloat = x_myfloat + y_myfloat z = relay.cast(z_myfloat, dtype="float32") except tvm.TVMError as e: # Print last line of error print(str(e).split("\n")[-1]) ###################################################################### # Trying to generate this program throws an error from TVM. # TVM does not know how to handle any custom datatype out of the box! # We first have to register the custom type with TVM, giving it a name and a type code: tvm.target.datatype.register("myfloat", 150) ###################################################################### # Note that the type code, 150, is currently chosen manually by the user. # See ``TVMTypeCode::kCustomBegin`` in `include/tvm/runtime/c_runtime_api.h <https://github.com/apache/tvm/blob/main/include/tvm/runtime/data_type.h>`_. # Now we can generate our program again: x_myfloat = relay.cast(x, dtype="custom[myfloat]32") y_myfloat = relay.cast(y, dtype="custom[myfloat]32") z_myfloat = x_myfloat + y_myfloat z = relay.cast(z_myfloat, dtype="float32") program = relay.Function([x, y], z) module = tvm.IRModule.from_expr(program) module = relay.transform.InferType()(module) ###################################################################### # Now we have a Relay program that uses myfloat! print(program) ###################################################################### # Now that we can express our program without errors, let's try running it! 
try: with tvm.transform.PassContext(config={"tir.disable_vectorize": True}): z_output_myfloat = relay.create_executor("graph", mod=module).evaluate()(x_input, y_input) print("z: {}".format(y_myfloat)) except tvm.TVMError as e: # Print last line of error print(str(e).split("\n")[-1]) ###################################################################### # Now, trying to compile this program throws an error. # Let's dissect this error. # # The error is occurring during the process of lowering the custom datatype code to code that TVM can compile and run. # TVM is telling us that it cannot find a *lowering function* for the ``Cast`` operation, when casting from source type 2 (``float``, in TVM), to destination type 150 (our custom datatype). # When lowering custom datatypes, if TVM encounters an operation over a custom datatype, it looks for a user-registered *lowering function*, which tells it how to lower the operation to an operation over datatypes it understands. # We have not told TVM how to lower ``Cast`` operations for our custom datatypes; thus, the source of this error. # # To fix this error, we simply need to specify a lowering function: tvm.target.datatype.register_op( tvm.target.datatype.create_lower_func( { (32, 32): "FloatToCustom32", # cast from float32 to myfloat32 } ), "Cast", "llvm", "float", "myfloat", ) ###################################################################### # The ``register_op(...)`` call takes a lowering function, and a number of parameters which specify exactly the operation which should be lowered with the provided lowering function. # In this case, the arguments we pass specify that this lowering function is for lowering a ``Cast`` from ``float`` to ``myfloat`` for target ``"llvm"``. # # The lowering function passed into this call is very general: it should take an operation of the specified type (in this case, `Cast`) and return another operation which only uses datatypes which TVM understands. 
# # In the general case, we expect users to implement operations over their custom datatypes using calls to an external library. # In our example, our ``myfloat`` library implements a ``Cast`` from ``float`` to 32-bit ``myfloat`` in the function ``FloatToCustom32``. # To provide for the general case, we have made a helper function, ``create_lower_func(...)``, # which does just this: given a dictionary, it replaces the given operation with a ``Call`` to the appropriate function name provided based on the op and the bit widths. # It additionally removes usages of the custom datatype by storing the custom datatype in an opaque ``uint`` of the appropriate width; in our case, a ``uint32_t``. # For more information, see `the source code <https://github.com/apache/tvm/blob/main/python/tvm/target/datatype.py>`_. # We can now re-try running the program: try: with tvm.transform.PassContext(config={"tir.disable_vectorize": True}): z_output_myfloat = relay.create_executor("graph", mod=module).evaluate()(x_input, y_input) print("z: {}".format(z_output_myfloat)) except tvm.TVMError as e: # Print last line of error print(str(e).split("\n")[-1]) ###################################################################### # This new error tells us that the ``Add`` lowering function is not found, which is good news, as it's no longer complaining about the ``Cast``! # We know what to do from here: we just need to register the lowering functions for the other operations in our program. # # Note that for ``Add``, ``create_lower_func`` takes in a dict where the key is an integer. # For ``Cast`` operations, we require a 2-tuple to specify the ``src_bit_length`` and the ``dest_bit_length``, # while for all other operations, the bit length is the same between the operands so we only require one integer to specify ``bit_length``. 
tvm.target.datatype.register_op( tvm.target.datatype.create_lower_func({32: "Custom32Add"}), "Add", "llvm", "myfloat", ) tvm.target.datatype.register_op( tvm.target.datatype.create_lower_func({(32, 32): "Custom32ToFloat"}), "Cast", "llvm", "myfloat", "float", ) # Now, we can run our program without errors. with tvm.transform.PassContext(config={"tir.disable_vectorize": True}): z_output_myfloat = relay.create_executor(mod=module).evaluate()(x_input, y_input) print("z: {}".format(z_output_myfloat)) print("x:\t\t{}".format(x_input)) print("y:\t\t{}".format(y_input)) print("z (float32):\t{}".format(z_output)) print("z (myfloat32):\t{}".format(z_output_myfloat)) # Perhaps as expected, the ``myfloat32`` results and ``float32`` are exactly the same! ###################################################################### # Running Models With Custom Datatypes # ------------------------------------ # # We will first choose the model which we would like to run with myfloat. # In this case we use `Mobilenet <https://arxiv.org/abs/1704.04861>`_. # We choose Mobilenet due to its small size. # In this alpha state of the Bring Your Own Datatypes framework, we have not implemented any software optimizations for running software emulations of custom datatypes; the result is poor performance due to many calls into our datatype emulation library. # # First let us define two helper functions to get the mobilenet model and a cat image. 
def get_mobilenet(): dshape = (1, 3, 224, 224) from mxnet.gluon.model_zoo.vision import get_model block = get_model("mobilenet0.25", pretrained=True) shape_dict = {"data": dshape} return relay.frontend.from_mxnet(block, shape_dict) def get_cat_image(): from tvm.contrib.download import download_testdata from PIL import Image url = "https://gist.githubusercontent.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png" dst = "cat.png" real_dst = download_testdata(url, dst, module="data") img = Image.open(real_dst).resize((224, 224)) # CoreML's standard model image format is BGR img_bgr = np.array(img)[:, :, ::-1] img = np.transpose(img_bgr, (2, 0, 1))[np.newaxis, :] return np.asarray(img, dtype="float32") module, params = get_mobilenet() ###################################################################### # It's easy to execute MobileNet with native TVM: ex = tvm.relay.create_executor("graph", mod=module, params=params) input = get_cat_image() result = ex.evaluate()(input).numpy() # print first 10 elements print(result.flatten()[:10]) ###################################################################### # Now, we would like to change the model to use myfloat internally. To do so, we need to convert the network. 
To do this, we first define a function which will help us convert tensors: def convert_ndarray(dst_dtype, array): """Converts an NDArray into the specified datatype""" x = relay.var("x", shape=array.shape, dtype=str(array.dtype)) cast = relay.Function([x], x.astype(dst_dtype)) with tvm.transform.PassContext(config={"tir.disable_vectorize": True}): return relay.create_executor("graph").evaluate(cast)(array) ###################################################################### # Now, to actually convert the entire network, we have written `a pass in Relay <https://github.com/gussmith23/tvm/blob/ea174c01c54a2529e19ca71e125f5884e728da6e/python/tvm/relay/frontend/change_datatype.py#L21>`_ which simply converts all nodes within the model to use the new datatype. from tvm.relay.frontend.change_datatype import ChangeDatatype src_dtype = "float32" dst_dtype = "custom[myfloat]32" module = relay.transform.InferType()(module) # Currently, custom datatypes only work if you run simplify_inference beforehand module = tvm.relay.transform.SimplifyInference()(module) # Run type inference before changing datatype module = tvm.relay.transform.InferType()(module) # Change datatype from float to myfloat and re-infer types cdtype = ChangeDatatype(src_dtype, dst_dtype) expr = cdtype.visit(module["main"]) module = tvm.relay.transform.InferType()(module) # We also convert the parameters: params = {k: convert_ndarray(dst_dtype, v) for k, v in params.items()} # We also need to convert our input: input = convert_ndarray(dst_dtype, input) # Finally, we can try to run the converted model: try: # Vectorization is not implemented with custom datatypes. 
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}): result_myfloat = tvm.relay.create_executor("graph", mod=module).evaluate(expr)( input, **params ) except tvm.TVMError as e: print(str(e).split("\n")[-1]) ###################################################################### # When we attempt to run the model, we get a familiar error telling us that more functions need to be registered for myfloat. # # Because this is a neural network, many more operations are required. # Here, we register all the needed functions: tvm.target.datatype.register_op( tvm.target.datatype.create_lower_func({32: "FloatToCustom32"}), "FloatImm", "llvm", "myfloat", ) tvm.target.datatype.register_op( tvm.target.datatype.lower_ite, "Call", "llvm", "myfloat", intrinsic_name="tir.if_then_else" ) tvm.target.datatype.register_op( tvm.target.datatype.lower_call_pure_extern, "Call", "llvm", "myfloat", intrinsic_name="tir.call_pure_extern", ) tvm.target.datatype.register_op( tvm.target.datatype.create_lower_func({32: "Custom32Mul"}), "Mul", "llvm", "myfloat", ) tvm.target.datatype.register_op( tvm.target.datatype.create_lower_func({32: "Custom32Div"}), "Div", "llvm", "myfloat", ) tvm.target.datatype.register_op( tvm.target.datatype.create_lower_func({32: "Custom32Sqrt"}), "Call", "llvm", "myfloat", intrinsic_name="tir.sqrt", ) tvm.target.datatype.register_op( tvm.target.datatype.create_lower_func({32: "Custom32Sub"}), "Sub", "llvm", "myfloat", ) tvm.target.datatype.register_op( tvm.target.datatype.create_lower_func({32: "Custom32Exp"}), "Call", "llvm", "myfloat", intrinsic_name="tir.exp", ) tvm.target.datatype.register_op( tvm.target.datatype.create_lower_func({32: "Custom32Max"}), "Max", "llvm", "myfloat", ) tvm.target.datatype.register_min_func( tvm.target.datatype.create_min_lower_func({32: "MinCustom32"}, "myfloat"), "myfloat", ) ###################################################################### # Note we are making use of two new functions: ``register_min_func`` and 
``create_min_lower_func``. # # ``register_min_func`` takes in an integer ``num_bits`` for the bit length, and should return an operation # representing the minimum finite representable value for the custom data type with the specified bit length. # # Similar to ``register_op`` and ``create_lower_func``, the ``create_min_lower_func`` handles the general case # where the minimum representable custom datatype value is implemented using calls to an external library. # # Now we can finally run the model: # Vectorization is not implemented with custom datatypes. with tvm.transform.PassContext(config={"tir.disable_vectorize": True}): result_myfloat = relay.create_executor(mod=module).evaluate(expr)(input, **params) result_myfloat = convert_ndarray(src_dtype, result_myfloat).numpy() # print first 10 elements print(result_myfloat.flatten()[:10]) # Again, note that the output using 32-bit myfloat exactly the same as 32-bit floats, # because myfloat is exactly a float! np.testing.assert_array_equal(result, result_myfloat)
17,598
41.92439
286
py
tvm
tvm-main/gallery/how_to/extend_tvm/use_pass_instrument.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=line-too-long """ .. _tutorial-use-pass-instrument: How to Use TVM Pass Instrument ============================== **Author**: `Chi-Wei Wang <https://github.com/chiwwang>`_ As more and more passes are implemented, it becomes useful to instrument pass execution, analyze per-pass effects, and observe various events. We can instrument passes by providing a list of :py:class:`tvm.ir.instrument.PassInstrument` instances to :py:class:`tvm.transform.PassContext`. We provide a pass instrument for collecting timing information (:py:class:`tvm.ir.instrument.PassTimingInstrument`), but an extension mechanism is available via the :py:func:`tvm.instrument.pass_instrument` decorator. This tutorial demonstrates how developers can use ``PassContext`` to instrument passes. Please also refer to the :ref:`pass-infra`. 
""" import tvm import tvm.relay as relay from tvm.relay.testing import resnet from tvm.contrib.download import download_testdata from tvm.relay.build_module import bind_params_by_name from tvm.ir.instrument import ( PassTimingInstrument, pass_instrument, ) ############################################################################### # Create An Example Relay Program # ------------------------------- # We use pre-defined resnet-18 network in Relay. batch_size = 1 num_of_image_class = 1000 image_shape = (3, 224, 224) output_shape = (batch_size, num_of_image_class) relay_mod, relay_params = resnet.get_workload(num_layers=18, batch_size=1, image_shape=image_shape) print("Printing the IR module...") print(relay_mod.astext(show_meta_data=False)) ############################################################################### # Create PassContext With Instruments # ----------------------------------- # To run all passes with an instrument, pass it via the ``instruments`` argument to # the ``PassContext`` constructor. A built-in ``PassTimingInstrument`` is used to # profile the execution time of each passes. timing_inst = PassTimingInstrument() with tvm.transform.PassContext(instruments=[timing_inst]): relay_mod = relay.transform.InferType()(relay_mod) relay_mod = relay.transform.FoldScaleAxis()(relay_mod) # before exiting the context, get profile results. profiles = timing_inst.render() print("Printing results of timing profile...") print(profiles) ############################################################################### # Use Current PassContext With Instruments # ---------------------------------------- # One can also use the current ``PassContext`` and register # ``PassInstrument`` instances by ``override_instruments`` method. # Note that ``override_instruments`` executes ``exit_pass_ctx`` method # if any instrument already exists. Then it switches to new instruments # and calls ``enter_pass_ctx`` method of new instruments. 
# Refer to following sections and :py:func:`tvm.instrument.pass_instrument` for these methods. cur_pass_ctx = tvm.transform.PassContext.current() cur_pass_ctx.override_instruments([timing_inst]) relay_mod = relay.transform.InferType()(relay_mod) relay_mod = relay.transform.FoldScaleAxis()(relay_mod) profiles = timing_inst.render() print("Printing results of timing profile...") print(profiles) ############################################################################### # Register empty list to clear existing instruments. # # Note that ``exit_pass_ctx`` of ``PassTimingInstrument`` is called. # Profiles are cleared so nothing is printed. cur_pass_ctx.override_instruments([]) # Uncomment the call to .render() to see a warning like: # Warning: no passes have been profiled, did you enable pass profiling? # profiles = timing_inst.render() ############################################################################### # Create Customized Instrument Class # ---------------------------------- # A customized instrument class can be created using the # :py:func:`tvm.instrument.pass_instrument` decorator. # # Let's create an instrument class which calculates the change in number of # occurrences of each operator caused by each pass. We can look at ``op.name`` to # find the name of each operator. And we do this before and after passes to calculate the difference. @pass_instrument class RelayCallNodeDiffer: def __init__(self): self._op_diff = [] # Passes can be nested. # Use stack to make sure we get correct before/after pairs. self._op_cnt_before_stack = [] def enter_pass_ctx(self): self._op_diff = [] self._op_cnt_before_stack = [] def exit_pass_ctx(self): assert len(self._op_cnt_before_stack) == 0, "The stack is not empty. Something wrong." def run_before_pass(self, mod, info): self._op_cnt_before_stack.append((info.name, self._count_nodes(mod))) def run_after_pass(self, mod, info): # Pop out the latest recorded pass. 
name_before, op_to_cnt_before = self._op_cnt_before_stack.pop() assert name_before == info.name, "name_before: {}, info.name: {} doesn't match".format( name_before, info.name ) cur_depth = len(self._op_cnt_before_stack) op_to_cnt_after = self._count_nodes(mod) op_diff = self._diff(op_to_cnt_after, op_to_cnt_before) # only record passes causing differences. if op_diff: self._op_diff.append((cur_depth, info.name, op_diff)) def get_pass_to_op_diff(self): """ return [ (depth, pass_name, {op_name: diff_num, ...}), ... ] """ return self._op_diff @staticmethod def _count_nodes(mod): """Count the number of occurrences of each operator in the module""" ret = {} def visit(node): if isinstance(node, relay.expr.Call): if hasattr(node.op, "name"): op_name = node.op.name else: # Some CallNode may not have 'name' such as relay.Function return ret[op_name] = ret.get(op_name, 0) + 1 relay.analysis.post_order_visit(mod["main"], visit) return ret @staticmethod def _diff(d_after, d_before): """Calculate the difference of two dictionary along their keys. The result is values in d_after minus values in d_before. """ ret = {} key_after, key_before = set(d_after), set(d_before) for k in key_before & key_after: tmp = d_after[k] - d_before[k] if tmp: ret[k] = d_after[k] - d_before[k] for k in key_after - key_before: ret[k] = d_after[k] for k in key_before - key_after: ret[k] = -d_before[k] return ret ############################################################################### # Apply Passes and Multiple Instrument Classes # -------------------------------------------- # We can use multiple instrument classes in a ``PassContext``. # However, it should be noted that instrument methods are executed sequentially, # obeying the order of ``instruments`` argument. # So for instrument classes like ``PassTimingInstrument``, it is inevitable to # count-up the execution time of other instrument classes to the final # profile result. 
call_node_inst = RelayCallNodeDiffer() desired_layouts = { "nn.conv2d": ["NHWC", "HWIO"], } pass_seq = tvm.transform.Sequential( [ relay.transform.FoldConstant(), relay.transform.ConvertLayout(desired_layouts), relay.transform.FoldConstant(), ] ) relay_mod["main"] = bind_params_by_name(relay_mod["main"], relay_params) # timing_inst is put after call_node_inst. # So the execution time of ``call_node.inst.run_after_pass()`` is also counted. with tvm.transform.PassContext(opt_level=3, instruments=[call_node_inst, timing_inst]): relay_mod = pass_seq(relay_mod) profiles = timing_inst.render() # Uncomment the next line to see timing-profile results. # print(profiles) ############################################################################### # We can see how many CallNode increase/decrease per op type. from pprint import pprint print("Printing the change in number of occurrences of each operator caused by each pass...") pprint(call_node_inst.get_pass_to_op_diff()) ############################################################################### # Exception Handling # ------------------ # Let's see what happens if an exception occurs in a method of a ``PassInstrument``. 
# # Define ``PassInstrument`` classes which raise exceptions in enter/exit ``PassContext``: class PassExampleBase: def __init__(self, name): self._name = name def enter_pass_ctx(self): print(self._name, "enter_pass_ctx") def exit_pass_ctx(self): print(self._name, "exit_pass_ctx") def should_run(self, mod, info): print(self._name, "should_run") return True def run_before_pass(self, mod, pass_info): print(self._name, "run_before_pass") def run_after_pass(self, mod, pass_info): print(self._name, "run_after_pass") @pass_instrument class PassFine(PassExampleBase): pass @pass_instrument class PassBadEnterCtx(PassExampleBase): def enter_pass_ctx(self): print(self._name, "bad enter_pass_ctx!!!") raise ValueError("{} bad enter_pass_ctx".format(self._name)) @pass_instrument class PassBadExitCtx(PassExampleBase): def exit_pass_ctx(self): print(self._name, "bad exit_pass_ctx!!!") raise ValueError("{} bad exit_pass_ctx".format(self._name)) ############################################################################### # If an exception occurs in ``enter_pass_ctx``, ``PassContext`` will disable the pass # instrumentation. And it will run the ``exit_pass_ctx`` of each ``PassInstrument`` # which successfully finished ``enter_pass_ctx``. # # In following example, we can see ``exit_pass_ctx`` of `PassFine_0` is executed after exception. demo_ctx = tvm.transform.PassContext( instruments=[ PassFine("PassFine_0"), PassBadEnterCtx("PassBadEnterCtx"), PassFine("PassFine_1"), ] ) try: with demo_ctx: relay_mod = relay.transform.InferType()(relay_mod) except ValueError as ex: print("Catching", str(ex).split("\n")[-1]) ############################################################################### # Exceptions in ``PassInstrument`` instances cause all instruments of the current ``PassContext`` # to be cleared, so nothing is printed when ``override_instruments`` is called. 
demo_ctx.override_instruments([]) # no PassFine_0 exit_pass_ctx printed....etc ############################################################################### # If an exception occurs in ``exit_pass_ctx``, then the pass instrument is disabled. # Then exception is propagated. That means ``PassInstrument`` instances registered # after the one throwing the exception do not execute ``exit_pass_ctx``. demo_ctx = tvm.transform.PassContext( instruments=[ PassFine("PassFine_0"), PassBadExitCtx("PassBadExitCtx"), PassFine("PassFine_1"), ] ) try: # PassFine_1 execute enter_pass_ctx, but not exit_pass_ctx. with demo_ctx: relay_mod = relay.transform.InferType()(relay_mod) except ValueError as ex: print("Catching", str(ex).split("\n")[-1]) ############################################################################### # Exceptions occurred in ``should_run``, ``run_before_pass``, ``run_after_pass`` # are not handled explicitly -- we rely on the context manager (the ``with`` syntax) # to exit ``PassContext`` safely. # # We use ``run_before_pass`` as an example: @pass_instrument class PassBadRunBefore(PassExampleBase): def run_before_pass(self, mod, pass_info): print(self._name, "bad run_before_pass!!!") raise ValueError("{} bad run_before_pass".format(self._name)) demo_ctx = tvm.transform.PassContext( instruments=[ PassFine("PassFine_0"), PassBadRunBefore("PassBadRunBefore"), PassFine("PassFine_1"), ] ) try: # All exit_pass_ctx are called. with demo_ctx: relay_mod = relay.transform.InferType()(relay_mod) except ValueError as ex: print("Catching", str(ex).split("\n")[-1]) ############################################################################### # Also note that pass instrumentation is not disable. So if we call # ``override_instruments``, the ``exit_pass_ctx`` of old registered ``PassInstrument`` # is called. 
demo_ctx.override_instruments([]) ############################################################################### # If we don't wrap pass execution with ``with`` syntax, ``exit_pass_ctx`` is not # called. Let try this with current ``PassContext``: cur_pass_ctx = tvm.transform.PassContext.current() cur_pass_ctx.override_instruments( [ PassFine("PassFine_0"), PassBadRunBefore("PassBadRunBefore"), PassFine("PassFine_1"), ] ) ############################################################################### # Then call passes. ``exit_pass_ctx`` is not executed after the exception, # as expectation. try: # No ``exit_pass_ctx`` got executed. relay_mod = relay.transform.InferType()(relay_mod) except ValueError as ex: print("Catching", str(ex).split("\n")[-1]) ############################################################################### # Clear instruments. cur_pass_ctx.override_instruments([])
14,251
37.106952
101
py
tvm
tvm-main/gallery/how_to/extend_tvm/use_pass_infra.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=line-too-long """ .. _tutorial-use-pass-infra: How to Use TVM Pass Infra ========================= **Author**: `Zhi Chen <https://github.com/zhiics>`_ As the number of optimization passes increases in Relay/tir, it becomes intractable to execute them and maintain their dependencies manually. Therefore, we have introduced an infrastructure to manage the optimization passes and make it applicable to different layers of the IR in the TVM stack. The optimizations of a Relay/tir program could be applied at various granularity, namely function-level and module-level using :py:class:`tvm.relay.transform.FunctionPass`/ :py:class:`tvm.tir.transform.PrimFuncPass` and :py:class:`tvm.transform.ModulePass` respectively. Or users can rely on :py:class:`tvm.transform.Sequential` to apply a sequence of passes on a Relay/tir program where the dependencies between passes can be resolved by the pass infra. For more details about each type of these passes, please refer to the :ref:`pass-infra` This tutorial mainly demonstrates how developers can use the pass infra to perform a certain optimization and create an optimization pipeline for a Relay program. The same approach can be used for tir as well. 
""" import numpy as np import tvm from tvm import te import tvm.relay as relay ############################################################################### # Create An Example Relay Program # ------------------------------- # First of all, we create a simple Relay program for the tutorial. This program # will be used by various optimizations of the examples in this tutorial. # Similarly, users can write a tir primitive function and apply the tir passes. def example(): shape = (1, 64, 54, 54) c_data = np.empty(shape).astype("float32") c = relay.const(c_data) weight = relay.var("weight", shape=(64, 64, 3, 3)) x = relay.var("x", relay.TensorType((1, 64, 56, 56), "float32")) conv = relay.nn.conv2d(x, weight) y = relay.add(c, c) y = relay.multiply(y, relay.const(2, "float32")) y = relay.add(conv, y) z = relay.add(y, c) z1 = relay.add(y, c) z2 = relay.add(z, z1) return relay.Function([x, weight], z2) ############################################################################### # Optimize the Program # -------------------- # Now we would like to optimize the program. Relay features a host of # optimizations. We will select some of them to apply on this example program. # # There are multiple ways to optimize a Relay program. Below we will provide # examples for each of them. # # Manually Apply Optimization Passes # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Let's first create a relay Module which contains one or multiple Relay # functions for optimization. f = example() mod = tvm.IRModule.from_expr(f) # Now we can apply constant folding on the module. # fold_const here is a callback that doesn't take any parameters. fold_const = relay.transform.FoldConstant() # Then, we can invoke the pass on the given module. Note that the constant # folding pass works at the function-level. That being said, each function in # the module will be applied with the optimization. Users don't need to iterate # through individual functions manually to apply this pass. 
mod = fold_const(mod) # We can see from the updated program that the constants are folded. print(mod) ############################################################################### # More optimizations can be applied in the similar manner. For instance, we can # eliminate the common expressions that used by `z` and `z1`. mod = relay.transform.EliminateCommonSubexpr()(mod) print(mod) ############################################################################### # Some optimizations, such as fusion, are parametric as well. For example, # opt level 0 will not allow operators to be fused together. Users can pass the # `fuse_opt_level` to enable this. mod = relay.transform.FuseOps(fuse_opt_level=0)(mod) # We can observe that the optimized module contains functions that only have # a signle primitive op. print(mod) ############################################################################### # Use Sequential to Apply a Sequence of Passes # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Applying passes as above is actually tedious and it may require users to have # better understanding about the dependencies between them. For example, fusion # currently doesn't work well on let bindings. Therefore, we would not be able # to fuse operators that were fusable if :py:func:`relay.transform.ToANormalForm` is applied before # fusion, as this pass generates let bindings for each expression to # canonicalize a Relay program. # # Relay, hence, provides :py:class:`tvm.transform.Sequential` to alleviate developers from handling # these issues explicitly by specifying the required passes of each pass and # packing them as a whole to execute. For example, the same passes can now be # applied using the sequential style as the following. :py:class:`tvm.transform.Sequential` is # similar to `torch.nn.sequential <https://pytorch.org/docs/stable/nn.html#torch.nn.Sequential>`_ # and `mxnet.gluon.block <https://mxnet.apache.org/api/python/docs/_modules/mxnet/gluon/block.html>`_. 
# For example, `torch.nn.sequential` is used to contain a sequence of PyTorch # `Modules` that will be added to build a network. It focuses on the network # layers. Instead, the :py:class:`tvm.transform.Sequential` in our pass infra works on the optimizing # pass. # Now let's execute some passes through :py:class:`tvm.transform.Sequential` f = example() mod = tvm.IRModule.from_expr(f) # Glob the interested passes. seq = tvm.transform.Sequential( [ relay.transform.FoldConstant(), relay.transform.EliminateCommonSubexpr(), relay.transform.FuseOps(fuse_opt_level=2), ] ) mod1 = seq(mod) print(mod1) ############################################################################### # From the transformed Relay program, we can see that there are still two # identical addition operations. This is because ``EliminateCommonSubexpr`` # was not actually performed. The reason is because only the passes that have # optimization level less or equal to 2 will be executed by default under # :py:class:`tvm.transform.Sequential`. The pass infra, # however, provides a configuration interface # for users to customize the optimization level that they want to execute. with tvm.transform.PassContext(opt_level=3): mod2 = seq(mod) print(mod2) ############################################################################### # Now we can see that only one of the two identical additions is kept. # # In addition, users can selectively disable some passes using the # `disabled_pass` config, which is similar to the `-fno-xxx` option used the # general purpose compilers, such as Clang and GCC. For example, we can disable # EliminateCommonSubexpr as following. The printed module will again show two # identical addition operations. 
with tvm.transform.PassContext(opt_level=3, disabled_pass=["EliminateCommonSubexpr"]): mod3 = seq(mod) print(mod3) ############################################################################## # Implement a Pass Using Python Decorator # ------------------------------------------ # The next example illustrates how we can orchestrate a customized optimization # pipeline through the pass infra using Python decorators. This functionality # greatly eases the implementation of passes. For example, users can simply # define a decorated class to do function-level optimizations as the following # example shows. `transform_function` wraps a class to replace all constants # with a multiple of `c`. Later on, each function in a given module will be # visited and each constant in the function will be replaced when we invoke the # customized pass. @relay.transform.function_pass(opt_level=1) class CustomPipeline: """Simple test function to replace one argument to another.""" def __init__(self, multiplier): self.multiplier = multiplier # This function can define a pass. def transform_function(self, func, mod, ctx): obj = self class ReplaceConstant(tvm.relay.ExprMutator): def visit_constant(self, c): return relay.multiply(obj.multiplier, c) return ReplaceConstant().visit(func) f = example() mod = tvm.IRModule.from_expr(f) custom_pass = CustomPipeline(multiplier=relay.const(3, "float32")) assert custom_pass.info.name == "CustomPipeline" mod3 = custom_pass(mod) print(mod3) ############################################################################## # Debug a Pass # ------------ # TVM provides users a plug-and-play style debugging pass that print the IR # after a certain pass is done through a special pass (``PrintIR``) to dump the IR of the # whole module. A slightly modified version of the sequential pass example # could be like the following to enable IR dumping for ``FoldConstant`` optimization. 
f = example() mod = tvm.IRModule.from_expr(f) seq = tvm.transform.Sequential( [ relay.transform.FoldConstant(), tvm.transform.PrintIR(), relay.transform.EliminateCommonSubexpr(), relay.transform.FuseOps(), ] ) ############################################################################### # By inserting the ``PrintIR`` pass after ``FoldConstant``, the pass infra will # dump out the module IR when ``FoldConstant`` is done. Users can plug in this # pass after any pass they want to debug for viewing the optimization effect. # # There is a more flexible debugging mechanism. One can implement a ``PassInstrument`` # class to execute arbitrary code not only before and/or after each pass but also # at entering/exiting ``PassContext``. See :ref:`pass_instrument_cpp_backend` # for more details. # # Here we use :py::func`tvm.instrument.pass_instrument` decorator to implement # a PassInsturment class printing IR before execution of each passes: @tvm.instrument.pass_instrument class PrintIR: """Print the name of the pass, the IR, only before passes execute.""" def run_before_pass(self, mod, info): print("Running pass: {}", info) print(mod) with tvm.transform.PassContext(opt_level=3, instruments=[PrintIR()]): with tvm.target.Target("llvm"): # Perform the optimizations. mod = seq(mod) print(mod) print("done") ############################################################################## # Summary # ------- # This tutorial has covered how we can write and invoke passes in TVM more # conveniently using the pass infra. Different ways of invoking a pass are also # discussed. Using :py:class:`tvm.transform.Sequential` can largely help # users to ease the work of handling multiple optimization passes and their # dependencies. In addition, an example is provided to illustrate # how we can debug a pass using the ``PrintIR`` and tracing.
11,740
41.694545
102
py
tvm
tvm-main/gallery/how_to/extend_tvm/low_level_custom_pass.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Writing a Customized Pass ========================= **Author**: `Jian Weng <https://were.github.io>`_ TVM is a framework that abstracts away the heterogenity of machine learning accelerators. Sometimes users may want customize some analysis and IR transformations to adapt TVM to their own specialized hardware. This tutorial helps users write a customized pass in TVM. Prerequisites ------------- Before reading this tutorial, we assume readers have already known these topics well: - Writing an algorithm in TVM and schedule it. Otherwise, see example tutorials like :ref:`opt-gemm`. - The basic structure of HalideIR. Otherwise, see ``HalideIR/src/ir/IR.h`` to learn what attributes of IR nodes are defined. - Visitor design pattern. Otherwise, check the `Python AST module <https://docs.python.org/3/library/ast.html>`_ to see how an AST visitor is implemented. - How a Schedule is lowered to either an IRModule class or a LLVM module. Otherwise, take a look at ``python/tvm/build_module.py`` to get some basics. 
""" import tvm from tvm import te import numpy as np ###################################################################### # We first write a very simple vector add and build it with the default schedule. Then, we use # our customized lowering pass to manipulate the IR directly instead of using schedule primitives. # n = tvm.tir.const(128, "int32") a = te.placeholder((n,), name="a") b = te.placeholder((n,), name="b") c = te.compute((n,), lambda i: a[i] + b[i], name="c") sch = te.create_schedule(c.op) ir = tvm.lower(sch, [a, b, c]) print(ir) ###################################################################### # Writing a Pass # -------------- # Essentially, an "IR transformation pass" is a function which maps a statement to a new statement. # Thus, we define this vectorize function and implement it step by step. # ###################################################################### # TVM already provides two class for users to both analyze and transform IR. # # IR Visitor # ~~~~~~~~~~ # We can use ``tvm.tir.stmt_functor.post_order_visit(stmt, func)`` to gather information from the Halide IR. # ``func`` is a function callback. This function will be called before exiting the current IR node, # i.e. post-order visit. Then we leverage side effects to store the result of IR visit, because the # return value of ``func`` will be ignored. # # .. note:: # # You MUST use some array to store the result of IR visit. Even the value is a single variable. # This is mainly due to the constraints in the Python-C runtime. The variable values will be # refreshed every recursion but the array values will be preserved. 
# loops = [] def find_width8(op): """Find all the 'tir.For' nodes whose extent can be divided by 8.""" if isinstance(op, tvm.tir.For): if isinstance(op.extent, tvm.tir.IntImm): if op.extent.value % 8 == 0: loops.append(op) ##################################################################### # IR Transformation # ~~~~~~~~~~~~~~~~~ # The transformation interface is slightly different from the visitor interface. There is only a # post-order callback in the visitor, but transformation visitor supports both a pre-order and a # post-order callback. If you want to keep the origin IR node, just return None. If you want to # change the current node to some node, use TVM IR maker interface to build it and return # this value. # # .. note:: # # If the pre-order function is called and returns a value which is not None, the post-order # function will be skipped. # def vectorize8(op): """Split can vectorize the loops found in `find_width8`.""" if op in loops: extent = op.extent.value name = op.loop_var.name lo, li = te.var(name + ".outer"), te.var(name + ".inner") body = tvm.tir.stmt_functor.substitute(op.body, {op.loop_var: lo * 8 + li}) body = tvm.tir.For(li, 0, 8, tvm.tir.ForKind.VECTORIZED, body) body = tvm.tir.For(lo, 0, extent // 8, tvm.tir.ForKind.SERIAL, body) return body return None @tvm.tir.transform.prim_func_pass(opt_level=0) def vectorize(f, mod, ctx): global loops tvm.tir.stmt_functor.post_order_visit(f.body, find_width8) if not loops: return f # The last list arugment indicates what kinds of nodes will be transformed. # Thus, in this case only `For` nodes will call `vectorize8` return f.with_body(tvm.tir.stmt_functor.ir_transform(f.body, None, vectorize8, ["tir.For"])) ##################################################################### # Glue to Lowering # ---------------- # So far, we are done with writing this IR transformation pass. What we need to do next is to glue # this pass to TVM's lower pass. 
# # In this case, we inject the pass written above into the TVM standard lowering # pass by feeding **a list of tuple** as argument to ``tir.add_lower_pass``. "Tuple" indicates different # phases of lowering. In TVM, there are four phases of lowering and user-customized ones will be # called after each phase is done. # # .. note:: # Here are the essential transformations done by each phase: # - Phase 0 generates the raw IR and loop levels. # - Phase 1 flattens the array storage. # - Phase 2 transforms loops, like unroll, vectorization and thread-binding. # - Phase 3 does some cleanup work. # # Thus, a good place to put this transformation pass is just after Phase 1. # with tvm.transform.PassContext(config={"tir.add_lower_pass": [(1, vectorize)]}): print(tvm.lower(sch, [a, b, c])) ##################################################################### # Quick View # ---------- # This tutorial gives a quick view of writing a customized IR transformation pass: # - Use ``tvm.tir.stmt_functor.post_order_visit`` to gather information on each IR nodes. # - Use ``tvm.tir.stmt_functor.ir_transform`` to transform IR nodes. # - Wrap up two above to write an IR-transformation function. # - Use ``tvm.transform.PassContext`` to put this function to TVM lowering pass #
6,882
38.786127
108
py
tvm
tvm-main/gallery/how_to/deploy_models/deploy_model_on_adreno_tvmc.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-deploy-model-on-adreno-tvmc: Deploy the Pretrained Model on Adreno™ with tvmc Interface ========================================================== **Author**: Siva Rama Krishna This article is a step-by-step tutorial to deploy pretrained Keras resnet50 model on Adreno™. Besides that, you should have TVM built for Android. See the following instructions on how to build it and setup RPC environment. `Deploy to Adreno GPU <https://tvm.apache.org/docs/how_to/deploy/adreno.html>`_ """ import os import tvm import numpy as np from tvm import relay from tvm.driver import tvmc from tvm.driver.tvmc.model import TVMCPackage from tvm.contrib import utils ################################################################# # Configuration # ------------- # Specify Adreno target before compiling to generate texture # leveraging kernels and get all the benefits of textures # Note: This generated example running on our x86 server for demonstration. # If running it on the Android device, we need to # specify its instruction set. Set :code:`local_demo` to False if you want # to run this tutorial with a real device over rpc. local_demo = True # by default on CPU target will execute. 
# select 'llvm', 'opencl' and 'opencl -device=adreno' target = "llvm" # Change target configuration. # Run `adb shell cat /proc/cpuinfo` to find the arch. arch = "arm64" target_host = "llvm -mtriple=%s-linux-android" % arch # Auto tuning is compute and time taking task, hence disabling for default run. Please enable it if required. is_tuning = False tune_log = "adreno-resnet50.log" # To enable OpenCLML accelerated operator library. enable_clml = False cross_compiler = ( os.getenv("ANDROID_NDK_HOME", "") + "/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android28-clang" ) ####################################################################### # Make a Keras Resnet50 Model # --------------------------- from tensorflow.keras.applications.resnet50 import ResNet50 tmp_path = utils.tempdir() model_file_name = tmp_path.relpath("resnet50.h5") model = ResNet50(include_top=True, weights="imagenet", input_shape=(224, 224, 3), classes=1000) model.save(model_file_name) ####################################################################### # Load Model # ---------- # Convert a model from any framework to a tvm relay module. # tvmc.load supports models from any framework (like tensorflow saves_model, onnx, tflite ..etc) and auto detects the filetype. tvmc_model = tvmc.load(model_file_name) print(tvmc_model.mod) # tvmc_model consists of tvmc_mode.mod which is relay module and tvmc_model.params which parms of the module. ####################################################################### # AutoTuning # ---------- # Now, the below api can be used for autotuning the model for any target. 
# Tuning required RPC setup and please refer to # `Deploy to Adreno GPU <https://tvm.apache.org/docs/how_to/deploy/adreno.html>`_ rpc_tracker_host = os.environ.get("TVM_TRACKER_HOST", "127.0.0.1") rpc_tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190)) rpc_key = "android" rpc_tracker = rpc_tracker_host + ":" + str(rpc_tracker_port) # Auto tuning is compute intensive and time taking task. # It is set to False in above configuration as this script runs in x86 for demonstration. # Please to set :code:`is_tuning` to True to enable auto tuning. # Also, :code:`test_target` is set to :code:`llvm` as this example to make compatible for x86 demonstration. # Please change it to :code:`opencl` or :code:`opencl -device=adreno` for RPC target in configuration above. if is_tuning: tvmc.tune( tvmc_model, target=target, tuning_records=tune_log, target_host=target_host, hostname=rpc_tracker_host, port=rpc_tracker_port, rpc_key=rpc_key, tuner="xgb", repeat=30, trials=3, early_stopping=0, ) ####################################################################### # Compilation # ----------- # Compilation to produce tvm artifacts # This generated example running on our x86 server for demonstration. # To deply and tun on real target over RPC please set :code:`local_demo` to False in above configuration sestion. # OpenCLML offloading will try to accelerate supported operators by using OpenCLML proprietory operator library. # By default :code:`enable_clml` is set to False in above configuration section. if not enable_clml: if local_demo: tvmc_package = tvmc.compile( tvmc_model, target=target, ) else: tvmc_package = tvmc.compile( tvmc_model, target=target, target_host=target_host, cross=cross_compiler, tuning_records=tune_log, ) else: # Altrernatively, we can save the compilation output and save it as a TVMCPackage. # This way avoids loading of compiled module without compiling again. 
target = target + ", clml" pkg_path = tmp_path.relpath("keras-resnet50.tar") tvmc.compile( tvmc_model, target=target, target_host=target_host, cross=cross_compiler, tuning_records=tune_log, package_path=pkg_path, ) # Load the compiled package tvmc_package = TVMCPackage(package_path=pkg_path) # tvmc_package consists of tvmc_package.lib_path, tvmc_package.graph, tvmc_package.params # Saved TVMPackage is nothing but tar archive with mod.so, mod.json and mod.params. ####################################################################### # Deploy & Run # ------------ # Deploy and run the compiled model on RPC # Let tvmc fill inputs using random # Run on RPC setup if local_demo: result = tvmc.run(tvmc_package, device="cpu", fill_mode="random") else: result = tvmc.run( tvmc_package, device="cl", rpc_key=rpc_key, hostname=rpc_tracker_host, port=rpc_tracker_port, fill_mode="random", ) # result is a dictionary of outputs. print("Result:", result)
6,849
33.422111
127
py
tvm
tvm-main/gallery/how_to/deploy_models/deploy_model_on_android.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-deploy-model-on-android: Deploy the Pretrained Model on Android ======================================= **Author**: `Tomohiro Kato <https://tkat0.github.io/>`_ This is an example of using Relay to compile a keras model and deploy it on Android device. """ import os import numpy as np from PIL import Image import keras from keras.applications.mobilenet_v2 import MobileNetV2 import tvm from tvm import te import tvm.relay as relay from tvm import rpc from tvm.contrib import utils, ndk, graph_executor as runtime from tvm.contrib.download import download_testdata ###################################################################### # Setup Environment # ----------------- # Since there are many required packages for Android, it is recommended to use the official Docker Image. # # First, to build and run Docker Image, we can run the following command. # # .. code-block:: bash # # git clone --recursive https://github.com/apache/tvm tvm # cd tvm # docker build -t tvm.demo_android -f docker/Dockerfile.demo_android ./docker # docker run --pid=host -h tvm -v $PWD:/workspace \ # -w /workspace -p 9190:9190 --name tvm -it tvm.demo_android bash # # You are now inside the container. 
The cloned TVM directory is mounted on /workspace. # At this time, mount the 9190 port used by RPC described later. # # .. note:: # # Please execute the following steps in the container. # We can execute :code:`docker exec -it tvm bash` to open a new terminal in the container. # # Next we build the TVM. # # .. code-block:: bash # # mkdir build # cd build # cmake -DUSE_LLVM=llvm-config-8 \ # -DUSE_RPC=ON \ # -DUSE_SORT=ON \ # -DUSE_VULKAN=ON \ # -DUSE_GRAPH_EXECUTOR=ON \ # .. # make -j10 # # After building TVM successfully, Please set PYTHONPATH. # # .. code-block:: bash # # echo 'export PYTHONPATH=/workspace/python:/workspace/vta/python:${PYTHONPATH}' >> ~/.bashrc # source ~/.bashrc ################################################################# # Start RPC Tracker # ----------------- # TVM uses RPC session to communicate with Android device. # # To start an RPC tracker, run this command in the container. The tracker is # required during the whole tuning process, so we need to open a new terminal for # this command: # # .. code-block:: bash # # python3 -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190 # # The expected output is # # .. code-block:: bash # # INFO:RPCTracker:bind to 0.0.0.0:9190 ################################################################# # Register Android device to RPC Tracker # -------------------------------------- # Now we can register our Android device to the tracker. # # Follow this `readme page <https://github.com/apache/tvm/tree/main/apps/android_rpc>`_ to # install TVM RPC APK on the android device. # # Here is an example of config.mk. I enabled OpenCL and Vulkan. # # # .. 
code-block:: bash # # APP_ABI = arm64-v8a # # APP_PLATFORM = android-24 # # # whether enable OpenCL during compile # USE_OPENCL = 1 # # # whether to enable Vulkan during compile # USE_VULKAN = 1 # # ifeq ($(USE_VULKAN), 1) # # Statically linking vulkan requires API Level 24 or higher # APP_PLATFORM = android-24 # endif # # # the additional include headers you want to add, e.g., SDK_PATH/adrenosdk/Development/Inc # ADD_C_INCLUDES += /work/adrenosdk-linux-5_0/Development/Inc # ADD_C_INCLUDES = # # # the additional link libs you want to add, e.g., ANDROID_LIB_PATH/libOpenCL.so # ADD_LDLIBS = # # .. note:: # # At this time, don't forget to `create a standalone toolchain <https://github.com/apache/tvm/tree/main/apps/android_rpc#architecture-and-android-standalone-toolchain>`_ . # # for example # # .. code-block:: bash # # $ANDROID_NDK_HOME/build/tools/make-standalone-toolchain.sh \ # --platform=android-24 --use-llvm --arch=arm64 --install-dir=/opt/android-toolchain-arm64 # export TVM_NDK_CC=/opt/android-toolchain-arm64/bin/aarch64-linux-android-g++ # # Next, start the Android application and enter the IP address and port of RPC Tracker. # Then you have already registered your device. # # After registering devices, we can confirm it by querying rpc_tracker # # .. code-block:: bash # # python3 -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190 # # For example, if we have 1 Android device. # the output can be # # .. code-block:: bash # # Queue Status # ---------------------------------- # key total free pending # ---------------------------------- # android 1 1 0 # ---------------------------------- # # To confirm that you can communicate with Android, we can run following test script. # If you use OpenCL and Vulkan, please set :code:`test_opencl` and :code:`test_vulkan` in the script. # # .. code-block:: bash # # export TVM_TRACKER_HOST=0.0.0.0 # export TVM_TRACKER_PORT=9190 # # .. 
code-block:: bash # # cd /workspace/apps/android_rpc # python3 tests/android_rpc_test.py # ###################################################################### # Load pretrained keras model # --------------------------- # We load a pretrained MobileNetV2(alpha=0.5) classification model provided by keras. keras.backend.clear_session() # Destroys the current TF graph and creates a new one. weights_url = "".join( [ "https://github.com/JonathanCMitchell/", "mobilenet_v2_keras/releases/download/v1.1/", "mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5", ] ) weights_file = "mobilenet_v2_weights.h5" weights_path = download_testdata(weights_url, weights_file, module="keras") keras_mobilenet_v2 = MobileNetV2( alpha=0.5, include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000 ) keras_mobilenet_v2.load_weights(weights_path) ###################################################################### # In order to test our model, here we download an image of cat and # transform its format. img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true" img_name = "cat.png" img_path = download_testdata(img_url, img_name, module="data") image = Image.open(img_path).resize((224, 224)) dtype = "float32" def transform_image(image): image = np.array(image) - np.array([123.0, 117.0, 104.0]) image /= np.array([58.395, 57.12, 57.375]) image = image.transpose((2, 0, 1)) image = image[np.newaxis, :] return image x = transform_image(image) ###################################################################### # synset is used to transform the label from number of ImageNet class to # the word human can understand. 
synset_url = "".join( [ "https://gist.githubusercontent.com/zhreshold/", "4d0b62f3d01426887599d4f7ede23ee5/raw/", "596b27d23537e5a1b5751d2b0481ef172f58b539/", "imagenet1000_clsid_to_human.txt", ] ) synset_name = "imagenet1000_clsid_to_human.txt" synset_path = download_testdata(synset_url, synset_name, module="data") with open(synset_path) as f: synset = eval(f.read()) ###################################################################### # Compile the model with relay # ---------------------------- # If we run the example on our x86 server for demonstration, we can simply # set it as :code:`llvm`. If running it on the Android device, we need to # specify its instruction set. Set :code:`local_demo` to False if you want # to run this tutorial with a real device. local_demo = True # by default on CPU target will execute. # select 'cpu', 'opencl' and 'vulkan' test_target = "cpu" # Change target configuration. # Run `adb shell cat /proc/cpuinfo` to find the arch. arch = "arm64" target = tvm.target.Target("llvm -mtriple=%s-linux-android" % arch) if local_demo: target = tvm.target.Target("llvm") elif test_target == "opencl": target = tvm.target.Target("opencl", host=target) elif test_target == "vulkan": target = tvm.target.Target("vulkan", host=target) input_name = "input_1" shape_dict = {input_name: x.shape} mod, params = relay.frontend.from_keras(keras_mobilenet_v2, shape_dict) with tvm.transform.PassContext(opt_level=3): lib = relay.build(mod, target=target, params=params) # After `relay.build`, you will get three return values: graph, # library and the new parameter, since we do some optimization that will # change the parameters but keep the result of model as the same. # Save the library at local temporary directory. 
tmp = utils.tempdir() lib_fname = tmp.relpath("net.so") fcompile = ndk.create_shared if not local_demo else None lib.export_library(lib_fname, fcompile) ###################################################################### # Deploy the Model Remotely by RPC # -------------------------------- # With RPC, you can deploy the model remotely from your host machine # to the remote android device. tracker_host = os.environ.get("TVM_TRACKER_HOST", "127.0.0.1") tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190)) key = "android" if local_demo: remote = rpc.LocalSession() else: tracker = rpc.connect_tracker(tracker_host, tracker_port) # When running a heavy model, we should increase the `session_timeout` remote = tracker.request(key, priority=0, session_timeout=60) if local_demo: dev = remote.cpu(0) elif test_target == "opencl": dev = remote.cl(0) elif test_target == "vulkan": dev = remote.vulkan(0) else: dev = remote.cpu(0) # upload the library to remote device and load it remote.upload(lib_fname) rlib = remote.load_module("net.so") # create the remote runtime module module = runtime.GraphModule(rlib["default"](dev)) ###################################################################### # Execute on TVM # -------------- # set input data module.set_input(input_name, tvm.nd.array(x.astype(dtype))) # run module.run() # get output out = module.get_output(0) # get top1 result top1 = np.argmax(out.numpy()) print("TVM prediction top-1: {}".format(synset[top1])) print("Evaluate inference time cost...") print(module.benchmark(dev, number=1, repeat=10)) ###################################################################### # Sample Output # ------------- # The following is the result of 'cpu', 'opencl' and 'vulkan' using Adreno 530 on Snapdragon 820 # # Although we can run on a GPU, it is slower than CPU. # To speed up, we need to write and optimize the schedule according to the GPU architecture. # # .. 
code-block:: bash # # # cpu # TVM prediction top-1: tiger cat # Evaluate inference time cost... # Mean inference time (std dev): 37.92 ms (19.67 ms) # # # opencl # TVM prediction top-1: tiger cat # Evaluate inference time cost... # Mean inference time (std dev): 419.83 ms (7.49 ms) # # # vulkan # TVM prediction top-1: tiger cat # Evaluate inference time cost... # Mean inference time (std dev): 465.80 ms (4.52 ms)
11,684
31.368421
173
py
tvm
tvm-main/gallery/how_to/deploy_models/deploy_prequantized.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Deploy a Framework-prequantized Model with TVM ============================================== **Author**: `Masahiro Masuda <https://github.com/masahi>`_ This is a tutorial on loading models quantized by deep learning frameworks into TVM. Pre-quantized model import is one of the quantization support we have in TVM. More details on the quantization story in TVM can be found `here <https://discuss.tvm.apache.org/t/quantization-story/3920>`_. Here, we demonstrate how to load and run models quantized by PyTorch, MXNet, and TFLite. Once loaded, we can run compiled, quantized models on any hardware TVM supports. 
""" ################################################################################# # First, necessary imports from PIL import Image import numpy as np import torch from torchvision.models.quantization import mobilenet as qmobilenet import tvm from tvm import relay from tvm.contrib.download import download_testdata ################################################################################# # Helper functions to run the demo def get_transform(): import torchvision.transforms as transforms normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) return transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ] ) def get_real_image(im_height, im_width): img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true" img_path = download_testdata(img_url, "cat.png", module="data") return Image.open(img_path).resize((im_height, im_width)) def get_imagenet_input(): im = get_real_image(224, 224) preprocess = get_transform() pt_tensor = preprocess(im) return np.expand_dims(pt_tensor.numpy(), 0) def get_synset(): synset_url = "".join( [ "https://gist.githubusercontent.com/zhreshold/", "4d0b62f3d01426887599d4f7ede23ee5/raw/", "596b27d23537e5a1b5751d2b0481ef172f58b539/", "imagenet1000_clsid_to_human.txt", ] ) synset_name = "imagenet1000_clsid_to_human.txt" synset_path = download_testdata(synset_url, synset_name, module="data") with open(synset_path) as f: return eval(f.read()) def run_tvm_model(mod, params, input_name, inp, target="llvm"): with tvm.transform.PassContext(opt_level=3): lib = relay.build(mod, target=target, params=params) runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](tvm.device(target, 0))) runtime.set_input(input_name, inp) runtime.run() return runtime.get_output(0).numpy(), runtime ################################################################################# # A mapping from label to class name, to verify that the outputs from models 
below # are reasonable synset = get_synset() ################################################################################# # Everyone's favorite cat image for demonstration inp = get_imagenet_input() ################################################################################ # Deploy a quantized PyTorch Model # -------------------------------- # First, we demonstrate how to load deep learning models quantized by PyTorch, # using our PyTorch frontend. # # Please refer to the PyTorch static quantization tutorial below to learn about # their quantization workflow. # https://pytorch.org/tutorials/advanced/static_quantization_tutorial.html # # We use this function to quantize PyTorch models. # In short, this function takes a floating point model and converts it to uint8. # The model is per-channel quantized. def quantize_model(model, inp): model.fuse_model() model.qconfig = torch.quantization.get_default_qconfig("fbgemm") torch.quantization.prepare(model, inplace=True) # Dummy calibration model(inp) torch.quantization.convert(model, inplace=True) ############################################################################## # Load quantization-ready, pretrained Mobilenet v2 model from torchvision # ----------------------------------------------------------------------- # We choose mobilenet v2 because this model was trained with quantization aware # training. Other models require a full post training calibration. qmodel = qmobilenet.mobilenet_v2(pretrained=True).eval() ############################################################################## # Quantize, trace and run the PyTorch Mobilenet v2 model # ------------------------------------------------------ # The details are out of scope for this tutorial. Please refer to the tutorials # on the PyTorch website to learn about quantization and jit. 
pt_inp = torch.from_numpy(inp) quantize_model(qmodel, pt_inp) script_module = torch.jit.trace(qmodel, pt_inp).eval() with torch.no_grad(): pt_result = script_module(pt_inp).numpy() ############################################################################## # Convert quantized Mobilenet v2 to Relay-QNN using the PyTorch frontend # ---------------------------------------------------------------------- # The PyTorch frontend has support for converting a quantized PyTorch model to # an equivalent Relay module enriched with quantization-aware operators. # We call this representation Relay QNN dialect. # # You can print the output from the frontend to see how quantized models are # represented. # # You would see operators specific to quantization such as # qnn.quantize, qnn.dequantize, qnn.requantize, and qnn.conv2d etc. input_name = "input" # the input name can be be arbitrary for PyTorch frontend. input_shapes = [(input_name, (1, 3, 224, 224))] mod, params = relay.frontend.from_pytorch(script_module, input_shapes) # print(mod) # comment in to see the QNN IR dump ############################################################################## # Compile and run the Relay module # -------------------------------- # Once we obtained the quantized Relay module, the rest of the workflow # is the same as running floating point models. Please refer to other # tutorials for more details. # # Under the hood, quantization specific operators are lowered to a sequence of # standard Relay operators before compilation. target = "llvm" tvm_result, rt_mod = run_tvm_model(mod, params, input_name, inp, target=target) ########################################################################## # Compare the output labels # ------------------------- # We should see identical labels printed. 
pt_top3_labels = np.argsort(pt_result[0])[::-1][:3] tvm_top3_labels = np.argsort(tvm_result[0])[::-1][:3] print("PyTorch top3 labels:", [synset[label] for label in pt_top3_labels]) print("TVM top3 labels:", [synset[label] for label in tvm_top3_labels]) ########################################################################################### # However, due to the difference in numerics, in general the raw floating point # outputs are not expected to be identical. Here, we print how many floating point # output values are identical out of 1000 outputs from mobilenet v2. print("%d in 1000 raw floating outputs identical." % np.sum(tvm_result[0] == pt_result[0])) ########################################################################## # Measure performance # ------------------------- # Here we give an example of how to measure performance of TVM compiled models. n_repeat = 100 # should be bigger to make the measurement more accurate dev = tvm.cpu(0) print(rt_mod.benchmark(dev, number=1, repeat=n_repeat)) ###################################################################### # .. note:: # # We recommend this method for the following reasons: # # * Measurements are done in C++, so there is no Python overhead # * It includes several warm up runs # * The same method can be used to profile on remote devices (android etc.). ###################################################################### # .. note:: # # Unless the hardware has special support for fast 8 bit instructions, quantized models are # not expected to be any faster than FP32 models. Without fast 8 bit instructions, TVM does # quantized convolution in 16 bit, even if the model itself is 8 bit. # # For x86, the best performance can be achieved on CPUs with AVX512 instructions set. # In this case, TVM utilizes the fastest available 8 bit instructions for the given target. # This includes support for the VNNI 8 bit dot product instruction (CascadeLake or newer). 
# # Moreover, the following general tips for CPU performance equally applies: # # * Set the environment variable TVM_NUM_THREADS to the number of physical cores # * Choose the best target for your hardware, such as "llvm -mcpu=skylake-avx512" or # "llvm -mcpu=cascadelake" (more CPUs with AVX512 would come in the future) ############################################################################### # Deploy a quantized MXNet Model # ------------------------------ # TODO ############################################################################### # Deploy a quantized TFLite Model # ------------------------------- # TODO
9,941
39.91358
93
py
tvm
tvm-main/gallery/how_to/deploy_models/deploy_model_on_adreno.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-deploy-model-on-adreno: Deploy the Pretrained Model on Adreno™ ====================================== **Author**: Daniil Barinov, Siva Rama Krishna This article is a step-by-step tutorial to deploy pretrained Pytorch ResNet-18 model on Adreno (on different precisions). For us to begin with, PyTorch must be installed. TorchVision is also required since we will be using it as our model zoo. A quick solution is to install it via pip: .. code-block:: bash %%shell pip install torch pip install torchvision Besides that, you should have TVM builded for Android. See the following instructions on how to build it. `Deploy to Adreno GPU <https://tvm.apache.org/docs/how_to/deploy/adreno.html>`_ After the build section there should be two files in *build* directory «libtvm_runtime.so» and «tvm_rpc». Let's push them to the device and run TVM RPC Server. """ ###################################################################### # TVM RPC Server # -------------- # To get the hash of the device use: # # .. code-block:: bash # # adb devices # # Set the android device to use, if you have several devices connected to your computer. # # .. 
code-block:: bash # # export ANDROID_SERIAL=<device-hash> # # Then to upload these two files to the device you should use: # # .. code-block:: bash # # adb push {libtvm_runtime.so,tvm_rpc} /data/local/tmp # # At this moment you will have «libtvm_runtime.so» and «tvm_rpc» on path /data/local/tmp on your device. # Sometimes cmake can’t find «libc++_shared.so». Use: # # .. code-block:: bash # # find ${ANDROID_NDK_HOME} -name libc++_shared.so # # to find it and also push it with adb on the desired device: # # .. code-block:: bash # # adb push libc++_shared.so /data/local/tmp # # We are now ready to run the TVM RPC Server. # Launch rpc_tracker with following line in 1st console: # # .. code-block:: bash # # python3 -m tvm.exec.rpc_tracker --port 9190 # # Then we need to run tvm_rpc server from under the desired device in 2nd console: # # .. code-block:: bash # # adb reverse tcp:9190 tcp:9190 # adb forward tcp:5000 tcp:5000 # adb forward tcp:5002 tcp:5001 # adb forward tcp:5003 tcp:5002 # adb forward tcp:5004 tcp:5003 # adb shell LD_LIBRARY_PATH=/data/local/tmp /data/local/tmp/tvm_rpc server --host=0.0.0.0 --port=5000 --tracker=127.0.0.1:9190 --key=android --port-end=5100 # # Before proceeding to compile and infer model, specify TVM_TRACKER_HOST and TVM_TRACKER_PORT # # .. code-block:: bash # # export TVM_TRACKER_HOST=0.0.0.0 # export TVM_TRACKER_PORT=9190 # # check that the tracker is running and the device is available # # .. code-block:: bash # # python -m tvm.exec.query_rpc_tracker --port 9190 # # For example, if we have 1 Android device, # the output can be: # # .. 
code-block:: bash # # Queue Status # ---------------------------------- # key total free pending # ---------------------------------- # android 1 1 0 # ---------------------------------- ################################################################# # Configuration # ------------- import os import torch import torchvision import tvm from tvm import te from tvm import relay, rpc from tvm.contrib import utils, ndk from tvm.contrib import graph_executor from tvm.relay.op.contrib import clml from tvm import autotvm # Below are set of configuration that controls the behaviour of this script like # local run or device run, target definitions, dtype setting and auto tuning enablement. # Change these settings as needed if required. # Adreno devices are efficient with float16 compared to float32 # Given the expected output doesn't effect by lowering precision # it's advisable to use lower precision. # We have a helper API to make the precision conversion simple and # it supports dtype with "float16" and "float16_acc32" modes. # Let's choose "float16" for calculation and "float32" for accumulation. calculation_dtype = "float16" acc_dtype = "float32" # Specify Adreno target before compiling to generate texture # leveraging kernels and get all the benefits of textures # Note: This generated example running on our x86 server for demonstration. # If running it on the Android device, we need to # specify its instruction set. Set :code:`local_demo` to False if you want # to run this tutorial with a real device over rpc. local_demo = True # by default on CPU target will execute. # select 'cpu', 'opencl' and 'opencl -device=adreno' test_target = "cpu" # Change target configuration. # Run `adb shell cat /proc/cpuinfo` to find the arch. arch = "arm64" target = tvm.target.Target("llvm -mtriple=%s-linux-android" % arch) # Auto tuning is compute intensive and time taking task, # hence disabling for default run. Please enable it if required. 
is_tuning = False tune_log = "adreno-resnet18.log" # To enable OpenCLML accelerated operator library. enable_clml = False ################################################################# # Get a PyTorch Model # ------------------- # Get resnet18 from torchvision models model_name = "resnet18" model = getattr(torchvision.models, model_name)(pretrained=True) model = model.eval() # We grab the TorchScripted model via tracing input_shape = [1, 3, 224, 224] input_data = torch.randn(input_shape) scripted_model = torch.jit.trace(model, input_data).eval() ################################################################# # Load a test image # ----------------- # As an example we would use classical cat image from ImageNet from PIL import Image from tvm.contrib.download import download_testdata from matplotlib import pyplot as plt import numpy as np img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true" img_path = download_testdata(img_url, "cat.png", module="data") img = Image.open(img_path).resize((224, 224)) plt.imshow(img) plt.show() # Preprocess the image and convert to tensor from torchvision import transforms my_preprocess = transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ] ) img = my_preprocess(img) img = np.expand_dims(img, 0) ################################################################# # Convert PyTorch model to Relay module # ------------------------------------- # TVM has frontend api for various frameworks under relay.frontend and now # for pytorch model import we have relay.frontend.from_pytorch api. 
# Input name can be arbitrary input_name = "input0" shape_list = [(input_name, img.shape)] mod, params = relay.frontend.from_pytorch(scripted_model, shape_list) ################################################################# # Precisions # ---------- # Adreno devices are efficient with float16 compared to float32 # Given the expected output doesn't effect by lowering precision # it's advisable to use lower precision. # TVM support Mixed Precision through ToMixedPrecision transformation pass. # We may need to register precision rules like precision type, accumultation # datatype ...etc. for the required operators to override the default settings. # The below helper api simplifies the precision conversions across the module. # Calculation dtype is set to "float16" and accumulation dtype is set to "float32" # in configuration section above. from tvm.driver.tvmc.transform import apply_graph_transforms mod = apply_graph_transforms( mod, { "mixed_precision": True, "mixed_precision_ops": ["nn.conv2d", "nn.dense"], "mixed_precision_calculation_type": calculation_dtype, "mixed_precision_acc_type": acc_dtype, }, ) ################################################################# # As you can see in the IR, the architecture now contains cast operations, which are # needed to convert to FP16 precision. # You can also use "float16" or "float32" precisions as other dtype options. ################################################################# # Prepare TVM Target # ------------------ # This generated example running on our x86 server for demonstration. # To deply and tun on real target over RPC please set :code:`local_demo` to False in above configuration sestion. # Also, :code:`test_target` is set to :code:`llvm` as this example to make compatible for x86 demonstration. # Please change it to :code:`opencl` or :code:`opencl -device=adreno` for RPC target in configuration above. 
if local_demo: target = tvm.target.Target("llvm") elif test_target.find("opencl"): target = tvm.target.Target(test_target, host=target) ################################################################## # AutoTuning # ---------- # The below few instructions can auto tune the relay module with xgboost being the tuner algorithm. # Auto Tuning process involces stages of extracting the tasks, defining tuning congiguration and # tuning each task for best performing kernel configuration. # Get RPC related settings. rpc_tracker_host = os.environ.get("TVM_TRACKER_HOST", "127.0.0.1") rpc_tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190)) key = "android" # Auto tuning is compute intensive and time taking task. # It is set to False in above configuration as this script runs in x86 for demonstration. # Please to set :code:`is_tuning` to True to enable auto tuning. if is_tuning: # Auto Tuning Stage 1: Extract tunable tasks tasks = autotvm.task.extract_from_program( mod, target=test_target, target_host=target, params=params ) # Auto Tuning Stage 2: Define tuning configuration tmp_log_file = tune_log + ".tmp" measure_option = autotvm.measure_option( builder=autotvm.LocalBuilder( build_func=ndk.create_shared, timeout=15 ), # Build the test kernel locally runner=autotvm.RPCRunner( # The runner would be on a remote device. key, # RPC Key host=rpc_tracker_host, # Tracker host port=int(rpc_tracker_port), # Tracker port number=3, # Number of runs before averaging timeout=600, # RPC Timeout ), ) n_trial = 1024 # Number of iteration of training before choosing the best kernel config early_stopping = False # Can be enabled to stop tuning while the loss is not minimizing. # Auto Tuning Stage 3: Iterate through the tasks and tune. 
from tvm.autotvm.tuner import XGBTuner for i, tsk in enumerate(reversed(tasks[:3])): print("Task:", tsk) prefix = "[Task %2d/%2d] " % (i + 1, len(tasks)) # choose tuner tuner = "xgb" # create tuner if tuner == "xgb": tuner_obj = XGBTuner(tsk, loss_type="reg") elif tuner == "xgb_knob": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="knob") elif tuner == "xgb_itervar": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="itervar") elif tuner == "xgb_curve": tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="curve") elif tuner == "xgb_rank": tuner_obj = XGBTuner(tsk, loss_type="rank") elif tuner == "xgb_rank_knob": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob") elif tuner == "xgb_rank_itervar": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="itervar") elif tuner == "xgb_rank_curve": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="curve") elif tuner == "xgb_rank_binary": tuner_obj = XGBTuner(tsk, loss_type="rank-binary") elif tuner == "xgb_rank_binary_knob": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="knob") elif tuner == "xgb_rank_binary_itervar": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="itervar") elif tuner == "xgb_rank_binary_curve": tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="curve") elif tuner == "ga": tuner_obj = GATuner(tsk, pop_size=50) elif tuner == "random": tuner_obj = RandomTuner(tsk) elif tuner == "gridsearch": tuner_obj = GridSearchTuner(tsk) else: raise ValueError("Invalid tuner: " + tuner) tsk_trial = min(n_trial, len(tsk.config_space)) tuner_obj.tune( n_trial=tsk_trial, early_stopping=early_stopping, measure_option=measure_option, callbacks=[ autotvm.callback.progress_bar(tsk_trial, prefix=prefix), autotvm.callback.log_to_file(tmp_log_file), ], ) # Auto Tuning Stage 4: Pick the best performing configurations from the overall log. 
autotvm.record.pick_best(tmp_log_file, tune_log) ################################################################# # Enable OpenCLML Offloading # -------------------------- # OpenCLML offloading will try to accelerate supported operators # by using OpenCLML proprietory operator library. # By default :code:`enable_clml` is set to False in above configuration section. if not local_demo and enable_clml: mod = clml.partition_for_clml(mod, params) ################################################################# # Compilation # ----------- # Use tuning cache if exists. if os.path.exists(tune_log): with autotvm.apply_history_best(tune_log): with tvm.transform.PassContext(opt_level=3): lib = relay.build(mod, target=target, params=params) else: with tvm.transform.PassContext(opt_level=3): lib = relay.build(mod, target=target, params=params) ################################################################# # Deploy the Model Remotely by RPC # -------------------------------- # Using RPC you can deploy the model from host # machine to the remote Adreno device if local_demo: remote = rpc.LocalSession() else: tracker = rpc.connect_tracker(rpc_tracker_host, rpc_tracker_port) # When running a heavy model, we should increase the `session_timeout` remote = tracker.request(key, priority=0, session_timeout=60) if local_demo: dev = remote.cpu(0) elif test_target.find("opencl"): dev = remote.cl(0) else: dev = remote.cpu(0) temp = utils.tempdir() dso_binary = "dev_lib_cl.so" dso_binary_path = temp.relpath(dso_binary) fcompile = ndk.create_shared if not local_demo else None lib.export_library(dso_binary_path, fcompile) remote_path = "/data/local/tmp/" + dso_binary remote.upload(dso_binary_path) rlib = remote.load_module(dso_binary) m = graph_executor.GraphModule(rlib["default"](dev)) ################################################################# # Run inference # ------------- # We now can set inputs, infer our model and get predictions as output m.set_input(input_name, 
tvm.nd.array(img.astype("float32"))) m.run() tvm_output = m.get_output(0) ################################################################# # Get predictions and performance statistic # ----------------------------------------- # This piece of code displays the top-1 and top-5 predictions, as # well as provides information about the model's performance from os.path import join, isfile from matplotlib import pyplot as plt from tvm.contrib import download # Download ImageNet categories categ_url = "https://github.com/uwsampl/web-data/raw/main/vta/models/" categ_fn = "synset.txt" download.download(join(categ_url, categ_fn), categ_fn) synset = eval(open(categ_fn).read()) top_categories = np.argsort(tvm_output.asnumpy()[0]) top5 = np.flip(top_categories, axis=0)[:5] # Report top-1 classification result print("Top-1 id: {}, class name: {}".format(top5[1 - 1], synset[top5[1 - 1]])) # Report top-5 classification results print("\nTop5 predictions: \n") print("\t#1:", synset[top5[1 - 1]]) print("\t#2:", synset[top5[2 - 1]]) print("\t#3:", synset[top5[3 - 1]]) print("\t#4:", synset[top5[4 - 1]]) print("\t#5:", synset[top5[5 - 1]]) print("\t", top5) ImageNetClassifier = False for k in top_categories[-5:]: if "cat" in synset[k]: ImageNetClassifier = True assert ImageNetClassifier, "Failed ImageNet classifier validation check" print("Evaluate inference time cost...") print(m.benchmark(dev, number=1, repeat=10))
17,091
35.059072
158
py
tvm
tvm-main/gallery/how_to/deploy_models/deploy_model_on_nano.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-deploy-model-on-nano: Deploy the Pretrained Model on Jetson Nano =========================================== **Author**: `BBuf <https://github.com/BBuf>`_ This is an example of using Relay to compile a ResNet model and deploy it on Jetson Nano. """ # sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True # sphinx_gallery_end_ignore import tvm from tvm import te import tvm.relay as relay from tvm import rpc from tvm.contrib import utils, graph_executor as runtime from tvm.contrib.download import download_testdata ###################################################################### # .. _build-tvm-runtime-on-jetson-nano: # # Build TVM Runtime on Jetson Nano # -------------------------------- # # The first step is to build the TVM runtime on the remote device. # # .. note:: # # All instructions in both this section and next section should be # executed on the target device, e.g. Jetson Nano. And we assume it # has Linux running. # # Since we do compilation on local machine, the remote device is only used # for running the generated code. We only need to build tvm runtime on # the remote device. # # .. 
code-block:: bash # # git clone --recursive https://github.com/apache/tvm tvm # cd tvm # mkdir build # cp cmake/config.cmake build # cd build # cmake .. # make runtime -j4 # .. note:: # # If we want to use Jetson Nano's GPU for inference, # we need to enable the CUDA option in `config.cmake`, # that is, `set(USE_CUDA ON)` # # After building runtime successfully, we need to set environment varibles # in :code:`~/.bashrc` file. We can edit :code:`~/.bashrc` # using :code:`vi ~/.bashrc` and add the line below (Assuming your TVM # directory is in :code:`~/tvm`): # # .. code-block:: bash # # export PYTHONPATH=$PYTHONPATH:~/tvm/python # # To update the environment variables, execute :code:`source ~/.bashrc`. ###################################################################### # Set Up RPC Server on Device # --------------------------- # To start an RPC server, run the following command on your remote device # (Which is Jetson Nano in our example). # # .. code-block:: bash # # python -m tvm.exec.rpc_server --host 0.0.0.0 --port=9091 # # If you see the line below, it means the RPC server started # successfully on your device. # # .. code-block:: bash # # INFO:RPCServer:bind to 0.0.0.0:9091 # ###################################################################### # Prepare the Pre-trained Model # ----------------------------- # Back to the host machine, which should have a full TVM installed (with LLVM). # # We will use pre-trained model from # `MXNet Gluon model zoo <https://mxnet.apache.org/api/python/gluon/model_zoo.html>`_. # You can found more details about this part at tutorial :ref:`tutorial-from-mxnet`. from mxnet.gluon.model_zoo.vision import get_model from PIL import Image import numpy as np # one line to get the model block = get_model("resnet18_v1", pretrained=True) ###################################################################### # In order to test our model, here we download an image of cat and # transform its format. 
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true" img_name = "cat.png" img_path = download_testdata(img_url, img_name, module="data") image = Image.open(img_path).resize((224, 224)) def transform_image(image): image = np.array(image) - np.array([123.0, 117.0, 104.0]) image /= np.array([58.395, 57.12, 57.375]) image = image.transpose((2, 0, 1)) image = image[np.newaxis, :] return image x = transform_image(image) ###################################################################### # synset is used to transform the label from number of ImageNet class to # the word human can understand. synset_url = "".join( [ "https://gist.githubusercontent.com/zhreshold/", "4d0b62f3d01426887599d4f7ede23ee5/raw/", "596b27d23537e5a1b5751d2b0481ef172f58b539/", "imagenet1000_clsid_to_human.txt", ] ) synset_name = "imagenet1000_clsid_to_human.txt" synset_path = download_testdata(synset_url, synset_name, module="data") with open(synset_path) as f: synset = eval(f.read()) ###################################################################### # Now we would like to port the Gluon model to a portable computational graph. # It's as easy as several lines. # We support MXNet static graph(symbol) and HybridBlock in mxnet.gluon shape_dict = {"data": x.shape} mod, params = relay.frontend.from_mxnet(block, shape_dict) # we want a probability so add a softmax operator func = mod["main"] func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs) ###################################################################### # Here are some basic data workload configurations. batch_size = 1 num_classes = 1000 image_shape = (3, 224, 224) data_shape = (batch_size,) + image_shape ###################################################################### # Compile The Graph # ----------------- # To compile the graph, we call the :py:func:`relay.build` function # with the graph configuration and parameters. 
However, You cannot to # deploy a x86 program on a device with ARM instruction set. It means # Relay also needs to know the compilation option of target device, # apart from arguments :code:`net` and :code:`params` to specify the # deep learning workload. Actually, the option matters, different option # will lead to very different performance. ###################################################################### # If we run the example on our x86 server for demonstration, we can simply # set it as :code:`llvm`. If running it on the Jetson Nano, we need to # set it as :code:`nvidia/jetson-nano`. Set :code:`local_demo` to False # if you want to run this tutorial with a real device. local_demo = True if local_demo: target = tvm.target.Target("llvm") else: target = tvm.target.Target("nvidia/jetson-nano") assert target.kind.name == "cuda" assert target.attrs["arch"] == "sm_53" assert target.attrs["shared_memory_per_block"] == 49152 assert target.attrs["max_threads_per_block"] == 1024 assert target.attrs["thread_warp_size"] == 32 assert target.attrs["registers_per_block"] == 32768 with tvm.transform.PassContext(opt_level=3): lib = relay.build(func, target, params=params) # After `relay.build`, you will get three return values: graph, # library and the new parameter, since we do some optimization that will # change the parameters but keep the result of model as the same. # Save the library at local temporary directory. tmp = utils.tempdir() lib_fname = tmp.relpath("net.tar") lib.export_library(lib_fname) ###################################################################### # Deploy the Model Remotely by RPC # -------------------------------- # With RPC, you can deploy the model remotely from your host machine # to the remote device. # obtain an RPC session from remote device. 
if local_demo: remote = rpc.LocalSession() else: # The following is my environment, change this to the IP address of your target device host = "192.168.1.11" port = 9091 remote = rpc.connect(host, port) # upload the library to remote device and load it remote.upload(lib_fname) rlib = remote.load_module("net.tar") # create the remote runtime module if local_demo: dev = remote.cpu(0) else: dev = remote.cuda(0) module = runtime.GraphModule(rlib["default"](dev)) # set input data module.set_input("data", tvm.nd.array(x.astype("float32"))) # run module.run() # get output out = module.get_output(0) # get top1 result top1 = np.argmax(out.numpy()) print("TVM prediction top-1: {}".format(synset[top1]))
8,552
33.768293
99
py
tvm
tvm-main/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compile PyTorch Object Detection Models ======================================= This article is an introductory tutorial to deploy PyTorch object detection models with Relay VM. For us to begin with, PyTorch should be installed. TorchVision is also required since we will be using it as our model zoo. A quick solution is to install via pip .. code-block:: bash pip install torch pip install torchvision or please refer to official site https://pytorch.org/get-started/locally/ PyTorch versions should be backwards compatible but should be used with the proper TorchVision version. Currently, TVM supports PyTorch 1.7 and 1.4. Other versions may be unstable. 
""" import tvm from tvm import relay from tvm import relay from tvm.runtime.vm import VirtualMachine from tvm.contrib.download import download_testdata import numpy as np import cv2 # PyTorch imports import torch import torchvision ###################################################################### # Load pre-trained maskrcnn from torchvision and do tracing # --------------------------------------------------------- in_size = 300 input_shape = (1, 3, in_size, in_size) def do_trace(model, inp): model_trace = torch.jit.trace(model, inp) model_trace.eval() return model_trace def dict_to_tuple(out_dict): if "masks" in out_dict.keys(): return out_dict["boxes"], out_dict["scores"], out_dict["labels"], out_dict["masks"] return out_dict["boxes"], out_dict["scores"], out_dict["labels"] class TraceWrapper(torch.nn.Module): def __init__(self, model): super().__init__() self.model = model def forward(self, inp): out = self.model(inp) return dict_to_tuple(out[0]) model_func = torchvision.models.detection.maskrcnn_resnet50_fpn model = TraceWrapper(model_func(pretrained=True)) model.eval() inp = torch.Tensor(np.random.uniform(0.0, 250.0, size=(1, 3, in_size, in_size))) with torch.no_grad(): out = model(inp) script_module = do_trace(model, inp) ###################################################################### # Download a test image and pre-process # ------------------------------------- img_url = ( "https://raw.githubusercontent.com/dmlc/web-data/master/gluoncv/detection/street_small.jpg" ) img_path = download_testdata(img_url, "test_street_small.jpg", module="data") img = cv2.imread(img_path).astype("float32") img = cv2.resize(img, (in_size, in_size)) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = np.transpose(img / 255.0, [2, 0, 1]) img = np.expand_dims(img, axis=0) ###################################################################### # Import the graph to Relay # ------------------------- input_name = "input0" shape_list = [(input_name, input_shape)] mod, params = 
relay.frontend.from_pytorch(script_module, shape_list) ###################################################################### # Compile with Relay VM # --------------------- # Note: Currently only CPU target is supported. For x86 target, it is # highly recommended to build TVM with Intel MKL and Intel OpenMP to get # best performance, due to the existence of large dense operator in # torchvision rcnn models. # Add "-libs=mkl" to get best performance on x86 target. # For x86 machine supports AVX512, the complete target is # "llvm -mcpu=skylake-avx512 -libs=mkl" target = "llvm" with tvm.transform.PassContext(opt_level=3, disabled_pass=["FoldScaleAxis"]): vm_exec = relay.vm.compile(mod, target=target, params=params) ###################################################################### # Inference with Relay VM # ----------------------- dev = tvm.cpu() vm = VirtualMachine(vm_exec, dev) vm.set_input("main", **{input_name: img}) tvm_res = vm.run() ###################################################################### # Get boxes with score larger than 0.9 # ------------------------------------ score_threshold = 0.9 boxes = tvm_res[0].numpy().tolist() valid_boxes = [] for i, score in enumerate(tvm_res[1].numpy().tolist()): if score > score_threshold: valid_boxes.append(boxes[i]) else: break print("Get {} valid boxes".format(len(valid_boxes)))
4,953
31.168831
95
py