repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tvm | tvm-main/python/tvm/topi/arm_cpu/conv2d_spatial_pack.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,no-else-return
"""Conv2D spatial pack implementation for ARM CPU"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import autotvm
from tvm.target import Target
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity, AnnotateEntity, ReorderEntity
from .. import nn
from ..utils import get_const_tuple
from ..nn.utils import get_const_int, get_pad_tuple
def conv2d_spatial_pack_nchw(cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile):
    """Compute declaration for Conv2D spatial pack with NCHW layout.

    The input is tiled into (VH, VW) spatial blocks and the kernel into VC
    output-channel blocks; the convolution is computed over the packed
    tensors and unpacked back to plain NCHW at the end.

    Parameters
    ----------
    cfg : autotvm config entity
        Tuning configuration; the tile/reorder/annotation knobs are defined
        (or, during fallback, filled from tophub reference logs) here.
    data : te.Tensor
        Input tensor in NCHW layout.
    kernel : te.Tensor
        Either an unpacked 4-D (CO, CI, KH, KW) kernel or a pre-packed 5-D
        (CO//VC, CI, KH, KW, VC) kernel produced by alter_op_layout.
    strides : int or tuple of int
        Convolution strides (HSTR, WSTR).
    padding : int, str or tuple
        Padding specification accepted by get_pad_tuple.
    dilation : int or tuple of int
        Kernel dilation (dilation_h, dilation_w).
    out_dtype : str or None
        Output dtype; falls back to the input dtype when falsy.
    num_tile : int
        2 for arm cpu (two-level splits), 3 for mali gpu (three-level splits).

    Returns
    -------
    output : te.Tensor
        4-D NCHW output tensor tagged "spatial_conv2d_output".
    """
    out_dtype = out_dtype or data.dtype
    N, CI, IH, IW = get_const_tuple(data.shape)
    if isinstance(N, tvm.tir.Any):
        # Dynamic batch is allowed; replace it with a symbolic size var.
        N = tvm.te.size_var("n")
    if not isinstance(IH, int) or not isinstance(IW, int):
        # Fixed error message: this is the spatial pack schedule, not winograd.
        raise RuntimeError("ARM spatial pack conv2d doesn't support dynamic input height or width.")
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    if len(kernel.shape) == 4:
        pre_packed = False
        CO, _, KH, KW = get_const_tuple(kernel.shape)
    else:  # kernel tensor is pre packed
        pre_packed = True
        CO, _, KH, KW, VC = get_const_tuple(kernel.shape)
        CO = CO * VC
    dilated_kernel_h = (KH - 1) * dilation_h + 1
    dilated_kernel_w = (KW - 1) * dilation_w + 1
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    OH = (IH + pad_top + pad_bottom - dilated_kernel_h) // HSTR + 1
    OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1
    data_pad = nn.pad(data, [0, 0, pad_top, pad_left], [0, 0, pad_bottom, pad_right])
    # ==================== define configuration space ====================
    # TODO(@kevinthesun): Support tuning/optimization for dynamic shape.
    n_tuning_axis = N if isinstance(N, int) else 1
    n, co, oh, ow = cfg.axis(n_tuning_axis), cfg.axis(CO), cfg.axis(OH), cfg.axis(OW)
    ci, kh, kw = cfg.reduce_axis(CI), cfg.reduce_axis(KH), cfg.reduce_axis(KW)
    if num_tile == 2:  # for arm cpu
        co, vc = cfg.define_split("tile_co", co, num_outputs=2)
        oh, vh = cfg.define_split("tile_oh", oh, num_outputs=2)
        ow, vw = cfg.define_split("tile_ow", ow, num_outputs=2)
    elif num_tile == 3:  # for mali gpu
        co, _, vc = cfg.define_split("tile_co", co, num_outputs=3)
        oh, _, vh = cfg.define_split("tile_oh", oh, num_outputs=3)
        ow, _, vw = cfg.define_split("tile_ow", ow, num_outputs=3)
    else:
        raise RuntimeError("Invalid num_tile")
    cfg.define_reorder(
        "reorder_0",
        [n, co, oh, ow, ci, kh, kw, vh, vw, vc],
        policy="candidate",
        candidate=[
            [n, co, oh, ow, ci, kh, kw, vh, vw, vc],
            [n, co, oh, ow, ci, kh, kw, vc, vh, vw],
        ],
    )
    cfg.define_annotate("ann_reduce", [kh, kw], policy="try_unroll")
    cfg.define_annotate("ann_spatial", [vh, vw, vc], policy="try_unroll_vec")
    # fallback support: seed the config from tophub reference logs when there
    # are no tuning records for this workload.
    if cfg.is_fallback:
        if num_tile == 2:  # arm cpu
            ref_log = autotvm.tophub.load_reference_log(
                "arm_cpu", "rk3399", "conv2d_nchw_spatial_pack.arm_cpu"
            )
            cfg.fallback_with_reference_log(ref_log)
        elif num_tile == 3:  # mali gpu
            ref_log = autotvm.tophub.load_reference_log(
                "mali", "rk3399", "conv2d_nchw_spatial_pack.mali"
            )
            cfg.fallback_with_reference_log(ref_log)
    # ====================================================================
    # Inner tile sizes chosen by the config (last factor of each split).
    VC = cfg["tile_co"].size[-1]
    VH = cfg["tile_oh"].size[-1]
    VW = cfg["tile_ow"].size[-1]
    kvshape = (CO // VC, CI, KH, KW, VC)
    ovshape = (N, CO // VC, OH // VH, OW // VW, VH, VW, VC)
    oshape = (N, CO, OH, OW)
    if dilation_h != 1 or dilation_w != 1:
        # undilate input data: gather the dilated taps explicitly per (kh, kw)
        dvshape = (N, OH // VH, OW // VW, CI, KH, KW, VH, VW)
        data_vec = te.compute(
            dvshape,
            lambda n, h, w, ci, kh, kw, vh, vw: data_pad[n][ci][
                (h * VH + vh) * HSTR + kh * dilation_h
            ][(w * VW + vw) * WSTR + kw * dilation_w],
            name="data_vec_undilated",
        )
    else:
        # Dense case: pack overlapping (VH*HSTR+KH-1, VW*WSTR+KW-1) windows.
        dvshape = (N, OH // VH, OW // VW, CI, VH * HSTR + KH - 1, VW * WSTR + KW - 1)
        data_vec = te.compute(
            dvshape,
            lambda n, h, w, ci, vh, vw: data_pad[n][ci][h * VH * HSTR + vh][w * VW * WSTR + vw],
            name="data_vec",
        )
    if autotvm.GLOBAL_SCOPE.in_tuning:
        # use "kernel_autotvm" instead of "kernel" to avoid naming conflict with OpenCL keyword
        kernel_vec = tvm.te.placeholder(kvshape, kernel.dtype, name="kernel_autotvm")
    else:
        if pre_packed:
            kernel_vec = kernel
        else:
            kernel_vec = te.compute(
                kvshape,
                lambda co, ci, kh, kw, vc: kernel[co * VC + vc][ci][kh][kw],
                name="kernel_vec",
            )
    ci = te.reduce_axis((0, CI), name="ci")
    kh = te.reduce_axis((0, KH), name="kh")
    kw = te.reduce_axis((0, KW), name="kw")
    if dilation_h != 1 or dilation_w != 1:
        conv = te.compute(
            ovshape,
            lambda n, co, h, w, vh, vw, vc: te.sum(
                data_vec[n, h, w, ci, kh, kw, vh, vw].astype(out_dtype)
                * kernel_vec[co, ci, kh, kw, vc].astype(out_dtype),
                axis=[ci, kh, kw],
            ),
            name="conv",
        )
    else:
        conv = te.compute(
            ovshape,
            lambda n, co, h, w, vh, vw, vc: te.sum(
                data_vec[n, h, w, ci, vh * HSTR + kh, vw * WSTR + kw].astype(out_dtype)
                * kernel_vec[co, ci, kh, kw, vc].astype(out_dtype),
                axis=[ci, kh, kw],
            ),
            name="conv",
        )
    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod
    # Unpack the 7-D packed result back to plain NCHW.
    output = te.compute(
        oshape,
        lambda n, co, h, w: conv[
            n,
            idxdiv(co, VC),
            idxdiv(h, VH),
            idxdiv(w, VW),
            idxmod(h, VH),
            idxmod(w, VW),
            idxmod(co, VC),
        ],
        name="output_unpack",
        tag="spatial_conv2d_output",
    )
    return output
def schedule_conv2d_spatial_pack_nchw(cfg, s, data_vec, kernel_vec, conv, output, last):
    """Schedule for the NCHW spatial pack conv2d compute.

    Applies the reorder/unroll/vectorize decisions recorded in ``cfg`` to the
    packed convolution stage, tiles and parallelizes the unpack/fusion stage,
    and parallelizes the data/kernel packing stages.

    ``last`` is the final op of the fused group (may differ from ``output``
    when elementwise ops were fused after the conv).
    """
    n, co, oh, ow, vh, vw, vc = s[conv].op.axis
    ci, kh, kw = s[conv].op.reduce_axis
    # schedule conv: apply the tuned loop order and annotations
    cfg["reorder_0"].apply(s, conv, [n, co, oh, ow, ci, kh, kw, vh, vw, vc])
    cfg["ann_reduce"].apply(
        s,
        conv,
        [kh, kw],
        axis_lens=[get_const_int(kh.dom.extent), get_const_int(kw.dom.extent)],
        max_unroll=None,
        cfg=cfg,
    )
    cfg["ann_spatial"].apply(
        s,
        conv,
        [vh, vw, vc],
        axis_lens=[cfg["tile_oh"].size[-1], cfg["tile_ow"].size[-1], cfg["tile_co"].size[-1]],
        max_unroll=None,
        cfg=cfg,
    )
    # schedule fusion: tile the final stage with the same factors so the
    # packed conv can be computed at the outer tile loops
    n, co, h, w = s[last].op.axis
    co, vc = cfg["tile_co"].apply(s, last, co)
    oh, vh = cfg["tile_oh"].apply(s, last, h)
    ow, vw = cfg["tile_ow"].apply(s, last, w)
    s[last].reorder(n, co, oh, ow, vh, vw, vc)
    if last != output:
        # Fused elementwise ops exist: inline the unpack stage and annotate
        # the fused stage's inner axes instead.
        s[output].compute_inline()
        cfg["ann_spatial"].apply(
            s,
            last,
            [vh, vw, vc],
            axis_lens=[cfg["tile_oh"].size[-1], cfg["tile_ow"].size[-1], cfg["tile_co"].size[-1]],
            max_unroll=16,
            cfg=cfg,
        )
    s[conv].compute_at(s[last], ow)
    # mark parallel
    s[last].parallel(co)
    # The packed data tensor has a different rank depending on whether the
    # undilated variant was used; parallelize over its height-tile axis.
    if data_vec.op.name == "data_vec_undilated":
        _, h, _, _, _, _, _, _ = s[data_vec].op.axis
    else:
        _, h, _, _, _, _ = s[data_vec].op.axis
    s[data_vec].parallel(h)
    if kernel_vec.op.name == "kernel_vec":
        if not autotvm.GLOBAL_SCOPE.in_tuning:
            # During tuning the kernel is a placeholder, so there is nothing
            # to schedule; otherwise parallelize kernel packing over CO tiles.
            co, _, _, _, _ = s[kernel_vec].op.axis
            s[kernel_vec].parallel(co)
    elif kernel_vec.op.name == "kernel_vec_conv2d_transpose":  # for conv2d transpose
        co, _, _, _, _ = s[kernel_vec].op.axis
        s[kernel_vec].parallel(co)
    return s
def conv2d_spatial_pack_nhwc(cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile=2):
    """Spatial pack compute for Conv2d NHWC.

    Mirrors the NCHW variant: the input is packed into (OHI, OWI) spatial
    tiles and the kernel into OCI output-channel tiles, the conv is computed
    over the packed tensors, and the result is unpacked back to NHWC.
    The kernel is expected in HWIO layout (KH, KW, IC, OC).
    """
    out_dtype = out_dtype or data.dtype
    N, IH, IW, IC = get_const_tuple(data.shape)
    assert len(kernel.shape) == 4, "AlterOpLayout not enabled for NHWC yet"
    KH, KW, _, OC = get_const_tuple(kernel.shape)
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    dilated_kernel_h = (KH - 1) * dilation_h + 1
    dilated_kernel_w = (KW - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    OH = (IH + pad_top + pad_down - dilated_kernel_h) // HSTR + 1
    OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1
    data_pad = nn.pad(data, [0, pad_top, pad_left, 0], [0, pad_down, pad_right, 0])
    # ==================== define configuration space ====================
    # If it has dynamic shape in batch, we fix the split factor to 1
    n = cfg.axis(N) if isinstance(N, int) else cfg.axis(1)
    oc, oh, ow = cfg.axis(OC), cfg.axis(OH), cfg.axis(OW)
    ic, kh, kw = cfg.reduce_axis(IC), cfg.reduce_axis(KH), cfg.reduce_axis(KW)
    if num_tile == 2:  # for arm cpu
        oco, oci = cfg.define_split("tile_co", oc, num_outputs=2)
        oho, ohi = cfg.define_split("tile_oh", oh, num_outputs=2)
        owo, owi = cfg.define_split("tile_ow", ow, num_outputs=2)
    elif num_tile == 3:  # for mali gpu
        oco, _, oci = cfg.define_split("tile_co", oc, num_outputs=3)
        oho, _, ohi = cfg.define_split("tile_oh", oh, num_outputs=3)
        owo, _, owi = cfg.define_split("tile_ow", ow, num_outputs=3)
    else:
        raise RuntimeError("Invalid num_tile")
    cfg.define_reorder(
        "reorder_conv",
        [n, oho, owo, oco, kh, kw, ic, ohi, owi, oci],
        policy="candidate",
        candidate=[
            [n, oho, owo, oco, kh, kw, ic, ohi, owi, oci],
            [n, oho, owo, oco, ohi, kh, kw, ic, owi, oci],
            [n, oho, owo, oco, ohi, kh, kw, owi, ic, oci],
            [n, oho, owo, ohi, oco, kh, kw, owi, ic, oci],
        ],
    )
    cfg.define_annotate("ann_reduce", [kh, kw], policy="try_unroll")
    cfg.define_annotate("ann_spatial", [owi, oci], policy="try_unroll_vec")
    # ====================================================================
    # If there are no tuning records, use this config
    if cfg.is_fallback:

        def _tile_size(axis, candidates):
            # First candidate that evenly divides the axis, else 1.
            for candidate in candidates:
                tiles_divisible_by_candidate = axis % candidate == 0
                if tiles_divisible_by_candidate:
                    return candidate
            return 1

        # For data tensors with unity height and width we can leave it to the
        # backend to vectorize the inner loop. This has been observed to be more
        # performant on SVE targets with a vector width > 128bits.
        target = Target.current(allow_none=False)
        if target.features.has_sve and OW == OH and OW == 1:
            tile_size = [OC]
            vectorize = "none"
        else:
            # Tile size 8 results in efficient vectorization for these schedules.
            # If the axis is not divisible by 8, try 4
            tile_size = [8, 4]
            vectorize = "vec"
        cfg["tile_oh"] = SplitEntity([-1, 1])
        # NOTE(review): tile_ow always uses [8, 4] here while tile_co uses the
        # SVE-dependent tile_size — looks intentional (OW == 1 on the SVE
        # branch), but confirm against upstream history.
        cfg["tile_ow"] = SplitEntity([-1, _tile_size(OW, [8, 4])])
        cfg["tile_co"] = SplitEntity([-1, _tile_size(OC, tile_size)])
        cfg["ann_spatial"] = AnnotateEntity(["none", vectorize])
        cfg["ann_reduce"] = AnnotateEntity(["none", "none"])
        cfg["reorder_conv"] = ReorderEntity([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
        cfg["compat"] = OtherOptionEntity(0)
    # Inner tile sizes chosen by the config (last factor of each split).
    OCI = cfg["tile_co"].size[-1]
    OHI = cfg["tile_oh"].size[-1]
    OWI = cfg["tile_ow"].size[-1]
    OCO = OC // OCI
    OHO = OH // OHI
    OWO = OW // OWI
    kvshape = (OCO, KH, KW, IC, OCI)
    ovshape = (N, OHO, OWO, OCO, OHI, OWI, OCI)
    oshape = (N, OH, OW, OC)
    if dilation_h != 1 or dilation_w != 1:
        # undilate input data: gather dilated taps explicitly per (kh, kw)
        dvshape = (N, OHO, OWO, KH, KW, IC, OHI, OWI)
        data_vec = te.compute(
            dvshape,
            lambda n, oho, owo, kh, kw, ic, ohi, owi: data_pad[n][
                (oho * OHI + ohi) * HSTR + kh * dilation_h
            ][(owo * OWI + owi) * WSTR + kw * dilation_w][ic],
            name="data_vec_undilated",
        )
    else:
        # Dense case: pack overlapping strided windows.
        dvshape = (N, OHO, OWO, KH + (OHI - 1) * HSTR, KW + (OWI - 1) * WSTR, IC)
        data_vec = te.compute(
            dvshape,
            lambda n, oho, owo, ohi, owi, ic: data_pad[n][oho * OHI * HSTR + ohi][
                owo * OWI * WSTR + owi
            ][ic],
            name="data_vec",
        )
    if autotvm.GLOBAL_SCOPE.in_tuning:
        # During tuning a placeholder stands in for the packed kernel.
        kernel_vec = tvm.te.placeholder(kvshape, kernel.dtype, name="kernel")
    else:
        kernel_vec = te.compute(
            kvshape,
            lambda oco, kh, kw, ic, oci: kernel[kh][kw][ic][oco * OCI + oci],
            name="kernel_vec",
        )
    ic = te.reduce_axis((0, IC), name="ic")
    kh = te.reduce_axis((0, KH), name="kh")
    kw = te.reduce_axis((0, KW), name="kw")
    if dilation_h != 1 or dilation_w != 1:
        conv = te.compute(
            ovshape,
            lambda n, oho, owo, oco, ohi, owi, oci: te.sum(
                data_vec[n, oho, owo, kh, kw, ic, ohi, owi].astype(out_dtype)
                * kernel_vec[oco, kh, kw, ic, oci].astype(out_dtype),
                axis=[ic, kh, kw],
            ),
            name="conv",
        )
    else:
        conv = te.compute(
            ovshape,
            lambda n, oho, owo, oco, ohi, owi, oci: te.sum(
                data_vec[n, oho, owo, ohi * HSTR + kh, owi * WSTR + kw, ic].astype(out_dtype)
                * kernel_vec[oco, kh, kw, ic, oci].astype(out_dtype),
                axis=[ic, kh, kw],
            ),
            name="conv",
        )
    idiv = tvm.tir.indexdiv
    imod = tvm.tir.indexmod
    # Unpack the 7-D packed result back to plain NHWC.
    output = te.compute(
        oshape,
        lambda n, oho, owo, oc: conv[n][idiv(oho, OHI)][idiv(owo, OWI)][idiv(oc, OCI)][
            imod(oho, OHI)
        ][imod(owo, OWI)][imod(oc, OCI)],
        name="output_unpack",
        tag="spatial_conv_output_NHWC",
    )
    return output
def schedule_conv2d_spatial_pack_nhwc(cfg, s, op, output):
    """Spatial Pack schedule for Conv2d NHWC.

    Walks the compute DAG backwards from the unpack op to recover the conv,
    packed data/kernel, and padded data stages, then applies the tuned tiling
    and annotations recorded in ``cfg``.
    """
    unpack = op.output(0)
    conv = unpack.op.input_tensors[0]
    data_vec = conv.op.input_tensors[0]
    kernel_vec = conv.op.input_tensors[1]
    data_pad = data_vec.op.input_tensors[0]
    OWI = cfg["tile_ow"].size[-1]
    OCI = cfg["tile_co"].size[-1]
    # schedule unpack/output
    if output != unpack:
        s[unpack].compute_inline()
    n, oh, ow, oc = s[output].op.axis
    oco, oci = cfg["tile_co"].apply(s, output, oc)
    oho, ohi = cfg["tile_oh"].apply(s, output, oh)
    owo, owi = cfg["tile_ow"].apply(s, output, ow)
    s[output].reorder(n, oho, owo, oco, ohi, owi, oci)
    cfg["ann_spatial"].apply(s, output, [owi, oci], axis_lens=[OWI, OCI], max_unroll=16, cfg=cfg)
    # "compat" selects which outer axis (width tile or channel tile) the conv
    # and packing stages are computed at.
    cfg.define_knob("compat", [0, 1])
    compat_axis = [owo, oco][cfg["compat"].val]  # pylint: disable=R1706
    s[conv].compute_at(s[output], compat_axis)
    paxis = s[output].fuse(n, oho)
    s[output].parallel(paxis)
    # schedule conv
    n, oho, owo, oco, ohi, owi, oci = s[conv].op.axis
    ic, kh, kw = s[conv].op.reduce_axis
    cfg["reorder_conv"].apply(s, conv, [n, oho, owo, oco, kh, kw, ic, ohi, owi, oci])
    cfg["ann_reduce"].apply(
        s,
        conv,
        [kh, kw],
        axis_lens=[get_const_int(kh.dom.extent), get_const_int(kw.dom.extent)],
        max_unroll=16,
        cfg=cfg,
    )
    cfg["ann_spatial"].apply(s, conv, [owi, oci], axis_lens=[OWI, OCI], max_unroll=16, cfg=cfg)
    # schedule data_vec, data_pad and kernel_vec
    compat_axis = [owo, oco][cfg["compat"].val]  # pylint: disable=R1706
    s[kernel_vec].compute_at(s[conv], compat_axis)
    s[data_vec].compute_at(s[conv], compat_axis)
    # Inlining kernel vec brings a performance improvement, but the tuner seems to not
    # like it, so inline only when we are using the fallback config
    if cfg.is_fallback:
        s[kernel_vec].compute_inline()
    # The packed data tensor's rank depends on whether the undilated variant
    # was used; recover its axes so data_pad can be computed at the batch axis.
    if data_vec.op.name == "data_vec_undilated":
        n, oho, owo, kh, kw, ic, ohi, owi = s[data_vec].op.axis
    else:
        n, oho, owo, ohi, owi, ic = s[data_vec].op.axis
    s[data_pad].compute_at(s[data_vec], n)
    return s
| 17,788 | 36.293501 | 99 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/conv1d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, no-else-return, unused-argument, import-outside-toplevel
"""Conv1D schedule for ARM CPU"""
from __future__ import absolute_import as _abs
from tvm import autotvm
from .mprofile.dsp.conv1d import (
conv1d_nwc_dsp_compute,
conv1d_nwc_dsp_schedule,
)
@autotvm.register_topi_compute("conv1d_nwc_dsp.arm_cpu")
def conv1d_nwc_dsp(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute conv1d with v7e-m DSP instructions.

    Thin autotvm-registered wrapper; all work is delegated to the
    mprofile DSP compute implementation.
    """
    return conv1d_nwc_dsp_compute(cfg, data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv1d_nwc_dsp.arm_cpu")
def schedule_conv1d_nwc_dsp(cfg, outs):
    """Create the schedule for conv1d_nwc_dsp (delegates to the DSP schedule)."""
    return conv1d_nwc_dsp_schedule(cfg, outs)
| 1,521 | 39.052632 | 105 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/qnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Contains TVMScript implementations of some QNN operators for Arm.
Currently, the only ops with compute functions are fused regular and depthwise convolutions for
Arm Cortex-M with DSP. Additionally, these functions explicitly do not support padding - it
must be done in a separate Relay op for memory reasons.
"""
from typing import Callable, Dict, Tuple
import tvm
from tvm import te, tir, TVMError
from tvm.script import tir as T
from tvm.tir import const
from ..utils import get_const_tuple
from .mprofile.dsp.micro_kernel import tensordot
def _int_ceil_division(x, y):
return -(x // -y)
def _compute_output_dim(data_length, kernel_length, stride):
return _int_ceil_division(data_length + 1 - kernel_length, stride)
def _pick_num_outputs(out_width):
"""Guess a good value for num_outputs."""
assert out_width > 1
# num_outputs is capped at 8
for i in range(2, min(out_width + 1, 8)):
if out_width % i == 0:
return i
raise TVMError(f"Cannot pick a good num_outputs value for out_width = {out_width}!")
def _pick_tensordot_impl(attrs, inputs, num_outputs=2, is_depthwise=False):
    """Helper function that chooses the right implementation of micro_kernel.tensordot.

    Takes as input the parameters of the conv2d, and returns a tuple of TWO (function_name,
    function_code). The first pair (the aligned one) is for even numbered output channels, and the
    second pair (the offset one) is for odd-numbered output channels. This function is used for
    regular and depthwise convolutions.

    We need different implementations for even vs odd numbered output channels, because the "start"
    of an odd output channel in the data tensor or kernel might or might not be on a word boundary,
    and the tensordot code expects all input pointers to be word-aligned.
    """
    data, kernel = inputs[0:2]
    # inputs[10] is the requantize output zero point, a constant tensor with a
    # single-expression body.
    rq_output_zero_point_const = inputs[10]
    assert len(rq_output_zero_point_const.op.body) == 1
    output_zero_point = rq_output_zero_point_const.op.body[0]
    _, stride_w = get_const_tuple(attrs.strides)
    if is_depthwise:
        # Depthwise expects NCHW data and IOHW kernel layouts.
        assert attrs.data_layout == "NCHW"
        assert attrs.kernel_layout == "IOHW"
        _, _, height, width = get_const_tuple(data.shape)
        _, out_channels, kernel_h, kernel_w = get_const_tuple(kernel.shape)
        dimensions = (width, kernel_h, kernel_w)
        in_stride = stride_w
        # Per-output-channel data slice size; its parity decides alignment.
        data_per_oc_size = height * width
    else:
        # Regular conv expects NHWC data and OHWI kernel layouts.
        assert attrs.data_layout == "NHWC"
        assert attrs.kernel_layout == "OHWI"
        _, height, width, in_channels = get_const_tuple(data.shape)
        out_channels, kernel_h, kernel_w, _ = get_const_tuple(kernel.shape)
        dimensions = (width * in_channels, kernel_h, kernel_w * in_channels)
        in_stride = in_channels * stride_w
        # All output channels read the same data, so no per-channel offset.
        data_per_oc_size = 0
    assert attrs.out_layout is not None
    if attrs.out_layout == "NHWC":
        out_stride = out_channels
    elif attrs.out_layout == "NCHW":
        out_stride = 1
    else:
        raise ValueError(f"Unsupported output layout {attrs.out_layout}!")
    x_strides = (in_stride, out_stride)
    # Word-aligned variant: all pointers start exactly where they claim.
    aligned_func = tensordot.tensordot_int16_impl(
        num_outputs,
        dimensions,
        (0, 0, 0),
        x_strides,
        output_zero_point=output_zero_point,
    )
    # Offset variant: data/kernel starts may be one halfword past the word
    # boundary when the per-channel slice sizes are odd.
    kernel_per_oc_size = dimensions[1] * dimensions[2]
    offsets = (data_per_oc_size % 2, kernel_per_oc_size % 2, 0)
    offset_func = tensordot.tensordot_int16_impl(
        num_outputs,
        dimensions,
        offsets,
        x_strides,
        output_zero_point=output_zero_point,
    )
    return (aligned_func, offset_func)
def _make_tscript_ptr(buffer, offset, length, dtype="int16"):
    """Build a TVMScript access pointer into ``buffer``.

    Returns a ``T.tvm_access_ptr`` handle covering ``length`` elements of the
    given ``dtype`` starting at element ``offset``; the trailing ``1`` is the
    read/write access mask.
    """
    return T.tvm_access_ptr(
        T.type_annotation(dtype=dtype),
        buffer.data,
        offset,
        length,
        1,
        dtype="handle",
    )
def _bias_ptr(bias, c):
    """Access pointer to the int32 bias value for output channel ``c``."""
    return _make_tscript_ptr(bias, c, 1, dtype="int32")
def _scale_ptr(scale, c):
    """Access pointer to the requantization scale for output channel ``c``.

    Read as int32 even though the buffer is nominally float32 (see the
    comment in the generated PrimFunc).
    """
    return _make_tscript_ptr(scale, c, 1, dtype="int32")
def _make_tscript_call(func_name, *args):
    """Emit a TVMScript extern call statement to ``func_name`` with ``args``."""
    return T.evaluate(T.call_extern(func_name, *args, dtype="int32"))
def _make_conv2d_primfunc(
    output_dimensions: Tuple[int, int, int, int],
    buffer_shapes: Tuple,
    aligned_func: Tuple[str, str],
    offset_func: Tuple[str, str],
    ptr_gens: Tuple[Callable, Callable],
    output_layout: str = "NHWC",
) -> tir.function.PrimFunc:
    """Makes a TIR PrimFunc computing Conv2D using a call to tensordot.

    Can be used to generate regular, depthwise, and grouped Conv2D operators by passing different
    arguments and ptr_gen functions. However, it only works for Conv2D operators where the height
    stride of the tensor is divisible by two.

    Parameters
    ----------
    output_dimensions : Tuple[int, int, int, int]
        A tuple containing the out_height, out_width, out_channels, and desired num_outputs values
        in that order.

    buffer_shapes: Tuple[tvm.ir.container.Array]
        The shapes of the data, kernel, bias, scale, and output tensors, in that order. Each shape
        should be a TVM Array.

    aligned_func: Tuple[str, str]
        A tuple containing the (name, C implementation) of a word-aligned tensordot operator.

    offset_func: Tuple[str, str]
        A tuple containing the (name, C implementation) of a word-unaligned tensordot operator. Can
        be a tuple of empty strings if the Conv2D in question does not need an unaligned operator.

    ptr_gens: Tuple[Callable, Callable]
        A tuple of two functions to generate data and kernel access pointers. They should take as
        inputs the buffer, (y, x, c) indices, and an alignment offset. They should return a
        T.tvm_access_ptr object which can be used in T.call_extern.

    output_layout: str
        The tensor layout that will be produced by the generated PrimFunc. Should be NHWC or NCHW.
    """
    out_height, out_width, out_channels, num_outputs = output_dimensions
    data_shape, kernel_shape, bias_shape, scale_shape, output_shape = buffer_shapes
    aligned_func_name, aligned_func_code = aligned_func
    offset_func_name, offset_func_code = offset_func
    data_ptr, kernel_ptr = ptr_gens
    # If the functions are identical, we can skip the second loop
    if aligned_func_name == offset_func_name:
        aligned_channels = out_channels
        offset_channels = 0
        c_step = const(1)
    else:
        # Even channels go to the aligned loop, odd ones to the offset loop.
        aligned_channels = out_channels // 2
        offset_channels = out_channels // 2
        c_step = const(2)

    def output_ptr(output, y, x, c):
        # Flat element offset of output[(y, x*num_outputs, c)] in the chosen layout.
        if output_layout == "NHWC":
            return _make_tscript_ptr(
                output,
                y * const(out_width * out_channels) + x * const(out_channels * num_outputs) + c,
                1,
            )
        elif output_layout == "NCHW":
            return _make_tscript_ptr(
                output,
                c * const(out_height * out_width) + y * const(out_width) + x * const(num_outputs),
                1,
            )
        else:
            raise TVMError(f"Unsupported out_layout '{output_layout}'!")

    @T.prim_func
    def biased_quantized_conv2d(
        data_handle: T.handle,
        kernel_handle: T.handle,
        bias_handle: T.handle,
        scale_handle: T.handle,
        output_handle: T.handle,
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        data = T.match_buffer(data_handle, data_shape, dtype="int16")
        kernel = T.match_buffer(kernel_handle, kernel_shape, dtype="int16")
        bias = T.match_buffer(bias_handle, bias_shape, dtype="int32")
        # We don't specify a data type for the requantization scale, even though we will read it as
        # an int32. This is because we must pretend it is a float32, as Relay's requantize op only
        # allows floating point scales.
        scale = T.match_buffer(scale_handle, scale_shape)
        output = T.match_buffer(output_handle, output_shape, dtype="int16")
        # This hack prevents TVM from seeing these variables as "unused". I should be using T.reads
        # and T.writes, but they don't work. I think it's an issue with BufferTouchedDomain.
        # pylint: disable=unused-variable
        output[0, 0, 0, 0] = 0
        __1 = data[0, 0, 0, 0]
        __2 = kernel[0, 0, 0, 0]
        __3 = bias[0, 0, 0, 0]
        __4 = scale[0]
        # pylint: enable=unused-variable
        # Aligned channels: every c_step-th channel starting at 0.
        for c_ax, y_ax, x_ax in T.grid(
            const(aligned_channels), const(out_height), const(out_width // num_outputs)
        ):
            with T.block("conv2d_aligned"):
                T.block_attr({"pragma_import_c": aligned_func_code})
                y, x, c_interval = T.axis.remap("SSS", [y_ax, x_ax, c_ax])
                c = c_interval * c_step
                _make_tscript_call(
                    aligned_func_name,
                    output_ptr(output, y, x, c),
                    data_ptr(data, y, x, c),
                    kernel_ptr(kernel, c),
                    _bias_ptr(bias, c),
                    _scale_ptr(scale, c),
                )
        # Offset channels: the odd channels, handled by the unaligned variant
        # (loop extent is 0 when the two variants are identical).
        for c_ax, y_ax, x_ax in T.grid(
            const(offset_channels), const(out_height), const(out_width // num_outputs)
        ):
            with T.block("conv2d_offset"):
                T.block_attr({"pragma_import_c": offset_func_code})
                y, x, c_interval = T.axis.remap("SSS", [y_ax, x_ax, c_ax])
                c = c_interval * c_step + 1
                _make_tscript_call(
                    offset_func_name,
                    output_ptr(output, y, x, c),
                    data_ptr(data, y, x, c, offset=1),
                    kernel_ptr(kernel, c, offset=1),
                    _bias_ptr(bias, c),
                    _scale_ptr(scale, c),
                )

    return biased_quantized_conv2d
def qnn_conv2d(attrs, inputs, out_type):
    """Compute for qnn.conv2d with NHWC layout.

    Note that this is a DIFFERENT layout from the Hexagon variant, because they have special
    instructions Cortex-M doesn't have. We expect the kernel to have OHWI layout. We also assume
    that padding is not necessary, as it will have been done by another pass.
    """
    # Make a few checks to unpack the function arguments and ensure it was called with the right
    # arguments. Note that unlike most schedules, qnn_conv2d does not use a wrapper.
    assert len(inputs) == 11
    assert not any(get_const_tuple(attrs.padding))
    data, kernel, _izp, _kzp, _iscale, _kscale, bias, scale = inputs[0:8]
    _, height, width, in_channels = get_const_tuple(data.shape)
    out_channels, kernel_h, kernel_w, _ = get_const_tuple(kernel.shape)
    y_stride, x_stride = get_const_tuple(attrs.strides)
    out_height = _compute_output_dim(height, kernel_h, y_stride)
    out_width = _compute_output_dim(width, kernel_w, x_stride)
    # Decide how many sums our function should have running at the same time. Doing
    # this lets us do "more work" for each memory load, but doing too many of them causes us to run
    # out of registers. Currently this is set to the smallest value greater than one that divides
    # the output width, but autotuning this value would improve performance a lot.
    num_outputs = _pick_num_outputs(out_width)
    # Next, decide whether whether we need "parity alternation". For example, if we have an
    # 8x3x3x3 kernel (8 output channels, height 3, width 3, input channels 3) in the OHWI layout,
    # then every output channel kernel slice will be 27 halfwords. This means every other output
    # channel will not be word aligned, which will cause slowness/crashes!
    # We solve this problem by handling the "aligned" and "offset" output channels with different
    # versions of our tensordot function. The "aligned func" assumes that the start positions of the
    # output, data, and kernel are given exactly by their pointer. The "offset" version assumes that
    # the "true" start of the output is the value in the output pointer, plus an offset of 0 or 1.
    # _pick_tensordot_impl decides whether this is the case. If not, we only want to generate one
    # function (to save flash), so offset_func is a tuple of empty strings.
    aligned_func, offset_func = _pick_tensordot_impl(attrs, inputs, num_outputs, False)

    # We need to disable pylint's unused argument checker, as the kwarg offset is unused but must
    # be present for compatibility. We cannot add an underscore as we normally would, as this makes
    # the keyword not match.
    # pylint: disable=unused-argument
    def data_ptr(buffer, y, x, c, offset=0):
        # All output channels read from the same data window, so the channel
        # index and alignment offset are ignored here.
        return _make_tscript_ptr(
            buffer,
            y * const(y_stride * width * in_channels)
            + x * const(x_stride * num_outputs * in_channels),
            1,
        )

    # pylint: enable=unused-argument
    def kernel_ptr(buffer, c, offset=0):
        # Per-channel kernel slice; the offset backs the pointer up one
        # halfword so the unaligned tensordot variant starts word-aligned.
        return _make_tscript_ptr(
            buffer,
            c * const(kernel_h * kernel_w * in_channels) - offset,
            1,
        )

    prim_func = _make_conv2d_primfunc(
        (out_height, out_width, out_channels, num_outputs),
        (data.shape, kernel.shape, bias.shape, scale.shape, out_type.shape),
        aligned_func,
        offset_func,
        (data_ptr, kernel_ptr),
        output_layout=attrs.out_layout,
    )
    output = te.extern_primfunc([data, kernel, bias, scale], prim_func, name="tir", dtype="int16")
    return [output]
def schedule_qnn_conv2d(_attrs, _outs, _target):
    """Schedule function for qnn.conv2d.

    Returns None: the compute is an extern PrimFunc, so there is nothing to schedule.
    """
    return None
def qnn_depthwise_conv2d(attrs, inputs, out_type):
    """Compute for qnn.depthwise_conv2d with NCHW layout.

    Works basically the same way as regular conv2d - see above.
    """
    assert len(inputs) == 11
    assert not any(get_const_tuple(attrs.padding))
    data, kernel, _izp, _kzp, _iscale, _kscale, bias, scale = inputs[0:8]
    _, _, height, width = get_const_tuple(data.shape)
    _, out_channels, kernel_h, kernel_w = get_const_tuple(kernel.shape)
    y_stride, x_stride = get_const_tuple(attrs.strides)
    out_height = _compute_output_dim(height, kernel_h, y_stride)
    out_width = _compute_output_dim(width, kernel_w, x_stride)
    num_outputs = _pick_num_outputs(out_width)
    aligned_func, offset_func = _pick_tensordot_impl(attrs, inputs, num_outputs, True)

    def data_ptr(buffer, y, x, c, offset=0):
        # In NCHW depthwise, each channel has its own height*width data slice.
        # When that slice is an odd number of halfwords, odd channels start
        # one halfword off a word boundary: back the pointer up by one so the
        # unaligned tensordot variant starts word-aligned.
        if height * width % 2 == 1:
            x_ptr_offset = tvm.tir.const(-1)
        else:
            x_ptr_offset = tvm.tir.const(0)
        return _make_tscript_ptr(
            buffer,
            c * const(width * height)
            + y * const(y_stride * width)
            + x * const(x_stride * num_outputs)
            + offset * x_ptr_offset,
            1,
        )

    def kernel_ptr(buffer, c, offset=0):
        # Per-channel kernel slice, likewise backed up by the alignment offset.
        return _make_tscript_ptr(
            buffer,
            c * tvm.tir.const(kernel_h * kernel_w) - offset,
            1,
        )

    prim_func = _make_conv2d_primfunc(
        (out_height, out_width, out_channels, num_outputs),
        (data.shape, kernel.shape, bias.shape, scale.shape, out_type.shape),
        aligned_func,
        offset_func,
        (data_ptr, kernel_ptr),
        output_layout=attrs.out_layout,
    )
    output = te.extern_primfunc([data, kernel, bias, scale], prim_func, name="tir", dtype="int16")
    return [output]
def schedule_qnn_depthwise_conv2d(_attrs, _outs, _target):
    """Schedule function for qnn.depthwise_conv2d.

    Returns None: the compute is an extern PrimFunc, so there is nothing to schedule.
    """
    return None
def _make_unrolled_conv2d_primfunc(
    output_dimensions: Tuple[int, int, int],
    buffer_shapes: Tuple[Tuple, Tuple, Tuple, Tuple, Tuple],
    function_names: Dict[Tuple, str],
    function_code: str,
    ptr_gens: Tuple[Callable, Callable],
    output_layout: str = "NHWC",
) -> tir.function.PrimFunc:
    """Makes a TIR PrimFunc computing Conv2D using a call to tensordot.

    Can be used to generate regular, depthwise, and grouped Conv2D operators by passing different
    arguments and ptr_gen functions. Takes some of the same arguments as _make_conv2d_primfunc, but
    requires the tensordot function variations to be passed differently. The generated PrimFunc is
    similar to the one produced by _make_conv2d_primfunc, but unrolls the height and width loops
    over the input tensor. This results in longer code, but unlike _make_conv2d_primfunc this
    function does not require the height stride be an even number of words.

    This is required to compute layer 25 in MobileNetV1 models, among other things.

    Parameters
    ----------
    output_dimensions : Tuple[int, int, int]
        A tuple containing the out_height, out_width, and out_channels values, in that order.

    buffer_shapes: Tuple[tvm.ir.container.Array]
        The shapes of the data, kernel, bias, scale, and output tensors, in that order. Each shape
        should be a TVM Array.

    function_names: Dict[Tuple, str]
        A dictionary mapping a tuple of (data, kernel, output) alignments to the name of the
        appropriate tensordot function.

    function_code: str
        A string containing all versions of tensordot function our PrimFunc needs. This will
        usually be a string of 4+ function variations concatenated together.

    ptr_gens: Tuple[Callable, Callable]
        A tuple of two functions to generate data and kernel access pointers. They should take as
        inputs the buffer, (y, x, c) indices, and an alignment offset. They should return a
        T.tvm_access_ptr object which can be used in T.call_extern.

    output_layout: str
        The tensor layout that will be produced by the generated PrimFunc. Should be NHWC or NCHW.
    """
    out_height, out_width, out_channels = output_dimensions
    data_shape, kernel_shape, bias_shape, scale_shape, output_shape = buffer_shapes
    data_ptr, kernel_ptr = ptr_gens

    def output_ptr(output, y, c):
        # Pointer to the first element written for output row ``y`` / channel ``c``,
        # laid out according to ``output_layout``.
        if output_layout == "NHWC":
            return _make_tscript_ptr(output, y * const(out_width * out_channels) + c, 1)
        elif output_layout == "NCHW":
            return _make_tscript_ptr(
                output, c * const(out_height * out_width) + y * const(out_width), 1
            )
        else:
            raise TVMError(f"Unsupported out_layout '{output_layout}'!")

    def make_row_calls(buffers, c_var, out_height):
        # Emit one fully unrolled call per (row, channel-pair) combination. The
        # function_names key (y+c) % 2, c % 2, 0 selects the tensordot variant whose
        # pointer alignments match this position — NOTE(review): key semantics assumed
        # from the function_names docstring above; confirm against tensordot impls.
        output, data, kernel, bias, scale = buffers
        for y in range(out_height):
            for c in range(2):
                _make_tscript_call(
                    function_names[(y + c) % 2, c % 2, 0],
                    output_ptr(output, y, c_var + c),
                    data_ptr(data, y, c_var + c, offset=(y + c) % 2),
                    kernel_ptr(kernel, c_var + c, offset=c),
                    _bias_ptr(bias, c_var + c),
                    _scale_ptr(scale, c_var + c),
                )

    @T.prim_func
    def biased_quantized_conv2d(
        data_handle: T.handle,
        kernel_handle: T.handle,
        bias_handle: T.handle,
        scale_handle: T.handle,
        output_handle: T.handle,
    ) -> None:
        # Same setup is used as in _make_conv2d_primfunc
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        data = T.match_buffer(data_handle, data_shape, dtype="int16")
        kernel = T.match_buffer(kernel_handle, kernel_shape, dtype="int16")
        bias = T.match_buffer(bias_handle, bias_shape, dtype="int32")
        scale = T.match_buffer(scale_handle, scale_shape)
        output = T.match_buffer(output_handle, output_shape, dtype="int16")

        # Dummy read/write of each buffer — presumably keeps the buffers from being
        # elided since all real work happens inside call_extern; TODO confirm.
        # pylint: disable=unused-variable
        output[0, 0, 0, 0] = 0
        __1 = data[0, 0, 0, 0]
        __2 = kernel[0, 0, 0, 0]
        __3 = bias[0, 0, 0, 0]
        __4 = scale[0]
        # pylint: enable=unused-variable

        # Channels are processed two at a time (see make_row_calls), hence // 2.
        for c_ax in T.grid(out_channels // 2):
            with T.block("conv2ds"):
                # Attach the C source of the tensordot variants to the block so
                # codegen can emit it alongside the generated calls.
                T.block_attr({"pragma_import_c": function_code})
                c = T.axis.remap("S", [c_ax]) * 2
                make_row_calls((output, data, kernel, bias, scale), c, out_height)

    return biased_quantized_conv2d
def qnn_unrolled_depthwise_conv2d(attrs, inputs, out_type):
    """Compute for qnn.depthwise_conv2d with NCHW layout for convolutions with small width, height.

    Behaves similarly to qnn_depthwise_conv2d, but does not iterate over the output width and height
    and instead calls these functions explicitly. This gives a tiny performance boost in exchange
    for larger code size, but more importantly does not require out_width * out_height
    * y_stride % 2 == 0. This does, however, require y_stride == x_stride == 1.

    Parameters
    ----------
    attrs
        Operator attributes; must have zero padding and unit strides.
    inputs
        The 11 relay inputs; slots 0-7 hold data, kernel, zero points, scales,
        bias and requantize scale, slot 10 holds the requantize output zero point.
    out_type
        Type of the operator output, used for the generated PrimFunc's out buffer.

    Returns
    -------
    list
        A single-element list with the te.extern_primfunc output tensor.
    """
    assert len(inputs) == 11
    assert not any(get_const_tuple(attrs.padding))
    y_stride, x_stride = get_const_tuple(attrs.strides)
    assert y_stride == x_stride == 1

    data, kernel, _izp, _kzp, _iscale, _kscale, bias, scale = inputs[0:8]
    _, _, height, width = get_const_tuple(data.shape)
    _, out_channels, kernel_h, kernel_w = get_const_tuple(kernel.shape)
    # Note: strides were already read (and asserted to be 1) above; no need to
    # recompute them here (a duplicated read was removed).
    out_height = _compute_output_dim(height, kernel_h, y_stride)
    out_width = _compute_output_dim(width, kernel_w, x_stride)

    # The requantize output zero point must be a single constant.
    rq_output_zero_point_const = inputs[10]
    assert len(rq_output_zero_point_const.op.body) == 1
    output_zero_point = rq_output_zero_point_const.op.body[0]

    dimensions = (width, kernel_h, kernel_w)
    x_strides = (1, out_channels)

    # Generate a tensordot variant for every (data, kernel) halfword-alignment
    # combination the unrolled loop can encounter.
    func_names = {}
    impls = []
    for alignment in ((0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0)):
        func_name, impl = tensordot.tensordot_int16_impl(
            out_width, dimensions, alignment, x_strides, output_zero_point=output_zero_point
        )
        func_names[alignment] = func_name
        impls.append(impl)

    def data_ptr(buffer, y, c, offset=0):
        # Start of input row ``y`` in channel ``c``, shifted back by the alignment offset.
        return _make_tscript_ptr(buffer, c * const(width * height) + y * const(width) - offset, 1)

    def kernel_ptr(buffer, c, offset=0):
        # Start of channel ``c``'s kernel window, shifted back by the alignment offset.
        return _make_tscript_ptr(buffer, c * const(kernel_h * kernel_w) - offset, 1)

    prim_func = _make_unrolled_conv2d_primfunc(
        (out_height, out_width, out_channels),
        (data.shape, kernel.shape, bias.shape, scale.shape, out_type.shape),
        func_names,
        "\n".join(impls),
        (data_ptr, kernel_ptr),
        output_layout=attrs.out_layout,
    )
    output = te.extern_primfunc([data, kernel, bias, scale], prim_func, name="tir", dtype="int16")
    return [output]
def schedule_qnn_unrolled_depthwise_conv2d(_attrs, _outs, _target):
    """Schedule function for qnn.depthwise_conv2d.

    No extra scheduling is needed — the compute already yields a complete
    PrimFunc — so this always returns None.
    """
    return None
| 23,468 | 38.71066 | 100 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/conv2d_int8.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D int8 schedule on ARM"""
from tvm import te, target, autotvm
from ..utils import traverse_inline, get_const_tuple
from ..generic import conv2d as conv2d_generic
from .. import nn
from ...target import codegen
from ..nn.conv2d import _get_workload as _get_conv2d_workload, unpack_NCHWc_to_nchw
from ..x86.conv2d_int8 import _pack_data
from ..nn.utils import get_pad_tuple
from .tensor_intrin import dot_int8_int8_int32_neon_82, dot_int8_int8_int32_neon
from .conv2d_gemm import (
compute_conv2d_gemm_without_weight_transform,
schedule_conv2d_gemm_interleaved,
schedule_conv2d_gemm_native,
)
from .arm_utils import get_tiling_B_interleaved_t
def _get_default_config(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Populate ``cfg`` with a fallback int8 schedule for this workload.

    Chooses the 1x1-specialized fallback when the kernel is 1x1, otherwise
    the common int8 fallback, both with 4 int32 lanes and 4 int8 elements.
    """
    workload = _get_conv2d_workload(data, kernel, strides, padding, dilation, out_dtype)
    if workload.kernel_h == 1 and workload.kernel_w == 1:
        conv2d_generic.fallback_schedule_cpu_1x1_int8(
            cfg, workload, int32_lanes=4, num_int8_elements=4
        )
    else:
        conv2d_generic.fallback_schedule_cpu_common_int8(
            cfg, workload, int32_lanes=4, num_int8_elements=4
        )
@autotvm.register_topi_compute("conv2d_NCHWc_int8.arm_cpu")
def conv2d_NCHWc_int8(cfg, data, kernel, strides, padding, dilation, layout, out_layout, out_dtype):
    """Compute conv2d int8 with NCHWc layout.

    Accepts either already-packed 5-D NCHWc data (with a matching 7-D kernel)
    or plain 4-D NCHW data, which is packed on the fly (autotuning path).
    Defines the AutoTVM tuning space and delegates to nn.conv2d_NCHWc_int8.
    """
    # layout and out_layout are not used here,
    # we keep them for debug convenience when dumping autotvm workload
    if len(data.shape) == 5:  # data is in nchwc
        n, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape)
        in_channel = ic_chunk * ic_bn

        oc_chunk, ic_chunk, kh, kw, ic_bn, oc_bn = get_const_tuple(kernel.shape)[:6]
        num_filter = oc_chunk * oc_bn
    else:
        # data is nchw, implicitly treat it as nchw1c
        n, in_channel, ih, iw = get_const_tuple(data.shape)
        num_filter, _, kh, kw = get_const_tuple(kernel.shape)

    # Define autotvm tuning space
    is_kernel_1x1 = kh == 1 and kw == 1
    pt, pl, pb, pr = get_pad_tuple(padding, (kh, kw))
    sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    dh, dw = dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation)
    # Effective kernel extent after dilation, then standard output-size formula.
    dilated_kernel_h = (kh - 1) * dh + 1
    dilated_kernel_w = (kw - 1) * dw + 1
    oh = (ih - dilated_kernel_h + pt + pb) // sh + 1
    ow = (iw - dilated_kernel_w + pl + pr) // sw + 1

    # input and output should be a multiple of 8 (intrinsics are 8 lanes)
    cfg.define_split(
        "tile_ic", in_channel, num_outputs=2, filter=lambda y: y.size[-1] % min(8, in_channel) == 0
    )
    cfg.define_split(
        "tile_oc", num_filter, num_outputs=2, filter=lambda y: y.size[-1] % min(8, num_filter) == 0
    )
    cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda y: y.size[-1] <= 64)
    if is_kernel_1x1:
        cfg.define_knob("tile_oh", [1, 2] if oh > 1 else [1])
    else:
        cfg.define_knob("unroll_kw", [True, False])

    # If no config was set, we can fallback to NCHW config.
    if cfg.is_fallback:
        _get_default_config(
            cfg,
            te.placeholder((n, in_channel, ih, iw), dtype=data.dtype),
            te.placeholder((num_filter, in_channel, kh, kw), dtype=kernel.dtype),
            strides,
            padding,
            dilation,
            out_dtype,
        )
    # Pack data if raw 4-D data is provided.
    # This can only happen when autotuning.
    if len(data.shape) == 4:
        data, kernel = _pack_data(cfg, data, kernel)

    # Innermost kernel dimension = number of int8 elements per dot-product lane.
    n_elems = int(kernel.shape[-1])

    return nn.conv2d_NCHWc_int8(
        data, kernel, strides, padding, dilation, layout, out_layout, out_dtype, n_elems=n_elems
    )
def is_int8_hw_support(data_dtype, kernel_dtype):
    """Return True when int8 convolution intrinsics can be used on Arm.

    Three conditions must all hold:
    1) data and kernel share an int8-family dtype,
    2) the LLVM version (>= 8) supports the required instructions,
    3) the current target has ASIMD or dot-product features.
    """
    # 1) Check datatypes
    dtypes_ok = data_dtype == kernel_dtype and "int8" in data_dtype
    # 2) Check LLVM support
    llvm_ok = codegen.llvm_version_major() >= 8
    # 3) Check target features
    current_target = target.Target.current(allow_none=False)
    target_ok = bool(current_target.features.has_asimd or current_target.features.has_dotprod)
    return dtypes_ok and llvm_ok and target_ok
@autotvm.register_topi_schedule("conv2d_NCHWc_int8.arm_cpu")
def schedule_conv2d_NCHWc_int8(cfg, outs):
    """Create schedule for tensors.

    Walks the compute graph from the outputs, and for each conv2d_NCHWc_int8
    stage selects the appropriate Neon dot-product tensor intrinsic and applies
    the generic 1x1 or common NCHWc int8 CPU schedule.
    """
    s = te.create_schedule([x.op for x in outs])
    # NOTE(review): scheduled_ops is never appended to below — looks vestigial.
    scheduled_ops = []

    def _callback(op):
        # Schedule each conv stage found during inline traversal.
        if "conv2d_NCHWc_int8" in op.tag:
            conv_out = op.output(0)
            kernel_vec = conv_out.op.input_tensors[1]
            data_vec = conv_out.op.input_tensors[0]
            # Walk back through the (optional) packing stage to the raw data...
            data = (
                data_vec.op.input_tensors[0]
                if isinstance(data_vec.op, te.tensor.ComputeOp) and "pad" not in data_vec.op.tag
                else data_vec
            )
            # ...and through the (optional) padding stage.
            if isinstance(data.op, te.tensor.ComputeOp) and "pad" in data.op.tag:
                data_pad = data
                data = data_pad.op.input_tensors[0]

            args = [s, cfg, data_vec, kernel_vec, conv_out, outs[0]]
            # int8 conv kernel is 7-dim
            _, _, kh, kw, _, _, n_elems = get_const_tuple(kernel_vec.shape)
            assert n_elems == 4
            dtype = "uint" if data.dtype == "uint8" else "int"

            # Pick the strongest available dot-product intrinsic for this target.
            current_target = target.Target.current(allow_none=False)
            if current_target.features.has_dotprod:
                intrin = dot_int8_int8_int32_neon_82(int32_lanes=4, dtype=dtype)
            elif current_target.features.has_asimd:
                assert dtype == "int", "uint8 not supported if dot product is not available"
                intrin = dot_int8_int8_int32_neon()
            else:
                raise RuntimeError(
                    "Cannot schedule schedule_NCHWc_int8 without neon or arm v8.2 neon support"
                )

            # On raspberry pi 4s, we see poor performance when the fused
            # operations are inlined into the main computation body. These
            # fused ops dominated the runtime on small convolutions repeatedly
            # blow the cache. Using workloads from resnet50, inceptionv3, and
            # mobilenetv3, we empirically determine the size at which inline is
            # not worth it to be kernel heigh * kernel width < 500. These tests
            # were only run on raspberry pi 4, other arm cpus may have larger
            # caches where inlining has good performance.
            if target.Target.current().mcpu == "cortex-a72" and kh * kw < 500:
                inline_fused = False
            else:
                inline_fused = True

            if kh == 1 and kw == 1:
                conv2d_generic.schedule_conv_NCHWc_cpu_1x1_int8(
                    *args, int32_lanes=4, int8_elems=4, intrin=intrin, inline_fused=inline_fused
                )
            else:
                conv2d_generic.schedule_conv_NCHWc_cpu_common_int8(
                    *args, int32_lanes=4, int8_elems=4, intrin=intrin, inline_fused=inline_fused
                )

    traverse_inline(s, outs[0].op, _callback)
    return s
def conv2d_nchw_int8(data, kernel, strides, padding, dilation, out_dtype):
    """Compute conv2d with NCHW layout and int8 dtype.

    Runs the packed NCHWc int8 compute, then unpacks the result back to NCHW.
    """
    layout = "NCHW"
    # pylint: disable=no-value-for-parameter
    packed = conv2d_NCHWc_int8(data, kernel, strides, padding, dilation, layout, layout, out_dtype)
    return unpack_NCHWc_to_nchw(packed, out_dtype)
def schedule_conv2d_nchw_int8(outs):
    """Create the schedule for conv2d_nchw_int8 by reusing the NCHWc schedule."""
    # pylint: disable=no-value-for-parameter
    sched = schedule_conv2d_NCHWc_int8(outs)
    return sched
def _compute_conv2d_NHWC_quantized(
    cfg, data, kernel, strides, padding, dilation, out_dtype, interleave_A
):
    """Transform the weights for GEMM and run the shared quantized NHWC compute."""
    batch, in_height, in_width, in_channels = get_const_tuple(data.shape)
    kernel_h, kernel_w, _, out_channels = get_const_tuple(kernel.shape)
    # B-matrix tiling depends on whether the A matrix will be interleaved.
    tile_rows, tile_cols = get_tiling_B_interleaved_t(interleave_A)
    transformed_kernel = nn.conv2d_gemm_weight_transform(kernel, tile_rows, tile_cols)
    return compute_conv2d_gemm_without_weight_transform(
        cfg,
        data,
        transformed_kernel,
        strides,
        padding,
        dilation,
        out_dtype,
        (kernel_h, kernel_w),
        out_channels,
        interleave_A,
    )
def _compute_conv2d_NHWC_quantized_without_transform(
    cfg,
    data,
    B,
    strides,
    padding,
    dilation,
    out_dtype,
    kernel_size=None,
    output_channels=None,
    interleave_A=False,
):
    """Forward directly to the GEMM compute — weights are already transformed."""
    return compute_conv2d_gemm_without_weight_transform(
        cfg,
        data,
        B,
        strides,
        padding,
        dilation,
        out_dtype,
        kernel_size,
        output_channels,
        interleave_A,
    )
def _schedule_conv2d_NHWC_quantized(cfg, outs, interleave_A):
    """Create schedule for tensors.

    Vectorizes/parallelizes the final output stage, then applies the
    interleaved or native GEMM schedule to each conv2d_gemm_output stage
    found in the graph.
    """
    s = te.create_schedule([x.op for x in outs])
    # Vectorize the output and then inline all the rest
    out = outs[0]
    n, h, w, c = out.op.axis
    n_h_fused = s[out].fuse(n, h)
    outer, inner = s[out].split(c, 4)
    s[out].vectorize(inner)
    s[out].parallel(n_h_fused)

    def _callback(op):
        """Traverse operators from computation graph"""
        if op.name == "conv2d_gemm_output":
            conv_out = op.output(0)
            if interleave_A:
                schedule_conv2d_gemm_interleaved(cfg, s, conv_out, out)
            else:
                schedule_conv2d_gemm_native(cfg, s, conv_out, out)
            if out != conv_out:
                # Fused ops follow the conv: compute the conv at the
                # vectorized inner axis of the final output.
                s[conv_out].compute_at(s[out], inner)
            else:
                # The conv is itself the output; anchor its GEMM input instead
                # (interleaved schedule only).
                C = conv_out.op.input_tensors[0]
                if interleave_A:
                    s[C].compute_at(s[out], inner)

    traverse_inline(s, outs[0].op, _callback)
    return s
# Interleaved schedules: those schedule will interleave the input data. The
# weights are interleaved and transposed
@autotvm.register_topi_compute("conv2d_NHWC_quantized_interleaved.arm_cpu")
def compute_conv2d_NHWC_quantized_interleaved(
    cfg, data, kernel, strides, padding, dilation, out_dtype
):
    """Compute wrapper for the interleaved quantized NHWC conv2d."""
    return _compute_conv2d_NHWC_quantized(
        cfg, data, kernel, strides, padding, dilation, out_dtype, interleave_A=True
    )
@autotvm.register_topi_compute("conv2d_NHWC_quantized_interleaved_without_transform.arm_cpu")
def compute_conv2d_NHWC_quantized_interleaved_without_transform(
    cfg, data, kernel, strides, padding, dilation, out_dtype, kernel_size, output_channels
):
    """Compute wrapper for interleaved quantized NHWC conv2d with pre-transformed weights."""
    return _compute_conv2d_NHWC_quantized_without_transform(
        cfg,
        data,
        kernel,
        strides,
        padding,
        dilation,
        out_dtype,
        kernel_size,
        output_channels,
        interleave_A=True,
    )
@autotvm.register_topi_schedule("conv2d_NHWC_quantized_interleaved.arm_cpu")
def schedule_conv2d_NHWC_quantized_interleaved(cfg, outs):
    """Schedule wrapper for the interleaved quantized NHWC conv2d."""
    return _schedule_conv2d_NHWC_quantized(cfg, outs, interleave_A=True)
@autotvm.register_topi_schedule("conv2d_NHWC_quantized_interleaved_without_transform.arm_cpu")
def schedule_conv2d_NHWC_quantized_interleaved_without_transform(cfg, outs):
    """Schedule wrapper for interleaved quantized NHWC conv2d without weight transform."""
    return _schedule_conv2d_NHWC_quantized(cfg, outs, interleave_A=True)
# Native schedules: those schedule won't interleave A (which is left in its native form).
# The weights are interleaved and transposed
@autotvm.register_topi_compute("conv2d_NHWC_quantized_native.arm_cpu")
def compute_conv2d_NHWC_quantized_native(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute wrapper for the native (non-interleaved A) quantized NHWC conv2d."""
    return _compute_conv2d_NHWC_quantized(
        cfg, data, kernel, strides, padding, dilation, out_dtype, interleave_A=False
    )
@autotvm.register_topi_compute("conv2d_NHWC_quantized_native_without_transform.arm_cpu")
def compute_conv2d_NHWC_quantized_native_without_transform(
    cfg, data, kernel, strides, padding, dilation, out_dtype, kernel_size, output_channels
):
    """Compute wrapper for native quantized NHWC conv2d with pre-transformed weights."""
    return _compute_conv2d_NHWC_quantized_without_transform(
        cfg,
        data,
        kernel,
        strides,
        padding,
        dilation,
        out_dtype,
        kernel_size,
        output_channels,
        interleave_A=False,
    )
@autotvm.register_topi_schedule("conv2d_NHWC_quantized_native.arm_cpu")
def schedule_conv2d_NHWC_quantized_native(cfg, outs):
    """Schedule wrapper for the native quantized NHWC conv2d."""
    return _schedule_conv2d_NHWC_quantized(cfg, outs, interleave_A=False)
@autotvm.register_topi_schedule("conv2d_NHWC_quantized_native_without_transform.arm_cpu")
def schedule_conv2d_NHWC_quantized_native_without_transform(cfg, outs):
    """Schedule wrapper for native quantized NHWC conv2d without weight transform."""
    return _schedule_conv2d_NHWC_quantized(cfg, outs, interleave_A=False)
| 13,990 | 38.634561 | 100 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/conv2d_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D alter op and legalize functions for arm cpu"""
import logging
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from ..nn import conv2d_alter_layout, conv2d_legalize
from ..utils import get_const_tuple
from ..x86.conv2d import _get_default_config as _get_x86_default_config
from ..x86.conv2d_int8 import _get_default_config_int8
from .conv2d_int8 import is_int8_hw_support
from .arm_utils import get_tiling_B_interleaved_t
from ..generic.conv2d import conv2d_alter_int8_common
from .mprofile.dsp.micro_kernel.common import num_simd_lanes_per_word
logger = logging.getLogger("topi")
def interleave_transpose_weights(inputs, data, kernel, interleave_A):
    """Transform the weight matrix by reshaping, interleaving and transposing it.

    Parameters
    ----------
    inputs : tvm.relay.Expr
        Grouped input symbols
    data :
        Input shape and dtype
    kernel :
        Input shape and dtype
    interleave_A: indicates if we expect matrix A to be interleaved

    Returns
    ----------
    new_kernel : tvm.te.placeholder
        A placeholder with the new shape
    new_kernel_expr : tvm.relay.Expr
        The relay expression of the weights
    """
    # Both tensors must be int8 or both uint8.
    assert (data.dtype == "int8" and kernel.dtype == "int8") or (
        data.dtype == "uint8" and kernel.dtype == "uint8"
    )

    kernel_h, kernel_w, in_channels, out_channels = get_const_tuple(kernel.shape)
    # GEMM dimensions: K is the reduction axis, N the output channels.
    reduction_k = kernel_h * kernel_w * in_channels

    # Get tiling information for the interleaved transposed version of B
    tile_rows_B, tile_cols_B = get_tiling_B_interleaved_t(interleave_A)

    # Round both GEMM dimensions up to a whole number of tiles.
    n_padded = out_channels + (-out_channels % tile_rows_B)
    k_padded = reduction_k + (-reduction_k % tile_cols_B)

    new_kernel_expr = relay.nn.contrib_conv2d_gemm_weight_transform(
        inputs[1], tile_rows_B, tile_cols_B
    )
    new_kernel = te.placeholder(
        (n_padded // tile_rows_B, k_padded // tile_cols_B, tile_rows_B, tile_cols_B),
        kernel.dtype,
    )
    return new_kernel, new_kernel_expr
@conv2d_alter_layout.register(["arm_cpu"])
def _alter_conv2d_layout(attrs, inputs, tinfos, out_type):
    """Alter conv2d layouts for arm_cpu.

    Depending on which AutoTVM template was selected for this workload,
    rewrites the conv2d call into a layout-specialized variant (packed
    spatial-pack, winograd, NCHWc, or GEMM without weight transform) and
    registers the tuned config under the new workload in the dispatch
    context so it is found again at compile time. Returns None when no
    alteration is needed.
    """
    target = tvm.target.Target.current(allow_none=False)
    dispatch_ctx = autotvm.task.DispatchContext.current

    _, outs = relay.backend.te_compiler.select_implementation(
        relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target
    )
    workload = autotvm.task.get_workload(outs)
    if workload is None:
        # The best implementation is not an AutoTVM template,
        # we then assume it's not necessary to alter this op.
        return None
    cfg = dispatch_ctx.query(target, workload)

    topi_tmpl = workload[0]
    new_attrs = {k: attrs[k] for k in attrs.keys()}

    strides = attrs.get_int_tuple("strides")
    padding = attrs.get_int_tuple("padding")
    dilation = attrs.get_int_tuple("dilation")
    data_layout = attrs["data_layout"]
    kernel_layout = attrs["kernel_layout"]
    data, kernel = tinfos
    out_dtype = out_type.dtype

    # Extract data types
    data_tensor, kernel_tensor = tinfos
    data_dtype = data_tensor.dtype
    kernel_dtype = kernel_tensor.dtype

    # Floor-division helper used when building packed (chunked) shapes.
    idxd = tvm.tir.indexdiv

    if topi_tmpl == "depthwise_conv2d_nhwc_dsp.arm_cpu":
        assert data_layout == "NHWC" and kernel_layout == "HWOI"
        # We are not able to check if inputs[1] (the kernel) is a constant in the
        # strategy function, so as a stopgap solution we use an assert here.
        assert isinstance(
            inputs[1], relay.Constant
        ), "depthwise_conv2d_nhwc_dsp.arm_cpu requires kernel be a relay Constant"

        channels = get_const_tuple(data.shape)[3]
        KH, KW, _, _ = get_const_tuple(kernel.shape)
        simd_lanes = num_simd_lanes_per_word(data.dtype)

        # Repack the HWOI constant kernel into groups of simd_lanes channels.
        HWOI_kernel_np = inputs[1].data.numpy()
        CHWc_kernel_np = np.zeros((channels // simd_lanes, KH, KW, simd_lanes), dtype=kernel.dtype)
        for i in range(channels // simd_lanes):
            CHWc_kernel_np[i] = HWOI_kernel_np[:, :, simd_lanes * i : simd_lanes * (i + 1), 0]
        reshaped_new_kernel = CHWc_kernel_np.reshape((KH, KW, channels, 1))

        # Store the same config for the altered operator (workload)
        new_data = data
        new_kernel = te.placeholder((KH, KW, channels, 1), dtype=kernel.dtype)
        new_workload = autotvm.task.args_to_workload(
            [new_data, new_kernel, strides, padding, dilation, out_dtype],
            "depthwise_conv2d_nhwc_dsp.arm_cpu",
        )
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.conv2d(
            inputs[0], relay.Constant(tvm.nd.array(reshaped_new_kernel)), **new_attrs
        )

    # Only microTVM does layout alteration for NHWC layout with real data types
    if data_layout == "NHWC" and data_dtype not in ["uint8", "int8"]:
        return None

    if topi_tmpl == "conv2d_nchw_spatial_pack.arm_cpu":
        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        N, CI, H, W = get_const_tuple(data.shape)
        CO, _, KH, KW = get_const_tuple(kernel.shape)
        VC = cfg["tile_co"].size[-1]

        new_attrs["kernel_layout"] = f"OIHW{VC}o"

        new_data = data
        new_kernel = te.placeholder((idxd(CO, VC), CI, KH, KW, VC), dtype=kernel.dtype)
        new_workload = autotvm.task.args_to_workload(
            [new_data, new_kernel, strides, padding, dilation, out_dtype],
            "conv2d_nchw_spatial_pack.arm_cpu",
        )
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.conv2d(*inputs, **new_attrs)

    if topi_tmpl == "conv2d_nhwc_spatial_pack.arm_cpu":
        # This path widens quantized inputs to int16 so the spatial-pack
        # schedule can operate on them.
        assert (
            data.dtype == "int8"
            and kernel.dtype == "int8"
            or data.dtype == "uint8"
            and kernel.dtype == "uint8"
        )
        assert data_layout == "NHWC" and kernel_layout == "HWIO"

        data_expr, kernel_expr = inputs

        data_int16 = relay.cast(data_expr, dtype="int16")
        kernel_int16 = relay.cast(kernel_expr, dtype="int16")

        new_attrs = {k: attrs[k] for k in attrs.keys()}

        new_data = te.placeholder(data.shape, "int16")
        new_kernel = te.placeholder(kernel.shape, "int16")

        new_workload = autotvm.task.args_to_workload(
            [new_data, new_kernel, strides, padding, dilation, out_dtype],
            "conv2d_nhwc_spatial_pack.arm_cpu",
        )
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.conv2d(data_int16, kernel_int16, **new_attrs)

    if topi_tmpl == "conv2d_nchw_winograd.arm_cpu":
        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        N, CI, H, W = get_const_tuple(data.shape)
        CO, _, KH, KW = get_const_tuple(kernel.shape)
        VC = cfg["tile_k"].size[-1]

        tile_size = 4

        # Pre-transform the weights for winograd, then repack per tile_k split.
        weight_expr = inputs[1]
        weight_expr = relay.nn.contrib_conv2d_winograd_weight_transform(
            weight_expr, tile_size=tile_size
        )
        weight_expr = relay.reshape(
            weight_expr, newshape=(KH + tile_size - 1, KW + tile_size - 1, CO // VC, VC, CI)
        )
        weight_expr = relay.transpose(weight_expr, axes=[0, 1, 2, 4, 3])

        new_attrs["tile_size"] = tile_size
        new_attrs["channels"] = CO

        new_data = data
        new_kernel = te.placeholder(
            (KH + tile_size - 1, KW + tile_size - 1, idxd(CO, VC), CI, VC), kernel.dtype
        )
        new_workload = autotvm.task.args_to_workload(
            [new_data, new_kernel, strides, padding, dilation, out_dtype],
            "conv2d_nchw_winograd.arm_cpu",
        )
        dispatch_ctx.update(target, new_workload, cfg)

        return relay.nn.contrib_conv2d_winograd_without_weight_transform(
            inputs[0], weight_expr, **new_attrs
        )

    if topi_tmpl == "conv2d_nchw_winograd_nnpack.arm_cpu":
        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        N, CI, H, W = get_const_tuple(data.shape)
        CO, _, KH, KW = get_const_tuple(kernel.shape)
        new_attrs["channels"] = CO

        # pre-compute winograd_nnpack transform
        # for winograd_nnpack_fp16, the precompute prune pass must run on device,
        # where float16 is supported
        weight_dtype = "float32"
        weight_expr = inputs[1]
        transformed_weight = relay.nn.contrib_conv2d_winograd_nnpack_weight_transform(
            weight_expr,
            convolution_algorithm=cfg["winograd_nnpack_algorithm"].val,
            out_dtype=weight_dtype,
        )

        new_data = data
        new_kernel = te.placeholder((CO, CI, 8, 8), "float32")

        new_workload = autotvm.task.args_to_workload(
            [new_data, new_kernel, None, strides, padding, dilation, out_dtype],
            "conv2d_nchw_winograd_nnpack_without_weight_transform.arm_cpu",
        )
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.contrib_conv2d_winograd_without_weight_transform(
            inputs[0], transformed_weight, **new_attrs
        )

    if topi_tmpl == "depthwise_conv2d_nchw_spatial_pack.arm_cpu":
        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        N, CI, H, W = get_const_tuple(data.shape)
        CO, M, KH, KW = get_const_tuple(kernel.shape)
        VC = cfg["tile_co"].size[-1]

        new_attrs["kernel_layout"] = f"OIHW{cfg['tile_co'].size[-1]}o"

        # Store the same config for the altered operator (workload)
        new_data = data
        new_kernel = te.placeholder((idxd(CO, VC), M, KH, KW, VC), dtype=kernel.dtype)
        new_workload = autotvm.task.args_to_workload(
            [new_data, new_kernel, strides, padding, dilation, out_dtype],
            "depthwise_conv2d_nchw_spatial_pack.arm_cpu",
        )
        dispatch_ctx.update(target, new_workload, cfg)

        return relay.nn.conv2d(*inputs, **new_attrs)

    if topi_tmpl == "conv2d_NCHWc.x86":
        # Converting NCHW to NCHWc.
        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        if cfg.is_fallback:
            _get_x86_default_config(
                cfg,
                data_tensor,
                kernel_tensor,
                strides,
                padding,
                dilation,
                out_dtype,
                False,
                data_layout,
            )
        batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape)
        out_channel, _, kh, kw = get_const_tuple(kernel_tensor.shape)
        ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]

        # update new attrs
        new_attrs["channels"] = out_channel
        new_attrs["data_layout"] = f"NCHW{ic_bn}c"
        # (oc, ic, h, w) -> (OC, IC, h, w, ic, oc)
        new_attrs["kernel_layout"] = f"OIHW{ic_bn}i{oc_bn}o"
        new_attrs["out_layout"] = f"NCHW{oc_bn}c"

        # Store altered operator's config
        new_data = te.placeholder(
            (batch_size, in_channel // ic_bn, height, width, ic_bn), dtype=data_dtype
        )
        new_kernel = te.placeholder(
            (out_channel // oc_bn, in_channel // ic_bn, kh, kw, ic_bn, oc_bn),
            dtype=kernel_tensor.dtype,
        )
        new_workload = autotvm.task.args_to_workload(
            [
                new_data,
                new_kernel,
                strides,
                padding,
                dilation,
                new_attrs["data_layout"],
                new_attrs["out_layout"],
                out_dtype,
            ],
            topi_tmpl,
        )
        dispatch_ctx.update(target, new_workload, cfg)

        return relay.nn.contrib_conv2d_nchwc(*inputs, **new_attrs)

    if topi_tmpl == "depthwise_conv2d_NCHWc.x86":
        # Converting NCHW to NCHWc.
        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        if cfg.is_fallback:
            _get_x86_default_config(
                cfg, data_tensor, kernel_tensor, strides, padding, out_dtype, True, data_layout
            )

        batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape)
        out_channel, channel_multiplier, kh, kw = get_const_tuple(kernel_tensor.shape)
        ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
        assert channel_multiplier == 1

        # update new attrs
        new_attrs["channels"] = out_channel
        new_attrs["data_layout"] = f"NCHW{ic_bn}c"
        new_attrs["kernel_layout"] = f"OIHW1i{oc_bn}o"
        new_attrs["out_layout"] = f"NCHW{oc_bn}c"

        # Store altered operator's config.
        new_data = te.placeholder(
            (batch_size, in_channel // ic_bn, height, width, ic_bn), dtype=data_dtype
        )
        new_kernel = te.placeholder((out_channel // oc_bn, 1, kh, kw, 1, oc_bn), dtype=kernel_dtype)
        new_workload = autotvm.task.args_to_workload(
            [
                new_data,
                new_kernel,
                strides,
                padding,
                dilation,
                new_attrs["data_layout"],
                new_attrs["out_layout"],
                out_dtype,
            ],
            topi_tmpl,
        )
        dispatch_ctx.update(target, new_workload, cfg)

        return relay.nn.contrib_depthwise_conv2d_nchwc(*inputs, **new_attrs)

    if topi_tmpl == "conv2d_NCHWc_int8.arm_cpu":
        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape)
        out_channel, _, kh, kw = get_const_tuple(kernel_tensor.shape)

        # Number of int8 values consumed per dot-product lane.
        n_elems = 4

        if cfg.is_fallback:
            _get_default_config_int8(
                cfg,
                data_tensor,
                kernel_tensor,
                strides,
                padding,
                dilation,
                out_dtype,
                False,
                data_layout,
                int32_lanes=4,
            )

        ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
        if cfg.is_fallback:
            # ic_bn needs to be divided by n_elems below
            ic_bn = max(ic_bn, n_elems)

        # update new attrs
        new_attrs["channels"] = out_channel
        new_attrs["data_layout"] = f"NCHW{ic_bn}c"
        new_attrs["kernel_layout"] = f"OIHW{ic_bn // n_elems:n}i{oc_bn:n}o{n_elems:n}i"
        new_attrs["out_layout"] = f"NCHW{oc_bn}c"

        # Store altered operator's config.
        new_data = te.placeholder(
            (batch_size, in_channel // ic_bn, height, width, ic_bn), dtype=data_dtype
        )
        new_kernel = te.placeholder(
            (out_channel // oc_bn, in_channel // ic_bn, kh, kw, ic_bn // n_elems, oc_bn, n_elems),
            dtype=kernel_dtype,
        )
        new_workload = autotvm.task.args_to_workload(
            [
                new_data,
                new_kernel,
                strides,
                padding,
                dilation,
                new_attrs["data_layout"],
                new_attrs["out_layout"],
                out_dtype,
            ],
            topi_tmpl,
        )
        dispatch_ctx.update(target, new_workload, cfg)

        return relay.nn.contrib_conv2d_nchwc(*inputs, **new_attrs)

    if topi_tmpl == "conv2d_NHWC_quantized_interleaved.arm_cpu":
        # TODO(masahi): This schedule can easily result in a tensorization error
        # if used in the fallback mode
        if cfg.is_fallback:  # if is fallback, clear query cache and return None
            autotvm.task.clear_fallback_cache(target, workload)
            return None

        assert data_layout == "NHWC" and kernel_layout == "HWIO"
        KH, KW, _, OC = get_const_tuple(kernel.shape)
        new_workload_name = "conv2d_NHWC_quantized_interleaved_without_transform.arm_cpu"
        new_kernel, new_kernel_expr = interleave_transpose_weights(
            inputs, data, kernel, interleave_A=True
        )
        new_workload = autotvm.task.args_to_workload(
            [data, new_kernel, strides, padding, dilation, out_dtype, (KH, KW), OC],
            new_workload_name,
        )
        dispatch_ctx.update(target, new_workload, cfg)

        return relay.nn.contrib_conv2d_gemm_without_weight_transform(
            inputs[0], new_kernel_expr, **new_attrs
        )

    if topi_tmpl == "conv2d_NHWC_quantized_native.arm_cpu":
        # TODO(masahi): This schedule can easily result in a tensorization error
        # if used in the fallback mode
        if cfg.is_fallback:  # if is fallback, clear query cache and return None
            autotvm.task.clear_fallback_cache(target, workload)
            return None

        assert data_layout == "NHWC" and kernel_layout == "HWIO"
        KH, KW, _, OC = get_const_tuple(kernel.shape)
        new_workload_name = "conv2d_NHWC_quantized_native_without_transform.arm_cpu"
        new_kernel, new_kernel_expr = interleave_transpose_weights(
            inputs, data, kernel, interleave_A=False
        )
        new_workload = autotvm.task.args_to_workload(
            [data, new_kernel, strides, padding, dilation, out_dtype, (KH, KW), OC],
            new_workload_name,
        )
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.contrib_conv2d_gemm_without_weight_transform(
            inputs[0], new_kernel_expr, **new_attrs
        )

    # No template-specific alteration matched.
    return None
@conv2d_legalize.register("arm_cpu")
def _conv2d_legalize(attrs, inputs, arg_types):
    """Legalizes Conv2D op for arm_cpu.
    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    arg_types : list of types
        List of input and output types
    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr, or None when no legalization applies
    """
    data_type, weight_type, out_type = arg_types[0], arg_types[1], arg_types[2]
    data_expr, weight_expr = inputs
    # ARM vector instructions operate on the same dtype for data and kernel, so
    # the kernel dtype is supplied for both; conv2d_alter_int8_common converts
    # the operands to the correct datatype.
    if not is_int8_hw_support(weight_type.dtype, weight_type.dtype):
        return None
    return conv2d_alter_int8_common(
        data_expr,
        data_type,
        weight_expr,
        weight_type,
        out_type,
        attrs,
        weight_type.dtype,
        8,
        8,
    )
| 19,496 | 36.494231 | 100 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/group_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
# pylint: disable=no-value-for-parameter,import-outside-toplevel
"""Grouped Spatial Pack Convolution (Group Conv2D) schedule on ARM"""
import tvm
from tvm import autotvm
from tvm import te
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from ..utils import get_const_tuple
from ..nn.pad import pad
from .. import tag
from ..nn.conv2d import _get_workload as _get_conv2d_workload
def group_conv2d_nchw(data, kernel, strides, padding, dilation, groups, out_dtype):
    """Grouped 2D convolution in NCHW layout.

    Thin wrapper that dispatches to the grouped spatial-pack (GSPC) template.
    """
    gspc_args = (data, kernel, strides, padding, dilation, groups, out_dtype)
    return group_conv2d_nchw_spatial_pack(*gspc_args)
def schedule_group_conv2d_nchw(outs):
    """Create a schedule for grouped conv2d in NCHW layout.

    Alias for :func:`schedule_group_conv2d_nchwc`.
    """
    return schedule_group_conv2d_nchwc(outs)
def _get_default_config(
    cfg, data, kernel, strides, padding, dilation, groups, out_dtype, layout="NCHW"
):
    """Populate *cfg* in-place with a default schedule for this workload.

    Dynamic (tvm.tir.Var) input dimensions are replaced with 1 so that a
    static workload description can be built.
    """
    static_shape = [
        1 if isinstance(dim, tvm.tir.Var) else dim for dim in get_const_tuple(data.shape)
    ]
    data = te.placeholder(static_shape, dtype=data.dtype)
    wkl = _get_conv2d_workload(data, kernel, strides, padding, dilation, out_dtype, layout)
    _fallback_schedule(cfg, wkl)
def _fallback_schedule(cfg, wkl):
    """Fill *cfg* with a reasonable default split/unroll configuration.

    Picks the largest SIMD-friendly block sizes (up to 4 lanes) that evenly
    divide the per-group output/input channel counts, and an output-width
    register-tiling factor that evenly divides the output width.

    Parameters
    ----------
    cfg : ConfigEntity
        The autotvm config populated in-place with tile_ic/tile_oc/tile_ow
        splits and the unroll_kw knob.
    wkl : Workload
        Convolution workload description (shapes, strides, padding, groups).
    """
    simd_width = 4  # assume ARM SIMD width is 4 lanes
    pad_left, pad_right = wkl.padl, wkl.padr
    stride_w = wkl.stride_w
    out_width = (wkl.width + pad_left + pad_right - wkl.kernel_w) // stride_w + 1
    groups = wkl.groups
    kernels_per_group = wkl.out_filter // groups
    kernel_depth = wkl.in_filter // groups
    # Fix: the original duplicated the `oc_bn = 1` initialization; only one
    # assignment is needed before searching for a divisor.
    oc_bn = 1
    for bn in range(simd_width, 0, -1):
        if kernels_per_group % bn == 0:
            oc_bn = bn
            break
    if oc_bn > kernels_per_group:
        oc_bn = kernels_per_group
    ic_bn = 1
    for bn in range(oc_bn, 0, -1):
        if kernel_depth % bn == 0:
            ic_bn = bn
            break
    if ic_bn > kernel_depth:
        ic_bn = kernel_depth
    reg_n = 1
    for n in range(31, 0, -1):
        if out_width % n == 0:
            reg_n = n
            break
    cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
    cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
    cfg["tile_ow"] = SplitEntity([out_width // reg_n, reg_n])
    cfg["unroll_kw"] = OtherOptionEntity(False)
@autotvm.register_topi_compute("group_conv2d_nchw.arm_cpu")
def group_conv2d_nchw_spatial_pack(
    cfg, data, kernel, strides, padding, dilation, groups, out_dtype="float32"
):
    """
    Compute group conv2d with NCHW layout, using GSPC algorithm.
    https://arxiv.org/abs/2006.09791

    Parameters
    ----------
    cfg : ConfigEntity
        autotvm configuration; the splits/knobs used below are defined here.
    data : tvm.te.Tensor
        4-D input [batch, in_channel, in_height, in_width].
    kernel : tvm.te.Tensor
        4-D filter [out_channel, in_channel // groups, k_height, k_width].
    strides : int or tuple of 2 ints
        Spatial stride along (height, width).
    padding : int, tuple of 2 ints, or tuple of 4 ints
        Padding as a scalar, (hpad, wpad), or (top, left, bottom, right).
    dilation : int or tuple of 2 ints
        Kernel dilation along (height, width).
    groups : int
        Number of convolution groups.
    out_dtype : str
        Output data type.

    Returns
    -------
    tvm.te.Tensor
        4-D output [batch, out_channel, out_height, out_width].
    """
    # Normalize dilation / padding / strides to explicit per-dimension values.
    assert isinstance(dilation, int) or len(dilation) == 2
    if isinstance(dilation, int):
        dilation_h, dilation_w = dilation, dilation
    else:
        dilation_h, dilation_w = dilation
    assert isinstance(padding, int) or len(padding) == 2 or len(padding) == 4
    if isinstance(padding, int):
        pad_top, pad_left, pad_bottom, pad_right = padding, padding, padding, padding
    elif len(padding) == 2:
        hpad, wpad = padding
        pad_top, pad_bottom = hpad, hpad
        pad_left, pad_right = wpad, wpad
    else:
        pad_top, pad_left, pad_bottom, pad_right = padding
    hpad = pad_top + pad_bottom
    wpad = pad_left + pad_right
    assert isinstance(strides, int) or len(strides) == 2
    if isinstance(strides, int):
        stride_h, stride_w = strides, strides
    else:
        stride_h, stride_w = strides
    batch_size, in_channel, in_height, in_width = get_const_tuple(data.shape)
    out_channel, kernel_depth, k_height, k_width = get_const_tuple(kernel.shape)
    pad_height = in_height + pad_top + pad_bottom
    pad_width = in_width + pad_left + pad_right
    dilated_kernel_h = (k_height - 1) * dilation_h + 1
    dilated_kernel_w = (k_width - 1) * dilation_w + 1
    out_height = (in_height + pad_top + pad_bottom - dilated_kernel_h) // stride_h + 1
    out_width = (in_width + pad_left + pad_right - dilated_kernel_w) // stride_w + 1
    kernels_per_group = out_channel // groups
    # Tuning space: input/output channel block splits, output-width register
    # tiling (block capped at 64), and whether to unroll the kernel-width loop.
    cfg.define_split("tile_ic", in_channel, num_outputs=2)
    cfg.define_split("tile_oc", out_channel, num_outputs=2)
    cfg.define_split("tile_ow", out_width, num_outputs=2, filter=lambda y: y.size[-1] <= 64)
    cfg.define_knob("unroll_kw", [True, False])
    # If no config was set, we can fallback to default config.
    if cfg.is_fallback:
        _get_default_config(
            cfg,
            te.placeholder((batch_size, in_channel, in_height, in_width), dtype=data.dtype),
            te.placeholder(
                (out_channel, in_channel // groups, k_height, k_width), dtype=kernel.dtype
            ),
            strides,
            padding,
            dilation,
            groups,
            out_dtype,
        )
    # Channel block (vector-lane) sizes chosen by the tuner or the fallback.
    oc_bn = cfg["tile_oc"].size[-1]
    ic_bn = cfg["tile_ic"].size[-1]
    # pack data
    DOPAD = hpad != 0 or wpad != 0
    if DOPAD:
        data_pad = pad(
            data, (0, 0, pad_top, pad_left), (0, 0, pad_bottom, pad_right), name="data_pad"
        )
    else:
        data_pad = data
    # Packed data layout: [group, batch, ic_chunk, height, ic_block, width].
    shape = (groups, batch_size, kernel_depth // ic_bn, pad_height, ic_bn, pad_width)
    data_vec = te.compute(
        shape,
        lambda g, n, C, h, c, w: data_pad[n, C * ic_bn + c + kernel_depth * g, h, w],
        name="data_vec",
    )
    # pack kernel
    # Packed kernel layout: [group, oc_chunk, ic_chunk, kh, kw, ic_block, oc_block].
    shape = (
        groups,
        kernels_per_group // oc_bn,
        kernel_depth // ic_bn,
        k_height,
        k_width,
        ic_bn,
        oc_bn,
    )
    kernel_vec = te.compute(
        shape,
        lambda g, out_channel, in_channel, h, w, ci, co: kernel[
            (out_channel * oc_bn + co + g * kernels_per_group), in_channel * ic_bn + ci, h, w
        ],
        name="kernel_vec",
    )
    # convolution
    oshape = (groups, batch_size, kernels_per_group // oc_bn, out_height, out_width, oc_bn)
    unpack_shape = (batch_size, out_channel, out_height, out_width)
    ic = te.reduce_axis((0, (kernel_depth)), name="ic")
    kh = te.reduce_axis((0, k_height), name="kh")
    kw = te.reduce_axis((0, k_width), name="kw")
    idxmod = tvm.tir.indexmod
    idxdiv = tvm.tir.indexdiv
    # Blocked grouped convolution over the packed data/kernel tensors.
    conv = te.compute(
        oshape,
        lambda g, n, oc_chunk, oh, ow, oc_block: te.sum(
            data_vec[
                g,
                n,
                idxdiv(ic, ic_bn),
                oh * stride_h + kh * dilation_h,
                idxmod(ic, ic_bn),
                ow * stride_w + kw * dilation_w,
            ].astype(out_dtype)
            * kernel_vec[
                g, oc_chunk, idxdiv(ic, ic_bn), kh, kw, idxmod(ic, ic_bn), oc_block
            ].astype(out_dtype),
            axis=[ic, kh, kw],
        ),
        name="conv",
    )
    # Unpack the blocked result back to plain NCHW.
    unpack = te.compute(
        unpack_shape,
        lambda n, c, h, w: conv[
            idxdiv(c, kernels_per_group),
            n,
            idxmod(idxdiv(c, oc_bn), (kernels_per_group // oc_bn)),
            h,
            w,
            idxmod(idxmod(c, oc_bn), kernels_per_group),
        ].astype(out_dtype),
        name="output_unpack",
        tag="group_conv2d_nchw",
    )
    return unpack
@autotvm.register_topi_schedule("group_conv2d_nchw.arm_cpu")
def schedule_group_conv2d_nchwc(cfg, outs):
    """Create schedule for tensors.

    Walks the compute graph from the output op, inlining broadcast
    (one-to-one) stages and applying the GSPC schedule to every
    group_conv2d_nchw stage found.
    """
    s = te.create_schedule([x.op for x in outs])
    # Ops already visited; avoids re-scheduling shared subgraphs.
    scheduled_ops = []
    def traverse(op):
        """Traverse operators from computation graph"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(op.tag):
            if op not in s.outputs:
                s[op].compute_inline()
            for tensor in op.input_tensors:
                if isinstance(tensor.op, tvm.te.ComputeOp) and tensor.op not in scheduled_ops:
                    traverse(tensor.op)
        if "group_conv2d_nchw" in op.tag:
            output = op.output(0)
            # Without the tuning entries there is nothing to schedule.
            if "tile_ic" not in cfg:
                return
            # Recover the packing stages produced by the compute declaration.
            conv_out = op.input_tensors[0]
            kernel_vec = conv_out.op.input_tensors[1]
            kernel = kernel_vec.op.input_tensors[0]
            if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
                s[kernel].compute_inline()
            data_vec = conv_out.op.input_tensors[0]
            data = data_vec.op.input_tensors[0]
            data_pad = None
            if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
                data_pad = data
                data = data_pad.op.input_tensors[0]
            args = [s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, outs[0]]
            _schedule_gspc_nchw(*args)
        scheduled_ops.append(op)
    traverse(outs[0].op)
    return s
def _schedule_gspc_nchw(s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, last):
    """Schedule GSPC (grouped spatial-pack convolution).

    Applies vectorization over the output-channel block, register tiling over
    the output width, and parallelization over fused outer axes, for the
    data-packing, kernel-packing, convolution, and unpacking stages.
    """
    ic_bn, oc_bn, reg_n, unroll_kw = (
        cfg["tile_ic"].size[-1],
        cfg["tile_oc"].size[-1],
        cfg["tile_ow"].size[-1],
        cfg["unroll_kw"].val,
    )
    _, W = data, kernel_vec
    A0, A1 = data_pad, data_vec
    # schedule data: inline padding into the packing stage, then parallelize
    # the packing over fused (batch, ic_chunk, height).
    if (
        data_pad is not None
        and isinstance(data_pad.op, tvm.te.ComputeOp)
        and "pad" in data_pad.op.tag
    ):
        s[A0].compute_inline()
    groups, batch, ic_chunk, ih, ic_block, _ = s[A1].op.axis
    parallel_axis = s[A1].fuse(batch, ic_chunk, ih)
    s[A1].parallel(parallel_axis)
    # schedule kernel pack
    groups, oc_chunk, ic_chunk, oh, ow, ic_block, oc_block = s[W].op.axis
    s[W].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block)
    if oc_bn > 1:
        s[W].vectorize(oc_block)
    parallel_axis = s[W].fuse(groups, oc_chunk, oh)
    s[W].parallel(parallel_axis)
    # schedule conv
    C, O0, O = conv_out, output, last
    CC = s.cache_write(C, "global")
    _, _, oc_chunk, oh, ow, oc_block = s[C].op.axis
    ow_chunk, ow_block = s[C].split(ow, factor=reg_n)
    s[C].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
    s[C].fuse(oc_chunk, oh)
    s[C].vectorize(oc_block)
    groups, batch, oc_chunk, oh, ow, oc_block = s[CC].op.axis
    ic, kh, kw = s[CC].op.reduce_axis
    ow_chunk, ow_block = s[CC].split(ow, factor=reg_n)
    ic_chunk, ic_block = s[CC].split(ic, factor=ic_bn)
    # Loop order depends on the unroll_kw knob: unrolling kw moves it inside
    # the ic_block loop.
    if unroll_kw:
        s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kh, ic_block, kw, ow_block, oc_block)
        s[CC].unroll(kw)
    else:
        s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kh, kw, ic_block, ow_block, oc_block)
    parallel_axis = s[CC].fuse(groups, batch, oc_chunk, oh)
    s[CC].parallel(parallel_axis)
    s[CC].vectorize(oc_block)
    s[CC].unroll(ow_block)
    # schedule the NCHW unpacking stage (inline the intermediate if distinct).
    if O0 != O:
        s[O0].compute_inline()
    batch, oc, oh, ow = s[O].op.axis
    ow_chunk, ow_block = s[O].split(ow, factor=reg_n)
    oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
    s[O].reorder(batch, oc_chunk, oh, ow_chunk, ow_block, oc_block)
    parallel_axis = s[O].fuse(oc_chunk, oh)
    s[O].vectorize(oc_block)
    s[O].parallel(parallel_axis)
    return s
| 12,062 | 31.254011 | 94 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Schedule for ARM CPU"""
from .conv1d import *
from .conv2d import *
from .depthwise_conv2d import *
from .conv2d_transpose import *
from .conv2d_int8 import *
from . import conv2d_alter_op
from .bitserial_conv2d import *
from .bitserial_dense import *
from .injective import *
from .group_conv2d import *
from .pooling import *
from .dense import *
from .qnn import *
from . import qnn_alter_op
from . import qnn_legalize
| 1,245 | 34.6 | 62 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/qnn_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Cortex-M specific optimizations for quantized operators."""
from typing import Iterable
import numpy as np
from tvm import nd, relay, target
from ..utils import get_const_tuple
from ..nn import qnn_conv2d_alter_layout, add_alter_layout, qnn_requantize_alter_layout
def prev_ops_match(curr_op: relay.expr.Call, pattern: Iterable[str]):
    """Return True when the chain of nested Relay operators matches *pattern*.

    The expression is treated as a linear stack of operators: only args[0] is
    followed when walking backwards. *pattern* lists operator names from last
    to first.
    """
    node = curr_op
    for expected_name in pattern:
        if not hasattr(node, "op"):
            return False
        if node.op.name != expected_name:
            return False
        node = node.args[0]
    return True
def edit_attrs(attrs, **kwargs):
    """Return a plain dict copy of *attrs* with the given entries overridden."""
    merged = dict(attrs)
    merged.update(kwargs)
    return merged
def change_numpy_layout(arr, src_layout, dst_layout):
    """Permute the axes of *arr* from *src_layout* to *dst_layout*.

    Layouts are strings of axis letters, e.g. "NCHW" -> "NHWC".
    """
    assert src_layout.isalpha() and dst_layout.isalpha()
    permutation = tuple(src_layout.index(axis) for axis in dst_layout)
    return np.transpose(arr, permutation)
def _squash_transformations(expr):
    """Constant-fold a linear chain of kernel transformations with NumPy.

    Recursively walks a chain of layout_transform/cast/expand_dims calls
    rooted at a relay Constant and applies each one eagerly, returning the
    resulting ndarray.

    Raises
    ------
    RuntimeError
        If the chain contains an unsupported operator.
    """
    if isinstance(expr, relay.expr.Constant):
        return expr.data.numpy()
    assert isinstance(expr, relay.expr.Call)
    assert len(expr.args) == 1
    prev_kernel = _squash_transformations(expr.args[0])
    attrs = expr.attrs
    if expr.op.name == "layout_transform":
        return change_numpy_layout(prev_kernel, attrs.src_layout, attrs.dst_layout)
    elif expr.op.name == "cast":
        return prev_kernel.astype(attrs.dtype)
    elif expr.op.name == "expand_dims":
        # Bug fix: the original referenced the undefined name `kernel` here,
        # which raised NameError whenever an expand_dims op was encountered.
        new_axes = range(attrs.axis, attrs.axis + attrs.num_newaxis)
        return np.expand_dims(prev_kernel, tuple(new_axes))
    else:
        raise RuntimeError(f"Invalid kernel transformation '{expr}'!")
def _alter_depthwise_conv2d_layout(depthwise_conv2d):
    """Rebuild a depthwise qnn.conv2d so its producer chain flows in NCHW.

    The caller has already verified that args[0] of *depthwise_conv2d* is
    exactly the chain cast(requantize(add(conv2d(...), bias))). The preceding
    conv2d is changed to emit NCHW, the bias and requantize axis are adjusted
    to the new layout, and a layout_transform is inserted before the depthwise
    conv, whose data_layout attribute is set to NCHW.

    NOTE(review): the inserted layout_transform converts NCHW -> NHWC while the
    depthwise conv is marked data_layout="NCHW" — presumably a later pass
    cancels the transform against a matching one; confirm before relying on it.
    """
    # Unpack the linear chain feeding the depthwise convolution.
    cast_op = depthwise_conv2d.args[0]
    requantize_op = cast_op.args[0]
    add_op = requantize_op.args[0]
    prev_conv2d_op = add_op.args[0]
    return relay.qnn.op.conv2d(
        relay.layout_transform(
            relay.cast(
                relay.qnn.op.requantize(
                    relay.op.add(
                        # Re-emit the preceding conv2d with NCHW output.
                        relay.qnn.op.conv2d(
                            *prev_conv2d_op.args,
                            **edit_attrs(prev_conv2d_op.attrs, out_layout="NCHW"),
                        ),
                        # The bias tensor must follow the layout change.
                        relay.layout_transform(
                            add_op.args[1],
                            src_layout="NHWC",
                            dst_layout="NCHW",
                        ),
                    ),
                    *requantize_op.args[1:],
                    # Channel axis moves from 3 (NHWC) to 1 (NCHW).
                    **edit_attrs(requantize_op.attrs, axis=1),
                ),
                dtype="int16",
            ),
            src_layout="NCHW",
            dst_layout="NHWC",
        ),
        *depthwise_conv2d.args[1:],
        **edit_attrs(depthwise_conv2d.attrs, data_layout="NCHW"),
    )
@qnn_conv2d_alter_layout.register(["arm_cpu"])
def alter_conv2d_layout(attrs, inputs, _tinfos, _out_type):
    """Adjust a qnn.conv2d and preceeding ops to better fit on Cortex-M."""
    mcpu = target.Target.current(allow_none=False).mcpu
    if "cortex-m" not in mcpu:
        return None
    depthwise = attrs.groups > 1
    # Cast both operands to int16 and pick our desired kernel layout — this
    # won't affect anything.
    data16 = relay.cast(inputs[0], dtype="int16")
    kernel16 = relay.cast(inputs[1], dtype="int16")
    kernel_layout = "IOHW" if depthwise else "OHWI"
    conv = relay.qnn.op.conv2d(
        data16,
        kernel16,
        *inputs[2:],
        **edit_attrs(attrs, kernel_layout=kernel_layout, out_layout="NHWC"),
    )
    # Depthwise convolutions preceded by a conv2d->add->requantize->cast chain
    # can additionally be rewritten to take NCHW input instead.
    producer_chain = ("cast", "qnn.requantize", "add", "qnn.conv2d")
    if depthwise and prev_ops_match(conv.args[0], producer_chain):
        conv = _alter_depthwise_conv2d_layout(conv)
    return conv
@add_alter_layout.register(["arm_cpu"])
def alter_add_layout(_attrs, inputs, _tinfos, _out_type):
    """Fuses the zero point for a previous quantized operator with this add operation.
    Currently only supports qnn.conv2d, but qnn.dense support should be added. Note that this
    optimization means we must pad tensors with the input zero point, and NOT with zero.
    """
    prev_op, biases_data_op = inputs
    if not prev_ops_match(inputs[0], ("qnn.conv2d",)):
        return None
    # We should not perform this alteration if the target has a uint * int SIMD MAC operation (since
    # these do (x - (-128)) * y efficiently, and conv_input_zp is usually -128). For now, we
    # restrict this optimization to just Cortex-M devices, but it might be helpful on others too.
    current_target = target.Target.current(allow_none=False)
    if not "cortex-m" in current_target.mcpu:
        return None
    # Scalar input zero point of the preceding conv2d and its constant-folded
    # kernel values.
    conv_input_zp = prev_op.args[2].data.numpy().item()
    kernel = _squash_transformations(prev_op.args[1])
    # Sum the kernel over every axis except the output channel, so each output
    # channel gets the constant (-zp) * sum(kernel) folded into its bias.
    if prev_op.attrs.groups == prev_op.attrs.channels:
        axes_to_sum = "HW"
    elif prev_op.attrs.groups == 1:
        axes_to_sum = "HWI"
    else:
        # This alteration does not currently support grouped conv2d
        return None
    axes_to_sum = tuple(map(prev_op.attrs.kernel_layout.index, axes_to_sum))
    element_sums = np.sum(kernel, axis=axes_to_sum).flatten()
    # The zero point is subtracted from the input elements, so we need a "-" sign here
    zp_shifted_sums = element_sums * (-conv_input_zp)
    # The bias values may or may not be wrapped in an expand_dims op
    if isinstance(biases_data_op, relay.expr.Call):
        biases = biases_data_op.args[0]
    else:
        biases = biases_data_op
    assert isinstance(biases, relay.expr.Constant)
    # We want to make sure new_biases is representable as an int32. It's tempting to just check
    # whether arr.dtype == "int32" (since Numpy will automatically increase dtype in some cases)
    # but this leads to weird wrapping behavior and doesn't work. We must do it manually.
    new_biases = biases.data.numpy().astype("int64") + zp_shifted_sums
    if new_biases.min() < -(2**31) or new_biases.max() > 2**31 - 1:
        return None
    # NOTE(review): current_target was already fetched above; this lookup is redundant.
    current_target = target.Target.current(allow_none=False)
    # The conv now sees a zero input zero point, since its effect moved to the bias.
    new_input_zp = relay.Constant(nd.array(np.int32(0)))
    new_conv_args = [*prev_op.args[:2], new_input_zp, *prev_op.args[3:]]
    bias_constant = relay.Constant(nd.array(new_biases.astype("int32")))
    # We should handle padding separately from convolution, so the original tensor can be
    # de-allocated immediately. This may also help with fusing padding onto a previous
    # operator. However, only do this if we're working with Cortex-M devices.
    padding = get_const_tuple(prev_op.attrs.padding)
    if "cortex-m" in current_target.mcpu and any(padding):
        data_layout = prev_op.attrs.data_layout
        assert data_layout.isupper()
        pad_up, pad_left, pad_down, pad_right = padding
        pad_op_arg = [(0, 0)] * len(data_layout)
        pad_op_arg[data_layout.index("H")] = (pad_up, pad_down)
        pad_op_arg[data_layout.index("W")] = (pad_left, pad_right)
        # Pad with the ORIGINAL input zero point, not zero (see docstring).
        new_conv_args[0] = relay.nn.pad(new_conv_args[0], tuple(pad_op_arg), conv_input_zp)
    new_conv_op = relay.qnn.op.conv2d(
        *new_conv_args,
        **edit_attrs(prev_op.attrs, padding=(0, 0, 0, 0)),
    )
    # If biases was wrapped in an expand_dims op, we must re-wrap it
    if isinstance(biases_data_op, relay.expr.Call):
        new_biases_op = relay.expand_dims(bias_constant, **biases_data_op.attrs)
    else:
        new_biases_op = bias_constant
    return relay.add(new_conv_op, new_biases_op)
@qnn_requantize_alter_layout.register(["arm_cpu"])
def alter_requantize_layout(attrs, inputs, _tinfos, _out_type):
    """Changes a floating point requantize op to use int64 multiply + shift for microTVM.
    Usually, this is done by QNN legalization. However, microTVM wants to manually choose the
    integer rounding constants in order to:
    (a) Have int32, not int64 constants
    (b) Use a constant rounding shift to skip a memory load.
    Ideally, we would pick these constants in the requantize (or fused) schedule. Unfortunately that
    is not currently possible, so we pick them with `alter_layout` as a hack. This will only work if
    the requantize schedule "plays along" with this hack.
    """
    # Only microTVM Cortex-M boards with DSP use the relevant schedules
    current_target = target.Target.current(allow_none=False)
    if not (current_target.features.has_dsp and "cortex-m" in current_target.mcpu):
        return None
    if not prev_ops_match(inputs[0], ("add", "qnn.conv2d")):
        return None
    # inputs: data, input_scale, input_zp, output_scale, output_zp.
    _, in_scale, _, out_scale, _ = inputs
    in_scale_numpy = in_scale.data.numpy().astype("float64")
    out_scale_scalar = out_scale.data.numpy().item()
    # Shifting by 33 and rounding means shifting by 32, adding 1, and shifting by 1 again. This is
    # useful, because shifting a multiplication product by 32 can be done for "free" with SMMUL
    scales = ((in_scale_numpy / out_scale_scalar) * 2**33).astype("int32")
    # Requantize ops in Relay do not support int32 scales - if we try to use one, requantize.cc will
    # raise an error. As a hacky work-around, we change the scale dtype to float32, without changing
    # underlying data. This works, as our compute function knows to interpret the scale as an int32.
    # This is only a work-around - a better long-term solution would be adding a new integer
    # requantize op, which takes integer scales, shifts, and rounding behavior.
    fake_float_scales = scales.view("float32")
    scale_constant = relay.Constant(nd.array(fake_float_scales))
    # Copy attrs into a plain dict so out_dtype can be overridden.
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    new_attrs["out_dtype"] = "int16"
    return relay.qnn.op.requantize(inputs[0], scale_constant, *inputs[2:], **new_attrs)
| 10,775 | 41.258824 | 100 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/conv2d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable
"""Transposed 2D convolution operators (sometimes called Deconvolution)."""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import autotvm
from ..nn import dilate, pad, get_pad_tuple
from ..utils import get_const_tuple, traverse_inline
from .conv2d_spatial_pack import schedule_conv2d_spatial_pack_nchw
@autotvm.register_topi_compute("conv2d_transpose_nchw.arm_cpu")
def conv2d_transpose_nchw(cfg, Input, Filter, strides, padding, out_dtype, output_padding):
    """Transposed 2D convolution (deconvolution) forward operator, NCHW layout.

    Parameters
    ----------
    cfg : ConfigEntity
        The autotvm tuning configuration.
    Input : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    Filter : tvm.te.Tensor
        4-D with shape [in_channel, num_filter, filter_height, filter_width]
    strides : tuple of two ints
        The spatial stride along height and width
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    out_dtype : str
        The output data type. This is used for mixed precision.
    output_padding : tuple of int
        Used to get the right output shape in gradients

    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    num_tile = 2  # spatial-pack tiling depth used by the arm cpu template
    return _decl_spatial_pack(
        cfg, Input, Filter, strides, padding, "NCHW", out_dtype, num_tile, output_padding
    )
def _decl_spatial_pack(
    cfg, data, kernel, strides, padding, layout, out_dtype, num_tile, output_padding
):
    """Declare the spatial-pack compute for conv2d_transpose.

    The transposed convolution is realized as: dilate the input by the
    strides, pad it so an ordinary convolution with the spatially flipped
    kernel produces the transposed-conv output, then run a spatial-pack
    conv2d compute. ``num_tile`` selects the axis-split depth (2 for arm cpu,
    3 for mali gpu).
    """
    assert layout == "NCHW", "Only support NCHW"
    out_dtype = out_dtype or data.dtype
    N, CI, IH, IW = get_const_tuple(data.shape)
    if isinstance(N, tvm.tir.Any):
        N = tvm.te.size_var("n")
    if not isinstance(IH, int) or not isinstance(IW, int):
        raise RuntimeError("ARM winograd conv2d doesn't support dynamic input height or width.")
    _, CO, KH, KW = get_const_tuple(kernel.shape)
    HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    opad_h, opad_w = output_padding
    assert opad_h < HSTR and opad_w < WSTR
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (KH, KW))
    # "Back-pad" amounts applied to the dilated input so that an ordinary
    # convolution realizes the transposed convolution.
    bpad_top, bpad_bottom = KH - 1 - pad_top, KH - 1 - pad_bottom + opad_h
    bpad_left, bpad_right = KW - 1 - pad_left, KW - 1 - pad_right + opad_w
    OH = (IH - 1) * HSTR - pad_top - pad_bottom + KH + opad_h
    OW = (IW - 1) * WSTR - pad_left - pad_right + KW + opad_w
    dilated_input = dilate(data, [1, 1, HSTR, WSTR])
    data_pad = pad(dilated_input, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right])
    # ==================== define configuration space ====================
    # TODO(@kevinthesun): Support tuning/optimization for dynamic shape.
    n_tuning_axis = N if isinstance(N, int) else 1
    n, co, oh, ow = cfg.axis(n_tuning_axis), cfg.axis(CO), cfg.axis(OH), cfg.axis(OW)
    ci, kh, kw = cfg.reduce_axis(CI), cfg.reduce_axis(KH), cfg.reduce_axis(KW)
    if num_tile == 2:  # for arm cpu
        co, vc = cfg.define_split("tile_co", co, num_outputs=2)
        oh, vh = cfg.define_split("tile_oh", oh, num_outputs=2)
        ow, vw = cfg.define_split("tile_ow", ow, num_outputs=2)
    elif num_tile == 3:  # for mali gpu
        co, _, vc = cfg.define_split("tile_co", co, num_outputs=3)
        oh, _, vh = cfg.define_split("tile_oh", oh, num_outputs=3)
        ow, _, vw = cfg.define_split("tile_ow", ow, num_outputs=3)
    else:
        raise RuntimeError("Invalid num_tile")
    cfg.define_reorder(
        "reorder_0",
        [n, co, oh, ow, ci, kh, kw, vh, vw, vc],
        policy="candidate",
        candidate=[
            [n, co, oh, ow, ci, kh, kw, vh, vw, vc],
            [n, co, oh, ow, ci, kh, kw, vc, vh, vw],
        ],
    )
    cfg.define_annotate("ann_reduce", [kh, kw], policy="try_unroll")
    cfg.define_annotate("ann_spatial", [vh, vw, vc], policy="try_unroll_vec")
    # ====================================================================
    # Inner tile sizes chosen by the tuner.
    VC = cfg["tile_co"].size[-1]
    VH = cfg["tile_oh"].size[-1]
    VW = cfg["tile_ow"].size[-1]
    dvshape = (N, OH // VH, OW // VW, CI, VH + KH - 1, VW + KW - 1)
    kvshape = (CO // VC, CI, KH, KW, VC)
    ovshape = (N, CO // VC, OH // VH, OW // VW, VH, VW, VC)
    oshape = (N, CO, OH, OW)
    data_vec = te.compute(
        dvshape,
        lambda n, h, w, ci, vh, vw: data_pad[n][ci][h * VH + vh][w * VW + vw],
        name="data_vec",
    )
    kernel_vec = te.compute(
        kvshape,
        lambda co, ci, kh, kw, vc: kernel[ci][co * VC + vc][kh][kw],
        name="kernel_vec_conv2d_transpose",
    )
    ci = te.reduce_axis((0, CI), name="ci")
    kh = te.reduce_axis((0, KH), name="kh")
    kw = te.reduce_axis((0, KW), name="kw")
    # The kernel is indexed flipped (KH-1-kh, KW-1-kw): transposed convolution
    # correlates with the spatially reversed filter.
    conv = te.compute(
        ovshape,
        lambda n, co, h, w, vh, vw, vc: te.sum(
            data_vec[n, h, w, ci, vh + kh, vw + kw].astype(out_dtype)
            * kernel_vec[co, ci, KH - 1 - kh, KW - 1 - kw, vc].astype(out_dtype),
            axis=[ci, kh, kw],
        ),
        name="conv",
    )
    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod
    # Unpack the tiled result back to plain NCHW.
    output = te.compute(
        oshape,
        lambda n, co, h, w: conv[
            n,
            idxdiv(co, VC),
            idxdiv(h, VH),
            idxdiv(w, VW),
            idxmod(h, VH),
            idxmod(w, VW),
            idxmod(co, VC),
        ],
        name="output_unpack",
        tag="spatial_conv2d_transpose_output",
    )
    return output
# register customized schedule for arm cpu.
@autotvm.register_topi_schedule("conv2d_transpose_nchw.arm_cpu")
def schedule_conv2d_transpose_nchw(cfg, outs):
    """Schedule conv2d transpose for arm cpu.

    Inlines the dilation and padding stages into the data-packing stage, then
    reuses the spatial-pack conv2d schedule for the main computation.
    """
    s = te.create_schedule([x.op for x in outs])
    def _callback(op):
        if "spatial_conv2d_transpose_output" in op.tag:
            # Recover the stages produced by _decl_spatial_pack.
            output = op.output(0)
            conv = op.input_tensors[0]
            data_vec = conv.op.input_tensors[0]
            data_pad = data_vec.op.input_tensors[0]
            dilated_input = data_pad.op.input_tensors[0]
            # Fold padding and stride-dilation into the packing stage.
            s[data_pad].compute_inline()
            s[dilated_input].compute_inline()
            kernel_vec = conv.op.input_tensors[1]
            if kernel_vec.op.name == "kernel_vec":
                kernel = kernel_vec.op.input_tensors[0]
            else:
                kernel = kernel_vec
            if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
                s[kernel].compute_inline()
            schedule_conv2d_spatial_pack_nchw(cfg, s, data_vec, kernel_vec, conv, output, outs[0])
    traverse_inline(s, outs[0].op, _callback)
    return s
| 7,416 | 35.004854 | 98 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable
"""Depthwise convolution schedule for ARM CPU"""
import tvm
from tvm.target import Target
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from .. import nn
from ..utils import traverse_inline, get_const_tuple, get_const_int
from ..nn.utils import get_pad_tuple
from .tensor_intrin import smlal_int16_int32
from .mprofile.dsp.depthwise_conv2d import (
depthwise_conv2d_nhwc_dsp_compute,
depthwise_conv2d_nhwc_dsp_schedule,
)
@autotvm.register_topi_compute("depthwise_conv2d_nchw.arm_cpu")
def depthwise_conv2d_nchw(_, data, kernel, strides, padding, dilation, out_dtype):
    """Depthwise conv2d in NCHW layout; delegates to the generic topi compute."""
    conv_args = (data, kernel, strides, padding, dilation, out_dtype)
    return nn.depthwise_conv2d_nchw(*conv_args)
@autotvm.register_topi_schedule("depthwise_conv2d_nchw.arm_cpu")
def schedule_depthwise_conv2d_nchw(cfg, outs):
    """Schedule depthwise conv2d
    Parameters
    ----------
    cfg: ConfigEntity
        The configuration of this template
    outs: Array of Tensor
        The computation graph description of depthwise convolution2d
        in the format of an array of tensors.
    Returns
    -------
    s: Schedule
        The computation schedule for depthwise_conv2d nchw.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    def _schedule(cfg, s, data, data_pad, kernel, output):
        # Tiling strategy: split the channel/height/width axes and move the
        # vectorized channel chunk (VC) innermost; data and kernel are
        # repacked into that layout through cache_read/cache_write stages.
        A, B, C = data, kernel, output
        s[data_pad].compute_inline()
        ##### space definition begin #####
        n, c, h, w = s[output].op.axis
        _, vc = cfg.define_split("tile_c", c, num_outputs=2)
        _, vh = cfg.define_split("tile_h", h, num_outputs=2)
        _, vw = cfg.define_split("tile_w", w, num_outputs=2)
        cfg.define_annotate("ann", [vh, vw, vc], policy="try_unroll_vec")
        # fallback support: reuse pre-tuned rk3399 records when no tuned
        # config is available for the current workload
        if cfg.is_fallback:
            ref_log = autotvm.tophub.load_reference_log(
                "arm_cpu", "rk3399", "depthwise_conv2d_nchw.arm_cpu"
            )
            cfg.fallback_with_reference_log(ref_log)
        ##### space definition end #####
        # pack data to vector form [n, c, h, w] -> [n, C, h, w, VC]
        A0 = s.cache_read(data_pad, "global", C)
        n, c, h, w = s[A0].op.axis
        c, vc = cfg["tile_c"].apply(s, A0, c)
        s[A0].reorder(n, c, h, w, vc)
        A1 = s.cache_write(A0, "global")
        s[A0].compute_inline()
        # pack kernel to vector form [co, ci, kh, kw] -> [CO, ci, kh, kw, VC]
        B0 = s.cache_read(B, "global", C)
        c, m, h, w = s[B0].op.axis
        c, vc, = cfg[
            "tile_c"
        ].apply(s, B0, c)
        s[B0].reorder(c, m, h, w, vc)
        B1 = s.cache_write(B0, "global")
        s[B0].compute_inline()
        n, c, h, w = s[C].op.axis
        c, vc, = cfg[
            "tile_c"
        ].apply(s, C, c)
        s[C].reorder(n, c, h, w, vc)
        # depthwise conv: tile the spatial axes and keep the reduction (dh,
        # dw) outside the inner tile so the [ih, iw, vc] micro-kernel can be
        # unrolled/vectorized by the "ann" annotation below
        C0 = s.cache_write(C, "global")
        _, c, h, w, vc = s[C0].op.axis
        dh, dw = s[C0].op.reduce_axis
        oh, ih = cfg["tile_h"].apply(s, C0, h)
        ow, iw = cfg["tile_w"].apply(s, C0, w)
        s[C0].reorder(c, oh, ow, dh, dw, ih, iw, vc)
        s[A1].compute_at(s[C0], oh)
        # try unroll and vectorization
        cfg["ann"].apply(
            s,
            C0,
            [ih, iw, vc],
            axis_lens=[cfg["tile_h"].size[-1], cfg["tile_w"].size[-1], cfg["tile_c"].size[-1]],
            max_unroll=16,
            cfg=cfg,
        )
        # fusion
        if C.op not in s.outputs:
            s[C].compute_inline()
        # mark parallel: parallelize all stages over the (tiled) channel axis
        last = outs[0]
        n, c, h, w = s[last].op.axis
        s[last].parallel(c)
        n, c, h, w, vc = s[C0].op.axis
        s[C0].parallel(c)
        c, m, h, w, vc = s[B1].op.axis
        s[B1].parallel(c)
        return s
    def _callback(op):
        # Only schedule the depthwise stage; peel off an explicit pad op so
        # _schedule can inline it.
        if op.tag == "depthwise_conv2d_nchw":
            output = op.output(0)
            kernel = op.input_tensors[1]
            data = op.input_tensors[0]
            data_pad = None
            if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
                data_pad = data
                data = data_pad.op.input_tensors[0]
            _schedule(cfg, s, data, data_pad, kernel, output)
    traverse_inline(s, outs[0].op, _callback)
    return s
# TODO:
# This schedule has incorrect result on some hardware platforms (like NV Jetson TX2)
# Let us comment it out but not remove.
# see discussion:
# https://discuss.tvm.apache.org/t/autotuner-incorrect-result-after-tuning-mobilenetv2-on-arm-cpu
@autotvm.register_topi_compute("depthwise_conv2d_nchw_spatial_pack.arm_cpu")
def depthwise_conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """TOPI compute callback for the spatial-pack depthwise_conv2d NCHW template.

    Delegates to :func:`_decl_spatial_pack` with ``num_tile=2`` (the arm_cpu
    tiling variant).

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    kernel : tvm.te.Tensor
        4-D with shape [num_filter, multiplier, filter_height, filter_width],
        or pre-packed 5-D with shape [num_filter_chunk, multiplier,
        filter_height, filter_width, num_filter_block]
    strides : list of two ints
        [stride_height, stride_width]
    padding : list of two ints
        [pad_height, pad_width]
    dilation : list of two ints
        [dilation_height, dilation_width]
    out_dtype: str
        The output type. This is used for mixed precision.

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    return _decl_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile=2)
@autotvm.register_topi_compute("depthwise_conv2d_nhwc.arm_cpu")
def compute_depthwise_conv2d_nhwc(_, data, kernel, strides, padding, dilation, out_dtype):
    """TOPI compute callback for depthwise_conv2d nhwc

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template
    data : tvm.te.Tensor
        4-D with shape [batch, in_height, in_width, in_channel]
    kernel : tvm.te.Tensor
        4-D with shape [filter_height, filter_width, in_channel, channel_multiplier]
    strides : list of two ints
        [stride_height, stride_width]
    padding : list of two ints
        [pad_height, pad_width]
    dilation : list of two ints
        [dilation_height, dilation_width]
    out_dtype: str
        The output type. This is used for mixed precision.

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_height, out_width, out_channel]
    """
    out_dtype = out_dtype or data.dtype
    batch, in_height, in_width, _ = get_const_tuple(data.shape)
    # Normalize dilation/strides to (h, w) pairs.
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    if isinstance(strides, (tuple, list)):
        stride_h, stride_w = strides
    else:
        stride_h = stride_w = strides
    kernel_h, kernel_w, in_channel, channel_multiplier = get_const_tuple(kernel.shape)
    # Effective kernel extent after dilation.
    dilated_h = (kernel_h - 1) * dilation_h + 1
    dilated_w = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(padding, (dilated_h, dilated_w))
    out_height = (in_height + pad_top + pad_down - dilated_h) // stride_h + 1
    out_width = (in_width + pad_left + pad_right - dilated_w) // stride_w + 1
    # Materialize the padded input only when some padding is required.
    if any((pad_top, pad_left, pad_down, pad_right)):
        padded = nn.pad(
            data, [0, pad_top, pad_left, 0], [0, pad_down, pad_right, 0], name="data_pad"
        )
    else:
        padded = data
    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod
    reduce_h = te.reduce_axis((0, kernel_h), name="reduce_h")
    reduce_w = te.reduce_axis((0, kernel_w), name="reduce_w")
    def _conv(n, h, w, c):
        # Output channel c maps back to (input channel, multiplier) pair.
        ic = idxdiv(c, channel_multiplier)
        m = idxmod(c, channel_multiplier)
        lhs = padded[
            n, h * stride_h + reduce_h * dilation_h, w * stride_w + reduce_w * dilation_w, ic
        ].astype(out_dtype)
        rhs = kernel[reduce_h, reduce_w, ic, m].astype(out_dtype)
        return te.sum(lhs * rhs, axis=[reduce_h, reduce_w])
    return te.compute(
        (batch, out_height, out_width, in_channel * channel_multiplier),
        _conv,
        name="depthwise_conv2d_nhwc_output",
    )
@autotvm.register_topi_schedule("depthwise_conv2d_nhwc.arm_cpu")
def schedule_depthwise_conv2d_nhwc(cfg, outs):
    """Create the schedule for depthwise_conv2d_nhwc.

    Parameters
    ----------
    cfg: ConfigEntity
        The configuration of this template
    outs: Array of Tensor
        The computation graph description of depthwise conv2d
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for depthwise_conv2d nhwc.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    out = outs[0]
    ##### space definition begin #####
    _, h, w, c = s[out].op.axis
    # Split the number of input/output channels
    cfg.define_split("tile_c", c, num_outputs=2, filter=lambda entry: entry.size[1] <= 8)
    # Split the height of the convolution
    cfg.define_split("tile_h", h, num_outputs=2)
    # Split the width of the convolution
    cfg.define_split("tile_w", w, num_outputs=2)
    # Additional out (e.g., requantization, bias addition, etc..)
    # 0: locate the output on the second last axis of the main computation
    # 1: locate the output closest to the main computation
    cfg.define_knob("locate_output", [0, 1])
    # Determine if we should unroll the computation of the inner tile
    cfg.define_knob("unroll_tile", [True, False])
    # fallback support
    if cfg.is_fallback:
        cfg["tile_c"] = SplitEntity([-1, 8])
        cfg["tile_h"] = SplitEntity([-1, 2])
        cfg["tile_w"] = SplitEntity([-1, 2])
        cfg["locate_output"] = OtherOptionEntity(1)
        cfg["unroll_tile"] = OtherOptionEntity(True)
    ##### space definition end #####
    def schedule_conv(conv):
        # Schedule the main depthwise computation: tile h/w/c, tensorize the
        # channel chunk with smlal when possible, otherwise vectorize it.
        conv_data = conv.op.input_tensors[0]
        kernel_data = conv.op.input_tensors[1]
        in_type = conv_data.dtype
        _, _, IC, channel_multiplier = get_const_tuple(kernel_data.shape)
        # NOTE(review): the compute axes are (n, h, w, c) but are unpacked here
        # as (n, w, h, c), so "tile_h" lands on the width axis and "tile_w" on
        # the height axis -- confirm whether this swap is intentional.
        n, w, h, c = conv.op.axis
        r_h, r_w = conv.op.reduce_axis
        ho, hi = cfg["tile_h"].apply(s, conv, h)
        wo, wi = cfg["tile_w"].apply(s, conv, w)
        co, ci = cfg["tile_c"].apply(s, conv, c)
        split_val = cfg["tile_c"].size[-1]
        target = Target.current(allow_none=False)
        use_tensorization = (
            (in_type == "int16")
            and (split_val == 8)
            and (IC % split_val == 0)
            and (channel_multiplier == 1)
            and target.features.has_asimd
        )
        data_pad_value = -1
        if conv_data.name == "data_pad":
            assert isinstance(conv_data.op, tvm.te.ComputeOp)
            # Define a strategy for padding computation
            cfg.define_knob("data_pad_strategy", [1, 2, 3])
            if cfg.is_fallback:
                # We cannot inline padding when tensorizing.
                # So, if we can tensorize, let's compute_at the closest axis
                cfg["data_pad_strategy"] = (
                    OtherOptionEntity(2) if use_tensorization else OtherOptionEntity(3)
                )
            # Compute padding on the third to last axis of the computation
            if cfg["data_pad_strategy"].val == 1:
                s[conv_data].vectorize(list(s[conv_data].op.axis)[-1])
                s[conv_data].compute_at(s[conv], ho)
            # Compute padding on the second to last axis of the computation
            if cfg["data_pad_strategy"].val == 2:
                s[conv_data].vectorize(list(s[conv_data].op.axis)[-1])
                s[conv_data].compute_at(s[conv], wo)
            # Inline padding during computation
            if cfg["data_pad_strategy"].val == 3:
                s[conv_data].compute_inline()
            data_pad_value = cfg["data_pad_strategy"].val
        if use_tensorization and data_pad_value != 3:
            smlal = smlal_int16_int32()
            s[conv].tensorize(ci, smlal)
        else:
            s[conv].vectorize(ci)
        if cfg["unroll_tile"].val:
            s[conv].unroll(r_h)
            s[conv].unroll(r_w)
            s[conv].unroll(wi)
            s[conv].unroll(hi)
        s[conv].reorder(n, ho, wo, co, hi, wi, r_h, r_w, ci)
        fused_n_ho = s[conv].fuse(n, ho)
        return fused_n_ho
    def schedule_conv_out(out):
        # Schedule the epilogue op (requantization, bias add, ...) with the
        # same tiling as the convolution so the two can be fused.
        n, h, w, c = out.op.axis
        co, ci = cfg["tile_c"].apply(s, out, c)
        wo, wi = cfg["tile_w"].apply(s, out, w)
        ho, hi = cfg["tile_h"].apply(s, out, h)
        s[out].reorder(n, ho, wo, co, hi, wi, ci)
        # BUGFIX: read the knob value explicitly. The config entity object is
        # always truthy, so the previous `if cfg["unroll_tile"]:` unrolled
        # unconditionally regardless of the tuned knob (compare the `.val`
        # access used in schedule_conv above).
        if cfg["unroll_tile"].val:
            s[out].unroll(wi)
            s[out].unroll(hi)
        if out.dtype in ["int8", "uint8"]:
            # In case of quantized convolution further split the channel in batches of 4 elements
            # so that we can use arm intrinsics to run fixed_point_multiplication
            ci_outer, ci_inner = s[out].split(ci, 4)
            s[out].vectorize(ci_inner)
            s[out].unroll(ci_outer)
        else:
            s[out].vectorize(ci)
        fused_n_ho = s[out].fuse(n, ho)
        return hi, wi, fused_n_ho
    def _callback(op):
        if op.name == "depthwise_conv2d_nhwc_output":
            conv = op.output(0)
            if conv != out:
                # There is an epilogue: schedule both ops and place the
                # convolution inside the epilogue's tile per "locate_output".
                hi, wi, p_axis = schedule_conv_out(out)
                schedule_conv(conv)
                if cfg["locate_output"].val == 0:
                    s[conv].compute_at(s[out], hi)
                if cfg["locate_output"].val == 1:
                    s[conv].compute_at(s[out], wi)
            else:
                p_axis = schedule_conv(out)
            s[out].parallel(p_axis)
    traverse_inline(s, outs[0].op, _callback)
    return s
@autotvm.register_topi_schedule("depthwise_conv2d_nchw_spatial_pack.arm_cpu")
def schedule_depthwise_conv2d_nchw_spatial_pack(cfg, outs):
    """Create the schedule for depthwise_conv2d_nchw_spatial_pack"""
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    s = te.create_schedule([t.op for t in outs])
    def _callback(op):
        # Only the unpacking stage of the spatial-pack template is scheduled;
        # everything else hangs off its producers.
        if op.tag != "spatial_depthwise_conv2d_nchw_output":
            return
        output = op.output(0)
        conv = op.input_tensors[0]
        data_vec = conv.op.input_tensors[0]
        kernel_vec = conv.op.input_tensors[1]
        # Walk back to the raw kernel when it was packed by "kernel_vec".
        kernel = (
            kernel_vec.op.input_tensors[0] if kernel_vec.op.name == "kernel_vec" else kernel_vec
        )
        # A materialized dilation stage is folded into its consumer.
        if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
            s[kernel].compute_inline()
        _schedule_spatial_pack(cfg, s, data_vec, kernel_vec, conv, output, outs[0])
    traverse_inline(s, outs[0].op, _callback)
    return s
def _decl_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile):
    """Declare the spatial-pack depthwise conv2d (NCHW) compute.

    Re-lays the (padded) input and the kernel into tiled, vector-friendly
    buffers ("data_vec"/"data_vec_undilated" and "kernel_vec"), performs the
    depthwise convolution on the packed layout, and unpacks the result back
    to NCHW ("output_unpack"). Tiling factors come from the AutoTVM config.
    """
    out_dtype = out_dtype or data.dtype
    N, C, IH, IW = get_const_tuple(data.shape)
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    if len(kernel.shape) == 4:
        pre_packed = False
        C, M, KH, KW = get_const_tuple(kernel.shape)
    else:  # kernel tensor is pre packed
        pre_packed = True
        C, M, KH, KW, VC = get_const_tuple(kernel.shape)
        # Recover the logical channel count from the chunked layout.
        C = C * VC
    dilated_kernel_h = (KH - 1) * dilation_h + 1
    dilated_kernel_w = (KW - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    OH = (IH + pad_top + pad_down - dilated_kernel_h) // HSTR + 1
    OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1
    # pack data
    HPAD = pad_top + pad_down
    WPAD = pad_left + pad_right
    DOPAD = HPAD != 0 or WPAD != 0
    if DOPAD:
        data_pad = nn.pad(
            data, (0, 0, pad_top, pad_left), (0, 0, pad_down, pad_right), name="data_pad"
        )
    else:
        data_pad = data
    # fallback support
    # Currently, Mali schedule doesn't use it like conv2d.
    if cfg.is_fallback:
        ref_log = autotvm.tophub.load_reference_log(
            "arm_cpu", "rk3399", "depthwise_conv2d_nchw_spatial_pack.arm_cpu"
        )
        cfg.fallback_with_reference_log(ref_log)
    # ==================== define configuration space ====================
    n, c, oh, ow = cfg.axis(N), cfg.axis(C), cfg.axis(OH), cfg.axis(OW)
    kh, kw = cfg.reduce_axis(KH), cfg.reduce_axis(KW)
    # Currently, Mali schedule doesn't use it like conv2d.
    # Leave num_tile for possible future use of Mali schedule
    if num_tile == 2:  # for arm cpu
        co, vc = cfg.define_split("tile_co", c, num_outputs=2)
        oh, vh = cfg.define_split("tile_oh", oh, num_outputs=2)
        ow, vw = cfg.define_split("tile_ow", ow, num_outputs=2)
    else:
        raise RuntimeError("Invalid num_tile")
    cfg.define_reorder(
        "reorder_0",
        [n, co, oh, ow, kh, kw, vh, vw, vc],
        policy="candidate",
        candidate=[[n, co, oh, ow, kh, kw, vh, vw, vc], [n, co, oh, ow, kh, kw, vc, vh, vw]],
    )
    cfg.define_reorder(
        "reorder_1",
        [n, co, oh, ow, vh, vw, vc],
        policy="candidate",
        candidate=[
            [n, co, oh, ow, vh, vw, vc],
            [n, co, oh, ow, vc, vh, vw],
            [n, co, oh, ow, vh, vc, vw],
        ],
    )
    cfg.define_annotate("ann_reduce", [kh, kw], policy="try_unroll")
    cfg.define_annotate("ann_spatial", [vh, vw, vc], policy="try_unroll_vec")
    # ====================================================================
    # Tiling factors picked by the config (innermost split sizes).
    VC = cfg["tile_co"].size[-1]
    VH = cfg["tile_oh"].size[-1]
    VW = cfg["tile_ow"].size[-1]
    kvshape = (C // VC, M, KH, KW, VC)
    ovshape = (N, C * M // VC, OH // VH, OW // VW, VH, VW, VC)
    oshape = (N, C * M, OH, OW)
    if dilation_h != 1 or dilation_w != 1:
        # undilate input data: gather only the input points the dilated
        # kernel actually touches, so the packed buffer is dense
        dvshape = (N, OH // VH, OW // VW, C, KH, KW, VH, VW)
        data_vec = te.compute(
            dvshape,
            lambda n, h, w, c, kh, kw, vh, vw: data_pad[n][c][
                (h * VH + vh) * HSTR + kh * dilation_h
            ][(w * VW + vw) * WSTR + kw * dilation_w],
            name="data_vec_undilated",
        )
    else:
        # Dense case: each tile carries the halo needed by the KHxKW window.
        dvshape = (N, OH // VH, OW // VW, C, VH * HSTR + KH - 1, VW * WSTR + KW - 1)
        data_vec = te.compute(
            dvshape,
            lambda n, h, w, c, vh, vw: data_pad[n][c][h * VH * HSTR + vh][w * VW * WSTR + vw],
            name="data_vec",
        )
    if pre_packed:
        kernel_vec = kernel
    else:
        kernel_vec = te.compute(
            kvshape, lambda co, m, kh, kw, vc: kernel[co * VC + vc][m][kh][kw], name="kernel_vec"
        )
    kh = te.reduce_axis((0, KH), name="kh")
    kw = te.reduce_axis((0, KW), name="kw")
    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod
    if dilation_h != 1 or dilation_w != 1:
        conv = te.compute(
            ovshape,
            lambda n, co, h, w, vh, vw, vc: te.sum(
                data_vec[n, h, w, idxdiv(co * VC + vc, M), kh, kw, vh, vw].astype(out_dtype)
                * kernel_vec[idxdiv(co, M), idxmod(co, M), kh, kw, vc].astype(out_dtype),
                axis=[kh, kw],
            ),
            name="depthwise_conv",
        )
    else:
        conv = te.compute(
            ovshape,
            lambda n, co, h, w, vh, vw, vc: te.sum(
                data_vec[n, h, w, idxdiv((co * VC + vc), M), vh * HSTR + kh, vw * WSTR + kw].astype(
                    out_dtype
                )
                * kernel_vec[idxdiv(co, M), idxmod(co, M), kh, kw, vc].astype(out_dtype),
                axis=[kh, kw],
            ),
            name="depthwise_conv",
        )
    # Unpack the tiled 7-D result back to plain NCHW.
    output = te.compute(
        oshape,
        lambda n, co, h, w: conv[
            n,
            idxdiv(co, VC),
            idxdiv(h, VH),
            idxdiv(w, VW),
            idxmod(h, VH),
            idxmod(w, VW),
            idxmod(co, VC),
        ],
        name="output_unpack",
        tag="spatial_depthwise_conv2d_nchw_output",
    )
    return output
def _schedule_spatial_pack(cfg, s, data_vec, kernel_vec, conv, output, last):
    """Schedule the spatial-pack depthwise conv2d declared by _decl_spatial_pack.

    ``conv`` is the packed 7-D computation, ``output`` its NCHW unpacking and
    ``last`` the final op of the graph (``output`` itself or a fused
    epilogue). Several knobs choose where padding, data packing and the
    convolution are computed relative to their consumers.
    """
    n, co, oh, ow, vh, vw, vc = s[conv].op.axis
    kh, kw = s[conv].op.reduce_axis
    # The undilated packing has two extra (kh, kw) axes.
    if data_vec.op.name == "data_vec_undilated":
        _, dv_oh, dv_ow, dv_c, _, _, dv_vh, dv_vw = s[data_vec].op.axis
    else:
        _, dv_oh, dv_ow, dv_c, dv_vh, dv_vw = s[data_vec].op.axis
    data_pad = data_vec.op.input_tensors[0]
    if data_pad.op.name == "data_pad":
        assert isinstance(data_pad.op, tvm.te.ComputeOp)
        has_padding = True
    else:
        assert isinstance(data_pad.op, tvm.te.PlaceholderOp)
        has_padding = False
    # Where to compute the padding: 0 keeps it standalone, 1 inlines it,
    # 2 vectorizes it standalone, 3/4 vectorize and attach it to the packing.
    cfg.define_knob("data_pad_inline", [0, 1, 2, 3, 4])
    if cfg["data_pad_inline"].val == 1 and has_padding:
        s[data_pad].compute_inline()
    if cfg["data_pad_inline"].val == 2 and has_padding:
        s[data_pad].vectorize(list(s[data_pad].op.axis)[-1])
    if cfg["data_pad_inline"].val == 3 and has_padding:
        s[data_pad].vectorize(list(s[data_pad].op.axis)[-1])
        s[data_pad].compute_at(s[data_vec], dv_oh)
    if cfg["data_pad_inline"].val == 4 and has_padding:
        s[data_pad].vectorize(list(s[data_pad].op.axis)[-1])
        s[data_pad].compute_at(s[data_vec], dv_ow)
    # Where to attach the data packing relative to the convolution.
    cfg.define_knob("data_vec_inline", [0, 1, 2, 3])
    if cfg["data_vec_inline"].val == 1:
        s[data_vec].compute_at(s[conv], oh)
    if cfg["data_vec_inline"].val == 2:
        s[data_vec].compute_at(s[conv], ow)
    if cfg["data_vec_inline"].val == 3:
        s[data_vec].compute_at(s[conv], co)
    # schedule conv
    cfg["reorder_0"].apply(s, conv, [n, co, oh, ow, kh, kw, vh, vw, vc])
    cfg["ann_reduce"].apply(
        s,
        conv,
        [kh, kw],
        axis_lens=[get_const_int(kh.dom.extent), get_const_int(kw.dom.extent)],
        max_unroll=16,
        cfg=cfg,
    )
    cfg["ann_spatial"].apply(
        s,
        conv,
        [vh, vw, vc],
        axis_lens=[cfg["tile_oh"].size[-1], cfg["tile_ow"].size[-1], cfg["tile_co"].size[-1]],
        max_unroll=16,
        cfg=cfg,
    )
    # schedule fusion: tile `last` with the same factors so the unpacked
    # layout lines up with the packed one
    n, co, h, w = s[last].op.axis
    co, vc = cfg["tile_co"].apply(s, last, co)
    oh, vh = cfg["tile_oh"].apply(s, last, h)
    ow, vw = cfg["tile_ow"].apply(s, last, w)
    cfg["reorder_1"].apply(s, last, [n, co, oh, ow, vh, vw, vc])
    if last != output:
        s[output].compute_inline()
        cfg["ann_spatial"].apply(
            s,
            last,
            [vh, vw, vc],
            axis_lens=[cfg["tile_oh"].size[-1], cfg["tile_ow"].size[-1], cfg["tile_co"].size[-1]],
            max_unroll=16,
            cfg=cfg,
        )
    else:
        s[last].vectorize(vw)
    # Where to attach the convolution relative to the final op.
    cfg.define_knob("conv_inline", [0, 1, 2, 3])
    if cfg["conv_inline"].val == 1:
        s[conv].compute_at(s[last], ow)
    if cfg["conv_inline"].val == 2:
        s[conv].compute_at(s[last], oh)
    if cfg["conv_inline"].val == 3:
        s[conv].compute_at(s[last], co)
    # mark parallel
    s[last].parallel(co)
    if data_vec.op.name == "data_vec_undilated":
        _, h, _, _, _, _, _, _ = s[data_vec].op.axis
    else:
        _, h, _, _, _, _ = s[data_vec].op.axis
    s[data_vec].parallel(h)
    if kernel_vec.op.name == "kernel_vec":
        co, _, _, _, _ = s[kernel_vec].op.axis
        if autotvm.GLOBAL_SCOPE.in_tuning:
            # kernel packing will be pre-computed during compilation, so we skip
            # this part to make tuning records correct
            s[kernel_vec].pragma(co, "debug_skip_region")
        else:
            s[kernel_vec].parallel(co)
    return s
@autotvm.register_topi_compute("depthwise_conv2d_nhwc_dsp.arm_cpu")
def depthwise_conv2d_nhwc_dsp(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute conv2d_nhwc with v7e-m DSP instructions."""
    # Registration shim: the implementation lives in mprofile.dsp.depthwise_conv2d.
    out = depthwise_conv2d_nhwc_dsp_compute(cfg, data, kernel, strides, padding, dilation, out_dtype)
    return out
@autotvm.register_topi_schedule("depthwise_conv2d_nhwc_dsp.arm_cpu")
def schedule_depthwise_conv2d_nhwc_dsp(cfg, outs):
    """Create schedule for conv2d_nhwc_dsp"""
    # Registration shim: the schedule lives in mprofile.dsp.depthwise_conv2d.
    sched = depthwise_conv2d_nhwc_dsp_schedule(cfg, outs)
    return sched
| 25,219 | 33.930748 | 100 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/arm_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Arm target utility functions"""
from tvm.target import Target
def get_tiling_B_interleaved_t(interleave_A):
    """Compute the tiling information for matrix B', where B'
    is the transposed and interleaved version of matrix B in C=A*B.
    The tiling information is chosen to maximize register usage during the
    tile computation.
    Please refer to:
    - https://discuss.tvm.apache.org/t/rfc-improve-quantized-convolution-performance-for-armv8-architectures # pylint: disable=line-too-long
    - https://discuss.tvm.apache.org/t/rfc-accelerate-quantized-convolution-through-dot-product
    - https://discuss.tvm.apache.org/t/rfc-improve-quantized-convolution-through-mmla-instruction
    - Conv2DGemmWeightTransformRel in src/relay/op/nn/convolution.h
    In order to have more information
    Parameters
    ----------
    interleave_A: bool
        determines if A is expected to be interleaved
    Returns
    ----------
    tile_rows_B: the output tile rows of B'
    tile_cols_B: the output tile columns of B'
    """
    features = Target.current(allow_none=False).features
    if features.has_matmul_i8:
        # smmla/ummla path: A must be interleaved. Each load from B' carries
        # 8 elements and 12 rows of B' (12 columns of B) are processed.
        return 12, 8
    if features.has_dotprod:
        # Dot-product path. The number of tile rows of B' depends on the
        # strategy: 12 columns of B' when A is interleaved, 16 otherwise.
        # The dot-product instruction reduces groups of 4 values, so a tile
        # of B' must be 4 columns wide (4 rows of the original B).
        return (12, 4) if interleave_A else (16, 4)
    # No acceleration available: A must be interleaved and we load 4 rows of
    # B' (4 columns of B), each holding 16 elements.
    return 4, 16
| 3,171 | 40.736842 | 140 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/tensor_intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D int8 schedule on ARM"""
import tvm
from tvm import te
from tvm.ir import register_intrin_lowering
def gemm_4x4_int8_int8_int32(M, N, K, unroll, in_type):
    """
    Int8 4x4 matrix multiplication and accumulation using a sequence of
    umull -> uadalp -> umull2 -> uadalp instructions. This function
    takes two arrays of int8 data type A[4][K] and B[4][K], and produces
    a 4x4 matrix which is equal to A*B'.
    The pseudo code is as follows.
    .. code-block:: c
        void gemm_4x4_int8_int8_int32(int8 A[4][K], int8 B[4][K], int32 C[4][4]){
            for (int i = 0; i < 4; i++){
                for (int j = 0; j < 4; j++){
                    for (int k = 0; k < K; k++){
                        C[i][j] += A[i][k] * B[j][k]
                    }
                }
        }
    Notes:
        * The tiling strategy is picked to maximize register usage.
    Parameters
    ----------
    M : int
        rows of the matrix A
    N : int
        columns of the matrix B
    K : int
        columns of matrix A
    unroll : bool
        Unroll the loop accumulation if True
    in_type : str, {'uint8', 'int8'}
    Returns
    -------
    intrin : TensorIntrin
        The ARM uint8/int8 TensorIntrin that can be used in tensorizing schedule
    """
    assert in_type in ["uint8", "int8"]
    # A and B are consumed in K/16 chunks of 16 int8 values (one 128-bit
    # vector load per chunk).
    A = te.placeholder((K // 16, te.var("m"), 16), dtype=in_type, name="A")
    B = te.placeholder((K // 16, te.var("n"), 16), dtype=in_type, name="B")
    dtype_vec = in_type + "x16"
    idxm = tvm.tir.indexmod
    k = te.reduce_axis((0, K), "k")
    C = te.compute(
        (te.var("m"), te.var("n")),
        lambda x, y: te.sum(
            A[k // 16, x, idxm(k, 16)].astype("int32") * B[k // 16, y, idxm(k, 16)].astype("int32"),
            axis=k,
        ),
        name="C",
    )
    a_buffer = tvm.tir.decl_buffer(
        A.shape,
        dtype=in_type,
        name="a_buffer",
        offset_factor=1,
        strides=[te.var("sa_1"), te.var("sa_2"), 1],
    )
    b_buffer = tvm.tir.decl_buffer(
        B.shape,
        dtype=in_type,
        name="b_buffer",
        offset_factor=1,
        strides=[te.var("sb_1"), te.var("sb_2"), 1],
    )
    c_buffer = tvm.tir.decl_buffer(
        C.shape, dtype="int32", name="c_buffer", offset_factor=1, strides=[te.var("sc"), 1]
    )
    # Intrinsics used in the following algorithm: pick the signed or unsigned
    # AArch64 NEON variant depending on the input dtype.
    umull_intrin = "llvm.aarch64.neon.umull" if in_type == "uint8" else "llvm.aarch64.neon.smull"
    uaddlp_intrin = "llvm.aarch64.neon.uaddlp" if in_type == "uint8" else "llvm.aarch64.neon.saddlp"
    addp_intrin = "llvm.aarch64.neon.addp"
    def uadalp(a, b):
        """Add pair and accumulate
        Parameters:
        ----------
        a: int16x8 vector
        b: int16x8 vector
        Returns:
        --------
        return a int32x4 vector
        Pseudocode:
        ----------
        a += (b0+b1, b2+b3, b4+b5, b6+b7)
        """
        return a + tvm.tir.call_llvm_pure_intrin(
            "int32x4", uaddlp_intrin, tvm.tir.const(1, "uint32"), b
        )
    def umull(a, b):
        """Multiply long (higher part)
        Parameters:
        ----------
        a: int8x16 vector
        b: int8x16 vector
        Returns:
        --------
        return a int16x8 vector
        Pseudocode:
        ----------
        c = (a0*b0, a1*b1, a2*b2, a3*b3, a4*b4, a5*b5, a6*b6, a7*b7)
        """
        a_high = tvm.tir.call_intrin("int8x8", "tir.vectorhigh", a)
        b_high = tvm.tir.call_intrin("int8x8", "tir.vectorhigh", b)
        c = tvm.tir.call_llvm_pure_intrin(
            "int16x8", umull_intrin, tvm.tir.const(2, "uint32"), a_high, b_high
        )
        return c
    def umull2(a, b):
        """Multiply long (lower part)
        Parameters:
        ----------
        a: int8x16 vector
        b: int8x16 vector
        Returns:
        --------
        return a int16x8 vector
        Pseudocode:
        ----------
        c = (a8*b8, a9*b9, a10*b10, a11*b11, a12*b12, a13*b13, a14*b14, a15*b15)
        """
        a_low = tvm.tir.call_intrin("int8x8", "tir.vectorlow", a)
        b_low = tvm.tir.call_intrin("int8x8", "tir.vectorlow", b)
        c = tvm.tir.call_llvm_pure_intrin(
            "int16x8", umull_intrin, tvm.tir.const(2, "uint32"), a_low, b_low
        )
        return c
    def addp(a, b):
        """Add two vectors in pairs
        Parameters:
        ----------
        a: int32x4 vector
        b: int32x4 vector
        Returns:
        --------
        return a int32x4 vector
        Pseudocode:
        ----------
        c = (a0+a1, a2+a3, b0+b1, b0+b3)
        """
        return tvm.tir.call_llvm_pure_intrin(
            "int32x4", addp_intrin, tvm.tir.const(2, "uint32"), a, b
        )
    def accumulation_loop(M, N, ins, acc, tile_idx):
        """Internal tile accumulation. This function
        takes two arrays of int8 data type A[tile_idx][4][16] and B[tile_idx][4][16], produces
        a 4x4 matrix which is equal to A*B' and accumulates into C[4][4]
        The pseudo code is as follows.
        .. code-block:: c
            void gemm_4x4_int8_int8_int32(int8 A[tile_idx][4][K],
                                          int8 B[tile_idx][4][K],
                                          int32 C[4][4]){
                for (int i = 0; i < 4; i++){
                    for (int j = 0; j < 4; j++){
                        for (int k = 0; k < 16; k++){
                            C[i][j] += A[tile_idx][i][k] * B[tile_idx][j][k]
                        }
                }
        }
        Notes:
            * The tiling strategy is picked to maximize register usage.
        Parameters:
        ----------
        M : int
            Number of total rows of the output matrix
        N : int
            Number of total columns of the output matrix
        ins : list of tvm.tir.buffer
            Input buffers
        acc : tvm.tir.ir_builder.BufferVar
            Bank of register accumulators
        tiled_idx : int
            Index of a sub-tile of A and B in A[tile_idx][:][:] and B[tile_idx][:][:].
            Please note that 0 <= tile_idx <= K//16
        """
        # Load up to 4 rows of A and B for this K-chunk; rows beyond M/N are
        # replaced by zero vectors so partial tiles still work.
        a0 = ins[0].vload([tile_idx, 0, 0], dtype_vec)
        a1 = tvm.tir.const(0, "int8x16")
        if M > 1:
            a1 = ins[0].vload([tile_idx, 1, 0], dtype_vec)
        a2 = tvm.tir.const(0, "int8x16")
        if M > 2:
            a2 = ins[0].vload([tile_idx, 2, 0], dtype_vec)
        a3 = tvm.tir.const(0, "int8x16")
        if M > 3:
            a3 = ins[0].vload([tile_idx, 3, 0], dtype_vec)
        b0 = ins[1].vload([tile_idx, 0, 0], dtype_vec)
        b1 = tvm.tir.const(0, "int8x16")
        if N > 1:
            b1 = ins[1].vload([tile_idx, 1, 0], dtype_vec)
        b2 = tvm.tir.const(0, "int8x16")
        if N > 2:
            b2 = ins[1].vload([tile_idx, 2, 0], dtype_vec)
        b3 = tvm.tir.const(0, "int8x16")
        if N > 3:
            b3 = ins[1].vload([tile_idx, 3, 0], dtype_vec)
        # First half
        # Lower part of a0 * {b0,b1,b2,b3}
        d00 = umull(a0, b0)
        d01 = umull(a0, b1)
        d02 = umull(a0, b2)
        d03 = umull(a0, b3)
        # Lower part of a1 * {b0,b1,b2,b3}
        d10 = umull(a1, b0)
        d11 = umull(a1, b1)
        d12 = umull(a1, b2)
        d13 = umull(a1, b3)
        # Accumulate
        acc[0] = uadalp(acc[0], d00)
        acc[1] = uadalp(acc[1], d01)
        acc[2] = uadalp(acc[2], d02)
        acc[3] = uadalp(acc[3], d03)
        acc[4] = uadalp(acc[4], d10)
        acc[5] = uadalp(acc[5], d11)
        acc[6] = uadalp(acc[6], d12)
        acc[7] = uadalp(acc[7], d13)
        # Higher part of a0 * {b0,b1,b2,b3}
        d00 = umull2(a0, b0)
        d01 = umull2(a0, b1)
        d02 = umull2(a0, b2)
        d03 = umull2(a0, b3)
        # Higher part of a1 * {b0,b1,b2,b3}
        d10 = umull2(a1, b0)
        d11 = umull2(a1, b1)
        d12 = umull2(a1, b2)
        d13 = umull2(a1, b3)
        # Accumulate again
        acc[0] = uadalp(acc[0], d00)
        acc[1] = uadalp(acc[1], d01)
        acc[2] = uadalp(acc[2], d02)
        acc[3] = uadalp(acc[3], d03)
        acc[4] = uadalp(acc[4], d10)
        acc[5] = uadalp(acc[5], d11)
        acc[6] = uadalp(acc[6], d12)
        acc[7] = uadalp(acc[7], d13)
        # Second half
        # Lower part of a2 * {b0,b1,b2,b3}
        d00 = umull(a2, b0)
        d01 = umull(a2, b1)
        d02 = umull(a2, b2)
        d03 = umull(a2, b3)
        # Lower part of a3 * {b0,b1,b2,b3}
        d10 = umull(a3, b0)
        d11 = umull(a3, b1)
        d12 = umull(a3, b2)
        d13 = umull(a3, b3)
        # Accumulate
        acc[8] = uadalp(acc[8], d00)
        acc[9] = uadalp(acc[9], d01)
        acc[10] = uadalp(acc[10], d02)
        acc[11] = uadalp(acc[11], d03)
        acc[12] = uadalp(acc[12], d10)
        acc[13] = uadalp(acc[13], d11)
        acc[14] = uadalp(acc[14], d12)
        acc[15] = uadalp(acc[15], d13)
        # Higher part of a2 * {b0,b1,b2,b3}
        d00 = umull2(a2, b0)
        d01 = umull2(a2, b1)
        d02 = umull2(a2, b2)
        d03 = umull2(a2, b3)
        # Lower part of a3 * {b0,b1,b2,b3}
        d10 = umull2(a3, b0)
        d11 = umull2(a3, b1)
        d12 = umull2(a3, b2)
        d13 = umull2(a3, b3)
        # Accumulate
        acc[8] = uadalp(acc[8], d00)
        acc[9] = uadalp(acc[9], d01)
        acc[10] = uadalp(acc[10], d02)
        acc[11] = uadalp(acc[11], d03)
        acc[12] = uadalp(acc[12], d10)
        acc[13] = uadalp(acc[13], d11)
        acc[14] = uadalp(acc[14], d12)
        acc[15] = uadalp(acc[15], d13)
    def _intrin_func(ins, outs):
        def _instr():
            ib = tvm.tir.ir_builder.create()
            # Allocate a local buffer (possibly translates to registers)
            acc = ib.allocate("int32x4", 16, name="accs", scope="local")
            m = outs[0].shape[0]
            n = outs[0].shape[1]
            # Initialization
            for i in range(0, 16):
                acc[i] = tvm.tir.const(0, "int32x4")
            if unroll:
                for i in range(0, int(K // 16)):
                    accumulation_loop(M, N, ins, acc, i)
            else:
                with ib.for_range(0, K // 16, name="i") as i:
                    accumulation_loop(M, N, ins, acc, i)
            # Final accumulations
            # acc[4*r + c] contains the partial accumulations of element C[r][c]
            #
            # In particular:
            # acc[4*r] contains the partial sums of a[r,0:K].*b[0,0:K] -> (a,b,c,d)
            # acc[4*r+1] contains the partial sums of a[r, 0:K].*b[1,0:K] -> (e,f,g,h)
            # acc[4*r+2] contains the partial sums of a[r, 0:K].*b[2,0:K] -> (i,j,k,l)
            # acc[4*r+3] contains the partial sums of a[r, 0:K].*b[3,0:K] -> (m,n,o,p)
            #
            # Please note that 0<= r, c < 4
            acc[0] = addp(acc[0], acc[1])  # (a+b, c+d, e+f, g+h)
            acc[1] = addp(acc[2], acc[3])  # (i+j, k+l, m+n, o+p)
            acc[0] = addp(acc[0], acc[1])  # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)
            acc[4] = addp(acc[4], acc[5])  # (a+b, c+d, e+f, g+h)
            acc[5] = addp(acc[6], acc[7])  # (i+j, k+l, m+n, o+p)
            acc[4] = addp(acc[4], acc[5])  # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)
            acc[8] = addp(acc[8], acc[9])  # (a+b, c+d, e+f, g+h)
            acc[9] = addp(acc[10], acc[11])  # (i+j, k+l, m+n, o+p)
            acc[8] = addp(acc[8], acc[9])  # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)
            acc[12] = addp(acc[12], acc[13])  # (a+b, c+d, e+f, g+h)
            acc[13] = addp(acc[14], acc[15])  # (i+j, k+l, m+n, o+p)
            acc[12] = addp(acc[12], acc[13])  # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)
            # Store the result: narrow each row vector to the actual number of
            # output columns N before storing.
            if N > 3:
                out_0 = acc[0]
                out_1 = acc[4]
                out_2 = acc[8]
                out_3 = acc[12]
            elif N > 2:
                out_0 = tvm.tir.call_intrin("int32x3", "tir.reinterpret", acc[0])
                out_1 = tvm.tir.call_intrin("int32x3", "tir.reinterpret", acc[4])
                out_2 = tvm.tir.call_intrin("int32x3", "tir.reinterpret", acc[8])
                out_3 = tvm.tir.call_intrin("int32x3", "tir.reinterpret", acc[12])
            elif N > 1:
                out_0 = tvm.tir.call_intrin("int32x2", "tir.reinterpret", acc[0])
                out_1 = tvm.tir.call_intrin("int32x2", "tir.reinterpret", acc[4])
                out_2 = tvm.tir.call_intrin("int32x2", "tir.reinterpret", acc[8])
                out_3 = tvm.tir.call_intrin("int32x2", "tir.reinterpret", acc[12])
            else:
                out_0 = tvm.tir.call_intrin("int32", "tir.reinterpret", acc[0])
                out_1 = tvm.tir.call_intrin("int32", "tir.reinterpret", acc[4])
                out_2 = tvm.tir.call_intrin("int32", "tir.reinterpret", acc[8])
                out_3 = tvm.tir.call_intrin("int32", "tir.reinterpret", acc[12])
            ib.emit(outs[0].vstore([0, 0], out_0))
            if M > 1:
                ib.emit(outs[0].vstore([1, 0], out_1))
            if M > 2:
                ib.emit(outs[0].vstore([2, 0], out_2))
            if M > 3:
                ib.emit(outs[0].vstore([3, 0], out_3))
            return ib.get()
        # body, reset, update
        return _instr()
    buffer_params = {"offset_factor": 1}
    return te.decl_tensor_intrin(
        C.op,
        _intrin_func,
        binds={A: a_buffer, B: b_buffer, C: c_buffer},
        default_buffer_params=buffer_params,
    )
def dot_int8_int8_int32_neon_82(int32_lanes, dtype="uint"):
"""
Int8 dot product by every 4 elements using ARM v8.2 udot.
This function takes two arrays of int8 datatype -- data[4] and
kernel[int32_lanes][4] -- and computes a dot product of data[4] with every
4 elements of kernels, resulting in output[int32_lanes] of uint32 datatype.
The pseudo code is as follows.
.. code-block:: c
void dot_int8_int8_int32(int8 data[4], int8 kernel[16][4], int32 output[16]){
for (int i = 0; i < int32_lanes; i++){
out[i] = 0;
for (int k = 0; k < 4; k++){
out[i] += data[k] * kernel[i][k]
}
}
}
Physically, the kernel array sits in a vector register and
the data[4] is broadcasted to another vector register. This
function returns a TensorIntrin that can be used to tensorize
a schedule.
Parameters
----------
int32_lanes : int
How many int32/uint32 to produce
dtype : str, optional, {"uint", "int"}
Whether it works on unsigned int or signed int
Returns
-------
intrin : TensorIntrin
The ARM uint8 TensorIntrin that can be used in tensorizing schedule
"""
num_int8_elements = 4 # 4 int8 elements in int32
data = te.placeholder((num_int8_elements,), dtype=f"{dtype}8", name="data")
kernel = te.placeholder((int32_lanes, num_int8_elements), dtype=f"{dtype}8", name="kernel")
k = te.reduce_axis((0, num_int8_elements), name="k")
C = te.compute(
(int32_lanes,),
lambda i: te.sum(data[k].astype(f"{dtype}32") * kernel[i, k].astype(f"{dtype}32"), axis=k),
name="C",
)
a_buffer = tvm.tir.decl_buffer(
data.shape, dtype=f"{dtype}8", name="a_buffer", offset_factor=1, strides=[1]
)
b_buffer = tvm.tir.decl_buffer(
kernel.shape, dtype=f"{dtype}8", name="b_buffer", offset_factor=1, strides=[te.var("s"), 1]
)
def _intrin_func(ins, outs):
def _instr(index):
ib = tvm.tir.ir_builder.create()
if index == 1:
ib.emit(outs[0].vstore(0, tvm.tir.const(0, f"{dtype}32x{int32_lanes}")))
return ib.get()
dtype_a = f"{dtype}8x{num_int8_elements}"
dtype_b = f"{dtype}8x{int32_lanes * num_int8_elements}"
dtype_c = f"{dtype}32x{int32_lanes}"
a_int8 = ins[0].vload([0], dtype_a)
re_int32 = tvm.tir.call_intrin(f"{dtype}32", "tir.reinterpret", a_int8)
# broadcast a
vec_ai32 = re_int32.astype(dtype_c)
vec_a = tvm.tir.call_intrin(dtype_b, "tir.reinterpret", vec_ai32)
vec_b = ins[1].vload([0, 0], dtype_b)
vec_c = outs[0].vload([0], dtype_c)
inst = "udot" if dtype == "uint" else "sdot"
inst = "llvm.aarch64.neon.%s.v%di32.v%di8" % (
inst,
int32_lanes,
int32_lanes * num_int8_elements,
)
vdot = tvm.tir.call_llvm_pure_intrin(
dtype_c, inst, tvm.tir.const(3, "uint32"), vec_c, vec_a, vec_b
)
ib.emit(outs[0].vstore(0, vdot))
return ib.get()
# body, reset, update
return _instr(0), _instr(1), _instr(2)
buffer_params = {"offset_factor": 1}
return te.decl_tensor_intrin(
C.op,
_intrin_func,
binds={data: a_buffer, kernel: b_buffer},
default_buffer_params=buffer_params,
)
def dot_int8_int8_int32_neon():
    """
    Int8 dot product using vmlal instructions.

    .. code-block:: c

        void dot_int8_int8_int32(int8 data[4], int8 kernel[4][4], int32 output[4]){
            for (int i = 0; i < 4; i++){
                out[i] = 0;
                for (int k = 0; k < 4; k++){
                    out[i] += data[k] * kernel[i][k]
                }
            }
        }

    We use the smull and saddlp instructions to compute the dot product.
    smull : int8x16 -> int8x16 -> int16x8 elementwise multiplication
    saddlp: int16x8 -> int32x4 pairwise addition of elements

    Data is broadcast across the register
                        int8 elements
        |         data      |         data      |
        | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |

                                smull

                        int8 elements
        |     kernel[i]     |   kernel[i+1]     |
        | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |

                                  =
                        int16 elements
        | data * kernel[i]  | data * kernel[i+1]|
        | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |

                              saddlp =
                        int32 elements
        | psum(data*kernel[i]) | psum(data*kernel[i+1]) |
        |     0     |    1     |    2     |    3        |

    We apply the above kernel twice and use addp to compute the second set of
    pairwise additions

                 int32 elements (narrowed so they fit on a line)
        | psum d*k[i] | psum d*k[i+1] |      | psum d*k[i+2] | psum d*k[i+3] |
        |  0   |  1   |   2    |  3   | addp |   4   |   5   |   6   |   7   |
                                  =
        |sum d*ki |sum d*ki1|sum d*ki2|sum d*ki3|
        |    0    |    1    |    2    |    3    |
    """
    int32_lanes = 4  # 4 int32 lanes = 128
    num_int8_elements = 4  # 4 int8 elements in int32
    data = te.placeholder((num_int8_elements,), dtype="int8", name="data")
    kernel = te.placeholder((int32_lanes, num_int8_elements), dtype="int8", name="kernel")
    k = te.reduce_axis((0, num_int8_elements), name="k")
    C = te.compute(
        (int32_lanes,),
        lambda i: te.sum(data[k].astype("int32") * kernel[i, k].astype("int32"), axis=k),
        name="C",
    )
    # data is contiguous; kernel rows may carry a symbolic leading stride
    # ("ldw") so tiled weight tensors still match the intrinsic.
    a_buffer = tvm.tir.decl_buffer(
        data.shape, dtype="int8", name="a_buffer", offset_factor=1, strides=[1]
    )
    b_buffer = tvm.tir.decl_buffer(
        kernel.shape, dtype="int8", name="b_buffer", offset_factor=1, strides=[te.var("ldw"), 1]
    )

    def _intrin_func(ins, outs):
        # index 0 -> body, 1 -> reset, 2 -> update.
        def _instr(index):
            int_8xl = "int8x8"
            int_32xl = "int32x4"
            ib = tvm.tir.ir_builder.create()
            if index == 1:
                # "reset": zero-initialize the accumulator.
                ib.emit(outs[0].vstore(0, tvm.tir.const(0, int_32xl)))
                return ib.get()
            # this broadcasts data to the vector size: view the 4 int8
            # values as one int32, splat it to 2 lanes, then reinterpret
            # back as int8x8 (= data repeated twice).
            a_int8 = ins[0].vload([0], "int8x4")
            re_int32 = tvm.tir.call_intrin("int32", "tir.reinterpret", a_int8)
            vec_ai32 = re_int32.astype("int32x2")
            vec_a = tvm.tir.call_intrin(int_8xl, "tir.reinterpret", vec_ai32)
            vec_b = ins[1].vload([0, 0], "int8x16")

            def pairwise_add_mul(extract_half):
                # Multiply the broadcast data against one half of the kernel
                # (2 rows), then pairwise-add the widened products.
                vec_b_half = tvm.tir.call_intrin("int8x8", extract_half, vec_b)
                multiply = tvm.tir.call_llvm_pure_intrin(
                    "int16x8",
                    "llvm.aarch64.neon.smull.v8i16",  # signed widening multiply (int8 -> int16)
                    tvm.tir.const(2, "uint32"),
                    vec_a,
                    vec_b_half,
                )
                pairwise_reduction = tvm.tir.call_llvm_pure_intrin(
                    "int32x4",
                    "llvm.aarch64.neon.saddlp.v4i32.v8i16",
                    tvm.tir.const(1, "uint32"),
                    multiply,
                )
                return pairwise_reduction

            pair_1 = pairwise_add_mul("tir.vectorlow")
            pair_2 = pairwise_add_mul("tir.vectorhigh")
            # Final pairwise add combines the four partial sums into one
            # int32 result per kernel row.
            quad_reduction = tvm.tir.call_llvm_pure_intrin(
                "int32x4",
                "llvm.aarch64.neon.addp.v4i32",
                tvm.tir.const(2, "uint32"),
                pair_1,
                pair_2,
            )
            if index == 0:
                # "body": overwrite the output.
                ib.emit(outs[0].vstore(0, quad_reduction))
            else:
                # "update": accumulate into the existing output.
                ib.emit(outs[0].vstore(0, quad_reduction + outs[0].vload([0], int_32xl)))
            return ib.get()

        # body, reset, update
        return _instr(0), _instr(1), _instr(2)

    buffer_params = {"offset_factor": 1}
    return te.decl_tensor_intrin(
        C.op,
        _intrin_func,
        binds={data: a_buffer, kernel: b_buffer},
        default_buffer_params=buffer_params,
    )
def select_word(vec, lane, dtype_vec):
    """
    Utility function used to select a int8x4 word within a int8x16 vector
    and replicate 4 times.

    The pseudo-code for this operation is:

        v = [x0, ..., x15]
        vsub(lane) = v[4*lane:4*lane+3]
        replicated_v(lane) = [vsub(lane), vsub(lane), vsub(lane), vsub(lane)]

    Note that 0<=lane<4

    Parameters
    ----------
    vec : tvm.tir.Expr
        int8x16 vector expression
    lane : int
        vector lane we want to replicate
    dtype_vec : str
        vector data type (e.g., int8x16)

    Returns
    ----------
    output : tvm.tir.Expr
        replicated vector
    """
    # View the 16 int8 lanes as four 32-bit words, so one shuffle index
    # moves a whole group of 4 int8 values at once.
    words_i32 = tvm.tir.call_intrin("int32x4", "tir.reinterpret", vec)
    # Splatting the same word index into every position replicates the
    # selected 4-byte group across the register.
    splat_indices = [lane] * 4
    replicated_i32 = tvm.tir.Shuffle([words_i32], splat_indices)
    # Back to the caller's int8 vector type.
    return tvm.tir.call_intrin(dtype_vec, "tir.reinterpret", replicated_i32)
def gemm_acc_4x4_int8_int8_int32(dtype):
    """
    Int8 4x4 matrix multiplication and accumulation using sdot/udot
    instructions. This function takes two arrays of int8 datatype
    -- A[4][4] and B[4][4] and produces a 4x4 matrix
    which is equal to A*B'.

    The pseudo code is as follows.

    .. code-block:: c

        void gemm_acc_4x4_int8_int8_int32(int8 A[4][4], int8 B[4][4], int32 C[4][4]){
            for (int i = 0; i < 4; i++){
                for (int j = 0; j < 4; j++){
                    for (int k = 0; k < 4; k++){
                        C[i][j] += A[i][k] * B[j][k]
                    }
                }
            }
        }

    Notes:
        * The tiling strategy is picked to maximize register usage.

    Parameters
    ----------
    dtype : str, {"uint8", "int8"}
        Whether it works on unsigned int or signed int

    Returns
    -------
    intrin : TensorIntrin
        The Arm TensorIntrin that can be used in tensorizing schedule
    """
    assert dtype in ["uint8", "int8"]
    # This needs to be a variable number of "rows" since TVM
    # "thinks" I only need to compute one row because of
    # padding
    A = te.placeholder((te.var("rows"), 4), dtype, name="A")
    B = te.placeholder((4, 4), dtype, name="B")
    dtype_vec = dtype + "x16"
    k = te.reduce_axis((0, 4), name="k")
    C = te.compute(
        (te.var("rows"), 4),
        lambda i, j: te.sum(A[i, k].astype("int32") * B[j, k].astype("int32"), axis=k),
        name="C",
    )
    # Buffers are contiguous in the innermost dimension; outer strides stay
    # symbolic so the intrinsic matches tiled tensors.
    aa_buffer = tvm.tir.decl_buffer(
        A.shape, dtype, name="aa_buffer", offset_factor=1, strides=[te.var("sa"), 1]
    )
    bb_buffer = tvm.tir.decl_buffer(
        B.shape, dtype, name="bb_buffer", offset_factor=1, strides=[te.var("sb"), 1]
    )
    cc_buffer = tvm.tir.decl_buffer(
        C.shape, dtype="int32", name="cc_buffer", offset_factor=1, strides=[te.var("sc"), 1]
    )
    llvm_intrin = "llvm.aarch64.neon.sdot" if dtype == "int8" else "llvm.aarch64.neon.udot"

    def _intrin_func(ins, outs):
        # index 0 -> body, 1 -> reset, 2 -> update.
        def _instr(index):
            ib = tvm.tir.ir_builder.create()
            if index == 1:
                # "reset": zero all 4 rows of the int32 accumulator.
                for i in range(0, 4):
                    ib.emit(outs[0].vstore([i, 0], tvm.tir.const(0, "int32x4")))
                return ib.get()
            # Load all the elements of tile A.
            #   vec_a = [a, b, c, d,
            #            e, f, g, h,
            #            l, m, n, o,
            #            p, q, r, s];
            vec_a = ins[0].vload([0, 0], dtype_vec)
            # Replicate 4 times the i-th row of A. For instance,
            #   vec_a[0] = [a, b, c, d,
            #               a, b, c, d,
            #               a, b, c, d,
            #               a, b, c, d,];
            vec_aa = [select_word(vec_a, i, dtype_vec) for i in range(0, 4)]
            # Load all the elements of B. Remember that B
            # is transposed:
            #   vec_b = [0, 4, 8, 12,
            #            1, 5, 9, 13,
            #            2, 6, 10, 14,
            #            3, 7, 11, 15,];
            vec_b = ins[1].vload([0, 0], dtype_vec)
            # Execute the dot product
            for i in range(0, 4):
                vec_c = outs[0].vload([i, 0], "int32x4")
                # Compute the product between the i-th row of A
                # and all the rows of B. Remember that sdot/udot
                # subdivide the input vectors in groups of 4 elements
                # and then take the dot product among each group.
                # The result is stored in a int32x4 register
                #
                # For instance, for i=0, we have:
                # sdot(vec_aa[0], vec_b) = [a*0+b*4+c*8+d*12,
                #                           a*1+b*5+c*9+d*13,
                #                           a*2+b*6+c*10+d*14,
                #                           a*3+b*7+c*11+d*15]
                vdot = tvm.tir.call_llvm_intrin(
                    "int32x4", llvm_intrin, tvm.tir.const(3, "uint32"), vec_c, vec_b, vec_aa[i]
                )
                # Store the result
                ib.emit(outs[0].vstore([i, 0], vdot))
            return ib.get()

        # body, reset, update
        return _instr(0), _instr(1), _instr(2)

    buffer_params = {"offset_factor": 1}
    return te.decl_tensor_intrin(
        C.op,
        _intrin_func,
        binds={A: aa_buffer, B: bb_buffer, C: cc_buffer},
        default_buffer_params=buffer_params,
    )
def gemm_acc_nx16_int8_int8_int32(dtype, rows):
    """
    Int8 nx16 matrix multiplication and accumulation using sdot/udot instructions
    This function takes two arrays of int8 datatype -- A[n][4] and
    B[4][16] and produces a rowsx16 matrix which is equal to A*B'
    The pseudo code is as follows.

    .. code-block:: c

        void mmla_nx16_int8_int8_int32(int8 A[n][16], int8 B[4][16][4], int32 output[n][16]){
            for (int i = 0; i < n; i++){
                for (int j = 0; j < 16; j++){
                    for (int k = 0; k < 16; k++){
                        out[i][j] += A[i][k] * B[k//4][j][k%4]
                    }
                }
            }
        }

    Notes:
        * The tile size of B is 16x4. Since the reduction variable k moves between 0 and 16
          we need 4 tiles of B to compute a single row of the output. The first 4 values of
          k will be fetched from B[0][j][k], the second batch of 4 from B[1][j][k] and so on
        * The tiling strategy is picked to maximize register usage.

    Parameters
    ----------
    dtype : str, {"uint8", "int8"}
        Whether it works on unsigned int or signed int
    rows : int
        Number of the output rows "n"

    Returns
    -------
    intrin : TensorIntrin
        The Arm TensorIntrin that can be used in tensorizing schedule
    """
    assert dtype in ["uint8", "int8"]
    A = te.placeholder((rows, 16), dtype, name="A")
    B = te.placeholder((4, 16, 4), dtype, name="B")
    dtype_vec = dtype + "x16"
    idxm = tvm.tir.indexmod
    k = te.reduce_axis((0, 16), name="k")
    C = te.compute(
        (rows, 16),
        lambda i, j: te.sum(
            A[i, k].astype("int32") * B[k // 4, j, idxm(k, 4)].astype("int32"), axis=k
        ),
        name="C",
    )
    # Innermost dimensions are contiguous; outer strides stay symbolic so
    # the intrinsic matches tiled tensors.
    aa_buffer = tvm.tir.decl_buffer(
        A.shape, dtype, name="aa_buffer", offset_factor=1, strides=[te.var("sa"), 1]
    )
    bb_buffer = tvm.tir.decl_buffer(
        B.shape, dtype, name="bb_buffer", offset_factor=1, strides=[te.var("sb0"), te.var("sb1"), 1]
    )
    cc_buffer = tvm.tir.decl_buffer(
        C.shape, dtype="int32", name="cc_buffer", offset_factor=1, strides=[te.var("sc"), 1]
    )
    llvm_intrin = "llvm.aarch64.neon.sdot" if dtype == "int8" else "llvm.aarch64.neon.udot"

    def _intrin_func(ins, outs):
        # index 0 -> body, 1 -> reset, 2 -> update.
        def _instr(index):
            ib = tvm.tir.ir_builder.create()
            if index == 1:
                # "reset": zero every 16-lane accumulator row.
                for i in range(0, rows):
                    ib.emit(outs[0].vstore([i, 0], tvm.tir.const(0, "int32x16")))
                return ib.get()
            # Iterate on the number of rows of the output
            # NOTE: this Python-level `k` shadows the TE reduce axis `k`
            # declared above; only the Python loop variable is used here.
            for k in range(0, rows):
                # Load 16 elements of A
                # vec_a = [a, b, c, d, e, f, g, h, l, m, n, o, p, q, r, s];
                vec_a = ins[0].vload([k, 0], dtype_vec)
                # Iterate over each of the 4 rowsx4 tiles of the output
                for j in range(0, 4):
                    # Accumulate over each of the 4 (16x4) tiles contained in B
                    for i in range(0, 4):
                        # Replicate a single 4-element group of A (A[k, i:i+4])
                        vec_aa = select_word(vec_a, i, dtype_vec)
                        # Load 4 rows (each row with 4 elements) from B (B[i:i+4, j:j+4])
                        # vec_b = [0, 16, 32, 48,
                        #          1, 17, 33, 49,
                        #          2, 18, 34, 50,
                        #          3, 19, 35, 51,];
                        vec_b = ins[1].vload([i, 4 * j, 0], dtype_vec)
                        # Accumulate in the correct part of the output
                        vec_c = outs[0].vload([k, 4 * j], "int32x4")
                        # Compute the dot product between the rowsx4 tile
                        # from A and the 4x4 tile from B
                        #
                        # For instance, for i=0, we have:
                        # sdot(vec_aa[0], vec_b) = [a*0+b*16+c*32+d*48,
                        #                           a*1+b*17+c*33+d*49,
                        #                           a*2+b*18+c*34+d*50,
                        #                           a*3+b*19+c*35+d*51]
                        vdot = tvm.tir.call_llvm_intrin(
                            "int32x4", llvm_intrin, tvm.tir.const(3, "uint32"), vec_c, vec_b, vec_aa
                        )
                        ib.emit(outs[0].vstore([k, 4 * j], vdot))
            return ib.get()

        # body, reset, update
        return _instr(0), _instr(1), _instr(2)

    buffer_params = {"offset_factor": 1}
    return te.decl_tensor_intrin(
        C.op,
        _intrin_func,
        binds={A: aa_buffer, B: bb_buffer, C: cc_buffer},
        default_buffer_params=buffer_params,
    )
def smlal_int16_int32():
    """
    Intrinsic to be used in order to load two int16x8 vectors and multiply
    them together through a pair of smlal/smlal2 instructions. The pseudo-code
    for the algorithm is as follows:

        vec_a = vload(A, "int16x8")
        vec_b = vload(B, "int16x8")
        vec_c[0:4] += vec_a[0:4]*vec_b[0:4] // -> smlal instruction
        vec_c[4:8] += vec_a[4:8]*vec_b[4:8] // -> smlal2 instruction

    So we load a single int16x8 vector and we accumulate its lower (0:4) and
    higher part separately.
    """
    int16_lanes = 8
    A = te.placeholder((int16_lanes,), dtype="int16", name="A")
    B = te.placeholder((int16_lanes, 1), dtype="int16", name="B")
    C = te.compute(
        (int16_lanes,), lambda i: A[i].astype("int32") * B[i, 0].astype("int32"), name="C"
    )
    a_buffer = tvm.tir.decl_buffer(
        A.shape, dtype="int16", name="a_buffer", offset_factor=1, strides=[1]
    )
    b_buffer = tvm.tir.decl_buffer(
        B.shape, dtype="int16", name="b_buffer", offset_factor=1, strides=[te.var("sb"), 1]
    )
    c_buffer = tvm.tir.decl_buffer(
        C.shape, dtype="int32", name="c_buffer", offset_factor=1, strides=[1]
    )

    def _intrin_func(ins, outs):
        # index 0 -> body, 1 -> reset, 2 -> update.
        def _instr(index):
            ib = tvm.tir.ir_builder.create()
            if index == 1:
                # "reset": zero the whole int32x8 accumulator.
                ib.emit(outs[0].vstore(0, tvm.tir.const(0, "int32x8")))
                return ib.get()
            vec_a = ins[0].vload([0], "int16x8")
            vec_b = ins[1].vload([0, 0], "int16x8")
            # smull is a widening multiply (int16x4 -> int32x4); the explicit
            # add below supplies the accumulation (presumably fused by LLVM
            # into smlal/smlal2 as described in the docstring -- confirm in
            # generated assembly).
            inst = "llvm.aarch64.neon.smull"
            # Higher part of the vector
            vec_c_h = outs[0].vload([4], "int32x4")
            vec_a_h = tvm.tir.call_intrin("int16x4", "tir.vectorhigh", vec_a)
            vec_b_h = tvm.tir.call_intrin("int16x4", "tir.vectorhigh", vec_b)
            vmull_h = tvm.tir.call_llvm_pure_intrin(
                "int32x4", inst, tvm.tir.const(2, "uint32"), vec_a_h, vec_b_h
            )
            vec_out_h = vec_c_h + vmull_h
            # Lower part of the vector
            vec_c_l = outs[0].vload([0], "int32x4")
            vec_a_l = tvm.tir.call_intrin("int16x4", "tir.vectorlow", vec_a)
            vec_b_l = tvm.tir.call_intrin("int16x4", "tir.vectorlow", vec_b)
            vmull_l = tvm.tir.call_llvm_pure_intrin(
                "int32x4", inst, tvm.tir.const(2, "uint32"), vec_a_l, vec_b_l
            )
            vec_out_l = vec_c_l + vmull_l
            # Combine higher and lower part in a single int32x8 vector to store
            # (this will require two different store instructions, since the
            # length of a NEON vector is fixed at 128
            vec_out = tvm.tir.call_intrin("int32x8", "tir.vectorcombine", vec_out_l, vec_out_h)
            ib.emit(outs[0].vstore(0, vec_out))
            return ib.get()

        # body, reset, update
        return _instr(0), _instr(1), _instr(2)

    buffer_params = {"offset_factor": 1}
    return te.decl_tensor_intrin(
        C.op,
        _intrin_func,
        binds={A: a_buffer, B: b_buffer, C: c_buffer},
        default_buffer_params=buffer_params,
    )
def gemm_acc_2x2_int8_int8_int32(dtype):
    """
    Int8 2x2 matrix multiplication using smmla/ummla instructions
    This function takes two arrays of int8 datatype -- A[2][8] and
    B[2][8] and produces a 2x2 matrix which is equal to A*B'
    The pseudo code is as follows.

    .. code-block:: c

        void mmla_2x2_int8_int8_int32(int8 A[2][8], int8 B[2][8], int32 C[2][2]){
            for (int i = 0; i < 2; i++){
                for (int j = 0; j < 2; j++){
                    for (int k = 0; k < 8; k++){
                        C[i][j] += A[i][k] * B[j][k]
                    }
                }
            }
        }

    Parameters
    ----------
    dtype : str, {"uint8", "int8"}
        Whether it works on unsigned int or signed int

    Returns
    -------
    intrin : TensorIntrin
        The Arm TensorIntrin that can be used in tensorizing schedule
    """
    assert dtype in ["uint8", "int8"]
    A = te.placeholder((2, 8), dtype, name="A")
    B = te.placeholder((2, 8), dtype, name="B")
    dtype_vec = dtype + "x16"
    k = te.reduce_axis((0, 8), name="k")
    C = te.compute(
        (2, 2),
        lambda i, j: te.sum(A[i, k].astype("int32") * B[j, k].astype("int32"), axis=k),
        name="C",
    )
    # Innermost dimensions are contiguous; outer strides stay symbolic so
    # the intrinsic matches tiled tensors.
    aa_buffer = tvm.tir.decl_buffer(
        A.shape, dtype, name="aa_buffer", offset_factor=1, strides=[te.var("sa"), 1]
    )
    bb_buffer = tvm.tir.decl_buffer(
        B.shape, dtype, name="bb_buffer", offset_factor=1, strides=[te.var("sb"), 1]
    )
    cc_buffer = tvm.tir.decl_buffer(
        C.shape, dtype="int32", name="cc_buffer", offset_factor=1, strides=[te.var("sc"), 1]
    )
    llvm_intrin = "llvm.aarch64.neon.smmla" if dtype == "int8" else "llvm.aarch64.neon.ummla"

    def _intrin_func(ins, outs):
        # index 0 -> body, 1 -> reset, 2 -> update.
        def _instr(index):
            ib = tvm.tir.ir_builder.create()
            if index == 1:
                # "reset": zero the 2x2 int32 accumulator (one int32x4 store).
                ib.emit(outs[0].vstore([0, 0], tvm.tir.const(0, "int32x4")))
                return ib.get()
            # Load in vec_a the two rows of A
            # vec_a = [a, b, c, d, e, f, g, h;
            #          i, j, k, l, m, n, o, p,]
            vec_a = ins[0].vload([0, 0], dtype_vec)
            # Load in vec_b the two rows of B
            # vec_b = [0, 2, 4, 6, 8, 10, 12, 14;
            #          1, 3, 5, 7, 9, 11, 13, 15,]
            vec_b = ins[1].vload([0, 0], dtype_vec)
            # Execute the matrix multiplication via (s/u)mmla:
            # vec_c = [a*0 + b*2 + c*4 + d*6 +e*8 + f*10 + g*12 + h*14;
            #          a*1 + b*3 + c*5 + d*7 +e*9 + f*11 + g*13 + h*15;
            #          i*0 + j*2 + k*4 + l*6 +m*8 + n*10 + o*12 + p*14;
            #          i*1 + j*3 + k*5 + l*7 +m*9 + n*11 + o*13 + p*15]
            vec_c = outs[0].vload([0, 0], "int32x4")
            vmmla = tvm.tir.call_llvm_intrin(
                "int32x4", llvm_intrin, tvm.tir.const(3, "uint32"), vec_c, vec_a, vec_b
            )
            # Store the result
            ib.emit(outs[0].vstore([0, 0], vmmla))
            return ib.get()

        # body, reset, update
        return _instr(0), _instr(1), _instr(2)

    buffer_params = {"offset_factor": 1}
    return te.decl_tensor_intrin(
        C.op,
        _intrin_func,
        binds={A: aa_buffer, B: bb_buffer, C: cc_buffer},
        default_buffer_params=buffer_params,
    )
def _q_multiply_shift_arm(op):
    """
    Implementation of q_multiply_shift_arm through arm intrinsics
    sqrdmulh and srshl when q == 31.

    Please note that this is introducing a small round-up error for
    some corner cases. This is because we are rounding twice instead
    than only once. I.e.:

        * original q_multiply_shift: round(x*y*2^-s)
        * arm q_multiply_shift: round(round(x*y)*2^-s)

    Parameters
    ----------
    op : tvm.tir.Call
        The tir.q_multiply_shift call being lowered; its args are
        (x, y, q, s).

    Returns
    -------
    tvm.tir.PrimExpr
        The lowered expression, or ``op`` unchanged when the fast path
        does not apply.
    """
    x = op.args[0]
    y = op.args[1]
    q = op.args[2]
    s = op.args[3]
    # Don't use this intrinsic if we don't have a int32x4 vector
    # or if we are not multiplying q31 numbers
    if x.dtype != "int32x4" or q.value != 31:
        return op
    # Case 1, shift is negative: saturating rounding doubling multiply-high,
    # then a signed rounding shift left by the (negative) shift amount,
    # i.e. a rounding right shift.
    # NOTE(review): the original code also computed a rounding "fixup"
    # term ((sqrdmulh & (-s)) >> 31) here but never applied it; that dead
    # computation has been removed without changing the emitted IR's
    # behavior.
    sqrdmulh = tvm.tir.call_llvm_intrin(
        op.dtype, "llvm.aarch64.neon.sqrdmulh", tvm.tir.const(2, "uint32"), x, y
    )
    out_1 = tvm.tir.call_llvm_intrin(
        op.dtype, "llvm.aarch64.neon.srshl", tvm.tir.const(2, "uint32"), sqrdmulh, s
    )
    # Case 2, shift is positive: pre-scale x by 2^s, then do the
    # saturating rounding doubling multiply-high.
    x = x * (1 << (s))
    out_2 = tvm.tir.call_llvm_intrin(
        op.dtype, "llvm.aarch64.neon.sqrdmulh", tvm.tir.const(2, "uint32"), x, y
    )
    # Select depending on the shift
    return tvm.tir.Select(s < 0, out_1, out_2)
# Register the Arm-specific lowering for tir.q_multiply_shift; it applies
# when compiling with the llvm backend for aarch64 targets.
register_intrin_lowering(
    "tir.q_multiply_shift", target="llvm.aarch64", f=_q_multiply_shift_arm, level=99
)
| 40,937 | 34.321829 | 100 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/bitserial_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, invalid-name, too-many-locals, too-many-arguments, condition-evals-to-constant
"""Schedule for bitserial dense operator."""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import autotvm
from tvm.topi.utils import get_const_tuple
from .. import tag
from .bitserial_conv2d import _intrin_popcount
from ..nn.pad import pad
from ..nn.bitserial_util import bitpack, binary_op_multiplier
@autotvm.register_topi_compute("bitserial_dense.arm_cpu")
def bitserial_dense(cfg, data, weight, data_bits, weight_bits, pack_dtype, out_dtype, unipolar):
"""The default implementation of bitserial dense in topi.
Parameters
----------
data : tvm.te.Tensor
2-D with shape [batch, in_dim]
weight : tvm.te.Tensor
2-D with shape [out_dim, in_dim]
Returns
-------
output : tvm.te.Tensor
2-D with shape [batch, out_dim]
"""
data_packed = bitpack(data, data_bits, pack_axis=1, bit_axis=1, pack_type=pack_dtype)
if len(weight.shape) == 2:
weight_packed = bitpack(weight, weight_bits, pack_axis=1, bit_axis=1, pack_type=pack_dtype)
else:
weight_packed = weight
batch, DB, in_dim = get_const_tuple(data_packed.shape)
out_dim, WB, in_dim = get_const_tuple(weight_packed.shape)
# Pad Inputs so that microkernel can be used
# out_dim and in_dim need to be multiples of 8
if out_dim % 8 != 0:
out_dim_pad = out_dim % 8
data_packed = pad(data_packed, [0, 0, 0], [out_dim_pad, 0, 0], name="PaddedInput")
out_dim += out_dim_pad
######## Search space
x, y = cfg.axis(batch), cfg.axis(out_dim)
db, wb, k = cfg.reduce_axis(DB), cfg.reduce_axis(WB), cfg.reduce_axis(in_dim)
ko, ki = cfg.define_split(
"tile_k", k, num_outputs=2, filter=lambda xx: xx.size[-1] == 8 or xx.size[-1] == 16
)
xo, xi = cfg.define_split("tile_x", x, num_outputs=2)
yo, yi = cfg.define_split("tile_y", y, num_outputs=2, filter=lambda xx: xx.size[-1] == 8)
cfg.define_reorder(
"reorder_0",
[yo, xo, ko, xi, wb, db, yi, ki],
policy="candidate",
candidate=[
[yo, xo, ko, xi, wb, db, yi, ki],
[yo, xo, xi, ko, wb, db, yi, ki],
[yo, xo, ko, xi, wb, db, yi, ki],
],
)
###### Compute rule
VY = cfg["tile_y"].size[-1]
VK = cfg["tile_k"].size[-1]
wvshape = (out_dim // VY, in_dim // VK, WB, VY, VK)
oshape = (batch, out_dim)
k = te.reduce_axis((0, in_dim), name="k")
db = te.reduce_axis((0, DB), name="db")
wb = te.reduce_axis((0, WB), name="wb")
# Tile data and weights
weight_vec = te.compute(
wvshape,
lambda yo, ko, wb, vy, vk: weight_packed[yo * VY + vy][wb][ko * VK + vk],
name="weight_vec",
)
matmul_unipolar = te.compute(
oshape,
lambda x, y: te.sum(
(
tvm.tir.popcount(
weight_vec[y // VY, k // VK, wb, y % VY, k % VK].astype(out_dtype)
& data_packed[x, db, k].astype(out_dtype)
)
- tvm.tir.popcount(
~weight_vec[y // VY, k // VK, wb, y % VY, k % VK].astype(out_dtype)
& data_packed[x, db, k].astype(out_dtype)
)
)
<< (wb + db).astype(out_dtype),
axis=[wb, db, k],
),
tag="bitserial_dense_unipolar",
)
matmul = te.compute(
oshape,
lambda x, y: te.sum(
tvm.tir.popcount(
weight_vec[y // VY, k // VK, wb, y % VY, k % VK].astype(out_dtype)
& data_packed[x, db, k].astype(out_dtype)
)
<< (wb + db).astype(out_dtype),
axis=[wb, db, k],
),
tag="bitserial_dense",
)
cfg.add_flop(batch * out_dim * in_dim * binary_op_multiplier(pack_dtype))
if unipolar:
return matmul_unipolar
return matmul
@autotvm.register_topi_schedule("bitserial_dense.arm_cpu")
def schedule_bitserial_dense(cfg, outs):
"""Schedule for binary_dense.
Parameters
----------
outs: Array of Tensor
The computation graph description of bitserial dense operator.
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for bitserial_dense.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule(cfg, s, data_vec, weight_vec, output, unipolar):
z, k, _, y, x = s[weight_vec].op.axis
s[weight_vec].parallel(z)
s[weight_vec].vectorize(x)
x, y = s[output].op.axis
wb, db, k = s[output].op.reduce_axis
_, DB, _ = get_const_tuple(data_vec.shape)
_, _, WB, _, _ = get_const_tuple(weight_vec.shape)
yo, yi = cfg["tile_y"].apply(s, output, y)
xo, xi = cfg["tile_x"].apply(s, output, x)
ko, ki = cfg["tile_k"].apply(s, output, k)
cfg["reorder_0"].apply(s, output, [yo, xo, ko, xi, wb, db, yi, ki])
fused = s[output].fuse(xo, yo)
s[output].parallel(fused)
nfactor = cfg["tile_y"].size[-1]
kfactor = cfg["tile_k"].size[-1]
if nfactor % 8 == 0:
pc = _intrin_popcount(nfactor, kfactor, WB, DB, unipolar)
s[output].tensorize(wb, pc)
return s
def traverse(op):
"""Internal traverse function"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag) or "elemwise" in op.tag:
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.te.ComputeOp):
traverse(tensor.op)
elif op.tag == "bitserial_dense" or "bitserial_dense_unipolar":
output = op.output(0)
weight_vec = op.input_tensors[0]
data_vec = op.input_tensors[1]
data = data_vec.op.input_tensors[0]
if "QuantizeInput" in data.op.name:
data = data.op.input_tensors[0]
unipolar = output.op.tag == "bitserial_dense_unipolar"
_schedule(cfg, s, data_vec, weight_vec, output, unipolar)
else:
raise RuntimeError(f"Unsupported operator: {op.tag}")
traverse(outs[0].op)
return s
| 7,252 | 33.212264 | 110 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/mprofile/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Schedules specialized for cortex-m DSP instructions."""
| 844 | 45.944444 | 62 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/mprofile/dsp/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-value-for-parameter
"""Direct implementation of conv2d."""
from tvm import autotvm
from tvm.autotvm.task import deserialize_args
from tvm import te
from tvm.topi.utils import simplify, traverse_inline
from tvm.topi.nn.pad import pad
from tvm.topi.nn.utils import get_pad_tuple
from tvm.tir.expr import Mul
from .micro_kernel.gemm import (
intrin_gemm_MxKxN,
gemm_MxKxN_impl,
)
def conv2d_nhwc_dsp(*args, **kwargs):
    """Defines the v7e-m DSP instructions of conv2d."""
    assert not kwargs, "Do not support kwargs in template function call"
    task_args = deserialize_args(args)
    data, kernel = task_args[0], task_args[1]
    layout = task_args[-2]
    assert layout == "NHWC"
    # Build the compute with the current autotvm config prepended, then
    # schedule it.
    cfg = autotvm.get_config()
    conv = conv2d_nhwc_dsp_compute(cfg, *task_args)
    sched = conv2d_nhwc_dsp_schedule(cfg, [data, kernel, conv])
    return sched, [data, kernel, conv]
conv2d_nhwc_dsp.template_key = "dsp"
conv2d_nhwc_dsp.default_data_layout = "NHWC"
conv2d_nhwc_dsp.default_kernel_layout = "HWOI"
def conv2d_nhwc_dsp_compute(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute function for v7e-m DSP instructions of conv2d.

    Parameters
    ----------
    cfg : autotvm config
        Tuning configuration; the tiling and reorder knobs are defined below.
    data : te.Tensor
        4-D input in NHWC layout.
    kernel : te.Tensor
        4-D weights; unpacked below as (kernel_h, kernel_w, out_channels,
        in_channels), i.e. HWOI layout.
    strides, padding, dilation
        Standard conv2d parameters; strides/dilation may be an int or a
        2-tuple.
    out_dtype : str
        Accumulation / output dtype.
    """
    assert isinstance(strides, int) or len(strides) == 2
    assert isinstance(dilation, int) or len(dilation) == 2
    if isinstance(strides, int):
        stride_h = stride_w = strides
    else:
        stride_h, stride_w = strides
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    batch_size, in_height, in_width, in_channels = data.shape
    kernel_h, kernel_w, out_channels, _ = kernel.shape
    # compute the output shape
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
    # Pad only the spatial (H, W) axes.
    pad_before = [0, pad_top, pad_left, 0]
    pad_after = [0, pad_down, pad_right, 0]
    padded_data = pad(data, pad_before, pad_after, name="padded_data")
    rc = te.reduce_axis((0, in_channels), name="rc")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    conv = te.compute(
        (batch_size, out_height, out_width, out_channels),
        lambda nn, yy, xx, ff: te.sum(
            padded_data[
                nn, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rc
            ].astype(out_dtype)
            * kernel[ry, rx, ff, rc].astype(out_dtype),
            axis=[ry, rx, rc],
        ),
        name="conv2d",
        tag="conv2d_nhwc",
    )
    ###########################
    # Config Space Definition #
    ###########################
    n, oh, ow, co = (
        cfg.axis(batch_size.value),
        cfg.axis(out_height.value),
        cfg.axis(out_width.value),
        cfg.axis(out_channels.value),
    )
    kh, kw, ci = (
        cfg.reduce_axis(kernel_h.value),
        cfg.reduce_axis(kernel_w.value),
        cfg.reduce_axis(in_channels.value),
    )
    owo, owi = cfg.define_split("tile_ow", ow, policy="factors", num_outputs=2)
    # The inner ci tile must be a multiple of 4 so the SIMD gemm microkernel
    # applies (unless falling back).
    cio, cii = cfg.define_split(
        "tile_ci",
        ci,
        policy="factors",
        num_outputs=2,
        # TODO: check case with in_channels.value % 4 != 0 with AutoTVM
        filter=None if cfg.is_fallback else lambda x: x.size[-1] % 4 == 0,
    )
    coo, coi = cfg.define_split("tile_co", co, policy="factors", num_outputs=2)
    # Candidate loop orders keep (owi, coi, cii) innermost so they can be
    # tensorized by the gemm intrinsic in the schedule.
    cfg.define_reorder(
        "reorder_0_simd",
        [n, oh, owo, owi, coo, coi, kh, kw, cio, cii],
        policy="candidate",
        candidate=[
            [n, oh, kh, kw, owo, coo, cio, owi, coi, cii],
            [n, oh, kh, kw, coo, owo, cio, owi, coi, cii],
            [n, kh, kw, oh, owo, coo, cio, owi, coi, cii],
            [n, kh, kw, oh, coo, owo, cio, owi, coi, cii],
        ],
    )
    cfg.define_knob("auto_unroll_max_step", [0, 2, 4, 8, 16, 32])
    cfg.define_knob("unroll_explicit", [0, 1])
    # Fallback: a single tile covering each full axis.
    if cfg.is_fallback:
        cfg.fallback_split("tile_ow", [-1, out_width.value])
        cfg.fallback_split("tile_ci", [-1, in_channels.value])
        cfg.fallback_split("tile_co", [-1, out_channels.value])
    return conv
def conv2d_nhwc_dsp_schedule(cfg, outs):
    """Schedule function for v7e-m DSP instructions of conv2d.

    Parameters
    ----------
    cfg : ConfigEntity
        AutoTVM config carrying the "tile_ow"/"tile_ci"/"tile_co" splits,
        the "reorder_0_simd" order, and the unroll knobs.
    outs : list of te.Tensor
        Output tensors of the conv2d compute stage.

    Returns
    -------
    sched : Schedule
        TE schedule with the inner tile tensorized to the DSP GEMM kernel.
    """
    sched = te.create_schedule([x.op for x in outs])

    def _callback(op):
        # Only schedule the stage tagged by the matching compute definition.
        if "conv2d_nhwc" not in op.tag:
            return

        # extract tensors
        output = op.output(0)
        conv = op
        data_vec = conv.input_tensors[0]
        kernel = conv.input_tensors[1]  # pylint: disable=unused-variable
        last = outs[0]  # pylint: disable=unused-variable

        # Recover the width stride from the compute expression: the width index
        # of the padded-data access is presumably `xx * stride_w + ...`, so its
        # left operand is a Mul only when stride_w > 1 — TODO confirm against
        # the compute definition.
        source_index_w = output.op.body[0].source[0].a.value.indices[2].a
        stride_w = source_index_w.b.value if isinstance(source_index_w, Mul) else 1

        # tile reduction axes
        n, oh, ow, co = sched[conv].op.axis
        kh, kw, ci = sched[conv].op.reduce_axis
        # Inner split factors become the M/K/N of the GEMM micro-kernel.
        M = cfg["tile_ow"].size[-1]
        K = cfg["tile_ci"].size[-1]
        N = cfg["tile_co"].size[-1]

        owo, owi = cfg["tile_ow"].apply(sched, conv, ow)
        cio, cii = cfg["tile_ci"].apply(sched, conv, ci)
        coo, coi = cfg["tile_co"].apply(sched, conv, co)
        cfg["reorder_0_simd"].apply(sched, conv, [n, oh, owo, owi, coo, coi, kh, kw, cio, cii])

        # Replace the inner MxKxN loop nest with the DSP intrinsic and inject
        # its C implementation into the generated module.
        gemm, uniq_id = intrin_gemm_MxKxN(M, K, N, data_vec.dtype, output.dtype, stride_w)
        sched[output].tensorize(owi, gemm)
        sched[output].pragma(n, "import_c", gemm_MxKxN_impl(M, K, N, uniq_id))

        # this is the scope to attach global config inside this kernel
        kernel_scope = n

        # tune unroll
        sched[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
        sched[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)

    traverse_inline(sched, outs[-1].op, _callback)
    return sched
| 6,988 | 34.841026 | 99 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/mprofile/dsp/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-value-for-parameter
"""Direct implementation of dense."""
from tvm import te
from tvm.topi.utils import traverse_inline, get_const_tuple
from .micro_kernel.gemm import (
intrin_gemm_MxKxN,
gemm_MxKxN_impl,
)
from .... import tag
def dense_dsp_compute(cfg, data, weight, bias=None, out_dtype=None):
    """Compute definition for dense on Cortex-M with v7e-m DSP instructions.

    Computes ``data @ weight.T`` (+ optional ``bias``) and registers the
    tiling knobs the DSP schedule consumes.
    """
    batch, in_dim = get_const_tuple(data.shape)
    out_dim, _ = get_const_tuple(weight.shape)

    # One two-way factor split per matmul dimension; the inner factors become
    # the micro-kernel tile sizes picked by the schedule.
    for knob, extent in (("tile_x", batch), ("tile_y", out_dim), ("tile_k", in_dim)):
        cfg.define_split(knob, extent, policy="factors", num_outputs=2)

    k = te.reduce_axis((0, in_dim), "k")
    matmul = te.compute(
        (batch, out_dim),
        lambda x, y: te.sum(
            data[x, k].astype(out_dtype) * weight[y, k].astype(out_dtype),
            axis=k,
        ),
        name="dense",
        tag="dense_dsp",
    )
    if bias is None:
        return matmul
    # Broadcast-add the bias as a separate elementwise stage.
    return te.compute(
        (batch, out_dim),
        lambda i, j: matmul[i, j] + bias[j].astype(out_dtype),
        tag=tag.BROADCAST,
    )
def dense_dsp_schedule(cfg, outs):
    """Schedule function for v7e-m DSP instructions of dense.

    Parameters
    ----------
    cfg : ConfigEntity
        AutoTVM config holding the "tile_x"/"tile_y"/"tile_k" splits.
    outs : list of te.Tensor
        Output tensors of the dense compute stage.

    Returns
    -------
    sched : Schedule
        TE schedule with the inner tile tensorized to the DSP GEMM kernel.
    """
    sched = te.create_schedule([x.op for x in outs])

    def _callback(op):
        # Matches the "dense_dsp" tag set by dense_dsp_compute.
        if "dense" not in op.tag:
            return

        output = op.output(0)
        dense = op
        data = dense.input_tensors[0]

        # Inner split factors become the M/K/N of the GEMM micro-kernel.
        M = cfg["tile_x"].size[-1]
        N = cfg["tile_y"].size[-1]
        K = cfg["tile_k"].size[-1]

        x, y = sched[dense].op.axis
        k = sched[dense].op.reduce_axis[0]

        x_o, x_i = cfg["tile_x"].apply(sched, dense, x)
        y_o, y_i = cfg["tile_y"].apply(sched, dense, y)
        k_o, k_i = cfg["tile_k"].apply(sched, dense, k)
        sched[dense].reorder(x_o, y_o, k_o, x_i, y_i, k_i)

        # Dense has no spatial stride, hence stride_w=1.
        gemm, uniq_id = intrin_gemm_MxKxN(M, K, N, data.dtype, output.dtype, stride_w=1)
        sched[output].tensorize(x_i, gemm)
        sched[output].pragma(x_o, "import_c", gemm_MxKxN_impl(M, K, N, uniq_id))

    traverse_inline(sched, outs[-1].op, _callback)
    return sched
| 2,886 | 32.183908 | 99 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/mprofile/dsp/conv1d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-value-for-parameter
"""Direct implementation of conv1d."""
from tvm import autotvm
from tvm.autotvm.task import deserialize_args
from tvm import te
from tvm.topi.utils import simplify, traverse_inline
from tvm.topi.nn.pad import pad
from tvm.topi.nn.utils import get_pad_tuple1d
from tvm.tir.expr import Mul
from .micro_kernel.gemm import (
intrin_gemm_MxKxN,
gemm_MxKxN_impl,
)
def conv1d_nwc_dsp(*args, **kwargs):
    """AutoTVM template entry: conv1d on NWC layout with v7e-m DSP instructions."""
    assert not kwargs, "Do not support kwargs in template function call"
    template_args = deserialize_args(args)
    data, kernel = template_args[:2]
    layout = template_args[-2]
    cfg = autotvm.get_config()
    template_args = [cfg] + template_args
    assert layout == "NWC"
    conv = conv1d_nwc_dsp_compute(*template_args)
    sched = conv1d_nwc_dsp_schedule(cfg, [data, kernel, conv])
    return sched, [data, kernel, conv]


# Metadata consumed by the AutoTVM task machinery to identify this template.
conv1d_nwc_dsp.template_key = "dsp"
conv1d_nwc_dsp.default_data_layout = "NWC"
conv1d_nwc_dsp.default_kernel_layout = "WOI"
def conv1d_nwc_dsp_compute(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute function for v7e-m DSP instructions of conv1d on NWC layout.

    Parameters
    ----------
    cfg : ConfigEntity
        AutoTVM config; the tiling/reorder/unroll knobs are defined on it here.
    data : te.Tensor
        Input in NWC layout: (batch, width, in_channels).
    kernel : te.Tensor
        Kernel in WOI layout: (kernel_size, out_channels, in_channels).
    strides : int or tuple/list of one int
    padding : int, str or tuple
        Forwarded to get_pad_tuple1d.
    dilation : int or tuple/list of one int
    out_dtype : str
        Accumulation dtype of the result.

    Returns
    -------
    conv : te.Tensor
        The (batch, out_width, out_channels) convolution result.
    """
    if isinstance(strides, (tuple, list)):
        strides = strides[0]
    if isinstance(dilation, (tuple, list)):
        dilation = dilation[0]

    batch_size, data_width, in_channels = data.shape
    kernel_size, out_channels, _ = kernel.shape

    # Compute the output shape
    dilated_kernel_size = (kernel_size - 1) * dilation + 1
    pad_left, pad_right = get_pad_tuple1d(padding, (dilated_kernel_size,))
    out_channels = simplify(out_channels)
    out_width = simplify((data_width - dilated_kernel_size + pad_left + pad_right) // strides + 1)

    # Apply padding
    pad_before = [0, pad_left, 0]
    pad_after = [0, pad_right, 0]
    padded_data = pad(data, pad_before, pad_after, name="padded_data")

    # Compute graph
    rc = te.reduce_axis((0, in_channels), name="rc")
    rw = te.reduce_axis((0, kernel_size), name="rw")

    conv = te.compute(
        (batch_size, out_width, out_channels),
        lambda b, w, c: te.sum(
            padded_data[b, w * strides + rw * dilation, rc].astype(out_dtype)
            * kernel[rw, c, rc].astype(out_dtype),
            axis=[rw, rc],
        ),
        name="conv1d",
        tag="conv1d_nwc",
    )

    ###########################
    # Config Space Definition #
    ###########################
    n, ow, co = (
        cfg.axis(batch_size.value),
        cfg.axis(out_width.value),
        cfg.axis(out_channels.value),
    )
    kw, ci = (
        cfg.reduce_axis(kernel_size.value),
        cfg.reduce_axis(in_channels.value),
    )

    owo, owi = cfg.define_split("tile_ow", ow, policy="factors", num_outputs=2)
    cio, cii = cfg.define_split(
        "tile_ci",
        ci,
        policy="factors",
        num_outputs=2,
        # TODO: check case with in_channels.value % 4 != 0 with AutoTVM
        filter=None if cfg.is_fallback else lambda x: x.size[-1] % 4 == 0,
    )
    coo, coi = cfg.define_split("tile_co", co, policy="factors", num_outputs=2)

    # NOTE(review): candidates 1/3 and 2/4 below are byte-identical, so only
    # two distinct orders are actually searched — likely copied from the conv2d
    # template (which additionally varies an `oh` axis). Deduplicating would
    # shrink the search space but also shift candidate indices in existing
    # tuning logs, so it is left as-is here.
    cfg.define_reorder(
        "reorder_0_simd",
        [n, owo, owi, coo, coi, kw, cio, cii],
        policy="candidate",
        candidate=[
            [n, kw, owo, coo, cio, owi, coi, cii],
            [n, kw, coo, owo, cio, owi, coi, cii],
            [n, kw, owo, coo, cio, owi, coi, cii],
            [n, kw, coo, owo, cio, owi, coi, cii],
        ],
    )

    cfg.define_knob("auto_unroll_max_step", [0, 2, 4, 8, 16, 32])
    cfg.define_knob("unroll_explicit", [0, 1])

    if cfg.is_fallback:
        cfg.fallback_split("tile_ow", [-1, out_width.value])
        cfg.fallback_split("tile_ci", [-1, in_channels.value])
        cfg.fallback_split("tile_co", [-1, out_channels.value])

    return conv
def conv1d_nwc_dsp_schedule(cfg, outs):
    """Schedule function for v7e-m DSP instructions of conv1d on NWC layout.

    Tiles the output-width, input-channel and output-channel axes per the
    tuned config and tensorizes the inner tile with the DSP GEMM kernel.
    """
    sched = te.create_schedule([x.op for x in outs])

    def _callback(op):
        if "conv1d_nwc" not in op.tag:
            return

        # extract tensors
        output = op.output(0)
        conv = op
        data_vec = conv.input_tensors[0]

        # Recover the width stride from the compute expression: the width index
        # of the padded-data access is presumably `w * strides + ...`, so its
        # left operand is a Mul only when strides > 1 — TODO confirm against
        # the compute definition.
        source_index_w = output.op.body[0].source[0].a.value.indices[1].a
        stride_w = source_index_w.b.value if isinstance(source_index_w, Mul) else 1

        # tile reduction axes
        n, ow, co = sched[conv].op.axis
        kw, ci = sched[conv].op.reduce_axis

        # Inner split factors become the M/K/N of the GEMM micro-kernel.
        M = cfg["tile_ow"].size[-1]
        K = cfg["tile_ci"].size[-1]
        N = cfg["tile_co"].size[-1]

        owo, owi = cfg["tile_ow"].apply(sched, conv, ow)
        cio, cii = cfg["tile_ci"].apply(sched, conv, ci)
        coo, coi = cfg["tile_co"].apply(sched, conv, co)
        cfg["reorder_0_simd"].apply(sched, conv, [n, owo, owi, coo, coi, kw, cio, cii])

        gemm, uniq_id = intrin_gemm_MxKxN(M, K, N, data_vec.dtype, output.dtype, stride_w)
        sched[output].tensorize(owi, gemm)
        sched[output].pragma(n, "import_c", gemm_MxKxN_impl(M, K, N, uniq_id))

        # this is the scope to attach global config inside this kernel
        kernel_scope = n

        # tune unroll
        sched[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
        sched[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)

    traverse_inline(sched, outs[-1].op, _callback)
    return sched
| 6,247 | 34.101124 | 99 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/mprofile/dsp/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/mprofile/dsp/depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""ARM Cortex-M DSP schedule for depthwise_conv2d"""
import random
import string
from tvm import te, topi
from tvm.topi.utils import traverse_inline
from tvm.topi.nn.pad import pad
from .micro_kernel.multi_channel_convolve import (
intrin_multi_channel_convolve,
multi_channel_convolve_impl,
)
from .micro_kernel.common import num_simd_lanes_per_word
def depthwise_conv2d_nhwc_dsp_compute(_cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute function for v7e-m DSP instructions of DepthwiseConv2D.

    Has a lot of requirements for use - if not all apply, the fallback
    implementation will be used instead.

    Parameters
    ----------
    _cfg : ConfigEntity
        AutoTVM config (unused by this compute definition).
    data : te.Tensor
        Input in NHWC layout: (batch, height, width, channels).
    kernel : te.Tensor
        Depthwise kernel of shape (kernel_h, kernel_w, channels, 1).
    strides : int or tuple of two ints
    padding : str or tuple
        "SAME", "VALID", or explicit padding amounts.
    dilation : int or tuple of two ints
        Must be 1 - dilation is not supported here.
    out_dtype : str
        Must be "int32" (accumulation dtype).

    Returns
    -------
    te.Tensor
        (batch, out_h, out_w, channels) int32 result.

    Raises
    ------
    RuntimeError
        If `padding` is not "SAME", "VALID", or a tuple.
    """
    assert isinstance(strides, int) or len(strides) == 2
    assert isinstance(dilation, int) or len(dilation) == 2

    if isinstance(strides, int):
        stride_h = stride_w = strides
    else:
        stride_h, stride_w = strides

    # We do not support dilation currently. It would be possible, but it would require
    # modifying the way the kernel is packed. Gnarly.
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    assert dilation_h == dilation_w == 1

    batch_size, height, width, channels = data.shape
    kernel_h, kernel_w, _, _ = kernel.shape
    simd_lanes = num_simd_lanes_per_word(data.dtype)

    # We don't support different numbers of input and output channels.
    assert channels == kernel.shape[2]
    assert kernel.shape[3] == 1

    # We take in int8 as our dtype, but we spit out int32. This is because we cannot
    # round until we compute activations.
    assert out_dtype == "int32"

    # Padding the data requires COPYING THE ENTIRE INPUT TENSOR, which
    # is slow and bad. We should really implement a strip mining
    # routine to avoid this, but TVM has terrible support for that.

    if padding == "SAME":
        # This assumption makes the logic easier. Could be removed with work.
        assert height % stride_h == width % stride_w == 0
        output_h = height // stride_h
        output_w = width // stride_w

        # This padding behavior is consistent with other TVM depthwise_conv2d schedules. However it
        # differs from the TensorFlow, which only pads the bottom right if stride > 1. This probably
        # brings down accuracy slightly for models imported from TFLite.
        pad_down = 1 if stride_h == 1 else 0
        pad_right = 1 if stride_w == 1 else 0

        padded_data = pad(
            data,
            [0, kernel_h // 2, kernel_w // 2, 0],
            [0, pad_down, pad_right, 0],
            name="padded_data",
        )

    elif padding == "VALID":
        assert height > kernel_h and width > kernel_w
        output_h = (height - kernel_h) // stride_h + 1
        output_w = (width - kernel_w) // stride_w + 1
        padded_data = data

    elif isinstance(padding, tuple):
        # Either ((pad_up, pad_down), (pad_left, pad_right)) or a flat 4-tuple.
        if len(padding) == 2:
            pad_up, pad_down = padding[0]
            pad_left, pad_right = padding[1]
        else:
            pad_up, pad_left, pad_down, pad_right = padding

        output_h = (height - kernel_h + pad_up + pad_down) // stride_h + 1
        output_w = (width - kernel_w + pad_left + pad_right) // stride_w + 1
        padded_data = pad(
            data,
            [0, pad_up, pad_left, 0],
            [0, pad_down, pad_right, 0],
            name="padded_data",
        )

    else:
        # Fix: the original raised a bare RuntimeError with no message, which
        # made unsupported padding values very hard to diagnose.
        raise RuntimeError(f"Unsupported padding mode: {padding!r}")

    _, padded_h, padded_w, _ = padded_data.shape

    kh_i = te.reduce_axis((0, kernel_h), name="kh_i")
    kw_i = te.reduce_axis((0, kernel_w), name="kw_i")

    # Pack the kernel CHWc-style so each inner-most slice holds `simd_lanes`
    # adjacent channels, matching the micro-kernel's word-wide loads.
    reshaped_kernel = topi.reshape(kernel, (channels // simd_lanes, kernel_h, kernel_w, simd_lanes))

    return te.compute(
        (batch_size, output_h, output_w, channels),
        lambda h, i, j, k: te.sum(
            padded_data[h, (i * stride_h) + kh_i, (j * stride_w) + kw_i, k].astype("int32")
            * reshaped_kernel[k // simd_lanes, kh_i, kw_i, k % simd_lanes].astype("int32"),
            axis=(kh_i, kw_i),
        ),
        name="depthwise_conv2d",
        tag=f"depthwise_conv2d_nhwc_{padded_h}_{padded_w}_dsp",
    )
def depthwise_conv2d_nhwc_dsp_schedule(_cfg, outs):
    """Schedule function for v7e-m DSP instructions of depthwise_conv2d.

    Splits the channel axis by the SIMD lane count and tensorizes the kernel
    loop nest with the multi-channel convolve micro-kernel.
    """
    schedule = te.create_schedule([x.op for x in outs])

    def _callback(operator):
        # Matches the tag prefix set by depthwise_conv2d_nhwc_dsp_compute.
        if "depthwise_conv2d_nhwc" not in operator.tag:
            return

        # extract tensors
        output = operator.output(0)
        padded_data = output.op.input_tensors[0]
        reshaped_kernel = output.op.input_tensors[1]
        in_dtype = padded_data.dtype

        _, padded_h, padded_w, channels = padded_data.shape
        _, kernel_h, kernel_w, _ = reshaped_kernel.shape

        # Random suffix keeps generated C symbol names unique per operator.
        suffix = "".join(random.choices(string.ascii_uppercase, k=8))

        b_ax, y_ax, x_ax, c_ax = schedule[output].op.axis
        ky_ax, kx_ax = schedule[output].op.reduce_axis
        simd_lanes = num_simd_lanes_per_word(in_dtype)

        # Process `simd_lanes` channels per micro-kernel invocation.
        c_ax_o, c_ax_i = schedule[output].split(c_ax, factor=simd_lanes)
        schedule[output].reorder(b_ax, c_ax_o, y_ax, x_ax, ky_ax, kx_ax, c_ax_i)

        multi_channel_convolve = intrin_multi_channel_convolve(
            in_dtype, padded_h, padded_w, channels, kernel_h, kernel_w, suffix
        )
        schedule[output].tensorize(ky_ax, multi_channel_convolve)
        # Embed the generated C implementation in the output module.
        schedule[output].pragma(
            b_ax,
            "import_c",
            multi_channel_convolve_impl(
                in_dtype, padded_h, padded_w, channels, kernel_h, kernel_w, suffix
            ),
        )

    traverse_inline(schedule, outs[-1].op, _callback)
    return schedule
| 6,439 | 37.333333 | 100 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/mprofile/dsp/pool.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-value-for-parameter
"""Direct implementation of pool."""
import logging
import tvm
from tvm import te
from tvm.topi.utils import traverse_inline
from .micro_kernel.max_pool import (
intrin_max,
max_impl,
)
from .micro_kernel.avg_pool import (
intrin_sum,
sum_impl,
)
logger = logging.getLogger("topi")
def schedule_maxpool_1d_nwc(s, op):
    """Tensorize a 1-D NWC max-pool stage with the DSP max micro-kernel."""
    out_tensor = op.output(0)
    in_tensor = op.input_tensors[0]

    # Normalize the channel extent to a plain Python int when it is constant.
    num_channels = in_tensor.shape[-1]
    if isinstance(num_channels, tvm.tir.IntImm):
        num_channels = num_channels.value

    batch_ax, width_ax, chan_ax = s[op].op.axis
    (reduce_ax,) = s[op].op.reduce_axis
    s[op].reorder(batch_ax, width_ax, reduce_ax, chan_ax)

    max_val, uniq_id = intrin_max((1, 1, num_channels), in_tensor.dtype, out_tensor.dtype)
    s[op].tensorize(chan_ax, max_val)
    s[out_tensor].pragma(batch_ax, "import_c", max_impl(uniq_id))
def schedule_maxpool_2d_nhwc(s, op):
    """Tensorize a 2-D NHWC max-pool stage with the DSP max micro-kernel."""
    out_tensor = op.output(0)
    in_tensor = op.input_tensors[0]

    # Normalize the channel extent to a plain Python int when it is constant.
    num_channels = in_tensor.shape[-1]
    if isinstance(num_channels, tvm.tir.IntImm):
        num_channels = num_channels.value

    batch_ax, h_ax, w_ax, chan_ax = s[op].op.axis
    outer_red, inner_red = s[op].op.reduce_axis
    s[op].reorder(batch_ax, h_ax, w_ax, outer_red, inner_red, chan_ax)

    max_val, uniq_id = intrin_max((1, 1, 1, num_channels), in_tensor.dtype, out_tensor.dtype)
    s[op].tensorize(chan_ax, max_val)
    s[out_tensor].pragma(batch_ax, "import_c", max_impl(uniq_id))
def schedule_avgpool_1d_ncw(s, op):
    """Tensorize a 1-D NCW average-pool stage with the DSP sum micro-kernel."""
    out_tensor = op.output(0)
    in_tensor = op.input_tensors[0]

    batch_ax, _, _ = s[op].op.axis
    (reduce_ax,) = s[op].op.reduce_axis
    window_w = reduce_ax.dom.extent.value

    # reset=True presumably re-initializes the accumulator before each window;
    # see intrin_sum for the exact semantics.
    summary, uniq_id = intrin_sum((1, 1, window_w), in_tensor.dtype, out_tensor.dtype, reset=True)
    s[op].tensorize(reduce_ax, summary)
    s[out_tensor].pragma(batch_ax, "import_c", sum_impl(window_w, uniq_id))
def schedule_avgpool_2d_nchw(s, op):
    """Tensorize a 2-D NCHW average-pool stage with the DSP sum micro-kernel."""
    out_tensor = op.output(0)
    in_tensor = op.input_tensors[0]

    batch_ax, _, _, _ = s[op].op.axis
    _, inner_red = s[op].op.reduce_axis
    window_w = inner_red.dom.extent.value

    summary, uniq_id = intrin_sum((1, 1, 1, window_w), in_tensor.dtype, out_tensor.dtype)
    s[op].tensorize(inner_red, summary)
    s[out_tensor].pragma(batch_ax, "import_c", sum_impl(window_w, uniq_id))
def pool_dsp_schedule(outs, layout):
    """Schedule function for v7e-m DSP instructions of pooling.

    Dispatches on the pooling tag and layout; falls back (with a warning)
    when no micro-kernel exists for the input dtype.
    """
    schedule = te.create_schedule([tensor.op for tensor in outs])

    def _callback(op):
        if "pool_max" in op.tag:
            dtype = op.input_tensors[0].dtype
            if dtype != "int8":
                logger.warning("Does not have micro-kernel for %s maxpool.", dtype)
            elif layout == "NWC":
                schedule_maxpool_1d_nwc(schedule, op)
            elif layout == "NHWC":
                schedule_maxpool_2d_nhwc(schedule, op)
        elif "pool_sum" in op.tag:
            dtype = op.input_tensors[0].dtype
            if dtype != "int16":
                logger.warning("Does not have micro-kernel for %s avgpool.", dtype)
            elif layout == "NCW":
                schedule_avgpool_1d_ncw(schedule, op)
            elif layout == "NCHW":
                schedule_avgpool_2d_nchw(schedule, op)

    traverse_inline(schedule, outs[-1].op, _callback)
    return schedule
| 4,171 | 31.850394 | 91 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/mprofile/dsp/micro_kernel/multi_channel_convolve.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This is a special intrinsic used for depthwise convolution using Cortex-M DSP instructions
(v7e-m). It takes as inputs an int8 HWC data tensor and an int8 CHWc kernel. This intrinsic "lays"
the kernel on top of the data tensors starting from a given pointer, performs signed sixteen-bit
multiplies on each pair of values, and sums all the products in an int32 accumulator. This process is
repeated four times giving four int32 outputs - one per channel."""
import textwrap
from tvm import te, tir
from .common import num_simd_lanes_per_word, common_includes
def _get_func_name(in_dtype, tensor_w, channels, kernel_h, kernel_w, suffix):
"""Gets the C function name of the tensorized function."""
return f"kernel_convolve_{in_dtype}_w{tensor_w}_c{channels}_kh{kernel_h}_kw{kernel_w}_{suffix}"
def intrin_multi_channel_convolve(
    in_dtype, _tensor_h, tensor_w, channels, kernel_h, kernel_w, suffix
):
    """Defines a v7e-m DSP-accelerated multi-channel convolution. Works on two
    channels if in_dtype==int16, and four channels if in_dtype==int8.

    Returns a tensor intrinsic that replaces the (kernel_h, kernel_w,
    simd_lanes) reduction nest with one call to the generated C micro-kernel.
    """
    simd_lanes = num_simd_lanes_per_word(in_dtype)

    # One "overlap" slice: the kernel footprint over `simd_lanes` channels.
    overlap_dims = (kernel_h, kernel_w, simd_lanes)
    data_slice = te.placeholder(overlap_dims, name="data_slice", dtype=in_dtype)
    kernel_slice = te.placeholder(overlap_dims, name="kernel_slice", dtype=in_dtype)

    kh_i = te.reduce_axis((0, kernel_h), name="kh_i")
    kw_i = te.reduce_axis((0, kernel_w), name="kw_i")

    # Declarative description of what the C micro-kernel computes: one int32
    # accumulation per SIMD lane.
    output_slice = te.compute(
        (simd_lanes,),
        lambda k: te.sum(
            data_slice[kh_i, kw_i, k].astype("int32") * kernel_slice[kh_i, kw_i, k].astype("int32"),
            axis=(kh_i, kw_i),
        ),
        name="c",
    )

    # Buffer strides describe how the slices sit inside the full padded data
    # (row-major NHWC) and the packed kernel, so access_ptr() lands correctly.
    data_buf = tir.decl_buffer(
        data_slice.shape,
        data_slice.dtype,
        name="data",
        offset_factor=1,
        strides=[tensor_w * channels, channels, 1],
    )
    kernel_buf = tir.decl_buffer(
        kernel_slice.shape,
        kernel_slice.dtype,
        name="kernel",
        offset_factor=1,
        strides=[kernel_w * simd_lanes, simd_lanes, 1],
    )
    output_buf = tir.decl_buffer(
        output_slice.shape, output_slice.dtype, name="output", offset_factor=1, strides=[1]
    )

    def intrin_func(ins, outs):
        # Emit a single extern call to the generated C function.
        builder = tir.ir_builder.create()
        builder.emit(
            tir.call_extern(
                "int32",
                _get_func_name(in_dtype, tensor_w, channels, kernel_h, kernel_w, suffix),
                outs[0].access_ptr("w"),
                ins[0].access_ptr("r"),
                ins[1].access_ptr("r"),
            )
        )
        return builder.get()

    return te.decl_tensor_intrin(
        output_slice.op,
        intrin_func,
        binds={data_slice: data_buf, kernel_slice: kernel_buf, output_slice: output_buf},
    )
def multi_channel_convolve_impl(in_dtype, *args) -> str:
    """Generate C source for a fast multi-channel convolution on ARM Cortex-M.

    Delegates to a dtype-specific generator: since v7e-m has no quad
    multiply-accumulate instruction, the int8 and int16 kernels differ.
    """
    generators = {
        "int8": _quad_int8_channel_convolve_impl,
        "int16": _dual_int16_channel_convolve_impl,
    }
    generator = generators.get(in_dtype)
    if generator is None:
        raise NotImplementedError(f"No Cortex-M {in_dtype} depthwise_conv2d implementation exists!")
    return generator(*args)
def _quad_int8_channel_convolve_impl(_tensor_h, tensor_w, channels, kernel_h, kernel_w, suffix):
    """Emit C source for the int8 micro-kernel: accumulates four channels per
    call by unpacking word-wide loads with SXTB16 and using SMLABB/SMLATT
    multiply-accumulates."""
    # NOTE: the returned text is runtime output (compiled as C); it must not be
    # edited for style here.
    return textwrap.dedent(
        (
            common_includes
            + f"""

        // __SXTB16(_ROR(X, Y)) is combined into one assembly instruction
        #define TVMGEN_QUAD_INT8_CHANNEL_REARRANGE_SUM_DSP( \
            arranged_kernel, \
            tensor_c3210, \
            sum_c0, sum_c1, sum_c2, sum_c3) {{ \
          \
          int32_t kernel_c3210 = *arranged_kernel++; \
          \
          int32_t tensor_c20 = __sxtb16(tensor_c3210); \
          int32_t kernel_c20 = __sxtb16(kernel_c3210); \
          sum_c0 = __builtin_arm_smlabb(tensor_c20, kernel_c20, sum_c0); \
          sum_c2 = __builtin_arm_smlatt(tensor_c20, kernel_c20, sum_c2); \
          \
          int32_t tensor_c31 = __sxtb16(__ror(tensor_c3210, 8)); \
          int32_t kernel_c31 = __sxtb16(__ror(kernel_c3210, 8)); \
          sum_c1 = __builtin_arm_smlabb(tensor_c31, kernel_c31, sum_c1); \
          sum_c3 = __builtin_arm_smlatt(tensor_c31, kernel_c31, sum_c3); \
        }}

        /* We do four channels at once to get this speed boost. */
        #ifdef __cplusplus
        extern "C"
        #endif
        int32_t {_get_func_name("int8", tensor_w, channels, kernel_h, kernel_w, suffix)}(
            int32_t *out,
            int8_t *tensor,
            int8_t *kernel) {{

          int32_t sum_c0 = 0;
          int32_t sum_c1 = 0;
          int32_t sum_c2 = 0;
          int32_t sum_c3 = 0;

          int32_t kernel_i32[{kernel_h} * {kernel_w}];
          memcpy(kernel_i32, kernel, {kernel_h} * {kernel_w} * sizeof(int32_t));
          int32_t *arranged_kernel = kernel_i32;

          int32_t tensor_length = {((kernel_w - 1) * (channels // 4) + (kernel_h - 1) * tensor_w * (channels // 4)) + 1};
          int32_t tensor_i32[tensor_length];
          memcpy(tensor_i32, tensor, tensor_length * sizeof(int32_t));

          #pragma GCC unroll 3
          for (int i = 0; i < {kernel_h}; i++) {{
            #pragma GCC unroll 3
            for (int j = 0; j < {kernel_w}; j++) {{
              TVMGEN_QUAD_INT8_CHANNEL_REARRANGE_SUM_DSP(
                arranged_kernel,
                *(tensor_i32 + j * {channels // 4} + i * {tensor_w * (channels // 4)}),
                sum_c0, sum_c1, sum_c2, sum_c3)
            }}
          }}

          out[0] = sum_c0;
          out[1] = sum_c1;
          out[2] = sum_c2;
          out[3] = sum_c3;
          return 0;
        }}

        #undef TVMGEN_QUAD_INT8_CHANNEL_REARRANGE_SUM_DSP
        """
        )
    )
def _dual_int16_channel_convolve_impl(_tensor_h, tensor_w, channels, kernel_h, kernel_w, suffix):
    """Emit C source for the int16 micro-kernel: accumulates two channels per
    call using SMLABB/SMLATT on word-wide loads."""
    # NOTE(review): the trailing `#undef TVMGEN_DUAL_INT16_CHANNEL_REARRANGE_SUM`
    # and the extra `#include <stdint.h>` appear to be leftovers from the int8
    # variant (no such macro is defined here); both are harmless in C, so the
    # emitted text is kept byte-for-byte.
    return textwrap.dedent(
        (
            common_includes
            + f"""

        #include <stdint.h>

        /* We do four channels at once to get this speed boost. */
        #ifdef __cplusplus
        extern "C"
        #endif
        int32_t {_get_func_name("int16", tensor_w, channels, kernel_h, kernel_w, suffix)}(
            int32_t *out,
            int16_t *tensor,
            int16_t *kernel) {{

          int32_t sum_c0 = 0;
          int32_t sum_c1 = 0;

          int32_t kernel_i32[{kernel_h} * {kernel_w}];
          memcpy(kernel_i32, kernel, {kernel_h} * {kernel_w} * sizeof(int32_t));

          int32_t tensor_length = {((kernel_w - 1) * (channels // 2) + (kernel_h - 1) * tensor_w * (channels // 2)) + 1};
          int32_t tensor_i32[tensor_length];
          memcpy(tensor_i32, tensor, tensor_length * sizeof(int32_t));

          #pragma GCC unroll 3
          for (int i = 0; i < {kernel_h}; i++) {{
            #pragma GCC unroll 3
            for (int j = 0; j < {kernel_w}; j++) {{
              int32_t tensor_c10 = tensor_i32[j * {channels // 2} + i * {tensor_w * (channels // 2)}];
              int32_t kernel_c10 = kernel_i32[{kernel_w} * i + j];
              sum_c0 = __builtin_arm_smlabb(tensor_c10, kernel_c10, sum_c0);
              sum_c1 = __builtin_arm_smlatt(tensor_c10, kernel_c10, sum_c1);
            }}
          }}

          out[0] = sum_c0;
          out[1] = sum_c1;
          return 0;
        }}

        #undef TVMGEN_DUAL_INT16_CHANNEL_REARRANGE_SUM
        """
        )
    )
| 8,416 | 36.575893 | 121 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/mprofile/dsp/micro_kernel/gemm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-value-for-parameter, f-string-without-interpolation
"""Defines gemm intrinsics for matrix multiplication with v7e-m DSP instructions."""
import random
import string
import tvm
from tvm import te
from . import common
##########################
# MxKxN MatMul Intrinsic #
##########################
# NOTE this is transposed matmul (A * B^T)
def intrin_gemm_MxKxN(M, K, N, in_dtype, out_dtype, stride_w=1):
    """Defines a v7e-m DSP-accelerated transposed matmul (A * B^T).

    Parameters
    ----------
    M, K, N : int or tvm.tir.IntImm
        GEMM tile dimensions; IntImm values are unwrapped to Python ints.
    in_dtype : str
        Input dtype, "int8" or "int16".
    out_dtype : str
        Accumulation dtype; must be "int32".
    stride_w : int
        Width stride of the enclosing convolution: rows of A are read
        `stride_w` apart.

    Returns
    -------
    (TensorIntrin, str)
        The declared intrinsic and the unique ID suffix used in the C symbol
        names (pass it to gemm_MxKxN_impl to emit the matching C code).
    """
    # we generate a unique ID for every intrinsic definition, to prevent name
    # collisions in the generated source (e.g., if there are multiple operators
    # in the same module that use the same intrinsic)
    #
    # TODO(weberlo, areusch): to cut down on memory usage, we should cache each intrinsic
    # instantiation and include it only once, eliminating the need for unique
    # IDs
    UNIQ_ID_LEN = 8
    uniq_id = "".join(random.choices(string.ascii_uppercase, k=UNIQ_ID_LEN))

    if isinstance(M, tvm.tir.IntImm):
        M = M.value
    if isinstance(K, tvm.tir.IntImm):
        K = K.value
    if isinstance(N, tvm.tir.IntImm):
        N = N.value
    # TODO(weberlo, areusch): support more dtypes?
    assert in_dtype in ("int8", "int16")
    assert out_dtype == "int32"

    # A is over-sized so that strided row reads (i * stride_w) stay in bounds.
    A = te.placeholder((M * stride_w - (stride_w - 1), K), name="a", dtype=in_dtype)
    B = te.placeholder((N, K), name="b", dtype=in_dtype)
    k = te.reduce_axis((0, K), name="k")
    C = te.compute(
        (M, N),
        lambda i, j: te.sum(
            A[i * stride_w, k].astype(out_dtype) * B[j, k].astype(out_dtype), axis=k
        ),
        name="c",
    )
    # Symbolic row strides let the intrinsic bind to differently-laid-out
    # buffers; the actual values are passed to the C functions at call time.
    A_buf = tvm.tir.decl_buffer(
        A.shape, A.dtype, name="A", offset_factor=1, strides=[te.var("A_s"), 1]
    )
    B_buf = tvm.tir.decl_buffer(
        B.shape, B.dtype, name="B", offset_factor=1, strides=[te.var("B_s"), 1]
    )
    C_buf = tvm.tir.decl_buffer(
        C.shape, C.dtype, name="C", offset_factor=1, strides=[te.var("C_s"), 1]
    )

    def intrin_func(ins, outs):
        aa, bb = ins
        cc = outs[0]

        # int8 kernels are named gemm_*, int16 kernels gemm16_*.
        gemm_func_prefix = "gemm" if in_dtype == "int8" else "gemm16"

        def _reduce_update():
            # Accumulate into C (used when the reduction is split).
            ib = tvm.tir.ir_builder.create()
            ib.emit(
                tvm.tir.call_extern(
                    "int32",
                    f"{gemm_func_prefix}_{M}x{K}x{N}_update_{uniq_id}",
                    aa.access_ptr("r"),
                    bb.access_ptr("r"),
                    cc.access_ptr("w"),
                    aa.strides[0] * stride_w,
                    bb.strides[0],
                    cc.strides[0],
                )
            )
            return ib.get()

        def _reduce_reset():
            # Zero-initialize C before the reduction starts.
            ib = tvm.tir.ir_builder.create()
            ib.emit(
                tvm.tir.call_extern(
                    "int32", f"gemm_{M}x{K}x{N}_reset_{uniq_id}", cc.access_ptr("w"), cc.strides[0]
                )
            )
            return ib.get()

        def _body():
            # Full GEMM: sets (not accumulates) C over the whole reduction.
            ib = tvm.tir.ir_builder.create()
            ib.emit(
                tvm.tir.call_extern(
                    "int32",
                    f"{gemm_func_prefix}_{M}x{K}x{N}_body_{uniq_id}",
                    aa.access_ptr("r"),
                    bb.access_ptr("r"),
                    cc.access_ptr("w"),
                    aa.strides[0] * stride_w,
                    bb.strides[0],
                    cc.strides[0],
                )
            )
            return ib.get()

        return _body(), _reduce_reset(), _reduce_update()

    intrin_decl = te.decl_tensor_intrin(C.op, intrin_func, binds={A: A_buf, B: B_buf, C: C_buf})
    return intrin_decl, uniq_id
def gemm_MxKxN_impl(M, K, N, uniq_id) -> str:
    """Emit C code for gemm impl.

    Generates C source for a family of MxKxN GEMM micro-kernels where A is
    MxK, B is NxK (read one row per output column, i.e. C = A * B^T), and C
    is MxN with int32 accumulators.  For each of the int8 ("gemm_*") and
    int16 ("gemm16_*") element types it emits:

      * ``*_body_*``   - computes C from scratch (overwrites C),
      * ``*_update_*`` - accumulates into C,
      * ``*_loop_*``   - plain scalar fallbacks, used when M < 2 && N < 2,
      * ``*_rest_*``   - scalar handling of the K remainder that does not
        fill a whole 32-bit word,
      * ``gemm_*_reset_*`` - zero-fills C.

    The SIMD paths widen packed int8 operands to int16 via ``read_and_pad``
    and accumulate two int16 products at a time with the DSP ``__smlad``
    intrinsic (see the CMSIS-NN paper, https://arxiv.org/abs/1801.06601).

    Every emitted function name carries the ``uniq_id`` suffix so several
    instantiations can be linked into one module.  Returns the C source as
    a single string.
    """
    # TODO(weberlo, areusch): are there any SIMD tricks to zero out arrays quickly?
    # aa_pad_size = M * K
    bb_pad_size = N * K
    # code reference: CMSIS-NN paper (https://arxiv.org/abs/1801.06601)
    # NOTE(review): in the *_body_rest_* kernels below, the K-remainder
    # contribution is stored with `=` (overwriting the partial sum the SIMD
    # body already wrote into cc), whereas the matching *_update_rest_*
    # kernels use `+=`. Confirm the `=` is intentional.
    cc_code = (
        common.common_includes
        # Fallback definition of read_and_pad (widens 4 packed int8 values
        # into two words of packed int16) for toolchains lacking it.
        + f"""
#ifndef ARM_CPU_MPROFILE_READ_AND_PAD_EXISTS
#define ARM_CPU_MPROFILE_READ_AND_PAD_EXISTS
__attribute__((always_inline)) static inline const int8_t *read_and_pad(const int8_t *source, int32_t *out1, int32_t *out2)
{{
int32_t inA;
memcpy(&inA, source, 4);
source += 4;
int32_t inAbuf1 = __sxtb16(__ror((uint32_t)inA, 8));
int32_t inAbuf2 = __sxtb16(inA);
*out2 = (int32_t)(__pkhtb(inAbuf1, inAbuf2, 16));
*out1 = (int32_t)(__pkhbt(inAbuf2, inAbuf1, 16));
return source;
}}
#endif
"""
        # The GEMM kernels proper (int8 "gemm_*" first, then int16 "gemm16_*").
        + f"""
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t gemm_{M}x{N}_body_rest_{uniq_id}(
int K,
int8_t *aa, int8_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
int k_base = (K / 4) * 4;
switch ( K % 4 ) {{
case 1:
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int8_t *a_ptr = &aa[i * A_stride + k_base];
int8_t *b_ptr = &bb[j * B_stride + k_base];
cc[i * C_stride + j] = (int32_t) a_ptr[0] * (int32_t) b_ptr[0];
}}
}}
break;
case 2:
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int8_t *a_ptr = &aa[i * A_stride + k_base];
int8_t *b_ptr = &bb[j * B_stride + k_base];
cc[i * C_stride + j] = (int32_t) a_ptr[0] * (int32_t) b_ptr[0]
+ (int32_t) a_ptr[1] * (int32_t) b_ptr[1];
}}
}}
break;
case 3:
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int8_t *a_ptr = &aa[i * A_stride + k_base];
int8_t *b_ptr = &bb[j * B_stride + k_base];
cc[i * C_stride + j] = (int32_t) a_ptr[0] * (int32_t) b_ptr[0]
+ (int32_t) a_ptr[1] * (int32_t) b_ptr[1]
+ (int32_t) a_ptr[2] * (int32_t) b_ptr[2];
}}
}}
break;
}}
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t gemm_{M}x{K}x{N}_body_loop_{uniq_id}(
int8_t *aa, int8_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int32_t sum = 0;
for (int l = 0; l < {K}; l++) {{
sum += (int32_t) aa[i*A_stride + l] * (int32_t) bb[j*B_stride + l];
}}
// NOTE: this is the line where `*_body` differs from `*_update`. here
// we're *setting* the result, instead of accumulating, because we know
// the `i` and `j` itervars span their entire respective axes.
cc[i*C_stride + j] = sum;
}}
}}
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t gemm_{M}x{K}x{N}_body_{uniq_id}(
int8_t *aa, int8_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
int16_t bb_pad[{bb_pad_size}];
int32_t retcode = 0;
if ( {M} < 2 && {N} < 2 ) {{
retcode = gemm_{M}x{K}x{N}_body_loop_{uniq_id}(aa, bb, cc, A_stride, B_stride, C_stride);
goto out;
}}
for (int i = 0; i < {N}; i++)
for (int j = 0; j < {K} / 4; j++)
read_and_pad(&bb[i*B_stride + j*4], (int32_t*) &bb_pad[i*{K} + j*4], (int32_t*) &bb_pad[i*{K} + j*4 + 2]);
for (int i = 0; i < {M}; i++) {{
int16_t aa_pad_line[{K}];
for (int l = 0; l < {K} / 4; l++)
read_and_pad(&aa[i*A_stride + l*4], (int32_t*) &aa_pad_line[l*4], (int32_t*) &aa_pad_line[l*4 + 2]);
for (int j = 0; j < {N}; j++) {{
int32_t *aa_ptr = (int32_t *) aa_pad_line;
int32_t *bb_ptr = (int32_t *) &bb_pad[j*{K}];
int32_t sum = 0;
for (int l = 0; l < 2 * ({K} / 4); l++) {{
sum = __smlad(*aa_ptr, *bb_ptr, sum);
++ aa_ptr; ++ bb_ptr;
}}
// NOTE: this is the line where `*_body` differs from `*_update`. here
// we're *setting* the result, instead of accumulating, because we know
// the `i` and `j` itervars span their entire respective axes.
cc[i*C_stride + j] = sum;
}}
}}
if ( {K} % 4 != 0 )
gemm_{M}x{N}_body_rest_{uniq_id}({K}, aa, bb, cc, A_stride, B_stride, C_stride);
out:
return retcode;
}}
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t gemm_{M}x{N}_update_rest_{uniq_id}(
int K,
int8_t *aa, int8_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
int k_base = (K / 4) * 4;
switch ( K % 4 ) {{
case 1:
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int8_t *a_ptr = &aa[i * A_stride + k_base];
int8_t *b_ptr = &bb[j * B_stride + k_base];
cc[i * C_stride + j] += (int32_t) a_ptr[0] * (int32_t) b_ptr[0];
}}
}}
break;
case 2:
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int8_t *a_ptr = &aa[i * A_stride + k_base];
int8_t *b_ptr = &bb[j * B_stride + k_base];
cc[i * C_stride + j] += (int32_t) a_ptr[0] * (int32_t) b_ptr[0]
+ (int32_t) a_ptr[1] * (int32_t) b_ptr[1];
}}
}}
break;
case 3:
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int8_t *a_ptr = &aa[i * A_stride + k_base];
int8_t *b_ptr = &bb[j * B_stride + k_base];
cc[i * C_stride + j] += (int32_t) a_ptr[0] * (int32_t) b_ptr[0]
+ (int32_t) a_ptr[1] * (int32_t) b_ptr[1]
+ (int32_t) a_ptr[2] * (int32_t) b_ptr[2];
}}
}}
break;
}}
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t gemm_{M}x{K}x{N}_update_loop_{uniq_id}(
int8_t *aa, int8_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int32_t sum = 0;
for (int l = 0; l < {K}; l++) {{
sum += (int32_t) aa[i*A_stride + l] * (int32_t) bb[j*B_stride + l];
}}
cc[i*C_stride + j] += sum;
}}
}}
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t gemm_{M}x{K}x{N}_update_{uniq_id}(
int8_t *aa, int8_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
int16_t bb_pad[{bb_pad_size}];
int32_t retcode = 0;
if ( {M} < 2 && {N} < 2 ) {{
retcode = gemm_{M}x{K}x{N}_update_loop_{uniq_id}(aa, bb, cc, A_stride, B_stride, C_stride);
goto out;
}}
for (int i = 0; i < {N}; i++)
for (int j = 0; j < {K} / 4; j++)
read_and_pad(&bb[i*B_stride + j*4], (int32_t*) &bb_pad[i*{K} + j*4], (int32_t*) &bb_pad[i*{K} + j*4 + 2]);
for (int i = 0; i < {M}; i++) {{
int16_t aa_pad_line[{K}];
for (int l = 0; l < {K} / 4; l++)
read_and_pad(&aa[i*A_stride + l*4], (int32_t*) &aa_pad_line[l*4], (int32_t*) &aa_pad_line[l*4 + 2]);
for (int j = 0; j < {N}; j++) {{
int32_t *aa_ptr = (int32_t *) aa_pad_line;
int32_t *bb_ptr = (int32_t *) &bb_pad[j*{K}];
int32_t sum = 0;
for (int l = 0; l < 2 * ({K} / 4); l++) {{
sum = __smlad(*aa_ptr, *bb_ptr, sum);
++ aa_ptr; ++ bb_ptr;
}}
cc[i*C_stride + j] += sum;
}}
}}
if ( {K} % 4 != 0 )
gemm_{M}x{N}_update_rest_{uniq_id}({K}, aa, bb, cc, A_stride, B_stride, C_stride);
out:
return retcode;
}}
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t gemm16_{M}x{N}_body_rest_{uniq_id}(
int K,
int16_t *aa, int16_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
int k_base = (K / 2) * 2;
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int16_t *a_ptr = &aa[i * A_stride + k_base];
int16_t *b_ptr = &bb[j * B_stride + k_base];
cc[i * C_stride + j] = (int32_t) a_ptr[0] * (int32_t) b_ptr[0];
}}
}}
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t gemm16_{M}x{K}x{N}_body_loop_{uniq_id}(
int16_t *aa, int16_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int32_t sum = 0;
for (int l = 0; l < {K}; l++) {{
sum += (int32_t) aa[i*A_stride + l] * (int32_t) bb[j*B_stride + l];
}}
// NOTE: this is the line where `*_body` differs from `*_update`. here
// we're *setting* the result, instead of accumulating, because we know
// the `i` and `j` itervars span their entire respective axes.
cc[i*C_stride + j] = sum;
}}
}}
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t gemm16_{M}x{K}x{N}_body_{uniq_id}(
int16_t *aa, int16_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
int32_t retcode = 0;
if ( {M} < 2 && {N} < 2 ) {{
retcode = gemm16_{M}x{K}x{N}_body_loop_{uniq_id}(aa, bb, cc, A_stride, B_stride, C_stride);
goto out;
}}
if(((uint32_t)aa & 0x3) != 0 || ((uint32_t)bb & 0x3) != 0){{
retcode = kTvmErrorFunctionCallInvalidArg;
goto out;
}}
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int32_t aa_vector[{K} / 2];
int32_t bb_vector[{K} / 2];
memcpy(&aa_vector, &aa[i * A_stride], sizeof(aa_vector));
memcpy(&bb_vector, &bb[j * B_stride], sizeof(bb_vector));
int32_t sum = 0;
for (int l = 0; l < {K} / 2; l++) {{
sum = __smlad(aa_vector[l], bb_vector[l], sum);
}}
// NOTE: this is the line where `*_body` differs from `*_update`. here
// we're *setting* the result, instead of accumulating, because we know
// the `i` and `j` itervars span their entire respective axes.
cc[i*C_stride + j] = sum;
}}
}}
if ( {K} % 2 != 0 )
gemm16_{M}x{N}_body_rest_{uniq_id}({K}, aa, bb, cc, A_stride, B_stride, C_stride);
out:
return retcode;
}}
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t gemm16_{M}x{N}_update_rest_{uniq_id}(
int K,
int16_t *aa, int16_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
int k_base = (K / 2) * 2;
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int16_t *a_ptr = &aa[i * A_stride + k_base];
int16_t *b_ptr = &bb[j * B_stride + k_base];
cc[i * C_stride + j] += (int32_t) a_ptr[0] * (int32_t) b_ptr[0];
}}
}}
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t gemm16_{M}x{K}x{N}_update_loop_{uniq_id}(
int16_t *aa, int16_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int32_t sum = 0;
for (int l = 0; l < {K}; l++) {{
sum += (int32_t) aa[i*A_stride + l] * (int32_t) bb[j*B_stride + l];
}}
cc[i*C_stride + j] += sum;
}}
}}
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t gemm16_{M}x{K}x{N}_update_{uniq_id}(
int16_t *aa, int16_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
int32_t retcode = 0;
if ( {M} < 2 && {N} < 2 ) {{
retcode = gemm16_{M}x{K}x{N}_update_loop_{uniq_id}(aa, bb, cc, A_stride, B_stride, C_stride);
goto out;
}}
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int32_t aa_vector[{K} / 2];
int32_t bb_vector[{K} / 2];
memcpy(&aa_vector, &aa[i * A_stride], sizeof(aa_vector));
memcpy(&bb_vector, &bb[j * B_stride], sizeof(bb_vector));
int32_t sum = 0;
for (int l = 0; l < {K} / 2; l++) {{
sum = __smlad(aa_vector[l], bb_vector[l], sum);
}}
cc[i*C_stride + j] += sum;
}}
}}
if ( {K} % 2 != 0 )
gemm16_{M}x{N}_update_rest_{uniq_id}({K}, aa, bb, cc, A_stride, B_stride, C_stride);
out:
return retcode;
}}
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t gemm_{M}x{K}x{N}_reset_{uniq_id}(int32_t *cc, int C_stride) {{
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
cc[i*C_stride + j] = 0;
}}
}}
return 0;
}}
"""
    )
    return cc_code
| 17,071 | 30.91028 | 123 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/mprofile/dsp/micro_kernel/common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-value-for-parameter
"""Defines common C code for all microkernel operations."""
# C prelude shared by every generated micro-kernel. Besides the standard
# headers and the TVM CRT error codes, it supplies fallback definitions of
# the __ror / __pkhbt / __pkhtb intrinsics (guarded by
# ARM_CPU_INTRINSICS_EXIST) for toolchains whose <arm_acle.h> does not
# provide them.
common_includes = """
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <arm_acle.h>
#include <tvm/runtime/crt/error_codes.h>
#ifndef ARM_CPU_INTRINSICS_EXIST
#define ARM_CPU_INTRINSICS_EXIST
__attribute__((always_inline)) uint32_t __ror(uint32_t op1, uint32_t op2)
{
op2 %= 32U;
if (op2 == 0U)
{
return op1;
}
return (op1 >> op2) | (op1 << (32U - op2));
}
#define __pkhbt(ARG1,ARG2,ARG3) \
__extension__ \
({ \
uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
__asm("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
__RES; \
})
#define __pkhtb(ARG1,ARG2,ARG3) \
__extension__ \
({ \
uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
if (ARG3 == 0) \
__asm("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
else \
__asm("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
__RES; \
})
#endif
"""
# Width in bits of a machine word on the targeted microcontrollers.
MICRO_WORD_LENGTH_BITS = 32


def num_simd_lanes_per_word(dtype: str) -> int:
    """Return how many values of `dtype` fit into one 32-bit machine word.

    >>> num_simd_lanes_per_word("int8")
    4
    >>> num_simd_lanes_per_word("int16")
    2
    """
    prefix = "int"
    assert dtype.startswith(prefix)
    width_bits = int(dtype[len(prefix) :])
    return MICRO_WORD_LENGTH_BITS // width_bits
| 2,309 | 28.240506 | 99 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/mprofile/dsp/micro_kernel/max_pool.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-value-for-parameter
"""Defines max intrinsics for elemwise max operation with v7e-m DSP instructions."""
import random
import string
import tvm
from tvm import te
from . import common
def intrin_max(shape, in_dtype, out_dtype):
    """Defines a v7e-m DSP-accelerated max pool.

    Declares a tensor intrinsic over `shape` whose body/reset/update steps
    call the generated `max8_*` C kernels. Returns the intrinsic declaration
    plus the random suffix used in the kernel names.
    """
    UNIQ_ID_LEN = 8
    uniq_id = "".join(random.choices(string.ascii_uppercase, k=UNIQ_ID_LEN))
    func_prefix = "max8"

    # The C kernels only handle int8 in / int8 out.
    assert in_dtype == "int8"
    assert out_dtype == "int8"

    data = te.placeholder(shape, name="x", dtype=in_dtype)
    reduce_k = te.reduce_axis((0, 1), name="rc")
    pooled = te.compute(
        shape, lambda *idx: tvm.tir.max(data[idx], axis=[reduce_k]).astype(out_dtype)
    )

    def _intrin_func(ins, outs):
        src = ins[0]
        dst = outs[0]

        def _emit(*call_args):
            # Wrap a single call_extern into a stmt via the IR builder.
            builder = tvm.tir.ir_builder.create()
            builder.emit(tvm.tir.call_extern(dst.dtype, *call_args))
            return builder.get()

        def _body():
            return _emit(
                f"{func_prefix}_{uniq_id}",
                src.access_ptr("r"),
                dst.access_ptr("w"),
                dst.strides[0],
            )

        def _reduce_reset():
            return _emit(f"{func_prefix}_reset_{uniq_id}", dst.access_ptr("w"), dst.strides[0])

        def _reduce_update():
            # Updating is the same element-wise max as the full body.
            return _body()

        return _body(), _reduce_reset(), _reduce_update()

    binds = {}
    for tensor in (data, pooled):
        binds[tensor] = tvm.tir.decl_buffer(
            tensor.shape,
            tensor.dtype,
            tensor.op.name,
            strides=[te.var(f"{tensor.op.name}_s_{dim}") for dim in range(len(tensor.shape))],
            offset_factor=1,
        )
    intrin_decl = te.decl_tensor_intrin(pooled.op, _intrin_func, binds=binds)
    return intrin_decl, uniq_id
def max_impl(uniq_id) -> str:
    """Emit C code for pool impl.

    Emits three helpers, each suffixed with `uniq_id`:
      * ``max8_reset_*`` - fills the int8 result vector with -128,
      * ``max8_loop_*``  - scalar element-wise max fallback,
      * ``max8_*``       - word-at-a-time element-wise max using the
        __ssub8/__sel DSP pair, with scalar handling of short inputs,
        incompatibly-aligned pointers, unaligned heads, and tails.
    """
    cc_code = (
        common.common_includes
        + f"""
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t max8_reset_{uniq_id}(
int8_t *res,
int N) {{
memset(res, (int8_t)-128, N * sizeof(*res));
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t max8_loop_{uniq_id}(
int8_t *arg,
int8_t *res,
int N) {{
for ( int i = 0; i < N; ++ i )
if ( arg[i] > res[i] )
res[i] = arg[i];
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t max8_{uniq_id}(
int8_t *arg,
int8_t *res,
int N) {{
int32_t *parg32, *pres32;
int una_arg = (int32_t)arg & 0x3, una_res = (int32_t)res & 0x3;
int32_t retcode = 0;
if ( N < 4 || ((una_arg || una_res) && una_arg != una_res) ) {{
retcode = max8_loop_{uniq_id}(arg, res, N);
goto out;
}}
if ( una_arg ) {{
int n = (4 - una_arg);
if ( n > N || (N - n) < 4 )
n = N;
retcode = max8_loop_{uniq_id}(arg, res, n);
N -= n;
if ( N == 0 )
goto out;
arg += n; res += n;
}}
parg32 = (int32_t *)arg;
pres32 = (int32_t *)res;
for ( int i = 0; i < N / 4; ++ i ) {{
int32_t arg32 = *parg32 ++;
int32_t res32 = *pres32;
__ssub8(arg32, res32);
res32 = __sel(arg32, res32);
*pres32 ++ = res32;
}}
if ( N & 0x3 ) {{
retcode = max8_loop_{uniq_id}((int8_t *)parg32, (int8_t *)pres32, N & 0x3);
goto out;
}}
out:
return retcode;
}}
"""
    )
    return cc_code
| 4,336 | 25.126506 | 97 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/mprofile/dsp/micro_kernel/tensordot.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Generates optimized code to compute a tensor dot product on ARMv7E-M.
This function can be used to tensorize many common operators including regular conv2d, depthwise
conv2d, and grouped conv2d for some data and kernel layouts. When for regular convolution, use data
layout HHWC and kernel layout OHWI. For depthwise convolution, use data layout data layout is NCHW
and kernel layout OIHW.
The generated code will also work on v8-M chips that have the DSP instructions (unlike v7E-M, they
are optional in v8-M). Note that the generated code does not use the (potentially very useful) MVE
instructions present on some v8-M chips.
"""
from dataclasses import dataclass
from itertools import chain
import textwrap
from typing import Iterator, Optional, Tuple
@dataclass
class SMLAInstruction:
    """One SMLAxy multiply-accumulate instruction, held as operand names.

    The original docstring ("Class for keeping track of an item in
    inventory") was copy-pasted from the dataclasses documentation and did
    not describe this class.

    Attributes: `instruction` is the mnemonic (e.g. "smlabb", "smlad");
    `tensor_var` and `kernel_var` are the C variable names of the packed
    int32 operands.
    """

    instruction: str
    tensor_var: str
    kernel_var: str

    def call_with_acle(self, accumulator_var: str) -> str:
        """Render this instruction as an ACLE intrinsic call statement.

        The accumulator appears as both an input operand and the assignment
        target, matching the SMLAxy accumulate semantics.
        """
        return (
            f"{accumulator_var} = __{self.instruction}"
            f"({self.tensor_var}, {self.kernel_var}, {accumulator_var});"
        )

    def has_same_operands(self, other: "SMLAInstruction") -> bool:
        """Return True if `other` reads the same tensor and kernel words."""
        return self.tensor_var == other.tensor_var and self.kernel_var == other.kernel_var
def _get_c_function_name(num_outputs, dimensions, offsets, x_strides):
"""Generates a C function name for tensordot.
We do not need a suffix, as the generated function will have an #include guard. Unlike other
microTVM operators, _get_c_function_name is never called externally.
"""
tensor_w, kernel_h, kernel_w = dimensions
return (
f"tensordot_opt_x{num_outputs}_int16_w{tensor_w}_"
+ f"{kernel_h}x{kernel_w}_"
+ "".join(map(str, offsets))
+ (f"_{x_strides[0]}_{x_strides[1]}" if num_outputs > 1 else "")
)
def _init_biased_accumulators(num_outputs):
"""Generates code to load the bias into the accumulators.
Addition is commutative, so we could add the bias before, during, or after performing our
multiply-accumulate operations. Where we add the bias does not change the overflow behavior.
Doing the bias add takes one cycle either way (if done at the beginning we can't use a SMULXY
trick to set sum_i to zero for "free"). However, doing it at the beginning frees up a register,
so we'll do it first.
"""
assignments = [f"sum_{x:x} = *bias" for x in range(num_outputs)]
joined_assignments = ", ".join(assignments)
return f"int32_t {joined_assignments};"
def _get_tensor_halfwords(dimensions, offset, num_outputs, in_stride) -> Iterator[Optional[Tuple]]:
"""Gets the logical indices of the data that will be stored in memory at the tensor pointer.
Returns an Iterator of Optional[Tuple], while skipping over word-aligned pairs of unrelated
halfwords. The returned iterator is as short as possible while having even length and containing
all relevant tensor data. Tuples in the returned Iterator represent an (y, x) offset from the
top-left tensor position being used in this convolution. We need to be aware of the None values
so our code is correctly word-aligned.
One consequence of these requirements - each row in the tensor is broken into word-aligned pairs
of halfwords (which are later combined into full words). See the test cases (located in
tests/python/topi/python/test_topi_conv2d_tensordot_opts.py) for usage examples.
"""
tensor_w, kernel_h, kernel_w = dimensions
max_x_val = (num_outputs - 1) * in_stride + kernel_w
halfwords = []
for y in range(kernel_h):
# If needed, pad so the beginning of the row is word-aligned
if (y * tensor_w + offset) % 2 == 1:
halfwords.append(None)
for x in range(max_x_val):
halfwords.append((y, x))
# If needed, pad so the row length is word aligned
if (y * tensor_w + offset + max_x_val) % 2 == 1:
halfwords.append(None)
return halfwords
def _get_kernel_halfwords(dimensions, offset) -> Iterator[Optional[Tuple]]:
"""Gets the logical indices of the data that will be stored in memory at the kernel pointer.
Returns an Iterator of Optional[Tuple]. The returned iterator is as short as possible while
having even length and containing all kernel data. Tuples in the returned Iterator represent
an (y, x) position in the kernel, while None values represent other, irrelevant data. We need
to be aware of the None values so our code is correctly word-aligned.
See test cases in tests/python/topi/python/test_topi_conv2d_tensordot_opts.py for examples.
"""
_, kernel_h, kernel_w = dimensions
halfwords = []
# Kernel data starts `offset` places after the pointer value
if offset == 1:
halfwords.append(None)
for y in range(kernel_h):
for x in range(kernel_w):
halfwords.append((y, x))
# Make sure the returned iterator has even length by padding with an "unknown" value. We want
# even length as this corresponds to an integer number of int32 words.
if (kernel_h * kernel_w + offset) % 2 == 1:
halfwords.append(None)
return halfwords
def _get_int16_alias(position) -> str:
if position is None:
return "unknown"
y, x = position
return f"y{y:0>2x}_x{x:0>2x}"
def _load_tensor_vars(halfwords, tensor_w) -> Iterator[str]:
    """Yield C declarations loading each planned word of tensor data into a
    named int32 alias (one declaration per pair of halfwords)."""
    assert len(halfwords) % 2 == 0
    # If the first halfword is padding, real data starts one halfword late.
    offset = 0 if halfwords[0] else 1
    for low, high in zip(halfwords[::2], halfwords[1::2]):
        alias = f"{_get_int16_alias(low)}__{_get_int16_alias(high)}"
        y, x = high or low
        word_index = (y * tensor_w + x + offset) // 2
        yield f"int32_t tensor__{alias} = tensor[{word_index}];"
def _load_kernel_vars(halfwords) -> Iterator[str]:
    """Yield C declarations loading each word of kernel data into a named
    int32 alias (kernel words are consumed consecutively from index 0)."""
    assert len(halfwords) % 2 == 0
    for word_index, (low, high) in enumerate(zip(halfwords[::2], halfwords[1::2])):
        alias = f"{_get_int16_alias(low)}__{_get_int16_alias(high)}"
        yield f"int32_t kernel__{alias} = kernel[{word_index}];"
def _get_draft_macs(
    kernel_dims, tensor_halfwords, kernel_halfwords, offset
) -> Iterator[SMLAInstruction]:
    """Yield unrolled single MAC instructions computing one tensordot sum.

    Unrolling adds a tiny amount of code size (< 0.02 KB) but makes the
    result much faster. No SIMD pairing happens here - that is done
    afterwards by _apply_simd_optimizations. Yielding SMLAInstructions
    lazily lets later passes compose by iterator chaining.
    """

    def locate(y, x, halfwords) -> Tuple[str, str]:
        # Return (word alias, half selector): "b" if the halfword is the low
        # half of its word, "t" if it is the high half.
        index = halfwords.index((y, x))
        if index % 2 == 0:
            partner = halfwords[index + 1]
            return f"{_get_int16_alias((y, x))}__{_get_int16_alias(partner)}", "b"
        partner = halfwords[index - 1]
        return f"{_get_int16_alias(partner)}__{_get_int16_alias((y, x))}", "t"

    kernel_h, kernel_w = kernel_dims
    for y in range(kernel_h):
        for x in range(kernel_w):
            tensor_var, tensor_half = locate(y, x + offset, tensor_halfwords)
            kernel_var, kernel_half = locate(y, x, kernel_halfwords)
            yield SMLAInstruction(
                f"smla{tensor_half}{kernel_half}",
                f"tensor__{tensor_var}",
                f"kernel__{kernel_var}",
            )
def _apply_simd_optimizations(instruction_tuples) -> Iterator[SMLAInstruction]:
    """Fuse adjacent single MACs into SIMD MAC instructions where possible.

    The compiler cannot do this itself: calling __smlaxy pins the exact
    SMLAxy instruction. Adjacent instructions over the same operand words
    are fused - smlabb+smlatt becomes smlad, smlabt+smlatb becomes smladx.
    Takes and returns iterators of SMLAInstruction (possibly shorter).
    """
    pending = list(instruction_tuples)
    i = 0
    while i < len(pending):
        current = pending[i]
        successor = pending[i + 1] if i + 1 < len(pending) else None
        if successor is not None and current.has_same_operands(successor):
            pair = sorted([current.instruction, successor.instruction])
            if pair == ["smlabb", "smlatt"]:
                yield SMLAInstruction("smlad", current.tensor_var, current.kernel_var)
                i += 2  # both inputs consumed by the fused instruction
                continue
            if pair == ["smlabt", "smlatb"]:
                yield SMLAInstruction("smladx", current.tensor_var, current.kernel_var)
                i += 2
                continue
        yield current
        i += 1
def _expand_instruction_tuples(instruction_tuples, index) -> Iterator[str]:
"""Converts an iterator of SMLAInstructions into lines of C code.
We want the compiler to re-order these with the memory loads, so we generate them as a series of
calls to instruction aliases instead of as a single `asm` block.
"""
for smla_instruction in instruction_tuples:
assert "smla" in smla_instruction.instruction
# We call the instruction using the Arm C Language Extensions. Using ACLE gives better
# cross-compiler compatibility than using __builtin functions.
yield smla_instruction.call_with_acle(f"sum_{index}")
def _requantize_sums(num_outputs, requantize_shift, output_zero_point) -> Iterator[str]:
"""Generates code to requantize the accumulator values.
The generated code does not use floating point instructions, as it simulates floating point
multiplication with an a int64 multiply + shift. The bias is added at the beginning, so we can
skip doing it now. The shift is hard-coded, as this saves a few cycles without hurting accuracy
in "most" cases.
It's *possible* we could save one more cycle here by pre-multiplying the bias with the
requantize multiplier, and then doing the bias addition and shift in the same cycle (via <op2>).
However, it's complicated and only saves one cycle.
It's also worth noting the SSAT16 operation doesn't help us here. The data isn't stored as two
halfwords in a word, and rearrainging it would take at least one cycle. Two SSAT operations is
just as good.
Calling __ssat directly is a little bit gross, but GCC and Clang are unreliable about compiling
other ways of writing this. Both the multiply + shift and shift + saturation combine to one
instruction each.
"""
yield "int32_t scale_val = *scale;"
for i in range(num_outputs):
yield f"int32_t requant_{i} = (sum_{i} * (int64_t) scale_val) >> {requantize_shift - 1};"
yield f"requant_{i} = (requant_{i} + 1) >> 1;"
yield f"requant_{i} = __ssat(requant_{i} + {output_zero_point}, 8);"
def _write_sums_to_memory(num_outputs, offset, stride) -> Iterator[str]:
"""Generates code to write the requantized sums to memory.
Note - halfword packing here *does* help. It seems
like it wouldn't, as doing two pipelined int16 stores takes two cycles - the same as halfword
packing plus a pipelined int32 store. We still do the int16 stores when there is an output
stride, though.
However, this lets the compiler re-order instructions to better preserve memory, as it doesn't
like breaking apart the store instructions (as this messes up pipelining).
"""
if stride > 1:
for i in range(num_outputs):
yield f"((int16_t*) output)[{i * stride + offset}] = (int16_t) requant_{i};"
else:
num_packed = (num_outputs - offset) // 2
for i in range(num_packed):
index = 2 * i + offset
# We must explicitly call asm inline to use the PKHBT instruction. It is not part of
# ACLE and has no __builtin. Writing it using masks and bitshifts does not work either:
# Arm GCC 12 with -O3 does not compile these efficiently.
yield f"int packed_res_{i};"
yield (
f'__asm__ ("pkhbt %0, %1, %2, lsl #16" : "=r" (packed_res_{i}) : '
f'"r" (requant_{index}), "r" (requant_{index + 1}));'
)
if offset == 1:
yield "((int16_t*) output)[1] = (int16_t) requant_0;"
for i in range(num_packed):
yield f"output[{offset + i}] = packed_res_{i};"
if (offset + num_outputs) % 2 == 1:
yield f"((int16_t*) output)[{num_packed * 2}] = (int16_t) requant_{num_packed * 2};"
def tensordot_int16_impl(
    num_outputs: int,
    dimensions: Tuple[int, int, int],
    offsets: Tuple[int, int, int],
    x_strides: Tuple[int, int],
    requantize_shift: int = 33,
    output_zero_point: int = -128,
) -> Tuple[str, str]:
    """Generates code to compute a tensor dot product with requantization.

    The generated function takes pointers to the output, tensor, and kernel
    as input. All pointers must be word aligned. Only works with `int16`
    data type. The generated code is optimized for the ARMv7E-M
    architecture.

    Parameters
    ----------
    num_outputs: int
        The number of tensordot outputs to compute per function call.
        Computing more than one at once makes us much faster by reducing how
        often overlapping data is loaded. However, setting this too high
        causes us to run out of registers and need to store data on the
        stack. We should autotune this, but num_outputs=2 is usually OK.

    dimensions: Tuple[int, int, int]
        The dimensions of each tensordot operation. dimensions[1] and
        dimensions[2] are the height and width of the kernel, respectively.
        dimensions[0] is the width of the data tensor, which is usually
        larger than the kernel.

    offsets: Tuple[int, int, int]
        Each value is 0 or 1, and represents how far after the given data,
        kernel, and output pointers (respectively) we should start
        reading/writing. This prevents us from having to check if each
        pointer is aligned or unaligned at runtime, making us faster.

    x_strides: Tuple[int, int]
        The distance (in halfwords) between the start of each input tensor,
        and where to write each output result respectively. Only used when
        num_outputs > 1.

    requantize_shift: int
        The distance to right shift after multiplying by the requantization
        scale. Defaults to 33, as this lets us skip a shift operation.

    output_zero_point: int
        The output zero point, which will be subtracted after scale
        multiplication but before clipping. Defaults to -128, as most models
        always use this.

    Returns
    -------
    func_name, func_code: Tuple[str, str]
        The name and source code of the generated function.
    """
    function_name = _get_c_function_name(num_outputs, dimensions, offsets, x_strides)
    tensor_w, kernel_h, kernel_w = dimensions
    tensor_offset, kernel_offset, output_offset = offsets
    # Offsets are baked into the generated code as halfword adjustments, so
    # only the values 0 and 1 are meaningful.
    assert tensor_offset < 2 and kernel_offset < 2 and output_offset < 2
    in_stride, out_stride = x_strides

    # Plan which halfwords each loaded word holds, then emit the loads.
    tensor_halfwords = _get_tensor_halfwords(dimensions, tensor_offset, num_outputs, in_stride)
    kernel_halfwords = _get_kernel_halfwords(dimensions, kernel_offset)
    load_tensor_lines = _load_tensor_vars(tensor_halfwords, tensor_w)
    load_kernel_lines = _load_kernel_vars(kernel_halfwords)

    def gen_single_loop_macs(index):
        # MAC lines for output `index`, whose tensor data starts
        # `index * in_stride` halfwords further along.
        draft_macs_iter = _get_draft_macs(
            (kernel_h, kernel_w), tensor_halfwords, kernel_halfwords, index * in_stride
        )
        draft_macs_iter = _apply_simd_optimizations(draft_macs_iter)
        return _expand_instruction_tuples(draft_macs_iter, index)

    multiply_acc_lines = chain.from_iterable(gen_single_loop_macs(i) for i in range(num_outputs))
    requantize_lines = _requantize_sums(
        num_outputs, requantize_shift=requantize_shift, output_zero_point=output_zero_point
    )
    write_out_lines = _write_sums_to_memory(num_outputs, output_offset, out_stride)

    def insert_lines(lines):
        # Join a generated section into a single template slot, indenting
        # continuation lines.
        return ("\n" + " " * 10).join(lines)

    # It's very common for one model to have different layers that use identical tensordot
    # functions. To prevent function re-definition errors, we need an #include guard. This is better
    # than adding a random suffix, as it saves flash memory.
    code = textwrap.dedent(
        f"""
#ifndef {function_name.upper()}_EXISTS
#define {function_name.upper()}_EXISTS
#include <arm_acle.h>
__attribute__((always_inline)) static inline int32_t {function_name}(
int32_t *output, int32_t *tensor, int32_t *kernel, int32_t *bias, int32_t *scale
) {{
{_init_biased_accumulators(num_outputs)}
{insert_lines(load_tensor_lines)}
{insert_lines(load_kernel_lines)}
{insert_lines(multiply_acc_lines)}
{insert_lines(requantize_lines)}
{insert_lines(write_out_lines)}
return 0;
}}
#endif
"""
    )
    return (function_name, code)
| 17,791 | 42.184466 | 100 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/mprofile/dsp/micro_kernel/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/mprofile/dsp/micro_kernel/avg_pool.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-value-for-parameter
"""Defines sum intrinsics for sum operation with v7e-m DSP instructions."""
import random
import string
import tvm
from tvm import te
from . import common
def intrin_sum(shape, in_dtype, out_dtype, reset=False):
    """Defines a v7e-m DSP-accelerated sum operation.

    Declares a TE tensor intrinsic that reduces the last axis of an int16
    tensor into a single int16 value by calling an external C function
    (emitted by ``sum_impl``) named ``sum16_<width>_<uniq_id>``.

    Parameters
    ----------
    shape : tuple
        Shape of the input placeholder; only the last dimension is reduced.
    in_dtype : str
        Input element type; must be "int16".
    out_dtype : str
        Output element type; must be "int16".
    reset : bool
        When True, the emitted call tells the C kernel to start the
        accumulation from zero instead of the current output value.

    Returns
    -------
    intrin_decl, uniq_id
        The declared tensor intrinsic and the random suffix used to make the
        generated C symbol names unique.
    """
    # Random suffix so several instantiations of this intrinsic can coexist
    # in one module without C symbol collisions.
    UNIQ_ID_LEN = 8
    uniq_id = "".join(random.choices(string.ascii_uppercase, k=UNIQ_ID_LEN))
    func_prefix = "sum16"
    assert in_dtype == "int16"
    assert out_dtype == "int16"
    width = shape[-1]
    x = te.placeholder(shape, name="x", dtype=in_dtype)
    k = te.reduce_axis((0, width), name="rc")
    def get_slice(indices, k):
        # Build an index tuple that offsets the last axis by the reduce var.
        s = list(indices)
        s[-1] = s[-1] + k
        return tuple(s)
    # Scalar (all-ones shape) output: the sum over the last axis.
    z = te.compute(
        (1,) * len(shape), lambda *i: te.sum(x[get_slice(i, k)], axis=[k]).astype(out_dtype)
    )
    def _intrin_func(ins, outs):
        aa = ins[0]
        cc = outs[0]
        def _body():
            # Call into the generated C kernel; it reads `width` int16 values
            # starting at the input pointer and accumulates into the output.
            ib = tvm.tir.ir_builder.create()
            ib.emit(
                tvm.tir.call_extern(
                    cc.dtype,
                    f"{func_prefix}_{width}_{uniq_id}",
                    aa.access_ptr("r"),
                    cc.access_ptr("w"),
                    aa.elem_offset,
                    1 if reset else 0,
                )
            )
            return ib.get()
        def _reduce_reset():
            # Zero the accumulator via the companion reset kernel.
            ib = tvm.tir.ir_builder.create()
            ib.emit(
                tvm.tir.call_extern(cc.dtype, f"{func_prefix}_reset_{uniq_id}", cc.access_ptr("w"))
            )
            return ib.get()
        def _reduce_update():
            return _body()
        return _body(), _reduce_reset(), _reduce_update()
    # Symbolic strides with offset_factor=1 let the intrinsic match buffers
    # of any layout/offset at schedule time.
    binds = {
        t: tvm.tir.decl_buffer(
            t.shape,
            t.dtype,
            t.op.name,
            strides=[te.var(f"{t.op.name}_s_{i}") for i in range(0, len(t.shape))],
            offset_factor=1,
        )
        for t in [x, z]
    }
    intrin_decl = te.decl_tensor_intrin(z.op, _intrin_func, binds=binds)
    return intrin_decl, uniq_id
def sum_impl(N, uniq_id):
    """Emit C code for sum impl.

    Returns the C source for the ``sum16_<N>_<uniq_id>`` kernel invoked by
    the ``intrin_sum`` tensor intrinsic, plus its ``sum16_reset`` companion.
    The kernel sums N int16 values using the DSP ``__smlad`` dual-MAC
    instruction (two int16 lanes per 32-bit word), handling an unaligned
    first element and an odd trailing element as scalars.

    The emitted string is compiled by the target toolchain, so it must not
    be altered except deliberately.
    """
    # NOTE: everything inside the f-string is runtime C code; {{ }} are
    # escaped braces, while {N} and {uniq_id} are substituted here.
    cc_code = (
        common.common_includes
        + f"""
#ifdef __cplusplus
extern "C"
#endif // __cplusplus
__attribute__((always_inline)) static inline int32_t sum16_reset_{uniq_id}(
    int16_t *res) {{
  *res = (int16_t)0;
  return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__attribute__((always_inline)) static inline int32_t sum16_{N}_{uniq_id}(
    int16_t *arr,
    int16_t *res16,
    long arr_offset,
    int reset) {{
  int n;
  int32_t *p32;
  int32_t res = reset ? 0 : *res16;
  if ( arr_offset % 4 != 0 ) {{
    res += *arr;
    p32 = (int32_t *)(&arr[1]);
    n = {N} - 1;
  }} else {{
    p32 = (int32_t *)arr;
    n = {N};
  }}
  for ( int i = 0; i < n / 2; ++ i ) {{
    res = __smlad(*p32, 0x00010001, res);
    ++ p32;
  }}
  if ( n % 2 != 0 )
    res += *(int16_t *)p32;
  *res16 = res;
  return 0;
}}
"""
    )
    return cc_code
| 3,810 | 24.92517 | 99 | py |
tvm | tvm-main/python/tvm/topi/testing/group_norm_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Group normalization in python"""
import numpy as np
def group_norm_python(data, gamma, beta, num_groups, channel_axis, axes, epsilon=1e-5):
    """Group normalization operator.

    Splits the channel axis into ``num_groups`` groups, normalizes each
    group over ``axes`` (plus the within-group channel axis) to zero mean
    and unit variance, then applies the affine parameters.

    Parameters
    ----------
    data : numpy.ndarray
        N-D with shape (d_0, d_1, ..., d_{N-1})
    gamma : numpy.ndarray
        1-D with shape (r_0) where r_0 == d_{channel_axis}
    beta : numpy.ndarray or None
        Optional, 1-D with shape (r_0) where r_0 == d_{channel_axis}
    num_groups : int
        The number of groups; must divide d_{channel_axis}.
    channel_axis : int
        The channel axis
    axes : list of int
        Axes over which the normalization is applied, excluding the
        channel axis.
    epsilon : float
        The epsilon value to avoid division by zero.

    Returns
    -------
    result : numpy.ndarray
        N-D with shape (d_0, d_1, ..., d_{N-1})
    """
    old_shape = data.shape
    # Split the channel axis into (num_groups, channels_per_group).
    new_shape = list(old_shape)
    new_shape[channel_axis] = old_shape[channel_axis] // num_groups
    new_shape.insert(channel_axis, num_groups)
    data = np.reshape(data, new_shape)
    # Shift the reduction axes to account for the inserted group axis; the
    # per-group channel axis (channel_axis + 1) is always reduced.
    new_axes = [channel_axis + 1]
    for axis in axes:
        new_axes.append(axis if axis < channel_axis else axis + 1)
    mean = np.mean(data, axis=tuple(new_axes), keepdims=True)
    var = np.var(data, axis=tuple(new_axes), keepdims=True)
    data = (data - mean) / np.sqrt(var + epsilon)
    data = np.reshape(data, old_shape)

    # Broadcast gamma/beta along the channel axis only.
    broadcast_shape = [1] * len(old_shape)
    broadcast_shape[channel_axis] = gamma.shape[0]
    data *= np.reshape(gamma, broadcast_shape)
    # Fix: only touch beta when it is provided — the original computed
    # beta.shape[0] unconditionally and crashed for the documented
    # optional beta=None case.
    if beta is not None:
        data += np.reshape(beta, broadcast_shape)
    return data
| 2,760 | 32.26506 | 87 | py |
tvm | tvm-main/python/tvm/topi/testing/batch_norm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Batch Normalization implemented in Numpy."""
import numpy as np
def batch_norm(
    x: np.ndarray,
    gamma: np.ndarray,
    beta: np.ndarray,
    moving_mean: np.ndarray,
    moving_var: np.ndarray,
    axis: int,
    epsilon: float,
    center: bool,
    scale: bool,
    training: bool,
    momentum: float,
):
    """Numpy reference for batch normalization.

    Normalizes ``x`` along every axis except ``axis`` using either the batch
    statistics (``training=True``) or the supplied running statistics, then
    optionally rescales by ``gamma`` and shifts by ``beta``.

    Parameters
    ----------
    x : np.ndarray
        Input to be batch-normalized.
    gamma : np.ndarray
        Per-channel scale, applied when ``scale`` is True.
    beta : np.ndarray
        Per-channel offset, applied when ``center`` is True.
    moving_mean, moving_var : np.ndarray
        Running statistics; used for normalization when not training and
        updated with ``momentum`` when training.
    axis : int
        The channel axis.
    epsilon : float
        Added to the variance to avoid division by zero.
    center, scale, training : bool
        Behavior switches as described above.
    momentum : float
        Weight of the batch statistics in the running-statistics update.

    Returns
    -------
    list of np.ndarray
        ``[normalized, new_moving_mean, new_moving_var]``.
    """
    # Shape that broadcasts a per-channel vector against x.
    bcast_shape = [1] * len(x.shape)
    bcast_shape[axis] = x.shape[axis]

    def _bcast(vec):
        return np.reshape(vec, bcast_shape)

    if training:
        # Reduce over every axis except the channel axis.
        reduce_axes = tuple(ax for ax in range(len(x.shape)) if ax != axis)
        batch_mean = np.mean(x, axis=reduce_axes)
        batch_var = np.var(x, axis=reduce_axes)
        out = (x - _bcast(batch_mean)) / np.sqrt(_bcast(batch_var) + epsilon)
    else:
        out = (x - _bcast(moving_mean)) / np.sqrt(_bcast(moving_var) + epsilon)

    if scale:
        out = out * _bcast(gamma)
    if center:
        out = out + _bcast(beta)

    if not training:
        return [out, moving_mean, moving_var]
    return [
        out,
        (1 - momentum) * moving_mean + momentum * batch_mean,
        (1 - momentum) * moving_var + momentum * batch_var,
    ]
| 3,367 | 28.286957 | 70 | py |
tvm | tvm-main/python/tvm/topi/testing/adaptive_pool_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, unused-variable
"""adaptive pool in python"""
import numpy as np
def _start_index(index, odim, idim):
return int(np.floor(index * idim / odim))
def _end_index(index, odim, idim):
return int(np.ceil((index + 1) * idim / odim))
def _pool1d(in_size, out_size, np_data, np_op):
    # Reduce each adaptive window of a 1-D input with np_op.
    result = np.zeros(out_size).astype(np_data.dtype)
    out_w = out_size[0]
    for ox in range(out_w):
        begin = _start_index(ox, out_w, in_size[0])
        end = _end_index(ox, out_w, in_size[0])
        result[ox] = np_op(np_data[begin:end])
    return result
def _pool2d(in_size, out_size, np_data, np_op):
    # Reduce each adaptive 2-D window of the input with np_op.
    result = np.zeros(out_size).astype(np_data.dtype)
    out_h, out_w = out_size
    for oy in range(out_h):
        y0 = _start_index(oy, out_h, in_size[0])
        y1 = _end_index(oy, out_h, in_size[0])
        for ox in range(out_w):
            x0 = _start_index(ox, out_w, in_size[1])
            x1 = _end_index(ox, out_w, in_size[1])
            result[oy, ox] = np_op(np_data[y0:y1, x0:x1])
    return result
def _pool3d(in_size, out_size, np_data, np_op):
    # Reduce each adaptive 3-D window of the input with np_op.
    result = np.zeros(out_size).astype(np_data.dtype)
    out_d, out_h, out_w = out_size
    for oz in range(out_d):
        z0 = _start_index(oz, out_d, in_size[0])
        z1 = _end_index(oz, out_d, in_size[0])
        for oy in range(out_h):
            y0 = _start_index(oy, out_h, in_size[1])
            y1 = _end_index(oy, out_h, in_size[1])
            for ox in range(out_w):
                x0 = _start_index(ox, out_w, in_size[2])
                x1 = _end_index(ox, out_w, in_size[2])
                result[oz, oy, ox] = np_op(np_data[z0:z1, y0:y1, x0:x1])
    return result
def adaptive_pool_channel_first(np_data, out_size, pool_op, np_op):
    """Reference adaptive pooling for channel-first (NC...) layouts."""
    batch, channels = np_data.shape[:2]
    spatial = np_data.shape[2:]
    result = np.zeros((batch, channels) + out_size).astype(np_data.dtype)
    # Pool every (batch, channel) spatial slice independently.
    for b in range(batch):
        for ch in range(channels):
            result[b, ch] = pool_op(spatial, out_size, np_data[b, ch], np_op)
    return result
def adaptive_pool_channel_last(np_data, out_size, pool_op, np_op):
    """Reference adaptive pooling for channel-last (N...C) layouts."""
    ishape = np_data.shape
    batch, channels = ishape[0], ishape[-1]
    spatial = ishape[1:-1]
    result = np.zeros((batch,) + out_size + (channels,)).astype(np_data.dtype)
    ndim = len(out_size)
    # Pool every (batch, channel) spatial slice independently; the slice
    # expression depends on the number of spatial dims (1, 2 or 3).
    for b in range(batch):
        for ch in range(channels):
            if ndim == 1:
                result[b, :, ch] = pool_op(spatial, out_size, np_data[b, :, ch], np_op)
            elif ndim == 2:
                result[b, :, :, ch] = pool_op(spatial, out_size, np_data[b, :, :, ch], np_op)
            else:
                result[b, :, :, :, ch] = pool_op(
                    spatial, out_size, np_data[b, :, :, :, ch], np_op
                )
    return result
def adaptive_pool(np_data, out_size, pool_type, layout):
    """Reference adaptive pooling for 1-3 spatial dims and both layouts."""
    if isinstance(out_size, int):
        out_size = (out_size,)
    ndim = len(out_size)
    assert ndim in (1, 2, 3)
    # Dispatch on the number of spatial dimensions.
    pool_op = {1: _pool1d, 2: _pool2d, 3: _pool3d}[ndim]
    np_op = np.mean if pool_type == "avg" else np.max
    if layout in ("NCW", "NCHW", "NCDHW"):
        return adaptive_pool_channel_first(np_data, out_size, pool_op, np_op)
    assert layout in ("NWC", "NHWC", "NDHWC")
    return adaptive_pool_channel_last(np_data, out_size, pool_op, np_op)
| 4,515 | 34.007752 | 96 | py |
tvm | tvm-main/python/tvm/topi/testing/conv2d_hwcn_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Convolution in python"""
import numpy as np
import scipy.signal
from tvm.topi.nn.utils import get_pad_tuple
def conv2d_hwcn_python(a_np, w_np, stride, padding):
    """Convolution operator in HWCN layout.

    Parameters
    ----------
    a_np : numpy.ndarray
        4-D with shape [in_height, in_width, in_channel, batch]
    w_np : numpy.ndarray
        4-D with shape [filter_height, filter_width, in_channel, num_filter]
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int or str or a list/tuple of 2 or 4 ints
        Padding size, or ['VALID', 'SAME'], or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints

    Returns
    -------
    b_np : np.ndarray
        4-D with shape [out_height, out_width, out_channel, batch]
    """
    in_height, in_width, in_channel, batch = a_np.shape
    kernel_h, kernel_w, _, num_filter = w_np.shape
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel_h, kernel_w))
    pad_h = pad_top + pad_bottom
    pad_w = pad_left + pad_right
    out_height = (in_height + pad_h - kernel_h) // stride_h + 1
    out_width = (in_width + pad_w - kernel_w) // stride_w + 1
    # Work in NCHW internally, then transpose the result back to HWCN.
    a_nchw = a_np.transpose((3, 2, 0, 1))
    w_oihw = w_np.transpose((3, 2, 0, 1))
    out_nchw = np.zeros((batch, num_filter, out_height, out_width))
    for n in range(batch):
        for f in range(num_filter):
            for c in range(in_channel):
                if pad_h > 0 or pad_w > 0:
                    padded = np.zeros((in_height + pad_h, in_width + pad_w))
                    padded[
                        pad_top : pad_top + in_height, pad_left : pad_left + in_width
                    ] = a_nchw[n, c]
                else:
                    padded = a_nchw[n, c]
                # convolve2d flips the kernel; rotating it 180 degrees first
                # turns the operation into cross-correlation (NN convolution).
                conv = scipy.signal.convolve2d(padded, np.rot90(w_oihw[f, c], 2), mode="valid")
                out_nchw[n, f] += conv[::stride_h, ::stride_w]
    return out_nchw.transpose((2, 3, 1, 0))
| 3,068 | 38.346154 | 98 | py |
tvm | tvm-main/python/tvm/topi/testing/pool_grad_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, unused-variable
"""Gradient of pooling in python"""
import numpy as np
def pool_grad_nchw(
    a_np, out_grad_np, pool_size, strides, padding, pool_type, ceil_mode, count_include_pad=True
):
    """pool_grad for NCHW layout in python.

    Scatters the output gradient ``out_grad_np`` back onto the input grid:
    for "avg" pooling each window's gradient is divided evenly among the
    elements it covered; for "max" pooling the whole gradient goes to the
    window's argmax element. ``ceil_mode`` is accepted but not used here —
    the output extent is taken from ``out_grad_np.shape``.
    """
    dtype = a_np.dtype
    n, ic, ih, iw = a_np.shape
    kh, kw = pool_size
    sh, sw = strides
    pt, pl, pb, pr = padding
    # Zero-padded copy of the input; gradients are accumulated on this
    # padded grid first and cropped back at the end.
    pad_np = np.zeros(shape=(n, ic, ih + pt + pb, iw + pl + pr)).astype(dtype)
    no_zero = (range(n), range(ic), (range(pt, ih + pt)), (range(pl, iw + pl)))
    pad_np[np.ix_(*no_zero)] = a_np
    _, _, oh, ow = out_grad_np.shape
    pool_grad_np = np.zeros(shape=a_np.shape)
    pad_pool_grad_np = np.zeros(shape=pad_np.shape)
    if pool_type == "avg":
        for i in range(oh):
            for j in range(ow):
                if count_include_pad:
                    shape = pad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw].shape
                    # this can be different from kh*kw if input size cannot divide stride
                    pad_count = shape[2] * shape[3]
                else:
                    # NOTE(review): counts strictly positive entries as
                    # "non-padding" — zero-valued *input* elements are also
                    # excluded from the divisor, matching the original code.
                    pad_count = np.sum(
                        pad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw] > 0, axis=(2, 3)
                    )
                    # take the first element, as they are the same across batch and channel
                    pad_count = pad_count.ravel()[0]
                pad_pool_grad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw] += out_grad_np[
                    :, :, i, j
                ].reshape(n, ic, 1, 1) / np.maximum(pad_count, 1)
    elif pool_type == "max":
        for i in range(oh):
            for j in range(ow):
                # Flatten each window, find the per-(n, c) argmax, and add
                # the gradient only at that position via fancy indexing.
                a_patch = pad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw]
                a_patch = np.reshape(a_patch, (n, ic, -1))
                max_indices = np.argmax(a_patch, axis=2)
                c_idx, n_idx = np.meshgrid(range(ic), range(n), sparse=True)
                h_idx, w_idx = np.unravel_index(max_indices, (kh, kw))
                pad_pool_grad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw][
                    n_idx, c_idx, h_idx, w_idx
                ] += out_grad_np[n_idx, c_idx, i, j]
    # Crop the padded gradient back to the original input extent.
    for i in range(pool_grad_np.shape[2]):
        for j in range(pool_grad_np.shape[3]):
            pool_grad_np[:, :, i, j] = pad_pool_grad_np[:, :, i + pt, j + pl]
    return pool_grad_np
| 3,226 | 44.450704 | 98 | py |
tvm | tvm-main/python/tvm/topi/testing/matrix_set_diag.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""MatrixSetDiag in Python"""
import numpy as np
def matrix_set_diag(input_np, diagonal, k=0, align="RIGHT_LEFT"):
    """matrix_set_diag operator implemented in numpy.

    Returns a copy of ``input_np`` with the selected diagonal(s) of its
    innermost 2-D matrices overwritten by ``diagonal``.

    Parameters
    ----------
    input_np : numpy.ndarray
        Input array, shape [D1, ..., Dn-1, Dn].
    diagonal : numpy.ndarray
        Replacement diagonal values.
    k : int or tuple of int
        Diagonal offset(s); a (lo, hi) tuple selects a band of diagonals.
    align : string
        How shorter-than-max diagonals are padded; one of
        "RIGHT_LEFT" (default), "LEFT_RIGHT", "LEFT_LEFT", "RIGHT_RIGHT"
        (superdiagonal alignment first, subdiagonal second).

    Returns
    -------
    result : numpy.ndarray
        New array with the given diagonal values, same shape as input.
    """
    result = np.array(input_np, copy=True)
    n_rows, n_cols = input_np.shape[-2], input_np.shape[-1]

    # A (lo, hi) tuple with lo != hi means a band of diagonals; anything
    # else collapses to the single-diagonal case.
    single = True
    if isinstance(k, (tuple, list)):
        if len(k) >= 2 and k[0] != k[1]:
            single = False
        else:
            k = k[0]

    if single:
        for idx in range(diagonal.shape[-1]):
            row, col = (idx, idx + k) if k >= 0 else (idx - k, idx)
            result[..., row, col] = diagonal[..., idx]
        return result

    lo, hi = k[0], k[1]
    for offset_k in range(lo, hi + 1):
        diag_len = min(n_cols - max(offset_k, 0), n_rows + min(offset_k, 0))
        superdiag = offset_k >= 0
        # Shorter diagonals are padded; the align string decides whether the
        # payload sits at the left or the right of its row in `diagonal`.
        pad = 0
        if superdiag:
            if align.startswith("RIGHT"):
                pad = diagonal.shape[-1] - diag_len
        elif align.endswith("RIGHT"):
            pad = diagonal.shape[-1] - diag_len
        for idx in range(diag_len):
            row, col = (idx, idx + offset_k) if superdiag else (idx - offset_k, idx)
            result[..., row, col] = diagonal[..., hi - offset_k, idx + pad]
    return result
| 2,837 | 32.388235 | 78 | py |
tvm | tvm-main/python/tvm/topi/testing/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Dense in python"""
import numpy as np
def dense(x, y, bias, use_bias=False, use_relu=False, out_dtype=None):
    """dense operator implemented in numpy.

    Computes ``out = x @ y.T``, optionally adds ``bias`` and applies ReLU.

    Parameters
    ----------
    x : numpy.ndarray
        2-D with shape [M, K]
    y : numpy.ndarray
        2-D with shape [N, K]
    bias : numpy.ndarray
        1-D with shape [N,] (broadcast over the rows of the [M, N] product);
        only read when ``use_bias`` is True. The original docstring said
        [M,], which does not match the broadcasting actually performed.
    use_bias : bool, optional
        Whether to add ``bias`` to the matrix product.
    use_relu : bool, optional
        Whether to apply an elementwise ReLU to the result.
    out_dtype : string, optional
        Dtype the operands are cast to before the product; defaults to
        ``x.dtype``.

    Returns
    -------
    out : numpy.ndarray
        2-D with shape [M, N]
    """
    dtype = x.dtype if out_dtype is None else out_dtype
    out = np.dot(x.astype(dtype), y.T.astype(dtype))
    if use_bias:
        out = out + bias
    if use_relu:
        out = np.maximum(out, 0)
    return out
| 1,590 | 28.462963 | 70 | py |
tvm | tvm-main/python/tvm/topi/testing/one_hot.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""OneHot in python"""
import numpy as np
def one_hot(indices, on_value, off_value, depth, axis, dtype):
    """one_hot operator implemented in numpy.

    Builds a tensor where positions selected by ``indices`` hold
    ``on_value`` and every other position holds ``off_value``. The output
    shape is ``indices.shape`` with ``depth`` inserted at ``axis``.

    Parameters
    ----------
    indices : numpy.ndarray
        Locations to set to on_value.
    on_value : int/float
        Value to fill at indices.
    off_value : int/float
        Value to fill at all other positions.
    depth : int
        Depth of the one-hot dimension.
    axis : int
        Axis to fill (-1 means a new trailing axis).
    dtype : str
        Data type of the output tensor.

    Returns
    -------
    ret : numpy.ndarray
        The one-hot tensor.
    """
    real_axis = len(indices.shape) if axis == -1 else axis
    out_shape = list(indices.shape)
    out_shape.insert(real_axis, depth)

    result = np.empty(out_shape)
    for pos in np.ndindex(*out_shape):
        # Drop the one-hot axis to recover the matching index into `indices`.
        src_pos = pos[:real_axis] + pos[real_axis + 1 :]
        hot = indices[src_pos] == pos[real_axis]
        result[pos] = on_value if hot else off_value
    return result.astype(dtype)
| 2,510 | 30 | 90 | py |
tvm | tvm-main/python/tvm/topi/testing/resize_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Upsampling in python"""
import math
import numpy as np
from tvm.topi.utils import nchw_pack_layout
def get_inx(x, image_width, target_width, coordinate_transformation_mode):
    """Map an output coordinate back to (fractional) input-space coordinates.

    Supports the "half_pixel", "align_corners" and "asymmetric" conventions;
    any other mode raises ValueError.
    """
    scale = image_width / target_width
    if coordinate_transformation_mode == "half_pixel":
        return (x + 0.5) * scale - 0.5
    if coordinate_transformation_mode == "align_corners":
        if target_width <= 1:
            return 0
        return (image_width - 1) / (target_width - 1) * x
    if coordinate_transformation_mode == "asymmetric":
        return scale * x
    raise ValueError(
        f"Unsupported coordinate_transformation_mode: {coordinate_transformation_mode}"
    )
def get_index(x, image_width, target_width, coordinate_transformation_mode):
    """Nearest-neighbor source index for an output coordinate, clamped to bounds."""
    in_x = get_inx(x, image_width, target_width, coordinate_transformation_mode)
    if coordinate_transformation_mode == "align_corners":
        # round half up ("round prefer ceil")
        nearest = int(math.floor(in_x + 0.5))
    else:
        nearest = int(math.floor(in_x))
    return min(max(nearest, 0), image_width - 1)
def resize3d_nearest(arr, scale, coordinate_transformation_mode):
    """Nearest-neighbor 3-D resize of a (d, h, w) array by per-axis scales."""
    in_d, in_h, in_w = arr.shape
    out_d, out_h, out_w = (int(round(dim * s)) for dim, s in zip(arr.shape, scale))
    result = np.empty((out_d, out_h, out_w))
    for oz in range(out_d):
        src_z = get_index(oz, in_d, out_d, coordinate_transformation_mode)
        for oy in range(out_h):
            src_y = get_index(oy, in_h, out_h, coordinate_transformation_mode)
            for ox in range(out_w):
                src_x = get_index(ox, in_w, out_w, coordinate_transformation_mode)
                result[oz, oy, ox] = arr[src_z, src_y, src_x]
    return result
def resize3d_linear(data_in, scale, coordinate_transformation_mode):
    """Trilinear 3d scaling using python.

    For each output voxel, maps back to a fractional input coordinate,
    gathers the surrounding 2x2x2 neighborhood (edge-clamped), and blends
    it with per-axis linear weights.
    """
    dtype = data_in.dtype
    d, h, w = data_in.shape
    new_d, new_h, new_w = [int(round(i * s)) for i, s in zip(data_in.shape, scale)]
    data_out = np.ones((new_d, new_h, new_w))
    # 2x2x2 grid of relative offsets for the interpolation neighborhood.
    indexes = np.mgrid[0:2, 0:2, 0:2]
    def _get_patch(zint, yint, xint):
        # Get the surrounding values, clamping indices to the array bounds.
        indices = indexes.copy()
        indices[0] = np.maximum(np.minimum(indexes[0] + zint, d - 1), 0)
        indices[1] = np.maximum(np.minimum(indexes[1] + yint, h - 1), 0)
        indices[2] = np.maximum(np.minimum(indexes[2] + xint, w - 1), 0)
        p = data_in[indices[0], indices[1], indices[2]]
        return p
    for m in range(new_d):
        for j in range(new_h):
            for k in range(new_w):
                # Fractional source coordinate for this output voxel.
                in_z = get_inx(m, d, new_d, coordinate_transformation_mode)
                in_y = get_inx(j, h, new_h, coordinate_transformation_mode)
                in_x = get_inx(k, w, new_w, coordinate_transformation_mode)
                # Split into integer base and fractional part per axis.
                zint = math.floor(in_z)
                zfract = in_z - math.floor(in_z)
                yint = math.floor(in_y)
                yfract = in_y - math.floor(in_y)
                xint = math.floor(in_x)
                xfract = in_x - math.floor(in_x)
                # Linear blending weights along each axis.
                wz = np.array([1.0 - zfract, zfract], dtype=dtype)
                wy = np.array([1.0 - yfract, yfract], dtype=dtype)
                wx = np.array([1.0 - xfract, xfract], dtype=dtype)
                p = _get_patch(zint, yint, xint)
                # Contract the patch one axis at a time: x, then y, then z.
                l = np.sum(p * wx, axis=-1)
                col = np.sum(l * wy, axis=-1)
                data_out[m, j, k] = np.sum(col * wz)
    return data_out
def resize3d_cubic(data_in, scale, coordinate_transformation_mode):
    """Tricubic 3d scaling using python.

    Like ``resize3d_linear`` but blends a 4x4x4 edge-clamped neighborhood
    with Keys cubic-spline weights (alpha = -0.5).
    """
    dtype = data_in.dtype
    d, h, w = data_in.shape
    new_d, new_h, new_w = [int(round(i * s)) for i, s in zip(data_in.shape, scale)]
    data_out = np.ones((new_d, new_h, new_w))
    def _cubic_spline_weights(t, alpha=-0.5):
        """create cubic spline weights in 1D"""
        t2 = t * t
        t3 = t * t * t
        w1 = alpha * (t3 - 2 * t2 + t)
        w2 = (alpha + 2) * t3 - (3 + alpha) * t2 + 1
        w3 = -(alpha + 2) * t3 + (3 + 2 * alpha) * t2 - alpha * t
        w4 = -alpha * t3 + alpha * t2
        return np.array([w1, w2, w3, w4])
    # 4x4x4 grid of relative offsets (-1..2) for the cubic neighborhood.
    indexes = np.mgrid[-1:3, -1:3, -1:3]
    def _get_patch(zint, yint, xint):
        # Get the surrounding values, clamping indices to the array bounds.
        indices = indexes.copy()
        indices[0] = np.maximum(np.minimum(indexes[0] + zint, d - 1), 0)
        indices[1] = np.maximum(np.minimum(indexes[1] + yint, h - 1), 0)
        indices[2] = np.maximum(np.minimum(indexes[2] + xint, w - 1), 0)
        p = data_in[indices[0], indices[1], indices[2]]
        return p
    for m in range(new_d):
        for j in range(new_h):
            for k in range(new_w):
                # Fractional source coordinate for this output voxel.
                in_z = get_inx(m, d, new_d, coordinate_transformation_mode)
                in_y = get_inx(j, h, new_h, coordinate_transformation_mode)
                in_x = get_inx(k, w, new_w, coordinate_transformation_mode)
                zint = math.floor(in_z)
                zfract = in_z - math.floor(in_z)
                yint = math.floor(in_y)
                yfract = in_y - math.floor(in_y)
                xint = math.floor(in_x)
                xfract = in_x - math.floor(in_x)
                wz = _cubic_spline_weights(zfract)
                wy = _cubic_spline_weights(yfract)
                wx = _cubic_spline_weights(xfract)
                p = _get_patch(zint, yint, xint)
                # Contract the patch one axis at a time: x, then y, then z.
                l = np.sum(p * wx, axis=-1)
                col = np.sum(l * wy, axis=-1)
                data_out[m, j, k] = np.sum(col * wz)
    return data_out
def resize3d_ncdhw(
    data, scale, method="nearest_neighbor", coordinate_transformation_mode="align_corners"
):
    """Reference kernel for 3-D image resizing of an NCDHW batch.

    Applies the selected per-slice resize ("nearest_neighbor", "linear" or
    "cubic") to every (batch, channel) volume independently.
    """
    batch, channel = data.shape[0], data.shape[1]
    new_spatial = tuple(int(round(dim * s)) for dim, s in zip(data.shape[2:], scale))
    out = np.zeros((batch, channel) + new_spatial, dtype=data.dtype)
    for b in range(batch):
        for c in range(channel):
            if method == "nearest_neighbor":
                fn = resize3d_nearest
            elif method == "linear":
                fn = resize3d_linear
            elif method == "cubic":
                fn = resize3d_cubic
            else:
                raise ValueError("Unknown resize method", method)
            out[b, c, :, :, :] = fn(data[b, c, :, :, :], scale, coordinate_transformation_mode)
    return out
def resize1d_python(
    data,
    scale,
    layout="NCW",
    method="nearest_neighbor",
    coordinate_transformation_mode="align_corners",
):
    """Reference 1D resize: lifts the input to NCDHW, reuses the 3D kernel,
    then removes the two dummy spatial axes again."""
    channels_last = layout == "NWC"
    if channels_last:
        data = data.transpose([0, 2, 1])
    lifted = np.expand_dims(data, axis=[2, 3])
    resized = resize3d_ncdhw(lifted, (1, 1) + scale, method, coordinate_transformation_mode)
    for _ in range(2):
        resized = np.squeeze(resized, axis=2)
    if channels_last:
        resized = resized.transpose([0, 2, 1])
    return resized
def resize2d_python(
    data,
    scale,
    layout="NCHW",
    method="nearest_neighbor",
    coordinate_transformation_mode="align_corners",
):
    """Reference 2D resize built on top of the 3D NCDHW kernel.

    Handles plain NCHW/NHWC as well as packed NCHWc-style layouts
    (detected via ``nchw_pack_layout``).
    """
    is_nhwc = layout == "NHWC"
    is_packed = (not is_nhwc) and nchw_pack_layout(layout)
    tshape = None
    if is_nhwc:
        data = data.transpose([0, 3, 1, 2])
    elif is_packed:
        # Fold the packed inner blocks into batch/channel so data is NCHW.
        transposed = data.transpose([0, 4, 1, 5, 2, 3])
        tshape = transposed.shape
        data = transposed.reshape(
            tshape[0] * tshape[1], tshape[2] * tshape[3], tshape[4], tshape[5]
        )
    lifted = np.expand_dims(data, axis=2)
    resized = resize3d_ncdhw(lifted, (1,) + scale, method, coordinate_transformation_mode)
    resized = np.squeeze(resized, axis=2)
    if is_nhwc:
        resized = resized.transpose([0, 2, 3, 1])
    elif is_packed:
        # Unfold batch/channel back into the packed layout.
        resized = resized.reshape(tshape[0:4] + resized.shape[2:])
        resized = resized.transpose([0, 2, 4, 5, 1, 3])
    return resized
def resize3d_python(
    data,
    scale,
    layout="NCDHW",
    method="nearest_neighbor",
    coordinate_transformation_mode="align_corners",
):
    """Reference 3D resize; NDHWC inputs are transposed to NCDHW, resized,
    and transposed back."""
    channels_last = layout == "NDHWC"
    if channels_last:
        data = data.transpose([0, 4, 1, 2, 3])
    resized = resize3d_ncdhw(data, scale, method, coordinate_transformation_mode)
    return resized.transpose([0, 2, 3, 4, 1]) if channels_last else resized
| 9,851 | 34.566787 | 92 | py |
tvm | tvm-main/python/tvm/topi/testing/roi_align_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-nested-blocks
"Roi align in python"
import math
import numpy as np
def _bilinear(a_np, n, c, y, x, height, width, layout):
if y < -1 or y > height or x < -1 or x > width:
return 0
y = min(max(y, 0), height - 1)
x = min(max(x, 0), width - 1)
y_low = int(math.floor(y))
x_low = int(math.floor(x))
y_high = y_low + 1
x_high = x_low + 1
wy_h = y - y_low
wx_h = x - x_low
wy_l = 1 - wy_h
wx_l = 1 - wx_h
val = 0
for wx, xp in zip((wx_l, wx_h), (x_low, x_high)):
for wy, yp in zip((wy_l, wy_h), (y_low, y_high)):
if 0 <= yp < height and 0 <= xp < width:
if layout == "NCHW":
val += wx * wy * a_np[n, c, yp, xp]
else:
val += wx * wy * a_np[n, yp, xp, c]
return val
def roi_align_common(
    a_np,
    b_np,
    rois_np,
    channel,
    pooled_size_h,
    pooled_size_w,
    spatial_scale,
    sample_ratio,
    avg_mode,
    max_mode,
    height,
    width,
    layout,
):
    """Shared reference implementation for ROI align (NCHW and NHWC).

    Fills b_np in place and returns it; exactly one of avg_mode / max_mode
    is expected to be true (the public wrappers assert this).
    """
    for roi_idx in range(rois_np.shape[0]):
        roi = rois_np[roi_idx]
        batch = int(roi[0])
        w_start, h_start, w_end, h_end = roi[1:] * spatial_scale
        # Degenerate boxes are forced to cover at least one pixel.
        roi_h = max(h_end - h_start, 1.0)
        roi_w = max(w_end - w_start, 1.0)
        bin_h = roi_h / pooled_size_h
        bin_w = roi_w / pooled_size_w
        # Sampling grid per output bin: fixed, or adaptive when ratio <= 0.
        if sample_ratio > 0:
            grid_h = grid_w = int(sample_ratio)
        else:
            grid_h = int(math.ceil(roi_h / pooled_size_h))
            grid_w = int(math.ceil(roi_w / pooled_size_w))
        count = grid_h * grid_w
        for c in range(channel):
            for ph in range(pooled_size_h):
                for pw in range(pooled_size_w):
                    if avg_mode:
                        acc = 0.0
                    if max_mode:
                        acc = float("-inf")
                    for iy in range(grid_h):
                        for ix in range(grid_w):
                            # Sample point at the centre of each grid cell.
                            y = h_start + ph * bin_h + (iy + 0.5) * bin_h / grid_h
                            x = w_start + pw * bin_w + (ix + 0.5) * bin_w / grid_w
                            sample = _bilinear(a_np, batch, c, y, x, height, width, layout)
                            if avg_mode:
                                acc += sample / count
                            if max_mode:
                                acc = max(acc, sample)
                    if layout == "NCHW":
                        b_np[roi_idx, c, ph, pw] = acc
                    else:
                        b_np[roi_idx, ph, pw, c] = acc
    return b_np
def roi_align_nchw_python(a_np, rois_np, pooled_size, spatial_scale, sample_ratio, mode=b"avg"):
    """Reference ROI align for NCHW inputs; thin wrapper over roi_align_common."""
    avg_mode = mode in (b"avg", "avg", 0)
    max_mode = mode in (b"max", "max", 1)
    assert avg_mode or max_mode, "Mode must be average or max. Please pass a valid mode."
    _, channel, height, width = a_np.shape
    if isinstance(pooled_size, int):
        pooled_h = pooled_w = pooled_size
    else:
        pooled_h, pooled_w = pooled_size
    b_np = np.zeros((rois_np.shape[0], channel, pooled_h, pooled_w), dtype=a_np.dtype)
    return roi_align_common(
        a_np,
        b_np,
        rois_np,
        channel,
        pooled_h,
        pooled_w,
        spatial_scale,
        sample_ratio,
        avg_mode,
        max_mode,
        height,
        width,
        "NCHW",
    )
def roi_align_nhwc_python(a_np, rois_np, pooled_size, spatial_scale, sample_ratio, mode=b"avg"):
    """Reference ROI align for NHWC inputs; thin wrapper over roi_align_common."""
    avg_mode = mode in (b"avg", "avg", 0)
    max_mode = mode in (b"max", "max", 1)
    assert avg_mode or max_mode, "Mode must be average or max. Please pass a valid mode."
    _, height, width, channel = a_np.shape
    if isinstance(pooled_size, int):
        pooled_h = pooled_w = pooled_size
    else:
        pooled_h, pooled_w = pooled_size
    b_np = np.zeros((rois_np.shape[0], pooled_h, pooled_w, channel), dtype=a_np.dtype)
    return roi_align_common(
        a_np,
        b_np,
        rois_np,
        channel,
        pooled_h,
        pooled_w,
        spatial_scale,
        sample_ratio,
        avg_mode,
        max_mode,
        height,
        width,
        "NHWC",
    )
| 5,625 | 30.965909 | 97 | py |
tvm | tvm-main/python/tvm/topi/testing/batch_to_space_nd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Batch to space ND in python"""
import numpy as np
from . import strided_slice_python
def batch_to_space_nd_python(data, block_shape, crop_begin_list, crop_end_list):
    """Batch to Space operator in python for NHWC layout.

    Parameters
    ----------
    data : np.ndarray
        N-D with shape [batch, spatial_shape, remaining_shapes],
        where spatial_shape has M dimensions.

    block_shape : list of ints
        1-D array of size [M] where M is number of spatial dims, specifies block
        size for each spatial dimension.

    crop_begin_list : list of ints
        list of shape [M] where M is number of spatial dims, specifies
        begin crop size for each spatial dimension.

    crop_end_list : list of ints
        list of shape [M] where M is number of spatial dims, specifies
        end crop size for each spatial dimension.

    Returns
    -------
    b2s_out : np.ndarray
        N-D with shape
        [batch / prod(block_shape),
        in_shape[1] * block_shape[0] - crop_begin_list[0] - crop_end_list[0], ...,
        in_shape[M] * block_shape[M-1] - crop_begin_list[M-1] - crop_end_list[M-1],
        remaining_shape]
    """
    in_shape = data.shape
    N = len(in_shape)  # total rank of the input
    M = len(block_shape)  # number of spatial dimensions
    block_shape_prod = np.prod(block_shape)
    in_batch = data.shape[0]
    # `r_shape` is the intermediate shape that splits the batch into
    # [block_0, ..., block_{M-1}, batch / prod(block_shape)];
    # `axis` is the permutation that interleaves each block axis with its
    # matching spatial axis.
    axis = []
    r_p_shape = []
    r_shape = [block_shape[i] for i in range(0, M)]
    axis.append(len(r_shape))  # first permuted axis: batch / prod(block_shape)
    r_shape.append(in_batch // block_shape_prod)
    for i in range(1, N):
        axis.append(len(r_shape))  # axis holding in_shape[i]
        if len(axis) < (M + N):
            # For spatial dims, follow each in_shape[i] axis with its block
            # axis so the later reshape merges them into one enlarged dim.
            axis.append(len(r_shape) - (M + 1))
        r_shape.append(in_shape[i])
    # Shape after the transpose: spatial dims grow by their block factor,
    # remaining dims pass through unchanged.
    r_p_shape.append(int((in_batch / block_shape_prod)))
    for i in range(1, M + 1):
        r_p_shape.append(in_shape[i] * block_shape[i - 1])
    for i in range(M + 1, N):
        r_p_shape.append(in_shape[i])
    b2s_out = np.reshape(data, newshape=r_shape)
    b2s_out = np.transpose(b2s_out, axes=axis)
    b2s_out = np.reshape(b2s_out, newshape=r_p_shape)
    # Crop the start and end of dimensions of b2s_out
    begin_idx = []
    end_idx = []
    strides = []
    for i, _ in enumerate(r_p_shape):
        strides.append(1)
        if 0 < i <= M:
            # begin and end index for spatial dimensions
            begin_idx.append(crop_begin_list[i - 1])
            end_idx.append(r_p_shape[i] - crop_end_list[i - 1])
        else:
            # batch and remaining (non-spatial) dims are never cropped
            begin_idx.append(0)
            end_idx.append(r_p_shape[i])
    b2s_out = strided_slice_python(b2s_out, begin_idx, end_idx, strides)
    return b2s_out
| 3,439 | 34.102041 | 83 | py |
tvm | tvm-main/python/tvm/topi/testing/gather_nd_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""gather_nd in python"""
import numpy as np
def gather_nd_python(a_np, indices_np):
    """Python reference for the GatherND operator.

    Parameters
    ----------
    a_np : numpy.ndarray
        Data array to gather from.

    indices_np : numpy.ndarray
        Index array; the leading dimension selects how many axes of ``a_np``
        are indexed, the remaining dimensions form the output grid.

    Returns
    -------
    b_np : numpy.ndarray
        Gathered output of shape indices_np.shape[1:] + a_np.shape[K:],
        where K = indices_np.shape[0].
    """
    a_shape = a_np.shape
    indices_np = indices_np.astype("int32")
    indices_shape = indices_np.shape
    assert len(indices_shape) > 1
    assert indices_shape[0] <= len(a_shape)
    # Trailing data axes that are not indexed are carried over wholesale.
    out_shape = list(indices_shape[1:]) + list(a_shape[indices_shape[0] :])
    b_np = np.zeros(out_shape)
    for idx in np.ndindex(*indices_shape[1:]):
        gather_index = tuple(indices_np[(dim,) + idx] for dim in range(indices_shape[0]))
        b_np[idx] = a_np[gather_index]
    return b_np
| 1,801 | 32.37037 | 79 | py |
tvm | tvm-main/python/tvm/topi/testing/correlation_nchw_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Convolution 3D in python"""
import numpy as np
def correlation_nchw_python(
    data1, data2, kernel_size, max_displacement, stride1, stride2, padding, is_multiply
):
    """Correlation operator in NCHW layout.

    Parameters
    ----------
    data1 : numpy.ndarray
        4-D with shape [batch, in_channel, in_height, in_width]

    data2 : numpy.ndarray
        4-D with shape [batch, in_channel, in_height, in_width]

    kernel_size : int
        Kernel size for correlation, must be an odd number

    max_displacement : int
        Max displacement of Correlation

    stride1 : int
        Stride for data1

    stride2 : int
        Stride for data2 within the neighborhood centered around data1

    padding : int
        Padding for correlation

    is_multiply : bool
        operation type is either multiplication or subtraction

    Returns
    -------
    out : np.ndarray
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    batch, channels, in_h, in_w = data1.shape
    padded_h = in_h + 2 * padding
    padded_w = in_w + 2 * padding
    kernel_radius = (kernel_size - 1) // 2
    border = max_displacement + kernel_radius
    out_w = (padded_w - 2 * border) // stride1
    out_h = (padded_h - 2 * border) // stride1
    # Displacement grid around each data1 position.
    grid_radius = max_displacement // stride2
    grid_width = 2 * grid_radius + 1
    out_channel = grid_width * grid_width
    out = np.zeros((batch, out_channel, out_h, out_w))
    # Zero-padded (float64) working copies of both inputs.
    pad1 = np.zeros((batch, channels, padded_h, padded_w))
    pad2 = np.zeros((batch, channels, padded_h, padded_w))
    pad1[:, :, padding : padding + data1.shape[2], padding : padding + data1.shape[3]] = data1[
        :, :, :, :
    ]
    pad2[:, :, padding : padding + data2.shape[2], padding : padding + data2.shape[3]] = data2[
        :, :, :, :
    ]
    if is_multiply:
        corr = lambda a, b: a * b
    else:
        corr = lambda a, b: abs(a - b)
    # pylint: disable=too-many-nested-blocks
    for i in range(out_h):
        for j in range(out_w):
            for nbatch in range(batch):
                # (y1, x1): anchor position in pad1 for output pixel (i, j).
                y1 = i * stride1 + max_displacement
                x1 = j * stride1 + max_displacement
                for q in range(out_channel):
                    # (y2, x2): displaced position in pad2 for channel q.
                    x2 = x1 + (q % grid_width - grid_radius) * stride2
                    y2 = y1 + (q // grid_width - grid_radius) * stride2
                    for dy in range(kernel_size):
                        for dx in range(kernel_size):
                            for ch in range(channels):
                                out[nbatch, q, i, j] += corr(
                                    pad1[nbatch, ch, y1 + dy, x1 + dx],
                                    pad2[nbatch, ch, y2 + dy, x2 + dx],
                                )
    out /= float(kernel_size**2 * channels)
    return out
| 4,133 | 37.277778 | 100 | py |
tvm | tvm-main/python/tvm/topi/testing/depthwise_conv2d_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, line-too-long
"""Depthwise convolution in python"""
import numpy as np
from tvm.topi.nn.utils import get_pad_tuple
from .common import _convolve2d
def depthwise_conv2d_python_nchw(input_np, filter_np, stride, padding):
    """Depthwise convolution operator in NCHW layout.

    Parameters
    ----------
    input_np : numpy.ndarray
        4-D with shape [batch, in_channel, in_height, in_width]

    filter_np : numpy.ndarray
        4-D with shape [in_channel, channel_multiplier, filter_height, filter_width]

    stride : int or list / tuple of 2 ints
        Stride size, or [stride_height, stride_width]

    padding : str
        'VALID' or 'SAME'

    Returns
    -------
    output_np : np.ndarray
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    batch, in_channel, in_height, in_width = input_np.shape
    _, channel_multiplier, filter_height, filter_width = filter_np.shape
    stride_h, stride_w = (stride, stride) if isinstance(stride, int) else stride
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (filter_height, filter_width))
    pad_h = pad_top + pad_bottom
    pad_w = pad_left + pad_right
    out_channel = in_channel * channel_multiplier
    out_height = (in_height - filter_height + pad_h) // stride_h + 1
    out_width = (in_width - filter_width + pad_w) // stride_w + 1
    output_np = np.zeros((batch, out_channel, out_height, out_width))
    for b in range(batch):
        for oc in range(out_channel):
            ic = oc // channel_multiplier  # source input channel
            m = oc % channel_multiplier  # multiplier index within the channel
            plane = input_np[b, ic, :, :]
            if pad_h or pad_w:
                plane = np.pad(plane, [(pad_top, pad_bottom), (pad_left, pad_right)], "constant")
            # Correlation == convolution with a 180-degree rotated kernel.
            full = _convolve2d(plane, np.rot90(filter_np[ic, m, :, :], k=2))
            output_np[b, oc, :, :] = full[
                ::stride_h,
                ::stride_w,
            ]
    return output_np
def depthwise_conv2d_python_nchwc(input_np, filter_np, stride, padding):
    """Depthwise convolution operator in NCHWc layout.

    Parameters
    ----------
    input_np : numpy.ndarray
        5-D with shape [batch, in_channel_chunk, in_height, in_width, in_channel_block]

    filter_np : numpy.ndarray
        6-D with shape [out_channel_chunk, channel_multiplier_chunk,
                        filter_height, filter_width,
                        channel_multiplier_block, out_channel_block]

    stride : list / tuple of 2 ints
        [stride_height, stride_width]

    padding : str
        'VALID' or 'SAME'

    Returns
    -------
    output_np : np.ndarray
        5-D with shape [batch, out_channel_chunk, out_height, out_width, out_channel_block]
    """
    # Flatten the blocked layout down to plain NCHW.
    batch_size, ic_chunk, in_h, in_w, ic_block = input_np.shape
    input_nchw = input_np.transpose(0, 1, 4, 2, 3).reshape(
        batch_size, ic_chunk * ic_block, in_h, in_w
    )
    (
        oc_chunk,
        cm_chunk,
        filter_h,
        filter_w,
        cm_block,
        oc_block,
    ) = filter_np.shape
    filter_nchw = filter_np.transpose(0, 5, 1, 4, 2, 3).reshape(
        oc_chunk * oc_block,
        cm_chunk * cm_block,
        filter_h,
        filter_w,
    )
    # Run the plain NCHW reference, then re-block the result.
    output_nchw = depthwise_conv2d_python_nchw(input_nchw, filter_nchw, stride, padding)
    # pylint: disable=unpacking-non-sequence
    out_batch, _, out_h, out_w = output_nchw.shape
    return output_nchw.reshape(
        out_batch, oc_chunk, oc_block, out_h, out_w
    ).transpose(0, 1, 3, 4, 2)
def depthwise_conv2d_python_nhwc(input_np, filter_np, stride, padding):
    """Depthwise convolution operator in NHWC layout.

    Parameters
    ----------
    input_np : numpy.ndarray
        4-D with shape [batch, in_height, in_width, in_channel]

    filter_np : numpy.ndarray
        4-D with shape [filter_height, filter_width, in_channel, channel_multiplier]

    stride : list / tuple of 2 ints
        [stride_height, stride_width]

    padding : str
        'VALID' or 'SAME'

    Returns
    -------
    output_np : np.ndarray
        4-D with shape [batch, out_height, out_width, out_channel]
    """
    # Delegate to the NCHW reference and transpose at both ends.
    result_nchw = depthwise_conv2d_python_nchw(
        input_np.transpose(0, 3, 1, 2),
        filter_np.transpose(2, 3, 0, 1),
        stride,
        padding,
    )
    return result_nchw.transpose(0, 2, 3, 1)
| 5,556 | 32.475904 | 100 | py |
tvm | tvm-main/python/tvm/topi/testing/roi_pool_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-nested-blocks
"Roi pool in python"
import math
import numpy as np
def roi_pool_nchw_python(a_np, rois_np, pooled_size, spatial_scale):
    """Reference ROI max-pooling in NCHW layout.

    Parameters
    ----------
    a_np : numpy.ndarray
        4-D feature map with shape [batch, channel, height, width].
    rois_np : numpy.ndarray
        2-D with shape [num_roi, 5]; each row is
        (batch_index, w_start, h_start, w_end, h_end) in input coordinates.
    pooled_size : int or tuple of 2 ints
        Output spatial size, either a square size or (height, width).
    spatial_scale : float
        Scale applied to the ROI coordinates before pooling.

    Returns
    -------
    b_np : numpy.ndarray
        4-D with shape [num_roi, channel, pooled_size_h, pooled_size_w].
    """
    _, channel, height, width = a_np.shape
    num_roi = rois_np.shape[0]
    # Normalize pooled_size BEFORE allocating the output: the previous code
    # passed the raw argument to np.zeros, which crashed for tuple inputs.
    if isinstance(pooled_size, int):
        pooled_size_h = pooled_size_w = pooled_size
    else:
        pooled_size_h, pooled_size_w = pooled_size
    b_np = np.zeros((num_roi, channel, pooled_size_h, pooled_size_w), dtype=a_np.dtype)
    for i in range(num_roi):
        roi = rois_np[i]
        batch_index = int(roi[0])
        # ROI corners rounded to the input grid.
        roi_start_w = int(round(roi[1] * spatial_scale))
        roi_start_h = int(round(roi[2] * spatial_scale))
        roi_end_w = int(round(roi[3] * spatial_scale))
        roi_end_h = int(round(roi[4] * spatial_scale))
        # Inclusive coordinates: a degenerate ROI still covers one pixel.
        roi_h = max(roi_end_h - roi_start_h + 1, 1)
        roi_w = max(roi_end_w - roi_start_w + 1, 1)
        bin_h = float(roi_h) / pooled_size_h
        bin_w = float(roi_w) / pooled_size_w
        for ph in range(pooled_size_h):
            for pw in range(pooled_size_w):
                hstart = int(math.floor(ph * bin_h))
                wstart = int(math.floor(pw * bin_w))
                hend = int(math.ceil((ph + 1) * bin_h))
                wend = int(math.ceil((pw + 1) * bin_w))
                # Clip each pooling bin to the feature-map bounds.
                hstart = min(max(hstart + roi_start_h, 0), height)
                hend = min(max(hend + roi_start_h, 0), height)
                wstart = min(max(wstart + roi_start_w, 0), width)
                wend = min(max(wend + roi_start_w, 0), width)
                is_empty = (hend <= hstart) or (wend <= wstart)
                for c in range(channel):
                    if is_empty:
                        # An empty bin contributes zero, matching the HW op.
                        b_np[i, c, ph, pw] = 0.0
                    else:
                        b_np[i, c, ph, pw] = np.max(
                            a_np[batch_index, c, hstart:hend, wstart:wend]
                        )
    return b_np
| 2,700 | 40.553846 | 99 | py |
tvm | tvm-main/python/tvm/topi/testing/poolnd_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, unused-variable
"""Ground truth max and average pooling operators in python."""
import itertools
import math
from typing import List, Tuple, Optional
import numpy as np
import tvm
def _get_supported_layout(dims: int):
"""
Returns layout that is supported by poolnd_python based on number of
dimensions of input tensor
"""
assert dims in [3, 4, 5], f"{dims}-dimensional tensor is not supported"
if dims == 3:
return "NCW"
if dims == 4:
return "NCHW"
# dims == 5
return "NCDHW"
def _convert_to_layout(input_tensor: np.ndarray, layout: str) -> np.ndarray:
    """
    Transpose from the internal channel-first layout back to ``layout``;
    a no-op when ``layout`` is None or already canonical.
    """
    canonical = _get_supported_layout(input_tensor.ndim)
    if layout is None or canonical == layout:
        return input_tensor
    return input_tensor.transpose([canonical.index(axis) for axis in layout])
def _convert_from_layout(input_tensor: np.ndarray, layout: str) -> np.ndarray:
    """
    Transpose from ``layout`` into the internal channel-first layout;
    a no-op when ``layout`` is None or already canonical.
    """
    canonical = _get_supported_layout(input_tensor.ndim)
    if layout is None or canonical == layout:
        return input_tensor
    return input_tensor.transpose([layout.index(axis) for axis in canonical])
def get_slice(
    spatial_dimensions: int,
    pad_np: np.array,
    dim_coord: Tuple[int],
    kernel: Tuple[int],
    strides: Tuple[int],
    dilation: Tuple[int],
) -> Tuple[slice]:
    """
    Build the index tuple selecting the pooling window at ``dim_coord``.

    The first two (batch/channel) dimensions are taken whole; each spatial
    dimension gets a strided, dilated slice covering the kernel extent.
    pad_np[slice] yields the elements a pooling step reads at dim_coord.
    """
    window = [
        slice(
            dim_coord[axis] * strides[axis],
            dim_coord[axis] * strides[axis] + (kernel[axis] - 1) * dilation[axis] + 1,
            dilation[axis],
        )
        for axis in range(spatial_dimensions)
    ]
    return (slice(None), slice(None), *window)
def pad_tensor(
    np_arr: np.array,
    pad_value: float,
    padding_before: List[int],
    padding_after: List[int],
    dtype: str,
) -> np.array:
    """Pad the spatial dimensions (everything past the first two) of the
    given array with ``pad_value`` and cast the result to ``dtype``."""
    ndim = len(np_arr.shape)
    padded_shape = list(np_arr.shape)
    for axis in range(2, ndim):
        padded_shape[axis] += padding_before[axis - 2] + padding_after[axis - 2]
    padded = (np.zeros(shape=padded_shape) + pad_value).astype(dtype)
    # Region of the padded array that receives the original data.
    region = [range(padded_shape[0]), range(padded_shape[1])]
    for axis in range(2, ndim):
        start = padding_before[axis - 2]
        region.append(range(start, start + np_arr.shape[axis]))
    padded[np.ix_(*region)] = np_arr
    return padded
def poolnd_python(
    np_data: np.array,
    kernel: Tuple[int],
    strides: Tuple[int],
    dilation: Tuple[int],
    padding_before: Tuple[int],
    padding_after: Tuple[int],
    pool_type: str,
    count_include_pad: bool = True,
    ceil_mode: bool = False,
    dtype: str = "float32",
    layout: Optional[str] = None,
) -> np.array:
    """Ground-truth N-D pooling operator implemented in numpy.

    Parameters
    ----------
    np_data : np.array
        Input of rank 3, 4 or 5 in ``layout`` (channel-first when None).
    kernel, strides, dilation : Tuple[int]
        Per-spatial-dimension window size, step and dilation.
    padding_before, padding_after : Tuple[int]
        Per-spatial-dimension padding at the low/high ends.
    pool_type : str
        Either "max" or "avg".
    count_include_pad : bool
        For "avg", whether padded elements count toward the divisor; for
        "max", whether padding participates (when it does not, padding is
        filled with the dtype minimum so it can never win).
    ceil_mode : bool
        Use ceiling instead of floor when sizing the output.
    dtype : str
        Output (and padding) dtype.
    layout : Optional[str]
        Input layout; converted to channel-first internally and back.

    Returns
    -------
    np.array
        Pooled tensor in the original layout.
    """
    np_data = _convert_from_layout(np_data, layout)
    # Output shape: batch/channel pass through; spatial dims follow the
    # standard (dilated) pooling size formula with floor/ceil rounding.
    out_shape = [np_data.shape[0], np_data.shape[1]]
    for dim in range(2, len(np_data.shape)):
        i = dim - 2
        val = (
            float(
                np_data.shape[dim]
                - (kernel[i] - 1) * dilation[i]
                - 1
                + padding_before[i]
                + padding_after[i]
            )
            / strides[i]
        )
        if ceil_mode:
            out_shape.append(int(math.ceil(val) + 1))
        else:
            out_shape.append(int(math.floor(val) + 1))
    out_shape = tuple(out_shape)
    # Pad the data, and keep a boolean mask marking the genuine elements.
    pad_value = 0
    if pool_type == "max" and not count_include_pad:
        pad_value = tvm.te.min_value(dtype).value
    pad_data = pad_tensor(np_data, pad_value, padding_before, padding_after, dtype)
    pad_map = pad_tensor(np.ones_like(np_data), 0, padding_before, padding_after, "bool")
    # Create iterator which gives all indices for output array
    dim_iterators = []
    for spatial_dimension in range(2, len(np_data.shape)):
        dim_iterators.append(range(out_shape[spatial_dimension]))
    coord_iterator = itertools.product(*dim_iterators)
    ret_np = np.zeros(shape=out_shape).astype(dtype)
    for coordinate in coord_iterator:
        # Get index into the values that any pool operation will use for given coordinate
        np_index = get_slice(
            spatial_dimensions=len(out_shape) - 2,
            pad_np=pad_data,
            dim_coord=coordinate,
            kernel=kernel,
            strides=strides,
            dilation=dilation,
        )
        output_slice = (slice(None), slice(None)) + tuple(coordinate)
        reduction_axis = tuple(range(2, len(np_data.shape)))
        if pool_type == "avg":
            count_non_padded = (
                pad_data[np_index].size if count_include_pad else np.sum(pad_map[np_index])
            )
            # The window (and its mask) spans all batches/channels too, so
            # normalize the count down to a per-(batch, channel) value.
            count_non_padded /= out_shape[0] * out_shape[1]
            if count_non_padded == 0:
                ret_np[output_slice] = 0
            else:
                ret_np[output_slice] = (
                    np.sum(pad_data[np_index], axis=reduction_axis) / count_non_padded
                )
        elif pool_type == "max":
            # Padding is either real data (count_include_pad) or the dtype
            # minimum, so a plain max over the window is always correct.
            # (Previously an unused mask sum was also computed here.)
            ret_np[output_slice] = np.max(pad_data[np_index], axis=reduction_axis)
        else:
            raise ValueError(f"Pool type {pool_type} is not supported")
    return _convert_to_layout(ret_np, layout)
| 7,140 | 33.331731 | 93 | py |
tvm | tvm-main/python/tvm/topi/testing/reorg_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Reorg in python"""
import numpy as np
def reorg_python(a_np, stride):
"""Reorg operator
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
stride : int
Stride size
Returns
-------
b_np : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
batch, in_channel, in_height, in_width = a_np.shape
a_np = np.reshape(a_np, batch * in_channel * in_height * in_width)
out_c = int(in_channel / (stride * stride))
out_channel = in_channel * stride * stride
out_height = int(in_height / stride)
out_width = int(in_width / stride)
b_np = np.zeros(batch * out_channel * out_height * out_width)
cnt = 0
for b in range(batch):
for k in range(in_channel):
for j in range(in_height):
for i in range(in_width):
c2 = k % out_c
offset = int(k / out_c)
w2 = int(i * stride + offset % stride)
h2 = int(j * stride + offset / stride)
out_index = int(
w2 + in_width * stride * (h2 + in_height * stride * (c2 + out_c * b))
)
b_np[cnt] = a_np[int(out_index)]
cnt = cnt + 1
b_np = np.reshape(b_np, (batch, out_channel, out_height, out_width))
return b_np
| 2,293 | 36 | 93 | py |
tvm | tvm-main/python/tvm/topi/testing/conv1d_transpose_ncw_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable
"""Transposed 1D convolution in python"""
import numpy as np
import scipy
import tvm.topi.testing
from tvm.topi.nn.utils import get_pad_tuple1d
def conv1d_transpose_ncw_python(a_np, w_np, stride, padding, output_padding):
    """Transposed 1D convolution operator in NCW layout.
    Implemented as: dilate the input by the stride, pad it, then run an
    ordinary (kernel-flipping) convolution.
    Parameters
    ----------
    a_np : numpy.ndarray
        3-D with shape [batch, in_channel, in_width]
    w_np : numpy.ndarray
        3-D with shape [in_channel, num_filter, filter_width]
    stride : int or a list/tuple of one int
        Stride size, or [stride_width]
    padding : int, tuple, or str
        Single int for padding size, or
        tuple of 2 ints for left and right padding, or
        ['VALID', 'SAME']
    output_padding : tuple
        Used to recover the actual output shape in case more than one
        is possible
    Returns
    -------
    b_np : np.ndarray
        3-D with shape [batch, out_channel, out_width]
    """
    batch, in_c, in_w = a_np.shape
    _, out_c, filter_w = w_np.shape
    opad = output_padding[0]
    if isinstance(stride, int):
        stride_w = stride
    else:
        stride_w = stride[0]
    # output_padding must be strictly smaller than the stride, otherwise the
    # requested output width is not reachable by a transposed convolution
    assert opad < stride_w
    fpad_left, fpad_right = get_pad_tuple1d(padding, filter_w)
    # dilate stage: insert (stride - 1) zeros between input elements so the
    # transposed convolution becomes an ordinary stride-1 convolution
    dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_w])
    # padding stage: a transposed conv with "forward" padding fpad corresponds
    # to an ordinary conv padded by (filter_w - 1 - fpad); the extra `opad`
    # on the right realizes the requested output_padding
    bpad_left = filter_w - 1 - fpad_left
    bpad_right = filter_w - 1 - fpad_right + opad
    padded_a_np = np.zeros((batch, in_c, dilated_a_np.shape[2] + bpad_left + bpad_right))
    padded_a_np[:, :, bpad_left : dilated_a_np.shape[2] + bpad_left] = dilated_a_np
    # convolution stage: scipy's convolve flips the kernel (true convolution),
    # which is exactly the flip the transposed/gradient convolution needs
    out_w = (in_w - 1) * stride_w - fpad_left - fpad_right + filter_w + opad
    b_np = np.zeros((batch, out_c, out_w))
    for n in range(batch):
        for f in range(out_c):
            for c in range(in_c):
                out = scipy.signal.convolve(padded_a_np[n, c], w_np[c, f], mode="valid")
                b_np[n, f] += out
    return b_np
| 2,805 | 34.518987 | 89 | py |
tvm | tvm-main/python/tvm/topi/testing/depth_to_space.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Depth to space in python"""
import numpy as np
def depth_to_space_python(data, block_size, mode="DCR"):
    """Depth to Space operator in python for NCHW layout.
    Parameters
    ----------
    data : np.ndarray
        4-D with shape [batch, in_channel, in_height, in_width]
    block_size : int
        Size of blocks to convert channel pixels into.
    mode : string
        Channel rearrangement order: "DCR" (depth-column-row); any other
        value selects the CRD ordering.
    Returns
    -------
    d2s_out : np.ndarray
        4-D with shape [batch, in_channel / (block_size * block_size),
                        in_height * block_size, in_width * block_size]
    """
    in_n, in_c, in_h, in_w = data.shape
    new_h = int(in_h * block_size)
    # Fix: the output width must scale the input *width*; the previous
    # `in_h * block_size` broke (raised on reshape for) non-square inputs.
    new_w = int(in_w * block_size)
    new_c = int(in_c / (block_size * block_size))
    if mode == "DCR":
        expanded = np.reshape(data, newshape=[in_n, block_size, block_size, new_c, in_h, in_w])
        transposed = np.transpose(expanded, axes=[0, 3, 4, 1, 5, 2])
    else:
        expanded = np.reshape(data, newshape=(in_n, new_c, block_size, block_size, in_h, in_w))
        transposed = np.transpose(expanded, axes=(0, 1, 4, 2, 5, 3))
    newshape = [in_n, new_c, new_h, new_w]
    d2s_out = np.reshape(transposed, newshape=newshape)
    return d2s_out
| 2,069 | 38.056604 | 95 | py |
tvm | tvm-main/python/tvm/topi/testing/lstm_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""LSTM reference implementation using numpy."""
import numpy as np
def lstm_python(
    Xs: np.array,
    Wi: np.array,
    Wh: np.array,
    Bi: np.array = None,
    Bh: np.array = None,
    h_init: np.array = None,
    c_init: np.array = None,
    proj: np.array = None,
    p_i: np.array = None,
    p_f: np.array = None,
    p_o: np.array = None,
    f_act: str = "sigmoid",
    g_act: str = "tanh",
    h_act: str = "tanh",
    reverse: bool = False,
    weight_layout: str = "IFGO",
):
    """LSTM reference implementation using numpy
    Parameters
    ----------
    Xs : np.array
        (seq_length, batch_size, in_dim)
    Wi : np.array
        (4 * hidden_dim, in_dim)
    Wh : np.array
        (4 * hidden_dim, out_dim) where out_dim = proj_dim if proj_dim > 0, else hidden_dim
    Bi : np.array, optional
        (4 * hidden_dim,), by default None
    Bh : np.array, optional
        (4 * hidden_dim,), by default None
    h_init : np.array, optional
        (batch_size, out_dim), by default None
    c_init : np.array, optional
        (batch_size, hidden_dim), by default None
    proj : np.array, optional
        (proj_dim, hidden_dim), by default None
    p_i, p_f, p_o: np.array, optional
        (batch_size, hidden_dim), by default None
    f_act, g_act, h_act: str, optional
        activations, by default "sigmoid", "tanh", "tanh"
    reverse : bool, optional
        process Xs in reverse, by default False
    weight_layout : str, optional
        Packed layout for weights and biases, by default "IFGO"
    Returns
    -------
    (hs, cs) : tuple of np.array
        Per-step hidden and cell states stacked along axis 0, i.e. shapes
        (seq_length, batch_size, out_dim) and (seq_length, batch_size, hidden_dim).
    """
    # Positions of the input/forget/cell/output gates inside the packed
    # (4 * hidden_dim) weight and bias tensors, as given by weight_layout.
    i_gate_idx = weight_layout.find("I")
    f_gate_idx = weight_layout.find("F")
    g_gate_idx = weight_layout.find("G")
    o_gate_idx = weight_layout.find("O")
    str2act = {"sigmoid": lambda x: 1 / (1 + np.exp(-x)), "tanh": np.tanh}
    f_act = str2act[f_act]
    g_act = str2act[g_act]
    h_act = str2act[h_act]
    S, B, F = Xs.shape  # seq_length, batch_size, in_dim
    H = Wi.shape[0] // 4  # hidden_dim
    O = Wh.shape[1]  # out_dim (equals proj_dim when a projection is used)
    # make life a bit easier: split the packed weights into one slice per gate
    Wi = np.reshape(Wi, (4, H, F))
    Wh = np.reshape(Wh, (4, H, O))
    if Bi is not None:
        Bi = np.reshape(Bi, (4, H))
    if Bh is not None:
        Bh = np.reshape(Bh, (4, H))
    h0 = h_init if h_init is not None else np.zeros((B, O), "float32")
    c0 = c_init if c_init is not None else np.zeros((B, H), "float32")
    hs = [h0]
    cs = [c0]
    for t in range(S):
        x = Xs[S - t - 1 if reverse else t]
        # per-gate input projections (plus input bias when present)
        xh = [np.matmul(x, Wi[g].T) for g in range(4)]
        if Bi is not None:
            xh = [xh[g] + Bi[g] for g in range(4)]
        # per-gate hidden-state projections (plus hidden bias when present)
        hh = [np.matmul(hs[t], Wh[g].T) for g in range(4)]
        if Bh is not None:
            hh = [hh[g] + Bh[g] for g in range(4)]
        sums = [xh[g] + hh[g] for g in range(4)]
        # peephole connections: input/forget gates also look at the old cell state
        if p_i is not None and p_f is not None:
            i_gate = f_act(sums[i_gate_idx] + p_i * cs[t])
            f_gate = f_act(sums[f_gate_idx] + p_f * cs[t])
        else:
            i_gate = f_act(sums[i_gate_idx])
            f_gate = f_act(sums[f_gate_idx])
        g_gate = g_act(sums[g_gate_idx])
        next_c = f_gate * cs[t] + i_gate * g_gate
        # the output-gate peephole uses the *updated* cell state
        if p_o is not None:
            o_gate = f_act(sums[o_gate_idx] + p_o * next_c)
        else:
            o_gate = f_act(sums[o_gate_idx])
        next_h = o_gate * h_act(next_c)
        # optional LSTMP projection down to proj_dim
        if proj is not None:
            next_h = np.matmul(next_h, proj.T)
        hs.append(next_h)
        cs.append(next_c)
    # drop the initial states; return stacked per-step hidden and cell states
    return np.stack(hs[1:], axis=0), np.stack(cs[1:], axis=0)
| 4,276 | 30.681481 | 91 | py |
tvm | tvm-main/python/tvm/topi/testing/slice_axis_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Slice axis in python"""
def slice_axis_python(data, axis, begin, end=None):
    """Slice input array along specific axis.
    Parameters
    ----------
    data : numpy.ndarray
        The source array to be sliced.
    axis : int
        Axis to be sliced; may be negative (counted from the last axis).
    begin: int
        The index to begin with in the slicing; negative values count
        from the end of the axis.
    end: int, optional
        The index indicating end of the slice. ``None`` (the default)
        slices to the end of the axis; zero and negative values are
        offsets from the end of the axis.
    Returns
    -------
    ret : numpy.ndarray
        The computed result.
    """
    dshape = data.shape
    if axis < 0:
        axis += len(dshape)
    if begin < 0:
        begin += dshape[axis]
    # Fix: the documented default `end=None` used to crash on `end <= 0`
    # (TypeError comparing None with int); treat None as "to the end".
    if end is None:
        end = dshape[axis]
    elif end <= 0:
        end += dshape[axis]
    slc = [slice(None)] * len(dshape)
    slc[axis] = slice(begin, end)
    return data[tuple(slc)]
| 1,533 | 28.5 | 62 | py |
tvm | tvm-main/python/tvm/topi/testing/searchsorted.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The reference implementation of searchsorted in Numpy."""
import numpy as np
def searchsorted_ref(sorted_sequence, values, right, out_dtype):
    """Run Numpy searchsorted on 1-D or N-D sorted_sequence."""
    side = "right" if right else "left"
    if sorted_sequence.ndim == 1 and values.ndim > 1:
        # One shared 1-D sequence, searched once per trailing row of `values`.
        num_rows = np.prod(values.shape[:-1])
        seq_rows = np.tile(sorted_sequence, (num_rows, 1))
    else:
        # Row-for-row pairing of innermost sequences and value vectors.
        seq_rows = np.reshape(sorted_sequence, (-1, sorted_sequence.shape[-1]))
    value_rows = np.reshape(values, (-1, values.shape[-1]))
    out_rows = np.zeros(value_rows.shape, dtype=out_dtype)
    for row, (seq, vals) in enumerate(zip(seq_rows, value_rows)):
        out_rows[row] = np.searchsorted(seq, vals, side=side)
    return np.reshape(out_rows, values.shape)
| 1,576 | 42.805556 | 89 | py |
tvm | tvm-main/python/tvm/topi/testing/conv2d_nhwc_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Convolution in python"""
import numpy as np
import scipy.signal
from tvm.topi.nn.utils import get_pad_tuple
def _conv2d_nhwc_python(a_np, w_np, stride, padding):
    """Convolution operator in NHWC layout.
    Parameters
    ----------
    a_np : numpy.ndarray
        4-D with shape [batch, in_height, in_width, in_channel]
    w_np : numpy.ndarray
        4-D with shape [filter_height, filter_width, in_channel, num_filter]
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int or str or a list/tuple of two ints
        Padding size, or ['VALID', 'SAME'], or [pad_height, pad_width]
    Returns
    -------
    b_np : np.ndarray
        4-D with shape [batch, out_height, out_width, out_channel]
    """
    batch, in_height, in_width, in_channel = a_np.shape
    kernel_h, kernel_w, _, num_filter = w_np.shape
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel_h, kernel_w))
    pad_h = pad_top + pad_bottom
    pad_w = pad_left + pad_right
    # compute the output shape
    out_channel = num_filter
    out_height = (in_height - kernel_h + pad_h) // stride_h + 1
    out_width = (in_width - kernel_w + pad_w) // stride_w + 1
    # change the layout from NHWC to NCHW (and HWIO to OIHW) so each
    # (image plane, kernel plane) pair is a contiguous 2-D array
    at = a_np.transpose((0, 3, 1, 2))
    wt = w_np.transpose((3, 2, 0, 1))
    bt = np.zeros((batch, out_channel, out_height, out_width))
    # computation
    for n in range(batch):
        for f in range(out_channel):
            for c in range(in_channel):
                if pad_h > 0 or pad_w > 0:
                    # embed the input plane into a zero-padded canvas
                    apad = np.zeros((in_height + pad_h, in_width + pad_w))
                    apad[pad_top : pad_top + in_height, pad_left : pad_left + in_width] = at[n, c]
                else:
                    apad = at[n, c]
                # rotating the kernel 180 degrees (rot90 twice) cancels the
                # flip scipy's convolve2d performs, yielding cross-correlation;
                # stride is applied by subsampling the dense "valid" output
                out = scipy.signal.convolve2d(apad, np.rot90(np.rot90(wt[f, c])), mode="valid")
                bt[n, f] += out[::stride_h, ::stride_w]
    return bt.transpose((0, 2, 3, 1))
def conv2d_nhwc_python(a_np, w_np, stride, padding, groups=1):
    """Convolution operator in NHWC layout, with optional grouping.
    Parameters
    ----------
    a_np : numpy.ndarray
        4-D with shape [batch, in_height, in_width, in_channel]
    w_np : numpy.ndarray
        4-D with shape [filter_height, filter_width, in_channel // groups, num_filter]
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int or str or a list/tuple of 2 or 4 ints
        Padding size, or ['VALID', 'SAME'], or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    groups : int
        Number of groups
    Returns
    -------
    b_np : np.ndarray
        4-D with shape [batch, out_height, out_width, out_channel]
    """
    # Split input channels and output filters into `groups` chunks,
    # convolve each pair independently, then stitch the outputs back
    # together along the channel axis.
    data_groups = np.array_split(a_np, groups, axis=3)
    weight_groups = np.array_split(w_np, groups, axis=3)
    outputs = []
    for data_g, weight_g in zip(data_groups, weight_groups):
        outputs.append(_conv2d_nhwc_python(data_g, weight_g, stride, padding))
    return np.concatenate(outputs, axis=3)
| 4,109 | 34.73913 | 98 | py |
tvm | tvm-main/python/tvm/topi/testing/nll_loss.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""NLLLoss in python"""
import numpy as np
def nll_loss(predictions, targets, weights, reduction="mean", ignore_index=-100):
    """nll_loss operator implemented in numpy.
    For every position (n, i_1, ..., i_k) with t = targets[n, i_1, ..., i_k]:
      output = -predictions[n, t, i_1, ..., i_k] * weights[t]   if t != ignore_index
      output = 0                                                otherwise
    and the result is reduced according to `reduction`.
    Parameters
    ----------
    predictions : numpy.ndarray
        (k+2)-D with shape (N, C, d_1, d_2, ..., d_k),
        where C is the number of target classes
    targets : numpy.ndarray
        (k+1)-D with shape (N, d_1, d_2, ..., d_k)
        The target value of the input.
    weights : numpy.ndarray
        1-D with shape (C,)
        The weight of each target value.
    reduction : string
        The reduction method to apply to output.
        Can be "mean", "sum" or "none".
    ignore_index : int
        The target value to ignore.
    Returns
    -------
    output : numpy.ndarray
        a scalar if the reduction type is "mean" or "sum",
        otherwise the same shape as `target`.
    """
    loss = np.zeros(targets.shape)
    total_weight = 0.0
    for pos in np.ndindex(targets.shape):
        cls = targets[pos]
        if cls == ignore_index:
            continue
        # predictions index is the target position with the class id
        # inserted after the batch dimension
        pred_pos = pos[:1] + (cls,) + pos[1:]
        loss[pos] = -predictions[pred_pos] * weights[cls]
        total_weight += weights[cls]
    if reduction == "mean":
        return np.sum(loss) / total_weight
    if reduction == "sum":
        return np.sum(loss)
    return loss
| 2,492 | 33.150685 | 81 | py |
tvm | tvm-main/python/tvm/topi/testing/conv1d_ncw_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable, invalid-name
"""1D convolution in python"""
import numpy as np
from tvm.topi.nn.utils import get_pad_tuple1d
def dilate_np(x, dilation):
    """1D dilation using numpy
    Inserts (dilation - 1) zeros between consecutive elements of `x`.
    Parameters
    ----------
    x : numpy.ndarray
        1-D array to dilate
    dilation : int
        dilation rate of output
    Returns
    -------
    out : numpy.ndarray
        Dilated output with shape [(len(x) - 1) * dilation + 1]
    """
    # The gap count is fixed from the *original* length; each pass widens
    # every gap by one zero until each gap holds (dilation - 1) zeros.
    num_gaps = len(x) - 1
    for step in range(1, dilation):
        insert_at = [step * pos for pos in range(1, num_gaps + 1)]
        x = np.insert(x, insert_at, 0)
    return x
def group_conv1d_ncw_python(a_np, w_np, stride, padding, dilation, groups):
    """Grouped version of `conv1d_ncw_python`; see that function for the
    parameter documentation."""
    # Split input channels (axis 1) and filters (axis 0) into `groups`
    # chunks, convolve each pair, then rejoin along the channel axis.
    outputs = []
    for data_g, weight_g in zip(
        np.array_split(a_np, groups, axis=1), np.array_split(w_np, groups, axis=0)
    ):
        outputs.append(conv1d_ncw_python(data_g, weight_g, stride, padding, dilation))
    return np.concatenate(outputs, axis=1)
def conv1d_ncw_python(a_np, w_np, stride, padding, dilation):
    """1D convolution operator in NCW layout
    Parameters
    ----------
    a_np : numpy.ndarray
        3-D with shape [batch, in_channel, in_width]
    w_np : numpy.ndarray
        3-D with shape [num_filter, in_channel, filter_width]
    stride : int
        Stride size
    padding : int, tuple, or str
        Single int for padding size or tuple of (left, right) padding
        or a string in ['VALID', 'SAME']
    dilation : int
        Dilation rate of the kernel
    Returns
    -------
    b_np : numpy.ndarray
        3-D with shape [batch, out_channel, out_width]
    """
    batch, in_c, in_w = a_np.shape
    out_c, _, filter_w = w_np.shape
    # normalize scalar-or-sequence arguments to scalars
    if isinstance(stride, (tuple, list)):
        stride = stride[0]
    if isinstance(dilation, (tuple, list)):
        dilation = dilation[0]
    # effective kernel extent once zeros are inserted between its taps
    dilated_filter_w = (filter_w - 1) * dilation + 1
    pad_left, pad_right = get_pad_tuple1d(padding, (dilated_filter_w,))
    out_w = ((in_w - dilated_filter_w + pad_left + pad_right) // stride) + 1
    padded_a_np = np.zeros((batch, in_c, in_w + pad_left + pad_right))
    padded_a_np[:, :, pad_left : (in_w + pad_left)] = a_np
    b_np = np.zeros((batch, out_c, out_w))
    for n in range(batch):
        for f in range(out_c):
            for c in range(in_c):
                # np.flip pre-flips the (dilated) kernel so that np.convolve,
                # which flips its second argument, computes cross-correlation;
                # stride is applied by subsampling the dense "valid" output
                out = np.convolve(
                    padded_a_np[n, c], np.flip(dilate_np(w_np[f, c], dilation)), mode="valid"
                )
                b_np[n, f] += out[::stride]
    return b_np
| 3,532 | 31.412844 | 93 | py |
tvm | tvm-main/python/tvm/topi/testing/conv3d_ndhwc_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Convolution 3D in python"""
import numpy as np
import scipy.signal
from tvm.topi.nn.utils import get_pad_tuple3d
def _conv3d_ndhwc_python(a_np, w_np, stride, padding):
    """Convolution 3D operator in NDHWC layout.
    Parameters
    ----------
    a_np : numpy.ndarray
        5-D with shape [batch, in_depth, in_height, in_width, in_channel]
    w_np : numpy.ndarray
        5-D with shape [filter_depth, filter_height, filter_width, in_channel, num_filter]
    stride : int or a list/tuple of three ints
        Stride size, or [stride_depth, stride_height, stride_width]
    padding : int or str or a list/tuple of three ints
        Padding size, or ['VALID', 'SAME'], or [pad_depth, pad_height, pad_width]
    Returns
    -------
    b_np : np.ndarray
        5-D with shape [batch, out_depth, out_height, out_width, out_channel]
    """
    batch, in_depth, in_height, in_width, in_channel = a_np.shape
    kernel_d, kernel_h, kernel_w, _, num_filter = w_np.shape
    if isinstance(stride, int):
        stride_d = stride_h = stride_w = stride
    else:
        stride_d, stride_h, stride_w = stride
    pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = get_pad_tuple3d(
        padding, (kernel_d, kernel_h, kernel_w)
    )
    pad_d = pad_front + pad_back
    pad_h = pad_top + pad_bottom
    pad_w = pad_left + pad_right
    # compute the output shape
    out_channel = num_filter
    out_depth = (in_depth - kernel_d + pad_d) // stride_d + 1
    out_height = (in_height - kernel_h + pad_h) // stride_h + 1
    out_width = (in_width - kernel_w + pad_w) // stride_w + 1
    # change the layout from NDHWC to NCDHW (and DHWIO to OIDHW) so each
    # (volume, kernel) pair is a contiguous 3-D array
    at = a_np.transpose((0, 4, 1, 2, 3))
    wt = w_np.transpose((4, 3, 0, 1, 2))
    bt = np.zeros((batch, out_channel, out_depth, out_height, out_width), dtype=a_np.dtype)
    # computation
    for n in range(batch):
        for f in range(out_channel):
            for c in range(in_channel):
                if pad_d > 0 or pad_h > 0 or pad_w > 0:
                    # embed the input volume into a zero-padded canvas
                    apad = np.zeros(
                        (in_depth + pad_d, in_height + pad_h, in_width + pad_w), dtype=a_np.dtype
                    )
                    apad[
                        pad_front : pad_front + in_depth,
                        pad_top : pad_top + in_height,
                        pad_left : pad_left + in_width,
                    ] = at[n, c]
                else:
                    apad = at[n, c]
                # np.flip reverses the kernel along all axes so that scipy's
                # (kernel-flipping) convolve computes cross-correlation;
                # stride is applied by subsampling the dense "valid" output
                out = scipy.signal.convolve(apad, np.flip(wt[f, c]), mode="valid")
                bt[n, f] += out[::stride_d, ::stride_h, ::stride_w]
    return bt.transpose((0, 2, 3, 4, 1))
def conv3d_ndhwc_python(a_np, w_np, stride, padding, groups=1):
    """Convolution 3D operator in NDHWC layout, with optional grouping.
    Parameters
    ----------
    a_np : numpy.ndarray
        5-D with shape [batch, in_depth, in_height, in_width, in_channel]
    w_np : numpy.ndarray
        5-D with shape [filter_depth, filter_height, filter_width,
        in_channel // groups, num_filter]
    stride : int or a list/tuple of three ints
        Stride size, or [stride_depth, stride_height, stride_width]
    padding : int or str or a list/tuple of three ints
        Padding size, or ['VALID', 'SAME'], or [pad_depth, pad_height, pad_width]
    groups : int
        Number of groups
    Returns
    -------
    b_np : np.ndarray
        5-D with shape [batch, out_depth, out_height, out_width, out_channel]
    """
    # Split input channels and output filters into `groups` chunks,
    # convolve each pair independently, then stitch the outputs back
    # together along the channel axis.
    data_groups = np.array_split(a_np, groups, axis=4)
    kernel_groups = np.array_split(w_np, groups, axis=4)
    outputs = []
    for data_g, kernel_g in zip(data_groups, kernel_groups):
        outputs.append(_conv3d_ndhwc_python(data_g, kernel_g, stride, padding))
    return np.concatenate(outputs, axis=4)
| 4,597 | 37 | 97 | py |
tvm | tvm-main/python/tvm/topi/testing/strided_slice_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""strided_slice/set in python"""
def strided_slice_python(data, begin, end, strides, slice_mode="end", axes=None):
    """Python version of strided slice operator.
    Parameters
    ----------
    data : numpy.ndarray
        Input data
    begin : list
        Beginning of the slices.
    end : list
        End of the slices.
    strides : list
        The stride of each slice.
    slice_mode : str, optional
        The slice mode [end, size].
        end: The default slice mode, `end` holds the ending indices.
        size: `strides` is ignored; `end[i]` is the size of the slice
        starting at `begin[i]`, with -1 meaning "to the end of the axis".
    axes : list, optional
        Axes along which slicing is applied; other axes are kept whole.
    Returns
    -------
    result : numpy.ndarray
        The sliced result.
    """
    if strides is None:
        strides = []
    if axes is not None:
        # Expand the per-axis arguments to full-rank lists; unlisted axes
        # get the identity slice (start 0, full extent, stride 1).
        rank = len(data.shape)
        full_begin = [0] * rank
        full_end = list(data.shape)
        full_strides = [1] * rank
        for i, axis in enumerate(axes):
            full_begin[axis] = begin[i]
            full_end[axis] = end[i]
            if i < len(strides):
                full_strides[axis] = strides[i]
        begin, end, strides = full_begin, full_end, full_strides
    slices = []
    for dim in range(len(data.shape)):
        start = begin[dim] if dim < len(begin) else None
        step = None
        if slice_mode == "end" and dim < len(strides):
            step = strides[dim]
        if dim >= len(end):
            stop = None
        elif slice_mode == "size":
            # In size mode a negative size means "everything after start".
            stop = None if end[dim] < 0 else start + end[dim]
        else:
            stop = end[dim]
        slices.append(slice(start, stop, step))
    return data[tuple(slices)]
def strided_set_python(data, v, begin, end, strides):
    """Python version of strided set operator.
    Writes `v` into a copy of `data` at the positions selected by the
    (begin, end, strides) slice; the input array is left untouched.
    Parameters
    ----------
    data : numpy.ndarray
        Input data
    v : numpy.ndarray
        Value data
    begin : list
        Beginning of the slices.
    end : list
        End of the slices.
    strides : list
        The stride of each slice.
    Returns
    -------
    result : numpy.ndarray
        The updated result.
    """
    if strides is None:
        strides = []
    result = data.copy()
    # Axes beyond the provided lists default to the full-extent slice.
    index = tuple(
        slice(
            begin[dim] if dim < len(begin) else None,
            end[dim] if dim < len(end) else None,
            strides[dim] if dim < len(strides) else None,
        )
        for dim in range(len(data.shape))
    )
    result[index] = v
    return result
| 3,650 | 27.302326 | 94 | py |
tvm | tvm-main/python/tvm/topi/testing/deformable_conv2d_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
"""Deformable convolution in python"""
import itertools
import math
import numpy as np
from tvm.topi.nn.utils import get_pad_tuple
def deformable_conv2d_nchw_python(
    a_np, offset_np, w_np, stride, padding, dilation, deformable_groups, groups
):
    """Deformable convolution operator in NCHW layout.
    Parameters
    ----------
    a_np : numpy.ndarray
        4-D with shape [batch, in_channel, in_height, in_width]
    offset_np : numpy.ndarray
        4-D with shape [batch, deformable_groups * filter_height * filter_width * 2,
        out_height, out_width]
    w_np : numpy.ndarray
        4-D with shape [num_filter, in_channel, filter_height, filter_width]
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int or str or a list/tuple of 2 or 4 ints
        Padding size, or ['VALID', 'SAME'], or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    dilation : int or a list/tuple of two ints
        Dilation size, or [dilate_height, dilate_width]
    deformable_groups : int
        Number of deformable groups
    groups : int
        Number of groups (only 1 is supported)
    Returns
    -------
    b_np : np.ndarray
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    batch, in_channel, in_height, in_width = a_np.shape
    out_channel, _, kernel_h, kernel_w = w_np.shape
    # output spatial size is dictated by the offset tensor
    out_height, out_width = offset_np.shape[-2:]
    dtype = a_np.dtype
    # number of input channels that share one deformable offset group
    ic_per_dgroup = in_channel // deformable_groups
    assert groups == 1, "deformable_conv2d_nchw_python does not support groups > 1"
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    pad_top, pad_left, _, _ = get_pad_tuple(padding, (kernel_h, kernel_w))
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    def _bilinear(n, c, h, w):
        # Bilinear interpolation of a_np[n, c] at fractional position (h, w);
        # taps that fall outside the input contribute zero.
        y_low = int(math.floor(h))
        x_low = int(math.floor(w))
        y_high = y_low + 1
        x_high = x_low + 1
        wy_h = h - y_low
        wx_h = w - x_low
        wy_l = 1 - wy_h
        wx_l = 1 - wx_h
        val = 0
        for wx, xp in zip((wx_l, wx_h), (x_low, x_high)):
            for wy, yp in zip((wy_l, wy_h), (y_low, y_high)):
                if 0 <= yp < in_height and 0 <= xp < in_width:
                    val += wx * wy * a_np[n, c, yp, xp]
        return val
    # Gather stage: for every output position, sample the input at the
    # offset-shifted kernel tap locations into a_deform.
    a_deform = np.zeros((batch, in_channel, out_height, out_width, kernel_h, kernel_w), dtype=dtype)
    for n, h, w in itertools.product(range(batch), range(out_height), range(out_width)):
        # per-position offsets: (dg, kh, kw, 2) with [..., 0] = dy, [..., 1] = dx
        offset = offset_np[n, :, h, w].reshape(deformable_groups, kernel_h, kernel_w, 2)
        in_h = h * stride_h - pad_top
        in_w = w * stride_w - pad_left
        # undeformed (regular, dilated) sampling grid for this output position
        index_h_base, index_w_base = np.meshgrid(
            np.arange(in_h, in_h + kernel_h * dilation_h, dilation_h, dtype=offset_np.dtype),
            np.arange(in_w, in_w + kernel_w * dilation_w, dilation_w, dtype=offset_np.dtype),
            indexing="ij",
        )
        for c, kh, kw in itertools.product(range(in_channel), range(kernel_h), range(kernel_w)):
            # channels in the same deformable group share one offset field
            dg = c // ic_per_dgroup
            index_h = index_h_base + offset[dg, ..., 0]
            index_w = index_w_base + offset[dg, ..., 1]
            y, x = index_h[kh, kw], index_w[kh, kw]
            # sampling locations outside the input are left at zero
            if y < 0 or y >= in_height or x < 0 or x >= in_width:
                continue
            a_deform[n, c, h, w, kh, kw] = _bilinear(n, c, y, x)
    # Reduction stage: an ordinary convolution over the gathered patches.
    b_np = np.zeros((batch, out_channel, out_height, out_width), dtype=dtype)
    for n, c, f, h, w in itertools.product(
        range(batch), range(in_channel), range(out_channel), range(out_height), range(out_width)
    ):
        b_np[n, f, h, w] += np.tensordot(a_deform[n, c, h, w], w_np[f, c])
    return b_np
def deformable_conv2d_nhwc_python(
    a_np, offset_np, w_np, stride, padding, dilation, deformable_groups, groups
):
    """Deformable convolution operator in NHWC layout.
    Thin layout adapter: converts the tensors to the NCHW/OIHW layouts the
    NCHW reference handles, runs it, and converts the result back to NHWC.
    Parameters
    ----------
    a_np : numpy.ndarray
        4-D with shape [batch, in_height, in_width, in_channel]
    offset_np : numpy.ndarray
        4-D with shape [batch, out_height, out_width,
        deformable_groups * filter_height * filter_width * 2]
    w_np : numpy.ndarray
        4-D with shape [filter_height, filter_width, in_channel, num_filter]
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int or str or a list/tuple of 2 or 4 ints
        Padding size, or ['VALID', 'SAME'], or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    dilation : int or a list/tuple of two ints
        Dilation size, or [dilate_height, dilate_width]
    deformable_groups : int
        Number of deformable groups
    groups : int
        Number of groups
    Returns
    -------
    b_np : np.ndarray
        4-D with shape [batch, out_height, out_width, out_channel]
    """
    nchw_input = a_np.transpose(0, 3, 1, 2)  # NHWC -> NCHW
    nchw_offset = offset_np.transpose(0, 3, 1, 2)  # NHWC -> NCHW
    oihw_weight = w_np.transpose(3, 2, 0, 1)  # HWIO -> OIHW
    nchw_out = deformable_conv2d_nchw_python(
        nchw_input, nchw_offset, oihw_weight, stride, padding, dilation, deformable_groups, groups
    )
    return nchw_out.transpose(0, 2, 3, 1)  # NCHW -> NHWC
| 6,373 | 34.608939 | 100 | py |
tvm | tvm-main/python/tvm/topi/testing/common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Common utility for topi test"""
import numpy as np
import scipy.signal
import tvm
from tvm import topi
from tvm.testing import assert_allclose
# Injective (elementwise) schedules keyed by target key; dispatch() falls
# back to the "generic" entry when no other key matches.
_injective_schedule = {
    "generic": topi.generic.schedule_injective,
    "cpu": topi.x86.schedule_injective,
    "arm_cpu": topi.arm_cpu.schedule_injective,
    "gpu": topi.cuda.schedule_injective,
    "hls": topi.hls.schedule_injective,
}
# Reduction schedules keyed by target key.
# NOTE(review): "hls" maps to topi.cuda.schedule_reduce here, unlike the
# injective table above which uses topi.hls — confirm this is intentional.
_reduce_schedule = {
    "generic": topi.generic.schedule_reduce,
    "cpu": topi.x86.schedule_reduce,
    "gpu": topi.cuda.schedule_reduce,
    "hls": topi.cuda.schedule_reduce,
}
def dispatch(target, dispatch_map):
    """Pick the implementation in ``dispatch_map`` that matches ``target``.

    Parameters
    ----------
    target : str or tvm.target.Target
        The compilation target; strings are parsed into a Target first.

    dispatch_map : dict
        Maps a target key (e.g. "cpu", "gpu") to an implementation.  Must
        contain a "generic" entry, which is used as the fallback.

    Returns
    -------
    impl
        The entry for the first of the target's keys found in
        ``dispatch_map``, or ``dispatch_map["generic"]`` if none match.
    """
    if isinstance(target, str):
        target = tvm.target.Target(target)
    assert isinstance(target, tvm.target.Target)
    for key in target.keys:
        if key in dispatch_map:
            return dispatch_map[key]
    return dispatch_map["generic"]
def get_injective_schedule(target):
    """Return the injective (elementwise) schedule function for ``target``."""
    return dispatch(target, _injective_schedule)
def get_reduce_schedule(target):
    """Return the reduction schedule function for ``target``."""
    return dispatch(target, _reduce_schedule)
# Broadcast and elementwise ops share the injective schedules.
get_broadcast_schedule = get_injective_schedule
get_elemwise_schedule = get_injective_schedule
# (compute, schedule) implementation pairs for conv2d in NCHW layout,
# keyed by target key; "generic" is the fallback used by dispatch().
_conv2d_nchw_implement = {
    "generic": (topi.nn.conv2d_nchw, topi.generic.schedule_conv2d_nchw),
    "cpu": (topi.x86.conv2d_nchw, topi.x86.schedule_conv2d_nchw),
    "arm_cpu": (
        topi.arm_cpu.conv2d_nchw_spatial_pack,
        topi.arm_cpu.schedule_conv2d_nchw_spatial_pack,
    ),
    "gpu": (topi.cuda.conv2d_nchw, topi.cuda.schedule_conv2d_nchw),
    "mali": (topi.mali.conv2d_nchw_spatial_pack, topi.mali.schedule_conv2d_nchw_spatial_pack),
    "bifrost": (
        topi.bifrost.conv2d_nchw_spatial_pack,
        topi.bifrost.schedule_conv2d_nchw_spatial_pack,
    ),
    "intel_graphics": (topi.intel_graphics.conv2d_nchw, topi.intel_graphics.schedule_conv2d_nchw),
    "hls": (topi.nn.conv2d_nchw, topi.hls.schedule_conv2d_nchw),
}
def get_conv2d_nchw_implement(target):
    """Return the (compute, schedule) pair for conv2d NCHW on ``target``."""
    return dispatch(target, _conv2d_nchw_implement)
def compare_numpy_tvm(inputs, output, target, device, compute, schedule):
    """Check that a TVM compute/schedule pair reproduces a known-good numpy result.

    Parameters
    ----------
    inputs : Sequence[numpy.ndarray]
        Input numpy arrays passed to the function under test.

    output : numpy.ndarray
        Verified correct function output.

    target : tvm.target.Target
        Target to run on.

    device : tvm.runtime.Device
        Context to run on.

    compute : callable
        Topi compute function to test against.

    schedule : callable
        Topi scheduling function to test against.
    """
    placeholders = [
        tvm.te.placeholder(shape=arr.shape, dtype=str(arr.dtype)) for arr in inputs
    ]
    result_buf = tvm.nd.array(np.zeros(output.shape).astype(output.dtype), device=device)
    with tvm.target.Target(target):
        tensor_out = compute(*placeholders)
        sch = schedule([tensor_out])
        built = tvm.build(sch, placeholders + [tensor_out])
        device_args = [tvm.nd.array(arr, device=device) for arr in inputs]
        # Output buffer is passed as the trailing argument of the kernel.
        built(*device_args, result_buf)
        assert_allclose(result_buf.numpy(), output, atol=1e-4, rtol=1e-4)
def _convolve2d(data, weights):
"""2d convolution operator in HW layout.
This is intended to be used as a replacement for
scipy.signals.convolve2d, with wider support for different dtypes.
scipy.signal.convolve2d does not support all TVM-supported
dtypes (e.g. float16). Where possible, this function uses
scipy.signal.convolve2d to take advantage of compiled scipy
routines, falling back to an explicit loop only where needed.
Parameters
----------
data : numpy.ndarray
2-D with shape [in_height, in_width]
weights : numpy.ndarray
2-D with shape [filter_height, filter_width].
Returns
-------
b_np : np.ndarray
2-D with shape [out_height, out_width]
Return value and layout conventions are matched to
``scipy.signal.convolve2d(data, weights, mode="valid")``
"""
try:
return scipy.signal.convolve2d(data, weights, mode="valid")
except ValueError:
pass
weights = np.rot90(weights, k=2)
assert len(data.shape) == len(weights.shape) == 2
dtype = data.dtype
kernel_h, kernel_w = weights.shape
output_shape = [a_dim - w_dim + 1 for a_dim, w_dim in zip(data.shape, weights.shape)]
output = np.zeros(output_shape, dtype=dtype)
for y in range(output_shape[0]):
for x in range(output_shape[1]):
output[y][x] = np.sum(data[y : y + kernel_h, x : x + kernel_w] * weights)
return output
| 5,376 | 32.191358 | 98 | py |
tvm | tvm-main/python/tvm/topi/testing/lrn_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""LRN in python"""
from itertools import product
import numpy as np
def lrn_python(a_np, size, axis, bias, alpha, beta):
    """Local response normalization operator reference implementation.

    Parameters
    ----------
    a_np : numpy.ndarray
        4-D with shape [batch, in_channel, in_height, in_width]

    size : int
        normalization window size

    axis : int
        input data layout channel axis (1 for NCHW, 3 for NHWC)

    bias : float
        offset to avoid dividing by 0. constant value

    alpha : float
        constant value

    beta : float
        exponent constant value

    Returns
    -------
    lrn_out : np.ndarray
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    half = size // 2
    channels = a_np.shape[axis]
    sqr_sum = np.zeros(shape=a_np.shape).astype(a_np.dtype)
    for n, c, h, w in np.ndindex(*a_np.shape):
        if axis == 1:
            # NCHW layout: window slides along the second dimension.
            lo = max(c - half, 0)
            hi = min(c + half + 1, channels)
            window = a_np[n, lo:hi, h, w]
        elif axis == 3:
            # NHWC layout: window slides along the last dimension.
            lo = max(w - half, 0)
            hi = min(w + half + 1, channels)
            window = a_np[n, c, h, lo:hi]
        else:
            # Unsupported axis: leave the squared sum at zero.
            continue
        sqr_sum[n, c, h, w] = np.sum(window * window)
    return a_np / np.power(bias + (alpha * sqr_sum / size), beta)
| 2,543 | 33.849315 | 83 | py |
tvm | tvm-main/python/tvm/topi/testing/sequence_mask_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Sequence mask in python"""
import numpy as np
def sequence_mask(data, valid_length, mask_value, axis):
    """sequence_mask operator implemented in numpy.

    Replaces every element that lies beyond its sequence's valid length
    with ``mask_value``, leaving in-range elements unchanged.

    Parameters
    ----------
    data : numpy.ndarray
        N-D with shape [batch_size, MAX_LENGTH, ...] or [MAX_LENGTH, batch_size, ...]

    valid_length : numpy.ndarray
        1-D with shape [batch_size,]

    mask_value : float
        Masking value

    axis : int
        The axis of the length dimension (0 or 1); the other of the first
        two axes is treated as the batch dimension.

    Returns
    -------
    out : numpy.ndarray
        N-D with shape same as data
    """
    in_shape = data.shape
    max_length = in_shape[axis]
    # Shapes that broadcast valid_length along the batch axis and the
    # position indices 0..max_length-1 along the length axis.
    val_len_expand_shape = [1] * len(in_shape)
    val_len_expand_shape[1 - axis] = in_shape[1 - axis]
    seq_len_expand_shape = [1] * len(in_shape)
    seq_len_expand_shape[axis] = max_length
    # mask is True exactly where the position index is past the valid length.
    mask = np.broadcast_to(
        np.arange(max_length).reshape(seq_len_expand_shape), in_shape
    ) >= valid_length.reshape(val_len_expand_shape)
    out = data * (1 - mask) + mask_value * mask
    return out
| 1,883 | 33.254545 | 85 | py |
tvm | tvm-main/python/tvm/topi/testing/batch_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Batch matmul in python"""
import numpy as np
def batch_matmul(x, y, out_dtype=None, trans_x=False, trans_y=True):
    """batch_matmul operator implemented in numpy.

    Parameters
    ----------
    x : numpy.ndarray
        3-D with shape [batch, M, K] ([batch, K, M] when ``trans_x``)

    y : numpy.ndarray
        3-D with shape [batch, N, K] ([batch, K, N] when not ``trans_y``)

    out_dtype: string, optional
        Specify the dtype of output

    Returns
    -------
    out : numpy.ndarray
        3-D with shape [batch, M, N]
    """
    x_batch = x.shape[0]
    y_batch = y.shape[0]
    m_size = x.shape[2] if trans_x else x.shape[1]
    n_size = y.shape[1] if trans_y else y.shape[2]
    batch = max(x_batch, y_batch)
    dtype = out_dtype if out_dtype is not None else x.dtype
    out = np.zeros((batch, m_size, n_size)).astype(dtype)
    for b in range(batch):
        # A singleton batch dimension broadcasts against the larger one.
        lhs = x[b if x_batch != 1 else 0].astype(dtype)
        rhs = y[b if y_batch != 1 else 0].astype(dtype)
        if trans_x:
            lhs = lhs.T
        if trans_y:
            rhs = rhs.T
        out[b] = np.dot(lhs, rhs)
    return out
| 1,838 | 29.65 | 68 | py |
tvm | tvm-main/python/tvm/topi/testing/conv2d_nchw_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals, too-many-branches
"""Convolution in python"""
import numpy as np
import scipy
from tvm.topi.nn.utils import get_pad_tuple
def _conv2d_nchw_python(a_np, w_np, stride, padding):
    """Convolution operator in NCHW layout.

    Parameters
    ----------
    a_np : numpy.ndarray
        4-D with shape [batch, in_channel, in_height, in_width]

    w_np : numpy.ndarray
        4-D with shape [num_filter, in_channel, filter_height, filter_width]

    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]

    padding : int or str or a list/tuple of 2 or 4 ints
        Padding size, or ['VALID', 'SAME'], or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 2 ints

    Returns
    -------
    b_np : np.ndarray
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    batch, in_channel, in_height, in_width = a_np.shape
    num_filter, _, kernel_h, kernel_w = w_np.shape
    stride_h, stride_w = (stride, stride) if isinstance(stride, int) else stride
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel_h, kernel_w))
    pad_h = pad_top + pad_bottom
    pad_w = pad_left + pad_right
    # Output spatial extents of the padded, strided convolution.
    out_height = (in_height - kernel_h + pad_h) // stride_h + 1
    out_width = (in_width - kernel_w + pad_w) // stride_w + 1
    b_np = np.zeros((batch, num_filter, out_height, out_width), dtype=a_np.dtype)
    has_pad = pad_h > 0 or pad_w > 0
    for n in range(batch):
        for f in range(num_filter):
            for c in range(in_channel):
                if has_pad:
                    apad = np.zeros((in_height + pad_h, in_width + pad_w), dtype=a_np.dtype)
                    apad[pad_top : pad_top + in_height, pad_left : pad_left + in_width] = a_np[
                        n, c
                    ]
                else:
                    apad = a_np[n, c]
                # Dense (stride 1) convolution, then subsample by the strides.
                dense = _conv2d_hw(apad, w_np[f, c])
                b_np[n, f] += dense[::stride_h, ::stride_w]
    return b_np
def _conv2d_hw(apad, w_np_fc):
"""2d convolution operator in HW layout.
This is intended to be used as a subroutine from
_conv2d_nchw_python. Using scipy.signal.convolve2d directly does
not work for all dtypes (e.g. float16). Where possible, this
function uses scipy.signal.convolve2d to take advantage of
compiled scipy routines, falling back to an explicit loop only
where needed
Parameters
----------
a_np : numpy.ndarray
2-D with shape [in_height, in_width]
w_np : numpy.ndarray
2-D with shape [filter_height, filter_width].
Returns
-------
b_np : np.ndarray
2-D with shape [out_height, out_width]
"""
try:
return scipy.signal.convolve2d(apad, np.rot90(np.rot90(w_np_fc)), mode="valid")
except ValueError:
pass
assert len(apad.shape) == len(w_np_fc.shape) == 2
dtype = apad.dtype
in_height, in_width = apad.shape
kernel_h, kernel_w = w_np_fc.shape
output_shape = [a_dim - w_dim + 1 for a_dim, w_dim in zip(apad.shape, w_np_fc.shape)]
output = np.zeros(output_shape, dtype=apad.dtype)
for y in range(output_shape[0]):
for x in range(output_shape[1]):
output[y][x] = np.sum(apad[y : y + kernel_h, x : x + kernel_w] * w_np_fc)
return output
def conv2d_nchw_python(a_np, w_np, stride, padding, groups=1):
    """Convolution operator in NCHW layout.

    Parameters
    ----------
    a_np : numpy.ndarray
        4-D with shape [batch, in_channel, in_height, in_width]

    w_np : numpy.ndarray
        4-D with shape [num_filter, in_channel // groups, filter_height, filter_width]

    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]

    padding : int or str or a list/tuple of 2 or 4 ints
        Padding size, or ['VALID', 'SAME'], or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 2 ints

    groups : int
        Number of groups

    Returns
    -------
    b_np : np.ndarray
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    # Run each group as an independent convolution over its slice of the
    # input channels / filters, then stitch the outputs back together.
    per_group = []
    for a_slice, w_slice in zip(
        np.array_split(a_np, groups, axis=1), np.array_split(w_np, groups, axis=0)
    ):
        per_group.append(_conv2d_nchw_python(a_slice, w_slice, stride, padding))
    return np.concatenate(per_group, axis=1)
| 5,383 | 33.075949 | 100 | py |
tvm | tvm-main/python/tvm/topi/testing/instance_norm_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Instance normalization in python"""
import numpy as np
def instance_norm_python(data, gamma, beta, axis, epsilon=1e-5):
    """Instance normalization operator in Python.

    Parameters
    ----------
    data : numpy.ndarray
        N-D with shape (d_0, d_1, ..., d_{N-1})

    gamma: numpy.ndarray
        K-D with shape (r_0, r_1, ..., r_{K-1}) where K == len(axis) and d_{axis_k} == r_k

    beta: numpy.ndarray
        Optional, K-D with shape (r_0, r_1, ..., r_{K-1}) where K == len(axis) and d_{axis_k} == r_k

    axis : int or tuple of ints
        Axis over the normalization applied

    epsilon : float
        The epsilon value to avoid division by zero.

    Returns
    -------
    result : np.ndarray
        N-D with shape (d_0, d_1, ..., d_{N-1})
    """
    # keepdims so the per-instance statistics broadcast back over `axis`.
    mu = np.mean(data, axis, keepdims=True)
    sigma_sq = np.var(data, axis, keepdims=True)
    normalized = ((data - mu) / np.sqrt(sigma_sq + epsilon)) * gamma
    if beta is None:
        return normalized
    return normalized + beta
| 1,871 | 33.666667 | 100 | py |
tvm | tvm-main/python/tvm/topi/testing/conv2d_backcward_weight_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-nested-blocks
"""Gradient of conv2d with respect to weight in python"""
import numpy as np
# Reference: cutlass/tools/util/include/cutlass/util/reference/host/convolution.h
def conv2d_backward_weight_nchw_python(
    dy_np, x_np, kernel_size, stride, padding, groups=1, channels=None
):
    """Gradient of the conv2d op with respect to weight, in NCHW layout.

    Direct (nested-loop) reference implementation: for each weight element
    (k, c, r, s) it accumulates dy * x over every output position whose
    receptive field covers that element.

    Parameters
    ----------
    dy_np : numpy.ndarray
        4-D with shape [batch, num_filter, out_height, out_width]

    x_np : numpy.ndarray
        4-D with shape [batch, in_channel, in_height, in_width]

    kernel_size : tuple of two ints
        Height and width of the weight

    stride : tuple of two ints
        Stride size, or [stride_height, stride_width]

    padding : tuple of two ints
        Spatial padding, or [pad_h, pad_w]

    groups : int
        Number of groups; only 1 (dense) or depthwise
        (groups == in_channel == num_filter) is supported.

    channels : int
        Number of output channels; only consulted in the depthwise case,
        where it must equal ``groups`` (channel multiplier of 1).

    Returns
    -------
    dw_np : np.ndarray
        4-D with shape [num_filter, in_channel, filter_height, filter_width]
        (the second dimension is 1 in the depthwise case)
    """
    N, C, H, W = x_np.shape
    _, K, P, Q = dy_np.shape
    R, S = kernel_size
    pad_h, pad_w = padding
    stride_h, stride_w = stride
    # Depthwise: each input channel forms its own group and produces
    # exactly one output channel.
    is_depth_wise = C == K and C == groups
    if is_depth_wise:
        assert channels == groups, "Only channel_mult == 1 supported for now."
        dw = np.zeros((K, 1, R, S)).astype(dy_np.dtype)
    else:
        assert groups == 1, "General grouped conv2d not supported for now."
        dw = np.zeros((K, C, R, S)).astype(dy_np.dtype)
    for k in range(K):
        for r in range(R):
            for s in range(S):
                for c in range(dw.shape[1]):
                    acc = 0
                    for n in range(N):
                        for p in range(P):
                            for q in range(Q):
                                # Depthwise: weight channel index 0 maps to
                                # input channel k; dense: to input channel c.
                                if not is_depth_wise:
                                    in_c = c
                                else:
                                    in_c = k
                                # Input coordinate multiplied with weight
                                # element (r, s) at output position (p, q).
                                coord = (
                                    n,
                                    in_c,
                                    p * stride_h - pad_h + r,
                                    q * stride_w - pad_w + s,
                                )
                                # Coordinates outside the input fall in the
                                # zero padding and contribute nothing.
                                if (
                                    coord[2] < H
                                    and coord[2] >= 0
                                    and coord[3] < W
                                    and coord[3] >= 0
                                ):
                                    acc += dy_np[n, k, p, q] * x_np[coord]
                    dw[k, c, r, s] = acc
    return dw
def conv2d_backward_weight_python(
    dy_np, x_np, kernel_size, stride, padding, layout="NCHW", groups=1, channels=None
):
    """Gradient of the conv2d op with respect to weight, in NCHW or NHWC layout.

    Parameters
    ----------
    dy_np : numpy.ndarray
        4-D with shape [batch, in_channel, out_height, out_width] for NCHW layout

    x_np : numpy.ndarray
        4-D with shape [batch, in_channel, in_height, in_width] for NCHW layout

    kernel_size : tuple of two ints
        Height and width of the weight

    stride : tuple of two ints
        Stride size, or [stride_height, stride_width]

    padding : tuple of two ints
        Spatial padding, or [pad_h, pad_w]

    layout: string
        Layout of dy_np and x_np

    groups: int
        Number of groups for grouped convolution.

    channels : int
        Number of output channels of this convolution.

    Returns
    -------
    dw_np : np.ndarray
        Tensor of shape [num_filter, in_channel, filter_height, filter_width] for NCHW layout,
        [num_filter, filter_height, filter_width, in_channel] for NHWC layout.
    """
    if layout == "NCHW":
        return conv2d_backward_weight_nchw_python(
            dy_np, x_np, kernel_size, stride, padding, groups, channels
        )
    # NHWC: move the activations to NCHW, reuse the NCHW reference, then
    # put the resulting OIHW weight gradient into OHWI order.
    dy_nchw = np.transpose(dy_np, [0, 3, 1, 2])
    x_nchw = np.transpose(x_np, [0, 3, 1, 2])
    dw_oihw = conv2d_backward_weight_nchw_python(
        dy_nchw, x_nchw, kernel_size, stride, padding, groups, channels
    )
    return np.transpose(dw_oihw, [0, 2, 3, 1])
| 4,974 | 32.166667 | 94 | py |
tvm | tvm-main/python/tvm/topi/testing/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TOPI Testing Util functions.
Used to verify the correctness of operators in TOPI .
"""
from __future__ import absolute_import as _abs
from .conv1d_ncw_python import conv1d_ncw_python, group_conv1d_ncw_python
from .conv2d_hwcn_python import conv2d_hwcn_python
from .conv2d_nchw_python import conv2d_nchw_python
from .conv2d_nhwc_python import conv2d_nhwc_python
from .conv3d_ncdhw_python import conv3d_ncdhw_python
from .conv3d_ndhwc_python import conv3d_ndhwc_python
from .conv3d_transpose_ncdhw_python import conv3d_transpose_ncdhw_python
from .conv2d_transpose_python import conv2d_transpose_nchw_python, conv2d_transpose_nhwc_python
from .conv1d_transpose_ncw_python import conv1d_transpose_ncw_python
from .correlation_nchw_python import correlation_nchw_python
from .deformable_conv2d_python import deformable_conv2d_nchw_python, deformable_conv2d_nhwc_python
from .depthwise_conv2d_python import (
depthwise_conv2d_python_nchw,
depthwise_conv2d_python_nhwc,
depthwise_conv2d_python_nchwc,
)
from .dilate_python import dilate_python
from .softmax_python import softmax_python, log_softmax_python
from .resize_python import resize1d_python, resize2d_python, resize3d_python
from .reorg_python import reorg_python
from .roi_align_python import roi_align_nchw_python, roi_align_nhwc_python
from .roi_pool_python import roi_pool_nchw_python
from .instance_norm_python import instance_norm_python
from .layer_norm_python import layer_norm_python
from .group_norm_python import group_norm_python
from .lrn_python import lrn_python
from .l2_normalize_python import l2_normalize_python
from .gather_python import gather_python
from .gather_nd_python import gather_nd_python
from .strided_slice_python import strided_slice_python, strided_set_python
from .batch_matmul import batch_matmul
from .batch_norm import batch_norm
from .slice_axis_python import slice_axis_python
from .sequence_mask_python import sequence_mask
from .poolnd_python import poolnd_python
from .pool_grad_python import pool_grad_nchw
from .one_hot import one_hot
from .depth_to_space import depth_to_space_python
from .space_to_depth import space_to_depth_python
from .crop_and_resize_python import crop_and_resize_python
from .common import (
compare_numpy_tvm,
get_injective_schedule,
get_reduce_schedule,
get_broadcast_schedule,
get_elemwise_schedule,
get_conv2d_nchw_implement,
dispatch,
)
from .adaptive_pool_python import adaptive_pool
from .grid_sample_python import affine_grid_python, grid_sample_python
from .matrix_set_diag import matrix_set_diag
from .space_to_batch_nd import space_to_batch_nd_python
from .batch_to_space_nd import batch_to_space_nd_python
from .nll_loss import nll_loss
from .dense import dense
from .searchsorted import searchsorted_ref
from .conv2d_backcward_weight_python import conv2d_backward_weight_python
from .lstm_python import lstm_python
| 3,680 | 43.349398 | 98 | py |
tvm | tvm-main/python/tvm/topi/testing/space_to_depth.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Space to depth in python"""
import numpy as np
def space_to_depth_python(data, block_size):
    """Space to Depth operator in python for NCHW layout.

    Parameters
    ----------
    data : np.ndarray
        4-D with shape [batch, in_channel, in_height, in_width]

    block_size : int
        Size of spatial blocks to decompose into channels.

    Returns
    -------
    d2s_out : np.ndarray
        4-D with shape [batch, in_channel * (block_size * block_size),
                        in_height / block_size, in_width / block_size]
    """
    in_n, in_c, in_h, in_w = data.shape
    new_h = in_h // block_size
    # Bug fix: width must be derived from in_w — the previous int(in_h /
    # block_size) broke (reshape error) on non-square inputs.
    new_w = in_w // block_size
    new_c = in_c * block_size * block_size
    # Split each spatial dimension into (blocks, block_size), then move the
    # two block_size axes ahead of the channel axis.
    expanded = np.reshape(data, newshape=[in_n, in_c, new_h, block_size, new_w, block_size])
    transposed = np.transpose(expanded, axes=[0, 3, 5, 1, 2, 4])
    d2s_out = np.reshape(transposed, newshape=[in_n, new_c, new_h, new_w])
    return d2s_out
| 1,857 | 36.918367 | 92 | py |
tvm | tvm-main/python/tvm/topi/testing/space_to_batch_nd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Space to batch ND in python"""
import numpy as np
def space_to_batch_nd_python(data, block_shape, pad_before, pad_after, pad_value=0):
    """Space to Batch operator in python for NHWC layout.

    Parameters
    ----------
    data : np.ndarray
        N-D with shape [batch, spatial_shape, remaining_shapes],
        where spatial_shape has M dimensions.

    block_shape : list of ints
        1-D array of size [M] where M is number of spatial dims, specifies block
        size for each spatial dimension.

    pad_before : list of ints
        list of shape [M] where M is number of spatial dims, specifies
        zero-padding size before each spatial dimension.

    pad_after : list of ints
        list of shape [M] where M is number of spatial dims, specifies
        zero-padding size after each spatial dimension.

    pad_value : float, optional
        the value used for padding. Defaults to 0.

    Returns
    -------
    s2b_out : np.ndarray
        N-D with shape [batch * prod(block_shape),
                        padded_data[1] / block_shape[0], ...,
                        padded_data[M] / block_shape[M-1],
                        remaining_shape]
    """
    num_spatial = len(block_shape)
    batch = data.shape[0]

    # Pad only the M spatial dimensions with pad_value.
    pad_widths = [[0, 0]]
    pad_widths += [[before, after] for before, after in zip(pad_before, pad_after)]
    pad_widths += [[0, 0]] * (data.ndim - 1 - num_spatial)
    padded = np.pad(data, pad_widths, mode="constant", constant_values=pad_value)

    # Split every padded spatial dim into (dim // block, block).
    split_shape = [batch]
    for dim, block in zip(padded.shape[1 : num_spatial + 1], block_shape):
        split_shape += [int(dim // block), block]
    remaining = list(data.shape[num_spatial + 1 :])
    split_shape += remaining
    reshaped = np.reshape(padded, newshape=split_shape)

    # Move the block axes to the front, ahead of the batch axis.
    block_axes = [2 * i + 2 for i in range(num_spatial)]
    quotient_axes = [2 * i + 1 for i in range(num_spatial)]
    tail_axes = list(range(2 * num_spatial + 1, len(split_shape)))
    permuted = np.transpose(reshaped, axes=block_axes + [0] + quotient_axes + tail_axes)

    # Fold the block axes into the batch dimension.
    out_shape = [int(batch * np.prod(block_shape))]
    out_shape += [int(d // b) for d, b in zip(padded.shape[1 : num_spatial + 1], block_shape)]
    out_shape += remaining
    return np.reshape(permuted, newshape=out_shape)
| 3,453 | 35.744681 | 96 | py |
tvm | tvm-main/python/tvm/topi/testing/conv2d_transpose_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable
"""Transposed convolution in python"""
import numpy as np
import scipy
import tvm.topi.testing
from tvm.topi.nn.utils import get_pad_tuple
def _conv2d_transpose_nchw_python(a_np, w_np, stride, padding, output_padding):
    """Transposed convolution operator in NCHW layout.

    Implemented as: dilate the input by the strides, apply the "backward"
    padding derived from the forward padding, then run a stride-1
    convolution per (input-channel, output-channel) pair.

    Parameters
    ----------
    a_np : numpy.ndarray
        4-D with shape [batch, in_channel, in_height, in_width]

    w_np : numpy.ndarray
        4-D with shape [in_channel, num_filter, filter_height, filter_width]

    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]

    padding : int or str
        Padding size, or ['VALID', 'SAME']

    output_padding : int or a list/tuple of two ints
        Use to disambiguate the output shape.

    Returns
    -------
    b_np : np.ndarray
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    batch, in_c, in_h, in_w = a_np.shape
    _, out_c, filter_h, filter_w = w_np.shape
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    if isinstance(output_padding, int):
        opad_h = opad_w = output_padding
    else:
        opad_h, opad_w = output_padding
    # Output padding larger than the stride would be ambiguous.
    assert opad_h < stride_h and opad_w < stride_w
    # dilate stage: insert stride-1 zeros between input elements.
    dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_h, stride_w])
    # padding stage: the transposed conv's effective padding is the
    # "inverse" of the forward padding: (filter - 1 - fpad) on each side,
    # plus the output padding on the bottom/right.
    fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
    bpad_top = filter_h - 1 - fpad_top
    bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
    bpad_left = filter_w - 1 - fpad_left
    bpad_right = filter_w - 1 - fpad_right + opad_w
    padded_a_np = np.zeros(
        (
            batch,
            in_c,
            dilated_a_np.shape[2] + bpad_top + bpad_bottom,
            dilated_a_np.shape[3] + bpad_left + bpad_right,
        )
    ).astype(a_np.dtype)
    padded_a_np[
        :,
        :,
        bpad_top : dilated_a_np.shape[2] + bpad_top,
        bpad_left : dilated_a_np.shape[3] + bpad_left,
    ] = dilated_a_np
    # convolution stage: stride-1 "valid" convolution over the padded,
    # dilated input, accumulated over input channels.
    out_h = (in_h - 1) * stride_h - fpad_top - fpad_bottom + filter_h + opad_h
    out_w = (in_w - 1) * stride_w - fpad_left - fpad_right + filter_w + opad_w
    b_np = np.zeros((batch, out_c, out_h, out_w)).astype(a_np.dtype)
    for n in range(batch):
        for f in range(out_c):
            for c in range(in_c):
                # convolve2d flips the kernel, which provides the weight
                # reversal a transposed convolution requires.
                out = scipy.signal.convolve2d(padded_a_np[n, c], w_np[c, f], mode="valid")
                b_np[n, f] += out
    return b_np
def conv2d_transpose_nhwc_python(
    a_nhwc, weight, weight_format, stride, padding, output_padding=(0, 0)
):
    """Transposed convolution reference for NHWC activations.

    Converts the activations to NCHW and the kernel to IOHW, delegates to
    ``conv2d_transpose_nchw_python``, and converts the result back to NHWC.

    Parameters
    ----------
    a_nhwc : numpy.ndarray
        4-D with shape [batch, in_height, in_width, in_channel]
    weight : numpy.ndarray
        4-D in formats HWIO, HWOI, OIHW or IOHW
    weight_format : str
        ['HWIO', 'HWOI', 'OIHW', 'IOHW']
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    output_padding : int or a list/tuple of two ints
        Used to disambiguate the output shape.

    Returns
    -------
    numpy.ndarray
        4-D result in NHWC layout.
    """
    assert a_nhwc.ndim == 4, "a_nhwc number of dimensions should be 4"
    assert weight.ndim == 4, "weight number of dimensions should be 4"
    # conv2d_transpose_nchw_python needs the kernel laid out as IOHW.
    iohw_axes = {
        "HWIO": (2, 3, 0, 1),
        "HWOI": (3, 2, 0, 1),
        "OIHW": (1, 0, 2, 3),
    }
    if weight_format == "IOHW":
        w_iohw = weight
    elif weight_format in iohw_axes:
        w_iohw = np.transpose(weight, iohw_axes[weight_format])
    else:
        raise ValueError("Valid weight_formats are HWIO, HWOI, OIHW or IOHW")
    a_nchw = np.transpose(a_nhwc, (0, 3, 1, 2))
    res_nchw = conv2d_transpose_nchw_python(
        a_nchw, w_iohw, stride, padding, output_padding=output_padding
    )
    return np.transpose(res_nchw, (0, 2, 3, 1))
def conv2d_transpose_nchw_python(a_np, w_np, stride, padding, output_padding, groups=1):
    """Transposed (optionally grouped) convolution operator in NCHW layout.

    Parameters
    ----------
    a_np : numpy.ndarray
        4-D with shape [batch, in_channel, in_height, in_width]
    w_np : numpy.ndarray
        4-D with shape [in_channel, num_filter // groups, filter_height, filter_width]
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    output_padding : int or a list/tuple of two ints
        Use to disambiguate the output shape.
    groups : int
        Number of groups

    Returns
    -------
    b_np : np.ndarray
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    # Split activations and weights along their channel axes, run each
    # group independently, then stitch the outputs back along channels.
    group_results = []
    for a_part, w_part in zip(
        np.array_split(a_np, groups, axis=1), np.array_split(w_np, groups, axis=0)
    ):
        group_results.append(
            _conv2d_transpose_nchw_python(a_part, w_part, stride, padding, output_padding)
        )
    return np.concatenate(group_results, axis=1)
| 6,092 | 32.478022 | 95 | py |
tvm | tvm-main/python/tvm/topi/testing/conv3d_ncdhw_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals, too-many-branches
"""Convolution 3D in python"""
import numpy as np
import scipy.signal
from tvm.topi.nn.utils import get_pad_tuple3d
def _conv3d_ncdhw_python(a_np, w_np, stride, padding):
    """Single-group 3-D convolution: NCDHW activations, OIDHW kernel.

    Direct reference implementation: zero-pads each (batch, in_channel)
    volume, correlates it with the kernel via scipy at stride 1, then
    subsamples the dense result by the requested strides.
    """
    batch, in_channel, in_depth, in_height, in_width = a_np.shape
    num_filter, _, kernel_d, kernel_h, kernel_w = w_np.shape
    if isinstance(stride, int):
        stride_d = stride_h = stride_w = stride
    else:
        stride_d, stride_h, stride_w = stride
    pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = get_pad_tuple3d(
        padding, (kernel_d, kernel_h, kernel_w)
    )
    pad_d = pad_front + pad_back
    pad_h = pad_top + pad_bottom
    pad_w = pad_left + pad_right
    # compute the output shape
    out_channel = num_filter
    out_depth = (in_depth - kernel_d + pad_d) // stride_d + 1
    out_height = (in_height - kernel_h + pad_h) // stride_h + 1
    out_width = (in_width - kernel_w + pad_w) // stride_w + 1
    b_np = np.zeros((batch, out_channel, out_depth, out_height, out_width))
    # computation
    for n in range(batch):
        for f in range(out_channel):
            for c in range(in_channel):
                if pad_d > 0 or pad_h > 0 or pad_w > 0:
                    apad = np.zeros((in_depth + pad_d, in_height + pad_h, in_width + pad_w))
                    apad[
                        pad_front : pad_front + in_depth,
                        pad_top : pad_top + in_height,
                        pad_left : pad_left + in_width,
                    ] = a_np[n, c]
                else:
                    apad = a_np[n, c]
                # np.flip turns scipy's convolution into a correlation; the
                # dense stride-1 result is then subsampled by the strides.
                out = scipy.signal.convolve(apad, np.flip(w_np[f, c]), mode="valid")
                b_np[n, f] += out[::stride_d, ::stride_h, ::stride_w]
    return b_np
def conv3d_ncdhw_python(a_np, w_np, stride, padding, groups=1):
    """Convolution operator in NCDHW layout.

    Parameters
    ----------
    a_np : numpy.ndarray
        5-D with shape [batch, in_channel, in_depth, in_height, in_width]
    w_np : numpy.ndarray
        5-D with shape [num_filter, in_channel, filter_depth, filter_height, filter_width]
    stride : int or a list/tuple of three ints
        Stride size, or [stride_depth, stride_height, stride_width]
    padding : int or str or a list/tuple of three ints
        Padding size, or ['VALID', 'SAME'], or [pad_depth, pad_height, pad_width]
    groups : int
        Number of groups

    Returns
    -------
    b_np : np.ndarray
        5-D with shape [batch, out_channel, out_depth, out_height, out_width]
    """
    # Run each channel group independently and concatenate along channels.
    per_group = []
    for a_part, w_part in zip(
        np.array_split(a_np, groups, axis=1), np.array_split(w_np, groups, axis=0)
    ):
        per_group.append(_conv3d_ncdhw_python(a_part, w_part, stride, padding))
    return np.concatenate(per_group, axis=1)
| 3,711 | 37.666667 | 98 | py |
tvm | tvm-main/python/tvm/topi/testing/layer_norm_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Layer normalization in python"""
import numpy as np
def layer_norm_python(data, gamma, beta, axis, epsilon=1e-5):
    """Layer normalization operator in Python.

    Normalizes ``data`` to zero mean and unit variance over ``axis``,
    then applies the scale ``gamma`` and, when given, the shift ``beta``.

    Parameters
    ----------
    data : numpy.ndarray
        N-D with shape (d_0, d_1, ..., d_{N-1})
    gamma : numpy.ndarray
        K-D with shape (r_0, r_1, ..., r_{K-1}) where K == len(axis) and d_{axis_k} == r_k
    beta : numpy.ndarray or None
        Optional, same shape requirements as ``gamma``.
    axis : int or tuple of ints
        Axis over which the normalization is applied.
    epsilon : float
        The epsilon value to avoid division by zero.

    Returns
    -------
    numpy.ndarray
        N-D with shape (d_0, d_1, ..., d_{N-1})
    """
    mu = np.mean(data, axis, keepdims=True)
    sigma_sq = np.var(data, axis, keepdims=True)
    normalized = (data - mu) / np.sqrt(sigma_sq + epsilon)
    scaled = normalized * gamma
    if beta is None:
        return scaled
    return scaled + beta
| 1,862 | 33.5 | 100 | py |
tvm | tvm-main/python/tvm/topi/testing/gather_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""gather in python"""
import numpy as np
def gather_python(data, axis, indices):
    """Python version of Gather operator.

    For every position ``p`` in ``indices``, the output takes the value of
    ``data`` at ``p`` with its ``axis`` component replaced by ``indices[p]``.

    Parameters
    ----------
    data : numpy.ndarray
        Source array.
    axis : int
        Axis along which the indices select elements.
    indices : numpy.ndarray
        Integer index array; its shape is also the output shape.

    Returns
    -------
    numpy.ndarray
        Gathered values, shape ``indices.shape``, dtype ``data.dtype``.
    """
    # np.empty is safe here: the loop writes every output position.
    gathered = np.empty(indices.shape, dtype=data.dtype)
    for pos in np.ndindex(*indices.shape):
        src_pos = list(pos)
        src_pos[axis] = indices[pos]
        gathered[pos] = data[tuple(src_pos)]
    return gathered
| 1,487 | 30 | 79 | py |
tvm | tvm-main/python/tvm/topi/testing/conv3d_transpose_ncdhw_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals, too-many-branches
"""Convolution 3D transpose in python"""
import numpy as np
import tvm.topi.testing
from tvm.topi.nn.utils import get_pad_tuple3d
def conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding, output_padding):
    """Transposed 3d convolution operator in NCDHW layout.

    Implemented as: dilate the input by the stride, apply the "backward"
    padding, then run a stride-1 forward convolution with the spatially
    flipped kernel.

    Parameters
    ----------
    a_np : numpy.ndarray
        5-D with shape [batch, in_channel, in_depth, in_height, in_width]
    w_np : numpy.ndarray
        5-D with shape [in_channel, num_filter, filter_depth, filter_height, filter_width]
    stride : int or a list/tuple of three ints
        Stride size, or [stride_depth, stride_height, stride_width]
    padding : int or str
        Padding size
    output_padding : int or list/tuple of three ints
        Used to disambiguate output shape.

    Returns
    -------
    b_np : np.ndarray
        5-D with shape [batch, out_channel, out_depth, out_height, out_width]
    """
    batch, in_c, in_d, in_h, in_w = a_np.shape
    _, out_c, filter_d, filter_h, filter_w = w_np.shape
    if isinstance(stride, int):
        stride_d = stride_h = stride_w = stride
    else:
        stride_d, stride_h, stride_w = stride
    if isinstance(output_padding, int):
        opad_d = opad_h = opad_w = output_padding
    else:
        opad_d, opad_h, opad_w = output_padding
    # Output padding must be smaller than the stride, otherwise the same
    # output shape would be reachable from a larger input.
    assert opad_d < stride_d and opad_h < stride_h and opad_w < stride_w
    # dilate stage: insert (stride - 1) zeros between input elements
    dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_d, stride_h, stride_w])
    # padding stage: the "backward" pad of a transposed conv is
    # (kernel - 1 - forward_pad), with output_padding added on the far edges.
    fpad_front, fpad_top, fpad_left, fpad_back, fpad_bottom, fpad_right = get_pad_tuple3d(
        padding, (filter_d, filter_h, filter_w)
    )
    bpad_front = filter_d - 1 - fpad_front
    bpad_back = filter_d - 1 - fpad_back + opad_d
    bpad_top = filter_h - 1 - fpad_top
    bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
    bpad_left = filter_w - 1 - fpad_left
    bpad_right = filter_w - 1 - fpad_right + opad_w
    # NOTE(review): unlike the 2-D reference, the padded buffer keeps the
    # np.zeros default dtype (float64) instead of a_np.dtype; preserved
    # as-is to avoid changing existing numerical behavior.
    padded_a_np = np.zeros(
        (
            batch,
            in_c,
            dilated_a_np.shape[2] + bpad_front + bpad_back,
            dilated_a_np.shape[3] + bpad_top + bpad_bottom,
            dilated_a_np.shape[4] + bpad_left + bpad_right,
        )
    )
    padded_a_np[
        :,
        :,
        bpad_front : dilated_a_np.shape[2] + bpad_front,
        bpad_top : dilated_a_np.shape[3] + bpad_top,
        bpad_left : dilated_a_np.shape[4] + bpad_left,
    ] = dilated_a_np
    # convolution stage: flip the kernel spatially, swap its in/out channel
    # axes (IODHW -> OIDHW), and run a stride-1 "valid" forward convolution.
    # The output shape follows from the padded input; the previous unused
    # out_d/out_h/out_w computations (out_d even mixed bpad_* and fpad_*
    # inconsistently with the 2-D version) were dead code and are removed.
    w_np = np.flip(w_np, axis=[2, 3, 4]).transpose((1, 0, 2, 3, 4))
    b_np = tvm.topi.testing.conv3d_ncdhw_python(
        padded_a_np, w_np, stride=(1, 1, 1), padding=(0, 0, 0)
    )
    return b_np
| 3,750 | 34.72381 | 98 | py |
tvm | tvm-main/python/tvm/topi/testing/softmax_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, trailing-whitespace
"""Softmax and log_softmax operation in python"""
import numpy as np
def softmax_python(a_np, axis=1):
    """Softmax operator.

    Exp-normalizes ``a_np`` along ``axis``; the per-slice maximum is
    subtracted first for numerical stability.

    Parameters
    ----------
    a_np : numpy.ndarray
        N-D input data
    axis : int
        Axis along which the softmax is computed.

    Returns
    -------
    output_np : numpy.ndarray
        N-D output with same shape
    """
    shifted = a_np - np.amax(a_np, axis=axis, keepdims=True)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=axis, keepdims=True)
def log_softmax_python(a_np, axis=1):
    """Log_softmax operator.

    Computes ``log(softmax(a_np))`` along ``axis`` using the numerically
    stable shifted form.

    Parameters
    ----------
    a_np : numpy.ndarray
        N-D input data
    axis : int
        Axis along which the log-softmax is computed.

    Returns
    -------
    output_np : numpy.ndarray
        N-D output with same shape
    """
    shifted = a_np - np.amax(a_np, axis=axis, keepdims=True)
    log_norm = np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))
    return shifted - log_norm
| 1,766 | 29.465517 | 62 | py |
tvm | tvm-main/python/tvm/topi/testing/grid_sample_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""affine_grid and grid_sample operators in python"""
import math
import numpy as np
def affine_grid_python(data, target_shape):
    """Numpy reference for affine_grid.

    Applies batched 2x3 affine matrices ``data`` (N, 2, 3) to a regular
    grid of (x, y, 1) coordinates normalized to [-1, 1], returning the
    sampling grid with shape (N, 2, target_height, target_width).
    """
    height, width = target_shape
    rows, cols = np.meshgrid(np.arange(height), np.arange(width), indexing="ij")
    y_norm = rows * 2 / (height - 1) - 1
    x_norm = cols * 2 / (width - 1) - 1
    homogeneous = np.stack([x_norm, y_norm, np.ones_like(x_norm)]).reshape(3, -1)
    return data.reshape(-1, 3).dot(homogeneous).reshape(data.shape[0], 2, *target_shape)
def grid_sample_2d(
    data: np.ndarray,
    grid: np.ndarray,
    method="bilinear",
    layout="NCHW",
    padding_mode="zeros",
    align_corners=True,
):
    r"""grid_sample_2d for NCHW layout.

    Samples ``data`` (N, C, H, W) at the positions given by ``grid``
    (N, 2, H_out, W_out), where grid[:, 0] holds x and grid[:, 1] holds y
    coordinates normalized to [-1, 1].  ``method`` selects nearest /
    bilinear / bicubic interpolation; ``padding_mode`` controls how
    out-of-range sample points are handled; ``align_corners`` selects the
    coordinate unnormalization convention.
    """
    assert method in ("bilinear", "nearest", "bicubic"), f"{method} is not supported"
    assert layout == "NCHW"
    assert padding_mode in ("zeros", "border", "reflection"), f"{padding_mode} is not supported"
    assert len(data.shape) == len(grid.shape) == 4
    batch, channel = data.shape[:2]
    in_height, in_width = data.shape[2:]
    out_height, out_width = grid.shape[2:]
    out_shape = [batch, channel, out_height, out_width]
    out = np.zeros(out_shape)

    # Fetch one input pixel; out-of-bounds reads return 0 ("zeros" padding).
    def _get_pixel(b, c, h, w):
        if 0 <= h <= in_height - 1 and 0 <= w <= in_width - 1:
            return data[b, c, h, w]
        return 0

    # Map normalized [-1, 1] coordinates to (fractional) input pixel coords.
    def _unnormalize(h, w):
        if align_corners:
            new_h = (h + 1) * (in_height - 1) / 2
            new_w = (w + 1) * (in_width - 1) / 2
        else:
            new_h = -0.5 + (h + 1) * in_height / 2
            new_w = -0.5 + (w + 1) * in_width / 2
        return (new_h, new_w)

    # Clamp a coordinate into [0, size - 1] ("border" behavior).
    def _clip_coordinates(x, size):
        return min(max(x, 0), size - 1)

    # Reflect an out-of-range coordinate back into range; the reflection
    # axis depends on align_corners (pixel centers vs. image borders).
    def _reflect_coordinates(i, size):
        def __refelection(i, size, corner_start):
            def __reflect(index, size, corner_start):
                index_align_corner = abs(corner_start - index)
                size_times = index_align_corner // size
                even = size_times % 2 == 0
                extra = index_align_corner - size_times * size
                return extra + corner_start if even else size - extra + corner_start

            if corner_start <= i <= size + corner_start:
                new_i = i
            else:
                new_i = __reflect(i, size, corner_start)
            return new_i

        if align_corners:
            x = __refelection(i, size - 1, 0)
        else:
            x = __refelection(i, size, -0.5)
        return x

    # Read the sample position for output cell (h, w) and apply the
    # padding-mode coordinate transform (reflection also clips afterwards).
    def _compute_source_index(b, h, w):
        y = grid[b, 1, h, w]
        x = grid[b, 0, h, w]
        y, x = _unnormalize(y, x)
        if padding_mode == "reflection":
            y = _reflect_coordinates(y, in_height)
            x = _reflect_coordinates(x, in_width)
            y = _clip_coordinates(y, in_height)
            x = _clip_coordinates(x, in_width)
        elif padding_mode == "border":
            y = _clip_coordinates(y, in_height)
            x = _clip_coordinates(x, in_width)
        return (y, x)

    # Nearest-neighbor: round half away from zero and fetch one pixel.
    def _nearest_sample():
        for _b in range(batch):
            for _c in range(channel):
                for _h in range(out_height):
                    for _w in range(out_width):
                        y, x = _compute_source_index(_b, _h, _w)
                        # python round is not used here,
                        # beacause it is done toward the even choice
                        new_y = int(y + 0.5) if y > 0 else int(y - 0.5)
                        new_x = int(x + 0.5) if x > 0 else int(x - 0.5)
                        out[_b, _c, _h, _w] = _get_pixel(_b, _c, new_y, new_x)

    # Bilinear: weighted blend of the 2x2 neighborhood around (y, x).
    def _bilinear_sample():
        for _b in range(batch):
            for _c in range(channel):
                for _h in range(out_height):
                    for _w in range(out_width):
                        y, x = _compute_source_index(_b, _h, _w)
                        y0 = int(math.floor(y))
                        x0 = int(math.floor(x))
                        y1 = y0 + 1
                        x1 = x0 + 1
                        out[_b, _c, _h, _w] = (
                            _get_pixel(_b, _c, y0, x0) * (1.0 - (y - y0)) * (1.0 - (x - x0))
                            + _get_pixel(_b, _c, y0, x1) * (1.0 - (y - y0)) * (x - x0)
                            + _get_pixel(_b, _c, y1, x0) * (y - y0) * (1.0 - (x - x0))
                            + _get_pixel(_b, _c, y1, x1) * (y - y0) * (x - x0)
                        )

    # Bicubic: cubic convolution over a 4x4 neighborhood, separably
    # (first along x per row, then along y across the four row results).
    def _bicubic_sample():
        A = -0.75  # bicubic kernel coefficient

        def cubic_weight_1(x_fraction):
            return ((A + 2) * x_fraction - (A + 3)) * x_fraction * x_fraction + 1

        def cubic_weight_2(x_fraction):
            return ((A * x_fraction - 5 * A) * x_fraction + 8 * A) * x_fraction - 4 * A

        def cubic_interp_1d(pixel_0, pixel_1, pixel_2, pixel_3, x_fraction):
            weights = [0] * 4
            weights[0] = cubic_weight_2(x_fraction + 1)
            weights[1] = cubic_weight_1(x_fraction)
            weights[2] = cubic_weight_1(1 - x_fraction)
            weights[3] = cubic_weight_2(2 - x_fraction)
            return (
                pixel_0 * weights[0]
                + pixel_1 * weights[1]
                + pixel_2 * weights[2]
                + pixel_3 * weights[3]
            )

        # Interpolate along x for each of the 4 rows of the neighborhood;
        # border/reflection coordinate handling is applied per tap.
        def coefficients_along_x(x_floor, y_floor, x_fraction):
            coefficients = [0] * 4
            for i in range(4):
                y_ = y_floor - 1 + i
                x_0 = x_floor - 1
                x_1 = x_floor + 0
                x_2 = x_floor + 1
                x_3 = x_floor + 2
                if padding_mode == "border":
                    y_ = _clip_coordinates(y_, in_height)
                    x_0 = _clip_coordinates(x_0, in_width)
                    x_1 = _clip_coordinates(x_1, in_width)
                    x_2 = _clip_coordinates(x_2, in_width)
                    x_3 = _clip_coordinates(x_3, in_width)
                elif padding_mode == "reflection":
                    y_ = _reflect_coordinates(y_, in_height)
                    x_0 = _reflect_coordinates(x_0, in_width)
                    x_1 = _reflect_coordinates(x_1, in_width)
                    x_2 = _reflect_coordinates(x_2, in_width)
                    x_3 = _reflect_coordinates(x_3, in_width)
                    y_ = int(_clip_coordinates(y_, in_height))
                    x_0 = int(_clip_coordinates(x_0, in_width))
                    x_1 = int(_clip_coordinates(x_1, in_width))
                    x_2 = int(_clip_coordinates(x_2, in_width))
                    x_3 = int(_clip_coordinates(x_3, in_width))
                coefficients[i] = cubic_interp_1d(
                    _get_pixel(_b, _c, y_, x_0),
                    _get_pixel(_b, _c, y_, x_1),
                    _get_pixel(_b, _c, y_, x_2),
                    _get_pixel(_b, _c, y_, x_3),
                    x_fraction,
                )
            return coefficients

        for _b in range(batch):
            for _c in range(channel):
                for _h in range(out_height):
                    for _w in range(out_width):
                        y = grid[_b, 1, _h, _w]
                        x = grid[_b, 0, _h, _w]
                        y, x = _unnormalize(y, x)
                        y_floor = int(math.floor(y))
                        x_floor = int(math.floor(x))
                        y_fraction = y - y_floor
                        x_fraction = x - x_floor
                        coefficients = coefficients_along_x(x_floor, y_floor, x_fraction)
                        out[_b, _c, _h, _w] = cubic_interp_1d(
                            coefficients[0],
                            coefficients[1],
                            coefficients[2],
                            coefficients[3],
                            y_fraction,
                        )

    if method == "bilinear":
        _bilinear_sample()
    elif method == "nearest":
        _nearest_sample()
    else:  # mode == "bicubic":
        _bicubic_sample()
    return out
def grid_sample_3d(
    data: np.ndarray,
    grid: np.ndarray,
    method="bilinear",
    layout="NCDHW",
    padding_mode="zeros",
    align_corners=True,
):
    r"""grid_sample_3d for NCDHW layout.

    Samples ``data`` (N, C, D, H, W) at the positions given by ``grid``
    (N, 3, D_out, H_out, W_out), where grid[:, 0] holds x, grid[:, 1]
    holds y and grid[:, 2] holds z coordinates normalized to [-1, 1].
    ``method`` selects nearest / trilinear ("bilinear") interpolation;
    ``padding_mode`` controls out-of-range handling; ``align_corners``
    selects the coordinate unnormalization convention.
    """
    assert method in ("bilinear", "nearest"), f"{method} is not supported"
    assert layout == "NCDHW"
    assert padding_mode in ("zeros", "border", "reflection"), f"{padding_mode} is not supported"
    assert len(data.shape) == len(grid.shape) == 5
    batch, channel = data.shape[:2]
    in_depth, in_height, in_width = data.shape[2:]
    out_depth, out_height, out_width = grid.shape[2:]
    out_shape = [batch, channel, out_depth, out_height, out_width]
    out = np.zeros(out_shape)

    # Fetch one input voxel; out-of-bounds reads return 0 ("zeros" padding).
    def _get_pixel(b, c, d, h, w):
        if 0 <= d <= in_depth - 1 and 0 <= h <= in_height - 1 and 0 <= w <= in_width - 1:
            return data[b, c, d, h, w]
        return 0

    # Map normalized [-1, 1] coordinates to (fractional) input voxel coords.
    def _unnormalize(d, h, w):
        if align_corners:
            new_d = (d + 1) * (in_depth - 1) / 2
            new_h = (h + 1) * (in_height - 1) / 2
            new_w = (w + 1) * (in_width - 1) / 2
        else:
            new_d = -0.5 + (d + 1) * in_depth / 2
            new_h = -0.5 + (h + 1) * in_height / 2
            new_w = -0.5 + (w + 1) * in_width / 2
        return (new_d, new_h, new_w)

    # Clamp a coordinate into [0, size - 1] ("border" behavior).
    def _clip_coordinates(x, size):
        return min(max(x, 0), size - 1)

    # Reflect an out-of-range coordinate back into range; the reflection
    # axis depends on align_corners (voxel centers vs. volume borders).
    def _reflect_coordinates(i, size):
        def __refelection(i, size, corner_start):
            def __reflect(index, size, corner_start):
                index_align_corner = abs(corner_start - index)
                size_times = index_align_corner // size
                even = size_times % 2 == 0
                extra = index_align_corner - size_times * size
                return extra + corner_start if even else size - extra + corner_start

            if corner_start <= i <= size + corner_start:
                new_i = i
            else:
                new_i = __reflect(i, size, corner_start)
            return new_i

        if align_corners:
            x = __refelection(i, size - 1, 0)
        else:
            x = __refelection(i, size, -0.5)
        return x

    # Read the sample position for output cell (d, h, w) and apply the
    # padding-mode coordinate transform (reflection also clips afterwards).
    def _compute_source_index(b, d, h, w):
        z = grid[b, 2, d, h, w]
        y = grid[b, 1, d, h, w]
        x = grid[b, 0, d, h, w]
        z, y, x = _unnormalize(z, y, x)
        if padding_mode == "reflection":
            z = _reflect_coordinates(z, in_depth)
            y = _reflect_coordinates(y, in_height)
            x = _reflect_coordinates(x, in_width)
            z = _clip_coordinates(z, in_depth)
            y = _clip_coordinates(y, in_height)
            x = _clip_coordinates(x, in_width)
        elif padding_mode == "border":
            z = _clip_coordinates(z, in_depth)
            y = _clip_coordinates(y, in_height)
            x = _clip_coordinates(x, in_width)
        return (z, y, x)

    # Nearest-neighbor: round half away from zero and fetch one voxel.
    def _nearest_sample():
        for _b in range(batch):
            for _c in range(channel):
                for _d in range(out_depth):
                    for _h in range(out_height):
                        for _w in range(out_width):
                            z, y, x = _compute_source_index(_b, _d, _h, _w)
                            # python round is not used here,
                            # beacause it is done toward the even choice
                            new_z = int(z + 0.5) if z > 0 else int(z - 0.5)
                            new_y = int(y + 0.5) if y > 0 else int(y - 0.5)
                            new_x = int(x + 0.5) if x > 0 else int(x - 0.5)
                            out[_b, _c, _d, _h, _w] = _get_pixel(_b, _c, new_z, new_y, new_x)

    # Trilinear blend of the 2x2x2 neighborhood around (z, y, x).
    # (Name keeps the original spelling; it is internal to this function.)
    def _triilinear_sample():
        for _b in range(batch):
            for _c in range(channel):
                for _d in range(out_depth):
                    for _h in range(out_height):
                        for _w in range(out_width):
                            z, y, x = _compute_source_index(_b, _d, _h, _w)
                            z0 = int(math.floor(z))
                            y0 = int(math.floor(y))
                            x0 = int(math.floor(x))
                            z1 = z0 + 1
                            y1 = y0 + 1
                            x1 = x0 + 1
                            out[_b, _c, _d, _h, _w] = (
                                _get_pixel(_b, _c, z0, y0, x0)
                                * (1 - (x - x0))
                                * (1 - (y - y0))
                                * (1 - (z - z0))
                                + _get_pixel(_b, _c, z0, y0, x1)
                                * (x - x0)
                                * (1 - (y - y0))
                                * (1 - (z - z0))
                                + _get_pixel(_b, _c, z1, y1, x0)
                                * (1 - (x - x0))
                                * (y - y0)
                                * (z - z0)
                                + _get_pixel(_b, _c, z1, y1, x1) * (x - x0) * (y - y0) * (z - z0)
                                + _get_pixel(_b, _c, z0, y1, x0)
                                * (1 - (x - x0))
                                * (y - y0)
                                * (1 - (z - z0))
                                + _get_pixel(_b, _c, z1, y0, x1)
                                * (x - x0)
                                * (1 - (y - y0))
                                * (z - z0)
                                + _get_pixel(_b, _c, z1, y0, x0)
                                * (1 - (x - x0))
                                * (1 - (y - y0))
                                * (z - z0)
                                + _get_pixel(_b, _c, z0, y1, x1)
                                * (x - x0)
                                * (y - y0)
                                * (1 - (z - z0))
                            )

    if method == "bilinear":
        _triilinear_sample()
    else:  # method == "nearest":
        _nearest_sample()
    return out
def grid_sample_python(
    data: np.ndarray,
    grid: np.ndarray,
    method="bilinear",
    layout="NCHW",
    padding_mode="zeros",
    align_corners=True,
):
    r"""Dispatch to grid_sample_2d (4-D/NCHW) or grid_sample_3d (5-D/NCDHW)."""
    rank = len(data.shape)
    if rank == 4:
        return grid_sample_2d(data, grid, method, layout, padding_mode, align_corners)
    if rank == 5:
        return grid_sample_3d(data, grid, method, layout, padding_mode, align_corners)
    raise ValueError("invalid shape")
| 15,371 | 37.720403 | 97 | py |
tvm | tvm-main/python/tvm/topi/testing/l2_normalize_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""L2 normalize in python"""
import numpy as np
def l2_normalize_python(a_np, eps, axis=None):
"""L2 normalize operator in NCHW layout.
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
eps : float
epsilon constant value
axis : list of int
axis over the normalization applied
Returns
-------
l2_normalize_out : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
dot_value = np.power(a_np, 2.0)
sqr_sum = np.sum(dot_value, axis, keepdims=True)
sqrt_sum = np.sqrt(np.maximum(np.broadcast_to(sqr_sum, a_np.shape), eps))
l2_normalize_out = np.divide(a_np, sqrt_sum)
return l2_normalize_out
| 1,620 | 35.022222 | 79 | py |
tvm | tvm-main/python/tvm/topi/testing/crop_and_resize_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals, too-many-nested-blocks
"""crop and resize in python"""
import math
import numpy as np
def crop_and_resize_python(
    image, boxes, box_indices, crop_size, layout, method="bilinear", extrapolation_value=0
):
    """Crop and resize using python.

    For each box, samples a ``crop_size`` grid of points spanning the box
    (box coordinates are normalized: they are scaled by height-1/width-1
    before use) from the image selected by ``box_indices``.  Sample points
    falling outside the image produce ``extrapolation_value``.

    Parameters
    ----------
    image : numpy.ndarray
        4-D batch of images, NHWC or NCHW depending on ``layout``.
    boxes : numpy.ndarray
        2-D; each row is [y1, x1, y2, x2] for one crop.
    box_indices : numpy.ndarray
        1-D; batch index into ``image`` for each box.
    crop_size : tuple of two ints
        (crop_height, crop_width) of every output crop.
    layout : str
        "NHWC" or "NCHW".
    method : str
        "bilinear" or "nearest_neighbor".
    extrapolation_value : float
        Value written for out-of-image sample points.

    Returns
    -------
    numpy.ndarray
        One resized crop per box, same layout as the input.
    """
    (target_h, target_w) = crop_size
    if layout == "NHWC":
        batch = boxes.shape[0]
        image_height, image_width, channel = image.shape[1], image.shape[2], image.shape[3]
        scaled_image = np.ones((batch, target_h, target_w, channel))
    else:
        batch = boxes.shape[0]
        channel, image_height, image_width = image.shape[1], image.shape[2], image.shape[3]
        scaled_image = np.ones((batch, channel, target_h, target_w))
    for n, box in enumerate(boxes):
        b_in = box_indices[n]
        y1, x1 = boxes[n][0], boxes[n][1]
        y2, x2 = boxes[n][2], boxes[n][3]
        # Box extent in pixels; per-axis step between output sample points.
        in_h = (image_height - 1) * (y2 - y1)
        in_w = (image_width - 1) * (x2 - x1)
        h_scale = np.float32(in_h) / np.float32(target_h - 1)
        w_scale = np.float32(in_w) / np.float32(target_w - 1)
        for y in range(target_h):
            in_y = y1 * (image_height - 1) + h_scale * y
            # Whole output row samples outside the image -> extrapolate.
            if in_y < 0 or in_y > image_height - 1:
                for x in range(target_w):
                    for d in range(channel):
                        if layout == "NHWC":
                            scaled_image[n][y][x][d] = extrapolation_value
                        else:
                            scaled_image[n][d][y][x] = extrapolation_value
                continue
            if method == "bilinear":
                top_y_index = math.floor(in_y)
                bottom_y_index = math.ceil(in_y)
                y_lerp = in_y - top_y_index
                for x in range(target_w):
                    in_x = x1 * (image_width - 1) + x * w_scale
                    if in_x < 0 or in_x > image_width - 1:
                        for d in range(channel):
                            if layout == "NHWC":
                                scaled_image[n][y][x][d] = extrapolation_value
                            else:
                                scaled_image[n][d][y][x] = extrapolation_value
                        continue
                    left_x_index = math.floor(in_x)
                    right_x_index = math.ceil(in_x)
                    x_lerp = in_x - left_x_index
                    # Blend the 2x2 neighborhood: lerp along x, then y.
                    for d in range(channel):
                        if layout == "NHWC":
                            top_left = image[b_in][top_y_index][left_x_index][d]
                            top_right = image[b_in][top_y_index][right_x_index][d]
                            bottom_left = image[b_in][bottom_y_index][left_x_index][d]
                            bottom_right = image[b_in][bottom_y_index][right_x_index][d]
                            top = top_left + (top_right - top_left) * x_lerp
                            bottom = bottom_left + (bottom_right - bottom_left) * x_lerp
                            scaled_image[n][y][x][d] = top + (bottom - top) * y_lerp
                        else:
                            top_left = image[b_in][d][top_y_index][left_x_index]
                            top_right = image[b_in][d][top_y_index][right_x_index]
                            bottom_left = image[b_in][d][bottom_y_index][left_x_index]
                            bottom_right = image[b_in][d][bottom_y_index][right_x_index]
                            top = top_left + (top_right - top_left) * x_lerp
                            bottom = bottom_left + (bottom_right - bottom_left) * x_lerp
                            scaled_image[n][d][y][x] = top + (bottom - top) * y_lerp
            elif method == "nearest_neighbor":
                for x in range(target_w):
                    in_x = x1 * (image_width - 1) + x * w_scale
                    if in_x < 0 or in_x > image_width - 1:
                        for d in range(channel):
                            if layout == "NHWC":
                                scaled_image[n][y][x][d] = extrapolation_value
                            else:
                                scaled_image[n][d][y][x] = extrapolation_value
                        continue
                    closest_x_index = np.round(in_x).astype("int32")
                    closest_y_index = np.round(in_y).astype("int32")
                    for d in range(channel):
                        if layout == "NHWC":
                            scaled_image[n][y][x][d] = image[b_in][closest_y_index][
                                closest_x_index
                            ][d]
                        else:
                            scaled_image[n][d][y][x] = image[b_in][d][closest_y_index][
                                closest_x_index
                            ]
    return scaled_image
| 5,697 | 46.090909 | 103 | py |
tvm | tvm-main/python/tvm/topi/testing/dilate_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Dilate operation in python"""
import numpy as np
def dilate_python(input_np, strides, dilation_value=0.0, out_dtype=None):
    """Reference dilate operation implemented with NumPy.

    Each input element is placed at a strided position in a larger output
    array; all other output positions take ``dilation_value``.

    Parameters
    ----------
    input_np : numpy.ndarray
        n-D input, any layout.
    strides : list / tuple of n ints
        Dilation stride per dimension; 1 means no dilation on that axis.
    dilation_value : int/float, optional
        Fill value for the positions introduced by dilation.
    out_dtype : Option[str]
        Dtype of the result; defaults to the input's dtype.

    Returns
    -------
    output_np : numpy.ndarray
        n-D result with the same layout as the input.
    """
    assert len(input_np.shape) == len(
        strides
    ), f"Input dimension and strides size dismatch : {len(input_np.shape)} vs {len(strides)}"
    if out_dtype is None:
        out_dtype = input_np.dtype
    # Each axis of length d expands to (d - 1) * stride + 1.
    dilated_shape = [(dim - 1) * s + 1 for dim, s in zip(input_np.shape, strides)]
    output_np = np.full(shape=dilated_shape, fill_value=dilation_value, dtype=out_dtype)
    # Open-mesh index selecting every stride-th position along each axis;
    # those positions receive the original input values.
    keep_idx = np.ix_(*(range(0, out_dim, s) for out_dim, s in zip(dilated_shape, strides)))
    output_np[keep_idx] = input_np
    return output_np
| 2,102 | 31.859375 | 93 | py |
tvm | tvm-main/python/tvm/topi/gpu/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Schedule for conv2d operator"""
from tvm import te, autotvm
from .. import nn
from ..utils import traverse_inline
from .conv2d_nhwc import schedule_conv2d_nhwc_direct
@autotvm.register_topi_compute("conv2d_nhwc.gpu")
def conv2d_nhwc(cfg, data, kernel, strides, padding, dilation, out_dtype="float32"):
    """Compute conv2d with NHWC layout.

    AutoTVM-registered compute wrapper: ``cfg`` is consumed only by the
    tuning framework; the computation itself is delegated unchanged to
    ``nn.conv2d_nhwc``.
    """
    return nn.conv2d_nhwc(data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv2d_nhwc.gpu")
def schedule_conv2d_nhwc(cfg, outs):
    """Create the schedule for conv2d_nhwc."""
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _visit(op):
        # Apply the direct NHWC schedule to the convolution stage.
        if op.tag == "conv2d_nhwc":
            schedule_conv2d_nhwc_direct(cfg, sch, op.output(0))

    traverse_inline(sch, outs[0].op, _visit)
    return sch
| 1,695 | 37.545455 | 84 | py |
tvm | tvm-main/python/tvm/topi/gpu/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Schedule for dense operator"""
import logging
from tvm import autotvm, te
from tvm.autotvm.task.space import SplitEntity
from .. import nn
from ..utils import traverse_inline, get_const_tuple
logger = logging.getLogger("topi")
@autotvm.register_topi_compute("dense_small_batch.gpu")
def dense_small_batch(cfg, data, weight, bias=None, out_dtype=None):
    """Dense operator on GPU.

    AutoTVM-registered compute wrapper: ``cfg`` is consumed only by the
    tuning framework; the computation is delegated to ``nn.dense``.
    """
    return nn.dense(data, weight, bias, out_dtype)
@autotvm.register_topi_schedule("dense_small_batch.gpu")
def schedule_dense_small_batch(cfg, outs):
    """Schedule float32/64 dense with small batch size."""
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _visit(op):
        if op.tag == "dense":
            _schedule_dense_small_batch(cfg, sch, op.output(0))

    traverse_inline(sch, outs[0].op, _visit)
    return sch
@autotvm.register_topi_compute("matmul_default.gpu")
def matmul_default(
    cfg,
    tensor_a,
    tensor_b,
    bias=None,
    out_dtype=None,
    transpose_a=False,
    transpose_b=False,
):
    """Matmul operator on GPU.

    AutoTVM-registered compute wrapper: ``cfg`` is consumed only by the
    tuning framework; the computation is delegated to ``nn.matmul`` with
    the transpose flags passed through unchanged.
    """
    return nn.matmul(tensor_a, tensor_b, bias, out_dtype, transpose_a, transpose_b)
@autotvm.register_topi_schedule("matmul_default.gpu")
def schedule_matmul_default(cfg, outs):
    """Schedule matmul on GPU."""
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _visit(op):
        if op.tag == "matmul":
            # Reuse the small-batch dense schedule as a basic baseline.
            # TODO(jcf94): Add a more general schedule for matmul
            _schedule_dense_small_batch(cfg, sch, op.output(0))

    traverse_inline(sch, outs[0].op, _visit)
    return sch
def _schedule_dense_small_batch(cfg, s, C):
    """Schedule a dense (or matmul) stage for small batch sizes.

    Splits the reduction axis, rfactors it, and binds the partial sums to
    threadIdx.x so each thread accumulates part of the dot product.
    Mutates the schedule ``s`` in place.
    """
    A, weights = C.op.input_tensors
    # If the weight tensor is itself computed from A (e.g. a transpose of
    # the same input), inline that computation into the consumer.
    if len(weights.op.input_tensors) == 1 and weights.op.input_tensors[0] == A:
        s[weights].compute_inline()
    _, in_dim_weights = get_const_tuple(weights.shape)
    _, in_dim_A = get_const_tuple(A.shape)
    # The reduction length may be dynamic on either side; prefer whichever
    # operand has a static (int) inner dimension.
    if isinstance(in_dim_A, int):
        in_dim = in_dim_A
    elif isinstance(in_dim_weights, int):
        in_dim = in_dim_weights
    else:
        in_dim = None
    if in_dim is not None:
        # Static reduction length: make the split tunable.
        cfg.define_split("tile_k", in_dim, num_outputs=2)
        if cfg.is_fallback:
            cfg["tile_k"] = SplitEntity([-1, 64] if in_dim > 64 else [1, 64])
        _, kf = cfg["tile_k"].apply(s, C, C.op.reduce_axis[0])
    else:
        # Dynamic reduction length: use a fixed split factor.
        tile_k = 64
        _, kf = s[C].split(C.op.reduce_axis[0], tile_k)
    # rfactor turns the split reduction into a parallelizable partial-sum
    # stage CF followed by a final cross-thread reduction in C.
    CF = s.rfactor(C, kf)
    if C.op in s.outputs:
        Out = C
    else:
        Out = s.outputs[0].output(0)
        s[C].compute_at(s[Out], s[Out].op.axis[1])
    s[Out].bind(s[Out].op.axis[0], te.thread_axis("blockIdx.y"))
    s[Out].bind(s[Out].op.axis[1], te.thread_axis("blockIdx.x"))
    tx = s[C].op.reduce_axis[0]
    thread_x = te.thread_axis("threadIdx.x")
    s[C].bind(tx, thread_x)
    s[CF].compute_at(s[C], tx)
    # Only thread 0 holds the complete reduction result, so restrict the
    # final stores to that thread.
    s[C].set_store_predicate(thread_x.var.equal(0))
    s[Out].set_store_predicate(thread_x.var.equal(0))
@autotvm.register_topi_compute("dense_large_batch.gpu")
def dense_large_batch(cfg, data, weight, bias=None, out_dtype=None):
    """Dense operator on GPU.

    AutoTVM-registered compute wrapper: ``cfg`` is consumed only by the
    tuning framework; the computation is delegated to ``nn.dense``.
    """
    return nn.dense(data, weight, bias, out_dtype)
@autotvm.register_topi_schedule("dense_large_batch.gpu")
def schedule_dense_large_batch(cfg, outs):
    """Schedule float32/64 dense with large batch size."""
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _visit(op):
        if op.tag == "dense":
            _schedule_dense_large_batch(cfg, sch, op.output(0))

    traverse_inline(sch, outs[0].op, _visit)
    return sch
def _schedule_dense_large_batch(cfg, s, C):
    """Schedule float32/64 dense with large batch size.

    Tiles both output dimensions across blocks / virtual threads / threads,
    stages both operands through shared and local memory, and splits the
    reduction axis.  Mutates the schedule ``s`` in place.
    """
    A, B = C.op.input_tensors
    # If B is computed directly from A (e.g. a transpose), inline it.
    if len(B.op.input_tensors) == 1 and B.op.input_tensors[0] == A:
        s[B].compute_inline()
    batch, in_dim = get_const_tuple(A.shape)
    out_dim, _ = get_const_tuple(B.shape)
    k = C.op.reduce_axis[0]
    # create tuning space; candidates are filtered so that the product of
    # vthread * nthread * inner equals a full block size (64 or 128)
    try:
        block_cand = [64, 128]
        vthread_cand = [2**x for x in range(1, 7)]
        n_thread_cand = [2**x for x in range(3, 7)]
        cfg.define_split(
            "tile_x",
            batch,
            num_outputs=4,
            filter=lambda x: (
                x.size[1] in vthread_cand
                and x.size[2] in n_thread_cand
                and (x.size[1] * x.size[2] * x.size[3]) in block_cand
            ),
        )
        cfg.define_split(
            "tile_y",
            out_dim,
            num_outputs=4,
            filter=lambda x: (
                x.size[1] in vthread_cand
                and x.size[2] in n_thread_cand
                and (x.size[1] * x.size[2] * x.size[3]) in block_cand
            ),
        )
        cfg.define_split("tile_k", in_dim, num_outputs=3, filter=lambda x: x.size[0] > 2)
    except IndexError:
        # Index error happens when no entities left after filtering, which was designed
        # to prune tuning space for better search efficiency.
        logger.debug("Tuning space was created without pruning due to unfit shapes")
        cfg.define_split("tile_x", batch, num_outputs=4)
        cfg.define_split("tile_y", out_dim, num_outputs=4)
        cfg.define_split("tile_k", in_dim, num_outputs=3)
    if cfg.is_fallback:
        if batch > 1:
            cfg["tile_x"] = SplitEntity([-1, 2, 16, 2])
        else:
            cfg["tile_x"] = SplitEntity([1, 1, 1, 1])
        if out_dim > 1:
            cfg["tile_y"] = SplitEntity([-1, 2, 16, 2])
        else:
            cfg["tile_y"] = SplitEntity([1, 1, 1, 1])
        if in_dim > 8:
            cfg["tile_k"] = SplitEntity([-1, 8, 1])
        else:
            cfg["tile_k"] = SplitEntity([-1, 1, 1])
    # Explicit memory access: stage operands in shared then local memory,
    # and accumulate the result in a local cache before the final write.
    AA = s.cache_read(A, "shared", [C])
    BB = s.cache_read(B, "shared", [C])
    AL = s.cache_read(AA, "local", [C])
    BL = s.cache_read(BB, "local", [C])
    CC = s.cache_write(C, "local")
    # Deal with op fusion: schedule the fused output stage instead.
    if C.op not in s.outputs:
        s[C].compute_inline()
        C = s.outputs[0].output(0)
    # Split and reorder computation
    bx, txz, tx, xi = cfg["tile_x"].apply(s, C, C.op.axis[0])
    by, tyz, ty, yi = cfg["tile_y"].apply(s, C, C.op.axis[1])
    s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)
    s[CC].compute_at(s[C], tx)
    # Binding
    s[C].bind(by, te.thread_axis("blockIdx.y"))
    s[C].bind(bx, te.thread_axis("blockIdx.x"))
    s[C].bind(tyz, te.thread_axis("vthread"))
    s[C].bind(txz, te.thread_axis("vthread"))
    s[C].bind(ty, te.thread_axis("threadIdx.y"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    # Split reduction
    yo, xo = CC.op.axis
    ko, kt, ki = cfg["tile_k"].apply(s, CC, k)
    s[CC].reorder(ko, kt, ki, yo, xo)
    s[AA].compute_at(s[CC], ko)
    s[BB].compute_at(s[CC], ko)
    s[CC].unroll(kt)
    s[AL].compute_at(s[CC], kt)
    s[BL].compute_at(s[CC], kt)
    # Schedule for A's shared memory load
    num_thread_x = cfg["tile_x"].size[2]
    ty, _ = s[AA].split(s[AA].op.axis[0], nparts=num_thread_x)
    _, xi = s[AA].split(s[AA].op.axis[1], factor=num_thread_x * 4)
    tx, xi = s[AA].split(xi, nparts=num_thread_x)
    s[AA].bind(ty, te.thread_axis("threadIdx.y"))
    s[AA].bind(tx, te.thread_axis("threadIdx.x"))
    s[AA].double_buffer()
    # Schedule for B' shared memory load
    num_thread_y = cfg["tile_y"].size[2]
    ty, _ = s[BB].split(s[BB].op.axis[0], nparts=num_thread_y)
    _, xi = s[BB].split(s[BB].op.axis[1], factor=num_thread_y * 4)
    tx, xi = s[BB].split(xi, nparts=num_thread_y)
    s[BB].bind(ty, te.thread_axis("threadIdx.y"))
    s[BB].bind(tx, te.thread_axis("threadIdx.x"))
    s[BB].double_buffer()
| 8,670 | 33.137795 | 89 | py |
tvm | tvm-main/python/tvm/topi/gpu/conv2d_nhwc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements, unused-argument
"""Direct conv2d in NHWC layout"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple
def schedule_conv2d_nhwc_direct(cfg, s, Conv):
    """Schedule optimized for NHWC direct conv2d.

    Stages the padded input and kernel through shared and local memory,
    tiles the batch and channel output axes across blocks / virtual
    threads / threads, and vectorizes the innermost channel dimension.
    Mutates the schedule ``s`` in place.
    """
    pad_data, kernel = s[Conv].op.input_tensors
    s[pad_data].compute_inline()
    if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()
    if Conv.op in s.outputs:
        output = Conv
        OL = s.cache_write(Conv, "local")
    else:
        # Conv is fused into a later op; accumulate Conv locally and
        # schedule around the fused output instead.
        output = s.outputs[0].output(0)
        s[Conv].set_scope("local")
        OL = Conv
    # create cache stage
    AA = s.cache_read(pad_data, "shared", [OL])
    WW = s.cache_read(kernel, "shared", [OL])
    AL = s.cache_read(AA, "local", [OL])
    WL = s.cache_read(WW, "local", [OL])
    # Currently Conv2d NHWC only supports dynamic shape in batch
    dynamic_batch = isinstance(s[output].op.axis[0].dom.extent, tvm.tir.expr.Var)
    # Schedule for autotvm; batch-related knobs collapse to 1 when the
    # batch dimension is dynamic.
    cfg.define_knob("tile_n", [1] if dynamic_batch else [2, 4, 8])
    cfg.define_knob("tile_c", [2, 4, 8])
    cfg.define_knob("num_thread_n", [1] if dynamic_batch else [4, 8, 16])
    cfg.define_knob("num_thread_c", [4, 8, 16])
    cfg.define_knob("vthread_n", [1] if dynamic_batch else [1, 2])
    cfg.define_knob("vthread_c", [1, 2])
    cfg.define_knob("step", [16, 3, 32, 64])
    cfg.define_knob("vectorize", [1, 2, 4, 8])
    # fallback support
    target = tvm.target.Target.current()
    if cfg.is_fallback:
        ref_log = autotvm.tophub.load_reference_log(
            target.kind.name, target.model, "conv2d_nhwc.gpu"
        )
        cfg.fallback_with_reference_log(ref_log)
    tile_n = cfg["tile_n"].val
    tile_c = cfg["tile_c"].val
    num_thread_n = cfg["num_thread_n"].val
    num_thread_c = cfg["num_thread_c"].val
    vthread_n = cfg["vthread_n"].val
    vthread_c = cfg["vthread_c"].val
    step = cfg["step"].val
    vec_factor = cfg["vectorize"].val
    block_factor_c = tile_c * num_thread_c * vthread_c
    # Pad shared-memory rows by `offset` elements to reduce bank conflicts.
    offset = 8
    A_align = step + offset
    W_align = block_factor_c + offset
    block_x = te.thread_axis("blockIdx.x")
    block_y = te.thread_axis("blockIdx.y")
    block_z = te.thread_axis("blockIdx.z")
    thread_x = te.thread_axis((0, num_thread_c), "threadIdx.x")
    thread_y = te.thread_axis((0, num_thread_n), "threadIdx.y")
    thread_xz = te.thread_axis((0, vthread_c), "vthread", name="vx")
    thread_yz = te.thread_axis((0, vthread_n), "vthread", name="vy")
    # Schedule for output
    ni, _, wi, fi = s[output].op.axis
    bx = wi
    fi, vec = s[output].split(fi, factor=vec_factor)
    s[output].vectorize(vec)
    tx, fi = s[output].split(fi, factor=tile_c)
    txz, tx = s[output].split(tx, factor=num_thread_c)
    bz, txz = s[output].split(txz, factor=vthread_c)
    ty, ni = s[output].split(ni, factor=tile_n)
    tyz, ty = s[output].split(ty, factor=num_thread_n)
    by, tyz = s[output].split(tyz, factor=vthread_n)
    s[output].reorder(bx, by, bz, tyz, txz, ty, tx, ni, fi, vec)
    s[output].bind(bz, block_z)
    s[output].bind(by, block_y)
    s[output].bind(bx, block_x)
    s[output].bind(tyz, thread_yz)
    s[output].bind(txz, thread_xz)
    s[output].bind(ty, thread_y)
    s[output].bind(tx, thread_x)
    # Schedule local computation
    s[OL].compute_at(s[output], tx)
    ni, yi, xi, fi = s[OL].op.axis
    ry, rx, rc = s[OL].op.reduce_axis
    rco, rci = s[OL].split(rc, factor=step)
    s[OL].vectorize(fi)
    s[OL].reorder(rco, ry, rx, rci, ni, fi)
    s[AA].compute_at(s[OL], rx)
    s[WW].compute_at(s[OL], rx)
    s[AL].compute_at(s[OL], rci)
    s[WL].compute_at(s[OL], rci)
    # Schedule for data's shared memory
    ni, yi, xi, ci = s[AA].op.axis
    s[AA].reorder(yi, xi, ni, ci)
    s[AA].storage_align(xi, A_align - 1, A_align)
    t = s[AA].fuse(ni, ci)
    ty, tx = s[AA].split(t, factor=num_thread_c)
    _, ty = s[AA].split(ty, factor=num_thread_n)
    s[AA].bind(tx, thread_x)
    s[AA].bind(ty, thread_y)
    # Schedule for kernel's shared memory
    _, _, ic, o = s[WW].op.axis
    t = s[WW].fuse(ic, o)
    s[WW].storage_align(ic, W_align - 1, W_align)
    t, vec = s[WW].split(t, factor=vec_factor)
    s[WW].vectorize(vec)
    ty, tx = s[WW].split(t, factor=num_thread_c)
    _, ty = s[WW].split(ty, factor=num_thread_n)
    s[WW].bind(tx, thread_x)
    s[WW].bind(ty, thread_y)
    N, OH, OW, CO = get_const_tuple(output.shape)
    KH, KW, CI, _ = get_const_tuple(kernel.shape)
    # FLOP count is only reported for static batch sizes.
    if isinstance(N, int):
        cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW)
| 5,436 | 36.496552 | 85 | py |
tvm | tvm-main/python/tvm/topi/gpu/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""GPU specific declaration and schedules."""
from .dense import *
from .conv2d import *
| 928 | 41.227273 | 62 | py |
tvm | tvm-main/python/tvm/topi/mali/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""conv2d schedule on ARM Mali GPU"""
import logging
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from tvm.autotvm.task.space import get_factors
from ..utils import traverse_inline, get_const_int, get_const_tuple
from .. import nn
from ..nn.winograd_util import winograd_transform_matrices
from ..nn.conv2d import conv2d_winograd_nhwc, _conv2d_winograd_nhwc_impl
# reuse some compute declarations from ARM CPU
from ..arm_cpu.conv2d_spatial_pack import conv2d_spatial_pack_nchw
from ..arm_cpu.conv2d_spatial_pack import conv2d_spatial_pack_nhwc
logger = logging.getLogger("topi")
@autotvm.register_topi_compute("conv2d_nchw_spatial_pack.mali")
def conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """TOPI compute callback for conv2d

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template

    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]

    kernel : tvm.te.Tensor
        4-D with shape [num_filter, in_channel, filter_height, filter_width] or
        pre-packed 5-D with shape [num_filter_chunk, in_channel, filter_height,
        filter_width, num_filter_block]

    strides : list of two ints
        [stride_height, stride_width]

    padding : list of two ints
        [pad_height, pad_width]

    dilation : list of two ints
        [dilation_height, dilation_width]

    out_dtype: str
        The output type. This is used for mixed precision.

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    # Delegates to the shared ARM-CPU spatial-pack declaration; num_tile=3
    # matches the three tiled axes (tile_co/tile_oh/tile_ow) consumed by
    # the Mali schedule below.
    return conv2d_spatial_pack_nchw(
        cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile=3
    )
@autotvm.register_topi_schedule("conv2d_nchw_spatial_pack.mali")
def schedule_conv2d_nchw_spatial_pack(cfg, outs):
    """TOPI schedule callback for conv2d (NCHW spatial pack).

    Parameters
    ----------
    cfg: ConfigEntity
        The configuration of this template

    outs: Array of Tensor
        The computation graph description of convolution2d
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for conv2d
    """
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _visit(op):
        # Apply the spatial-pack schedule to the conv2d stage only.
        if "spatial_conv2d_output" in op.tag:
            _schedule_spatial_pack(cfg, sch, op, layout="NCHW")

    traverse_inline(sch, outs[0].op, _visit)
    return sch
@autotvm.register_topi_compute("conv2d_nhwc_spatial_pack.mali")
def conv2d_nhwc_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute conv2d with NHWC layout.

    Delegates to the shared ARM-CPU spatial-pack declaration with
    num_tile=3 (three tiled axes, matching the Mali schedule).
    """
    return conv2d_spatial_pack_nhwc(
        cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile=3
    )
@autotvm.register_topi_schedule("conv2d_nhwc_spatial_pack.mali")
def schedule_conv2d_nhwc_spatial_pack(cfg, outs):
    """Create schedule for conv2d_nhwc."""
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _visit(op):
        # Apply the spatial-pack schedule to the conv2d stage only.
        if "spatial_conv_output_NHWC" in op.tag:
            _schedule_spatial_pack(cfg, sch, op, layout="NHWC")

    traverse_inline(sch, outs[0].op, _visit)
    return sch
def _schedule_spatial_pack(cfg, s, op, layout):
    """Schedule the spatial packing for conv2d.

    Handles both NCHW and NHWC layouts; the axis bookkeeping differs per
    layout but the overall structure is the same: schedule data packing,
    kernel packing, the convolution itself, then the (possibly fused)
    output stage.  Mutates the schedule ``s`` in place.
    """
    assert layout in ("NCHW", "NHWC")
    output = op.output(0)
    conv = op.input_tensors[0]
    data_vec = conv.op.input_tensors[0]
    data_pad = data_vec.op.input_tensors[0]
    s[data_pad].compute_inline()
    kernel_vec = conv.op.input_tensors[1]
    if kernel_vec.op.name == "kernel_vec":
        kernel = kernel_vec.op.input_tensors[0]
    else:
        kernel = kernel_vec
    if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()
    data = s[data_vec].op.input_tensors[0]
    max_unroll = 16
    vec_size = [1, 2, 4, 8, 16]
    # get tunable parameters (they are defined in compute)
    _, TC, VC = cfg["tile_co"].size
    _, TH, VH = cfg["tile_oh"].size
    _, TW, VW = cfg["tile_ow"].size
    # schedule padding
    if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
        data_pad = data
        s[data_pad].compute_inline()
    # schedule data packing; the "undilated" variant carries two extra
    # axes, so the unpacking differs per case.
    if layout == "NCHW":
        if isinstance(data_vec.op, tvm.te.ComputeOp) and data_vec.op.name == "data_vec_undilated":
            _, h, w, ci, _, _, vh, vw = s[data_vec].op.axis
        else:
            _, h, w, ci, vh, vw = s[data_vec].op.axis
        z, y, x, unroll1, unroll2 = h, w, ci, vh, vw
    else:
        if isinstance(data_vec.op, tvm.te.ComputeOp) and data_vec.op.name == "data_vec_undilated":
            _, oho, owo, _, _, ic, ohi, owi = s[data_vec].op.axis
        else:
            _, oho, owo, ohi, owi, ic = s[data_vec].op.axis
        z, y, x, unroll1, unroll2 = oho, owo, ohi, ic, owi
    tile_and_bind3d(s, data_vec, z, y, x, 1)
    if unroll1.dom.extent.value < max_unroll:
        s[data_vec].unroll(unroll1)
    if unroll2.dom.extent.value < max_unroll:
        s[data_vec].unroll(unroll2)
    # schedule kernel packing (skipped while tuning, since the packed
    # kernel is treated as an input then)
    if isinstance(kernel_vec.op, tvm.te.ComputeOp) and kernel_vec.name == "kernel_vec":
        if not autotvm.GLOBAL_SCOPE.in_tuning:
            max_threads = tvm.target.Target.current(allow_none=False).max_num_threads
            ax1, ax2, ax3, ax4, ax5 = s[kernel_vec].op.axis
            fused = s[kernel_vec].fuse(ax1, ax2, ax3, ax4, ax5)
            fused, vec = s[kernel_vec].split(fused, VC)
            bb, tt = s[kernel_vec].split(fused, max_threads)
            s[kernel_vec].bind(bb, te.thread_axis("blockIdx.x"))
            s[kernel_vec].bind(tt, te.thread_axis("threadIdx.x"))
            if VC in vec_size:
                s[kernel_vec].vectorize(vec)
    # schedule convolution
    ic, kh, kw = s[conv].op.reduce_axis
    if layout == "NCHW":
        kh_dim, kw_dim = kernel_vec.shape[2], kernel_vec.shape[3]
    else:
        kh_dim, kw_dim = kernel_vec.shape[0], kernel_vec.shape[1]
    cfg["ann_reduce"].apply(
        s,
        conv,
        [kh, kw],
        axis_lens=[get_const_int(kh_dim), get_const_int(kw_dim)],
        max_unroll=max_unroll,
    )
    if layout == "NCHW":
        n, c, h, w, vh, vw, vc = s[conv].op.axis
        cfg["reorder_0"].apply(s, conv, [n, c, h, w, ic, kh, kw, vh, vw, vc])
        tile_and_bind3d(s, conv, c, h, w, TC, TH, TW)
        unroll_vec_axes = [vh, vw, vc]
        axis_lens = [VH, VW, VC]
    else:
        n, oho, owo, oco, ohi, owi, oci = s[conv].op.axis
        cfg["reorder_conv"].apply(s, conv, [n, oho, owo, oco, kh, kw, ic, ohi, owi, oci])
        tile_and_bind3d(s, conv, oho, owo, oco, TH, TW, TC)
        unroll_vec_axes = [ohi, owi, oci]
        axis_lens = [VH, VW, VC]
    cfg["ann_spatial"].apply(
        s, conv, unroll_vec_axes, axis_lens, max_unroll=max_unroll, vec_size=vec_size, cfg=cfg
    )
    # schedule output
    if output.op not in s.outputs:  # has bias
        s[output].compute_inline()
        output = s.outputs[0]
    if layout == "NCHW":
        _, co, oh, ow = s[output].op.axis
        tile_and_bind3d(s, output, co, oh, ow, TC, TH, TW)
    else:
        _, oh, ow, co = s[output].op.axis
        tile_and_bind3d(s, output, oh, ow, co, TH, TW, TC)
    return s
##### WINOGRAD TEMPLATE #####
def _pick_tile_size(data, kernel, layout="NCHW"):
    """Select the winograd output tile size from the input height.

    Returns 4 when the height is divisible by 4, otherwise 2.
    """
    if layout == "NCHW":
        _, _, height, _ = get_const_tuple(data.shape)
    else:
        assert layout == "NHWC"
        _, height, _, _ = get_const_tuple(data.shape)
    return 4 if height % 4 == 0 else 2
@autotvm.register_topi_compute("conv2d_nchw_winograd.mali")
def conv2d_nchw_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute conv2d with NCHW layout using winograd.

    Picks the output tile size from the input shape (see
    ``_pick_tile_size``) and delegates to ``_decl_winograd``.
    """
    tile_size = _pick_tile_size(data, kernel)
    return _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype, tile_size)
@autotvm.register_topi_schedule("conv2d_nchw_winograd.mali")
def schedule_conv2d_nchw_winograd(cfg, outs):
    """Create the schedule for winograd conv2d (NCHW, Mali)."""
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _visit(op):
        if "winograd_conv2d_output" in op.tag:
            _schedule_winograd(cfg, sch, op)

    traverse_inline(sch, outs[0].op, _visit)
    return sch
def _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype, tile_size):
    """Declare the winograd conv2d computation (NCHW).

    Stages: pack input tiles -> transform kernel (U) and image (V) ->
    batched GEMM (M) -> inverse transform (Y) -> unpack to NCHW output.
    Only 3x3 stride-1 kernels are supported (asserted below).
    """
    N, CI, IH, IW = get_const_tuple(data.shape)
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    if len(kernel.shape) == 4:
        if dilation_h != 1 or dilation_w != 1:
            kernel = nn.dilate(kernel, (1, 1, dilation_h, dilation_w))
        pre_computed = False
        CO, _, KH, KW = get_const_tuple(kernel.shape)
    else:
        # 5-D kernel means the winograd transform was already applied
        # (alter_op_layout path); recover KH/KW from the transformed shape.
        assert (dilation_h, dilation_w) == (1, 1), "Does not support dilation"
        pre_computed = True
        H_CAT, W_CAT, CO, CI, VC = get_const_tuple(kernel.shape)
        CO *= VC
        KH, KW = H_CAT - tile_size + 1, W_CAT - tile_size + 1
    HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    pt, pl, pb, pr = nn.get_pad_tuple(padding, (KH, KW))
    assert KH == 3 and KW == 3 and HSTR == 1 and WSTR == 1
    data_pad = nn.pad(data, (0, 0, pt, pl), (0, 0, pb, pr), name="data_pad")
    r = KW
    m = tile_size
    alpha = m + r - 1
    A, B, G = winograd_transform_matrices(m, r, out_dtype)
    H = (IH + pt + pb - 3) // HSTR + 1
    W = (IW + pl + pr - 3) // WSTR + 1
    # P = total number of output tiles across the batch
    nH, nW = (H + m - 1) // m, (W + m - 1) // m
    P = N * nH * nW
    ##### space definition begin #####
    tile_bna_candidates = [1, 2, 4, 8, 16]
    factors = get_factors(CO)
    cfg.define_knob("tile_bna", [x for x in tile_bna_candidates if x in factors])
    cfg.define_knob("tile_bnb", [1, 2, 4, 8, 16])
    cfg.define_split("tile_t1", CI, num_outputs=2, max_factor=128)
    cfg.define_split("tile_t2", CO, num_outputs=2, max_factor=128)
    cfg.define_split("c_unroll", CI, num_outputs=2, max_factor=8)
    cfg.define_knob("yt", [1, 2, 4, 8, 16, 32])
    ##### space definition end #####
    if cfg.is_fallback:
        cfg["tile_bnb"].val = 4
        cfg["tile_bna"].val = 4
        while CO % cfg["tile_bna"].val != 0:
            cfg["tile_bna"].val //= 2
        cfg["yt"].val = 8
        cfg.fallback_split("tile_t1", [-1, 128])
        cfg.fallback_split("tile_t2", [-1, 128])
        cfg.fallback_split("c_unroll", [-1, 8])
    bna = cfg["tile_bna"].val
    bnb = cfg["tile_bnb"].val
    # Round the tile count up to a multiple of bnb so the GEMM is dense.
    P_round = (P + bnb - 1) // bnb * bnb
    assert CO % bna == 0 and P_round % bnb == 0
    # pack input tile; out-of-range tiles (from the round-up) read zero
    input_tile = te.compute(
        (CI, P_round // bnb, alpha, alpha, bnb),
        lambda ci, b, eps, nu, bb: tvm.tir.if_then_else(
            b * bnb + bb < P,
            data_pad[(b * bnb + bb) // (nH * nW)][ci][(b * bnb + bb) // nW % nH * m + eps][
                (b * bnb + bb) % nW * m + nu
            ],
            tvm.tir.const(0, data_pad.dtype),
        ),
        name="d",
    )
    if autotvm.GLOBAL_SCOPE.in_tuning:
        kvshape = (alpha, alpha, CO // bna, CI, bna)
        U = tvm.te.placeholder(kvshape, kernel.dtype, name="U")
    else:
        # transform kernel
        if pre_computed:
            U = kernel
        else:
            r_kh = te.reduce_axis((0, KH), "r_kh")
            r_kw = te.reduce_axis((0, KW), "r_kw")
            U = te.compute(
                (alpha, alpha, CO // bna, CI, bna),
                lambda eps, nu, co, ci, vco: te.sum(
                    kernel[co * bna + vco][ci][r_kh][r_kw] * G[eps][r_kh] * G[nu][r_kw],
                    axis=[r_kh, r_kw],
                ),
                name="U",
            )
    # transform image
    r_a = te.reduce_axis((0, alpha), "r_a")
    r_b = te.reduce_axis((0, alpha), "r_b")
    V = te.compute(
        (alpha, alpha, P_round // bnb, CI, bnb),
        lambda eps, nu, p, ci, vp: te.sum(
            input_tile[ci][p][r_a][r_b][vp] * B[r_a][eps] * B[r_b][nu], axis=[r_a, r_b]
        ),
        name="V",
    )
    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod
    # batch gemm
    ci = te.reduce_axis((0, CI), name="c")
    M = te.compute(
        (alpha, alpha, CO, P_round),
        lambda eps, nu, co, p: te.sum(
            U[eps][nu][idxdiv(co, bna)][ci][idxmod(co, bna)]
            * V[eps][nu][idxdiv(p, bnb)][ci][idxmod(p, bnb)],
            axis=ci,
        ),
        name="M",
    )
    r_a = te.reduce_axis((0, alpha), "r_a")
    r_b = te.reduce_axis((0, alpha), "r_b")
    Y = te.compute(
        (CO, P, m, m),
        lambda co, p, vh, vw: te.sum(M[r_a][r_b][co][p] * A[r_a][vh] * A[r_b][vw], axis=[r_a, r_b]),
        name="Y",
    )
    # unpack output
    output = te.compute(
        (N, CO, H, W),
        lambda n, co, h, w: Y[
            co, n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m), idxmod(h, m), idxmod(w, m)
        ]
        # The following hack term is used to make the padding in batch gemm ("M")
        # effective, otherwise the padding will be eliminated by bound inference.
        # Use `tvm.tir.Mul` instead of `*` to avoid issues in const folding.
        + tvm.tir.Mul(tvm.tir.const(0, out_dtype), M[alpha - 1][alpha - 1][CO - 1][P_round - 1]),
        name="output",
        tag="winograd_conv2d_output",
    )
    # we have to manually assign effective GFLOP for winograd
    cfg.add_flop(2 * N * CO * H * W * KH * KW * CI)
    return output
def _schedule_winograd(cfg, s, op):
    """schedule winograd fast convolution F(2x2, 3x3) for conv2d"""
    # get ops and tensors; mirrors the stage names from _decl_winograd
    # (d = packed input tiles, U/V = transforms, M = gemm, Y = inverse).
    output = op.output(0)
    Y = op.input_tensors[0]
    M, A = s[Y].op.input_tensors
    U, V = s[M].op.input_tensors
    d, B = s[V].op.input_tensors
    data_pad = s[d].op.input_tensors[0]
    # padding
    s[data_pad].compute_inline()
    # transform kernel (U is a placeholder instead of a ComputeOp when
    # tuning, or when the kernel was pre-transformed)
    if isinstance(U.op, tvm.te.ComputeOp):
        kernel, G = s[U].op.input_tensors
        s[G].compute_inline()
        (eps, nu, co, ci, vco) = s[U].op.axis
        if not autotvm.GLOBAL_SCOPE.in_tuning:
            r_kh, r_kw = s[U].op.reduce_axis
            s[U].reorder(co, ci, eps, nu, r_kh, r_kw, vco)
            _ = [s[U].unroll(x) for x in [eps, nu, r_kh, r_kw]]
            s[U].vectorize(vco)
            tile_and_bind(s, U, co, ci, 1, 256)
        # dilation
        if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
            s[kernel].compute_inline()
    # transform image
    s[B].compute_inline()
    VL = s.cache_write(V, "local")
    eps, nu, p, ci, vp = s[V].op.axis
    s[V].reorder(p, ci, eps, nu, vp)
    for axis in [eps, nu]:
        s[V].unroll(axis)
    s[V].vectorize(vp)
    fused = s[V].fuse(p, ci)
    bb, tt = cfg["tile_t1"].apply(s, V, fused)
    s[V].bind(bb, te.thread_axis("blockIdx.x"))
    s[V].bind(tt, te.thread_axis("threadIdx.x"))
    eps, nu, p, ci, vp = s[VL].op.axis
    r_a, r_b = s[VL].op.reduce_axis
    for axis in [eps, nu, r_a, r_b]:
        s[VL].unroll(axis)
    s[VL].vectorize(vp)
    s[d].compute_at(s[V], tt)
    s[VL].compute_at(s[V], tt)
    # batch gemm
    bna = cfg["tile_bna"].val
    bnb = cfg["tile_bnb"].val
    eps, nu, k, b = s[M].op.axis
    alpha = eps.dom.extent
    c = s[M].op.reduce_axis[0]
    yo, xo, yi, xi = s[M].tile(k, b, bna, bnb)
    c, c_unroll = cfg["c_unroll"].apply(s, M, c)
    s[M].reorder(yo, xo, c, c_unroll, yi, xi)
    s[M].unroll(c_unroll)
    s[M].unroll(yi)
    s[M].vectorize(xi)
    z = s[M].fuse(eps, nu)
    tile_and_bind3d(s, M, z, yo, xo, 1, cfg["yt"].val, 1)
    # inverse transform
    s[A].compute_inline()
    k, b, vh, vw = s[Y].op.axis
    r_a, r_b = s[Y].op.reduce_axis
    for axis in [vh, vw, r_a, r_b]:
        s[Y].unroll(axis)
    # schedule output and fusion
    if output.op not in s.outputs:
        s[output].compute_inline()
        output = s.outputs[0]
    n, co, h, w = s[output].op.axis
    # recover the output tile size m from alpha (alpha = m + r - 1, r = 3)
    m = alpha - 3 + 1
    h, w, hi, wi = s[output].tile(h, w, m, m)
    s[output].unroll(hi)
    s[output].unroll(wi)
    fused = s[output].fuse(n, co, h, w)
    bb, tt = cfg["tile_t2"].apply(s, output, fused)
    s[output].bind(bb, te.thread_axis("blockIdx.x"))
    s[output].bind(tt, te.thread_axis("threadIdx.x"))
    s[Y].compute_at(s[output], tt)
##### REGISTER ALTER OP LAYOUT #####
@nn.conv2d_alter_layout.register(["mali"])
def _alter_conv2d_layout(attrs, inputs, tinfos, out_type):
    """Alter conv2d layouts for the Mali GPU backend.

    Depending on the chosen implementation, either packs the OIHW kernel into
    an OIHW{VC}o layout (spatial pack) or pre-transforms the winograd weights,
    so the expensive layout work is done at compile time.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of the original conv2d op.
    inputs : list of relay.Expr
        Arguments of the original conv2d op (data, kernel).
    tinfos : list of te.Tensor
        Placeholder tensors describing the argument types.
    out_type : relay.Type
        Output type of the original conv2d op.

    Returns
    -------
    relay.Expr or None
        A rewritten conv2d call, or None to keep the op unchanged.
    """
    target = tvm.target.Target.current(allow_none=False)
    dispatch_ctx = autotvm.task.DispatchContext.current
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    strides = attrs.get_int_tuple("strides")
    padding = attrs.get_int_tuple("padding")
    dilation = attrs.get_int_tuple("dilation")
    data_layout = attrs["data_layout"]
    kernel_layout = attrs["kernel_layout"]
    data, kernel = tinfos
    out_dtype = out_type.dtype
    # Ask the compile engine which implementation would be selected for this op.
    impl, outs = relay.backend.te_compiler.select_implementation(
        relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target
    )
    workload = autotvm.task.get_workload(outs)
    if workload is None:
        # The best implementation is not an AutoTVM template.
        # It may be from the auto-scheduler
        if impl.name.find("winograd") != -1:
            if dilation != (1, 1):
                logger.warning("Does not support weight pre-transform for dilated convolution.")
                return None
            assert data_layout == "NHWC" and kernel_layout == "HWIO"
            N, H, W, CI = get_const_tuple(data.shape)
            KH, KW, _, CO = get_const_tuple(kernel.shape)
            # Pre-compute weight transformation in winograd
            tile_size = _pick_tile_size(tinfos[0], tinfos[1], layout="NHWC")
            # HWIO -> OIHW
            kernel_transform = relay.transpose(inputs[1], axes=[3, 2, 0, 1])
            # alpha, alpha, CO, CI
            weight = relay.nn.contrib_conv2d_winograd_weight_transform(
                kernel_transform, tile_size=tile_size
            )
            new_attrs["tile_size"] = tile_size
            new_attrs["channels"] = CO
            return relay.nn.contrib_conv2d_winograd_without_weight_transform(
                inputs[0], weight, **new_attrs
            )
        return None
    cfg = dispatch_ctx.query(target, workload)
    if cfg.is_fallback:  # if is fallback, clear query cache and return None
        autotvm.task.clear_fallback_cache(target, workload)
        return None
    topi_tmpl = workload[0]
    idxd = tvm.tir.indexdiv
    if topi_tmpl == "conv2d_nchw_spatial_pack.mali":
        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        N, CI, H, W = get_const_tuple(data.shape)
        CO, _, KH, KW = get_const_tuple(kernel.shape)
        # VC = innermost split of the output-channel tile from the tuned config.
        VC = cfg["tile_co"].size[-1]
        new_attrs["kernel_layout"] = f"OIHW{VC}o"
        new_data = data
        new_kernel = te.placeholder((idxd(CO, VC), CI, KH, KW, VC), dtype=kernel.dtype)
        new_workload = autotvm.task.args_to_workload(
            [new_data, new_kernel, strides, padding, dilation, out_dtype],
            "conv2d_nchw_spatial_pack.mali",
        )
        # Re-register the tuned config under the new (packed-kernel) workload.
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.conv2d(*inputs, **new_attrs)
    elif topi_tmpl == "conv2d_nchw_winograd.mali":
        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        N, CI, H, W = get_const_tuple(data.shape)
        CO, _, KH, KW = get_const_tuple(kernel.shape)
        tile_size = _pick_tile_size(data, kernel)
        VC = cfg["tile_bna"].val
        weight_expr = inputs[1]
        weight_expr = relay.nn.contrib_conv2d_winograd_weight_transform(
            weight_expr, tile_size=tile_size
        )
        # (alpha, alpha, CO, CI) -> (alpha, alpha, CO//VC, VC, CI), then swap the
        # last two axes to (alpha, alpha, CO//VC, CI, VC) to match new_kernel below.
        weight_expr = relay.reshape(
            weight_expr, newshape=(KH + tile_size - 1, KW + tile_size - 1, idxd(CO, VC), VC, CI)
        )
        weight_expr = relay.transpose(weight_expr, axes=[0, 1, 2, 4, 3])
        new_attrs["tile_size"] = tile_size
        new_data = data
        new_kernel = te.placeholder(
            (KH + tile_size - 1, KW + tile_size - 1, idxd(CO, VC), CI, VC), kernel.dtype
        )
        new_workload = autotvm.task.args_to_workload(
            [new_data, new_kernel, strides, padding, dilation, out_dtype],
            "conv2d_nchw_winograd.mali",
        )
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.contrib_conv2d_winograd_without_weight_transform(
            inputs[0], weight_expr, **new_attrs
        )
    else:
        return None
@conv2d_winograd_nhwc.register(["mali"])
def conv2d_winograd_nhwc_mali(
    data,
    weight,
    strides,
    padding,
    dilation,
    out_dtype,
    pre_computed=False,
    auto_scheduler_rewritten_layout="",
):
    """Conv2D Winograd in NHWC layout.
    This is a clean version to be used by the auto-scheduler for mali.
    """
    # Pick the winograd tile size for this shape, then delegate to the
    # generic NHWC winograd implementation.
    chosen_tile = _pick_tile_size(data, weight, layout="NHWC")
    return _conv2d_winograd_nhwc_impl(
        data,
        weight,
        strides,
        padding,
        dilation,
        out_dtype,
        chosen_tile,
        pre_computed,
        auto_scheduler_rewritten_layout,
    )
##### SCHEDULE UTILITIES #####
def tile_and_bind(s, tensor, y, x, y_factor, x_factor=None):
    """Tile two axes of ``tensor`` and bind the pieces to GPU block/thread axes."""
    x_factor = x_factor or y_factor
    y_outer, x_outer, y_inner, x_inner = s[tensor].tile(y, x, y_factor, x_factor)
    bindings = (
        (x_outer, "blockIdx.x"),
        (x_inner, "threadIdx.x"),
        (y_outer, "blockIdx.y"),
        (y_inner, "threadIdx.y"),
    )
    for axis, thread_tag in bindings:
        s[tensor].bind(axis, te.thread_axis(thread_tag))
    return y_outer, x_outer, y_inner, x_inner
def tile_and_bind3d(s, tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):
    """Split three axes, bind them to a 3D GPU grid, and reorder outer-to-inner."""
    y_factor = y_factor or z_factor
    x_factor = x_factor or y_factor
    pairs = []
    for axis, factor, suffix in ((z, z_factor, "z"), (y, y_factor, "y"), (x, x_factor, "x")):
        outer, inner = s[tensor].split(axis, factor)
        s[tensor].bind(outer, te.thread_axis(f"blockIdx.{suffix}"))
        s[tensor].bind(inner, te.thread_axis(f"threadIdx.{suffix}"))
        pairs.append((outer, inner))
    (zo, zi), (yo, yi), (xo, xi) = pairs
    # All block axes first, then all thread axes.
    s[tensor].reorder(zo, yo, xo, zi, yi, xi)
    return zo, yo, xo, zi, yi, xi
| 23,060 | 33.470852 | 100 | py |
tvm | tvm-main/python/tvm/topi/mali/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable
"""dense schedule on ARM Mali GPU"""
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import traverse_inline
@autotvm.register_topi_compute("dense.mali")
def dense(_, data, weight, bias=None, out_dtype=None):
"""Dense operator on Mali"""
return nn.dense(data, weight, bias, out_dtype)
@autotvm.register_topi_schedule("dense.mali")
def schedule_dense(cfg, outs):
"""Schedule for dense operator.
Parameters
----------
cfg: ConfigEntity
The config entity for this template
outs: Array of Tensor
The computation graph description of dense
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for dense.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "dense":
vec_size = [1, 2, 4, 8, 16]
max_unroll = 32
dense_out = op.output(0)
output = outs[0]
y, x = s[output].op.axis
c = s[dense_out].op.reduce_axis[0]
##### space definition begin #####
cfg.define_split("tile_y", y, num_outputs=3)
cfg.define_split("tile_x", x, num_outputs=3)
cfg.define_split("c_unroll", c, num_outputs=2, max_factor=64)
# fallback support
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log("mali", "rk3399", "dense.mali")
cfg.fallback_with_reference_log(ref_log)
##### space definition end #####
if dense_out.op in s.outputs:
dense_out = s.cache_write(output, "local")
by, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, tx, xi = cfg["tile_x"].apply(s, output, x)
s[output].bind(by, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
if cfg["tile_y"].size[-1] < max_unroll:
s[output].unroll(yi)
if cfg["tile_x"].size[-1] in vec_size:
s[output].vectorize(xi)
s[dense_out].compute_at(s[output], tx)
k = s[dense_out].op.reduce_axis[0]
y, x = s[dense_out].op.axis
k, k_unroll = cfg["c_unroll"].apply(s, dense_out, k)
s[dense_out].reorder(k, k_unroll, y, x)
s[dense_out].unroll(k_unroll)
if cfg["tile_y"].size[-1] < max_unroll:
s[dense_out].unroll(y)
if cfg["tile_x"].size[-1] in vec_size:
s[dense_out].vectorize(x)
traverse_inline(s, outs[0].op, _callback)
return s
def fuse_and_bind(s, tensor, axis=None, num_thread=None):
    """Fuse the given axes of ``tensor`` and bind the result to GPU threads."""
    # TODO(@comaniac): figure out where this function is used.
    axis = axis or s[tensor].op.axis
    flattened = s[tensor].fuse(*axis)
    block_x, thread_x = s[tensor].split(flattened, num_thread)
    s[tensor].bind(block_x, te.thread_axis("blockIdx.x"))
    s[tensor].bind(thread_x, te.thread_axis("threadIdx.x"))
    return block_x, thread_x
| 4,074 | 34.745614 | 91 | py |
tvm | tvm-main/python/tvm/topi/mali/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""ARM Mali GPU specific declaration and schedules."""
from __future__ import absolute_import as _abs
from .conv2d import *
from .depthwise_conv2d import *
from .dense import *
| 1,017 | 39.72 | 62 | py |
tvm | tvm-main/python/tvm/topi/mali/depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""depthwise_conv2d schedule on ARM Mali GPU"""
import tvm
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import traverse_inline
# register original implementation of depthwise_conv2d_nchw since we don't need to change this part
@autotvm.register_topi_compute("depthwise_conv2d_nchw.mali")
def depthwise_conv2d_nchw(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute depthwise conv2d (NCHW) by delegating to the generic topi definition."""
    compute = nn.depthwise_conv2d_nchw
    return compute(data, kernel, strides, padding, dilation, out_dtype)
# register customized schedule for Mali.
@autotvm.register_topi_schedule("depthwise_conv2d_nchw.mali")
def schedule_depthwise_conv2d_nchw(cfg, outs):
    """Schedule depthwise conv2d
    Parameters
    ----------
    cfg: ConfigEntity
        The configuration of this template
    outs: Array of Tensor
        The computation graph description of depthwise convolution2d
        in the format of an array of tensors.
    Returns
    -------
    s: Schedule
        The computation schedule for depthwise_conv2d nchw.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sched = te.create_schedule([t.op for t in outs])
    def _callback(op):
        # Locate the depthwise conv stage and hand it to the shared scheduler.
        if op.tag == "depthwise_conv2d_nchw":
            padded = op.input_tensors[0]
            weight = op.input_tensors[1]
            conv_out = op.output(0)
            _schedule(cfg, sched, padded, weight, conv_out, "NCHW")
    traverse_inline(sched, outs[0].op, _callback)
    return sched
# register original implementation of depthwise_conv2d_nhwc since we don't need to change this part
@autotvm.register_topi_compute("depthwise_conv2d_nhwc.mali")
def depthwise_conv2d_nhwc(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute depthwise conv2d (NHWC) by delegating to the generic topi definition."""
    compute = nn.depthwise_conv2d_nhwc
    return compute(data, kernel, strides, padding, dilation, out_dtype)
# register customized schedule for Mali.
@autotvm.register_topi_schedule("depthwise_conv2d_nhwc.mali")
def schedule_depthwise_conv2d_nhwc(cfg, outs):
    """Schedule depthwise conv2d
    Parameters
    ----------
    cfg: ConfigEntity
        The configuration of this template
    outs: Array of Tensor
        The computation graph description of depthwise convolution2d
        in the format of an array of tensors.
    Returns
    -------
    s: Schedule
        The computation schedule for depthwise_conv2d nhwc.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sched = te.create_schedule([t.op for t in outs])
    def _callback(op):
        # Locate the depthwise conv stage and hand it to the shared scheduler.
        if op.tag == "depthwise_conv2d_nhwc":
            padded = op.input_tensors[0]
            weight = op.input_tensors[1]
            conv_out = op.output(0)
            _schedule(cfg, sched, padded, weight, conv_out, "NHWC")
    traverse_inline(sched, outs[0].op, _callback)
    return sched
def _schedule(cfg, s, pad_data, kernel, conv, layout):
    """Schedule a depthwise_conv2d stage (shared by the NCHW and NHWC entry points).

    Parameters
    ----------
    cfg : ConfigEntity
        The autotvm config for this template.
    s : Schedule
        The schedule being mutated in place.
    pad_data : Tensor
        The padded input stage.
    kernel : Tensor
        The (possibly dilated) kernel stage.
    conv : Tensor
        The depthwise convolution output stage.
    layout : str
        Either "NCHW" or "NHWC"; selects which axis is the channel axis.
    """
    assert layout in ("NCHW", "NHWC")
    max_unroll = 16
    vec_size = [1, 2, 4, 8, 16]
    ##### space definition begin #####
    if layout == "NCHW":
        n, c, h, w = s[conv].op.axis
    else:
        n, h, w, c = s[conv].op.axis
    bc, tc, ci = cfg.define_split("tile_c", c, num_outputs=3)
    bh, th, hi = cfg.define_split("tile_y", h, num_outputs=3)
    bw, tw, wi = cfg.define_split("tile_x", w, num_outputs=3)
    cfg.define_annotate("ann_spatial", [ci, hi, wi], policy="try_unroll_vec")
    # fallback support: NCHW can reuse the pre-tuned rk3399 log; NHWC uses
    # fixed default splits.
    if cfg.is_fallback:
        if layout == "NCHW":
            ref_log = autotvm.tophub.load_reference_log(
                "mali", "rk3399", "depthwise_conv2d_nchw.mali"
            )
            cfg.fallback_with_reference_log(ref_log)
        else:
            cfg.fallback_split("tile_c", [-1, 4, 2])
            cfg.fallback_split("tile_y", [-1, 4, 2])
            cfg.fallback_split("tile_x", [-1, 4, 2])
    ###### space definition end ######
    # schedule padding: map (channel-ish, spatial, spatial) axes onto a 3D grid;
    # the channel axis gets the middle tile_c factor, the others factor 1.
    if layout == "NCHW":
        n, c, h, w = s[pad_data].op.axis
        z, y, x = c, h, w
        z_factor, y_factor, x_factor = cfg["tile_c"].size[1], 1, 1
    else:
        n, h, w, c = s[pad_data].op.axis
        z, y, x = h, w, c
        z_factor, y_factor, x_factor = 1, 1, cfg["tile_c"].size[1]
    tile_and_bind3d(s, pad_data, z, y, x, z_factor, y_factor, x_factor)
    # schedule dilation: inline the dilate stage if present
    if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()
    # schedule conv: accumulate in a "local" stage (OL) and write through the
    # final output stage.
    if conv.op not in s.outputs:
        s[conv].set_scope("local")
        OL = conv
        output = s.outputs[0].output(0)
    else:
        OL = s.cache_write(conv, "local")
        output = conv
    if layout == "NCHW":
        n, c, h, w = s[output].op.axis
    else:
        n, h, w, c = s[output].op.axis
    bc, tc, ci = cfg["tile_c"].apply(s, output, c)
    bh, th, hi = cfg["tile_y"].apply(s, output, h)
    bw, tw, wi = cfg["tile_x"].apply(s, output, w)
    # Map (block, thread) pairs so the innermost memory axis maps to x threads.
    if layout == "NCHW":
        bz, tz, by, ty, bx, tx = bc, tc, bh, th, bw, tw
    else:
        bz, tz, by, ty, bx, tx = bh, th, bw, tw, bc, tc
    bz = s[output].fuse(n, bz)
    s[output].bind(bz, te.thread_axis("blockIdx.z"))
    s[output].bind(tz, te.thread_axis("threadIdx.z"))
    s[output].bind(by, te.thread_axis("blockIdx.y"))
    s[output].bind(ty, te.thread_axis("threadIdx.y"))
    s[output].bind(bx, te.thread_axis("blockIdx.x"))
    s[output].bind(tx, te.thread_axis("threadIdx.x"))
    # Fully unroll the small kernel-window reduction axes.
    di, dj = s[OL].op.reduce_axis
    s[OL].unroll(di)
    s[OL].unroll(dj)
    s[OL].compute_at(s[output], tx)
    if layout == "NCHW":
        n, ci, hi, wi = s[OL].op.axis
    else:
        n, hi, wi, ci = s[OL].op.axis
    # Apply the tuned unroll/vectorize annotations to the innermost tiles.
    cfg["ann_spatial"].apply(
        s,
        OL,
        [ci, hi, wi],
        axis_lens=[cfg["tile_c"].size[2], cfg["tile_y"].size[2], cfg["tile_x"].size[2]],
        max_unroll=max_unroll,
        vec_size=vec_size,
        cfg=cfg,
    )
def tile_and_bind3d(s, tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):
    """Split three axes and bind each (outer, inner) pair to a 3D GPU grid.

    Note: unlike some sibling helpers, this variant does NOT reorder the axes,
    and returns them as (zo, zi, yo, yi, xo, xi).
    """
    y_factor = y_factor or z_factor
    x_factor = x_factor or y_factor
    bound = []
    for axis, factor, suffix in ((z, z_factor, "z"), (y, y_factor, "y"), (x, x_factor, "x")):
        outer, inner = s[tensor].split(axis, factor)
        s[tensor].bind(outer, te.thread_axis("blockIdx." + suffix))
        s[tensor].bind(inner, te.thread_axis("threadIdx." + suffix))
        bound.extend((outer, inner))
    return tuple(bound)
| 7,489 | 33.045455 | 99 | py |
tvm | tvm-main/python/tvm/topi/cuda/injective.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable,
"""Schedule for composition of injective operator"""
import numpy as np
import tvm
from tvm import te
from .. import utils
def schedule_injective_from_existing(sch, out):
    """Schedule for injective op from existing schedule.
    Parameters
    ----------
    sch: Schedule
        The schedule to update.
    out: Tensor
        The tensor representing the injective op.
    Returns
    -------
    sch: Schedule
        The updated schedule.
    """
    def find_nearest_small_factor(num, target):
        """Find the nearest factor of the given number that is smaller than the target."""
        for i in range(target, 0, -1):
            if num % i == 0:
                return i
        # Unreachable because i=1 must hold.
        return -1
    fused = sch[out].fuse(*sch[out].op.axis)
    num_thread = tvm.target.Target.current(allow_none=False).max_num_threads
    max_block = 256
    # Vectorize on fp16 data type to enable half2 for better memory bandwidth utilization.
    vector_width = 2 if out.dtype == "float16" else 1
    is_dynamic_output = False
    for dim in out.shape:
        if not isinstance(dim, tvm.tir.IntImm):
            is_dynamic_output = True
            break
    out_len = utils.prod(out.shape)
    try:
        # get_const_int raises ValueError when the extent is not a constant,
        # which sends us to the dynamic-shape fallback below.
        const_size = utils.get_const_int(out_len)
        # Adjust block and thread to make sure they are dividable so that vectorize can be
        # correctly applied.
        if vector_width > 1 and const_size % vector_width == 0:
            remain_total_size = const_size // vector_width
            cand_sizes = []
            for max_size in [num_thread, max_block]:
                cand_sizes.append(
                    max_size
                    if remain_total_size % max_size == 0
                    else find_nearest_small_factor(remain_total_size, max_size)
                )
                remain_total_size //= cand_sizes[-1]
            # If the product of candidate dividable (block * thread) is too small,
            # then the performance may be worse even half2 is enabled. Note that 0.7
            # is just a heuristic ratio and may not be optimal for all workloads.
            if np.prod(cand_sizes) / (max_block * num_thread) >= 0.7:
                num_thread, max_block = cand_sizes
        need_block_split = const_size > max_block * num_thread * vector_width
    except ValueError:
        need_block_split = False
        const_size = 0
    if vector_width > 1:
        fused, v = sch[out].split(fused, vector_width)
        sch[out].vectorize(v)
    if need_block_split:
        # More elements than one grid can cover: add an outer serial loop xo.
        xo, xi = sch[out].split(fused, factor=num_thread * max_block)
        bx, tx = sch[out].split(xi, factor=num_thread)
        sch[out].reorder(bx, tx, xo)
        sch[out].bind(bx, te.thread_axis("blockIdx.x"))
        sch[out].bind(tx, te.thread_axis("threadIdx.x"))
    else:
        # Use less threads for dynamic shape ops to avoid runtime error.
        if is_dynamic_output:
            num_thread //= 2
        if const_size != 0 and const_size < num_thread:
            bx, tx = sch[out].split(fused, factor=const_size)
        else:
            bx, tx = sch[out].split(fused, factor=num_thread)
        sch[out].bind(tx, te.thread_axis("threadIdx.x"))
        sch[out].bind(bx, te.thread_axis("blockIdx.x"))
    return sch
def schedule_injective(outs):
    """Schedule for injective op.
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of injective in the format
        of an array of tensors.
    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sched = te.create_schedule([t.op for t in outs])
    tvm.te.schedule.AutoInlineInjective(sched)
    for tensor in outs:
        # Empty tensors need no GPU binding.
        if utils.is_empty_shape(tensor.shape):
            continue
        schedule_injective_from_existing(sched, tensor)
    return sched
# Elementwise and broadcast ops reuse the generic injective schedule.
schedule_elemwise = schedule_injective
schedule_broadcast = schedule_injective
| 4,841 | 33.098592 | 90 | py |
tvm | tvm-main/python/tvm/topi/cuda/nms.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, too-many-statements, singleton-comparison
# pylint: disable=bad-continuation, unused-argument
"""Non-maximum suppression operator"""
import tvm
from tvm import te
from tvm.contrib import nvcc
from tvm.contrib.thrust import can_use_thrust, can_use_rocthrust
from tvm.ir import register_intrin_lowering
from tvm.tir import if_then_else
from .sort import argsort, argsort_thrust
from .scan import exclusive_scan
from ..utils import ceil_div
from ..math import cast
from ..transform import reshape
from ..vision.nms_util import (
calculate_overlap,
binary_search,
collect_selected_indices,
collect_selected_indices_and_scores,
run_all_class_nms,
)
def cuda_atomic_add_rule(op):
    """Lower a tir.atomic_add call to CUDA's atomicAdd for supported dtypes."""
    if op.dtype in ("float32", "float64", "int32"):
        return tvm.tir.call_pure_extern(op.dtype, "atomicAdd", op.args[0], op.args[1])
    raise RuntimeError("only support int32, float32 and float64")
def opencl_atomic_add_rule(op):
    """Lower a tir.atomic_add call to OpenCL's atomic_add (int32 only)."""
    if op.dtype != "int32":
        raise RuntimeError("only support int32")
    return tvm.tir.call_pure_extern("int32", "atomic_add", op.args[0], op.args[1])
register_intrin_lowering("tir.atomic_add", target="cuda", f=cuda_atomic_add_rule, level=99)
register_intrin_lowering("tir.atomic_add", target="opencl", f=opencl_atomic_add_rule, level=99)
def atomic_add(x, y):
    """Build a tir.atomic_add intrinsic call adding ``y`` into ``x``."""
    call = tvm.tir.call_intrin(y.dtype, "tir.atomic_add", x, y)
    return call
def get_valid_boxes_ir(data, valid_boxes, score_threshold, id_index, score_index):
    """Low level IR to identify bounding boxes given a score threshold.
    Parameters
    ----------
    data : Buffer
        Input data. 3-D Buffer with shape [batch_size, num_anchors, elem_length].
    valid_boxes : Buffer
        Output 2D Buffer with shape [batch_size, num_anchors]; filled with 1/0 flags.
    score_threshold : Buffer or float32
        Lower limit of score for valid bounding boxes.
    id_index : optional, int
        index of the class categories, -1 to disable.
    score_index: optional, int
        Index of the scores/confidence of boxes.
    Returns
    -------
    valid_boxes: Buffer
        2D Buffer indicating valid boxes with shape [batch_size, num_anchors].
    """
    batch_size = data.shape[0]
    num_anchors = data.shape[1]
    elem_length = data.shape[2]
    ib = tvm.tir.ir_builder.create()
    data = ib.buffer_ptr(data)
    valid_boxes = ib.buffer_ptr(valid_boxes)
    # Normalize the scalar parameters to TIR immediates.
    if isinstance(score_threshold, float):
        score_threshold = tvm.tir.FloatImm("float32", score_threshold)
    id_index = tvm.tir.IntImm("int32", id_index)
    score_index = tvm.tir.IntImm("int32", score_index)
    max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    with ib.new_scope():
        # One thread per anchor; one block row (blockIdx.y) per batch element.
        nthread_tx = max_threads
        nthread_bx = ceil_div(num_anchors, max_threads)
        nthread_by = batch_size
        tx = te.thread_axis("threadIdx.x")
        bx = te.thread_axis("blockIdx.x")
        by = te.thread_axis("blockIdx.y")
        ib.scope_attr(tx, "thread_extent", nthread_tx)
        ib.scope_attr(bx, "thread_extent", nthread_bx)
        ib.scope_attr(by, "thread_extent", nthread_by)
        tid = bx * max_threads + tx
        with ib.if_scope(tid < num_anchors):
            i = by
            j = tid
            score = data[(i * num_anchors + j) * elem_length + score_index]
            # A box is valid when its score passes the threshold and, if class
            # checking is enabled (id_index >= 0), its class id is non-negative.
            with ib.if_scope(
                tvm.tir.all(
                    score > score_threshold,
                    tvm.tir.any(
                        id_index < 0, data[(i * num_anchors + j) * elem_length + id_index] >= 0
                    ),
                )
            ):
                valid_boxes[i * num_anchors + j] = 1
            with ib.else_scope():
                valid_boxes[i * num_anchors + j] = 0
    return ib.get()
def get_valid_counts_ir(data, valid_indices, valid_boxes, out, out_indices):
    """Low level IR to get valid count of bounding boxes
    given a score threshold. Also prepares to move valid boxes to the
    top of input data.
    Parameters
    ----------
    data : Buffer
        Input data. 3-D Buffer with shape [batch_size, num_anchors, elem_length].
    valid_indices: Buffer
        2D Buffer with shape [batch_size, num_anchors]; the exclusive-scan of the
        valid flags, i.e. the destination slot of each valid box.
    valid_boxes: Buffer
        2D Buffer of flags indicating valid data with shape [batch_size, num_anchors].
    Returns
    -------
    out : Buffer
        Sorted valid boxes
    out_indices : Buffer
        Indices of valid boxes in original data
    """
    batch_size = data.shape[0]
    num_anchors = data.shape[1]
    elem_length = data.shape[2]
    ib = tvm.tir.ir_builder.create()
    data = ib.buffer_ptr(data)
    valid_indices = ib.buffer_ptr(valid_indices)
    valid_boxes = ib.buffer_ptr(valid_boxes)
    out = ib.buffer_ptr(out)
    out_indices = ib.buffer_ptr(out_indices)
    one = tvm.tir.const(1, dtype=out.dtype)
    max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    nthread_tx = max_threads
    nthread_bx = num_anchors // max_threads + 1
    nthread_by = batch_size
    # First kernel: initialize every output element to -1 ("no box").
    with ib.new_scope():
        tx = te.thread_axis("threadIdx.x")
        bx = te.thread_axis("blockIdx.x")
        by = te.thread_axis("blockIdx.y")
        ib.scope_attr(tx, "thread_extent", nthread_tx)
        ib.scope_attr(bx, "thread_extent", nthread_bx)
        ib.scope_attr(by, "thread_extent", nthread_by)
        tid = bx * max_threads + tx
        with ib.if_scope(tid < num_anchors):
            i = by
            j = tid
            with ib.for_range(0, elem_length) as k:
                out[(i * num_anchors + j) * elem_length + k] = -one
            out_indices[i * num_anchors + j] = -1
    # Second kernel: scatter each valid box to its compacted slot
    # (valid_indices[i, tid]) and record its original index.
    with ib.new_scope():
        tx = te.thread_axis("threadIdx.x")
        bx = te.thread_axis("blockIdx.x")
        by = te.thread_axis("blockIdx.y")
        ib.scope_attr(tx, "thread_extent", nthread_tx)
        ib.scope_attr(bx, "thread_extent", nthread_bx)
        ib.scope_attr(by, "thread_extent", nthread_by)
        tid = bx * max_threads + tx
        with ib.if_scope(tid < num_anchors):
            i = by
            j = tid
            with ib.if_scope(valid_boxes[i, tid] > 0):
                with ib.for_range(0, elem_length) as k:
                    out[(i * num_anchors + valid_indices[i, tid]) * elem_length + k] = data[
                        (i * num_anchors + j) * elem_length + k
                    ]
                out_indices[i * num_anchors + valid_indices[i, tid]] = j
    return ib.get()
def get_valid_counts(data, score_threshold=0, id_index=0, score_index=1):
    """Get valid count of bounding boxes given a score threshold.
    Also moves valid boxes to the top of input data.
    Parameters
    ----------
    data : tvm.te.Tensor
        Input data. 3-D tensor with shape [batch_size, num_anchors, elem_length].
    score_threshold : optional, tvm.te.Tensor or float
        Lower limit of score for valid bounding boxes.
    id_index : optional, int
        index of the class categories, -1 to disable.
    score_index: optional, int
        Index of the scores/confidence of boxes.
    Returns
    -------
    valid_count : tvm.te.Tensor
        1-D tensor for valid number of boxes.
    out_tensor : tvm.te.Tensor
        Rearranged data tensor.
    """
    batch_size = data.shape[0]
    num_anchors = data.shape[1]
    data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
    valid_boxes_buf = tvm.tir.decl_buffer(
        (batch_size, num_anchors), "int32", "valid_boxes_buf", data_alignment=8
    )
    # Step 1: compute a 0/1 validity flag per anchor.
    valid_boxes = te.extern(
        [(batch_size, num_anchors)],
        [data],
        lambda ins, outs: get_valid_boxes_ir(
            ins[0], outs[0], score_threshold, id_index, score_index
        ),
        dtype=["int32"],
        in_buffers=[data_buf],
        out_buffers=[valid_boxes_buf],
        name="get_valid_boxes",
        tag="get_valid_boxes_gpu",
    )
    valid_indices_buf = tvm.tir.decl_buffer(
        (batch_size, num_anchors), "int32", "valid_indices_buf", data_alignment=8
    )
    # Step 2: exclusive scan of the flags gives each valid box its compacted
    # destination slot, and the scan's reduction gives the per-batch count.
    valid_indices, valid_count = exclusive_scan(valid_boxes, axis=1, return_reduction=True)
    out_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "out_buf", data_alignment=8)
    out_indices_buf = tvm.tir.decl_buffer(
        (batch_size, num_anchors), "int32", "out_buf", data_alignment=8
    )
    # Step 3: scatter the valid boxes to the front of the output tensor.
    out, out_indices = te.extern(
        [data.shape, (batch_size, num_anchors)],
        [data, valid_indices, valid_boxes],
        lambda ins, outs: get_valid_counts_ir(ins[0], ins[1], ins[2], outs[0], outs[1]),
        dtype=["int32", data.dtype],
        in_buffers=[data_buf, valid_indices_buf, valid_boxes_buf],
        out_buffers=[out_buf, out_indices_buf],
        name="get_valid_counts",
        tag="get_valid_counts_gpu",
    )
    return [valid_count, out, out_indices]
def _nms_loop(
    ib,
    batch_size,
    top_k,
    iou_threshold,
    max_output_size,
    valid_count,
    on_new_valid_box_func,
    on_new_invalidated_box_func,
    needs_bbox_check_func,
    calc_overlap_func,
    out_scores,
    num_valid_boxes,
):
    """Emit the generic NMS suppression loop into the given ir_builder.

    One GPU block row handles one batch element; within it, the outer loop
    walks candidate boxes in score order and, for each surviving box, the
    threads cooperatively invalidate later boxes whose IoU exceeds
    ``iou_threshold``. Variant-specific behavior is injected via the four
    callback parameters.

    Parameters
    ----------
    ib : tvm.tir.ir_builder.IRBuilder
        Builder the loop is emitted into.
    valid_count : Buffer
        Per-batch number of candidate boxes.
    on_new_valid_box_func / on_new_invalidated_box_func /
    needs_bbox_check_func / calc_overlap_func : callable
        Hooks invoked when a box is kept, when a box is suppressed, to decide
        whether a pair must be IoU-checked, and to compute the IoU.
    out_scores : Buffer
        Per-box scores; suppressed boxes are marked by setting the score to -1.
    num_valid_boxes : Buffer
        Output: per-batch count of boxes surviving suppression.
    """
    max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    with ib.new_scope():
        nthread_by = batch_size
        nthread_tx = max_threads
        # Some cuda architectures have smaller limit of 32K for cudaDevAttrMaxRegistersPerBlock
        # vs 64K for most GPUs. Since this kernel uses many registers (around 35), the limit will
        # be exceeded with 1024 threads.
        target = tvm.target.Target.current(allow_none=False)
        if target.kind.name == "cuda":
            if nvcc.get_target_compute_version(target) in ["3.2", "5.3", "6.2"]:
                nthread_tx = 512
        by = te.thread_axis("blockIdx.y")
        tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(by, "thread_extent", nthread_by)
        ib.scope_attr(tx, "thread_extent", nthread_tx)
        num_valid_boxes_local = ib.allocate(
            "int32", (1,), name="num_valid_boxes_local", scope="local"
        )
        num_valid_boxes_local[0] = 0
        def nms_inner_loop(ib, i, j, nkeep):
            # The box j is valid, invalidate other boxes that overlap with j above iou_threshold
            on_new_valid_box_func(ib, tx, num_valid_boxes_local[0], i, j)
            num_valid_boxes_local[0] += 1
            # Threads stride over the remaining boxes (j+1 .. nkeep-1).
            num_iter_per_thread = ceil_div(nkeep - (j + 1), nthread_tx)
            with ib.for_range(0, num_iter_per_thread, name="_k") as _k:
                k = j + 1 + _k * nthread_tx + tx
                with ib.if_scope(
                    tvm.tir.all(
                        k < nkeep,
                        out_scores[i, k] > 0,  # is the box k still valid?
                        needs_bbox_check_func(i, j, k),
                    )
                ):
                    iou = calc_overlap_func(i, j, k)
                    with ib.if_scope(iou >= iou_threshold):
                        # invalidate the box k
                        out_scores[i, k] = -1.0
                        on_new_invalidated_box_func(i, k)
            # All threads must see the updated scores before the next candidate.
            ib.emit(tvm.tir.Call(None, "tir.tvm_storage_sync", tvm.runtime.convert(["shared"])))
        i = by
        # Consider at most top_k candidates when top_k is positive.
        nkeep = if_then_else(tvm.tir.all(top_k > 0, top_k < valid_count[i]), top_k, valid_count[i])
        max_output_size = if_then_else(max_output_size > 0, max_output_size, nkeep)
        with ib.if_scope(tvm.tir.all(iou_threshold > 0, valid_count[i] > 0)):
            # Apply nms
            # No need to do more iteration if we have already reached max_output_size boxes
            box_idx = ib.allocate("int32", (1,), name="box_idx", scope="local")
            box_idx[0] = 0
            with ib.while_loop(
                tvm.tir.all(box_idx[0] < nkeep, num_valid_boxes_local[0] < max_output_size)
            ):
                # Proceed to the inner loop if the box with id box_idx is still valid
                with ib.if_scope(out_scores[i, box_idx[0]] > -1.0):
                    nms_inner_loop(ib, i, box_idx[0], nkeep)
                box_idx[0] += 1
            with ib.if_scope(tx + 0 == 0):
                num_valid_boxes[i] = num_valid_boxes_local[0]
        with ib.else_scope():
            # Suppression disabled or no candidates: report zero kept boxes.
            num_valid_boxes[i] = 0
    return ib.get()
def nms_ir(
    data,
    sorted_index,
    valid_count,
    indices,
    out_bboxes,
    out_scores,
    out_class_ids,
    out_features,
    box_indices,
    num_valid_boxes,
    max_output_size,
    iou_threshold,
    force_suppress,
    top_k,
    coord_start,
    id_index,
    score_index,
    return_indices,
):
    """Low level IR for the GPU non_max_suppression kernel.

    Phase 1 reorders the (up to ``top_k``) best boxes into the output
    buffers according to ``sorted_index``; phase 2 runs the shared
    ``_nms_loop`` driver with the callbacks defined below to suppress
    overlapping boxes.

    Parameters
    ----------
    data : Buffer
        Buffer of output boxes with class and score.
    sorted_index : Buffer
        Buffer of output box indexes sorted by score.
    valid_count : Buffer
        Buffer of number of valid output boxes.
    indices : Buffer
        indices in original tensor, with shape [batch_size, num_anchors],
        represents the index of box in original data. It could be the third
        output out_indices of get_valid_counts. The values in the second
        dimension are like the output of arange(num_anchors) if get_valid_counts
        is not used before non_max_suppression.
    out_bboxes : Buffer
        Output buffer, to be filled with sorted box coordinates.
    out_scores : Buffer
        Output buffer, to be filled with sorted scores.
    out_class_ids : Buffer
        Output buffer, to be filled with sorted class ids.
    out_features : Buffer
        Output buffer, to be filled with the extra per-box features
        (the elements beyond the 4 coordinates, score and optional id).
    box_indices : Buffer
        A indices tensor mapping sorted indices to original indices
        This is the first output of NMS when return_indices=True.
    num_valid_boxes : Buffer
        Record the number of boxes that have survived IOU tests.
        This is the second output of NMS when return_indices=True.
    max_output_size : int
        Max number of output valid boxes for each instance.
        By default all valid boxes are returned.
    iou_threshold : float
        Overlapping(IoU) threshold to suppress object with smaller score.
    force_suppress : boolean
        Whether to suppress all detections regardless of class_id.
    top_k : int
        Keep maximum top k detections before nms, -1 for no limit.
    coord_start : int
        Start index of the consecutive 4 coordinates.
    id_index : int
        index of the class categories, -1 to disable.
    score_index : optional, int
        Index of the scores/confidence of boxes.
    return_indices : boolean
        Whether to return box indices in input data.

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    batch_size = data.shape[0]
    num_anchors = data.shape[1]
    box_data_length = data.shape[2]
    num_features = out_features.shape[2]
    ib = tvm.tir.ir_builder.create()
    data = ib.buffer_ptr(data)
    sorted_index = ib.buffer_ptr(sorted_index)
    valid_count = ib.buffer_ptr(valid_count)
    indices = ib.buffer_ptr(indices)
    # outputs
    out_bboxes = ib.buffer_ptr(out_bboxes)
    out_scores = ib.buffer_ptr(out_scores)
    out_class_ids = ib.buffer_ptr(out_class_ids)
    out_features = ib.buffer_ptr(out_features)
    box_indices = ib.buffer_ptr(box_indices)
    num_valid_boxes = ib.buffer_ptr(num_valid_boxes)
    # Materialize Python scalars as TIR immediates so they can appear in
    # the generated IR expressions below.
    if isinstance(iou_threshold, float):
        iou_threshold = tvm.tir.FloatImm("float32", iou_threshold)
    top_k = tvm.tir.IntImm("int32", top_k)
    coord_start = tvm.tir.IntImm("int32", coord_start)
    id_index = tvm.tir.IntImm("int32", id_index)
    score_index = tvm.tir.IntImm("int32", score_index)
    force_suppress = tvm.tir.IntImm("int32", 1 if force_suppress else 0)
    max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    # Phase 1: one thread per anchor (x-grid), one y-block per batch row,
    # copies boxes into the output buffers in sorted-score order.
    with ib.new_scope():
        nthread_tx = max_threads
        nthread_bx = ceil_div(num_anchors, max_threads)
        nthread_by = batch_size
        tx = te.thread_axis("threadIdx.x")
        bx = te.thread_axis("blockIdx.x")
        by = te.thread_axis("blockIdx.y")
        ib.scope_attr(by, "thread_extent", nthread_by)
        ib.scope_attr(tx, "thread_extent", nthread_tx)
        ib.scope_attr(bx, "thread_extent", nthread_bx)
        i = by
        # Flat base offsets for batch row i in each buffer.
        base_src_idx = i * num_anchors * box_data_length
        base_bbox_idx = i * num_anchors * 4
        base_features_idx = i * num_anchors * num_features
        with ib.if_scope(tvm.tir.all(iou_threshold > 0, valid_count[i] > 0)):
            # Reorder output
            nkeep = if_then_else(
                tvm.tir.all(top_k > 0, top_k < valid_count[i]), top_k, valid_count[i]
            )
            j = bx * max_threads + tx
            with ib.if_scope(j < nkeep):
                # Gather the j-th best box of batch i from the unsorted input.
                src_idx = base_src_idx + sorted_index[i * num_anchors + j] * box_data_length
                with ib.for_range(0, 4, kind="unroll") as k:
                    out_bboxes[(base_bbox_idx + j * 4 + k)] = data[src_idx + coord_start + k]
                with ib.for_range(0, num_features, kind="unroll") as k:
                    out_features[(base_features_idx + j * num_features + k)] = data[
                        src_idx + coord_start + 4 + k
                    ]
                out_scores[i * num_anchors + j] = data[src_idx + score_index]
                # id_index is a constant immediate, so this is a compile-time branch.
                if id_index >= 0:
                    out_class_ids[i * num_anchors + j] = data[src_idx + id_index]
            with ib.else_scope():
                # Indices > nkeep are discarded
                # Only needed for return_indices = False case
                if return_indices is False:
                    with ib.if_scope(j < num_anchors):
                        with ib.for_range(0, 4, kind="unroll") as k:
                            out_bboxes[(base_bbox_idx + j * 4 + k)] = -1.0
                        with ib.for_range(0, num_features, kind="unroll") as k:
                            out_features[(base_features_idx + j * num_features + k)] = -1.0
                        out_scores[i, j] = -1.0
                        if id_index >= 0:
                            out_class_ids[i, j] = -1.0
            if return_indices:
                # Initialize the whole mapping to -1 (invalid); survivors are
                # written later by on_new_valid_box.
                with ib.if_scope(j < num_anchors):
                    box_indices[i * num_anchors + j] = -1
        with ib.else_scope():
            # Need to copy all boxes if not using return_indices
            bounds = valid_count[i] if return_indices else num_anchors
            with ib.if_scope(j < bounds):
                src_offset = base_src_idx + j * box_data_length
                with ib.for_range(0, 4, kind="unroll") as k:
                    out_bboxes[base_bbox_idx + j * 4 + k] = data[src_offset + coord_start + k]
                with ib.for_range(0, num_features, kind="unroll") as k:
                    out_features[(base_features_idx + j * num_features + k)] = data[
                        src_offset + coord_start + 4 + k
                    ]
                out_scores[i * num_anchors + j] = data[src_offset + score_index]
                if id_index >= 0:
                    out_class_ids[i * num_anchors + j] = data[src_offset + id_index]
                box_indices[i * num_anchors + j] = j
    if isinstance(max_output_size, int):
        max_output_size = tvm.tir.const(max_output_size)
    # Phase 2 callbacks, consumed by the shared _nms_loop driver.
    def calc_overlap(i, j, k):
        # IoU of boxes j and k of batch i over the reordered bbox buffer.
        offset_j = j * 4
        offset_k = k * 4
        base_bbox_idx = i * num_anchors * 4
        return calculate_overlap(
            out_bboxes,
            base_bbox_idx + offset_j,
            base_bbox_idx + offset_k,
        )
    def on_new_valid_box(ib, tid, num_current_valid_box, i, j):
        # When return_indices is False, no need to populate box_indices
        if return_indices:
            # Only one thread records the mapping back to the original index.
            with ib.if_scope(tid + 0 == 0):
                orig_idx = sorted_index[i * num_anchors + j]
                box_indices[i, num_current_valid_box] = indices[i, orig_idx]
    def on_new_invalidated_box(i, k):
        # Mark the suppressed box's class id invalid for the packed output path.
        if return_indices is False and id_index >= 0:
            out_class_ids[i, k] = -1.0
    def needs_bbox_check(i, j, k):
        # IoU only needs computing when suppression can apply: forced, no class
        # ids, or both boxes belong to the same class.
        return tvm.tir.any(
            force_suppress > 0,
            id_index < 0,
            out_class_ids[i, k] == out_class_ids[i, j],
        )
    return _nms_loop(
        ib,
        batch_size,
        top_k,
        iou_threshold,
        max_output_size,
        valid_count,
        on_new_valid_box,
        on_new_invalidated_box,
        needs_bbox_check,
        calc_overlap,
        out_scores,
        num_valid_boxes,
    )
def _fetch_score_ir(data, score, axis):
    """
    Copy the value at position *axis* of every (batch, anchor) row of the
    packed input into the flat *score* buffer.
    This routine is required for dynamic shape nms.
    """
    num_batches = data.shape[0]
    boxes_per_batch = data.shape[1]
    row_elems = data.shape[2]
    total = num_batches * boxes_per_batch
    builder = tvm.tir.ir_builder.create()
    src = builder.buffer_ptr(data)
    dst = builder.buffer_ptr(score)
    with builder.if_scope(boxes_per_batch > 0):
        threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
        thread_x = te.thread_axis("threadIdx.x")
        block_x = te.thread_axis("blockIdx.x")
        builder.scope_attr(thread_x, "thread_extent", threads)
        builder.scope_attr(block_x, "thread_extent", total // threads + 1)
        gid = block_x * threads + thread_x
        with builder.if_scope(gid < total):
            dst[gid] = src[gid * row_elems + axis]
    return builder.get()
def _dispatch_sort(scores, ret_type="indices"):
    """Argsort *scores* along axis 1 in descending order, preferring the
    thrust/rocthrust-backed kernel when the current target supports it."""
    current = tvm.target.Target.current()
    thrust_ok = current is not None and (
        can_use_thrust(current, "tvm.contrib.thrust.sort")
        or can_use_rocthrust(current, "tvm.contrib.thrust.sort")
    )
    sort_fn = argsort_thrust if thrust_ok else argsort
    return sort_fn(scores, axis=1, is_ascend=False, dtype="int32", ret_type=ret_type)
def _get_sorted_indices(data, data_buf, score_index, score_shape):
    """Extract a 1D score tensor from the packed input and do argsort on it."""
    fetched_score_buf = tvm.tir.decl_buffer(
        score_shape, data.dtype, "score_buf", data_alignment=8
    )
    # Pull the score column out of the packed (batch, anchor, elem) layout.
    fetched_scores = te.extern(
        [score_shape],
        [data],
        lambda ins, outs: _fetch_score_ir(ins[0], outs[0], score_index),
        dtype=[data.dtype],
        in_buffers=[data_buf],
        out_buffers=[fetched_score_buf],
        name="fetch_score",
        tag="fetch_score",
    )
    return _dispatch_sort(fetched_scores)
def _run_nms(
    data,
    data_buf,
    sort_tensor,
    valid_count,
    indices,
    max_output_size,
    iou_threshold,
    force_suppress,
    top_k,
    coord_start,
    id_index,
    score_index,
    return_indices,
):
    """Run NMS using sorted scores.

    Wraps :func:`nms_ir` in a ``te.extern`` call and returns its six output
    tensors in order: sorted bboxes, sorted scores, sorted class ids, sorted
    extra box features, the box-index mapping, and the per-batch count of
    surviving boxes.
    """
    sort_tensor_buf = tvm.tir.decl_buffer(
        sort_tensor.shape, sort_tensor.dtype, "sort_tensor_buf", data_alignment=8
    )
    valid_count_dtype = "int32"
    valid_count_buf = tvm.tir.decl_buffer(
        valid_count.shape, valid_count_dtype, "valid_count_buf", data_alignment=4
    )
    indices_buf = tvm.tir.decl_buffer(indices.shape, indices.dtype, "indices_buf", data_alignment=8)
    batch_size = data.shape[0]
    num_anchors = data.shape[1]
    # Number of extra features per box beyond coords, score, and id.
    num_features = data.shape[2] - 6 if id_index >= 0 else data.shape[2] - 5
    # output shapes
    bbox_shape = (batch_size, num_anchors, 4)
    score_shape = (batch_size, num_anchors)
    class_id_shape = score_shape
    out_features_shape = (batch_size, num_anchors, num_features)
    box_indices_shape = score_shape
    num_valid_boxes_shape = (batch_size, 1)
    # NOTE: output order here must match both the outs[0..5] wiring below and
    # the dtype list, which in turn match nms_ir's parameter order.
    return te.extern(
        [
            bbox_shape,
            score_shape,
            class_id_shape,
            out_features_shape,
            box_indices_shape,
            num_valid_boxes_shape,
        ],
        [data, sort_tensor, valid_count, indices],
        lambda ins, outs: nms_ir(
            ins[0],
            ins[1],
            ins[2],
            ins[3],
            outs[0],  # sorted bbox
            outs[1],  # sorted scores
            outs[2],  # sorted class ids
            outs[3],  # sorted box feats
            outs[4],  # box_indices
            outs[5],  # num_valid_boxes
            max_output_size,
            iou_threshold,
            force_suppress,
            top_k,
            coord_start,
            id_index,
            score_index,
            return_indices,
        ),
        dtype=[data.dtype, "float32", "float32", "float32", "int32", "int32"],
        in_buffers=[data_buf, sort_tensor_buf, valid_count_buf, indices_buf],
        name="nms",
        tag="nms",
    )
def _concatenate_outputs(
    out_bboxes,
    out_scores,
    out_class_ids,
    out_features,
    out_shape,
    coord_start,
    score_index,
    id_index,
):
    """Pack the results from NMS into a single tensor of shape *out_shape*.

    Writes, per (batch, anchor): the 4 box coordinates at ``coord_start``,
    the extra features right after them, the score at ``score_index``, and
    (when ``id_index >= 0``) the class id at ``id_index``.
    """
    batch_size = out_bboxes.shape[0]
    num_anchors = out_bboxes.shape[1]
    num_features = out_features.shape[2]

    # Bug fix: out_features was previously captured from the closure as a raw
    # te.Tensor (never run through ib.buffer_ptr) and was not declared as a
    # te.extern input, so the generated IR referenced an undeclared buffer.
    # It is now threaded through as a proper extern input.
    def ir(out_bboxes, out_scores, out_class_ids, out_features, out):
        ib = tvm.tir.ir_builder.create()
        out_bboxes = ib.buffer_ptr(out_bboxes)
        out_scores = ib.buffer_ptr(out_scores)
        out_class_ids = ib.buffer_ptr(out_class_ids)
        out_features = ib.buffer_ptr(out_features)
        out = ib.buffer_ptr(out)
        with ib.if_scope(num_anchors > 0):
            max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
            nthread_tx = max_threads
            nthread_bx = ceil_div(num_anchors, nthread_tx)
            tx = te.thread_axis("threadIdx.x")
            bx = te.thread_axis("blockIdx.x")
            by = te.thread_axis("blockIdx.y")
            ib.scope_attr(tx, "thread_extent", nthread_tx)
            ib.scope_attr(bx, "thread_extent", nthread_bx)
            ib.scope_attr(by, "thread_extent", batch_size)
            tid = bx * nthread_tx + tx
            i = by
            with ib.if_scope(tid < num_anchors):
                with ib.for_range(0, 4, kind="unroll") as j:
                    out[i, tid, coord_start + j] = out_bboxes[i, tid, j]
                with ib.for_range(0, num_features, kind="unroll") as j:
                    out[i, tid, coord_start + 4 + j] = out_features[i, tid, j]
                out[i, tid, score_index] = out_scores[i, tid]
                # id_index is a plain Python int here: compile-time branch.
                if id_index >= 0:
                    out[i, tid, id_index] = out_class_ids[i, tid]
        return ib.get()

    return te.extern(
        [out_shape],
        [out_bboxes, out_scores, out_class_ids, out_features],
        lambda ins, outs: ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
        dtype=["float32"],
        name="nms_output_concat",
        tag="nms_output_concat",
    )
def non_max_suppression(
    data,
    valid_count,
    indices,
    max_output_size=-1,
    iou_threshold=0.5,
    force_suppress=False,
    top_k=-1,
    coord_start=2,
    score_index=1,
    id_index=0,
    return_indices=True,
    invalid_to_bottom=False,
):
    """Non-maximum suppression operator for object detection.
    Parameters
    ----------
    data : tvm.te.Tensor
        3-D tensor with shape [batch_size, num_anchors, elem_length].
        The last dimension should be in format of
        [class_id, score, box_left, box_top, box_right, box_bottom].
        It could be the second output out_tensor of get_valid_counts.
    valid_count : tvm.te.Tensor
        1-D tensor for valid number of boxes. It could be the output
        valid_count of get_valid_counts.
    indices : tvm.te.Tensor
        2-D tensor with shape [batch_size, num_anchors], represents
        the index of box in original data. It could be the third
        output out_indices of get_valid_counts. The values in the
        second dimension are like the output of arange(num_anchors)
        if get_valid_counts is not used before non_max_suppression.
    max_output_size : optional, tvm.te.Tensor or int
        Max number of output valid boxes for each instance.
        By default all valid boxes are returned.
    iou_threshold : optional, tvm.te.Tensor or float
        Non-maximum suppression threshold.
    force_suppress : optional, boolean
        Whether to suppress all detections regardless of class_id.
    top_k : optional, int
        Keep maximum top k detections before nms, -1 for no limit.
    coord_start : required, int
        Start index of the consecutive 4 coordinates.
    score_index : optional, int
        Index of the scores/confidence of boxes.
    id_index : optional, int
        index of the class categories, -1 to disable.
    return_indices : boolean
        Whether to return box indices in input data.
    invalid_to_bottom : optional, boolean
        Whether to move all valid bounding boxes to the top.
    Returns
    -------
    out : tvm.te.Tensor
        3-D tensor with shape [batch_size, num_anchors, elem_length].
    Example
    --------
    .. code-block:: python
        # An example to use nms
        dshape = (1, 5, 6)
        data = te.placeholder(dshape, name="data")
        valid_count = te.placeholder((dshape[0],), dtype="int32", name="valid_count")
        iou_threshold = 0.7
        force_suppress = True
        top_k = -1
        out = non_max_suppression(data=data, valid_count=valid_count, iou_threshold=iou_threshold,
                                  force_suppress=force_suppress, top_k=top_k, return_indices=False)
        np_data = np.random.uniform(size=dshape)
        np_valid_count = np.array([4])
        s = topi.generic.schedule_nms(out)
        f = tvm.build(s, [data, valid_count, out], "cuda")
        dev = tvm.cuda(0)
        tvm_data = tvm.nd.array(np_data, dev)
        tvm_valid_count = tvm.nd.array(np_valid_count, dev)
        tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data.dtype), dev)
        f(tvm_data, tvm_valid_count, tvm_out)
    """
    # Sort boxes by score once up front; the NMS kernel then works on the
    # reordered buffers.
    data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
    sort_tensor = _get_sorted_indices(data, data_buf, score_index, (data.shape[0], data.shape[1]))
    out_bboxes, out_scores, out_class_ids, out_features, box_indices, num_valid_boxes = _run_nms(
        data,
        data_buf,
        sort_tensor,
        valid_count,
        indices,
        max_output_size,
        iou_threshold,
        force_suppress,
        top_k,
        coord_start,
        id_index,
        score_index,
        return_indices,
    )
    if return_indices:
        return [box_indices, num_valid_boxes]
    # Otherwise pack the surviving boxes back into the input layout.
    return _concatenate_outputs(
        out_bboxes,
        out_scores,
        out_class_ids,
        out_features,
        data.shape,
        coord_start,
        score_index,
        id_index,
    )
def _get_valid_box_count(scores, score_threshold):
    """Compute, for every row of *scores*, the number of valid boxes via
    ``binary_search`` against *score_threshold* (one thread per row)."""
    num_rows, num_boxes = scores.shape

    def _count_ir(score_data, valid_count):
        builder = tvm.tir.ir_builder.create()
        score_ptr = builder.buffer_ptr(score_data)
        count_ptr = builder.buffer_ptr(valid_count)
        block_x = te.thread_axis("blockIdx.x")
        thread_x = te.thread_axis("threadIdx.x")
        threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
        with builder.new_scope():
            builder.scope_attr(block_x, "thread_extent", ceil_div(num_rows, threads))
            builder.scope_attr(thread_x, "thread_extent", threads)
            row = block_x * threads + thread_x
            with builder.if_scope(row < num_rows):
                binary_search(builder, row, num_boxes, score_ptr, score_threshold, count_ptr)
        return builder.get()

    in_buf = tvm.tir.decl_buffer(scores.shape, scores.dtype, "scores_buf", data_alignment=8)
    return te.extern(
        [(num_rows,)],
        [scores],
        lambda ins, outs: _count_ir(ins[0], outs[0]),
        dtype=["int32"],
        in_buffers=[in_buf],
        name="searchsorted",
        tag="searchsorted",
    )
def _collect_selected_indices_ir(num_class, selected_indices, num_detections, row_offsets, out):
    """IR that packs per-(batch, class) selections into the flat ONNX-style
    output of (batch_index, class_index, box_index) rows.

    ``row_offsets`` holds, for each flattened (batch * class) row, the start
    position of that row's detections in ``out`` (an exclusive scan of
    ``num_detections``).
    """
    batch_classes, num_boxes = selected_indices.shape
    ib = tvm.tir.ir_builder.create()
    selected_indices = ib.buffer_ptr(selected_indices)
    num_detections = ib.buffer_ptr(num_detections)
    row_offsets = ib.buffer_ptr(row_offsets)
    out = ib.buffer_ptr(out)
    max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    nthread_tx = max_threads
    nthread_bx = ceil_div(num_boxes, nthread_tx)
    nthread_by = batch_classes
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    by = te.thread_axis("blockIdx.y")
    ib.scope_attr(tx, "thread_extent", nthread_tx)
    ib.scope_attr(bx, "thread_extent", nthread_bx)
    ib.scope_attr(by, "thread_extent", nthread_by)
    with ib.new_scope():
        idx = bx * nthread_tx + tx
        # blockIdx.y enumerates the flattened (batch, class) rows.
        idy = cast(by, "int64")
        batch_id = idy // num_class
        class_id = idy % num_class
        with ib.if_scope(idx < num_detections[idy]):
            out[row_offsets[idy] + idx, 0] = batch_id
            out[row_offsets[idy] + idx, 1] = class_id
            out[row_offsets[idy] + idx, 2] = cast(selected_indices[idy, idx], "int64")
    return ib.get()
def _collect_selected_indices_and_scores_ir(
    selected_indices,
    selected_scores,
    num_detections,
    row_offsets,
    num_total_detections,
    collected_indices,
    collected_scores,
):
    """IR that packs per-(batch, class) selections and scores into the
    TensorFlow-style per-batch outputs.

    Valid detections of row (batch, class) are written contiguously starting
    at ``row_offsets[batch, class]``; all remaining slots of each batch are
    filled with zero indices and 0.0 scores after the valid entries.
    """
    batch_size, num_class = row_offsets.shape
    num_boxes = selected_indices.shape[1]
    ib = tvm.tir.ir_builder.create()
    selected_indices = ib.buffer_ptr(selected_indices)
    selected_scores = ib.buffer_ptr(selected_scores)
    num_detections = ib.buffer_ptr(num_detections)
    row_offsets = ib.buffer_ptr(row_offsets)
    num_total_detections = ib.buffer_ptr(num_total_detections)
    collected_indices = ib.buffer_ptr(collected_indices)
    collected_scores = ib.buffer_ptr(collected_scores)
    max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    nthread_tx = max_threads
    nthread_bx = ceil_div(num_boxes, nthread_tx)
    nthread_by = batch_size * num_class
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    by = te.thread_axis("blockIdx.y")
    ib.scope_attr(tx, "thread_extent", nthread_tx)
    ib.scope_attr(bx, "thread_extent", nthread_bx)
    ib.scope_attr(by, "thread_extent", nthread_by)
    zero = cast(0, "int64")
    with ib.new_scope():
        idx = bx * nthread_tx + tx
        # blockIdx.y enumerates the flattened (batch, class) rows.
        idy = cast(by, "int64")
        batch_id = idy // num_class
        class_id = idy % num_class
        with ib.if_scope(idx < num_detections[batch_id, class_id]):
            offset = row_offsets[batch_id, class_id] + idx
            collected_indices[batch_id, offset, 0] = class_id
            collected_indices[batch_id, offset, 1] = cast(selected_indices[idy, idx], "int64")
            collected_scores[batch_id, offset] = selected_scores[idy, idx]
        with ib.else_scope():
            with ib.if_scope(idx < num_boxes):
                # Destination slot inside the padding region that follows all
                # valid detections of this batch.
                offset = (
                    num_total_detections[batch_id]
                    + class_id * num_boxes
                    - row_offsets[batch_id, class_id]
                    + idx
                    - num_detections[batch_id, class_id]
                )
                collected_indices[batch_id, offset, 0] = zero
                collected_indices[batch_id, offset, 1] = zero
                collected_scores[batch_id, offset] = 0.0
    return ib.get()
def all_class_non_max_suppression(
    boxes,
    scores,
    max_output_boxes_per_class,
    iou_threshold,
    score_threshold,
    output_format="onnx",
):
    """Non-maximum suppression operator for object detection, corresponding to ONNX
    NonMaxSuppression and TensorFlow combined_non_max_suppression.
    NMS is performed for each class separately.
    Parameters
    ----------
    boxes : tvm.te.Tensor
        3-D tensor with shape (batch_size, num_boxes, 4)
    scores: tvm.te.Tensor
        3-D tensor with shape (batch_size, num_classes, num_boxes)
    max_output_boxes_per_class : int or tvm.te.Tensor, optional
        The maximum number of output selected boxes per class
    iou_threshold : float or tvm.te.Tensor, optional
        IoU test threshold
    score_threshold : float or tvm.te.Tensor, optional
        Score threshold to filter out low score boxes early
    output_format : str, optional
        "onnx" or "tensorflow", see below
    Returns
    -------
    out : list of tvm.te.Tensor
        If `output_format` is "onnx", the output is two tensors. The first is `indices` of size
        `(batch_size * num_class* num_boxes , 3)` and the second is a scalar tensor
        `num_total_detection` of shape `(1,)` representing the total number of selected
        boxes. The three values in `indices` encode batch, class, and box indices.
        Rows of `indices` are ordered such that selected boxes from batch 0, class 0 come
        first, in descending of scores, followed by boxes from batch 0, class 1 etc. Out of
        `batch_size * num_class* num_boxes` rows of indices, only the first `num_total_detection`
        rows are valid.
        If `output_format` is "tensorflow", the output is three tensors, the first
        is `indices` of size `(batch_size, num_class * num_boxes , 2)`, the second is `scores` of
        size `(batch_size, num_class * num_boxes)`, and the third is `num_total_detection` of size
        `(batch_size,)` representing the total number of selected boxes per batch. The two values
        in `indices` encode class and box indices. Of num_class * num_boxes boxes in `indices` at
        batch b, only the first `num_total_detection[b]` entries are valid. The second axis of
        `indices` and `scores` are sorted within each class by box scores, but not across classes.
        So the box indices and scores for the class 0 come first in a sorted order, followed by
        the class 1 etc.
    """
    batch, num_class, num_boxes = scores.shape
    # Flatten (batch, class) so each row is one independent NMS problem.
    scores = reshape(scores, (batch * num_class, num_boxes))
    sorted_scores, sorted_indices = _dispatch_sort(scores, ret_type="both")
    valid_count = _get_valid_box_count(sorted_scores, score_threshold)
    selected_indices, selected_scores, num_detections = run_all_class_nms(
        boxes,
        sorted_scores,
        sorted_indices,
        valid_count,
        max_output_boxes_per_class,
        iou_threshold,
        _nms_loop,
        return_scores=(output_format == "tensorflow"),
    )
    if output_format == "onnx":
        # Exclusive scan turns per-row counts into output row offsets.
        row_offsets, num_total_detections = exclusive_scan(
            num_detections, return_reduction=True, output_dtype="int64"
        )
        selected_indices = collect_selected_indices(
            num_class, selected_indices, num_detections, row_offsets, _collect_selected_indices_ir
        )
        return [selected_indices, num_total_detections]
    num_detections_per_batch = reshape(num_detections, (batch, num_class))
    row_offsets, num_total_detections = exclusive_scan(
        num_detections_per_batch, return_reduction=True, output_dtype="int64", axis=1
    )
    selected_indices, selected_scores = collect_selected_indices_and_scores(
        selected_indices,
        selected_scores,
        num_detections_per_batch,
        row_offsets,
        num_total_detections,
        _collect_selected_indices_and_scores_ir,
    )
    return [selected_indices, selected_scores, num_total_detections]
| 40,245 | 34.057491 | 121 | py |
tvm | tvm-main/python/tvm/topi/cuda/pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Schedule for pooling operators"""
import tvm
from tvm import te
from .. import tag
from ..utils import traverse_inline
from .reduction import _schedule_reduce
from .injective import schedule_injective_from_existing
def schedule_adaptive_pool(outs, layout="NCHW"):
    """Schedule for adaptive_pool.
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of adaptive_pool
        in the format of an array of tensors.
    layout: str
        Data layout ("NCHW" or "NHWC"); used only to detect the global
        pooling case (output spatial dims equal to 1).
    Returns
    -------
    s: Schedule
        The computation schedule for adaptive_pool.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    def _schedule_non_global(Pool):
        # Fuse all output axes and bind them across blocks/threads; the
        # pooling stage itself is computed per output element in local scope.
        if Pool.op in s.outputs:
            Out = Pool
            OL = s.cache_write(Pool, "local")
        else:
            Out = outs[0].op.output(0)
            s[Pool].set_scope("local")
        max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
        fused_axis = s[Out].fuse(*s[Out].op.axis)
        bx, tx = s[Out].split(fused_axis, factor=max_threads)
        s[Out].bind(bx, te.thread_axis("blockIdx.x"))
        s[Out].bind(tx, te.thread_axis("threadIdx.x"))
        if Pool.op in s.outputs:
            s[OL].compute_at(s[Out], tx)
        else:
            s[Pool].compute_at(s[Out], tx)
    scheduled_ops = []
    def traverse(OP):
        """Internal traverse function"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_injective(OP.tag):
            if OP not in s.outputs:
                s[OP].compute_inline()
            for tensor in OP.input_tensors:
                if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
                    traverse(tensor.op)
        # schedule global_pool
        elif OP.tag.startswith("adaptive_pool"):
            Pool = OP.output(0)
            oshape = Pool.shape
            # A 1x1 output means the pool reduces over the whole spatial
            # extent, so the reduction schedule is used instead.
            if (layout == "NCHW" and oshape[2] == 1 and oshape[3] == 1) or (
                layout == "NHWC" and oshape[1] == 1 and oshape[2] == 1
            ):
                _schedule_reduce(OP, s)
                if OP != outs[0].op:
                    # the final division for adaptive pool or fused elemwise ops
                    schedule_injective_from_existing(s, outs[0])
            else:
                _schedule_non_global(Pool)
        else:
            raise RuntimeError(f"Unsupported operator: {OP.tag}")
        scheduled_ops.append(OP)
    traverse(outs[0].op)
    return s
def schedule_pool(outs, layout):
    """Schedule for pool.
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of pool
        in the format of an array of tensors.
    layout: str
        Data layout.
    Returns
    -------
    s: Schedule
        The computation schedule for pool.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    def _schedule(PaddedInput, Pool):
        # Inline the padding stage and bind the flattened pooling output
        # across blocks/threads; the pool itself runs in local scope.
        if isinstance(PaddedInput.op, tvm.te.ComputeOp):
            s[PaddedInput].compute_inline()
        num_thread = tvm.target.Target.current(allow_none=False).max_num_threads
        if Pool.op in s.outputs:
            Out = Pool
            OL = s.cache_write(Pool, "local")
        else:
            Out = outs[0].op.output(0)
            s[Pool].set_scope("local")
        fused = s[Out].fuse(*s[Out].op.axis)
        bx, tx = s[Out].split(fused, factor=num_thread)
        s[Out].bind(bx, te.thread_axis("blockIdx.x"))
        s[Out].bind(tx, te.thread_axis("threadIdx.x"))
        if Pool.op in s.outputs:
            s[OL].compute_at(s[Out], tx)
        else:
            s[Pool].compute_at(s[Out], tx)
    scheduled_ops = []
    def traverse(OP):
        """Internal traverse function"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_injective(OP.tag):
            if OP not in s.outputs:
                s[OP].compute_inline()
            for tensor in OP.input_tensors:
                if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
                    traverse(tensor.op)
        # schedule pool
        elif OP.tag.startswith("pool"):
            PaddedInput = OP.input_tensors[0]
            Pool = OP.output(0)
            _schedule(PaddedInput, Pool)
        else:
            raise RuntimeError(f"Unsupported operator: {OP.tag}")
        scheduled_ops.append(OP)
    traverse(outs[0].op)
    return s
def schedule_pool_grad(outs):
    """Schedule for pool_grad on CUDA
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of pool_grad
        in the format of an array of tensors.
    Returns
    -------
    s: Schedule
        The computation schedule for pool_grad.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    def _schedule_pool_grad(op):
        if op in s.outputs:
            out = op
        else:
            out = outs[0].op.output(0)
        # Fuse all output axes and bind them across blocks/threads.
        fused = s[out].fuse(*s[out].op.axis)
        num_thread = tvm.target.Target.current(allow_none=False).max_num_threads
        bx, tx = s[out].split(fused, factor=num_thread)
        s[out].bind(bx, te.thread_axis("blockIdx.x"))
        s[out].bind(tx, te.thread_axis("threadIdx.x"))
        # max-pool gradient routes values through an argmax-index stage;
        # compute it at the thread axis and inline its padding producer.
        if tag.COMM_REDUCE_IDX in op.input_tensors[0].op.tag:
            max_pool_index = op.input_tensors[0]
            s[max_pool_index].compute_at(s[out], tx)
            pool_input = max_pool_index.op.input_tensors[0]
            if isinstance(pool_input.op, tvm.te.ComputeOp):
                # handle padding
                s[pool_input].compute_inline()
        if op not in s.outputs:
            s[op].compute_at(s[out], tx)
    def _callback(op):
        if op.tag.startswith("pool_grad"):
            _schedule_pool_grad(op)
    traverse_inline(s, outs[0].op, _callback)
    return s
| 6,895 | 32.475728 | 97 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Compute definition for conv2d with cuda backend"""
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import OtherOptionEntity
from tvm.contrib import cudnn
from .. import nn, generic
from ..nn.utils import get_pad_tuple
from ..utils import get_const_tuple, traverse_inline
from .conv2d_direct import schedule_direct_cuda
@autotvm.register_topi_compute("conv2d_nchw.cuda")
def conv2d_nchw(cfg, data, kernel, strides, padding, dilation, out_dtype="float32"):
    """Compute conv2d with NCHW layout.

    Thin autotvm-registered wrapper around ``nn.conv2d_nchw``; ``cfg`` is
    unused here but required by the autotvm compute signature.
    """
    return nn.conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv2d_nchw.cuda")
def schedule_conv2d_nchw(cfg, outs):
    """Create the schedule for conv2d_nchw."""
    out_tensors = outs if not isinstance(outs, te.tensor.Tensor) else [outs]
    sch = te.create_schedule([t.op for t in out_tensors])

    def _schedule_op(op):
        # Only the convolution stage itself needs the direct CUDA schedule;
        # fused elementwise stages are handled by traverse_inline.
        if op.tag == "conv2d_nchw":
            schedule_direct_cuda(cfg, sch, op.output(0))

    traverse_inline(sch, out_tensors[0].op, _schedule_op)
    return sch
@autotvm.register_topi_compute("conv2d_cudnn.cuda")
def conv2d_cudnn(
    cfg, data, kernel, strides, padding, dilation, groups=1, layout="NCHW", out_dtype="float32"
):
    """Compute conv2d using the cuDNN library.

    Parameters
    ----------
    cfg : autotvm config
        Used to select the cuDNN forward algorithm via the "algo" knob.
    data : te.Tensor
        Input tensor, NCHW or NHWC per ``layout``.
    kernel : te.Tensor
        Weight tensor of shape (CO, CI, KH, KW).
    strides, padding, dilation : int or tuple of int
        Standard convolution parameters.
    groups : int
        Number of convolution groups.
    layout : str
        "NCHW" or "NHWC".
    out_dtype : str
        Unused by the cuDNN path except that int8 inputs accumulate in int32.

    Raises
    ------
    ValueError
        For unsupported layouts, asymmetric padding, or int8 with NCHW.
    """
    if layout == "NCHW":
        tensor_format = 0  # CUDNN_TENSOR_NCHW
        N, _, H, W = get_const_tuple(data.shape)
    elif layout == "NHWC":
        tensor_format = 1  # CUDNN_TENSOR_NHWC
        N, H, W, _ = get_const_tuple(data.shape)
    else:
        raise ValueError(f"Unsupported layout {layout} in cudnn")
    CO, CI, KH, KW = get_const_tuple(kernel.shape)

    # handle dilation
    stride_h, stride_w = (strides, strides) if isinstance(strides, int) else strides
    dilation_h, dilation_w = (dilation, dilation) if isinstance(dilation, int) else dilation
    KH_dilated = (KH - 1) * dilation_h + 1
    # Bug fix: the effective kernel width must be dilated by dilation_w
    # (previously used dilation_h, giving wrong padding when the two differ).
    KW_dilated = (KW - 1) * dilation_w + 1

    pt, pl, pb, pr = get_pad_tuple(padding, (KH_dilated, KW_dilated))
    if (pt != pb) or (pl != pr):
        raise ValueError("Cudnn doesn't support asymmetric padding.")

    # Output extent estimate, used only for the FLOP count below.
    OH = (H + pt + pb - KH) // stride_h + 1
    OW = (W + pl + pr - KW) // stride_w + 1

    if isinstance(N, int):
        cfg.add_flop(
            groups
            * 2
            * N
            * OH
            * OW
            * CO
            * CI
            * ((KH - 1) * dilation_h + 1)
            * ((KW - 1) * dilation_w + 1)
        )

    if data.dtype == "int8" or kernel.dtype == "int8":
        if layout == "NCHW":
            raise ValueError("NCHW layout do not support int8 in cudnn")
        dtype = "int32"
    else:
        dtype = data.dtype

    cfg.define_knob("algo", range(cudnn.algo_to_index("fwd", "CUDNN_CONVOLUTION_FWD_ALGO_COUNT")))
    if cfg.is_fallback:
        if cudnn.exists():
            # Let CUDNN choose the best algo, based on benchmarks run
            # on the local machine. In the future, this should be
            # based on parameters stored in the Target.
            cfg["algo"] = OtherOptionEntity(-1)
        else:
            cfg["algo"] = OtherOptionEntity(0)

    return cudnn.conv_forward(
        data,
        kernel,
        [pt, pl],  # cudnn padding pt, pl on both sides of input
        [stride_h, stride_w],
        [dilation_h, dilation_w],
        conv_mode=1,
        tensor_format=tensor_format,
        algo=cfg["algo"].val,
        conv_dtype=dtype,
        groups=groups,
    )
@autotvm.register_topi_schedule("conv2d_cudnn.cuda")
def schedule_conv2d_cudnn(cfg, outs):
    """Create the schedule for conv2d_cudnn"""
    # cuDNN calls are opaque extern ops, so the generic extern schedule applies.
    return generic.schedule_extern(outs)
def conv2d_backward_weight_cudnn(
    dy, x, kernel_size, padding, stride, dilation, groups, layout, output_dtype
):
    """Compute the conv2d weight gradient (wgrad) with the cuDNN library.

    ``dy`` is the gradient of the conv2d output and ``x`` the forward input;
    the result has the shape of the forward kernel.
    """
    assert layout in ["NCHW", "NHWC"]
    if dy.dtype == "float16":
        # cuDNN does not seem to support other combination.
        assert output_dtype == "float16", "Only supports fp16 output for cuDNN fp16 wgrad."
    # Accumulation is always fp32, regardless of the input/output dtype.
    accumulation_dtype = "float32"
    # 0 = CUDNN_TENSOR_NCHW, 1 = CUDNN_TENSOR_NHWC
    fmt = 0 if layout == "NCHW" else 1
    return cudnn.conv_backward_filter(
        dy,
        x,
        kernel_size,
        padding,
        stride,
        dilation,
        conv_mode=1,
        tensor_format=fmt,
        conv_dtype=accumulation_dtype,
        groups=groups,
    )
| 5,169 | 33.238411 | 98 | py |
tvm | tvm-main/python/tvm/topi/cuda/reduction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,too-many-locals,len-as-condition
"""Schedule for reduce operators"""
from __future__ import absolute_import as _abs
from operator import mul
from functools import reduce
import tvm
from tvm import te
from .. import tag
from .injective import schedule_injective_from_existing
def _schedule_reduce(op, sch, is_idx_reduce=False):
    """Schedule one reduction op (sum/min/max, or argmin/argmax when
    ``is_idx_reduce`` is True).

    For index reductions ``op`` is the final selection stage and the fused
    (index, value) reduction is its input; for plain reductions ``op`` itself
    is the reduction. Mutates and returns ``sch``.
    """
    if is_idx_reduce:
        # The actual tuple-producing reduction feeding the argmin/argmax output.
        data_out = op.input_tensors[0]
    else:
        data_in = op.input_tensors[0]
        data_out = op.output(0)
    if not sch[data_out].op.reduce_axis:
        # Degenerate case: nothing left to reduce, treat as injective.
        return schedule_injective_from_existing(sch, op.output(0))
    if len(sch[data_out].op.axis) > 0:
        # Spatial axes remain: cross-thread reduce within a block, spread the
        # spatial extent over blocks.
        all_reduce = False
        num_thread = 32
        target = tvm.target.Target.current()
        if target and (target.kind.name == "opencl" or target.kind.name == "metal"):
            # without it, CL_INVALID_WORK_GROUP_SIZE occurred when running test_topi_reduce.py
            # don't know why
            num_thread = 16
        block_x = te.thread_axis("blockIdx.x")
        thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
        thread_y = te.thread_axis((0, num_thread), "threadIdx.y")
    else:
        # Full reduction to a scalar: one block, as many threads as allowed.
        all_reduce = True
        num_thread = tvm.target.Target.current(allow_none=False).max_num_threads
        thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
    # Fuse and refactor the reduce axis
    fused_reduce = sch[data_out].fuse(
        *[sch[data_out].op.reduce_axis[i] for i in range(len(sch[data_out].op.reduce_axis))]
    )
    ko, ki = sch[data_out].split(fused_reduce, factor=num_thread)
    if is_idx_reduce:
        # rfactor on a tuple-reduction yields one tensor per tuple field.
        data_out_rf, _ = sch.rfactor(data_out, ki)
    else:
        data_out_rf = sch.rfactor(data_out, ki)
    tx = sch[data_out].op.reduce_axis[0]
    sch[data_out].bind(tx, thread_x)
    sch[data_out_rf].compute_at(sch[data_out], tx)
    if is_idx_reduce:
        real_output = op.output(0)
        temp_idx_input = data_out.op.output(0)
        temp_val_input = data_out.op.output(1)
    else:
        real_output = data_out
    if not all_reduce:
        # Fuse and split the axis
        fused_outer = sch[real_output].fuse(
            *[sch[real_output].op.axis[i] for i in range(len(sch[real_output].op.axis))]
        )
        bx, outer_in = sch[real_output].split(fused_outer, factor=num_thread)
        # Bind the axes to threads and blocks
        sch[real_output].bind(outer_in, thread_y)
        sch[real_output].bind(bx, block_x)
        if is_idx_reduce:
            sch[temp_idx_input].compute_at(sch[real_output], outer_in)
            sch[temp_val_input].compute_at(sch[real_output], outer_in)
        # Only lane 0 of the reduction holds the final value, and the tail
        # block must not store past the flattened output extent.
        sch[real_output].set_store_predicate(
            tvm.tir.all(
                thread_x.equal(0), block_x * num_thread + thread_y < reduce(mul, real_output.shape)
            )
        )
    else:
        if is_idx_reduce:
            spatial_axis = sch[real_output].fuse(*(sch[real_output].op.axis))
            sch[real_output].bind(spatial_axis, te.thread_axis("blockIdx.x"))
            sch[temp_idx_input].compute_at(sch[real_output], spatial_axis)
            sch[temp_val_input].compute_at(sch[real_output], spatial_axis)
        # Only the first thread writes the scalar result.
        sch[real_output].set_store_predicate(thread_x.equal(0))
    return sch
def _enable_auto_inline(sch):
    """Return True when auto-inlining of pre-reduce ops is safe.

    That is the case when every non-output compute stage in ``sch`` is still
    untouched (no splits/fuses applied, attached at group root, original
    iteration variables) and carries no reduction axes of its own.
    """
    def _was_touched(stage):
        # auto inline requires the attach type is AttachType.kGroupRoot
        pristine = (
            len(stage.relations) == 0
            and stage.attach_type == 1
            and stage.all_iter_vars == stage.leaf_iter_vars
        )
        return not pristine

    for stage in sch.stages:
        if stage.is_output or not isinstance(stage.op, tvm.te.ComputeOp):
            continue
        if _was_touched(stage) or len(stage.op.reduce_axis) != 0:
            return False
    return True
def schedule_reduce_impl(
    outs, schedule_reduce_stage, schedule_injective_stage, inline_postops=False
):
    """Schedule for inject->reduce->bcast ops.
    Traverse over the stages in the schedule and schedule separate stages depending
    on the position of the stage. Injective post-ops of reduction will be scheduled using
    injection schedule, injective pre-ops of reduction will be inlined, reduction stage
    will be scheduled using reduction schedule
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of reduce in the format
        of an array of tensors.
    schedule_reduce_stage: Function responsible for scheduling the reduction
        stage
    schedule_injective_stage: Function responsible for scheduling the
        standalone injection stage
    inline_postops: bool
        When True, injective post-ops of the reduction are left for the caller
        instead of being scheduled here.
    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    sch = te.create_schedule([x.op for x in outs])
    # Ops already visited; shared by both traversal helpers below.
    scheduled_ops = []
    enable_auto_inline = _enable_auto_inline(sch)
    def traverse_before_reduce(operator):
        """Internal traverse function: inline everything upstream of a reduce."""
        if isinstance(operator, tvm.te.PlaceholderOp):
            return
        if tag.is_injective(operator.tag):
            sch[operator].compute_inline()
            for tensor in operator.input_tensors:
                if tensor.op not in scheduled_ops:
                    traverse_before_reduce(tensor.op)
        else:
            raise RuntimeError(f"Unsupported operator: {operator.tag}")
        scheduled_ops.append(operator)
    def traverse_after_reduce(operator):
        """Internal traverse function: walk from outputs down to the reduce."""
        if tag.is_broadcast(operator.tag):
            if operator not in scheduled_ops and not inline_postops:
                schedule_injective_stage(sch, operator.output(0))
            for tensor in operator.input_tensors:
                if tensor.op not in scheduled_ops:
                    # Once auto-inline is known safe, everything further
                    # upstream can be inlined directly.
                    if enable_auto_inline:
                        traverse_before_reduce(tensor.op)
                    else:
                        traverse_after_reduce(tensor.op)
        elif operator.tag == "comm_reduce":
            if operator not in scheduled_ops:
                schedule_reduce_stage(operator, sch, is_idx_reduce=False)
            for tensor in operator.input_tensors:
                if tensor.op not in scheduled_ops:
                    traverse_before_reduce(tensor.op)
        elif operator.tag == "comm_reduce_idx":
            if operator not in scheduled_ops:
                schedule_reduce_stage(operator, sch, is_idx_reduce=True)
            # For idx reductions, the real inputs sit behind the fused
            # (index, value) reduction op.
            input_tensors = operator.input_tensors[0].op.input_tensors
            for tensor in input_tensors:
                if tensor.op not in scheduled_ops:
                    traverse_before_reduce(tensor.op)
        elif isinstance(operator, tvm.te.PlaceholderOp):
            pass
        else:
            raise RuntimeError(f"Unsupported operator: {operator.tag}")
        scheduled_ops.append(operator)
    for out in outs:
        traverse_after_reduce(out.op)
    return sch
def schedule_reduce(outs):
    """Default CUDA schedule for reduce ops: reduction stages get the CUDA
    reduce schedule, surrounding injective ops the injective schedule."""
    return schedule_reduce_impl(
        outs, _schedule_reduce, schedule_injective_from_existing
    )
| 7,891 | 38.46 | 99 | py |
tvm | tvm-main/python/tvm/topi/cuda/group_conv2d_nchw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
# pylint: disable=no-value-for-parameter
"""The template for cuda group_conv2d_nchw"""
import tvm
from tvm import te
from tvm import autotvm
from .injective import schedule_injective_from_existing
from .tensor_intrin import dp4a
from ..nn.pad import pad
from ..nn.conv2d import unpack_NCHWc_to_nchw
from ..nn.utils import get_pad_tuple
from ..utils import traverse_inline, get_const_tuple, get_const_int
from .. import nn
def group_conv2d_nchw_int8(data, kernel, strides, padding, dilation, groups, out_dtype="float32"):
    """Compute int8 group conv2d in NCHW layout.

    Packs to the NCHWc layout internally, runs the int8 NCHWc kernel, then
    unpacks the result back to NCHW.
    """
    for operand in (data, kernel):
        assert operand.dtype in ("int8", "uint8")
    assert data.dtype == kernel.dtype
    packed = group_conv2d_NCHWc_int8(data, kernel, strides, padding, dilation, groups, out_dtype)
    return unpack_NCHWc_to_nchw(packed, out_dtype)
def schedule_group_conv2d_nchw_int8(outs):
    """Create the schedule for int8 group conv2d in NCHW layout.

    The compute is packed to NCHWc internally, so the NCHWc int8 schedule
    applies unchanged.
    """
    return schedule_group_conv2d_NCHWc_int8(outs)
@autotvm.register_topi_compute("group_conv2d_nchw.cuda")
def group_conv2d_nchw(_, data, kernel, stride, padding, dilation, groups, out_dtype="float32"):
    """Group conv2d in NCHW layout; delegates to the generic topi compute
    (the cfg argument is unused by the compute itself)."""
    return nn.group_conv2d_nchw(data, kernel, stride, padding, dilation, groups, out_dtype)
@autotvm.register_topi_schedule("group_conv2d_nchw.cuda")
def schedule_group_conv2d_nchw(cfg, outs):
    """TOPI schedule callback of group conv2d for cuda gpu
    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template
    outs: Array of Tensor
        The computation graph description of conv2d
        in the format of an array of tensors.
    Returns
    -------
    s: Schedule
        The computation schedule for group conv2d.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sched = te.create_schedule([tensor.op for tensor in outs])

    def _visit(op):
        # Only the conv op itself gets the direct template; everything else is
        # handled by traverse_inline.
        if op.tag == "group_conv2d_nchw":
            _schedule_group_conv2d_nchw_direct(cfg, sched, op.output(0))

    traverse_inline(sched, outs[0].op, _visit)
    return sched
def _schedule_group_conv2d_nchw_direct(cfg, s, conv):
    """Schedule group conv2d NCHW direct template.

    Tiles batch/filter/spatial axes over blocks, vthreads and threads, caches
    input and kernel tiles in shared memory, and exposes the tile sizes plus
    unrolling as autotvm knobs on ``cfg``.
    """
    # The group count is serialized into the workload attrs at position 6.
    workload = conv.op.attrs["workload"]
    groups = get_const_int(workload[6])
    num_filters = get_const_int(conv.shape[1])
    ##### space definition begin #####
    n, f, y, x = s[conv].op.axis
    rc, ry, rx = s[conv].op.reduce_axis
    cfg.define_split("tile_n", n, num_outputs=4)
    cfg.define_split("tile_g", cfg.axis(groups), num_outputs=2)
    cfg.define_split("tile_f", cfg.axis(num_filters // groups), num_outputs=4)
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_rc", rc, num_outputs=2)
    cfg.define_split("tile_ry", ry, num_outputs=2)
    cfg.define_split("tile_rx", rx, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    target = tvm.target.Target.current()
    if target.kind.name in ["nvptx", "rocm"]:
        # These backends require explicit unrolling.
        cfg.define_knob("unroll_explicit", [1])
    else:
        cfg.define_knob("unroll_explicit", [0, 1])
    pad_data, kernel = s[conv].op.input_tensors
    s[pad_data].compute_inline()
    if conv.op in s.outputs:
        output = conv
        OL = s.cache_write(conv, "local")
    else:
        # conv feeds further ops (e.g. bias add); accumulate in registers and
        # let the real output stage be tiled below.
        output = s.outputs[0].output(0)
        s[conv].set_scope("local")
        OL = conv
    # create cache stage
    AA = s.cache_read(pad_data, "shared", [OL])
    WW = s.cache_read(kernel, "shared", [OL])
    # tile and bind spatial axes
    n, f, y, x = s[output].op.axis
    # kernel_scope is a trivial outer axis used only to attach unroll pragmas.
    kernel_scope, n = s[output].split(n, nparts=1)
    g, f = s[output].split(f, nparts=groups)
    bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n)
    bg, vg = cfg["tile_g"].apply(s, output, g)
    bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
    by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
    s[output].reorder(bn, bg, bf, by, bx, vn, vg, vf, vy, vx, tn, tf, ty, tx, ni, fi, yi, xi)
    s[output].bind(bn, te.thread_axis("blockIdx.z"))
    s[output].bind(s[output].fuse(bg, bf), te.thread_axis("blockIdx.y"))
    s[output].bind(s[output].fuse(by, bx), te.thread_axis("blockIdx.x"))
    s[output].bind(vn, te.thread_axis("vthread"))
    s[output].bind(vg, te.thread_axis("vthread"))
    s[output].bind(vf, te.thread_axis("vthread"))
    s[output].bind(vy, te.thread_axis("vthread"))
    s[output].bind(vx, te.thread_axis("vthread"))
    cfg.define_knob("fuse_yx", [0, 1])  # fuse ty,tx or tn,tf
    if cfg["fuse_yx"].val:
        s[output].bind(tn, te.thread_axis("threadIdx.z"))
        s[output].bind(tf, te.thread_axis("threadIdx.y"))
        tyx = s[output].fuse(ty, tx)
        s[output].bind(tyx, te.thread_axis("threadIdx.x"))
        s[OL].compute_at(s[output], tyx)
        # number of threads
        n_tz = cfg["tile_n"].size[2]
        n_ty = cfg["tile_f"].size[2]
        n_tx = cfg["tile_y"].size[2] * cfg["tile_x"].size[2]
    else:
        s[output].bind(s[output].fuse(tn, tf), te.thread_axis("threadIdx.z"))
        s[output].bind(ty, te.thread_axis("threadIdx.y"))
        s[output].bind(tx, te.thread_axis("threadIdx.x"))
        s[OL].compute_at(s[output], tx)
        # number of threads
        n_tz = cfg["tile_n"].size[2] * cfg["tile_f"].size[2]
        n_ty = cfg["tile_y"].size[2]
        n_tx = cfg["tile_x"].size[2]
    # tile reduction axes
    n, f, y, x = s[OL].op.axis
    rc, ry, rx = s[OL].op.reduce_axis
    rco, rci = cfg["tile_rc"].apply(s, OL, rc)
    # NOTE(review): the "tile_rx"/"tile_ry" knobs are applied to the swapped
    # axes here (ry gets tile_rx, rx gets tile_ry). Both are free tuning knobs
    # so the schedule is still valid, but the naming is inconsistent with the
    # NCHWc int8 schedule below; renaming would invalidate existing tuning
    # logs — confirm before changing.
    ryo, ryi = cfg["tile_rx"].apply(s, OL, ry)
    rxo, rxi = cfg["tile_ry"].apply(s, OL, rx)
    s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x)
    s[AA].compute_at(s[OL], rxo)
    s[WW].compute_at(s[OL], rxo)
    # cooperative fetching
    for load in [AA, WW]:
        n, f, y, x = s[load].op.axis
        fused = s[load].fuse(n, f, y, x)
        fused, tx = s[load].split(fused, factor=n_tx)
        fused, ty = s[load].split(fused, factor=n_ty)
        fused, tz = s[load].split(fused, factor=n_tz)
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
    # unroll
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
    N, CO, OH, OW = get_const_tuple(output.shape)
    _, CI_div_groups, KH, KW = get_const_tuple(kernel.shape)
    cfg.add_flop(2 * N * OH * OW * CO * CI_div_groups * KH * KW)
@autotvm.register_topi_compute("group_conv2d_NCHWc_int8.cuda")
def group_conv2d_NCHWc_int8(
    cfg, data, kernel, stride, padding, dilation, groups, out_dtype="float32"
):
    """Group convolution operator for 'group_conv2d_NCHWc_int8'.
    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width] or
        5-D with shape [batch, in_channel_chunk, in_height, in_width, in_channel_block]
    kernel : tvm.te.Tensor
        4-D with shape [num_filter, in_channel // groups, filter_height, filter_width] or
        6-D with shape [num_filter_chunk, in_channel_chunk // groups, filter_height,
        filter_width, num_filter_block, in_channel_block]
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    dilation : int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    groups : int
        number of groups
    out_dtype : str
        The output type. This is used for mixed precision.
    Returns
    -------
    Output : tvm.te.Tensor
        5-D with shape [batch, out_channel, out_height, out_width, out_channel_block]
    """
    # Block factor 4 matches the 4-lane int8 dot product (dp4a) used by the
    # accompanying schedule.
    ic_block_factor = 4
    oc_block_factor = 4
    # A 6-D kernel means the packed layout was already produced upstream.
    pre_computed = len(kernel.shape) == 6
    if not pre_computed:
        batch, channels, height, width = get_const_tuple(data.shape)
        out_channels, in_channels, kernel_h, kernel_w = get_const_tuple(kernel.shape)
        assert channels % groups == 0, "input channels must divide group size"
        assert out_channels % groups == 0, "output channels must divide group size"
        # NOTE(review): these two assertion messages read backwards (the check
        # is "channels divisible by the block factor") — message-only issue,
        # the checks themselves look right.
        assert (
            channels % ic_block_factor == 0
        ), f"Number of input channels per group must divide {ic_block_factor}"
        assert (
            out_channels % oc_block_factor == 0
        ), f"Number of output channels per group must divide {oc_block_factor}"
        # Repack NCHW -> NCHWc and OIHW -> packed 6-D kernel layout.
        packed_data = te.compute(
            (batch, channels // ic_block_factor, height, width, ic_block_factor),
            lambda n, c, h, w, vc: data[n, c * ic_block_factor + vc, h, w],
            name="packed_data",
        )
        packed_kernel = te.compute(
            (
                out_channels // oc_block_factor,
                in_channels // ic_block_factor,
                kernel_h,
                kernel_w,
                oc_block_factor,
                ic_block_factor,
            ),
            lambda oc_chunk, ic_chunk, kh, kw, oc_block, ic_block: kernel[
                oc_chunk * oc_block_factor + oc_block, ic_chunk * ic_block_factor + ic_block, kh, kw
            ],
            name="packed_kernel",
        )
    else:
        packed_data = data
        packed_kernel = kernel
    batch, ic_chunk, in_height, in_width, _ = get_const_tuple(packed_data.shape)
    oc_chunk, _, kernel_h, kernel_w, oc_block, ic_block = get_const_tuple(packed_kernel.shape)
    # TODO(kumasento): these assertions ensure that the number of groups
    # should be smaller or equal to the number of blocks, so that each
    # group will have at least one block.
    # Shall we pad the channels to avoid raising assertions?
    assert (
        groups <= oc_chunk
    ), f"Number of groups {groups} should be less than output channel chunk size {oc_chunk}"
    assert (
        groups <= ic_chunk
    ), f"Number of groups {groups} should be less than input channel chunk size {ic_chunk}"
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    # pad the input data
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(padding, (kernel_h, kernel_w))
    pad_before = [0, 0, pad_top, pad_left, 0]
    pad_after = [0, 0, pad_down, pad_right, 0]
    pad_data = pad(packed_data, pad_before, pad_after, name="pad_data")
    # compute the output shape
    out_height = (in_height - (kernel_h - 1) * dilation_h - 1 + pad_top + pad_down) // stride_h + 1
    out_width = (in_width - (kernel_w - 1) * dilation_w - 1 + pad_left + pad_right) // stride_w + 1
    oshape = (batch, oc_chunk, out_height, out_width, oc_block)
    icc = te.reduce_axis((0, ic_chunk // groups), name="ic_chunk")
    icb = te.reduce_axis((0, ic_block_factor), name="ic_block")
    kh = te.reduce_axis((0, kernel_h), name="kh")
    kw = te.reduce_axis((0, kernel_w), name="kw")
    # NOTE(kumasento): explanation of this snippet -
    # oc_chunk//groups and ic_chunk//groups give you the number of blocks,
    # i.e., chunk, per group.
    # occ is the ID of the output channel block, so that occ//(oc_chunk//groups)
    # produces the ID of the group.
    # Multiplying that result with ic_chunk//groups resulting in the ID
    # of the beginning block of the corresponding input group.
    # Adding the block offset (icc) will give you the exact block ID.
    #
    # Compared with a normal convolution, group convolution only sums
    # input channels from the group that an output channel resides in.
    conv = te.compute(
        oshape,
        lambda n, occ, oh, ow, ocb: te.sum(
            pad_data[
                n,
                occ // (oc_chunk // groups) * (ic_chunk // groups) + icc,
                oh * stride_h + kh * dilation_h,
                ow * stride_w + kw * dilation_w,
                icb,
            ].astype("int32")
            * packed_kernel[occ, icc, kh, kw, ocb, icb].astype("int32"),
            axis=[icc, kh, kw, icb],
        ),
    )
    # Type conversion
    output = te.compute(
        oshape, lambda *index: conv(*index).astype(out_dtype), tag="group_conv2d_NCHWc_int8"
    )
    num_flop = (
        batch
        * oc_chunk
        * oc_block
        * out_height
        * out_width
        * ic_chunk
        * ic_block
        * kernel_h
        * kernel_w
        * 2
        // groups
    )
    cfg.add_flop(num_flop)
    return output
@autotvm.register_topi_schedule("group_conv2d_NCHWc_int8.cuda")
def schedule_group_conv2d_NCHWc_int8(cfg, outs):
    """TOPI schedule callback of group conv2d for cuda gpu
    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template
    outs: Array of Tensor
        The computation graph description of conv2d
        in the format of an array of tensors.
    Returns
    -------
    s: Schedule
        The computation schedule for group conv2d.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sched = te.create_schedule([tensor.op for tensor in outs])

    def _visit(op):
        # Dispatch only the tagged int8 NCHWc conv op to the template.
        if op.tag == "group_conv2d_NCHWc_int8":
            _schedule_group_conv2d_NCHWc_int8(cfg, sched, op.output(0))

    traverse_inline(sched, outs[0].op, _visit)
    return sched
def _schedule_group_conv2d_NCHWc_int8(cfg, s, output):
    """Schedule group conv2d int8 NCHWc template.

    Mirrors the direct NCHW template above (block/vthread/thread tiling and
    shared-memory cooperative fetch) and additionally vectorizes the packed
    channel dimension and tensorizes the inner int8 reduction with dp4a
    where the target supports it.
    """
    workload = output.op.attrs["workload"]
    groups = get_const_int(workload[6])
    conv = output.op.input_tensors[0]
    packed_data, packed_kernel = conv.op.input_tensors
    if isinstance(packed_data.op, tvm.te.ComputeOp) and "pad" in packed_data.op.tag:
        pad_data = packed_data
        packed_data = pad_data.op.input_tensors[0]
    else:
        pad_data = packed_data
    if autotvm.GLOBAL_SCOPE.in_tuning:
        # skip this part during tuning to make records accurate
        # this part will be pre-computed during NNVM's pre-compute optimization pass
        s[packed_data].pragma(s[packed_data].op.axis[0], "debug_skip_region")
        s[packed_kernel].pragma(s[packed_kernel].op.axis[0], "debug_skip_region")
    else:
        if isinstance(packed_kernel.op, tvm.te.ComputeOp) and packed_kernel.name == "packed_kernel":
            # data and kernel are not pre-computed, schedule layout transform here
            schedule_injective_from_existing(s, packed_data)
            schedule_injective_from_existing(s, packed_kernel)
    if pad_data != packed_data:
        s[pad_data].compute_inline()
    # create cache stage
    AA = s.cache_read(pad_data, "shared", [conv])
    WW = s.cache_read(packed_kernel, "shared", [conv])
    s[conv].set_scope("local")
    # handle bias
    if output.op not in s.outputs:
        s[output].compute_inline()
        output = s.outputs[0].output(0)
    oc_chunk = get_const_int(output.shape[1])
    # tile and bind spatial axes
    if len(s[output].op.axis) == 5:
        n, f, y, x, c = s[output].op.axis
    else:
        # For task extraction of auto-tuning, the expected output is 4D. Since auto-tuning tasks
        # are created from scratch, therefore the real auto-tuning will still happen on 5D output.
        n, f, y, x = s[output].op.axis
    cfg.define_split("tile_n", n, num_outputs=4)
    cfg.define_split("tile_g", cfg.axis(groups), num_outputs=2)
    cfg.define_split("tile_f", cfg.axis(oc_chunk // groups), num_outputs=4)
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    # this is the scope to attach global config inside this kernel
    kernel_scope, n = s[output].split(n, nparts=1)
    g, f = s[output].split(f, nparts=groups)
    # NOTE(review): n is bound here and then split again by tile_n and re-bound
    # below; the direct NCHW template above does not have this early bind —
    # looks redundant, confirm before removing.
    s[output].bind(n, te.thread_axis("blockIdx.z"))
    bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n)
    bg, vg = cfg["tile_g"].apply(s, output, g)
    bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
    by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
    s[output].reorder(bn, bg, bf, by, bx, vn, vg, vf, vy, vx, tn, tf, ty, tx, ni, fi, yi, xi)
    s[output].bind(bn, te.thread_axis("blockIdx.z"))
    s[output].bind(s[output].fuse(bg, bf), te.thread_axis("blockIdx.y"))
    s[output].bind(s[output].fuse(by, bx), te.thread_axis("blockIdx.x"))
    s[output].bind(vn, te.thread_axis("vthread"))
    s[output].bind(vg, te.thread_axis("vthread"))
    s[output].bind(vf, te.thread_axis("vthread"))
    s[output].bind(vy, te.thread_axis("vthread"))
    s[output].bind(vx, te.thread_axis("vthread"))
    cfg.define_knob("fuse_yx", [0, 1])  # fuse ty,tx or tn,tf
    if cfg["fuse_yx"].val:
        s[output].bind(tn, te.thread_axis("threadIdx.z"))
        s[output].bind(tf, te.thread_axis("threadIdx.y"))
        tyx = s[output].fuse(ty, tx)
        s[output].bind(tyx, te.thread_axis("threadIdx.x"))
        s[conv].compute_at(s[output], tyx)
        # number of threads
        n_tz = cfg["tile_n"].size[2]
        n_ty = cfg["tile_f"].size[2]
        n_tx = cfg["tile_y"].size[2] * cfg["tile_x"].size[2]
    else:
        # NOTE(review): tn is bound and then immediately fused with tf and
        # re-bound on the next line; the first bind looks redundant (the
        # direct NCHW schedule above omits it) — confirm before removing.
        s[output].bind(tn, te.thread_axis("threadIdx.z"))
        s[output].bind(s[output].fuse(tn, tf), te.thread_axis("threadIdx.z"))
        s[output].bind(ty, te.thread_axis("threadIdx.y"))
        s[output].bind(tx, te.thread_axis("threadIdx.x"))
        s[conv].compute_at(s[output], tx)
        # number of threads
        n_tz = cfg["tile_n"].size[2] * cfg["tile_f"].size[2]
        n_ty = cfg["tile_y"].size[2]
        n_tx = cfg["tile_x"].size[2]
    # tile and bind reduction axes
    n, f, y, x, c = s[conv].op.axis
    rc, ry, rx, rc_block = s[conv].op.reduce_axis
    cfg.define_split("tile_rc", cfg.axis(rc), num_outputs=2)
    cfg.define_split("tile_ry", cfg.axis(ry), num_outputs=2)
    cfg.define_split("tile_rx", cfg.axis(rx), num_outputs=2)
    rco, rci = cfg["tile_rc"].apply(s, conv, rc)
    ryo, ryi = cfg["tile_ry"].apply(s, conv, ry)
    rxo, rxi = cfg["tile_rx"].apply(s, conv, rx)
    s[conv].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x, c, rc_block)
    # Split off a fixed 4-wide innermost reduction for the dp4a intrinsic.
    _, rc_block = s[conv].split(rc_block, factor=4)
    target = tvm.target.Target.current(allow_none=False)
    do_tensorize = "+dotprod" in target.mattr or target.supports_integer_dot_product
    if do_tensorize:
        dtypes = (pad_data.dtype, packed_kernel.dtype)
        s[conv].tensorize(rc_block, dp4a("shared", "shared", "local", dtypes))
    s[AA].compute_at(s[conv], rxo)
    s[WW].compute_at(s[conv], rxo)
    # cooperative fetching
    for load in [AA, WW]:
        # Vectorize the packed 4-channel innermost dimension of each load.
        c = s[load].op.axis[-1]
        c_outer, c = s[load].split(c, factor=4)
        s[load].vectorize(c)
        fused = s[load].op.axis[:-1] + [c_outer]
        fused = s[load].fuse(*fused)
        fused, tx = s[load].split(fused, factor=n_tx)
        fused, ty = s[load].split(fused, factor=n_ty)
        fused, tz = s[load].split(fused, factor=n_tz)
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
    # double buffer
    cfg.define_knob("AA_double_buffer", [0, 1])
    cfg.define_knob("WW_double_buffer", [0, 1])
    if cfg["AA_double_buffer"].val:
        s[AA].double_buffer()
    if cfg["WW_double_buffer"].val:
        s[WW].double_buffer()
    # unroll
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[output].pragma(kernel_scope, "unroll_explicit", False)
    return s
| 20,528 | 36.80663 | 100 | py |
tvm | tvm-main/python/tvm/topi/cuda/softmax.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, trailing-whitespace
"""Schedule for softmax operator"""
from tvm.target import Target
from tvm import te
from tvm.contrib import cudnn
from .. import generic
from .injective import schedule_injective_from_existing
from ..utils import get_const_int, traverse_inline
def _schedule_softmax(softmax_op, s, outs, tgt):
    """Schedule one softmax-family op (softmax / fast_softmax / log_softmax).

    Picks between a warp-shuffle row reduction, a shared-memory rfactor
    reduction, or a plain injective schedule (non-2D inputs), depending on
    input rank and target capabilities. Mutates ``s`` in place.
    """
    op_tag = softmax_op.tag
    axis = get_const_int(softmax_op.attrs["axis"])  # reduce axis
    # Unpack the intermediate stages produced by the softmax compute; their
    # layout differs per flavour.
    if op_tag == "softmax_output":
        expsum = softmax_op.input_tensors[1]
        exp = softmax_op.input_tensors[0]
        max_elem = s[exp].op.input_tensors[1]
        delta = None
    elif op_tag == "fast_softmax_output":
        expsum = softmax_op.input_tensors[1]
        exp = softmax_op.input_tensors[0]
        delta = s[exp].op.input_tensors[0]
        max_elem = s[delta].op.input_tensors[1]
    elif op_tag == "log_softmax_output":
        exp = None
        delta = None
        max_elem = softmax_op.input_tensors[1]
        expsum = softmax_op.input_tensors[2]
    else:
        raise ValueError(
            f"Tag is expected to be softmax_output or log_softmax_output. Got {op_tag}"
        )
    # The nvptx and rocm backends only supports 32-bits warp shuffle
    # instructions.
    #
    # TODO(tvm-team) Fix nvptx codegen or deprecate nvptx backend.
    def sched_warp_softmax():
        if tgt.kind.name in ["nvptx", "rocm"]:
            dtype = softmax_op.output(0).dtype
            return dtype in ["float32", "int32"]
        if tgt.kind.name != "cuda":
            # this is used as the gpu schedule for other arches which
            # may not have warp reductions
            return False
        return True
    if len(outs[0].shape) != 2:
        # Non-2D input: fall back to injective scheduling of every stage.
        ops = [max_elem.op, expsum.op, softmax_op]
        if delta is not None:
            ops.append(delta.op)
        if exp is not None:
            ops.append(exp.op)
        if softmax_op != outs[0].op:
            ops.append(outs[0].op)
        for op in ops:
            s = schedule_injective_from_existing(s, op.output(0))
    elif sched_warp_softmax():
        # A warp of 32 threads performs a row reduction.
        num_thread = tgt.thread_warp_size
        block_x = te.thread_axis("blockIdx.x")
        thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
        # (4) softmax
        # Input is 2D, so `axis ^ 1` is the other (non-reduced) axis.
        output = outs[0]
        xo, xi = s[output].split(output.op.axis[axis], nparts=num_thread)
        xio, xii = s[output].split(xi, factor=4)
        s[output].vectorize(xii)
        s[output].bind(xo, thread_x)
        s[output].bind(output.op.axis[axis ^ 1], block_x)
        s[output].reorder(output.op.axis[axis ^ 1], xo, xio, xii)
        if softmax_op != outs[0].op:
            s[softmax_op].compute_at(s[output], xio)
            s[softmax_op].vectorize(softmax_op.axis[axis])  # vec_len == 4
        # (3) expsum
        k = expsum.op.reduce_axis[0]
        ko, _ = s[expsum].split(k, nparts=num_thread)
        s[expsum].bind(ko, thread_x)
        s[expsum].compute_at(s[output], xo)
        # (2) exp
        if delta is not None:
            s[exp].compute_inline()
            s[delta].compute_inline()
        elif exp is not None:
            xo, xi = s[exp].split(exp.op.axis[axis], nparts=num_thread)
            _, xii = s[exp].split(xi, factor=4)
            s[exp].vectorize(xii)
            s[exp].bind(xo, thread_x)
            s[exp].compute_at(s[expsum], expsum.op.axis[0])
            s[exp].compute_at(s[output], output.op.axis[axis ^ 1])
            s[exp].set_scope("warp")
        # (1) max_elem
        k = max_elem.op.reduce_axis[0]
        ko, _ = s[max_elem].split(k, nparts=num_thread)
        s[max_elem].bind(ko, thread_x)
        if exp is not None and delta is None:
            s[max_elem].compute_at(s[exp], xo)
        else:
            # NOTE(review): ko is already bound to thread_x just above; this
            # re-bind looks redundant — confirm before removing.
            s[max_elem].bind(ko, thread_x)
            s[max_elem].bind(max_elem.op.axis[0], block_x)
    else:
        # Shared-memory reduction path: rfactor expsum over a 64-thread block.
        num_thread = 64
        block_x = te.thread_axis("blockIdx.x")
        thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
        if delta is not None:
            s[exp].compute_inline()
            s[delta].compute_inline()
        elif exp is not None:
            s[exp].bind(exp.op.axis[axis ^ 1], block_x)
        s[max_elem].bind(max_elem.op.axis[0], block_x)
        k = expsum.op.reduce_axis[0]
        ko, ki = s[expsum].split(k, factor=num_thread)
        EF = s.rfactor(expsum, ki)
        s[expsum].bind(s[expsum].op.axis[0], block_x)
        s[expsum].bind(s[expsum].op.reduce_axis[0], thread_x)
        s[EF].compute_at(s[expsum], s[expsum].op.reduce_axis[0])
        # Only thread 0 holds the final row sum.
        s[expsum].set_store_predicate(thread_x.var.equal(0))
        output = outs[0]
        tx, xi = s[output].split(output.op.axis[axis], nparts=num_thread)
        s[output].bind(output.op.axis[axis ^ 1], block_x)
        s[output].bind(tx, thread_x)
        s[output].reorder(output.op.axis[axis ^ 1], tx, xi)
        if softmax_op != outs[0].op:
            s[softmax_op].compute_at(s[output], tx)
def schedule_softmax(outs):
    """Schedule for softmax op.
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of softmax in the format
        of an array of tensors.
    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sched = te.create_schedule([tensor.op for tensor in outs])
    target = Target.current(allow_none=False)

    def _visit(op):
        # Matches "softmax_output", "fast_softmax_output" and
        # "log_softmax_output" tags alike.
        if "softmax" in op.tag:
            _schedule_softmax(op, sched, outs, target)

    traverse_inline(sched, outs[0].op, _visit)
    return sched
def softmax_cudnn(x, axis=-1):
    """Compute softmax of ``x`` along ``axis`` via the cuDNN library."""
    return cudnn.softmax(x, axis)
def schedule_softmax_cudnn(outs):
    """Schedule for the cuDNN softmax op (an extern call, so the generic
    extern schedule applies)."""
    return generic.schedule_extern(outs)
def log_softmax_cudnn(x, axis=-1):
    """Compute log-softmax of ``x`` along ``axis`` via the cuDNN library."""
    return cudnn.log_softmax(x, axis)
def schedule_log_softmax_cudnn(outs):
    """Schedule for the cuDNN log-softmax op (an extern call, so the generic
    extern schedule applies)."""
    return generic.schedule_extern(outs)
| 6,940 | 34.055556 | 87 | py |
tvm | tvm-main/python/tvm/topi/cuda/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Schedule for dense operator"""
import logging
import tvm
from tvm import te, autotvm
from tvm.contrib import cublas
from .tensor_intrin import dp4a
from .. import tag
from .. import generic
from ..utils import traverse_inline, get_const_tuple
logger = logging.getLogger("topi")
def _matmul_cublas_common(
    cfg, tensor_a, tensor_b, bias=None, out_dtype=None, transpose_a=False, transpose_b=False
):
    """Shared cuBLAS matmul implementation used by both the matmul and dense ops.

    Parameters
    ----------
    cfg : ConfigEntity
        The autotvm config; only used to record the FLOP count.
    tensor_a : tvm.te.Tensor
        2-D left operand.
    tensor_b : tvm.te.Tensor
        2-D right operand.
    bias : tvm.te.Tensor, optional
        1-D bias added to each output row.
    out_dtype : str, optional
        Output dtype; must be the input dtype or "int32" (int8 accumulation).
    transpose_a, transpose_b : bool
        Whether to transpose the respective operand inside cuBLAS.

    Returns
    -------
    matmul : tvm.te.Tensor
        2-D result tensor (with bias broadcast-added when given).
    """
    assert len(tensor_a.shape) == 2 and len(tensor_b.shape) == 2, "only support 2-dim matmul"
    if bias is not None:
        assert len(bias.shape) == 1
    if out_dtype is None:
        out_dtype = tensor_a.dtype
    # cuBLAS only supports same-dtype output, or int32 accumulation for int8 inputs.
    # (The original nested if/assert was contradictory: the asserted condition was
    # already known false inside the branch; this single assert is equivalent.)
    assert out_dtype in (
        tensor_a.dtype,
        "int32",
    ), "Mixed precision other than int8 + int32 not supported."
    batch, in_dim = get_const_tuple(tensor_a.shape)
    out_dim, _ = get_const_tuple(tensor_b.shape)
    matmul = cublas.matmul(tensor_a, tensor_b, transpose_a, transpose_b, dtype=out_dtype)
    # A FLOP count is only meaningful when all shapes are static integers.
    if all(isinstance(d, int) for d in [batch, in_dim, out_dim]):
        cfg.add_flop(batch * in_dim * out_dim * 2)
    if bias is not None:
        matmul = te.compute(
            (batch, out_dim), lambda i, j: matmul[i, j] + bias[j], tag=tag.BROADCAST
        )
    return matmul
@autotvm.register_topi_compute("matmul_cublas.cuda")
def matmul_cublas(
    cfg, tensor_a, tensor_b, bias=None, out_dtype=None, transpose_a=False, transpose_b=False
):
    """Matmul operator on CUDA with CUBLAS"""
    return _matmul_cublas_common(
        cfg, tensor_a, tensor_b, bias, out_dtype, transpose_a, transpose_b
    )
@autotvm.register_topi_schedule("matmul_cublas.cuda")
def schedule_matmul_cublas(_, outs):
    """Schedule matmul operator using CUBLAS"""
    sch = generic.schedule_extern(outs)
    return sch
@autotvm.register_topi_compute("dense_cublas.cuda")
def dense_cublas(cfg, data, weight, bias=None, out_dtype=None):
    """Dense operator on CUDA with CUBLAS. This is an alias of matmul_nt operator."""
    # Dense is matmul with the weight transposed (N-T layout).
    return _matmul_cublas_common(
        cfg, data, weight, bias, out_dtype, transpose_a=False, transpose_b=True
    )
@autotvm.register_topi_schedule("dense_cublas.cuda")
def schedule_dense_cublas(_, outs):
    """Schedule dense operator using CUBLAS"""
    sch = generic.schedule_extern(outs)
    return sch
@autotvm.register_topi_compute("dense_int8.cuda")
def dense_int8(cfg, data, weight, bias=None, out_dtype=None):
    """Dense operator for int8 on CUDA"""
    out_dtype = data.dtype if out_dtype is None else out_dtype
    batch, in_dim = get_const_tuple(data.shape)
    out_dim, _ = get_const_tuple(weight.shape)

    rk = te.reduce_axis((0, in_dim), name="k")

    def _dot(i, j):
        # Widen both operands before accumulation.
        return te.sum(data[i, rk].astype(out_dtype) * weight[j, rk].astype(out_dtype), axis=[rk])

    matmul = te.compute((batch, out_dim), _dot, tag="dense_int8")
    cfg.add_flop(batch * in_dim * out_dim * 2)

    if bias is not None:
        matmul = te.compute(
            (batch, out_dim),
            lambda i, j: matmul[i, j] + bias[j].astype(out_dtype),
            tag=tag.BROADCAST,
        )
        cfg.add_flop(batch * out_dim)
    return matmul
@autotvm.register_topi_schedule("dense_int8.cuda")
def schedule_dense_int8(cfg, outs):
    """Dense schedule for int8 on CUDA"""
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([t.op for t in outs])

    def _visit(op):
        if "dense_int8" in op.tag:
            _schedule_dense_int8(cfg, sch, op.output(0))

    traverse_inline(sch, outs[0].op, _visit)
    return sch
def _schedule_dense_int8(cfg, s, output):
    """Apply the tiled int8 dense schedule to `output` in-place.

    Tiles batch/out_dim across blocks, vthreads and threads, caches the
    operands in shared memory and the accumulator in registers, and (when the
    target supports it) tensorizes the inner reduction with the dp4a intrinsic.

    Parameters
    ----------
    cfg : ConfigEntity
        The autotvm config providing tile_y/tile_x/tile_k splits and knobs.
    s : Schedule
        The schedule being mutated.
    output : tvm.te.Tensor
        The tensor tagged "dense_int8" produced by `dense_int8`.
    """
    data, weight = s[output].op.input_tensors
    # If weight is a unary transform of data (single input that is data itself),
    # fold it into its consumer.
    if len(weight.op.input_tensors) == 1 and weight.op.input_tensors[0] == data:
        s[weight].compute_inline()
    batch, in_dim = get_const_tuple(data.shape)
    out_dim, _ = get_const_tuple(weight.shape)
    # Vectorized load width for the shared-memory stages; prefer 16 when possible.
    in_dim_factor = 4
    assert in_dim % in_dim_factor == 0, f"Input dimension must divide {in_dim_factor}"
    if in_dim % 16 == 0:
        in_dim_factor = 16
    # create tuning space
    cfg.define_split("tile_y", batch, num_outputs=4)
    cfg.define_split("tile_x", out_dim, num_outputs=4)
    cfg.define_split("tile_k", in_dim // in_dim_factor, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    # create cache stage
    AA = s.cache_read(data, "shared", [output])
    WW = s.cache_read(weight, "shared", [output])
    CC = s.cache_write(output, "local")
    # handle bias
    if output.op not in s.outputs:
        s[output].compute_inline()
        output = s.outputs[0].output(0)
    n, x = s[output].op.axis
    # this is the scope to attach global config inside this kernel
    kernel_scope, n = s[output].split(n, nparts=1)
    ko = CC.op.reduce_axis[0]
    # Inner factor of 4 matches the dp4a 4-element dot product.
    ko, ki = s[CC].split(ko, factor=4)
    ko, kt = cfg["tile_k"].apply(s, CC, ko)
    target = tvm.target.Target.current(allow_none=False)
    do_tensorize = "+dotprod" in target.mattr or target.supports_integer_dot_product
    if do_tensorize:
        dtypes = (data.dtype, weight.dtype)
        s[CC].tensorize(ki, dp4a("shared", "shared", "local", dtypes))
    by, vy, ty, yi = cfg["tile_y"].apply(s, output, n)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
    s[output].reorder(by, bx, vy, vx, ty, tx, yi, xi)
    s[output].bind(by, te.thread_axis("blockIdx.y"))
    s[output].bind(bx, te.thread_axis("blockIdx.x"))
    s[output].bind(vy, te.thread_axis("vthread"))
    s[output].bind(vx, te.thread_axis("vthread"))
    s[output].bind(ty, te.thread_axis("threadIdx.y"))
    s[output].bind(tx, te.thread_axis("threadIdx.x"))
    n_ty = cfg["tile_y"].size[2]
    n_tx = cfg["tile_x"].size[2]
    s[CC].compute_at(s[output], tx)
    yo, xo = CC.op.axis[:2]
    s[CC].reorder(ko, kt, yo, xo, ki)
    # Cooperative fetching: all threads in the block load the shared tiles.
    for load in [AA, WW]:
        s[load].compute_at(s[CC], ko)
        outer, inner = s[load].split(s[load].op.axis[-1], factor=in_dim_factor)
        s[load].vectorize(inner)
        fused = s[load].op.axis[:-1] + [outer]
        fused = s[load].fuse(*fused)
        fused, tx = s[load].split(fused, factor=n_tx)
        fused, ty = s[load].split(fused, factor=n_ty)
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[output].pragma(kernel_scope, "unroll_explicit", False)
    return s
| 7,192 | 34.965 | 100 | py |
tvm | tvm-main/python/tvm/topi/cuda/correlation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Correlation operators on CUDA"""
import tvm
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import traverse_inline
@autotvm.register_topi_compute("correlation_nchw.cuda")
def correlation_nchw(
    cfg, data1, data2, kernel_size, max_displacement, stride1, stride2, padding, is_multiply
):
    """Correlation operator in NCHW layout.
    Parameters
    ----------
    data1 : tvm.te.Tensor
        4-D with shape [batch, channel, height, width]
    data2 : tvm.te.Tensor
        4-D with shape [batch, channel, height, width]
    kernel_size: int
        Kernel size for correlation, must be an odd number
    max_displacement: int
        Max displacement of Correlation
    stride1: int
        Stride for data1
    stride2: int
        Stride for data2 within the neighborhood centered around data1
    padding : int or a list/tuple of 2 or 4 ints
        Padding size, or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    is_multiply: bool
        operation type is either multiplication or subtraction
    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    # pylint: disable=unused-argument
    # The compute is shared with the generic implementation; cfg is only
    # consumed by the schedule half of this template.
    return nn.correlation_nchw(
        data1, data2, kernel_size, max_displacement, stride1, stride2, padding, is_multiply
    )
def _schedule_correlation_nchw(cfg, s, correlation):
    """Schedule correlation_nchw direct template.

    Tiles the spatial axes across blocks/vthreads/threads, caches the two
    padded inputs in shared memory, and splits the reduction axes according
    to the autotvm config.

    Parameters
    ----------
    cfg : ConfigEntity
        The autotvm config for this template.
    s : Schedule
        The schedule being mutated.
    correlation : tvm.te.Tensor
        The tensor tagged "correlation_nchw".
    """
    # pylint: disable=invalid-name
    ##### space definition begin #####
    n, f, y, x = s[correlation].op.axis
    rc, ry, rx = s[correlation].op.reduce_axis
    cfg.define_split("tile_f", f, num_outputs=4)
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_rc", rc, num_outputs=2)
    cfg.define_split("tile_ry", ry, num_outputs=2)
    cfg.define_split("tile_rx", rx, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    target = tvm.target.Target.current()
    # nvptx/rocm require explicit unrolling; elsewhere let the tuner decide.
    if target.kind.name in ["nvptx", "rocm"]:
        cfg.define_knob("unroll_explicit", [1])
    else:
        cfg.define_knob("unroll_explicit", [0, 1])
    ##### space definition end #####
    padded_data1, padded_data2 = s[correlation].op.input_tensors
    s[padded_data1].compute_inline()
    s[padded_data2].compute_inline()
    # create cache stage
    s[correlation].set_scope("local")
    AA = s.cache_read(padded_data1, "shared", [correlation])
    BB = s.cache_read(padded_data2, "shared", [correlation])
    output = s.outputs[0].output(0)
    # tile and bind spatial axes
    n, f, y, x = s[output].op.axis
    kernel_scope, n = s[output].split(n, nparts=1)
    bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
    by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
    bf = s[output].fuse(n, bf)
    s[output].bind(bf, te.thread_axis("blockIdx.z"))
    s[output].bind(by, te.thread_axis("blockIdx.y"))
    s[output].bind(bx, te.thread_axis("blockIdx.x"))
    s[output].bind(vf, te.thread_axis("vthread"))
    s[output].bind(vy, te.thread_axis("vthread"))
    s[output].bind(vx, te.thread_axis("vthread"))
    s[output].bind(tf, te.thread_axis("threadIdx.z"))
    s[output].bind(ty, te.thread_axis("threadIdx.y"))
    s[output].bind(tx, te.thread_axis("threadIdx.x"))
    s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi)
    s[correlation].compute_at(s[output], tx)
    # tile reduction axes
    n, f, y, x = s[correlation].op.axis
    rc, ry, rx = s[correlation].op.reduce_axis
    rco, rci = cfg["tile_rc"].apply(s, correlation, rc)
    ryo, ryi = cfg["tile_ry"].apply(s, correlation, ry)
    rxo, rxi = cfg["tile_rx"].apply(s, correlation, rx)
    s[correlation].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x)
    s[AA].compute_at(s[correlation], rxo)
    s[BB].compute_at(s[correlation], rxo)
    # cooperative fetching
    for load in [AA, BB]:
        n, f, y, x = s[load].op.axis
        fused = s[load].fuse(n, f, y, x)
        tz, fused = s[load].split(fused, nparts=cfg["tile_f"].size[2])
        ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
        tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
    # unroll
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
@autotvm.register_topi_schedule("correlation_nchw.cuda")
def schedule_correlation_nchw(cfg, outs):
    """schedule of correlation_nchw for cuda gpu

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template
    outs: Array of Tensor
        The computation graph description of correlation
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for correlation.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([t.op for t in outs])

    def _visit(op):
        if op.tag == "correlation_nchw":
            _schedule_correlation_nchw(cfg, sch, op.output(0))

    traverse_inline(sch, outs[0].op, _visit)
    return sch
| 6,237 | 33.849162 | 92 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv2d_hwnc_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-function-args
# pylint: disable=too-many-statements, unused-argument, too-many-arguments
"""Tensorcore template for cuda backend"""
import tvm
from tvm import te
from tvm import autotvm
from tvm.target import Target
from tvm.topi.cuda.injective import schedule_injective_from_existing
from ..utils import get_const_tuple, traverse_inline, simplify, tag
from ..nn.pad import pad
from ..nn.utils import get_pad_tuple
from .tensor_intrin import intrin_wmma_load_matrix_A
from .tensor_intrin import intrin_wmma_load_matrix_W
from .tensor_intrin import intrin_wmma_store_matrix
from .tensor_intrin import intrin_wmma_gemm
def unpack_HWNCnc_to_hwnc(packed_out, out_dtype):
    """Unpack conv2d_hwnc output from layout hwncnc to hwnc

    Parameters
    -----------
    packed_out : tvm.te.Tensor
        The output tensor of conv2d_hwnc.
    out_dtype : str
        The output dtype.

    Returns
    -------
    unpacked_out : tvm.te.Tensor
        The unpacked output tensor in hwnc layout.
    """
    H, W, N, O, wmma_m, wmma_n = get_const_tuple(packed_out.shape)
    out_shape = (H, W, N * wmma_m, O * wmma_n)

    def _unpacked(h, w, n, o):
        # Split the flat batch/channel indices back into (chunk, block) pairs.
        n_chunk = tvm.tir.indexdiv(n, wmma_m)
        n_block = tvm.tir.indexmod(n, wmma_m)
        o_chunk = tvm.tir.indexdiv(o, wmma_n)
        o_block = tvm.tir.indexmod(o, wmma_n)
        return packed_out[h, w, n_chunk, o_chunk, n_block, o_block].astype(out_dtype)

    return te.compute(
        out_shape,
        _unpacked,
        name="output_unpack",
        tag=tag.INJECTIVE + ",unpack_hwncc",
    )
def conv2d_hwnc_tensorcore(data, kernel, strides, padding, dilation, in_dtype, out_dtype="int32"):
    """Compute conv2d with tensorcore for HWNC layout with int8/int4"""
    supported = ("int4", "uint4", "int8", "uint8")
    assert data.dtype in supported
    assert kernel.dtype in supported
    # Compute in the packed HWNCnc layout, then unpack back to plain HWNC.
    packed = hwnc_tensorcore_cuda(data, kernel, strides, padding, dilation, out_dtype)
    return unpack_HWNCnc_to_hwnc(packed, out_dtype)
@autotvm.register_topi_compute("conv2d_HWNCnc_tensorcore.cuda")
def hwnc_tensorcore_cuda(cfg, Input, Filter, stride, padding, dilation, out_dtype="int32"):
    """Compute declaration for tensorcore conv2d in the packed HWNCnc layout.

    Packs the HWNC input and kernel into wmma-fragment-sized blocks
    (n x ic and oc x ic respectively), pads spatially, and declares the
    convolution with int32 accumulation.

    Parameters
    ----------
    cfg : ConfigEntity
        The autotvm config; only used to record the FLOP count here.
    Input : tvm.te.Tensor
        4-D input in HWNC layout.
    Filter : tvm.te.Tensor
        Kernel, either 4-D (H, W, O, I) or pre-packed 6-D (H, W, O, I, oc, ic).
    stride : int or tuple of two ints
    padding : int or tuple
    dilation : int or tuple of two ints
    out_dtype : str
        Output dtype (accumulation is always int32).

    Returns
    -------
    Conv : tvm.te.Tensor
        6-D packed output (H, W, N//wmma_m, O//wmma_n, wmma_m, wmma_n).
    """
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilation, int) or len(dilation) == 2
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    in_dtype = Input.dtype
    # wmma fragment geometry depends on the input precision.
    if in_dtype in ["int4", "uint4"]:
        wmma_n = wmma_m = 8
        wmma_k = 32
    else:
        wmma_m = 8
        wmma_n = 32
        wmma_k = 16
    # A 6-D filter means it was already packed by a previous run.
    pre_computed = len(Filter.shape) == 6
    in_height, in_width, batch, in_channels = get_const_tuple(Input.shape)
    if pre_computed:
        kernel_h, kernel_w, oc_chunk, _, oc_block_factor, _ = get_const_tuple(Filter.shape)
        num_filter = oc_block_factor * oc_chunk
    else:
        kernel_h, kernel_w, num_filter, _ = get_const_tuple(Filter.shape)
    if in_dtype in ["int4", "uint4"]:
        assert batch % 8 == 0 and in_channels % 32 == 0 and num_filter % 8 == 0
    else:
        assert batch % 8 == 0 and in_channels % 16 == 0 and num_filter % 32 == 0, (
            "The shape of (batch, in_channels, num_filter) "
            "must be multiple of (8, 16, 32) for int8, "
            "and (8, 32, 8) for int4"
        )
    # compute the output shape
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    out_channels = num_filter
    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
    cfg.add_flop(
        2 * batch * out_height * out_width * out_channels * in_channels * kernel_h * kernel_w
    )
    # Input feature map: (H, W, N, IC, n, ic)
    data_shape = (in_height, in_width, batch // wmma_m, in_channels // wmma_k, wmma_m, wmma_k)
    # Kernel: (H, W, OC, IC, oc, ic)
    kernel_shape = (
        kernel_h,
        kernel_w,
        out_channels // wmma_n,
        in_channels // wmma_k,
        wmma_n,
        wmma_k,
    )
    # Reduction axes
    kh = te.reduce_axis((0, kernel_h), name="kh")
    kw = te.reduce_axis((0, kernel_w), name="kw")
    ic = te.reduce_axis((0, in_channels // wmma_k), name="ic")
    ii = te.reduce_axis((0, wmma_k), name="ii")
    if pre_computed:
        packed_kernel = Filter
    else:
        packed_kernel = te.compute(
            kernel_shape,
            lambda kh, kw, o, i, oo, ii: Filter[kh, kw, o * wmma_n + oo, i * wmma_k + ii],
            name="packed_kernel",
        )
    packed_data = te.compute(
        data_shape, lambda h, w, n, i, nn, ii: Input[h, w, n * wmma_m + nn, i * wmma_k + ii]
    )
    # Pad only the two spatial dimensions.
    pad_before = [pad_top, pad_left, 0, 0, 0, 0]
    pad_after = [pad_down, pad_right, 0, 0, 0, 0]
    pad_data = pad(packed_data, pad_before, pad_after, name="pad_data")
    Conv = te.compute(
        (out_height, out_width, batch // wmma_m, out_channels // wmma_n, wmma_m, wmma_n),
        lambda h, w, n, o, nn, oo: te.sum(
            (
                pad_data[h * stride_h + kh, w * stride_w + kw, n, ic, nn, ii].astype("int32")
                * packed_kernel[kh, kw, o, ic, oo, ii].astype("int32")
            ),
            axis=[ic, kh, kw, ii],
        ),
        name="Conv",
        tag="conv2d_HWNCnc_tensorcore",
    )
    return Conv
def schedule_hwnc_tensorcore_cuda(cfg, s, Conv):
    """Schedule tensorcore template for the packed HWNCnc conv2d.

    Stages the inputs through shared memory into wmma fragments, tiles the
    output over blocks/warps, and tensorizes the inner GEMM with the wmma
    load/gemm/store intrinsics.

    Parameters
    ----------
    cfg : ConfigEntity
        The autotvm config with the tiling/unroll knobs defined below.
    s : Schedule
        The schedule being mutated.
    Conv : tvm.te.Tensor
        The tensor tagged "conv2d_HWNCnc_tensorcore".
    """
    pad_data, packed_kernel = s[Conv].op.input_tensors
    ic, kh, kw, ii = s[Conv].op.reduce_axis
    packed_data = s[pad_data].op.input_tensors[0]
    block_x = te.thread_axis("blockIdx.x")
    block_y = te.thread_axis("blockIdx.y")
    block_z = te.thread_axis("blockIdx.z")
    thread_x = te.thread_axis("threadIdx.x")
    thread_y = te.thread_axis("threadIdx.y")
    thread_z = te.thread_axis("threadIdx.z")
    # Designate the memory hierarchy
    AS = s.cache_read(pad_data, "shared", [Conv])
    WS = s.cache_read(packed_kernel, "shared", [Conv])
    AF = s.cache_read(AS, "wmma.matrix_a", [Conv])
    WF = s.cache_read(WS, "wmma.matrix_b", [Conv])
    ConvF = s.cache_write(Conv, "wmma.accumulator")
    # When Conv is the final output, stage the accumulator through shared
    # memory; otherwise reuse Conv itself as the shared-memory stage.
    if Conv.op in s.outputs:
        output = Conv
        ConvS = s.cache_read(ConvF, "shared", [Conv])
        OL = ConvS
    else:
        output = s.outputs[0].output(0)
        s[Conv].set_scope("shared")
        OL = Conv
    out_dtype = Conv.dtype
    if isinstance(packed_kernel.op, te.tensor.ComputeOp) and packed_kernel.name == "packed_kernel":
        if autotvm.GLOBAL_SCOPE.in_tuning:
            s[packed_kernel].pragma(s[packed_kernel].op.axis[0], "debug_skip_region")
        else:
            with Target("cuda"):
                schedule_injective_from_existing(s, packed_kernel)
    if isinstance(pad_data.op, te.tensor.ComputeOp) and "pad" in pad_data.op.tag:
        s[pad_data].compute_inline()
        data = pad_data.op.input_tensors[0]
        if autotvm.GLOBAL_SCOPE.in_tuning:
            # skip this part during tuning to make records accurate
            # this part will be pre-computed during NNVM's pre-compute optimization pass
            s[pad_data].pragma(s[pad_data].op.axis[0], "debug_skip_region")
    else:
        data = pad_data
        s[data].compute_inline()
    data_dtype = data.dtype
    kernel_dtype = packed_kernel.dtype
    # Schedule for autotvm
    cfg.define_knob("block_row_warps", [1, 2, 4])
    cfg.define_knob("block_col_warps", [1, 2, 4])
    cfg.define_knob("warp_row_tiles", [1, 2, 4, 8, 16])
    cfg.define_knob("warp_col_tiles", [1, 2, 4, 8, 16])
    cfg.define_knob("chunk", [1, 2, 4, 8])
    cfg.define_knob("split_block_k_nums", [1, 2, 4, 8, 16, 32])
    cfg.define_knob("vector_ws", [1, 8])
    cfg.define_knob("vector_as", [1, 8, 16])
    block_row_warps = cfg["block_row_warps"].val
    block_col_warps = cfg["block_col_warps"].val
    warp_row_tiles = cfg["warp_row_tiles"].val
    warp_col_tiles = cfg["warp_col_tiles"].val
    chunk = cfg["chunk"].val
    vector_as = cfg["vector_as"].val
    vector_ws = cfg["vector_ws"].val
    split_block_k_nums = cfg["split_block_k_nums"].val
    s[packed_data].compute_inline()
    # wmma fragment geometry mirrors the compute declaration.
    if data_dtype in ["int4", "uint4"]:
        wmma_m = wmma_n = 8
        wmma_k = 32
    else:
        wmma_m = 8
        wmma_n = 32
        wmma_k = 16
    warp_size = 32
    # Schedule for output
    if len(s[output].op.axis) == 4:
        (
            hc,
            wc,
            nc,
            oc,
        ) = output.op.axis
        nc, nnc = s[output].split(nc, factor=wmma_m)
        oc, ooc = s[output].split(oc, factor=wmma_n)
    else:
        hc, wc, nc, oc, nnc, ooc = output.op.axis
    kernel_scope, hc = s[output].split(hc, nparts=1)
    block_k = s[output].fuse(hc, wc)
    block_k, split_block_k = s[output].split(block_k, factor=split_block_k_nums)
    nc, nci = s[output].split(nc, factor=warp_row_tiles)
    block_i, nc = s[output].split(nc, factor=block_row_warps)
    oc, oci = s[output].split(oc, factor=warp_col_tiles)
    block_j, oc = s[output].split(oc, factor=block_col_warps)
    s[output].reorder(block_k, split_block_k, block_i, block_j, nc, oc, nci, oci, nnc, ooc)
    t = s[output].fuse(nnc, ooc)
    _, tx = s[output].split(t, factor=warp_size)
    s[output].bind(block_k, block_z)
    s[output].bind(block_i, block_x)
    s[output].bind(block_j, block_y)
    s[output].bind(tx, thread_x)
    s[output].bind(nc, thread_y)
    s[output].bind(oc, thread_z)
    # Schedule wmma store
    s[OL].compute_at(s[output], block_j)
    hc, wc, nc, oc, nnc, ooc = OL.op.axis
    oc, oci = s[OL].split(oc, factor=warp_col_tiles)
    _, oc = s[OL].split(oc, factor=block_col_warps)
    nc, nci = s[OL].split(nc, factor=warp_row_tiles)
    _, nc = s[OL].split(nc, factor=block_row_warps)
    s[OL].reorder(nc, oc, nci, oci, nnc, ooc)
    s[OL].bind(nc, thread_y)
    s[OL].bind(oc, thread_z)
    # Schedule local computation
    s[ConvF].compute_at(s[OL], oc)
    _, _, n, o, nnf, oof = ConvF.op.axis
    ko, ki = s[ConvF].split(ic, factor=chunk)
    s[ConvF].reorder(ko, kh, ki, kw, n, o, nnf, oof, ii)
    cfg.define_reorder("reorder_inner", [ko, kh], policy="all")
    cfg["reorder_inner"].apply(s, ConvF, [ko, kh])
    cfg["reorder_inner"].apply(s, ConvF, [ki, kw])
    # Move intermediate computation into each output compute tile
    s[AF].compute_at(s[ConvF], kw)
    s[WF].compute_at(s[ConvF], kw)
    # Schedule for A's share memory
    s[AS].compute_at(s[ConvF], ko)
    _, _, n, _, nn, ii = AS.op.axis
    tx, xo = s[AS].split(n, nparts=block_row_warps)
    ty, _ = s[AS].split(xo, nparts=block_col_warps)
    t = s[AS].fuse(nn, ii)
    to, ti = s[AS].split(t, nparts=warp_size)
    ti, _t = s[AS].split(ti, factor=vector_as)
    s[AS].bind(tx, thread_y)
    s[AS].bind(ty, thread_z)
    s[AS].bind(to, thread_x)
    s[AS].vectorize(_t)
    # Schedule for W's share memory
    s[WS].compute_at(s[ConvF], kw)
    kh, kw, ic, o, ii, oo = WS.op.axis
    tx, xo = s[WS].split(o, nparts=block_row_warps)
    ty, _ = s[WS].split(xo, nparts=block_col_warps)
    t = s[WS].fuse(ii, oo)
    to, ti = s[WS].split(t, nparts=warp_size)
    ti, _t = s[WS].split(ti, factor=vector_ws)
    s[WS].bind(tx, thread_y)
    s[WS].bind(ty, thread_z)
    s[WS].bind(to, thread_x)
    s[WS].vectorize(ti)
    # double buffer
    cfg.define_knob("AS_double_buffer", [0, 1])
    cfg.define_knob("WS_double_buffer", [0, 1])
    if cfg["AS_double_buffer"].val:
        s[AS].double_buffer()
    if cfg["WS_double_buffer"].val:
        s[WS].double_buffer()
    # unroll
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[output].pragma(kernel_scope, "unroll_explicit", False)
    shape = (wmma_m, wmma_n, wmma_k)
    AS_shape = (wmma_m, wmma_k)
    AL_shape = (wmma_m, wmma_k)
    WS_shape = (wmma_n, wmma_k)
    WL_shape = (wmma_n, wmma_k)
    CL_shape = (wmma_m, wmma_n)
    CS_shape = (wmma_m, wmma_n)
    # A scalar GEMM describing one wmma fragment; used to build the intrinsics.
    AL_gemm = te.placeholder(AL_shape, name="A", dtype=data_dtype)
    WL_gemm = te.placeholder(WL_shape, name="B", dtype=kernel_dtype)
    k_gemm = te.reduce_axis((0, wmma_k), name="k")
    CL_compute = te.compute(
        CL_shape,
        lambda ii, jj: te.sum(
            (AL_gemm[ii, k_gemm].astype("int32") * WL_gemm[jj, k_gemm].astype("int32")), axis=k_gemm
        ),
        name="C",
    )
    AL_strides = [wmma_k, 1]
    AS_strides = [wmma_k, 1]
    WL_strides = [wmma_k, 1]
    WS_strides = [wmma_k, 1]
    CL_strides = [wmma_n, 1]
    CS_strides = [wmma_n, 1]
    s[AF].tensorize(
        AF.op.axis[-2],
        intrin_wmma_load_matrix_A(
            AL_strides, AS_strides, shape, "row_major", AS_shape, AL_shape, data_dtype
        ),
    )
    s[WF].tensorize(
        WF.op.axis[-2],
        intrin_wmma_load_matrix_W(
            WL_strides, WS_strides, shape, "col_major", WS_shape, WL_shape, kernel_dtype
        ),
    )
    s[OL].tensorize(
        nnc, intrin_wmma_store_matrix(CS_strides, CL_strides, shape, out_dtype, CL_shape, CS_shape)
    )
    s[ConvF].tensorize(
        nnf,
        intrin_wmma_gemm(AL_gemm, WL_gemm, CL_compute, AL_strides, WL_strides, CL_strides, shape),
    )
    return s
@autotvm.register_topi_schedule("conv2d_HWNCnc_tensorcore.cuda")
def schedule_conv2d_hwnc_tensorcore(cfg, outs):
    """TOPI schedule callback"""
    sch = te.create_schedule([t.op for t in outs])

    def _visit(op):
        if "conv2d_HWNCnc_tensorcore" in op.tag:
            schedule_hwnc_tensorcore_cuda(cfg, sch, op.output(0))

    traverse_inline(sch, outs[0].op, _visit)
    return sch
| 14,745 | 33.37296 | 100 | py |
tvm | tvm-main/python/tvm/topi/cuda/unique.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Unique operator"""
import tvm
from tvm import te, tir
from ...te import hybrid
from .scan import cumsum
from .sort import sort, argsort
from ..utils import ceil_div
def _get_max_threads(batch_size):
    """Pick a thread count: min(batch_size, target max), except on Vulkan
    with a dynamic batch size, where the full static max is required."""
    target = tvm.target.Target.current()
    max_threads = tvm.target.Target.current(allow_none=False).max_num_threads
    dynamic_batch = not isinstance(batch_size, tvm.tir.IntImm)
    if dynamic_batch and "vulkan" in str(target):
        # SPIR-V does not support dynamic thread group size
        return max_threads
    return tir.min(batch_size, max_threads)
def _calc_adjacent_diff_ir(data, output, binop=tir.Sub):
    """Low level IR to calculate adjacent difference in an 1-D array.
    Parameters
    ----------
    data : Buffer
        Input 1-D Buffer.
    output: Buffer
        A buffer to store adjacent difference, of the same shape as data. The adjacent difference
        is defined as: output[0] = 0, output[i] = binop(data[i], data[i-1])
        where i > 0 and i < len(data).
    binop: function, optional
        A binary associative op to use for calculating adjacent difference. The function takes two
        TIR expressions and produce a new TIR expression. By default it uses tvm.tir.Sub to
        compute the adjacent difference.
    """
    ib = tir.ir_builder.create()
    data_ptr = ib.buffer_ptr(data)
    output_ptr = ib.buffer_ptr(output)
    batch_size = data.shape[0]
    max_threads = _get_max_threads(batch_size)
    with ib.new_scope():
        # One thread per element of the input.
        nthread_tx = max_threads
        nthread_bx = ceil_div(batch_size, max_threads)
        tx = te.thread_axis("threadIdx.x")
        bx = te.thread_axis("blockIdx.x")
        ib.scope_attr(tx, "thread_extent", nthread_tx)
        ib.scope_attr(bx, "thread_extent", nthread_bx)
        tid = bx * max_threads + tx
        with ib.if_scope(tid < batch_size):
            with ib.if_scope(tid == 0):
                # The first element has no predecessor: by definition output[0] = 0.
                output_ptr[tid] = 0
            with ib.else_scope():
                output_ptr[tid] = tir.Cast(output.dtype, binop(data_ptr[tid], data_ptr[tid - 1]))
    return ib.get()
def _calc_adjacent_diff(data, out_dtype="int32", binop=tir.Sub):
    """Function calculate adjacent difference in an 1-D array.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input 1-D tensor.
    output_dtype : str
        The output tensor data type.
    binop: function, optional
        A binary associative op to use for calculating difference. The function takes two
        TIR expressions and produce a new TIR expression. By default it uses tvm.tir.Sub to
        compute the adjacent difference.

    Returns
    -------
    output : tvm.te.Tensor
        1-D tensor storing the adjacent difference of the input tensor. The adjacent difference
        is defined as: output[0] = 0, output[i] = binop(data[i], data[i-1])
        where i > 0 and i < len(data).
    """
    in_buf = tir.decl_buffer(data.shape, data.dtype, "sorted_data_buf", data_alignment=8)
    out_buf = tir.decl_buffer(data.shape, out_dtype, "output_buf", data_alignment=8)

    def _gen_ir(ins, outs):
        return _calc_adjacent_diff_ir(ins[0], outs[0], binop=binop)

    return te.extern(
        [data.shape],
        [data],
        _gen_ir,
        dtype=[out_dtype],
        in_buffers=[in_buf],
        out_buffers=[out_buf],
        name="_calc_adjacent_diff",
        tag="_calc_adjacent_diff_gpu",
    )
@hybrid.script
def _calc_num_unique(inc_scan):
    """Helper function to get the number of unique elements from inc_scan tensor"""
    output = output_tensor((1,), "int32")
    # The last inclusive-scan value + 1 equals the number of unique elements.
    for i in bind("threadIdx.x", 1):
        output[i] = inc_scan[inc_scan.shape[0] - 1] + int32(1)
    return output
def _calc_unique_ir(
    data, argsorted_indices, inc_scan, index_converter, unique_elements, inverse_indices, counts
):
    """Low level IR to calculate unique elements, inverse indices, and counts (optional) of
    unique elements of 1-D array.
    Parameters
    ----------
    data : Buffer
        Input 1-D Buffer.
    argsorted_indices : Buffer
        A buffer that stores the argsorted indices of the input data.
    inc_scan : Buffer
        A buffer that stores the inclusive scan of the binary tir.NE adjacent difference
        of the sorted data.
    index_converter (optional) : Buffer
        An optional index converter that transforms the unique element index
        such that new_idx = index_converter[old_idx].
    unique_elements : Buffer
        A buffer that stores the unique elements.
    inverse_indices : Buffer
        A buffer that stores the index of each input data element in the unique element array.
    counts (optional) : Buffer
        A buffer that stores the count of each unique element.
    """
    ib = tir.ir_builder.create()
    data_ptr = ib.buffer_ptr(data)
    argsorted_indices_ptr = ib.buffer_ptr(argsorted_indices)
    inc_scan_ptr = ib.buffer_ptr(inc_scan)
    unique_elements_ptr = ib.buffer_ptr(unique_elements)
    inverse_indices_ptr = ib.buffer_ptr(inverse_indices)
    index_converter_ptr = None
    if isinstance(index_converter, tir.Buffer):
        index_converter_ptr = ib.buffer_ptr(index_converter)
    if isinstance(counts, tir.Buffer):
        counts_ptr = ib.buffer_ptr(counts)
        # use indices_ptr as a tmp buffer to store tids with inc_scan[tid] != inc_scan[tid-1]
        unique_seq_indices_ptr = ib.buffer_ptr(inverse_indices)
    batch_size = data.shape[0]
    max_threads = _get_max_threads(batch_size)
    # if need to return counts
    if isinstance(counts, tir.Buffer):
        num_unique = inc_scan_ptr[inc_scan.shape[0] - 1] + 1
        num_elements = data.shape[0]
        # First pass: record, for each unique value, the end position of its
        # run in the sorted order (scratch stored in inverse_indices).
        with ib.new_scope():
            nthread_tx = max_threads
            nthread_bx = ceil_div(batch_size, max_threads)
            tx = te.thread_axis("threadIdx.x")
            bx = te.thread_axis("blockIdx.x")
            ib.scope_attr(tx, "thread_extent", nthread_tx)
            ib.scope_attr(bx, "thread_extent", nthread_bx)
            tid = bx * max_threads + tx
            with ib.if_scope(tid < batch_size):
                with ib.if_scope(tid == 0):
                    unique_seq_indices_ptr[num_unique - 1] = num_elements
                with ib.else_scope():
                    with ib.if_scope(inc_scan_ptr[tid] != inc_scan_ptr[tid - 1]):
                        unique_seq_indices_ptr[inc_scan_ptr[tid] - 1] = tid
        # Second pass: counts are the differences between consecutive run ends.
        with ib.new_scope():
            nthread_tx = max_threads
            nthread_bx = ceil_div(batch_size, max_threads)
            tx = te.thread_axis("threadIdx.x")
            bx = te.thread_axis("blockIdx.x")
            ib.scope_attr(tx, "thread_extent", nthread_tx)
            ib.scope_attr(bx, "thread_extent", nthread_bx)
            tid = bx * max_threads + tx
            with ib.if_scope(tid < num_unique):
                unique_idx = tid if not index_converter_ptr else index_converter_ptr[tid]
                with ib.if_scope(tid == 0):
                    counts_ptr[unique_idx] = unique_seq_indices_ptr[tid]
                with ib.else_scope():
                    counts_ptr[unique_idx] = (
                        unique_seq_indices_ptr[tid] - unique_seq_indices_ptr[tid - 1]
                    )
    # calculate unique elements and inverse indices
    with ib.new_scope():
        nthread_tx = max_threads
        nthread_bx = ceil_div(batch_size, max_threads)
        tx = te.thread_axis("threadIdx.x")
        bx = te.thread_axis("blockIdx.x")
        ib.scope_attr(tx, "thread_extent", nthread_tx)
        ib.scope_attr(bx, "thread_extent", nthread_bx)
        tid = bx * max_threads + tx
        with ib.if_scope(tid < batch_size):
            data_idx = argsorted_indices_ptr[tid]
            unique_idx = (
                inc_scan_ptr[tid]
                if not index_converter_ptr
                else index_converter_ptr[inc_scan_ptr[tid]]
            )
            inverse_indices_ptr[data_idx] = unique_idx
            # Only the first element of each run writes the unique value.
            with ib.if_scope(tid == 0):
                unique_elements_ptr[unique_idx] = data_ptr[data_idx]
            with ib.else_scope():
                with ib.if_scope(inc_scan_ptr[tid] != inc_scan_ptr[tid - 1]):
                    unique_elements_ptr[unique_idx] = data_ptr[data_idx]
    return ib.get()
def _calc_first_occurence_ir(argsorted_indices, inc_scan, first_occurence):
    """Low level IR to calculate the first occurence of each unique element in the input data.
    Parameters
    ----------
    argsorted_indices : Buffer
        A buffer that stores the argsorted indices of the input data.
    inc_scan : Buffer
        A buffer that stores the inclusive scan of the binary tir.NE adjacent difference
        of the sorted data.
    first_occurence : Buffer
        A buffer that stores the first occurence of each unique element in the input data.
    """
    ib = tir.ir_builder.create()
    argsorted_indices_ptr = ib.buffer_ptr(argsorted_indices)
    inc_scan_ptr = ib.buffer_ptr(inc_scan)
    first_occurence_ptr = ib.buffer_ptr(first_occurence)
    batch_size = argsorted_indices.shape[0]
    max_threads = _get_max_threads(batch_size)
    # Pass 1: fill every slot with batch_size as an out-of-range sentinel;
    # only the first num_unique slots are overwritten by the second pass.
    with ib.new_scope():
        nthread_tx = max_threads
        nthread_bx = ceil_div(batch_size, max_threads)
        tx = te.thread_axis("threadIdx.x")
        bx = te.thread_axis("blockIdx.x")
        ib.scope_attr(tx, "thread_extent", nthread_tx)
        ib.scope_attr(bx, "thread_extent", nthread_bx)
        tid = bx * max_threads + tx
        with ib.if_scope(tid < batch_size):
            first_occurence_ptr[tid] = batch_size
    # Pass 2: wherever a new unique value starts in the sorted order (the
    # inclusive scan value changes, or position 0), record the original index
    # of that sorted element (argsorted_indices maps sorted pos -> orig pos).
    with ib.new_scope():
        nthread_tx = max_threads
        nthread_bx = ceil_div(batch_size, max_threads)
        tx = te.thread_axis("threadIdx.x")
        bx = te.thread_axis("blockIdx.x")
        ib.scope_attr(tx, "thread_extent", nthread_tx)
        ib.scope_attr(bx, "thread_extent", nthread_bx)
        tid = bx * max_threads + tx
        with ib.if_scope(tid < batch_size):
            with ib.if_scope(tid == 0):
                first_occurence_ptr[inc_scan_ptr[tid]] = argsorted_indices_ptr[tid]
            with ib.else_scope():
                with ib.if_scope(inc_scan_ptr[tid] != inc_scan_ptr[tid - 1]):
                    first_occurence_ptr[inc_scan_ptr[tid]] = argsorted_indices_ptr[tid]
    return ib.get()
def unique(data, is_sorted=True, return_counts=False):
    """
    Find the unique elements of a 1-D tensor. Please note `output` and `counts` are all padded to
    have the same length of `data` and element with index >= num_unique[0] has undefined value.
    Parameters
    ----------
    data : tvm.te.Tensor
        A 1-D tensor of integers.
    is_sorted : bool
        Whether to sort the unique elements in ascending order before returning as output.
    return_counts : bool
        Whether to return the count of each unique element.
    Returns
    -------
    unique : tvm.te.Tensor
        A 1-D tensor containing the unique elements of the input data tensor. The same size as
        the input data. If there are less unique elements than input data, the end of the tensor
        is padded with zeros.
    indices : tvm.te.Tensor
        A 1-D tensor. The same size as output. For each entry in output, it contains
        the index of its first occurence in the input data. The end of the tensor is padded
        with the length of the input data.
    inverse_indices : tvm.te.Tensor
        A 1-D tensor. For each entry in data, it contains the index of that data element in the
        unique array. (Note that inverse_indices is very similar to indices if output is not
        sorted)
    num_unique : tvm.te.Tensor
        A 1-D tensor with size=1 containing the number of unique elements in the input data tensor.
    counts (optional) : tvm.te.Tensor
        A 1-D tensor containing the count of each unique element in the output.
    Examples
    --------
    .. code-block:: python
        [output, indices, inverse_indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], False)
        output = [4, 5, 1, 2, 3, ?, ?, ?]
        indices = [0, 1, 2, 3, 4, ?, ?, ?]
        inverse_indices = [0, 1, 2, 3, 4, 4, 0, 1]
        num_unique = [5]
        [output, indices, inverse_indices, num_unique, counts] = unique(
            [4, 5, 1, 2, 3, 3, 4, 5], False, True
        )
        output = [4, 5, 1, 2, 3, ?, ?, ?]
        indices = [0, 1, 2, 3, 4, ?, ?, ?]
        inverse_indices = [0, 1, 2, 3, 4, 4, 0, 1]
        num_unique = [5]
        counts = [2, 2, 1, 1, 2, ?, ?, ?]
        [output, indices, inverse_indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], True)
        output = [1, 2, 3, 4, 5, ?, ?, ?]
        indices = [2, 3, 4, 0, 1, ?, ?, ?]
        inverse_indices = [3, 4, 0, 1, 2, 2, 3, 4]
        num_unique = [5]
    """
    sorted_data = sort(data)
    argsorted_indices = argsort(data, dtype="int32")
    # adjacent difference: 1 where the sorted data changes value, else 0
    adjacent_diff = _calc_adjacent_diff(sorted_data, out_dtype="int32", binop=tir.NE)
    # inclusive scan: maps each sorted position to its 0-based unique-run id
    inc_scan = cumsum(adjacent_diff, dtype="int32", exclusive=0)
    # total number of unique elements
    num_unique_elements = _calc_num_unique(inc_scan)
    # buffers (use tir.decl_buffer consistently; this was a mix of
    # tir.decl_buffer and tvm.tir.decl_buffer before)
    data_buf = tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
    argsorted_indices_buf = tir.decl_buffer(
        data.shape, "int32", "argsorted_indices_buf", data_alignment=8
    )
    inc_scan_buf = tir.decl_buffer(data.shape, "int32", "inc_scan_buf", data_alignment=8)
    unique_elements_buf = tir.decl_buffer(
        data.shape, data.dtype, "unique_elements_buf", data_alignment=8
    )
    inverse_indices_buf = tir.decl_buffer(
        data.shape, "int32", "inverse_indices_buf", data_alignment=8
    )
    # prepare outputs
    if return_counts:
        counts_buf = tir.decl_buffer(data.shape, "int32", "counts_buf", data_alignment=8)
        out_data_shape = [data.shape] * 3
        out_buffers = [unique_elements_buf, inverse_indices_buf, counts_buf]
        out_dtypes = [data.dtype, "int32", "int32"]
    else:
        out_data_shape = [data.shape] * 2
        out_buffers = [unique_elements_buf, inverse_indices_buf]
        out_dtypes = [data.dtype, "int32"]
    # prepare inputs and fcompute
    # calculate first occurence
    first_occurence_buf = tir.decl_buffer(
        data.shape, "int32", "first_occurence_buf", data_alignment=8
    )
    first_occurence = te.extern(
        [data.shape],
        [argsorted_indices, inc_scan],
        lambda ins, outs: _calc_first_occurence_ir(ins[0], ins[1], outs[0]),
        dtype=["int32"],
        in_buffers=[argsorted_indices_buf, inc_scan_buf],
        out_buffers=[first_occurence_buf],
        name="_calc_first_occurence",
        tag="_calc_first_occurence_gpu",
    )
    if is_sorted:
        in_data = [data, argsorted_indices, inc_scan]
        in_buffers = [data_buf, argsorted_indices_buf, inc_scan_buf]
        if return_counts:
            fcompute = lambda ins, outs: _calc_unique_ir(*ins, None, *outs)
        else:
            fcompute = lambda ins, outs: _calc_unique_ir(*ins, None, *outs, None)
        indices = first_occurence
    else:
        # calculate index converter by sorting unique elements by their first occurence
        argsorted_first_occurence = argsort(first_occurence, dtype="int32")
        index_converter = argsort(argsorted_first_occurence, dtype="int32")
        index_converter_buf = tir.decl_buffer(
            data.shape, "int32", "index_converter_buf", data_alignment=8
        )
        in_data = [data, argsorted_indices, inc_scan, index_converter]
        in_buffers = [data_buf, argsorted_indices_buf, inc_scan_buf, index_converter_buf]
        if return_counts:
            fcompute = lambda ins, outs: _calc_unique_ir(*ins, *outs)
        else:
            fcompute = lambda ins, outs: _calc_unique_ir(*ins, *outs, None)
        indices = sort(first_occurence)
    outs = te.extern(
        out_data_shape,
        in_data,
        fcompute,
        dtype=out_dtypes,
        in_buffers=in_buffers,
        out_buffers=out_buffers,
        name="_calc_unique",
        tag="_calc_unique_gpu",
    )
    if return_counts:
        return [outs[0], indices, outs[1], num_unique_elements, outs[2]]
    return [outs[0], indices, outs[1], num_unique_elements]
| 17,024 | 39.535714 | 99 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv3d_transpose_ncdhw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Conv3d transpose template for cuda backend"""
import tvm
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import get_const_tuple, traverse_inline
from .conv3d_direct import schedule_direct_conv3d_cuda
@autotvm.register_topi_compute("conv3d_transpose_ncdhw.cuda")
def conv3d_transpose_ncdhw(cfg, data, kernel, stride, padding, out_dtype, output_padding):
    """Transposed 3D convolution ncdhw forward operator.
    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template
    Input : tvm.te.Tensor
        5-D with shape [batch, in_channel, in_depth, in_height, in_width]
    Filter : tvm.te.Tensor
        5-D with shape [in_channel, num_filter, filter_depth, filter_height, filter_width]
    strides : int or a list/tuple of three ints
        The spatial stride along depth, height and width
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    out_dtype: str
        The output type. This is used in mixed precision
    output_padding : tuple of three ints
        Used to disambiguate output shape
    Returns
    -------
    Output : tvm.te.Tensor
        5-D with shape [batch, out_channel, out_depth, out_height, out_width]
    """
    batch, inp_channels, inp_depth, inp_height, inp_width = get_const_tuple(data.shape)
    _, out_channels, kernel_depth, kernel_height, kernel_width = get_const_tuple(kernel.shape)
    stride_depth, stride_height, stride_width = stride
    outpad_depth, outpad_height, outpad_width = output_padding
    # output_padding must be strictly less than the stride on each axis,
    # otherwise the requested output shape is not achievable.
    assert (
        outpad_height < stride_height
        and outpad_width < stride_width
        and outpad_depth < stride_depth
    )
    cfg.stride = stride
    pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = nn.get_pad_tuple3d(
        padding, (kernel_depth, kernel_height, kernel_width)
    )
    # Transposed conv is realized as a direct conv over a stride-dilated,
    # zero-padded input; the paddings below are rewritten accordingly.
    out_depth = (inp_depth - 1) * stride_depth + kernel_depth - pad_front - pad_back + outpad_depth
    pad_front = kernel_depth - 1 - pad_front
    pad_back = kernel_depth - 1 - pad_back
    dilated_depth = stride_depth * (inp_depth - 1) + 1
    out_width = (inp_width - 1) * stride_width + kernel_width - pad_left - pad_right + outpad_width
    pad_left = kernel_width - 1 - pad_left
    pad_right = kernel_width - 1 - pad_right
    dilated_width = stride_width * (inp_width - 1) + 1
    out_height = (
        (inp_height - 1) * stride_height + kernel_height - pad_top - pad_bottom + outpad_height
    )
    pad_top = kernel_height - 1 - pad_top
    pad_bottom = kernel_height - 1 - pad_bottom
    dilated_height = stride_height * (inp_height - 1) + 1
    # compute pad
    # Fix: pad with a zero of the input's dtype instead of hard-coded
    # "float32", so non-fp32 inputs do not mix dtypes in if_then_else.
    pad_value = tvm.tir.const(0.0, data.dtype)
    data = te.compute(
        (
            batch,
            inp_channels,
            pad_front + dilated_depth + pad_back,
            pad_top + dilated_height + pad_bottom,
            pad_left + dilated_width + pad_right,
        ),
        lambda n, c, d, y, x: tvm.tir.if_then_else(
            tvm.tir.all(
                x >= pad_left,
                x < pad_left + dilated_width,
                tvm.tir.indexmod(x - pad_left, stride_width).equal(0),
                y >= pad_top,
                y < pad_top + dilated_height,
                tvm.tir.indexmod(y - pad_top, stride_height).equal(0),
                d >= pad_front,
                d < pad_front + dilated_depth,
                tvm.tir.indexmod(d - pad_front, stride_depth).equal(0),
            ),
            data[
                n,
                c,
                tvm.tir.indexdiv(d - pad_front, stride_depth),
                tvm.tir.indexdiv(y - pad_top, stride_height),
                tvm.tir.indexdiv(x - pad_left, stride_width),
            ],
            pad_value,
        ),
        name="data_pad",
    )
    # compute transposed conv: note the kernel is flipped on all three
    # spatial axes, as required for transposed convolution.
    dc = te.reduce_axis((0, inp_channels), name="dc")
    dd = te.reduce_axis((0, kernel_depth), name="dd")
    dh = te.reduce_axis((0, kernel_height), name="dh")
    dw = te.reduce_axis((0, kernel_width), name="dw")
    data_out = te.compute(
        (batch, out_channels, out_depth, out_height, out_width),
        lambda b, c, d, h, w: te.sum(
            data[b, dc, d + dd, h + dh, w + dw].astype(out_dtype)
            * kernel[
                dc, c, kernel_depth - 1 - dd, kernel_height - 1 - dh, kernel_width - 1 - dw
            ].astype(out_dtype),
            axis=[dc, dd, dh, dw],
        ),
        tag="conv3d_transpose_ncdhw",
    )
    return data_out
@autotvm.register_topi_schedule("conv3d_transpose_ncdhw.cuda")
def schedule_conv3d_transpose_ncdhw(cfg, outs):
    """Build the cuda schedule for ``conv3d_transpose_ncdhw``.

    Parameters
    ----------
    cfg : ConfigEntity
        Autotvm parameters for this template.
    outs : tvm.te.Tensor or list of tvm.te.Tensor
        Output tensor(s) of the transposed-conv computation graph.

    Returns
    -------
    tvm.te.schedule.Schedule
        Schedule with the direct NCDHW conv3d template applied.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([t.op for t in outs])

    def _apply(op):
        # Only the transposed-conv stage itself receives the direct template.
        if op.tag == "conv3d_transpose_ncdhw":
            schedule_direct_conv3d_cuda(
                cfg, sch, op.output(0), "NCDHW", "conv3d_transpose_ncdhw.cuda"
            )

    traverse_inline(sch, outs[0].op, _apply)
    return sch
| 6,131 | 35.718563 | 99 | py |
tvm | tvm-main/python/tvm/topi/cuda/scan.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements
"Scan related operators"
from typing import Callable, Optional, Union
import tvm
from tvm import te
from tvm.contrib.thrust import can_use_rocthrust, can_use_thrust
from .. import tag
from ..math import cast, ceil_log2
from ..transform import expand_dims, reshape, squeeze, transpose
from ..utils import ceil_div, get_const_int, prod, swap
from .injective import schedule_injective_from_existing
def _get_thrust_func_name(tvmop):
    """Map a generic TIR binop to the name of its thrust scan packed function."""
    supported = {tvm.tir.generic.add: "tvm.contrib.thrust.sum_scan"}
    assert tvmop in supported, f"{tvmop} not supported by thrust"
    return supported[tvmop]
def exclusive_scan_ir(data, output, reduction=None, binop=tvm.tir.generic.add, identity_value=0):
    """Low level IR to do exclusive sum scan along rows of 2D input.
    Parameters
    ----------
    data : Buffer
        Input N-D Buffer. Scan is done over the innermost axis.
    output: Buffer
        A buffer to store the output scan, of the same shape as data
    reduction: Buffer, optional
        (N-1)-D Buffer, to store the sum of each scan axis.
    binop: function, optional
        A binary associative op to use for scan. The function takes two TIR expressions
        and produce a new TIR expression. By default it uses tvm.tir.generic.add to compute
        prefix sum.
    identity_value: int or float
        A value for the binary operation which provides the identity property. E.g. if * is
        your operator and i is the identity_value then a * i = a for all a in the domain of
        your operation.
    """
    batch_size = prod(data.shape[:-1])
    scan_axis_size = data.shape[-1]
    ib = tvm.tir.ir_builder.create()
    data = ib.buffer_ptr(data)
    output = ib.buffer_ptr(output)
    out_dtype = output.dtype
    if reduction is not None:
        reduction = ib.buffer_ptr(reduction)
    max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    with ib.if_scope(scan_axis_size == 0):
        # Degenerate case: nothing to scan; each row's reduction is the identity.
        with ib.new_scope():
            bx = te.thread_axis("blockIdx.x")
            ib.scope_attr(bx, "thread_extent", batch_size)
            with ib.if_scope(bx < batch_size):
                if reduction is not None:
                    reduction[bx] = cast(identity_value, out_dtype)
    with ib.else_scope():
        # Copy the input into `output`; the scan below runs in place on it.
        with ib.new_scope():
            nthread_tx = max_threads
            nthread_bx = ceil_div(scan_axis_size, max_threads)
            nthread_by = batch_size
            tx = te.thread_axis("threadIdx.x")
            bx = te.thread_axis("blockIdx.x")
            by = te.thread_axis("blockIdx.y")
            ib.scope_attr(tx, "thread_extent", nthread_tx)
            ib.scope_attr(bx, "thread_extent", nthread_bx)
            ib.scope_attr(by, "thread_extent", nthread_by)
            tid = bx * nthread_tx + tx
            with ib.if_scope(tid < scan_axis_size):
                output[by * scan_axis_size + tid] = cast(data[by * scan_axis_size + tid], out_dtype)
        nthread_tx = max_threads
        nthread_bx = ceil_div(scan_axis_size, max_threads)
        nthread_by = batch_size
        # The following algorithm performs parallel exclusive scan
        # using the classic up-sweep / down-sweep structure.
        # Up Sweep of exclusive scan
        lim = ceil_log2(scan_axis_size)
        with ib.for_range(0, cast(lim, "int64"), dtype="int64") as l2_width:
            width = 2 << l2_width
            with ib.new_scope():
                tx = te.thread_axis("threadIdx.x")
                bx = te.thread_axis("blockIdx.x")
                ib.scope_attr(tx, "thread_extent", nthread_tx)
                ib.scope_attr(
                    bx,
                    "thread_extent",
                    tvm.tir.generic.cast(ceil_div(scan_axis_size, max_threads * width), "int32"),
                )
                tid = bx * nthread_tx + tx
                by = te.thread_axis("blockIdx.y")
                ib.scope_attr(by, "thread_extent", nthread_by)
                start = ib.allocate("int64", (1,), name="start", scope="local")
                middle = ib.allocate("int64", (1,), name="middle", scope="local")
                end = ib.allocate("int64", (1,), name="end", scope="local")
                # Each thread combines one [start, end) segment of width `width`.
                start[0] = width * tid
                with ib.if_scope(start[0] < scan_axis_size):
                    middle[0] = start[0] + tvm.tir.indexdiv(width, 2)
                    end[0] = tvm.te.min(start[0] + width, scan_axis_size)
                    with ib.if_scope(middle[0] < scan_axis_size):
                        output[by * scan_axis_size + end[0] - 1] = binop(
                            output[by * scan_axis_size + end[0] - 1],
                            output[by * scan_axis_size + middle[0] - 1],
                        )
        # Down Sweep of exclusive scan
        with ib.new_scope():
            bx = te.thread_axis("blockIdx.x")
            ib.scope_attr(bx, "thread_extent", batch_size)
            with ib.if_scope(bx < batch_size):
                # Save the row total (if requested), then clear the last slot
                # to the identity before sweeping back down.
                if reduction is not None:
                    reduction[bx] = output[(bx + 1) * scan_axis_size - 1]
                output[(bx + 1) * scan_axis_size - 1] = cast(identity_value, out_dtype)
        with ib.for_range(0, cast(lim, "int64"), dtype="int64") as l2_width:
            width = 2 << (lim - l2_width - 1)
            with ib.new_scope():
                tx = te.thread_axis("threadIdx.x")
                bx = te.thread_axis("blockIdx.x")
                ib.scope_attr(tx, "thread_extent", nthread_tx)
                ib.scope_attr(
                    bx,
                    "thread_extent",
                    tvm.tir.generic.cast(ceil_div(scan_axis_size, max_threads * width), "int32"),
                )
                tid = bx * nthread_tx + tx
                by = te.thread_axis("blockIdx.y")
                ib.scope_attr(by, "thread_extent", nthread_by)
                start = ib.allocate("int64", (1,), name="start", scope="local")
                middle = ib.allocate("int64", (1,), name="middle", scope="local")
                end = ib.allocate("int64", (1,), name="end", scope="local")
                # Fixed: this scratch buffer was previously allocated with
                # name="end", duplicating the name of the buffer above in the
                # generated IR; renamed to "tmp" to match the Python variable.
                tmp = ib.allocate(out_dtype, (1,), name="tmp", scope="local")
                start[0] = width * tid
                with ib.if_scope(tvm.tir.all(start[0] < scan_axis_size)):
                    middle[0] = start[0] + tvm.tir.indexdiv(width, 2)
                    end[0] = tvm.tir.min(start[0] + width, scan_axis_size)
                    with ib.if_scope(middle[0] < scan_axis_size):
                        # Swap-and-combine step of the down sweep.
                        tmp[0] = output[by * scan_axis_size + middle[0] - 1]
                        output[by * scan_axis_size + middle[0] - 1] = output[
                            by * scan_axis_size + end[0] - 1
                        ]
                        output[by * scan_axis_size + end[0] - 1] = binop(
                            output[by * scan_axis_size + end[0] - 1], tmp[0]
                        )
    return ib.get()
def get_reduction_from_exclusive_scan(data, ex_scan_output, binop=tvm.tir.generic.add):
    """Return the sum of the last element of data and the exclusive scan output.
    This is the reduction of data along each row (for 2-D case).
    Parameters
    ----------
    data : tvm.te.Tensor
        Input data of any shape
    ex_scan_output : tvm.te.Tensor
        The output of exclusive scan on data
    binop: function, optional
        A binary associative op to use for scan. The function takes two TIR expressions
        and produce a new TIR expression. By default it uses tvm.tir.generic.add to compute
        prefix sum.
    Returns
    -------
    reduction : tvm.te.Tensor
        (N-1)-D tensor storing the reduction of each scan axis.
    """
    ndim = len(data.shape)
    if ndim == 1:
        # The IR below assumes rank >= 2; lift 1-D input to shape (1, N).
        data = expand_dims(data, axis=0)
        ex_scan_output = expand_dims(ex_scan_output, axis=0)
    def ir(data, data_ex_scan, reduction):
        # One thread per row: combine the row's last exclusive-scan entry with
        # the row's last input element to obtain the full-row reduction.
        batch_size = prod(data.shape[:-1])
        scan_axis_size = data.shape[-1]
        ib = tvm.tir.ir_builder.create()
        data = ib.buffer_ptr(data)
        data_ex_scan = ib.buffer_ptr(data_ex_scan)
        reduction = ib.buffer_ptr(reduction)
        max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
        with ib.new_scope():
            nthread_tx = max_threads
            nthread_bx = ceil_div(batch_size, max_threads)
            tx = te.thread_axis("threadIdx.x")
            bx = te.thread_axis("blockIdx.x")
            ib.scope_attr(tx, "thread_extent", nthread_tx)
            ib.scope_attr(bx, "thread_extent", nthread_bx)
            tid = bx * max_threads + tx
            with ib.if_scope(tid < batch_size):
                with ib.if_scope(scan_axis_size > 0):
                    reduction[tid] = binop(
                        data_ex_scan[tid * scan_axis_size + scan_axis_size - 1],
                        data[tid * scan_axis_size + scan_axis_size - 1],
                    )
                with ib.else_scope():
                    # Empty scan axis: define the reduction to be zero.
                    reduction[tid] = cast(0, reduction.dtype)
        return ib.get()
    # NOTE(review): the debug name "valid_indices_buf" looks copy-pasted from
    # another kernel; it only labels the buffer, so behavior is unaffected.
    data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "valid_indices_buf", data_alignment=8)
    ex_scan_output_buf = tvm.tir.decl_buffer(
        ex_scan_output.shape, ex_scan_output.dtype, "ex_scan_output_buf", data_alignment=8
    )
    reduction = te.extern(
        [data.shape[:-1]],
        [data, ex_scan_output],
        lambda ins, outs: ir(ins[0], ins[1], outs[0]),
        dtype=[ex_scan_output.dtype],
        in_buffers=[data_buf, ex_scan_output_buf],
        name="ex_scan_reduction",
        tag="ex_scan_reduction_gpu",
    )
    if ndim == 1:
        return squeeze(reduction, 0)
    return reduction
def scan_thrust(
    data, output_dtype, exclusive=True, return_reduction=False, binop=tvm.tir.generic.add
):
    """Scan over the innermost axis using a thrust packed function.

    Parameters
    ----------
    data : tvm.te.Tensor
        N-D input; the scan runs along the last axis.
    output_dtype : string
        Element type of the scan output.
    exclusive : bool, optional
        If True compute an exclusive scan, otherwise an inclusive one.
    return_reduction : bool, optional
        Also return an (N-1)-D tensor of per-row totals. Must be False when
        ``exclusive`` is False.
    binop : function, optional
        Associative binary op. Only ``tvm.tir.generic.add`` is supported,
        since the op has to map to an existing thrust packed function.

    Returns
    -------
    output : tvm.te.Tensor
        Scanned tensor, same shape as ``data``.
    reduction : tvm.te.Tensor, optional
        Per-scan-axis totals, returned only when ``return_reduction`` is True.
    """
    in_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
    out_buf = tvm.tir.decl_buffer(data.shape, output_dtype, "output_buf", data_alignment=8)
    output = te.extern(
        [data.shape],
        [data],
        lambda ins, outs: tvm.tir.call_packed(
            _get_thrust_func_name(binop), ins[0], outs[0], exclusive
        ),
        dtype=[output_dtype],
        in_buffers=[in_buf],
        out_buffers=[out_buf],
        name="exclusive_scan_thrust",
        tag="exclusive_scan_thrust_gpu",
    )
    if not return_reduction:
        return output
    assert exclusive, "return_reduction should be False for inclusive scan"
    return output, get_reduction_from_exclusive_scan(data, output, binop)
def exclusive_scan(
    data,
    axis=-1,
    return_reduction=False,
    output_dtype=None,
    binop=tvm.tir.generic.add,
    identity_value=0,
):
    """Exclusive scan of ``data`` along one axis.

    Element j of the result is the fold (``binop``) of all elements strictly
    before j along the scan axis; position 0 receives ``identity_value``.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input tensor of any rank.
    axis : int, optional
        Axis to scan along; defaults to the innermost axis.
    return_reduction : bool, optional
        If True, additionally return an (N-1)-D tensor holding the total
        reduction over every scan axis; it is produced as a by-product of
        the scan, so there is no extra cost.
    output_dtype : string, optional
        Result dtype; falls back to ``data.dtype`` when None or "".
    binop : function, optional
        Associative binary op taking two TIR expressions and producing one;
        defaults to ``tvm.tir.generic.add`` (prefix sum).
    identity_value : int or float
        Identity element of ``binop`` (e.g. 0 for add, 1 for multiply).

    Returns
    -------
    output : tvm.te.Tensor
        Scanned tensor, same shape as ``data``.
    reduction : tvm.te.Tensor, optional
        Per-axis totals; present only when ``return_reduction`` is True.
    """
    if output_dtype is None or output_dtype == "":
        output_dtype = data.dtype

    rank = len(data.shape)
    if axis < 0:
        axis += rank

    def _scan_innermost(tensor):
        # Use the thrust kernel whenever the target supports it (sum scan only).
        target = tvm.target.Target.current()
        thrust_ok = (
            target
            and binop == tvm.tir.generic.add
            and (
                can_use_thrust(target, "tvm.contrib.thrust.sum_scan")
                or can_use_rocthrust(target, "tvm.contrib.thrust.sum_scan")
            )
        )
        if thrust_ok:
            return scan_thrust(
                tensor, output_dtype, exclusive=True, return_reduction=return_reduction, binop=binop
            )

        # The TIR exclusive scan accepts only 2D or higher-rank inputs.
        one_dim = rank == 1
        if one_dim:
            tensor = expand_dims(tensor, axis=0)

        in_buf = tvm.tir.decl_buffer(tensor.shape, tensor.dtype, "data_buf", data_alignment=8)
        out_buf = tvm.tir.decl_buffer(tensor.shape, output_dtype, "output_buf", data_alignment=8)
        reduction = None
        if return_reduction:
            output, reduction = te.extern(
                [tensor.shape, tensor.shape[:-1]],
                [tensor],
                lambda ins, outs: exclusive_scan_ir(
                    ins[0], outs[0], outs[1], binop=binop, identity_value=identity_value
                ),
                dtype=[output_dtype, output_dtype],
                in_buffers=[in_buf],
                name="exclusive_scan",
                tag="exclusive_scan_gpu",
            )
        else:
            output = te.extern(
                [tensor.shape],
                [tensor],
                lambda ins, outs: exclusive_scan_ir(
                    ins[0], outs[0], binop=binop, identity_value=identity_value
                ),
                dtype=[output_dtype],
                in_buffers=[in_buf],
                out_buffers=[out_buf],
                name="exclusive_scan",
                tag="exclusive_scan_gpu",
            )
        if one_dim:
            output = squeeze(output, 0)
            if return_reduction:
                reduction = squeeze(reduction, 0)
        return (output, reduction) if return_reduction else output

    # The scan always runs over the innermost axis for performance; move the
    # requested axis there and back when it is not already innermost.
    moved = axis != rank - 1
    if moved:
        data = transpose(data, swap(list(range(rank)), axis))

    reduction = None
    if return_reduction:
        output, reduction = _scan_innermost(data)
    else:
        output = _scan_innermost(data)

    if moved:
        output = transpose(output, swap(list(range(rank)), axis))

    if return_reduction:
        return output, reduction
    return output
def inclusive_scan(data, axis=-1, output_dtype=None, binop=tvm.tir.generic.add, identity_value=0):
    """Inclusive scan of ``data`` along one axis.

    Computed as ``binop(data, exclusive_scan(data))``; the j-th output is the
    fold of elements 0..j along the scan axis.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input tensor of any rank.
    axis : int, optional
        Axis to scan along; defaults to the innermost axis.
    output_dtype : string, optional
        Result dtype; falls back to ``data.dtype`` when None or "".
    binop : function, optional
        Associative binary op; defaults to ``tvm.tir.generic.add``.
    identity_value : int or float
        Identity element of ``binop``.

    Returns
    -------
    output : tvm.te.Tensor
        Scanned tensor of the same rank as ``data``.
    """
    prefix = exclusive_scan(
        data, axis, output_dtype=output_dtype, binop=binop, identity_value=identity_value
    )
    # Cast the input so the final combine works in the output dtype.
    if output_dtype is not None and output_dtype != "" and data.dtype != output_dtype:
        data = cast(data, output_dtype)
    return binop(data, prefix)
def schedule_scan(outs):
    """Build a schedule for a scan computation graph.

    Parameters
    ----------
    outs : tvm.te.Tensor or list of tvm.te.Tensor
        Output tensor(s) of the scan graph.

    Returns
    -------
    tvm.te.schedule.Schedule
        Schedule with injective stages fused/bound for the GPU.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([t.op for t in outs])
    visited = []

    def _visit(op):
        # Schedule injective ops and walk the graph depth-first, skipping
        # placeholder inputs (they have no input_tensors) and ops seen before.
        if tag.is_injective(op.tag):
            schedule_injective_from_existing(sch, op.output(0))
        for inp in op.input_tensors:
            if inp.op.input_tensors and inp.op not in visited:
                _visit(inp.op)
        visited.append(op)

    for out in outs:
        _visit(out.op)
    return sch
def scanop(
    data: tvm.te.Tensor,
    binop: Callable[["tvm.Expr", "tvm.Expr"], "tvm.Expr"],
    identity_value: Union[float, int],
    axis: Optional[int] = None,
    dtype: Optional[str] = None,
    exclusive: Optional[bool] = None,
) -> tvm.te.Tensor:
    """Generic cumulative scan with numpy-style axis handling.

    Backs both :func:`cumsum` and :func:`cumprod`. E.g. with ``*`` as the
    operator and input [1, 2, 3, 4], the inclusive result is
    [1, 1 * 2, 1 * 2 * 3, 1 * 2 * 3 * 4].

    Parameters
    ----------
    data : tvm.te.Tensor
        Input tensor.
    binop : Callable (tvm.Expr, tvm.Expr) -> tvm.Expr
        Associative and commutative binary operator.
    identity_value : int or float
        Identity element of ``binop``.
    axis : int, optional
        Axis to scan along. When None, the input is flattened first and the
        result is a 1-D array.
    dtype : string, optional
        Output/accumulator dtype; defaults to ``data.dtype``.
    exclusive : bool, optional
        When truthy, compute an exclusive scan: the j-th output folds only
        the first (j-1) elements. Otherwise the scan is inclusive.

    Returns
    -------
    result : tvm.te.Tensor
        Same shape as ``data`` when ``axis`` is given; 1-D otherwise.
    """
    if axis is None:
        data = reshape(data, (prod(data.shape),))
        axis = 0
    axis = get_const_int(axis)
    scan = exclusive_scan if exclusive else inclusive_scan
    return scan(data, axis, output_dtype=dtype, binop=binop, identity_value=identity_value)
def cumsum(
    data: tvm.te.Tensor,
    axis: Optional[int] = None,
    dtype: Optional[int] = None,
    exclusive: Optional[bool] = None,
) -> tvm.te.Tensor:
    """Numpy-style cumulative sum along an axis.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input tensor.
    axis : int, optional
        Axis to sum along; when None the input is flattened first.
    dtype : string, optional
        Output/accumulator dtype; defaults to ``data.dtype``.
    exclusive : bool, optional
        When truthy, the j-th output is the sum of the first (j-1) elements
        (exclusive scan); otherwise it is the sum of the first j elements.

    Returns
    -------
    result : tvm.te.Tensor
        Same shape as ``data`` when ``axis`` is given; 1-D otherwise.
    """
    return scanop(data, tvm.tir.generic.add, 0, axis=axis, dtype=dtype, exclusive=exclusive)
def cumprod(
    data: tvm.te.Tensor,
    axis: Optional[int] = None,
    dtype: Optional[int] = None,
    exclusive: Optional[bool] = None,
):
    """Numpy-style cumulative product along an axis.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input tensor.
    axis : int, optional
        Axis to multiply along; when None the input is flattened first.
    dtype : string, optional
        Output/accumulator dtype; defaults to ``data.dtype``.
    exclusive : bool, optional
        When truthy, the j-th output is the product of the first (j-1)
        elements (exclusive scan); otherwise of the first j elements.

    Returns
    -------
    result : tvm.te.Tensor
        Same shape as ``data`` when ``axis`` is given; 1-D otherwise.
    """
    return scanop(data, tvm.tir.generic.multiply, 1, axis=axis, dtype=dtype, exclusive=exclusive)
| 24,410 | 35.488789 | 100 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv3d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Compute definition for conv3d with cuda backend"""
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import OtherOptionEntity
from tvm.contrib import cudnn

from .. import nn, generic
from ..utils import get_const_tuple, traverse_inline
from .conv3d_direct import schedule_direct_conv3d_cuda
@autotvm.register_topi_compute("conv3d_ncdhw.cuda")
def conv3d_ncdhw(cfg, data, kernel, strides, padding, dilation, groups, out_dtype="float32"):
    """Conv3D in NCDHW layout for the cuda backend.

    Thin forwarding wrapper: the generic TOPI compute definition is reused
    and all cuda-specific tuning lives in the paired schedule.

    Parameters
    ----------
    cfg : ConfigEntity
        The config for this template (unused by the compute itself).
    data : tvm.te.Tensor
        5-D with shape [batch, in_channel, in_depth, in_height, in_width]
    kernel : tvm.te.Tensor
        5-D with shape [num_filter, in_channel, filter_depth, filter_height, filter_width]
    strides : int or a list/tuple of three ints
        stride size, or [stride_depth, stride_height, stride_width]
    padding : int or a list/tuple of three ints
        padding size, or [pad_depth, pad_height, pad_width]
    dilation : int or a list/tuple of three ints
        dilation size, or [dilation_depth, dilation_height, dilation_width]
    groups : int
        Number of groups.
    out_dtype : str
        The output type. This is used for mixed precision.

    Returns
    -------
    output : tvm.te.Tensor
        5-D with shape [batch, out_channel, out_depth, out_height, out_width]
    """
    return nn.conv3d_ncdhw(
        data, kernel, strides, padding, dilation, groups, out_dtype
    )
@autotvm.register_topi_schedule("conv3d_ncdhw.cuda")
def schedule_conv3d_ncdhw(cfg, outs):
    """Build the CUDA schedule for conv3d in NCDHW layout.

    Parameters
    ----------
    cfg : ConfigEntity
        The config for this template.
    outs : Array of Tensor (or a single Tensor)
        The computation graph description of conv3d in the format of an
        array of tensors.

    Returns
    -------
    s : Schedule
        The computation schedule for conv3d.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _apply_direct_schedule(op):
        # Only the direct conv3d compute op gets the tuned schedule; all
        # other ops are handled by traverse_inline.
        if "conv3d_ncdhw" in op.tag:
            schedule_direct_conv3d_cuda(cfg, sch, op.output(0), "NCDHW", "conv3d_ncdhw.cuda")

    traverse_inline(sch, outs[0].op, _apply_direct_schedule)
    return sch
@autotvm.register_topi_compute("conv3d_ndhwc.cuda")
def conv3d_ndhwc(cfg, data, kernel, strides, padding, dilation, groups, out_dtype="float32"):
    """Conv3D in NDHWC layout for the cuda backend.

    Thin forwarding wrapper around the generic TOPI compute definition;
    cuda-specific tuning lives in the paired schedule.

    Parameters
    ----------
    cfg : ConfigEntity
        The config for this template (unused by the compute itself).
    data : tvm.te.Tensor
        5-D with shape [batch, in_depth, in_height, in_width, in_channel]
    kernel : tvm.te.Tensor
        5-D with shape [filter_depth, filter_height, filter_width, in_channel, num_filter]
    strides : int or a list/tuple of three ints
        Stride size, or [stride_depth, stride_height, stride_width]
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    dilation : int or a list/tuple of three ints
        dilation size, or [dilation_depth, dilation_height, dilation_width]
    groups : int
        Number of groups.
    out_dtype : str
        The output type. This is used for mixed precision.

    Returns
    -------
    output : tvm.te.Tensor
        5-D with shape [batch, out_depth, out_height, out_width, out_channel]
    """
    return nn.conv3d_ndhwc(
        data, kernel, strides, padding, dilation, groups, out_dtype
    )
@autotvm.register_topi_schedule("conv3d_ndhwc.cuda")
def schedule_conv3d_ndhwc(cfg, outs):
    """Build the CUDA schedule for conv3d in NDHWC layout.

    Parameters
    ----------
    cfg : ConfigEntity
        The config for this template.
    outs : Array of Tensor (or a single Tensor)
        The computation graph description of conv3d in the format of an
        array of tensors.

    Returns
    -------
    s : Schedule
        The computation schedule for conv3d.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _apply_direct_schedule(op):
        # Only the direct conv3d compute op gets the tuned schedule; all
        # other ops are handled by traverse_inline.
        if "conv3d_ndhwc" in op.tag:
            schedule_direct_conv3d_cuda(cfg, sch, op.output(0), "NDHWC", "conv3d_ndhwc.cuda")

    traverse_inline(sch, outs[0].op, _apply_direct_schedule)
    return sch
@autotvm.register_topi_compute("conv3d_cudnn.cuda")
def conv3d_cudnn(
    cfg, data, kernel, strides, padding, dilation, groups, layout="NCDHW", out_dtype="float32"
):
    """Conv3D operator for the cuda backend, offloaded to cuDNN.

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template
    data : tvm.te.Tensor
        5-D with shape [batch, in_channel, in_depth, in_height, in_width]
    kernel : tvm.te.Tensor
        5-D with shape [num_filter, in_channel, filter_depth, filter_height, filter_width]
    strides : int or a list/tuple of three ints
        stride size, or [stride_depth, stride_height, stride_width]
    padding : int or a list/tuple of three ints
        padding size, or [pad_depth, pad_height, pad_width]
    dilation: int or a list/tuple of three ints
        dilation size, or [dilation_depth, dilation_height, dilation_width]
    groups : int
        Number of groups; must be 1 (grouped conv3d is not supported here).
    layout : str
        Layout of data: "NCDHW" or "NDHWC".
    out_dtype: str
        The output type. This is used for mixed precision.

    Returns
    -------
    output : tvm.te.Tensor
        5-D with shape [batch, out_channel, out_depth, out_height, out_width]

    Raises
    ------
    ValueError
        If `layout` is neither "NCDHW" nor "NDHWC".
    """
    if layout == "NCDHW":
        tensor_format = 0  # CUDNN_TENSOR_NCHW
        N, _, D, H, W = get_const_tuple(data.shape)
    elif layout == "NDHWC":
        tensor_format = 1  # CUDNN_TENSOR_NHWC
        N, D, H, W, _ = get_const_tuple(data.shape)
    else:
        raise ValueError(f"Unsupported layout {layout} in cudnn")
    # NOTE(review): the kernel dims are unpacked assuming OIDHW for both
    # layouts. They are only used in the FLOP estimate below, whose product
    # is order-invariant, so this is harmless — confirm before reusing the
    # per-dimension values for anything else.
    CO, CI, KD, KH, KW = get_const_tuple(kernel.shape)
    assert groups == 1, "conv3d_cudnn does not support groups"

    # Normalize scalar stride/padding/dilation to per-dimension triples.
    stride_d, stride_h, stride_w = (
        (strides, strides, strides) if isinstance(strides, int) else strides
    )
    pad_d, pad_h, pad_w = (padding, padding, padding) if isinstance(padding, int) else padding
    dilation_d, dilation_h, dilation_w = (
        (dilation, dilation, dilation) if isinstance(dilation, int) else dilation
    )

    OD = (D + 2 * pad_d - KD) // stride_d + 1
    OH = (H + 2 * pad_h - KH) // stride_h + 1
    OW = (W + 2 * pad_w - KW) // stride_w + 1

    if isinstance(N, int):
        # 2 * (#output elements) * (reduction size), with dilated kernel extents.
        cfg.add_flop(
            2
            * N
            * OD
            * OH
            * OW
            * CO
            * CI
            * ((KD - 1) * dilation_d + 1)
            * ((KH - 1) * dilation_h + 1)
            * ((KW - 1) * dilation_w + 1)
        )

    cfg.define_knob("algo", range(cudnn.algo_to_index("fwd", "CUDNN_CONVOLUTION_FWD_ALGO_COUNT")))
    if cfg.is_fallback:
        if cudnn.exists():
            # Let CUDNN choose the best algo, based on benchmarks run
            # on the local machine. In the future, this should be
            # based on parameters stored in the Target.
            cfg["algo"] = OtherOptionEntity(-1)
        else:
            cfg["algo"] = OtherOptionEntity(0)

    return cudnn.conv_forward(
        data,
        kernel,
        [pad_d, pad_h, pad_w],
        [stride_d, stride_h, stride_w],
        [dilation_d, dilation_h, dilation_w],
        conv_mode=1,
        tensor_format=tensor_format,
        algo=cfg["algo"].val,
        # Fix: the original passed `conv_dtype=dtype`, but `dtype` is never
        # defined in this function (NameError at runtime). The conversion
        # dtype requested by the caller is `out_dtype`.
        conv_dtype=out_dtype,
    )
@autotvm.register_topi_schedule("conv3d_cudnn.cuda")
def schedule_conv3d_cudnn(_, outs):
    """TOPI schedule callback of cuDNN-offloaded conv3d for cuda gpu.

    The cuDNN convolution is an extern op, so the generic extern schedule
    applies; no template-specific tuning is done here (the cfg argument is
    intentionally ignored).

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of conv3d
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for conv3d.
    """
    return generic.schedule_extern(outs)
| 8,538 | 30.164234 | 98 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv1d_transpose_ncw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Conv1d transpose template for cuda backend"""
import tvm
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import get_const_tuple, traverse_inline
@autotvm.task.register_topi_compute("conv1d_transpose_nchw.cuda")
def conv1d_transpose_ncw(cfg, data, kernel, stride, padding, out_dtype, output_padding):
    """Transposed 1D convolution ncw forward operator.

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template
    Input : tvm.te.Tensor
        3-D with shape [batch, in_channel, inp_width]
    Filter : tvm.te.Tensor
        3-D with shape [in_channel, num_filter, kernel_size]
    stride : tuple of one int
        The spatial stride along width
    padding : int, tuple, or string
        int: padding size
        tuple of 2 ints: (pad_left, pad_right) for left and right padding
        string: ['VALID', 'SAME']
    out_dtype: str
        The output type. This is used in mixed precision
    output_padding : ints
        Used to disambiguate the output shape.

    Returns
    -------
    Output : tvm.te.Tensor
        3-D with shape [batch, out_channel, out_width]
    """
    # Normalize tuple/list arguments down to scalars (width dimension only).
    if isinstance(stride, (tuple, list)):
        stride = stride[0]
    if isinstance(output_padding, (tuple, list)):
        output_padding = output_padding[0]
    assert output_padding < stride
    cfg.stride = stride
    cfg.output_padding = output_padding
    batch, inp_channels, inp_width = get_const_tuple(data.shape)
    _, out_channels, kernel_size = get_const_tuple(kernel.shape)
    pad_left, pad_right = nn.get_pad_tuple1d(padding, kernel_size)
    out_width = (inp_width - 1) * stride + kernel_size - pad_left - pad_right + output_padding
    # Convert the transposed-conv padding into the equivalent direct-conv
    # padding applied to the (conceptually stride-dilated) input.
    pad_left = kernel_size - 1 - pad_left
    pad_right = kernel_size - 1 - pad_right + output_padding
    padded_width = pad_left + inp_width + pad_right
    # NOTE(review): the zero fill value is hard-coded to float32 here and in
    # the kernel pad below; presumably inputs are float32 — confirm for other
    # dtypes before relying on out_dtype != "float32".
    padded_data = te.compute(
        (batch, inp_channels, padded_width),
        lambda n, c, x: tvm.tir.if_then_else(
            tvm.tir.all(x >= pad_left, x < pad_left + inp_width),
            data[n, c, x - pad_left],
            tvm.tir.const(0.0, "float32"),
        ),
        name="data_pad",
    )
    # Kernel is reversed (direct-conv form) and zero-padded up to a multiple
    # of `stride`, so the reduction below can step over it in stride-sized
    # chunks without a remainder loop.
    padded_kernel = te.compute(
        (inp_channels, out_channels, kernel_size + stride - 1),
        lambda ci, co, k: tvm.tir.if_then_else(
            tvm.tir.all(k < kernel_size),
            kernel[ci, co, kernel_size - k - 1],
            tvm.tir.const(0.0, "float32"),
        ),
        name="kernel_pad",
    )
    ci = te.reduce_axis((0, inp_channels), name="ci")
    k = te.reduce_axis((0, tvm.tir.indexdiv(kernel_size + stride - 1, stride)), name="k")
    border = pad_left * (stride - 1)
    # Skip multiplication by 0 values in the input data inserted when stride is greater then 1.
    # During multiplication of kernel by padded data:
    #  Kernel indices are: 0, 1 * stride, 2 * stride, ..., ceil(kernel_size / stride) plus
    #  data offset mod stride
    data_out = te.compute(
        (batch, out_channels, out_width),
        lambda b, co, w: te.sum(
            padded_data[b, ci, tvm.tir.indexdiv(border + w + stride - 1, stride) + k].astype(
                out_dtype
            )
            * padded_kernel[
                ci, co, k * stride + tvm.tir.indexmod(stride - w - border, stride)
            ].astype(out_dtype),
            axis=[ci, k],
        ),
        tag="conv1d_transpose_ncw",
    )
    return data_out
@autotvm.task.register_topi_schedule("conv1d_transpose_nchw.cuda")
def schedule_conv1d_transpose_ncw(cfg, outs):
    """TOPI Schedule callback for conv1d_transpose operator.

    Parameters
    ----------
    cfg: ConfigEntity
        The parameters for this template
    outs: Array of Tensor
        The computation graph description of conv1d transpose
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for conv1d transpose.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])

    def _callback(op):
        if op.tag == "conv1d_transpose_ncw":
            padded_data = op.input_tensors[0]
            padded_kernel = op.input_tensors[1]
            conv = op.output(0)

            ##### space definition begin #####
            n, f, x = s[conv].op.axis
            rc = s[conv].op.reduce_axis[0]
            cfg.define_split("tile_n", cfg.axis(n if isinstance(n, int) else 1), num_outputs=4)
            cfg.define_split("tile_f", cfg.axis(f), num_outputs=4)
            cfg.define_split("tile_x", cfg.axis(x), num_outputs=4)
            cfg.define_split("tile_rc", cfg.axis(rc), num_outputs=3)
            cfg.define_knob("auto_unroll_max_step", [64, 512, 1500])
            target = tvm.target.Target.current()
            if target.kind.name in ["nvptx", "rocm"]:
                cfg.define_knob("unroll_explicit", [1])
            else:
                cfg.define_knob("unroll_explicit", [0, 1])
            ##### space definition end #####

            # Write the convolution into a local (register-scoped) stage and
            # copy to the final output; needed when conv is not the graph output.
            if conv.op in s.outputs:
                output = conv
                OL = s.cache_write(conv, "local")
            else:
                output = s.outputs[0].output(0)
                s[conv].set_scope("local")
                OL = conv

            # The padded stages are pure data rearrangements; fold them into
            # their consumers instead of materializing them.
            s[padded_kernel].compute_inline()
            s[padded_data].compute_inline()

            # tile and bind spatial axes
            n, f, x = s[output].op.axis
            kernel_scope, n = s[output].split(n, nparts=1)
            bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n)
            bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
            bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
            s[output].reorder(bn, bf, bx, vn, vf, vx, tn, tf, tx, ni, fi, xi)
            s[output].bind(bn, te.thread_axis("blockIdx.z"))
            s[output].bind(bf, te.thread_axis("blockIdx.y"))
            s[output].bind(bx, te.thread_axis("blockIdx.x"))
            s[output].bind(vn, te.thread_axis("vthread"))
            s[output].bind(vf, te.thread_axis("vthread"))
            s[output].bind(vx, te.thread_axis("vthread"))
            s[output].bind(tx, te.thread_axis("threadIdx.x"))
            s[OL].compute_at(s[output], tx)

            # tile reduction axes
            n, f, x = s[OL].op.axis
            rc, rx = s[OL].op.reduce_axis
            rco, rcm, rci = cfg["tile_rc"].apply(s, OL, rc)
            s[OL].reorder(rco, rcm, rx, rci, n, f, x)

            # unrolling pragmas are attached to the single-iteration outer scope
            s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
            s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)

    traverse_inline(s, outs[0].op, _callback)
    return s
| 7,537 | 36.69 | 99 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv3d_direct.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""The templates for cuda conv3d operators"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple
def schedule_direct_conv3d_cuda(cfg, s, conv, layout, workload_name):
    """Schedule a direct conv3d for CUDA-like targets (optimized for batch size = 1).

    Tiles the five output axes and four reduction axes according to the
    autotvm config, stages the padded input and kernel through shared memory
    with cooperative fetching, and accumulates into registers.

    Parameters
    ----------
    cfg : autotvm config entity
        Defines/holds the tiling and unrolling knobs for this workload.
    s : tvm.te.Schedule
        The schedule to mutate in place.
    conv : tvm.te.Tensor
        The conv3d output produced by the direct compute definition.
    layout : str
        "NCDHW" or "NDHWC"; determines the output axis order.
    workload_name : str
        Key used to load fallback reference logs when `cfg` is a fallback.
    """
    ##### space definition begin #####
    if layout == "NCDHW":
        n, f, d, y, x = s[conv].op.axis
    elif layout == "NDHWC":
        n, d, y, x, f = s[conv].op.axis
    else:
        raise ValueError(f"not support this layout {layout} yet")

    rc, rd, ry, rx = s[conv].op.reduce_axis
    cfg.define_split("tile_f", f, num_outputs=4)
    cfg.define_split("tile_d", d, num_outputs=4)
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_rc", rc, num_outputs=2)
    # Fix: this split space was previously defined over `ry`, so tile_rd's
    # candidate factors came from the kernel-height extent instead of the
    # kernel-depth extent (wrong whenever KD != KH).
    cfg.define_split("tile_rd", rd, num_outputs=2)
    cfg.define_split("tile_ry", ry, num_outputs=2)
    cfg.define_split("tile_rx", rx, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])

    target = tvm.target.Target.current()
    if target.kind.name in ["nvptx", "rocm"]:
        cfg.define_knob("unroll_explicit", [1])
    else:
        cfg.define_knob("unroll_explicit", [0, 1])

    # fallback support
    if cfg.is_fallback:
        ref_log = autotvm.tophub.load_reference_log(target.kind.name, target.model, workload_name)
        cfg.fallback_with_reference_log(ref_log)
    ##### space definition end #####

    pad_data, kernel = s[conv].op.input_tensors
    s[pad_data].compute_inline()
    if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()

    # Accumulate into a register-scoped stage, then copy to the real output.
    if conv.op in s.outputs:
        output = conv
        OL = s.cache_write(conv, "local")
    else:
        output = s.outputs[0].output(0)
        s[conv].set_scope("local")
        OL = conv

    # create cache stage
    AA = s.cache_read(pad_data, "shared", [OL])
    WW = s.cache_read(kernel, "shared", [OL])

    # tile and bind spatial axes
    n, f, d, y, x = s[output].op.axis
    kernel_scope, n = s[output].split(n, nparts=1)
    bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
    bd, vd, td, di = cfg["tile_d"].apply(s, output, d)
    by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
    bf = s[output].fuse(n, bf)
    s[output].reorder(bf, bd, by, bx, vf, vd, vy, vx, tf, td, ty, tx, fi, di, yi, xi)
    s[output].bind(bf, te.thread_axis("blockIdx.z"))
    s[output].bind(s[output].fuse(bd, by), te.thread_axis("blockIdx.y"))
    s[output].bind(bx, te.thread_axis("blockIdx.x"))
    s[output].bind(vf, te.thread_axis("vthread"))
    s[output].bind(vd, te.thread_axis("vthread"))
    s[output].bind(vy, te.thread_axis("vthread"))
    s[output].bind(vx, te.thread_axis("vthread"))
    s[output].bind(s[output].fuse(td, tf), te.thread_axis("threadIdx.z"))
    s[output].bind(ty, te.thread_axis("threadIdx.y"))
    s[output].bind(tx, te.thread_axis("threadIdx.x"))
    s[OL].compute_at(s[output], tx)

    # tile reduction axes
    n, f, d, y, x = s[OL].op.axis
    rc, rd, ry, rx = s[OL].op.reduce_axis
    rco, rci = cfg["tile_rc"].apply(s, OL, rc)
    rdo, rdi = cfg["tile_rd"].apply(s, OL, rd)
    ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
    rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)
    s[OL].reorder(rco, rdo, ryo, rxo, rci, rdi, ryi, rxi, n, f, d, y, x)

    s[AA].compute_at(s[OL], rxo)
    s[WW].compute_at(s[OL], rxo)

    # cooperative fetching
    for load in [AA, WW]:
        n, f, d, y, x = s[load].op.axis
        fused = s[load].fuse(n, f, d, y, x)
        tz, fused = s[load].split(fused, nparts=cfg["tile_f"].size[2])
        td, fused = s[load].split(fused, nparts=cfg["tile_d"].size[2])
        ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
        tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(s[load].fuse(td, ty), te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))

    # unroll
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)

    # FLOP estimate. The dim names assume NCDHW/OIDHW unpacking, but the
    # product is the same for NDHWC since every extent is multiplied.
    N, CO, OD, OH, OW = get_const_tuple(output.shape)
    _, KD, KH, KW, CI = get_const_tuple(kernel.shape)
    cfg.add_flop(2 * N * OD * OH * OW * CO * CI * KD * KH * KW)
| 5,229 | 38.923664 | 98 | py |
tvm | tvm-main/python/tvm/topi/cuda/sparse.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Sparse operators"""
import numpy as np
import scipy.sparse as sp
import tvm
from tvm import relay, te
from .. import nn
from ..utils import traverse_inline, get_const_tuple, prod, get_const_int, ceil_div
from .transform import schedule_transpose_from_existing
def sparse_dense(data, weight_data, weight_indices, weight_indptr, sparse_lhs=False):
    """
    Computes sparse-dense matrix multiplication of `data` and
    `(weight_data, weight_indices, weight_indptr).T`

    Thin wrapper forwarding to the generic TOPI implementation.

    Parameters
    ----------
    data : tvm.te.Tensor
        2-D with shape [M, K], float32
    weight_data : tvm.te.Tensor
        1-D with shape [nnz] (CSR) or
        3-D with shape [num_blocks, bs_r, bs_c] (BSR)
    weight_indices : tvm.te.Tensor
        1-D with shape [nnz] (CSR) or
        1-D with shape [num_blocks] (BSR)
    weight_indptr : tvm.te.Tensor
        1-D with shape [N + 1] (CSR) or
        1-D with shape [(N + 1) // bs_r] (BSR)
    sparse_lhs : bool
        Whether the sparse operand is the left-hand side of the product.

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [M, N]
    """
    # pylint:disable=unused-argument
    return nn.sparse_dense(data, weight_data, weight_indices, weight_indptr, sparse_lhs)
def schedule_sparse_dense(outs):
    """Create the cuda schedule for sparse dense (both CSR and BSR variants)."""
    # pylint:disable=invalid-name
    s = te.create_schedule([x.op for x in outs])

    def _callback(op):
        if op.tag == "sparse_dense_sp_rhs_bsrmm" or op.tag == "sparse_dense_sp_lhs_bsrmm":
            # BSR path: the tagged op is a reshape over the block matmul.
            y_bsrmm = op.input_tensors[0]
            assert (
                y_bsrmm.op.tag == "sparse_dense_sp_rhs_bsrmm_block"
                or y_bsrmm.op.tag == "sparse_dense_sp_lhs_bsrmm_block"
            )
            out = s.outputs[0].output(0)
            if op not in s.outputs:
                y_reshape = op.output(0)
                s[y_reshape].compute_at(s[out], s[out].op.axis[1])
            (_, c) = s[y_bsrmm].op.reduce_axis
            (m_o, n_o) = s[out].op.axis
            s[out].bind(m_o, te.thread_axis("blockIdx.x"))
            s[out].bind(n_o, te.thread_axis("blockIdx.y"))
            s[y_bsrmm].compute_at(s[out], n_o)
            thread_x = te.thread_axis("threadIdx.x")
            # rfactor the block-column reduction so each thread accumulates a
            # partial sum; only thread 0 writes the final combined result.
            y_bsrmm_factored = s.rfactor(y_bsrmm, c)
            tx = s[y_bsrmm].op.reduce_axis[0]
            s[y_bsrmm].bind(tx, thread_x)
            s[y_bsrmm_factored].compute_at(s[y_bsrmm], tx)
            s[y_bsrmm].set_store_predicate(thread_x.var.equal(0))
            s[out].set_store_predicate(thread_x.var.equal(0))
        elif op.tag == "sparse_dense_sp_lhs_csrmm" or op.tag == "sparse_dense_sp_rhs_csrmm":
            # CSR path: flatten the output and split across one grid dimension.
            out = op.output(0)
            const_size = get_const_int(prod(out.shape))
            fused = s[out].fuse(*s[out].op.axis)
            bx, tx = s[out].split(fused, factor=const_size)
            s[out].bind(tx, te.thread_axis("threadIdx.x"))
            s[out].bind(bx, te.thread_axis("blockIdx.x"))

    traverse_inline(s, outs[0].op, _callback)
    return s
def sparse_dense_tir(data, w_data, w_indices, w_indptr):
    """Compute data * w^T.

    Actually computes (w * data^T) ^ T as data needs to be in column-major
    format for performance reasons.

    Good resources:
    Yang, Carl, Aydın Buluç, and John D. Owens. "Design principles for sparse
    matrix multiplication on the GPU." European Conference on Parallel
    Processing. Springer, Cham, 2018. <- This code is basically row-split from here.
    Gale, Trevor, et al. "Sparse GPU Kernels for Deep Learning." arXiv preprint
    arXiv:2006.10901 (2020).

    Profile with
    `/opt/nvidia/nsight-compute/2020.1.2/ncu -k default_function_kernel1
    --section '.*' -s 1 -c 1 venv/bin/python3 test_topi_sparse.py manual`
    with either default_function_kernel0 for the transpose or
    default_function_kernel1 for the multiply.
    """

    def gen_ir(data, w_data, w_indices, w_indptr, out):
        # Emits the hand-written TIR kernel: one warp per sparse block-row,
        # one thread per dense block-column within the warp.
        # pylint: disable=invalid-name, simplifiable-if-statement
        # TODO(tkonolige): use tensorcores for block multiply
        # TODO(tkonolige): use vectorize on loads
        # TODO(tkonolige): separate implementation if M is small
        # TODO(tkonolige): separate implementation for large block sizes
        ib = tvm.tir.ir_builder.create()

        if tvm.target.Target.current(allow_none=False).kind.name == "cuda":
            use_warp_storage = True
        else:
            # TVMs warp shuffle intrinsics are slow on ROCM because they use
            # LDS (shared memory) to do the shuffling. Instead, we could use
            # ROCM's support for accessing neighboring threads memory, but we
            # those intrinsics aren't accessible from TVM. For now, we just use
            # shared memory. We also default to shared memory on platforms
            # where we do not know how warp storage performs.
            use_warp_storage = False

        warp_size = int(tvm.target.Target.current(allow_none=False).thread_warp_size)
        m = data.shape[1]
        nb = w_indptr.shape[0] - 1
        # treat csr like block size 1 bsr
        if len(w_data.shape) == 1:
            bs_n = 1
            bs_k = 1
        else:
            bs_n = w_data.shape[1]
            bs_k = w_data.shape[2]
        bs_m = bs_n
        mb = m // bs_m
        mi = warp_size
        assert mb >= mi, (
            f"Number of block rows in dense matrix must be larger than warp size: "
            f"{warp_size} vs {mb}."
        )
        mo = ceil_div(mb, mi)
        ni = 1  # TODO(tkonolige): how do I compute the number of warps per block?
        no = ceil_div(nb, ni)
        rowlength_bi = warp_size

        bx = te.thread_axis("blockIdx.x")
        ib.scope_attr(bx, "thread_extent", mo)
        by = te.thread_axis("blockIdx.y")
        ib.scope_attr(by, "thread_extent", no)
        tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(tx, "thread_extent", warp_size)
        warp = te.thread_axis("threadIdx.y")
        ib.scope_attr(warp, "thread_extent", ni)

        out_ptr = ib.buffer_ptr(out)
        data_ptr = ib.buffer_ptr(data)
        w_data_ptr = ib.buffer_ptr(w_data)
        w_indices_ptr = ib.buffer_ptr(w_indices)
        w_indptr_ptr = ib.buffer_ptr(w_indptr)

        n_index = by * ni + warp
        m_index = bx * mi + tx
        row_start = w_indptr_ptr[n_index]

        # Guaranteed to be evenly divisible
        rowlength_bo = ceil_div(w_indptr_ptr[n_index + 1] - row_start, rowlength_bi)

        # thread local storage for bs_m x bs_n block
        block = ib.allocate(data.dtype, (bs_m, bs_n), name="block", scope="local")
        data_cache = ib.allocate(data.dtype, (mi, bs_m, bs_k), name="data_cache", scope="local")
        if use_warp_storage:
            indices = ib.allocate(w_indices.dtype, (rowlength_bi,), name="indices", scope="warp")
            w_data_cache = ib.allocate(
                w_data.dtype, (rowlength_bi, bs_n, bs_k), name="w_data_cache", scope="warp"
            )
        else:
            indices = ib.allocate(
                w_indices.dtype, (ni, rowlength_bi), name="indices", scope="shared"
            )
            w_data_cache = ib.allocate(
                w_data.dtype, (ni, rowlength_bi, bs_n, bs_k), name="w_data_cache", scope="shared"
            )

        # zero block
        with ib.for_range(0, bs_m, name="x", kind="unroll") as x:
            with ib.for_range(0, bs_n, name="y", kind="unroll") as y:
                block[x, y] = 0.0

        # compute into thread local storage using warp_size chunks
        with ib.for_range(0, rowlength_bo, name="bb") as bb:
            elem_idx = bb * rowlength_bi + tx
            # Cache indices. Guaranteed to be multiple of warp_size.
            if use_warp_storage:
                indices[tx] = w_indices_ptr[row_start + elem_idx]
            else:
                indices[warp, tx] = w_indices_ptr[row_start + elem_idx]
            # cache dense matrix
            # each thread has a row
            # TODO: ideally we could vectorize this
            with ib.for_range(0, rowlength_bi, name="bi") as bi:
                with ib.for_range(0, bs_m, name="x", kind="unroll") as x:
                    with ib.for_range(0, bs_k, name="z", kind="unroll") as z:
                        # This memory access should be out of bounds when
                        # m_index >= mb (which occurs when the dense matrix
                        # rows % 32 != 0), but it seems to work just fine...
                        if use_warp_storage:
                            ind = indices[bi]
                        else:
                            ind = indices[warp, bi]
                        data_cache[bi, x, z] = data_ptr[ind * bs_k + z, m_index * bs_m + x]
            # cache w_data
            elem_idx = bb * rowlength_bi + tx
            with ib.for_range(0, bs_n, name="y", kind="unroll") as y:
                with ib.for_range(0, bs_k, name="z", kind="unroll") as z:
                    data_indices = [row_start + elem_idx] + (
                        [y, z] if len(w_data.shape) > 1 else []
                    )
                    cache_indices = [tx, y, z] if use_warp_storage else [warp, tx, y, z]
                    w_data_cache[cache_indices] = w_data_ptr[data_indices]
            with ib.for_range(0, mi, name="i") as i:
                # thread local block matmul
                with ib.for_range(0, bs_m, name="x", kind="unroll") as x:
                    with ib.for_range(0, bs_n, name="y", kind="unroll") as y:
                        with ib.for_range(0, bs_k, name="z", kind="unroll") as z:
                            if use_warp_storage:
                                w = w_data_cache[i, y, z]
                            else:
                                w = w_data_cache[warp, i, y, z]
                            block[x, y] += data_cache[i, x, z] * w

        # store results
        with ib.for_range(0, bs_m, name="x", kind="unroll") as x:
            with ib.for_range(0, bs_n, name="y", kind="unroll") as y:
                with ib.if_scope(m_index < mb):
                    with ib.if_scope(n_index < nb):
                        # It doesn't seem like we would be getting coelesced
                        # writes here, but it doesn't seem to matter
                        out_ptr[m_index * bs_m + x, n_index * bs_n + y] = block[x, y]

        return ib.get()

    # Transpose data up front so the kernel reads it column-major.
    data_t = tvm.topi.transpose(data)
    # handle csr
    if len(w_data.shape) == 1:
        blocksize = 1
    else:
        blocksize = w_data.shape[1]
    out_shape = (data_t.shape[1], (w_indptr.shape[0] - 1) * blocksize)
    out_buf = tvm.tir.decl_buffer(out_shape, data.dtype, "out_buf")
    out = te.extern(
        [out_shape],
        [data_t, w_data, w_indices, w_indptr, data],
        lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
        dtype=data.dtype,
        out_buffers=[out_buf],
        name="sparse_dense_gpu",
        tag="sparse_dense_gpu",
    )
    return out
def is_valid_for_sparse_dense_padded(data, weight_data):
    """
    Check whether input is applicable for sparse_dense_padded op.
    If not we should fall back to default scheduling.

    The padded kernel requires at least one full warp of dense block-rows.
    """
    # pylint:disable=invalid-name
    warp_size = int(tvm.target.Target.current(allow_none=False).thread_warp_size)
    # If there are multiple alter_ops in a model, the first alteration does not
    # run type inference for the subsequent ones. In this case, we don't have
    # the shape information, so we run the inferencer manually.
    try:
        m = get_const_tuple(data.checked_type.shape)[1]
    except ValueError:
        data_infered = relay.transform.InferType()(tvm.IRModule.from_expr(data))["main"]
        m = get_const_tuple(data_infered.ret_type.shape)[1]
    bs_m = 1 if len(weight_data.shape) == 1 else weight_data.shape[1]
    return m // bs_m >= warp_size
def sparse_dense_padded(data, weight_data, weight_indices, weight_indptr, sparse_lhs=False):
    """
    Computes sparse-dense matrix multiplication of `data` and
    `(weight_data, weight_indices, weight_indptr).T`

    This variation uses a padded matrix where all row lengths are a multiple of the warp size.

    Parameters
    ----------
    data : tvm.te.Tensor
        2-D with shape [M, K], float32
    weight_data : tvm.te.Tensor
        1-D with shape [nnz] (CSR) or
        3-D with shape [num_blocks, bs_r, bs_c] (BSR)
    weight_indices : tvm.te.Tensor
        1-D with shape [nnz] (CSR) or
        1-D with shape [num_blocks] (BSR)
    weight_indptr : tvm.te.Tensor
        1-D with shape [N + 1] (CSR) or
        1-D with shape [(N + 1) // bs_r] (BSR)
    sparse_lhs : bool
        Must be False; a sparse left-hand operand is not supported here.

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [M, N]
    """
    # TODO(ANSHUMAN87): Handle for sparse_lhs case too
    assert not sparse_lhs, "Currently only sparse weight is supported."
    return sparse_dense_tir(data, weight_data, weight_indices, weight_indptr)
def schedule_sparse_dense_padded(outs):
    """Create schedule for the padded sparse dense kernel.

    The compute is a hand-written extern kernel, so the only stage scheduled
    here is the dense-matrix transpose feeding it.
    """
    # XXX: this will fail if we don't include the data_t Tensor in the schedule
    # ops. Maybe create_schedule should do some analysis so this isn't
    # necessary
    data_t = outs[0].op.input_tensors[0]
    s = te.create_schedule([outs[0].op, data_t.op])
    schedule_transpose_from_existing(s, outs[0].op.input_tensors[0])
    return s
def pad_sparse_matrix(matrix, blocksize):
    """Pad each block-row of a BSR matrix with zero blocks so its block count
    is a multiple of `blocksize`.

    The padding blocks are all zeros and are appended with repeated column
    indices; BSR treats duplicate entries as summed, so the represented
    matrix value is unchanged. Returns a new `scipy.sparse.bsr_matrix` with
    the same shape.
    """
    assert isinstance(matrix, sp.bsr_matrix)

    bs_r = matrix.blocksize[0]
    num_block_rows = matrix.shape[0] // bs_r

    # Zero blocks to append per block-row: (-len) % blocksize is 0 for an
    # already-aligned row, otherwise the distance to the next multiple.
    pad_counts = np.zeros(matrix.shape[0], dtype=matrix.indptr.dtype)
    for row in range(num_block_rows):
        row_len = matrix.indptr[row + 1] - matrix.indptr[row]
        pad_counts[row] = (-row_len) % blocksize

    total_pad = np.sum(pad_counts)
    new_indices = np.zeros(matrix.indices.shape[0] + total_pad, dtype=matrix.indices.dtype)
    new_data = np.zeros(
        (matrix.data.shape[0] + total_pad, matrix.data.shape[1], matrix.data.shape[2]),
        dtype=matrix.data.dtype,
    )
    new_indptr = np.zeros(num_block_rows + 1, dtype=matrix.indptr.dtype)
    new_indptr[: matrix.indptr.shape[0]] = matrix.indptr

    # Copy each row's blocks into the widened arrays; the tail of each row
    # (the padding) stays zero-initialized.
    for row in range(num_block_rows):
        old_start, old_end = matrix.indptr[row], matrix.indptr[row + 1]
        new_indptr[row + 1] = new_indptr[row] + pad_counts[row] + (old_end - old_start)
        copy_end = new_indptr[row + 1] - pad_counts[row]
        new_indices[new_indptr[row] : copy_end] = matrix.indices[old_start:old_end]
        new_data[new_indptr[row] : copy_end, :, :] = matrix.data[old_start:old_end, :, :]

    return sp.bsr_matrix((new_data, new_indices, new_indptr), matrix.shape)
@nn.sparse_dense_alter_layout.register(["cuda", "gpu", "rocm"])
def _alter_sparse_dense_layout(_attrs, inputs, _tinfos, _out_type):
    """With cuda, we modify use alter_op_layout to swap the default
    sparse_dense implementation for one that operates on a padded matrix. We
    also pad the matrix.

    Returns the rewritten relay call, or None to keep the default lowering.
    """
    # TODO(ANSHUMAN87): Handle for sparse_lhs case too
    # Only rewrite when all three weight pieces are constants (so we can pad
    # them at compile time) and the padded kernel's size constraint holds.
    if (
        isinstance(inputs[1], relay.Constant)
        and isinstance(inputs[2], relay.Constant)
        and isinstance(inputs[3], relay.Constant)
        and is_valid_for_sparse_dense_padded(inputs[0], inputs[1].data.numpy())
    ):
        # A 1-D data array means CSR; convert it to BSR before padding.
        if len(inputs[1].data.numpy().shape) == 1:
            sparse_matrix = sp.csr_matrix(
                (inputs[1].data.numpy(), inputs[2].data.numpy(), inputs[3].data.numpy())
            ).tobsr()
        else:
            sparse_matrix = sp.bsr_matrix(
                (inputs[1].data.numpy(), inputs[2].data.numpy(), inputs[3].data.numpy())
            )
        warp_size = int(tvm.target.Target.current(allow_none=False).thread_warp_size)
        sparse_matrix = pad_sparse_matrix(sparse_matrix, warp_size)
        return relay.nn._make.sparse_dense_padded(
            inputs[0],
            relay.Constant(tvm.nd.array(sparse_matrix.data)),
            relay.Constant(tvm.nd.array(sparse_matrix.indices)),
            relay.Constant(tvm.nd.array(sparse_matrix.indptr)),
        )
    return None
| 17,053 | 39.508314 | 97 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.