repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tvm | tvm-main/python/tvm/topi/cuda/conv3d_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Conv3D alter op and legalize functions for cuda backend"""
import logging
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from .. import nn
from ..utils import get_const_tuple
from .conv3d_winograd import _infer_tile_size
logger = logging.getLogger("topi")
@nn.conv3d_alter_layout.register(["cuda", "gpu"])
def _alter_conv3d_layout(attrs, inputs, tinfos, out_type):
    """Alter the layout of a conv3d op for the cuda/gpu targets.

    When the chosen AutoTVM implementation is the NCDHW winograd template,
    rewrite the op so the weight transform is pre-computed at compile time
    (``contrib_conv3d_winograd_weight_transform`` +
    ``contrib_conv3d_winograd_without_weight_transform``). Returns the
    rewritten relay expression, or None to keep the op unchanged.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of the original conv3d op.
    inputs : list of relay.Expr
        Relay inputs (data, kernel) of the original op.
    tinfos : list of te.Tensor
        Placeholder tensors describing the input types.
    out_type : relay.Type
        Output type of the original op.

    Returns
    -------
    relay.Expr or None
        The rewritten expression, or None when no rewrite applies.
    """
    target = tvm.target.Target.current(allow_none=False)
    dispatch_ctx = autotvm.task.DispatchContext.current

    # Ask the TE compiler which implementation would be selected for this op,
    # so we can recover the AutoTVM workload it corresponds to.
    _, outs = relay.backend.te_compiler.select_implementation(
        relay.op.get("nn.conv3d"), attrs, tinfos, out_type, target
    )
    workload = autotvm.task.get_workload(outs)
    if workload is None:
        # The best implementation is not an AutoTVM template,
        # we then assume it's not necessary to alter this op.
        return None
    cfg = dispatch_ctx.query(target, workload)
    if cfg.is_fallback:  # if is fallback, clear query cache and return None
        autotvm.task.clear_fallback_cache(target, workload)
        return None

    # workload[0] is the template name the config was tuned for.
    topi_tmpl = workload[0]
    new_attrs = {k: attrs[k] for k in attrs.keys()}

    strides = attrs.get_int_tuple("strides")
    padding = attrs.get_int_tuple("padding")
    dilation = attrs.get_int_tuple("dilation")
    groups = attrs.get_int("groups")
    data_layout = attrs["data_layout"]
    kernel_layout = attrs["kernel_layout"]
    data, kernel = tinfos
    out_dtype = out_type.dtype

    if topi_tmpl == "conv3d_ncdhw_winograd.cuda":
        # Winograd weight pre-transform does not support dilation.
        if dilation != (1, 1, 1):
            logger.warning("Does not support weight pre-transform for dilated 3D convolution.")
            return None

        assert data_layout == "NCDHW" and kernel_layout == "OIDHW"
        N, CI, D, H, W = get_const_tuple(data.shape)
        CO, _, KD, KH, KW = get_const_tuple(kernel.shape)

        # Pre-compute weight transformation in winograd
        tile_size = _infer_tile_size(tinfos[0], tinfos[1])

        weight = relay.nn.contrib_conv3d_winograd_weight_transform(inputs[1], tile_size=tile_size)
        new_attrs["tile_size"] = tile_size
        new_attrs["channels"] = CO

        # Store the same config for the altered operators (workload)
        new_data = data
        # Check if depth is transformed or not: the transformed-weight placeholder
        # shape differs depending on whether the depth dimension participates in
        # the winograd transform.
        if 2 < KD < 8 and KD == KH:
            new_weight = te.placeholder(
                (KD + tile_size - 1, KH + tile_size - 1, KW + tile_size - 1, CO, CI),
                dtype=kernel.dtype,
            )
        else:
            new_weight = te.placeholder(
                (KH + tile_size - 1, KW + tile_size - 1, KD, CO, CI), dtype=kernel.dtype
            )
        # Re-register the tuned config under the "without weight transform"
        # workload so dispatch finds it for the rewritten op.
        new_workload = autotvm.task.args_to_workload(
            [new_data, new_weight, strides, padding, dilation, out_dtype],
            "conv3d_ncdhw_winograd_without_weight_transform.cuda",
        )
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.contrib_conv3d_winograd_without_weight_transform(
            inputs[0], weight, **new_attrs
        )

    return None
| 3,910 | 37.722772 | 98 | py |
tvm | tvm-main/python/tvm/topi/cuda/sparse_reshape.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks
"""Sparse_Reshape operator"""
import tvm
from tvm import te
from ...tir import decl_buffer, ir_builder, Cast
from ...te import extern, div, floordiv, floormod
from ..utils import ceil_div
def sparse_reshape(
    sparse_indices,
    prev_shape,
    new_shape,
    new_sparse_indices_shape,
    new_shape_shape,
):
    """
    Reshape a Sparse Tensor

    Parameters
    ----------
    sparse_indices : relay.Expr
        A 2-D tensor[N, n_dim] of integers containing location of sparse values, where N is the
        number of sparse values and n_dim is the number of dimensions of the dense_shape
    prev_shape : relay.Expr
        A 1-D tensor containing the previous shape of the dense tensor
    new_shape : relay.Expr
        A 1-D tensor containing the new shape of the dense tensor
    new_sparse_indices_shape : relay.Expr
        Shape of the output sparse-indices tensor
    new_shape_shape : relay.Expr
        Shape of the output dense-shape tensor

    Returns
    -------
    result: relay.Expr
        Output tensor.

    Examples
    --------
    .. code-block:: python

        sparse_indices = [[0, 0, 0],
                          [0, 0, 1],
                          [0, 1, 0],
                          [1, 0, 0],
                          [1, 2, 3]]
        prev_shape = [2, 3, 4]
        new_shape = [9, -1]
        new_sparse_indices, new_shape = relay.sparse_reshape(sparse_indices,
                                                             prev_shape,
                                                             new_shape)
        new_sparse_indices = [[0, 0],
                              [0, 1],
                              [1, 2],
                              [4, 2],
                              [8, 1]]
        new_shape = [9, 4]
    """

    def gen_ir(
        sparse_indices_ptr,
        prev_shape_ptr,
        new_shape_ptr,
        new_sparse_indices_ptr,
        out_new_shape_ptr,
    ):
        ib = ir_builder.create()

        sparse_indices = ib.buffer_ptr(sparse_indices_ptr)
        prev_shape = ib.buffer_ptr(prev_shape_ptr)
        new_shape = ib.buffer_ptr(new_shape_ptr)
        out_new_shape = ib.buffer_ptr(out_new_shape_ptr)
        new_sparse_indices = ib.buffer_ptr(new_sparse_indices_ptr)

        prev_shape_size = prev_shape_ptr.shape[0]
        new_shape_size = new_shape_ptr.shape[0]

        # Row-major strides of the previous shape (used to flatten indices).
        multipliers = ib.allocate(
            new_shape_ptr.dtype, (prev_shape_size,), name="multipliers", scope="global"
        )
        # Row-major strides of the new shape (used to unflatten indices).
        dividers = ib.allocate(
            new_shape_ptr.dtype, (new_shape_size,), name="dividers", scope="global"
        )
        flattened_indices = ib.allocate(
            new_shape_ptr.dtype,
            (sparse_indices_ptr.shape[0],),
            name="flattened_indices",
            scope="global",
        )
        total_ele = ib.allocate(new_shape_ptr.dtype, (1,), name="total_ele", scope="global")
        division_total_ele = ib.allocate(
            new_shape_ptr.dtype, (1,), name="division_total_ele", scope="global"
        )
        equal_shape = ib.allocate("bool", (1,), name="equal_shape", scope="global")
        max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)

        with ib.new_scope():
            # The computation in this block is very very miniscule since we are just iterating over
            # shape tensors which are very small (< 10) and there is no need of parallelization
            nthread_tx = 1
            nthread_bx = 1
            tx = te.thread_axis("threadIdx.x")
            bx = te.thread_axis("blockIdx.x")
            ib.scope_attr(tx, "thread_extent", nthread_tx)
            ib.scope_attr(bx, "thread_extent", nthread_bx)

            total_ele[0] = prev_shape[0]

            # Cumulative Reverse Exclusive Multiply
            multipliers[prev_shape_size - 1] = Cast(new_shape_ptr.dtype, 1)
            with ib.for_range(0, prev_shape_size - 1) as i_:
                i = i_ + 1
                multipliers[prev_shape_size - 1 - i] = (
                    prev_shape[prev_shape_size - i] * multipliers[prev_shape_size - i]
                )
                total_ele[0] *= prev_shape[prev_shape_size - i]

            # Product of the explicitly-given (non -1) dims of new_shape.
            division_total_ele[0] = Cast(new_shape_ptr.dtype, 1)
            with ib.for_range(0, new_shape_size) as i:
                with ib.if_scope(new_shape[i] != -1):
                    division_total_ele[0] *= new_shape[i]

            # Compute true output shape (replace negative ones)
            with ib.for_range(0, new_shape_size) as i:
                with ib.if_scope(new_shape[i] == -1):
                    out_new_shape[i] = Cast(
                        new_shape_ptr.dtype, div(total_ele[0], division_total_ele[0])
                    )
                with ib.else_scope():
                    out_new_shape[i] = new_shape[i]

            # Check if prev_shape and new_shape are equal
            equal_shape[0] = True
            with ib.if_scope(prev_shape_size == new_shape_size):
                with ib.for_range(0, prev_shape_size) as i:
                    with ib.if_scope(prev_shape[i] != out_new_shape[i]):
                        equal_shape[0] = False
            with ib.else_scope():
                equal_shape[0] = False

        with ib.new_scope():
            nthread_tx = max_threads
            nthread_bx = ceil_div(sparse_indices_ptr.shape[0], max_threads)
            tx = te.thread_axis("threadIdx.x")
            bx = te.thread_axis("blockIdx.x")
            ib.scope_attr(tx, "thread_extent", nthread_tx)
            ib.scope_attr(bx, "thread_extent", nthread_bx)

            row_number = bx * max_threads + tx

            # Return same inputs if shapes are equal
            with ib.if_scope(equal_shape[0]):
                with ib.if_scope(row_number < sparse_indices_ptr.shape[0]):
                    with ib.for_range(0, sparse_indices_ptr.shape[1]) as j:
                        new_sparse_indices[row_number, j] = sparse_indices[row_number, j]

            # Else compute new_sparse_indices
            with ib.else_scope():
                dividers[new_shape_size - 1] = Cast(new_shape_ptr.dtype, 1)
                with ib.for_range(0, new_shape_size - 1) as i_:
                    i = i_ + 1
                    dividers[new_shape_size - 1 - i] = (
                        dividers[new_shape_size - i] * out_new_shape[new_shape_size - i]
                    )

                # Flatten each N-D index into a single linear offset.
                with ib.if_scope(row_number < sparse_indices_ptr.shape[0]):
                    flattened_indices[row_number] = Cast(new_shape_ptr.dtype, 0)
                    with ib.for_range(0, sparse_indices_ptr.shape[1]) as j:
                        flattened_indices[row_number] += (
                            sparse_indices[row_number, j] * multipliers[j]
                        )

                # Unflatten the linear offset into the new shape's coordinates.
                with ib.if_scope(row_number < sparse_indices_ptr.shape[0]):
                    current_element = ib.allocate(
                        new_shape_ptr.dtype, (1,), name="current_element", scope="local"
                    )
                    current_element[0] = flattened_indices[row_number]

                    with ib.for_range(0, new_sparse_indices_ptr.shape[1]) as j:
                        new_sparse_indices[row_number, j] = Cast(
                            sparse_indices_ptr.dtype, floordiv(current_element[0], dividers[j])
                        )
                        current_element[0] = floormod(current_element[0], dividers[j])

        return ib.get()

    new_sparse_indices_buf = decl_buffer(
        new_sparse_indices_shape, sparse_indices.dtype, "new_sparse_indices_buf"
    )
    new_shape_buf = decl_buffer(new_shape_shape, prev_shape.dtype, "new_shape_buf")

    return extern(
        [new_sparse_indices_shape, new_shape_shape],
        [sparse_indices, prev_shape, new_shape],
        lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], outs[0], outs[1]),
        out_buffers=[new_sparse_indices_buf, new_shape_buf],
        name="sparse_reshape_cuda",
        tag="sparse_reshape_cuda",
    )
| 8,725 | 40.35545 | 99 | py |
tvm | tvm-main/python/tvm/topi/cuda/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CUDA implementations of transforms"""
import tvm
from ... import te
from ...target import Target
from ..utils import traverse_inline
def schedule_transpose(outs):
    """Build and return a schedule for an unfused transpose."""
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sched = te.create_schedule([tensor.op for tensor in outs])
    schedule_transpose_from_existing(sched, outs[0])
    return sched
def schedule_transpose_from_existing(s, out):
    """Schedule for transpose on the gpu.

    Roughly follows this:
    https://developer.nvidia.com/blog/efficient-matrix-transpose-cuda-cc/, but
    without the padding for shared memory. For better performance, we could
    rewrite it in tir to add the padding. Also, rewriting in tir would allow
    use to use warp shuffles instead of shared memory (see
    https://github.com/bryancatanzaro/trove).
    """

    def _callback(op):
        # pylint: disable=invalid-name
        axis_m, axis_n = s[op].op.axis
        warp = int(Target.current(allow_none=False).thread_warp_size)
        # Tile both axes by the warp size and map tiles onto the block grid.
        n_outer, n_inner = s[op].split(axis_n, factor=warp)
        m_outer, m_inner = s[op].split(axis_m, factor=warp)
        s[op].reorder(m_outer, n_outer, m_inner, n_inner)
        s[op].bind(m_outer, te.thread_axis("blockIdx.x"))
        s[op].bind(n_outer, te.thread_axis("blockIdx.y"))
        # Stage the input tile through shared memory.
        cache = s.cache_read(op.input_tensors[0], "shared", op)
        s[cache].compute_at(s[op], n_outer)
        tx = te.thread_axis("threadIdx.x")
        ty = te.thread_axis("threadIdx.y")
        s[op].bind(n_inner, tx)
        # This is a hack to make the scheduling language realize that this axis
        # can be scheduled.
        hack_axis, _ = s[cache].split(s[cache].op.axis[1], factor=1)
        s[cache].bind(hack_axis, tx)
        # Use 4 warps per block. Slightly faster than 1 warp per block
        quarter, _ = s[op].split(m_inner, nparts=4)
        s[op].bind(quarter, ty)
        quarter, _ = s[cache].split(s[cache].op.axis[0], nparts=4)
        s[cache].bind(quarter, ty)

    traverse_inline(s, out.op, _callback)
def _invert_permutation_ir(data, out):
    """Low level IR to get invert_permutation.

    Parameters
    ----------
    data : Buffer
        Input data. 1-D Buffer with shape [elem_num].

    out : Buffer
        1D buffer for invert permutation result with the same shape with data.

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    num_elems = data.shape[0]
    builder = tvm.tir.ir_builder.create()
    src = builder.buffer_ptr(data)
    dst = builder.buffer_ptr(out)

    max_threads = int(Target.current(allow_none=False).max_num_threads)
    thread_x = te.thread_axis("threadIdx.x")
    block_x = te.thread_axis("blockIdx.x")
    builder.scope_attr(thread_x, "thread_extent", max_threads)
    builder.scope_attr(block_x, "thread_extent", num_elems // max_threads + 1)
    tid = block_x * max_threads + thread_x

    # One thread per element: write its position at the slot its value names.
    with builder.if_scope(tid < num_elems):
        dst[src[tid]] = tid

    return builder.get()
def invert_permutation(data):
    """Compute definition of invert_permutation.

    For an output tensor y and an input tensor x, this operation computes the following:
    y[x[i]] = i for i in [0, 1, ..., len(x) - 1]

    Parameters
    ----------
    data : tvm.te.Tensor
        1-D tensor

    Returns
    -------
    out : tvm.te.Tensor
    """
    in_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
    result_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "out_buf", data_alignment=8)

    return te.extern(
        [data.shape],
        [data],
        lambda ins, outs: _invert_permutation_ir(ins[0], outs[0]),
        in_buffers=[in_buf],
        out_buffers=[result_buf],
        name="invert_permutation",
        tag="invert_permutation_gpu",
    )
| 4,600 | 32.100719 | 88 | py |
tvm | tvm-main/python/tvm/topi/cuda/searchsorted.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""searchsorted operator for GPU"""
import tvm
from tvm import te
from .. import utils
from ..searchsorted import binary_search
def searchsorted(sorted_sequence, values, right, out_dtype="int64"):
    """Find indices where elements should be inserted to maintain order.
    If `sorted_sequence` is N-dimensional, the innermost dimension of
    `values` are searched in the corresponding dimension of `sorted_sequence`.

    Parameters
    ----------
    sorted_sequence : te.Tensor
        N-D or 1-D Tensor, containing monotonically increasing sequence
        on the innermost dimension.

    values : te.Tensor
        N-D Tensor containing the search values. When `sorted_sequence` is 1-D,
        the shape of `values` can be arbitrary. Otherwise, ranks of `sorted_sequence`
        and `values` must be the same, and outer N-1 axes must have the same size.

    right : bool, optional
        Controls which index is returned if a value lands exactly on one of sorted values. If
        False, the index of the first suitable location found is given. If true, return the
        last such index. If there is no suitable index, return either 0 or N (where N is the
        size of the innermost dimension).

    out_dtype : string, optional
        The data type of the output indices.

    Returns
    -------
    indices : te.Tensor
        Tensor with same shape as values, representing the indices of
        elements of `values` if they are inserted in `sorted_sequence`.
    """

    def ir(sorted_sequence, values, indices):
        ib = tvm.tir.ir_builder.create()
        sorted_sequence_shape = sorted_sequence.shape
        values_shape = values.shape
        num_search = utils.prod(values_shape)
        search_range = sorted_sequence_shape[-1]
        sorted_sequence = ib.buffer_ptr(sorted_sequence)
        values = ib.buffer_ptr(values)
        indices = ib.buffer_ptr(indices)

        # One GPU thread per search value; round block count up.
        max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
        bx = te.thread_axis("blockIdx.x")
        tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(
            bx, "thread_extent", tvm.tir.indexdiv(num_search + max_threads - 1, max_threads)
        )
        ib.scope_attr(tx, "thread_extent", max_threads)
        tid = bx * max_threads + tx

        with ib.if_scope(tid < num_search):
            if len(sorted_sequence_shape) == 1:
                # 1-D sequence: every value searches the same sequence.
                sequence_offset = 0
            else:
                # N-D: each row of values searches its own row of
                # sorted_sequence (flat offset = row index * row length).
                sequence_id = tid // values_shape[-1]
                sequence_offset = sequence_id * search_range
            indices[tid] = binary_search(
                ib,
                sequence_offset,
                search_range,
                sorted_sequence,
                values[tid],
                right,
                out_dtype,
            )
        return ib.get()

    return te.extern(
        values.shape,
        [sorted_sequence, values],
        lambda ins, outs: ir(ins[0], ins[1], outs[0]),
        name="searchsorted",
        dtype=out_dtype,
    )
| 3,846 | 36.349515 | 93 | py |
tvm | tvm-main/python/tvm/topi/cuda/scatter_elements.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Scatter operator """
import tvm
from tvm import te, tir
from ..utils import ceil_div, get_const_int
from ..math import cast
from .nms import atomic_add
def gen_scatter_add_1d_atomic(data, indices, updates, out, axis, _):
    """Generate ir for scatter elements for reduction sum for 1d inputs,
    using atomic_add instruction

    Parameters
    ----------
    data : tir.Tensor
        The input data to the operator.

    indices : tir.Tensor
        The index locations to update.

    updates : tir.Tensor
        The values to update.

    out : tir.Tensor
        The output tensor.

    axis : int
        The axis to scatter on

    Returns
    -------
    ret : tir
        The computational ir.
    """
    assert axis == 0
    n = data.shape[0]

    ib = tvm.tir.ir_builder.create()

    out_ptr = ib.buffer_ptr(out)
    data_ptr = ib.buffer_ptr(data)

    max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    nthread_tx = max_threads

    # Kernel 1: copy the input into the output, one element per thread.
    with ib.new_scope():
        nthread_bx = ceil_div(n, nthread_tx)
        tx = te.thread_axis("threadIdx.x")
        bx = te.thread_axis("blockIdx.x")
        ib.scope_attr(tx, "thread_extent", nthread_tx)
        ib.scope_attr(bx, "thread_extent", nthread_bx)
        tid = bx * nthread_tx + tx
        with ib.if_scope(tid < n):
            out_ptr[tid] = data_ptr[tid]

    indices_ptr = ib.buffer_ptr(indices)
    updates_ptr = ib.buffer_ptr(updates)

    ni = indices.shape[0]

    # atomic_add requires a destination for its return value even when unused.
    atomic_add_return = ib.allocate(updates.dtype, (1,), name="atomic_add_return", scope="local")

    # Kernel 2: one thread per update; atomic_add makes concurrent additions
    # into the same output slot safe.
    with ib.new_scope():
        nthread_bx = ceil_div(ni, nthread_tx)
        tx = te.thread_axis("threadIdx.x")
        bx = te.thread_axis("blockIdx.x")
        ib.scope_attr(tx, "thread_extent", nthread_tx)
        ib.scope_attr(bx, "thread_extent", nthread_bx)
        tid = bx * nthread_tx + tx

        with ib.if_scope(tid < ni):
            index = indices_ptr[tid]
            # Negative indices wrap around to the end (Python-style).
            with ib.if_scope(index < 0):
                atomic_add_return[0] = atomic_add(
                    tvm.tir.call_intrin("handle", "tir.address_of", out_ptr[index + n]),
                    updates_ptr[tid],
                )
            with ib.else_scope():
                atomic_add_return[0] = atomic_add(
                    tvm.tir.call_intrin("handle", "tir.address_of", out_ptr[index]),
                    updates_ptr[tid],
                )

    return ib.get()
def gen_ir(data, indices, updates, out, axis, reduce_func):
    """Generate ir for scatter elements

    Parameters
    ----------
    data : tir.Tensor
        The input data to the operator.

    indices : tir.Tensor
        The index locations to update.

    updates : tir.Tensor
        The values to update.

    out : tir.Tensor
        The output tensor.

    axis : int
        The axis to scatter on

    reduce_func : Any
        The function reduced update and output to output

    Returns
    -------
    ret : tir
        The computational ir.
    """
    ib = tir.ir_builder.create()

    data_ptr = ib.buffer_ptr(data)
    indices_ptr = ib.buffer_ptr(indices)
    updates_ptr = ib.buffer_ptr(updates)
    out_ptr = ib.buffer_ptr(out)

    # Prepare ranges and strides
    shape = data.shape
    if axis < 0:
        axis = len(shape) + axis
    axis_range = cast(shape[axis], indices.dtype)

    # Flatten data's shape into (before-axis, axis, after-axis) extents.
    before_axis_range = 1
    after_axis_range = 1
    for i, value in enumerate(shape, 0):
        if i < axis:
            before_axis_range *= value
        elif i > axis:
            after_axis_range *= value
    before_axis_stride = axis_range * after_axis_range
    full_range = before_axis_range * before_axis_stride

    # Same decomposition for the indices/updates shape (may differ from data's).
    ind_shape = indices.shape
    ind_axis_range = ind_shape[axis]

    ind_before_axis_range = 1
    ind_after_axis_range = 1
    for i, value in enumerate(ind_shape, 0):
        if i < axis:
            ind_before_axis_range *= value
        elif i > axis:
            ind_after_axis_range *= value
    ind_before_axis_stride = ind_axis_range * ind_after_axis_range
    # Number of (before, after) coordinate pairs, i.e. all positions excluding
    # the scatter axis itself.
    ind_full_range_excl_axis = ind_before_axis_range * ind_after_axis_range

    max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)

    # Copy initial input data to output
    with ib.new_scope():
        num_blocks = ceil_div(full_range, max_threads)
        bx = te.thread_axis("blockIdx.x")
        tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(bx, "thread_extent", num_blocks)
        ib.scope_attr(tx, "thread_extent", max_threads)

        index = bx * max_threads + tx
        with ib.if_scope(index < full_range):
            out_ptr[index] = data_ptr[index]

    # One thread per (before, after) pair; each thread walks the scatter axis
    # sequentially so updates along that axis are applied in order.
    with ib.new_scope():
        num_blocks_2 = ceil_div(ind_full_range_excl_axis, max_threads)
        bx2 = te.thread_axis("blockIdx.x")
        tx2 = te.thread_axis("threadIdx.x")
        ib.scope_attr(bx2, "thread_extent", num_blocks_2)
        ib.scope_attr(tx2, "thread_extent", max_threads)

        ind_fused = bx2 * max_threads + tx2
        with ib.if_scope(ind_fused < ind_full_range_excl_axis):
            i = ind_fused // ind_after_axis_range
            j = ind_fused % ind_after_axis_range
            pre_index1 = i * ind_before_axis_stride + j
            pre_index2 = i * before_axis_stride + j
            with ib.for_range(0, ind_axis_range, "k") as k:
                # Offset along indices or updates
                index1 = pre_index1 + k * ind_after_axis_range
                # Get index and shift to positive side if need
                new_index = indices_ptr[index1]
                shifted_index = new_index + (new_index < 0) * axis_range
                # Offset along data
                index2 = pre_index2 + shifted_index * after_axis_range
                reduce_func(out_ptr, index2, updates_ptr[index1])

    return ib.get()
def scatter_elements(data, indices, updates, axis=0, reduction="update"):
    """Scatter elements from updates to corresponding indices of copied data.

    Data, indices, updates and output have the same shape.
    Indices can not have duplicates (if idx1 != idx2, then indices[idx1] != indices[idx2])
    if reduction == "update".

    .. code-block::

        output[indices[i][j]][j] = f(output[indices[i][j]][j], updates[i][j]) if axis = 0
        output[i][indices[i][j]] = f(output[i][indices[i][j]], updates[i][j]) if axis = 1

    where the update function f is determinted by the reduction.
    Six types of the function are supported: "update", "add", "mul", "mean", "min"
    and "max" (see below)

    Parameters
    ----------
    data : tvm.te.Tensor
        The source array.

    indices : tvm.te.Tensor
        The indices of the values to extract.

    updates : tvm.te.Tensor
        The updates to apply at the Indices

    axis : optional, int
        The axis to scatter on. It is zero by default.

    reduction : optional, string
        The update mode for the algorithm, either "update", "add", "mul", "mean", "min" or "max"
        If update, the update values will replace the input data
        If add, the update values will be added to the input data
        If mul, the input data will be multiplied on the update values
        If mean, the input data will be mean between the update values and the input data
        If min, there is choice of minimal between the update values and the input data
        If max, there is choice of maximal between the update values and the input data
        It is "update" by default

    Returns
    -------
    ret : tvm.te.Tensor

    Raises
    ------
    NotImplementedError
        If `reduction` is not one of the supported modes.
    """
    if not isinstance(axis, int):
        axis = get_const_int(axis)

    def update_func(dst_ptr, dst_index, update):
        dst_ptr[dst_index] = update

    def add_func(dst_ptr, dst_index, update):
        dst_ptr[dst_index] += update

    def mul_func(dst_ptr, dst_index, update):
        dst_ptr[dst_index] *= update

    def mean_func(dst_ptr, dst_index, update):
        dst_ptr[dst_index] = (dst_ptr[dst_index] + update) / 2

    def min_func(dst_ptr, dst_index, update):
        dst_ptr[dst_index] = tir.min(dst_ptr[dst_index], update)

    def max_func(dst_ptr, dst_index, update):
        dst_ptr[dst_index] = tir.max(dst_ptr[dst_index], update)

    # Dispatch table replaces the previous if/elif chain; the error message now
    # lists all supported modes (it previously omitted "mean").
    _reduce_funcs = {
        "update": update_func,
        "add": add_func,
        "mul": mul_func,
        "mean": mean_func,
        "min": min_func,
        "max": max_func,
    }
    reduce_func = _reduce_funcs.get(reduction)
    if reduce_func is None:
        raise NotImplementedError(
            "scatter_elements reduction not in [update, add, mul, mean, min, max]:", reduction
        )

    shape = data.shape
    rank = len(shape)

    cur_target_kind = str(tvm.target.Target.current(allow_none=False).kind)
    # Use the faster atomic-add kernel only where atomics are available for the
    # dtype (vulkan/metal are excluded here).
    if (
        reduction == "add"
        and rank == 1
        and cur_target_kind not in ["vulkan", "metal"]
        and updates.dtype in ["int32", "float32"]
    ):
        gen_scatter_elements_ir = gen_scatter_add_1d_atomic
    else:
        gen_scatter_elements_ir = gen_ir

    out_buf = tir.decl_buffer(shape, data.dtype, "out_buf")
    return te.extern(
        [shape],
        [data, indices, updates],
        lambda ins, outs: gen_scatter_elements_ir(
            ins[0], ins[1], ins[2], outs[0], axis, reduce_func
        ),
        dtype=data.dtype,
        out_buffers=[out_buf],
        name="scatter_elements_cuda",
        tag="scatter_elements_cuda",
    )
| 10,349 | 31.857143 | 97 | py |
tvm | tvm-main/python/tvm/topi/cuda/signal.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks, unused-argument
"""STFT operator"""
from math import pi
import tvm
from tvm import te, tir
from ..utils import ceil_div
def _get_max_threads(batch_row):
    """Cap the thread count at the current target's maximum."""
    target = tvm.target.Target.current(allow_none=False)
    return tir.min(batch_row, target.max_num_threads)
def stft(
    data,
    n_fft,
    hop_length,
    win_length,
    window,
    normalized,
    onesided,
    output_shape,
):
    """
    The STFT computes the Fourier transform of short overlapping windows of the input.
    This gives frequency components of the signal as they change over time.

    Parameters
    ----------
    data : relay.Expr
        Either a 1-D tensor or a 2-D batch tensor.

    n_fft : int
        The size of Fourier transform

    hop_length : int
        The distance between neighboring sliding window frames

    win_length : int
        The size of window frame and STFT filter

    window : relay.Expr
        A 1-D tensor window frame

    normalized : bool
        Whether to return the normalized STFT results

    onesided : bool
        Whether to return onesided result or fill with conjugate symmetry

    output_shape : list of int
        Shape of the output tensor (batch, freq bins, frames, 2), where the
        last axis holds the real and imaginary parts.

    Returns
    -------
    output : relay.Expr
        Tensor containing the STFT result

    Examples
    --------
    .. code-block:: python

        data = [1, 2, 3, 4, 5, 6]
        window = [4, 3, 2]
        [n_fft, hop_length, win_length, normalized, onesided] = [3, 3, 3, False, True]
        relay.stft(data, n_fft, hop_length, win_length, window, normalized, onesided)
        -> [[[15.0000, 0.0000], [34.0000, 0.0000]], [[ 4.5000, 0.8660], [ 1.0000, -1.7321]]]
    """

    def gen_ir(
        data_ptr,
        n_fft,
        hop_length,
        win_length,
        window_ptr,
        normalized,
        onesided,
        output_ptr,
    ):
        ib = tir.ir_builder.create()
        data = ib.buffer_ptr(data_ptr)
        window = ib.buffer_ptr(window_ptr)
        output = ib.buffer_ptr(output_ptr)
        max_threads = _get_max_threads(output_ptr.shape[0] * output_ptr.shape[1])
        output_size = output_ptr.shape[0] * output_ptr.shape[1] * output_ptr.shape[2]
        with ib.new_scope():
            # One thread per output (batch, row, col) cell.
            nthread_tx = max_threads
            nthread_bx = ceil_div(output_size, max_threads)
            tx = te.thread_axis("threadIdx.x")
            bx = te.thread_axis("blockIdx.x")
            ib.scope_attr(tx, "thread_extent", nthread_tx)
            ib.scope_attr(bx, "thread_extent", nthread_bx)
            tid = bx * max_threads + tx
            with ib.if_scope(tid < output_size):
                # Recover (batch, frequency row, frame col) from the flat tid.
                matrix_size = output_ptr.shape[1] * output_ptr.shape[2]
                batch = tir.floordiv(tid, matrix_size)
                row = tir.floordiv(tir.indexmod(tid, matrix_size), output_ptr.shape[2])
                col = tir.indexmod(tir.indexmod(tid, matrix_size), output_ptr.shape[2])
                # Accumulate real (index 0) and imaginary (index 1) parts of the
                # windowed DFT for this frame.
                output[batch, row, col, 0] = tir.Cast(data_ptr.dtype, 0)
                output[batch, row, col, 1] = tir.Cast(data_ptr.dtype, 0)
                with ib.for_range(0, win_length) as wlen:
                    output[batch, row, col, 0] += (
                        window[wlen]
                        * data[batch, col * hop_length + wlen]
                        * tir.cos(2 * pi * row * wlen / win_length)
                    )
                    output[batch, row, col, 1] -= (
                        window[wlen]
                        * data[batch, col * hop_length + wlen]
                        * tir.sin(2 * pi * row * wlen / win_length)
                    )
                # Optional 1/sqrt(n_fft) normalization.
                with ib.if_scope(normalized):
                    output[batch, row, col, 0] /= tir.sqrt(tir.const(n_fft, "float32"))
                    output[batch, row, col, 1] /= tir.sqrt(tir.const(n_fft, "float32"))

        return ib.get()

    output_buf = tir.decl_buffer(output_shape, data.dtype, "output_buf")

    return te.extern(
        output_shape,
        [data, window],
        lambda ins, outs: gen_ir(
            ins[0], n_fft, hop_length, win_length, ins[1], normalized, onesided, outs[0]
        ),
        dtype=[data.dtype],
        out_buffers=[output_buf],
        name="stft_cuda",
        tag="stft_cuda",
    )
def dft(
    re_data: te.Tensor,
    im_data: te.Tensor,
    inverse: tir.IntImm,
):
    """
    Computes the discrete Fourier transform of input (calculation along the last axis).
    This gives frequency components of the signal as they change over time.

    Parameters
    ----------
    re_data : relay.Expr
        N-D tensor, real part of the input signal.

    im_data : relay.Expr
        N-D tensor, imaginary part of the input signal.
        If the signal is real, then the values of this tensor are zeros.

    inverse : bool
        Whether to perform the inverse discrete fourier transform.

    Returns
    -------
    re_output : relay.Expr
        The Fourier Transform of the input (Real part).
    im_output : relay.Expr
        The Fourier Transform of the input (Imaginary part).
    """

    def gen_ir(
        re_data_buf,
        im_data_buf,
        re_output_buf,
        im_output_buf,
    ):
        ib = tir.ir_builder.create()
        re_data_ptr = ib.buffer_ptr(re_data_buf)
        im_data_ptr = ib.buffer_ptr(im_data_buf)
        re_output_ptr = ib.buffer_ptr(re_output_buf)
        im_output_ptr = ib.buffer_ptr(im_output_buf)

        shape = re_data.shape
        # The transform runs along the last axis; all outer axes are batch.
        n_fft = shape[len(shape) - 1]
        base_range = 1
        for i in range(len(shape) - 1):
            base_range *= shape[i]

        # Inverse transform flips the exponent sign and scales by 1/N.
        sign = -1 if inverse else 1
        factor = 1.0 / n_fft if inverse else 1.0

        max_threads = _get_max_threads(base_range)
        with ib.new_scope():
            nthread_tx = max_threads
            nthread_bx = ceil_div(base_range, max_threads)
            tx = te.thread_axis("threadIdx.x")
            bx = te.thread_axis("blockIdx.x")
            ib.scope_attr(tx, "thread_extent", nthread_tx)
            ib.scope_attr(bx, "thread_extent", nthread_bx)
            tid = bx * max_threads + tx
            # One thread per batch row; each thread computes the naive O(N^2)
            # DFT of its row.
            with ib.if_scope(tid < base_range):
                base_idx = tid * n_fft
                with ib.for_range(0, n_fft) as n:
                    n_idx = base_idx + n
                    re_output_ptr[n_idx] = tir.Cast(re_output_ptr.dtype, 0)
                    im_output_ptr[n_idx] = tir.Cast(im_output_ptr.dtype, 0)
                    _w = sign * -2 * pi * n / n_fft
                    with ib.for_range(0, n_fft) as k:
                        k_idx = base_idx + k
                        w = _w * k
                        cos_w = tir.Cast(re_output_ptr.dtype, tir.cos(w))
                        sin_w = tir.Cast(re_output_ptr.dtype, tir.sin(w))
                        # Complex multiply-accumulate: (re + i*im) * e^{i*w}.
                        re_output_ptr[n_idx] += (
                            re_data_ptr[k_idx] * cos_w - im_data_ptr[k_idx] * sin_w
                        )
                        im_output_ptr[n_idx] += (
                            re_data_ptr[k_idx] * sin_w + im_data_ptr[k_idx] * cos_w
                        )
                    re_output_ptr[n_idx] *= tir.Cast(re_output_ptr.dtype, factor)
                    im_output_ptr[n_idx] *= tir.Cast(im_output_ptr.dtype, factor)

        return ib.get()

    # Two outputs (real, imaginary), each shaped like the input.
    output_shape = [re_data.shape] * 2

    return te.extern(
        shape=output_shape,
        inputs=[re_data, im_data],
        fcompute=lambda ins, outs: gen_ir(ins[0], ins[1], outs[0], outs[1]),
        dtype=[re_data.dtype, im_data.dtype],
        name="dft_cuda",
        tag="dft_cuda",
    )
| 8,348 | 34.987069 | 95 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv1d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Compute definition for conv1d with cuda backend"""
import tvm
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import traverse_inline, get_const_tuple
@autotvm.register_topi_compute("conv1d_ncw.cuda")
def conv1d_ncw(cfg, data, kernel, strides, padding, dilation, out_dtype="float32"):
    """AutoTVM compute for conv1d in NCW layout; delegates to the generic TOPI compute."""
    return nn.conv1d_ncw(data, kernel, strides, padding, dilation, out_dtype)
def _schedule_conv1d_ncw(cfg, outs):
    """TOPI schedule callback of conv1d ncw for cuda gpu

    Parameters
    ----------
    cfg : ConfigEntity
        the config for this template.
    outs : Array of Tensor
        The computation graph description of conv1d
        in the format of an array of tensors.

    Returns
    -------
    s : Schedule
        The computation schedule for conv1d.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    def _callback(op):
        # Handles both plain and grouped conv1d: the compute shape is identical.
        if op.tag == "conv1d_ncw" or op.tag == "group_conv1d_ncw":
            pad_data = op.input_tensors[0]
            kernel = op.input_tensors[1]
            conv = op.output(0)
            ##### space definition begin #####
            n, f, x = s[conv].op.axis
            rc = s[conv].op.reduce_axis[0]
            cfg.define_split("tile_n", cfg.axis(n), num_outputs=4)
            cfg.define_split("tile_f", cfg.axis(f), num_outputs=4)
            cfg.define_split("tile_x", cfg.axis(x), num_outputs=4)
            cfg.define_split("tile_rc", cfg.axis(rc), num_outputs=3)
            cfg.define_knob("auto_unroll_max_step", [64, 512, 1500])
            target = tvm.target.Target.current()
            # nvptx/rocm codegen requires explicit unrolling.
            if target.kind.name in ["nvptx", "rocm"]:
                cfg.define_knob("unroll_explicit", [1])
            else:
                cfg.define_knob("unroll_explicit", [0, 1])
            ##### space definition end #####
            if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
                s[kernel].compute_inline()
            if conv.op in s.outputs:
                output = conv
                OL = s.cache_write(conv, "local")
            else:
                output = s.outputs[0].output(0)
                s[conv].set_scope("local")
                OL = conv
            # create cache stage
            s[pad_data].set_scope("shared")
            AA = pad_data
            WW = s.cache_read(kernel, "shared", [OL])
            # tile and bind spatial axes
            n, f, x = s[output].op.axis
            kernel_scope, n = s[output].split(n, nparts=1)
            bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n)
            bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
            bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
            s[output].reorder(bn, bf, bx, vn, vf, vx, tn, tf, tx, ni, fi, xi)
            s[output].bind(bn, te.thread_axis("blockIdx.z"))
            s[output].bind(bf, te.thread_axis("blockIdx.y"))
            s[output].bind(bx, te.thread_axis("blockIdx.x"))
            s[output].bind(vn, te.thread_axis("vthread"))
            s[output].bind(vf, te.thread_axis("vthread"))
            s[output].bind(vx, te.thread_axis("vthread"))
            s[output].bind(tx, te.thread_axis("threadIdx.x"))
            s[OL].compute_at(s[output], tx)
            # number of threads
            n_tz = cfg["tile_n"].size[2] * cfg["tile_f"].size[2]
            n_tx = cfg["tile_x"].size[2]
            # tile reduction axes
            n, f, x = s[OL].op.axis
            rc, rx = s[OL].op.reduce_axis
            rco, rcm, rci = cfg["tile_rc"].apply(s, OL, rc)
            s[OL].reorder(rco, rcm, rx, rci, n, f, x)
            s[AA].compute_at(s[OL], rx)
            s[WW].compute_at(s[OL], rx)
            # cooperative fetching: all threads in a block share the loads
            # of the padded data and the kernel into shared memory.
            for load in [AA, WW]:
                n, f, x = s[load].op.axis
                fused = s[load].fuse(f, x)
                tz, fused = s[load].split(fused, nparts=n_tz)
                tx, fused = s[load].split(fused, nparts=n_tx)
                s[load].bind(tz, te.thread_axis("threadIdx.y"))
                s[load].bind(tx, te.thread_axis("threadIdx.x"))
            s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
            s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
            # Record the FLOP count so AutoTVM can report GFLOPS.
            N, CO, OW = get_const_tuple(output.shape)
            _, CI, KW = get_const_tuple(kernel.shape)
            cfg.add_flop(2 * N * OW * CO * KW * CI)
    traverse_inline(s, outs[0].op, _callback)
    return s
@autotvm.register_topi_schedule("conv1d_ncw.cuda")
def schedule_conv1d_ncw(cfg, outs):
    """AutoTVM schedule for conv1d in NCW layout on CUDA."""
    return _schedule_conv1d_ncw(cfg, outs)
@autotvm.register_topi_compute("group_conv1d_ncw.cuda")
def group_conv1d_ncw(cfg, data, kernel, strides, padding, dilation, groups, out_dtype="float32"):
    """AutoTVM compute for grouped conv1d in NCW layout; delegates to the generic TOPI compute."""
    return nn.group_conv1d_ncw(data, kernel, strides, padding, dilation, groups, out_dtype)
@autotvm.register_topi_schedule("group_conv1d_ncw.cuda")
def schedule_group_conv1d_ncw(cfg, outs):
    """AutoTVM schedule for grouped conv1d in NCW layout; reuses the NCW schedule."""
    return _schedule_conv1d_ncw(cfg, outs)
@autotvm.register_topi_compute("conv1d_nwc.cuda")
def conv1d_nwc(cfg, data, kernel, strides, padding, dilation, out_dtype="float32"):
    """AutoTVM compute for conv1d in NWC layout; delegates to the generic TOPI compute."""
    return nn.conv1d_nwc(data, kernel, strides, padding, dilation, out_dtype)
def _schedule_conv1d_nwc(cfg, outs):
    """TOPI schedule callback of conv1d nwc for cuda gpu

    Parameters
    ----------
    cfg : ConfigEntity
        the config for this template.
    outs : Array of Tensor
        The computation graph description of conv1d
        in the format of an array of tensors.

    Returns
    -------
    s : Schedule
        The computation schedule for conv1d.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    def _callback(op):
        # Handles both plain and grouped conv1d: the compute shape is identical.
        if op.tag == "conv1d_nwc" or op.tag == "group_conv1d_nwc":
            pad_data = op.input_tensors[0]
            kernel = op.input_tensors[1]
            conv = op.output(0)
            ##### space definition begin #####
            n, x, f = s[conv].op.axis
            rc = s[conv].op.reduce_axis[0]
            cfg.define_split("tile_n", cfg.axis(n), num_outputs=4)
            cfg.define_split("tile_x", cfg.axis(x), num_outputs=4)
            cfg.define_split("tile_f", cfg.axis(f), num_outputs=4)
            cfg.define_split("tile_rc", cfg.axis(rc), num_outputs=3)
            cfg.define_knob("auto_unroll_max_step", [64, 512, 1500])
            target = tvm.target.Target.current()
            # nvptx/rocm codegen requires explicit unrolling.
            if target.kind.name in ["nvptx", "rocm"]:
                cfg.define_knob("unroll_explicit", [1])
            else:
                cfg.define_knob("unroll_explicit", [0, 1])
            ##### space definition end #####
            if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
                s[kernel].compute_inline()
            if conv.op in s.outputs:
                output = conv
                OL = s.cache_write(conv, "local")
            else:
                output = s.outputs[0].output(0)
                s[conv].set_scope("local")
                OL = conv
            # create cache stage
            s[pad_data].set_scope("shared")
            AA = pad_data
            WW = s.cache_read(kernel, "shared", [OL])
            # tile and bind spatial axes; for NWC the channel axis is
            # innermost, so threads are bound along the filter axis instead.
            n, f, x = s[output].op.axis
            kernel_scope, n = s[output].split(n, nparts=1)
            bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n)
            bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
            bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
            s[output].reorder(bn, bx, bf, vn, vx, vf, tn, tx, tf, ni, xi, fi)
            s[output].bind(bn, te.thread_axis("blockIdx.z"))
            s[output].bind(bx, te.thread_axis("blockIdx.y"))
            s[output].bind(bf, te.thread_axis("blockIdx.x"))
            s[output].bind(vn, te.thread_axis("vthread"))
            s[output].bind(vx, te.thread_axis("vthread"))
            s[output].bind(vf, te.thread_axis("vthread"))
            s[output].bind(tf, te.thread_axis("threadIdx.x"))
            s[OL].compute_at(s[output], tf)
            # number of threads
            n_tz = cfg["tile_n"].size[2] * cfg["tile_x"].size[2]
            n_tx = cfg["tile_f"].size[2]
            # tile reduction axes
            n, x, f = s[OL].op.axis
            rc, rx = s[OL].op.reduce_axis
            rco, rcm, rci = cfg["tile_rc"].apply(s, OL, rc)
            s[OL].reorder(rco, rcm, rx, rci, n, x, f)
            s[AA].compute_at(s[OL], rx)
            s[WW].compute_at(s[OL], rx)
            # cooperative fetching: all threads in a block share the loads
            # of the padded data and the kernel into shared memory.
            for load in [AA, WW]:
                n, x, f = s[load].op.axis
                fused = s[load].fuse(x, f)
                tz, fused = s[load].split(fused, nparts=n_tz)
                tx, fused = s[load].split(fused, nparts=n_tx)
                s[load].bind(tz, te.thread_axis("threadIdx.y"))
                s[load].bind(tx, te.thread_axis("threadIdx.x"))
            s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
            s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
            # Record the FLOP count so AutoTVM can report GFLOPS.
            N, OW, CO = get_const_tuple(output.shape)
            KW, CI, _ = get_const_tuple(kernel.shape)
            cfg.add_flop(2 * N * OW * CO * KW * CI)
    traverse_inline(s, outs[0].op, _callback)
    return s
@autotvm.register_topi_schedule("conv1d_nwc.cuda")
def schedule_conv1d_nwc(cfg, outs):
    """AutoTVM schedule for conv1d in NWC layout on CUDA."""
    return _schedule_conv1d_nwc(cfg, outs)
@autotvm.register_topi_compute("group_conv1d_nwc.cuda")
def group_conv1d_nwc(cfg, data, kernel, strides, padding, dilation, groups, out_dtype="float32"):
    """AutoTVM compute for grouped conv1d in NWC layout; delegates to the generic TOPI compute."""
    return nn.group_conv1d_nwc(data, kernel, strides, padding, dilation, groups, out_dtype)
@autotvm.register_topi_schedule("group_conv1d_nwc.cuda")
def schedule_group_conv1d_nwc(cfg, outs):
    """AutoTVM schedule for grouped conv1d in NWC layout; reuses the NWC schedule."""
    return _schedule_conv1d_nwc(cfg, outs)
| 10,813 | 36.94386 | 99 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv2d_direct.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""The templates for cuda conv2d operators"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple
def schedule_direct_cuda(cfg, s, conv):
    """Schedule a direct (non-winograd, non-im2col) conv2d for CUDA,
    optimized for batch size = 1.

    Parameters
    ----------
    cfg : ConfigEntity
        The AutoTVM config for this template.
    s : Schedule
        The schedule to mutate in place.
    conv : Tensor
        The conv2d compute tensor (NCHW layout).
    """
    ##### space definition begin #####
    n, f, y, x = s[conv].op.axis
    rc, ry, rx = s[conv].op.reduce_axis
    cfg.define_split("tile_f", f, num_outputs=4)
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_rc", rc, num_outputs=2)
    cfg.define_split("tile_ry", ry, num_outputs=2)
    cfg.define_split("tile_rx", rx, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    target = tvm.target.Target.current()
    # nvptx/rocm codegen requires explicit unrolling.
    if target.kind.name in ["nvptx", "rocm"]:
        cfg.define_knob("unroll_explicit", [1])
    else:
        cfg.define_knob("unroll_explicit", [0, 1])
    # fallback support: seed an untuned config from the tophub reference log
    if cfg.is_fallback:
        ref_log = autotvm.tophub.load_reference_log(
            target.kind.name, target.model, "conv2d_nchw.cuda"
        )
        cfg.fallback_with_reference_log(ref_log)
    ##### space definition end #####
    pad_data, kernel = s[conv].op.input_tensors
    s[pad_data].compute_inline()
    if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()
    if conv.op in s.outputs:
        output = conv
        OL = s.cache_write(conv, "local")
    else:
        output = s.outputs[0].output(0)
        s[conv].set_scope("local")
        OL = conv
    # create cache stage
    AA = s.cache_read(pad_data, "shared", [OL])
    WW = s.cache_read(kernel, "shared", [OL])
    # tile and bind spatial axes
    n, f, y, x = s[output].op.axis
    kernel_scope, n = s[output].split(n, nparts=1)
    bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
    by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
    bf = s[output].fuse(n, bf)
    s[output].bind(bf, te.thread_axis("blockIdx.z"))
    s[output].bind(by, te.thread_axis("blockIdx.y"))
    s[output].bind(bx, te.thread_axis("blockIdx.x"))
    s[output].bind(vf, te.thread_axis("vthread"))
    s[output].bind(vy, te.thread_axis("vthread"))
    s[output].bind(vx, te.thread_axis("vthread"))
    s[output].bind(tf, te.thread_axis("threadIdx.z"))
    s[output].bind(ty, te.thread_axis("threadIdx.y"))
    s[output].bind(tx, te.thread_axis("threadIdx.x"))
    s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi)
    s[OL].compute_at(s[output], tx)
    # tile reduction axes
    n, f, y, x = s[OL].op.axis
    rc, ry, rx = s[OL].op.reduce_axis
    rco, rci = cfg["tile_rc"].apply(s, OL, rc)
    ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
    rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)
    s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x)
    s[AA].compute_at(s[OL], rxo)
    s[WW].compute_at(s[OL], rxo)
    # cooperative fetching: all threads in a block share the loads of the
    # padded data and the kernel into shared memory.
    for load in [AA, WW]:
        n, f, y, x = s[load].op.axis
        fused = s[load].fuse(n, f, y, x)
        tz, fused = s[load].split(fused, nparts=cfg["tile_f"].size[2])
        ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
        tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
    # unroll
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
    # Record the FLOP count (static shapes only) so AutoTVM can report GFLOPS.
    N, CO, OH, OW = get_const_tuple(output.shape)
    _, KH, KW, CI = get_const_tuple(kernel.shape)
    if isinstance(N, int):
        cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW)
| 4,632 | 36.666667 | 91 | py |
tvm | tvm-main/python/tvm/topi/cuda/sort.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, too-many-statements, singleton-comparison, unused-argument, no-else-return
"""Sort related operators """
import tvm
from tvm import te
from .injective import schedule_injective_from_existing
from ..transform import strided_slice, transpose
from .. import tag
from ..utils import ceil_div, swap
from ..math import cast, ceil_log2
def _schedule_sort(outs):
    """Schedule for argsort operator.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of argsort
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    s = te.create_schedule([tensor.op for tensor in outs])
    visited = []

    def _visit(op):
        # Schedule every injective stage found while walking the graph.
        if tag.is_injective(op.tag):
            schedule_injective_from_existing(s, op.output(0))
        for inp in op.input_tensors:
            # Recurse only into non-placeholder producers not seen yet.
            if inp.op.input_tensors and inp.op not in visited:
                _visit(inp.op)
        visited.append(op)

    for tensor in outs:
        _visit(tensor.op)
    return s
def _get_threads(ib, nthread_tx, nthread_bx, nthread_by, nthread_bz):
    """Declare the thread/block axes used by the sort kernels and attach
    their extents to the current scope of the given IR builder.

    Returns the (threadIdx.x, blockIdx.x, blockIdx.y, blockIdx.z) axes.
    """
    axes = []
    # Emission order matters for the generated IR: tx, bx, then by, bz.
    for axis_name, extent in (
        ("threadIdx.x", nthread_tx),
        ("blockIdx.x", nthread_bx),
        ("blockIdx.y", nthread_by),
        ("blockIdx.z", nthread_bz),
    ):
        axis = te.thread_axis(axis_name)
        ib.scope_attr(axis, "thread_extent", extent)
        axes.append(axis)
    return tuple(axes)
def _sort_init(ib, shape, axis, keys_in, keys_out, values_out=None, value_init_func=None):
    """Initialize the output buffers by copying from inputs.

    Copies ``keys_in`` to ``keys_out`` and, when ``values_out`` is given,
    seeds each value element via ``value_init_func(idx, tid)``.

    Returns the products of extents before and after ``axis``
    (axis_mul_before, axis_mul_after), used to lay out the kernel grid.
    """
    axis_mul_before = 1
    axis_mul_after = 1
    if axis < 0:
        axis = len(shape) + axis
    # Flatten all axes other than the sort axis into before/after factors.
    for i, value in enumerate(shape, 0):
        if i < axis:
            axis_mul_before *= value
        elif i > axis:
            axis_mul_after *= value
    # Set up threading
    max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    nthread_tx = max_threads
    nthread_bx = ceil_div(shape[axis], max_threads)
    nthread_by = axis_mul_before
    nthread_bz = axis_mul_after
    # Copy the keys_in to initial output
    with ib.new_scope():
        tx, bx, by, bz = _get_threads(ib, nthread_tx, nthread_bx, nthread_by, nthread_bz)
        tid = bx * nthread_tx + tx
        # Linear index of element `tid` along the sort axis for this (by, bz) slice.
        idx = (by * shape[axis] + tid) * axis_mul_after + bz
        with ib.if_scope(tid < shape[axis]):
            keys_out[idx] = keys_in[idx]
            if values_out is not None:
                values_out[idx] = value_init_func(idx, tid)
    return axis_mul_before, axis_mul_after
## TODO(mbrookhart): These are effective optimization hyperparameters.
## Perhaps we can autotune?
# Number of elements handled by one shared-memory odd-even sort block.
block_size = 128
# Per-thread work factor used when sizing mergepath merge steps
# (step_count = max_threads * thread_work).
thread_work = 4
def _odd_even_sort(
    ib,
    size,
    axis_mul_before,
    axis_mul_after,
    is_ascend,
    keys,
    keys_swap,
    values=None,
    values_swap=None,
):
    """Sort each ``block_size``-element chunk in GPU shared memory using
    odd-even transposition sort; writes the sorted chunk to both ``keys``
    and ``keys_swap`` (and the value buffers, when given) so either buffer
    can serve as the merge source afterwards."""
    # Each thread owns two adjacent elements, hence block_size // 2 threads.
    nthread_tx = block_size // 2
    nthread_bx = ceil_div(size, block_size)
    nthread_by = axis_mul_before
    nthread_bz = axis_mul_after
    with ib.new_scope():
        ib.scope_attr(tvm.tir.const(0), "hand_threaded", 0)
        tx, bx, by, bz = _get_threads(ib, nthread_tx, nthread_bx, nthread_by, nthread_bz)
        tid = 2 * tx
        start = bx * block_size
        ## Create shared memory as syncable thread scratch space
        tmp_keys_swap = ib.allocate(
            keys_swap.dtype,
            (block_size,),
            name="temp_keys_swap",
            scope="shared",
        )
        if values_swap is not None:
            tmp_values_swap = ib.allocate(
                values_swap.dtype,
                (block_size,),
                name="temp_values_swap",
                scope="shared",
            )
        ## Create thread local data for swapping
        temp_keys = ib.allocate(keys_swap.dtype, (1,), name="temp_keys", scope="local")
        if values_swap is not None:
            temp_values = ib.allocate(values_swap.dtype, (1,), name="temp_values", scope="local")
        temp_cond1 = ib.allocate(keys_swap.dtype, (1,), name="temp_cond1", scope="local")
        temp_cond2 = ib.allocate(keys_swap.dtype, (1,), name="temp_cond2", scope="local")
        # Copy data to scratch space
        base_idx = by * size * axis_mul_after + bz
        with ib.for_range(0, 2) as n:
            with ib.if_scope((tid + n + start) < size):
                tmp_keys_swap[tid + n] = keys[base_idx + (tid + n + start) * axis_mul_after]
                if values_swap is not None:
                    tmp_values_swap[tid + n] = values[base_idx + (tid + n + start) * axis_mul_after]
        ib.emit(tvm.tir.Call(None, "tir.tvm_storage_sync", tvm.runtime.convert(["shared"])))
        idxm = tvm.tir.indexmod
        # OddEvenTransposeSort: alternate comparing (even, odd) and
        # (odd, even) neighbor pairs for current_sort_num rounds.
        current_sort_num = tvm.tir.min(block_size, size - start)
        with ib.for_range(0, current_sort_num) as k:
            n = idxm(tid + k, 2)
            with ib.if_scope(tid + n < current_sort_num - 1):
                temp_cond1[0] = tmp_keys_swap[tid + n]
                temp_cond2[0] = tmp_keys_swap[tid + n + 1]
                if is_ascend:
                    cond = temp_cond1[0] > temp_cond2[0]
                else:
                    cond = temp_cond1[0] < temp_cond2[0]
                # Swap the out-of-order neighbor pair (and its values).
                with ib.if_scope(cond):
                    temp_keys[0] = tmp_keys_swap[tid + n]
                    tmp_keys_swap[tid + n] = tmp_keys_swap[tid + n + 1]
                    tmp_keys_swap[tid + n + 1] = temp_keys[0]
                    if values_swap is not None:
                        temp_values[0] = tmp_values_swap[tid + n]
                        tmp_values_swap[tid + n] = tmp_values_swap[tid + n + 1]
                        tmp_values_swap[tid + n + 1] = temp_values[0]
            ib.emit(tvm.tir.Call(None, "tir.tvm_storage_sync", tvm.runtime.convert(["shared"])))
        ## Copy sorted data to output
        with ib.for_range(0, 2) as n:
            with ib.if_scope(tid + n + start < size):
                keys[base_idx + (tid + n + start) * axis_mul_after] = tmp_keys_swap[tid + n]
                keys_swap[base_idx + (tid + n + start) * axis_mul_after] = tmp_keys_swap[tid + n]
                if values_swap is not None:
                    values[base_idx + (tid + n + start) * axis_mul_after] = tmp_values_swap[tid + n]
                    values_swap[base_idx + (tid + n + start) * axis_mul_after] = tmp_values_swap[
                        tid + n
                    ]
def _sort_common(
    ib,
    size,
    axis_mul_before,
    axis_mul_after,
    is_ascend,
    keys,
    keys_swap,
    values=None,
    values_swap=None,
):
    """Either sort only values or sort values by keys.

    Emits a full multi-level GPU mergesort into ``ib``; ``keys``/``keys_swap``
    (and optionally ``values``/``values_swap``) are ping-pong buffers.
    """
    ## This function performs a multi-level mergesort
    ## For blocks of length <= block_size, it does odd-even transpose sort
    ## in GPU shared memory
    ## For intermediate block sizes (>block_size, < max_threads * thread_work)
    ## it uses the mergepath algorithm https://arxiv.org/abs/1406.2628
    ## to merge blocks in parallel
    ## At some point, the size of the blocks to be merged is too big for max_threads
    ## and we switch to using a dual-level mergepath where the outer mergepath
    ## finds the start/end locations of the inner mergepath so that we can split
    ## the merge into more blocks
    max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    nthread_by = axis_mul_before * axis_mul_after
    nthread_bz = 1
    nthread_tx = max_threads
    nthread_bx = ceil_div(size, nthread_tx)
    def compare(a, b):
        """
        Compare a and b in proper ascending or descending order
        """
        if is_ascend:
            out = a <= b
        else:
            out = b <= a
        return out
    # Sort the lower levels of the merge using odd-even sort, it's fast for small inputs
    lower_lim = ceil_log2(block_size)
    _odd_even_sort(
        ib,
        size,
        axis_mul_before * axis_mul_after,
        1,
        is_ascend,
        keys,
        keys_swap,
        values,
        values_swap,
    )
    upper_lim = ceil_log2(size)
    def get_merge_begin(source, base_idx, aCount, bCount, aStart, bStart, diag, step_count):
        # Binary search along the merge-path diagonal for the split point
        # (how many elements of A precede this diagonal).
        first = ib.allocate("int64", (1,), name="first", scope="local")
        mid = ib.allocate("int64", (1,), name="mid", scope="local")
        last = ib.allocate("int64", (1,), name="last", scope="local")
        first[0] = tvm.te.max(0, diag - bCount)
        last[0] = tvm.te.min(diag, aCount)
        with ib.while_loop(first[0] < last[0]):
            mid = (first[0] + last[0]) >> 1
            a = source[base_idx + (aStart + mid)]
            b = source[base_idx + (bStart + diag - 1 - mid)]
            with ib.if_scope(compare(a, b)):
                first[0] = mid + 1
            with ib.else_scope():
                last[0] = mid
        return first[0], last[0]
    def serial_merge(
        source,
        dest,
        source_idx,
        dest_idx,
        base_idx,
        aCount,
        bCount,
        aStart,
        bStart,
        kStart,
        diag,
        step_count,
        first,
        last,
    ):
        # Sequentially merge up to step_count elements starting at this
        # thread's diagonal split point.
        i = ib.allocate("int64", (1,), name="i", scope="local")
        j = ib.allocate("int64", (1,), name="j", scope="local")
        i[0] = aStart + first
        j[0] = bStart + diag - last
        with ib.for_range(0, tvm.te.min(aCount + bCount - diag, step_count)) as count:
            i_idx = base_idx + i[0]
            j_idx = base_idx + j[0]
            k_idx = base_idx + (kStart + diag + count)
            def assign_i():
                """assign i value to current output"""
                dest[k_idx] = source[i_idx]
                if values is not None:
                    dest_idx[k_idx] = source_idx[i_idx]
                i[0] += 1
            def assign_j():
                """assign j value to current output"""
                dest[k_idx] = source[j_idx]
                if values is not None:
                    dest_idx[k_idx] = source_idx[j_idx]
                j[0] += 1
            ## if both of the iterators are in range
            with ib.if_scope(tvm.tir.all(i[0] < aStart + aCount, j[0] < bStart + bCount)):
                # compare them and insert whichever is next into the output
                with ib.if_scope(compare(source[i_idx], source[j_idx])):
                    assign_i()
                with ib.else_scope():
                    assign_j()
            # otherwise, simply copy the remainder of the valid iterator to the output
            with ib.else_scope():
                with ib.if_scope(i[0] < aStart + aCount):
                    assign_i()
                with ib.else_scope():
                    assign_j()
    # Double the merged-block width each pass until it reaches the full size.
    with ib.for_range(0, cast(upper_lim - lower_lim, "int64"), dtype="int64") as l2_width:
        width = 2 << (l2_width + lower_lim)
        # Define and launch the cuda kernel
        with ib.new_scope():
            target = tvm.target.Target.current()
            if "vulkan" in str(target):
                # Vulkan can't handle dynamic nthread, so we thread slightly differently
                # for vulkan. We don't do this generally because it causes a 15% perf
                # regression on other platforms
                ntx = max_threads
                nbx = tvm.tir.generic.cast(ceil_div(width, max_threads * thread_work), "int32")
                nbz = tvm.tir.generic.cast(ceil_div(size, width), "int32")
                tx, bx, by, bz = _get_threads(ib, ntx, nbx, nthread_by, nbz)
            else:
                ntx = tvm.tir.generic.cast(tvm.te.min(max_threads, width), "int32")
                nbx = tvm.tir.generic.cast(ceil_div(width, max_threads * thread_work), "int32")
                nbz = tvm.tir.generic.cast(ceil_div(size, width), "int32")
                tx, bx, by, bz = _get_threads(ib, ntx, nbx, nthread_by, nbz)
            def mergepath(
                source,
                dest,
                source_idx,
                dest_idx,
                aCount,
                bCount,
                aStart,
                bStart,
                kStart,
                step_count,
                even,
            ):
                # pylint: disable=arguments-out-of-order
                def merge(source, dest, source_idx, dest_idx):
                    diag = tx * step_count
                    first, last = get_merge_begin(
                        source,
                        by * size,
                        aCount,
                        bCount,
                        aStart,
                        bStart,
                        diag,
                        step_count,
                    )
                    # iterate over the output loop
                    serial_merge(
                        source,
                        dest,
                        source_idx,
                        dest_idx,
                        by * size,
                        aCount,
                        bCount,
                        aStart,
                        bStart,
                        kStart,
                        diag,
                        step_count,
                        first,
                        last,
                    )
                # Ping-pong: even passes read `source`, odd passes read `dest`.
                with ib.if_scope(even):
                    merge(source, dest, source_idx, dest_idx)
                with ib.else_scope():
                    merge(dest, source, dest_idx, source_idx)
            def mergesort(source, dest, source_idx, dest_idx, size, width, even):
                # calculate the start, mid, and end points of this section
                start = width * bz
                middle = cast(tvm.te.min(start + tvm.tir.indexdiv(width, 2), size), "int64")
                end = cast(tvm.te.min(start + width, size), "int64")
                with ib.if_scope(start < size):
                    with ib.if_scope(nbx == 1):
                        ## merge the start->middle and middle->end arrays
                        aCount = middle - start
                        bCount = end - middle
                        mergepath(
                            source,
                            dest,
                            source_idx,
                            dest_idx,
                            aCount,
                            bCount,
                            start,
                            middle,
                            start,
                            ceil_div(width, ntx),
                            even,
                        )
                    with ib.else_scope():
                        # Dual-level mergepath: the outer search splits this
                        # merge across multiple thread blocks.
                        step_count = max_threads * thread_work
                        diag = bx * step_count
                        def do_merge(first, last):
                            aStart = start + first
                            bStart = middle + diag - last
                            aCount = tvm.te.min(middle - aStart, step_count)
                            bCount = tvm.te.min(end - bStart, step_count)
                            mergepath(
                                source,
                                dest,
                                source_idx,
                                dest_idx,
                                aCount,
                                bCount,
                                aStart,
                                bStart,
                                start + diag,
                                thread_work,
                                even,
                            )
                        with ib.if_scope(even):
                            first, last = get_merge_begin(
                                source,
                                by * size,
                                middle - start,
                                end - middle,
                                start,
                                middle,
                                diag,
                                step_count,
                            )
                            do_merge(first, last)
                        with ib.else_scope():
                            first, last = get_merge_begin(
                                dest,
                                by * size,
                                middle - start,
                                end - middle,
                                start,
                                middle,
                                diag,
                                step_count,
                            )
                            do_merge(first, last)
            # Call the kernel
            mergesort(
                keys,
                keys_swap,
                values,
                values_swap,
                size,
                width,
                tvm.tir.indexmod(l2_width, 2) == 0,
            )
    nthread_by = axis_mul_before
    nthread_bz = axis_mul_after
    nthread_tx = max_threads
    nthread_bx = ceil_div(size, nthread_tx)
    ## if the final sorted data ended up in the swap, copy it to the real output
    with ib.if_scope(
        tvm.tir.all(upper_lim > lower_lim, tvm.tir.indexmod(upper_lim - lower_lim, 2) == 1)
    ):
        with ib.new_scope():
            tx, bx, by, bz = _get_threads(ib, nthread_tx, nthread_bx, nthread_by, nthread_bz)
            tid = bx * nthread_tx + tx
            idx = (by * axis_mul_after + bz) * size + tid
            with ib.if_scope(tid < size):
                keys[idx] = keys_swap[idx]
                if values is not None:
                    values[idx] = values_swap[idx]
def sort_ir(
    data, values_out, values_out_swap, axis, is_ascend, indices_out=None, indices_out_swap=None
):
    """Low level IR to do sorting on the GPU, same usage as tvm.contrib.sort.argsort on the CPU.

    Parameters
    ----------
    data: Buffer
        Buffer of input data. Data will be sorted in place.
    values_out : Buffer
        Output buffer of values of sorted tensor with same shape as data.
    values_out_swap : Buffer
        Output buffer of values with same shape as data to use as swap.
    axis : Int
        Axis long which to sort the input tensor.
    is_ascend : Boolean
        Whether to sort in ascending or descending order.
    indices_out : Buffer
        Output buffer of indices of sorted tensor with same shape as data.
    indices_out_swap : Buffer
        Output buffer of indices with same shape as data to use as swap.

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    ib = tvm.tir.ir_builder.create()
    shape = data.shape
    data = ib.buffer_ptr(data)
    values_out = ib.buffer_ptr(values_out)
    values_out_swap = ib.buffer_ptr(values_out_swap)
    if indices_out is not None:
        indices_out = ib.buffer_ptr(indices_out)
        # Index output requires a matching swap buffer for the ping-pong merge.
        assert indices_out_swap is not None
        indices_out_swap = ib.buffer_ptr(indices_out_swap)
    # Empty sort axis: emit nothing.
    with ib.if_scope(shape[axis] > 0):
        axis_mul_before, axis_mul_after = _sort_init(
            ib,
            shape,
            axis,
            data,
            values_out,
            indices_out,
            # Seed indices with each element's position along the sort axis.
            value_init_func=lambda _, tid: tvm.tir.generic.cast(tid, indices_out.dtype),
        )
        _sort_common(
            ib,
            shape[axis],
            axis_mul_before,
            axis_mul_after,
            is_ascend,
            values_out,
            values_out_swap,
            values=indices_out,
            values_swap=indices_out_swap,
        )
    return ib.get()
def sort_by_key_ir(
    keys_in, values_in, keys_out, values_out, keys_out_swap, values_out_swap, axis, is_ascend
):
    """Low level IR to do sort by key on the GPU.

    Parameters
    ----------
    keys_in: Buffer
        Buffer of input keys.
    values_in: Buffer
        Buffer of input values.
    keys_out : Buffer
        Buffer of output sorted keys.
    values_out : Buffer
        Buffer of output sorted values.
    keys_out_swap : Buffer
        Output buffer of values with same shape as keys_in to use as swap.
    values_out_swap : Buffer
        Output buffer of values with same shape as values_in to use as swap.
    axis : Int
        Axis long which to sort the input tensor.
    is_ascend : Boolean
        Whether to sort in ascending or descending order.

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    ib = tvm.tir.ir_builder.create()
    shape = keys_in.shape
    keys_in = ib.buffer_ptr(keys_in)
    values_in = ib.buffer_ptr(values_in)
    keys_out = ib.buffer_ptr(keys_out)
    keys_out_swap = ib.buffer_ptr(keys_out_swap)
    values_out = ib.buffer_ptr(values_out)
    values_out_swap = ib.buffer_ptr(values_out_swap)
    # Empty sort axis: emit nothing.
    with ib.if_scope(shape[axis] > 0):
        axis_mul_before, axis_mul_after = _sort_init(
            ib,
            shape,
            axis,
            keys_in,
            keys_out,
            values_out,
            # Seed the value output with the caller-provided values.
            value_init_func=lambda idx, _: values_in[idx],
        )
        _sort_common(
            ib,
            shape[axis],
            axis_mul_before,
            axis_mul_after,
            is_ascend,
            keys_out,
            keys_out_swap,
            values=values_out,
            values_swap=values_out_swap,
        )
    return ib.get()
def sort(data, axis=-1, is_ascend=1):
    """Performs sorting along the given axis and returns an array of
    sorted values with the same shape as the input data.

    Parameters
    ----------
    data: tvm.te.Tensor
        The input array.

    axis : int, optional
        Axis long which to sort the input tensor.

    is_ascend : boolean, optional
        Whether to sort in ascending or descending order.

    Returns
    -------
    out : tvm.te.Tensor
        The output of this function.
    """
    rank = len(data.shape)
    if axis < 0:
        axis = rank + axis
    needs_transpose = axis != rank - 1
    if needs_transpose:
        # Move the sort axis to the innermost position first.
        data = transpose(data, swap(list(range(rank)), axis))
    sort_buffers = [
        tvm.tir.decl_buffer(data.shape, data.dtype, buf_name, data_alignment=8)
        for buf_name in ("value_buf", "value_buf_swap")
    ]
    out = te.extern(
        [data.shape, data.shape],
        [data],
        lambda ins, outs: sort_ir(ins[0], outs[0], outs[1], -1, is_ascend),
        out_buffers=sort_buffers,
        name="sort_gpu",
        tag="sort_gpu",
    )[0]
    if needs_transpose:
        # Restore the original axis order.
        out = transpose(out, swap(list(range(rank)), axis))
    return out
def sort_thrust(data, axis=-1, is_ascend=1):
    """Performs sorting along the given axis and returns an array of
    sorted values with the same shape as the input data, using thrust.

    Parameters
    ----------
    data : tvm.te.Tensor
        The input array.
    axis : int, optional
        Axis along which to sort the input tensor.
    is_ascend : boolean, optional
        Whether to sort in ascending or descending order.

    Returns
    -------
    out : tvm.te.Tensor
        The output of this function.
    """
    # dtype of the discarded indices output produced by the thrust kernel.
    indices_dtype = "float32"
    ndim = len(data.shape)
    if axis < 0:
        axis += ndim
    need_transpose = axis != ndim - 1
    if need_transpose:
        # Thrust sorts along the last axis; move `axis` innermost first.
        perm = swap(list(range(ndim)), axis)
        data = transpose(data, perm)
    sorted_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "value_buf", data_alignment=8)
    indices_buf = tvm.tir.decl_buffer(data.shape, indices_dtype, "out_buf", data_alignment=8)
    # TODO(mbrookhart): the thrust packed func below actually performs argsort,
    # not a pure sort. For performance, the contrib function should be renamed
    # and a dedicated sort added.
    result = te.extern(
        [data.shape, data.shape],
        [data],
        lambda ins, outs: tvm.tir.call_packed(
            "tvm.contrib.thrust.sort", ins[0], outs[0], outs[1], is_ascend
        ),
        out_buffers=[sorted_buf, indices_buf],
        name="sort_gpu",
        tag="sort_gpu",
    )[0]
    if need_transpose:
        perm = swap(list(range(ndim)), axis)
        result = transpose(result, perm)
    return result
def argsort(data, axis=-1, is_ascend=1, dtype="float32", ret_type="indices"):
    """Performs sorting along the given axis and returns an array of indices
    having same shape as an input array that index data in sorted order.

    Parameters
    ----------
    data : tvm.te.Tensor
        The input array.
    axis : int, optional
        Axis along which to sort the input tensor.
    is_ascend : boolean, optional
        Whether to sort in ascending or descending order.
    dtype : string, optional
        DType of the output indices.
    ret_type : string, optional
        The return type [both, indices].
        "both": return both sorted data and indices.
        "indices": return sorted indices only.

    Returns
    -------
    out : tvm.te.Tensor
        The output of this function.
    """
    ndim = len(data.shape)
    if axis < 0:
        axis += ndim
    need_transpose = axis != ndim - 1
    if need_transpose:
        # The sort IR works on the innermost axis; move `axis` there first.
        perm = swap(list(range(ndim)), axis)
        data = transpose(data, perm)
    values_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "value_buf", data_alignment=8)
    values_scratch = tvm.tir.decl_buffer(data.shape, data.dtype, "value_swap_buf", data_alignment=8)
    indices_buf = tvm.tir.decl_buffer(data.shape, dtype, "out_buf", data_alignment=8)
    indices_scratch = tvm.tir.decl_buffer(data.shape, dtype, "out_swap_buf", data_alignment=8)
    # Outputs: sorted values, sorted indices, plus two scratch swap buffers
    # required by the sort IR.
    results = te.extern(
        [data.shape] * 4,
        [data],
        lambda ins, outs: sort_ir(
            ins[0],
            outs[0],
            outs[2],
            -1,
            is_ascend,
            indices_out=outs[1],
            indices_out_swap=outs[3],
        ),
        out_buffers=[values_buf, indices_buf, values_scratch, indices_scratch],
        name="argsort_gpu",
        tag="argsort_gpu",
    )
    if need_transpose:
        perm = swap(list(range(ndim)), axis)
        results = [transpose(res, perm) for res in results]
    if ret_type == "indices":
        return results[1]
    return results[0], results[1]
def argsort_thrust(data, axis=-1, is_ascend=1, dtype="float32", ret_type="indices"):
    """Performs sorting along the given axis and returns an array of indices
    having same shape as an input array that index data in sorted order.

    Parameters
    ----------
    data : tvm.te.Tensor
        The input array.
    axis : int, optional
        Axis along which to sort the input tensor.
    is_ascend : boolean, optional
        Whether to sort in ascending or descending order.
    dtype : string, optional
        DType of the output indices.
    ret_type : string, optional
        The return type [both, indices].
        "both": return both sorted data and indices.
        "indices": return sorted indices only.

    Returns
    -------
    out : tvm.te.Tensor
        The output of this function.
    """
    # A thrust-backed argsort is a full top-k: k=0 means "keep all elements".
    return topk_thrust(data, k=0, axis=axis, ret_type=ret_type, is_ascend=is_ascend, dtype=dtype)
def schedule_sort(outs):
    """Schedule for sort operator.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of sort
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _schedule_sort(outs)
def schedule_argsort(outs):
    """Schedule for argsort operator.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of argsort
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    # argsort and sort share the same extern-sort schedule.
    return _schedule_sort(outs)
def topk(data, k=1, axis=-1, ret_type="both", is_ascend=False, dtype="int64"):
    """Get the top k elements in an input tensor along the given axis.

    Parameters
    ----------
    data : tvm.te.Tensor
        The input tensor.
    k : int, optional
        Number of top elements to select. Return all elements if k < 1.
    axis : int, optional
        Axis along which to sort the input tensor.
    ret_type : str, optional
        The return type [both, values, indices].
        "both": return both top k data and indices.
        "values": return top k data only.
        "indices": return top k indices only.
    is_ascend : boolean, optional
        Whether to sort in ascending or descending order.
    dtype : string, optional
        The data type of the indices output.

    Returns
    -------
    out : tvm.te.Tensor or List[tvm.te.Tensor]
        The computed result.
    """
    assert ret_type in ["both", "values", "indices"]
    ndim = len(data.shape)
    axis = axis + ndim if axis < 0 else axis
    assert 0 <= axis < ndim
    dshape = data.shape
    if axis != ndim - 1:
        # The sort IR works on the innermost axis; move `axis` there first.
        axes = swap(list(range(ndim)), axis)
        data = transpose(data, axes)
    values_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "values_buf", data_alignment=8)
    values_swap_buf = tvm.tir.decl_buffer(
        data.shape, data.dtype, "values_swap_buf", data_alignment=8
    )
    indices_buf = tvm.tir.decl_buffer(data.shape, dtype, "indices_buf", data_alignment=8)
    # Fixed typo in buffer name ("indies_swap_buf" -> "indices_swap_buf").
    indices_swap_buf = tvm.tir.decl_buffer(
        data.shape, dtype, "indices_swap_buf", data_alignment=8
    )
    if ret_type == "values":
        # Values only: sort without tracking indices.
        output = te.extern(
            [data.shape, data.shape],
            [data],
            lambda ins, outs: sort_ir(ins[0], outs[0], outs[1], -1, is_ascend),
            out_buffers=[values_buf, values_swap_buf],
            name="topk_gpu",
            tag="topk_gpu",
        )[0]
        if axis != ndim - 1:
            axes = swap(list(range(ndim)), axis)
            output = transpose(output, axes)
    else:
        # Sort values and indices together; the two trailing outputs are
        # scratch swap buffers used internally by the sort IR.
        output = te.extern(
            [data.shape, data.shape, data.shape, data.shape],
            [data],
            lambda ins, outs: sort_ir(
                ins[0],
                outs[0],
                outs[2],
                -1,
                is_ascend,
                indices_out=outs[1],
                indices_out_swap=outs[3],
            ),
            out_buffers=[values_buf, indices_buf, values_swap_buf, indices_swap_buf],
            name="topk_gpu",
            tag="topk_gpu",
        )[0:2]
        if axis != ndim - 1:
            axes = swap(list(range(ndim)), axis)
            output[0] = transpose(output[0], axes)
            output[1] = transpose(output[1], axes)
    if isinstance(k, int) and k < 1:
        # k < 1 means "return everything" -- no slicing needed.
        if ret_type == "indices":
            return output[1]
        return output
    # Slice out the leading k elements along the selection axis.
    beg = [0] * ndim
    end = []
    strides = [1] * ndim
    for i in range(ndim):
        if i == axis:
            end.append(k if isinstance(k, int) else tvm.te.size_var("dim"))
        else:
            end.append(dshape[i])
    if ret_type == "both":
        values_out, indices_out = output
        values_out = strided_slice(values_out, beg, end, strides)
        indices_out = strided_slice(indices_out, beg, end, strides)
        output = [values_out, indices_out]
    elif ret_type == "values":
        output = [strided_slice(output, beg, end, strides)]
    else:  # ret_type == "indices"
        indices_out = output[1]
        output = [strided_slice(indices_out, beg, end, strides)]
    return output
def topk_thrust(data, k=1, axis=-1, ret_type="both", is_ascend=False, dtype="int64"):
    """Get the top k elements in an input tensor along the given axis, using thrust.

    Parameters
    ----------
    data : tvm.te.Tensor
        The input tensor.
    k : int, optional
        Number of top elements to select. Return all elements if k < 1.
    axis : int, optional
        Axis along which to sort the input tensor.
    ret_type : str, optional
        The return type [both, values, indices].
        "both": return both top k data and indices.
        "values": return top k data only.
        "indices": return top k indices only.
    is_ascend : boolean, optional
        Whether to sort in ascending or descending order.
    dtype : string, optional
        The data type of the indices output.

    Returns
    -------
    out : tvm.te.Tensor or List[tvm.te.Tensor]
        The computed result.
    """
    assert ret_type in ["both", "values", "indices"]
    ndim = len(data.shape)
    if axis < 0:
        axis += ndim
    need_transpose = axis != ndim - 1
    if need_transpose:
        # Thrust sorts along the last axis; move `axis` innermost first.
        perm = swap(list(range(ndim)), axis)
        data = transpose(data, perm)
    input_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
    output_bufs = [
        tvm.tir.decl_buffer(data.shape, data.dtype, "value_buf", data_alignment=8),
        tvm.tir.decl_buffer(data.shape, dtype, "indices_buf", data_alignment=8),
    ]
    ascend_flag = 1 if is_ascend else 0
    results = te.extern(
        [data.shape, data.shape],
        [data],
        lambda ins, outs: tvm.tir.call_packed(
            "tvm.contrib.thrust.sort", ins[0], outs[0], outs[1], ascend_flag
        ),
        in_buffers=[input_buf],
        out_buffers=output_bufs,
        name="topk_gpu",
        tag="topk_gpu",
    )
    if isinstance(k, tvm.tir.IntImm):
        k = k.value
    if not isinstance(k, int) or k > 0:
        # Keep only the first k elements along the innermost axis.
        beg = [0] * ndim
        end = data.shape[:-1] + [k if isinstance(k, int) else tvm.te.size_var("dim")]
        strides = [1] * ndim
        results = [strided_slice(res, beg, end, strides) for res in results]
    if need_transpose:
        perm = swap(list(range(ndim)), axis)
        results = [transpose(res, perm) for res in results]
    if ret_type == "values":
        return results[0]
    if ret_type == "indices":
        return results[1]
    return results
def schedule_topk(outs):
    """Schedule for topk operator.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of topk
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    # topk is implemented with the extern sort kernels, so reuse their schedule.
    return _schedule_sort(outs)
def sort_by_key(keys, values, axis=-1, is_ascend=1):
    """Sort values with respect to keys. Both keys and values will
    be sorted and returned.

    Parameters
    ----------
    keys : tvm.te.Tensor
        The input keys.
    values : tvm.te.Tensor
        The input values.
    axis : int, optional
        Axis along which to sort the input tensor.
    is_ascend : boolean, optional
        Whether to sort in ascending or descending order.

    Returns
    -------
    keys_sorted : tvm.te.Tensor
        The sorted keys.
    values_sorted : tvm.te.Tensor
        The values sorted with respect to the keys.
    """
    input_bufs = [
        tvm.tir.decl_buffer(keys.shape, keys.dtype, "keys_buf", data_alignment=8),
        tvm.tir.decl_buffer(values.shape, values.dtype, "values_buf", data_alignment=8),
    ]
    # Four outputs: sorted keys/values plus two scratch swap buffers
    # required by the sort IR.
    output_bufs = [
        tvm.tir.decl_buffer(keys.shape, keys.dtype, "keys_buf", data_alignment=8),
        tvm.tir.decl_buffer(values.shape, values.dtype, "values_buf", data_alignment=8),
        tvm.tir.decl_buffer(keys.shape, keys.dtype, "keys_swap_buf", data_alignment=8),
        tvm.tir.decl_buffer(values.shape, values.dtype, "values_swap_buf", data_alignment=8),
    ]
    keys_sorted, values_sorted, _, _ = te.extern(
        [keys.shape, values.shape, keys.shape, values.shape],
        [keys, values],
        lambda ins, outs: sort_by_key_ir(
            ins[0], ins[1], outs[0], outs[1], outs[2], outs[3], axis, is_ascend
        ),
        in_buffers=input_bufs,
        out_buffers=output_bufs,
        dtype=[keys.dtype, values.dtype],
        name="sort_by_key",
        tag="sort_by_key",
    )
    return keys_sorted, values_sorted
def stable_sort_by_key_thrust(keys, values, for_scatter=False):
    """Sort values with respect to keys using thrust.
    Both keys and values will be sorted and returned.
    Sorting is done via stable sort, so relative ordering among
    ties are preserved.

    Parameters
    ----------
    keys : tvm.te.Tensor
        The 1D input keys.
    values : tvm.te.Tensor
        The 1D input values.
    for_scatter : bool, optional
        If True, negative keys are interpreted as negative indices.
        Before sorting, negative indices are converted to corresponding positive indices.
        The output keys (indices) are all positive.
        This option is introduced to optimize the scatter implementation.

    Returns
    -------
    keys_sorted : tvm.te.Tensor
        The sorted keys.
    values_sorted : tvm.te.Tensor
        The values sorted with respect to the keys.
    """
    input_bufs = [
        tvm.tir.decl_buffer(keys.shape, keys.dtype, "keys_buf", data_alignment=8),
        tvm.tir.decl_buffer(values.shape, values.dtype, "values_buf", data_alignment=8),
    ]
    # NOTE(review): the values output buffer reuses keys.shape; both inputs are
    # documented as 1D of the same length, so this matches values.shape.
    output_bufs = [
        tvm.tir.decl_buffer(keys.shape, keys.dtype, "keys_buf", data_alignment=8),
        tvm.tir.decl_buffer(keys.shape, values.dtype, "values_buf", data_alignment=8),
    ]
    keys_sorted, values_sorted = te.extern(
        [keys.shape, values.shape],
        [keys, values],
        lambda ins, outs: tvm.tir.call_packed(
            "tvm.contrib.thrust.stable_sort_by_key", ins[0], ins[1], outs[0], outs[1], for_scatter
        ),
        in_buffers=input_bufs,
        out_buffers=output_bufs,
        dtype=[keys.dtype, values.dtype],
        name="stable_sort_by_key",
        tag="stable_sort_by_key",
    )
    return keys_sorted, values_sorted
| 38,707 | 32.055508 | 154 | py |
tvm | tvm-main/python/tvm/topi/cuda/scatter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Scatter operators"""
import tvm
from tvm import te, tir, autotvm
from ..scatter import _verify_scatter_nd_inputs
from ..generic import schedule_extern
from .nms import atomic_add
from .sort import stable_sort_by_key_thrust
from ..utils import ceil_div
def gen_scatter_1d_thrust(data, indices_sorted, updates_sorted, out):
    """Generate scatter ir for 1d inputs, using a sorting based approach.
    By sorting indices and comparing neighboring two indices, we can tell which
    of elements in the indices tensor can scatter its update value into the output.
    Sorting of indices, and sorting of updates with respect to indices, can be done
    at the same time by thrust's sort_by_key function. It is important that sorting
    be done in a "stable" way via stable_sort, to guarantee deterministic output.
    Negative indices are assumed to have been converted to corresponding positive
    indices.

    Parameters
    ----------
    data : tir.Tensor
        The input data to the operator.
    indices_sorted : tir.Tensor
        The sorted index locations to update.
    updates_sorted : tir.Tensor
        The values to update, sorted with respect to indices.
    out : tir.Tensor
        The output tensor.

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    n = data.shape[0]
    ib = tvm.tir.ir_builder.create()
    out_ptr = ib.buffer_ptr(out)
    data_ptr = ib.buffer_ptr(data)
    max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    nthread_tx = max_threads
    # First kernel: copy the input data into the output buffer, one thread
    # per element.
    with ib.new_scope():
        nthread_bx = ceil_div(n, nthread_tx)
        tx = te.thread_axis("threadIdx.x")
        bx = te.thread_axis("blockIdx.x")
        ib.scope_attr(tx, "thread_extent", nthread_tx)
        ib.scope_attr(bx, "thread_extent", nthread_bx)
        tid = bx * nthread_tx + tx
        with ib.if_scope(tid < n):
            out_ptr[tid] = data_ptr[tid]
    indices_ptr = ib.buffer_ptr(indices_sorted)
    updates_ptr = ib.buffer_ptr(updates_sorted)
    ni = indices_sorted.shape[0]
    # Second kernel: one thread per sorted index. For duplicate indices, only
    # the last occurrence writes, which matches sequential scatter semantics.
    with ib.new_scope():
        nthread_bx = ceil_div(ni, nthread_tx)
        tx = te.thread_axis("threadIdx.x")
        bx = te.thread_axis("blockIdx.x")
        ib.scope_attr(tx, "thread_extent", nthread_tx)
        ib.scope_attr(bx, "thread_extent", nthread_bx)
        tid = bx * nthread_tx + tx
        with ib.if_scope(tid == ni - 1):
            # The last element can always update.
            index = indices_ptr[tid]
            update = updates_ptr[tid]
            out_ptr[index] = update
        with ib.else_scope():
            with ib.if_scope(tid < ni - 1):
                index = indices_ptr[tid]
                index_next = indices_ptr[tid + 1]
                # If the next neighbor in the sorted list of indices has a different index,
                # that means thread tid is the last one to have this index.
                # This thread can update the output.
                with ib.if_scope(index != index_next):
                    update = updates_ptr[tid]
                    out_ptr[index] = update
    return ib.get()
@autotvm.register_topi_compute("scatter_via_sort.cuda")
def scatter_via_sort(cfg, data, indices, updates, axis=0, reduction="add"):
    """Update data at positions defined by indices with values in updates,
    using a sort-based scatter.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    indices : relay.Expr
        The index locations to update.
    updates : relay.Expr
        The values to update.
    axis : int
        The axis to scatter on.

    Returns
    -------
    ret : relay.Expr
        The computed result.
    """
    assert reduction == "add"
    if axis < 0:
        axis += len(data.shape)
    assert axis == 0 and len(data.shape) == 1, "sorting based scatter only supported for 1d input"
    cfg.add_flop(1)  # A dummy value to satisfy AutoTVM
    result_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "out_buf")
    # Stable-sort indices (with updates reordered alongside them); duplicates
    # are then resolved deterministically inside gen_scatter_1d_thrust.
    sorted_indices, sorted_updates = stable_sort_by_key_thrust(indices, updates, for_scatter=True)
    return te.extern(
        [data.shape],
        [data, sorted_indices, sorted_updates],
        lambda ins, outs: gen_scatter_1d_thrust(ins[0], ins[1], ins[2], outs[0]),
        dtype=data.dtype,
        out_buffers=[result_buf],
        name="scatter_via_sort_gpu",
        tag="scatter_via_sort_gpu",
    )
@autotvm.register_topi_schedule("scatter_via_sort.cuda")
def schedule_scatter_via_sort(_, outs):
    """Schedule for scatter_via_sort: the op is an extern kernel, so fall
    back to the generic extern schedule."""
    return schedule_extern(outs)
def scatter_nd(data, indices, updates, mode):
    """Scatter elements from a n-dimension array.
    Given updates with shape (Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1}), indices with shape
    (M, Y_0, ..., Y_{K-1}), and output copied from data with shape (X_0, X_1, ..., X_{N-1}),
    scatter_nd computes
    .. code-block::
        output[indices[0, y_0, ..., y_{K-1}],
               ...,
               indices[M-1, y_0, ..., y_{K-1}],
               x_M,
               ...,
               x_{N-1}
              ] = f(output[...], updates[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}])
    where the update function f is determined by the mode.

    Parameters
    ----------
    data : tvm.te.Tensor
        The source array.
    indices : tvm.te.Tensor
        The indices of the values to extract.
    updates : tvm.te.Tensor
        The updates to apply at the Indices
    mode : string
        The update mode for the algorithm, either "update" or "add"
        If update, the update values will replace the input data
        If add, the update values will be added to the input data

    Returns
    -------
    ret : tvm.te.Tensor
    """
    _verify_scatter_nd_inputs(data, indices, updates)
    def gen_ir(data_ptr, indices_ptr, updates_ptr, out_ptr):
        """Build the TIR for scatter_nd: copy data to out, then apply updates."""
        ib = tvm.tir.ir_builder.create()
        data = ib.buffer_ptr(data_ptr)
        indices = ib.buffer_ptr(indices_ptr)
        updates = ib.buffer_ptr(updates_ptr)
        out = ib.buffer_ptr(out_ptr)
        # Scratch slot to receive atomic_add's return value (unused otherwise).
        atomic_add_return = ib.allocate(
            updates.dtype, (1,), name="atomic_add_return", scope="local"
        )
        # Product of the Y_0 ... Y_{K-1} dims: number of scattered positions.
        fused_indices_dimension = 1
        for i in indices_ptr.shape[1:]:
            fused_indices_dimension *= i
        # Product of the trailing X_M ... X_{N-1} dims: size of one update slice.
        fused_updates_dimension = 1
        for i in updates_ptr.shape[len(indices_ptr.shape) - 1 :]:
            fused_updates_dimension *= i
        # Total number of output elements.
        fused_shape = 1
        for i in data_ptr.shape:
            fused_shape *= i
        max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
        tdim = tvm.tir.min(max_threads, fused_updates_dimension)
        # Kernel 1: copy data into out, one thread per output element.
        with ib.new_scope():
            bdim = ceil_div(fused_shape, tdim)
            bx = te.thread_axis("blockIdx.x")
            tx = te.thread_axis("threadIdx.x")
            ib.scope_attr(bx, "thread_extent", bdim)
            ib.scope_attr(tx, "thread_extent", tdim)
            index = bx * tdim + tx
            with ib.if_scope(index < fused_shape):
                out[index] = data[index]
        # For better performance, we introduce blockIdx.y to implement for-loops
        # within one thread.
        # The code is parallel over the scattered indices, so we use atomic_add
        # to guarantee correctness when mode=="add"
        # For now, atomic is not supported by target "vulkan", "metal", or "cuda" with "int64"
        # So we fallback to normal algorithm, using "+=" rather than atomic_add
        # TODO (CaptainDuke):
        # Since multiple threads compete for the same write index, which leads to
        # non-determinstic output for update mode. We could add a new attribute,
        # "allow_non_deterministic", which can be conditionally set to True by
        # each frontend when non-determinsm is allowed.
        cur_target_kind = str(tvm.target.Target.current(allow_none=False).kind)
        with ib.new_scope():
            if (
                mode == "add"
                and cur_target_kind not in ["vulkan", "metal"]
                and updates.dtype in ["int32", "float32"]
            ):
                # Kernel 2a: atomic path -- one block per scattered position
                # (blockIdx.x), threads/blockIdx.y cover one update slice.
                bdim_x = fused_indices_dimension
                bdim_y = ceil_div(fused_updates_dimension, tdim)
                # In case of large input sizes, fused_indices_dimension might be too large.
                # So we use blockIdx.x because holds larger scales.
                bx = te.thread_axis("blockIdx.x")
                by = te.thread_axis("blockIdx.y")
                tx = te.thread_axis("threadIdx.x")
                ib.scope_attr(bx, "thread_extent", bdim_x)
                ib.scope_attr(by, "thread_extent", bdim_y)
                ib.scope_attr(tx, "thread_extent", tdim)
                j = by * tdim + tx
                with ib.if_scope(j < fused_updates_dimension):
                    offset = fused_updates_dimension
                    index = j  # This is x_M, .. x_{N-1} part of the index into out.
                    # Build up the indices[0, y_0, .. y_{K-1}], .. indices[M-1, y_0, .. y_{K-1}]
                    # part of the index into out.
                    up_index = bx * fused_updates_dimension + j
                    for l in reversed(range(indices_ptr.shape[0].value)):
                        # indices[bx * l * fused_indices_dimension] = indices[l, y_0, ... y_{k-1}]
                        index += offset * indices[bx + l * fused_indices_dimension]
                        offset *= data_ptr.shape[l]
                    atomic_add_return[0] = atomic_add(
                        tvm.tir.call_intrin("handle", "tir.address_of", out[index]),
                        updates[up_index],
                    )
            else:
                # Kernel 2b: sequential path -- threads cover one update slice,
                # scattered positions are walked in-order so later writes win.
                bdim_x = ceil_div(fused_updates_dimension, tdim)
                bx = te.thread_axis("blockIdx.x")
                tx = te.thread_axis("threadIdx.x")
                ib.scope_attr(bx, "thread_extent", bdim_x)
                ib.scope_attr(tx, "thread_extent", tdim)
                with ib.for_range(0, fused_indices_dimension) as i:
                    j = bx * tdim + tx
                    with ib.if_scope(j < fused_updates_dimension):
                        offset = fused_updates_dimension
                        index = j  # This is x_M, .. x_{N-1} part of the index into out.
                        # Build up the
                        # indices[0, y_0, .. y_{K-1}], ... indices[M-1, y_0, .. y_{K-1}]
                        # part of the index into out.
                        for l in reversed(range(indices_ptr.shape[0].value)):
                            # indices[i * l * fused_indices_dimension] = indices[l, y_0,
                            # ... y_{k-1}]
                            index += offset * indices[i + l * fused_indices_dimension]
                            offset *= data_ptr.shape[l]
                        if mode == "update":
                            out[index] = updates[i * fused_updates_dimension + j]
                        elif mode == "add":
                            out[index] += updates[i * fused_updates_dimension + j]
                        elif mode == "mul":
                            out[index] *= updates[i * fused_updates_dimension + j]
                        elif mode == "min":
                            out[index] = tir.min(
                                out[index], updates[i * fused_updates_dimension + j]
                            )
                        elif mode == "max":
                            out[index] = tir.max(
                                out[index], updates[i * fused_updates_dimension + j]
                            )
                        else:
                            raise NotImplementedError(
                                "scatter_nd mode not in [update, add, mul, min, max]:", mode
                            )
        return ib.get()
    out_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "out_buf")
    return te.extern(
        [data.shape],
        [data, indices, updates],
        lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], outs[0]),
        dtype=data.dtype,
        out_buffers=[out_buf],
        name="scatter_nd_cuda",
        tag="scatter_nd_cuda",
    )
| 13,032 | 37.445428 | 98 | py |
tvm | tvm-main/python/tvm/topi/cuda/argwhere.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-arguments, invalid-name
"""Argwhere operator"""
import logging
import tvm
from tvm import te
from .injective import schedule_injective_from_existing
from .scan import exclusive_scan
from .. import tag
from ..utils import ceil_div, prod
from ..transform import reshape
from ..broadcast import not_equal
from ..math import cast
logger = logging.getLogger("topi")
# Short aliases for TVM's floor-division / floor-modulo intrinsics, used to
# unravel a flat index into multi-dimensional coordinates in the argwhere_*d
# helpers below.
fdiv = tvm.tir.floordiv
fmod = tvm.tir.floormod
def compact_nonzero_indices_ir(condition, write_indices, out, do_write_func):
    """Copy nonzero indices to the corresponding write locations.

    Parameters
    ----------
    condition : Buffer
        The input condition.
    write_indices : Buffer
        The result of exclusive scan on a boolean array, where True indicates that
        the condition is non zero at that position.
    out : Buffer
        The output buffer to copy indices to.
    do_write_func : a function
        A callback that accepts an output buffer, a dst index to write to, and a src index.

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    ib = tvm.tir.ir_builder.create()
    size_1d = prod(condition.shape)
    condition = ib.buffer_ptr(condition)
    write_indices = ib.buffer_ptr(write_indices)
    out = ib.buffer_ptr(out)
    # One thread per element of the flattened condition tensor.
    nthread_tx = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    nthread_bx = ceil_div(size_1d, nthread_tx)
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", nthread_tx)
    ib.scope_attr(bx, "thread_extent", nthread_bx)
    with ib.new_scope():
        idx = bx * nthread_tx + tx
        with ib.if_scope(idx < size_1d):
            # The exclusive scan gives each nonzero element a unique, dense
            # destination slot; the callback decodes idx into coordinates.
            with ib.if_scope(condition[idx] != 0):
                do_write_func(out, write_indices[idx], idx)
    return ib.get()
def argwhere_common(output_shape, condition, do_write_func):
    """A common compute used by argwhere of various ranks.

    Parameters
    ----------
    output_shape : list of int or tvm.tir.Any
        Shape of the output index tensor.
    condition : tvm.te.Tensor
        The input condition.
    do_write_func : a function
        A callback that accepts an output buffer, a dst index to write to, and a src index.

    Returns
    -------
    out : tvm.te.Tensor
        Indices of non-zero elements.
    """
    # Flatten the condition into a 0/1 mask, then exclusive-scan it so that
    # each nonzero element knows its dense destination slot in the output.
    nonzero_mask = not_equal(condition, tvm.tir.const(0))
    flat_mask = reshape(nonzero_mask, (prod(nonzero_mask.shape),))
    write_indices = exclusive_scan(cast(flat_mask, dtype="int32"))
    input_bufs = [
        tvm.tir.decl_buffer(condition.shape, condition.dtype, "data_buf", data_alignment=8),
        tvm.tir.decl_buffer(
            write_indices.shape, write_indices.dtype, "write_indices_buf", data_alignment=8
        ),
    ]
    result_buf = tvm.tir.decl_buffer(output_shape, "int32", "out_buf", data_alignment=8)
    return te.extern(
        [output_shape],
        [condition, write_indices],
        lambda ins, outs: compact_nonzero_indices_ir(ins[0], ins[1], outs[0], do_write_func),
        dtype=["int32"],
        in_buffers=input_bufs,
        out_buffers=[result_buf],
        name="argwhere",
        tag="argwhere_gpu",
    )
def argwhere_1d(output_shape, condition):
    """Compute for argwhere 1D.

    Parameters
    ----------
    output_shape : list of int or tvm.tir.Any
        Shape of the output index tensor.
    condition : tvm.te.Tensor
        Tensor with boolean values.

    Returns
    -------
    out : tvm.te.Tensor
        Indices of non-zero elements.
    """

    def _write(out, dst, src):
        # In 1D the flat index is already the coordinate.
        out[dst] = src

    return argwhere_common(output_shape, condition, _write)
def argwhere_2d(output_shape, condition):
    """Compute for argwhere 2D.

    Parameters
    ----------
    output_shape : list of int or tvm.tir.Any
        Shape of the output index tensor.
    condition : tvm.te.Tensor
        Tensor with boolean values.

    Returns
    -------
    out : tvm.te.Tensor
        Indices of non-zero elements.
    """

    def do_write(out, write_index, idx):
        a1 = condition.shape[1]
        # Unravel the flat index into (row, col). Use the module-level
        # fdiv/fmod aliases for consistency with the 3D-5D variants.
        out[write_index * 2] = fdiv(idx, a1)
        out[write_index * 2 + 1] = fmod(idx, a1)

    return argwhere_common(output_shape, condition, do_write)
def argwhere_3d(output_shape, condition):
    """Compute for argwhere 3D.

    Parameters
    ----------
    output_shape : list of int or tvm.tir.Any
        Shape of the output index tensor.
    condition : tvm.te.Tensor
        Tensor with boolean values.

    Returns
    -------
    out : tvm.te.Tensor
        Indices of non-zero elements.
    """

    def _write(out, dst, src):
        # Unravel the flat index into (d0, d1, d2) coordinates.
        _, dim1, dim2 = condition.shape
        stride0 = dim1 * dim2
        out[dst * 3] = fdiv(src, stride0)
        out[dst * 3 + 1] = fdiv(fmod(src, stride0), dim2)
        out[dst * 3 + 2] = fmod(src, dim2)

    return argwhere_common(output_shape, condition, _write)
def argwhere_4d(output_shape, condition):
    """Compute for argwhere 4D.

    Parameters
    ----------
    output_shape : list of int or tvm.tir.Any
        Shape of the output index tensor.
    condition : tvm.te.Tensor
        Tensor with boolean values.

    Returns
    -------
    out : tvm.te.Tensor
        Indices of non-zero elements.
    """

    def _write(out, dst, src):
        # Unravel the flat index into (d0, d1, d2, d3) coordinates.
        _, dim1, dim2, dim3 = condition.shape
        stride1 = dim2 * dim3
        stride0 = dim1 * stride1
        out[dst * 4] = fdiv(src, stride0)
        out[dst * 4 + 1] = fdiv(fmod(src, stride0), stride1)
        out[dst * 4 + 2] = fdiv(fmod(src, stride1), dim3)
        out[dst * 4 + 3] = fmod(src, dim3)

    return argwhere_common(output_shape, condition, _write)
def argwhere_5d(output_shape, condition):
    """Compute for argwhere 5D.

    Parameters
    ----------
    output_shape : list of int or tvm.tir.Any
        Shape of the output index tensor.
    condition : tvm.te.Tensor
        Tensor with boolean values.

    Returns
    -------
    out : tvm.te.Tensor
        Indices of non-zero elements.
    """

    def _write(out, dst, src):
        # Unravel the flat index into (d0, d1, d2, d3, d4) coordinates.
        _, dim1, dim2, dim3, dim4 = condition.shape
        stride2 = dim3 * dim4
        stride1 = dim2 * stride2
        stride0 = dim1 * stride1
        out[dst * 5] = fdiv(src, stride0)
        out[dst * 5 + 1] = fdiv(fmod(src, stride0), stride1)
        out[dst * 5 + 2] = fdiv(fmod(src, stride1), stride2)
        out[dst * 5 + 3] = fdiv(fmod(src, stride2), dim4)
        out[dst * 5 + 4] = fmod(src, dim4)

    return argwhere_common(output_shape, condition, _write)
def argwhere(output_shape, condition):
    """Find the indices of elements of a tensor that are non-zero.

    Parameters
    ----------
    output_shape : tvm.te.Tensor
        Tensor with output shape info.
    condition : tvm.te.Tensor
        Tensor with boolean values.

    Returns
    -------
    out : tvm.te.Tensor
        Indices of non-zero elements.
    """
    # Dispatch on the rank of the condition tensor.
    impls = {
        1: argwhere_1d,
        2: argwhere_2d,
        3: argwhere_3d,
        4: argwhere_4d,
        5: argwhere_5d,
    }
    rank = len(condition.shape)
    if rank not in impls:
        raise ValueError("Argwhere does not support rank higher than 5")
    return impls[rank](output_shape.shape, condition)
def schedule_argwhere(outs):
    """Schedule for argwhere on cuda.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of argwhere
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for argwhere
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])
    visited = []

    def _walk(op):
        # Schedule injective ops in place, then recurse into producers that
        # have not been visited yet.
        if tag.is_injective(op.tag):
            schedule_injective_from_existing(sch, op.output(0))
        for inp in op.input_tensors:
            if inp.op.input_tensors and inp.op not in visited:
                _walk(inp.op)
        visited.append(op)

    for tensor in outs:
        _walk(tensor.op)
    return sch
| 8,918 | 26.443077 | 93 | py |
tvm | tvm-main/python/tvm/topi/cuda/dense_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements, unused-argument
"""Compute and Schedule definition for dense tensorcore with cuda backend"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te, autotvm
from .. import tag
from ..utils import traverse_inline, get_const_tuple
from .tensor_intrin import (
intrin_wmma_load_matrix_A,
intrin_wmma_load_matrix_W,
intrin_wmma_store_matrix,
intrin_wmma_gemm,
)
@autotvm.register_topi_compute("dense_tensorcore.cuda")
def dense_tensorcore(cfg, data, weight, bias=None, out_dtype=None):
    """Dense tensorcore operator on CUDA.

    Thin AutoTVM wrapper around :func:`dense_tensorcore_cuda`.
    """
    return dense_tensorcore_cuda(data, weight, bias, out_dtype)
@autotvm.register_topi_schedule("dense_tensorcore.cuda")
def schedule_dense_tensorcore(cfg, outs):
    """Schedule dense operator using Tensorcore."""
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sched = te.create_schedule([tensor.op for tensor in outs])

    def _fcallback(op):
        # Apply the tensorcore schedule only to the matmul stage.
        if op.tag == "dense_tensorcore":
            _schedule_dense_tensorcore(cfg, sched, op.output(0))

    traverse_inline(sched, outs[0].op, _fcallback)
    return sched
def dense_tensorcore_cuda(data, weight, bias=None, out_dtype=None):
    """Declare the Tensor Core dense compute.

    out[i, j] = sum_k data[i, k] * weight[j, k], optionally plus bias[j].
    ``weight`` is stored as (out_dim, in_dim), i.e. the reduction runs over
    the second axis of both operands.
    """
    assert len(data.shape) == 2 and len(weight.shape) == 2, "only support 2-dim dense"
    if bias is not None:
        assert len(bias.shape) == 1
    out_dtype = data.dtype if out_dtype is None else out_dtype
    batch, in_dim = get_const_tuple(data.shape)
    out_dim, _ = get_const_tuple(weight.shape)
    assert data.dtype == weight.dtype
    assert data.dtype in ["float16", "int8", "uint8", "int4", "uint4"]
    # WMMA fragments only exist for particular (m, n, k) tile shapes, so the
    # problem size must divide one of the supported fragment combinations.
    if data.dtype in ["float16", "int8", "uint8"]:
        shape_ok = any(
            batch % frag_m == 0 and in_dim % 16 == 0 and out_dim % frag_n == 0
            for frag_m, frag_n in ((8, 32), (16, 16), (32, 8))
        )
        assert shape_ok, (
            "The shape of (batch, in_dim, out_dim) "
            "must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32) for now"
        )
    else:
        assert (
            batch % 8 == 0 and in_dim % 32 == 0 and out_dim % 8 == 0
        ), "The shape of (batch, in_dim, out_dim) must be multiple of (8, 32, 8)"
    rk = te.reduce_axis((0, in_dim), name="k")
    out = te.compute(
        (batch, out_dim),
        lambda row, col: te.sum(
            data[row, rk].astype(out_dtype) * weight[col, rk].astype(out_dtype), axis=rk
        ),
        name="T_dense",
        tag="dense_tensorcore",
    )
    if bias is not None:
        # The lambda captures the matmul tensor before `out` is rebound.
        out = te.compute(
            (batch, out_dim),
            lambda row, col: out[row, col] + bias[col].astype(out_dtype),
            tag=tag.BROADCAST,
        )
    return out
def _schedule_dense_tensorcore(cfg, s, C):
    """Schedule dense operator using Tensorcore.

    Builds the classic WMMA pipeline for C = A x B^T:
    global -> shared (AS/BS) -> wmma fragments (AF/BF) -> wmma accumulator
    (CF) -> shared (CS) -> global output, then tensorizes each stage with
    the WMMA load/gemm/store intrinsics.

    Parameters
    ----------
    cfg : ConfigEntity
        AutoTVM config supplying the tiling knobs.
    s : Schedule
        The schedule being mutated in place.
    C : Tensor
        Output of the stage tagged "dense_tensorcore".
    """
    A, B = s[C].op.input_tensors
    # If B is a unary op computed purely from A (e.g. a layout transform),
    # fold it into its consumer.
    if len(B.op.input_tensors) == 1 and B.op.input_tensors[0] == A:
        s[B].compute_inline()
    batch, out_dim = get_const_tuple(C.shape)
    data_dtype = A.dtype
    out_dtype = C.dtype
    # Explicit memory hierarchy: shared-memory staging plus WMMA fragment
    # caches for the operands, accumulator and store path.
    AS = s.cache_read(A, "shared", [C])
    BS = s.cache_read(B, "shared", [C])
    AF = s.cache_read(AS, "wmma.matrix_a", [C])
    BF = s.cache_read(BS, "wmma.matrix_b", [C])
    CF = s.cache_write(C, "wmma.accumulator")
    CS = s.cache_read(CF, "shared", [C])
    # fallback support: when no tuned config exists, seed the knobs from the
    # tophub reference log instead of the (possibly invalid) defaults
    target = tvm.target.Target.current()
    if cfg.is_fallback:
        ref_log = autotvm.tophub.load_reference_log(
            target.kind.name, target.model, "dense_tensorcore.cuda"
        )
        cfg.fallback_with_reference_log(ref_log)
    # Deal with op fusion, such as bias and relu: inline the matmul into the
    # fused output and schedule that output instead.
    if C.op not in s.outputs:
        s[C].compute_inline()
        C = s.outputs[0].output(0)
    # create tuning space
    cfg.define_knob("block_row_warps", [1, 2, 4])
    cfg.define_knob("block_col_warps", [1, 2, 4])
    cfg.define_knob("warp_row_tiles", [1, 2, 4])
    cfg.define_knob("warp_col_tiles", [1, 2, 4])
    cfg.define_knob("chunk", [1, 2, 4, 8])
    # offset / offsetCS pad shared-memory rows to reduce bank conflicts.
    cfg.define_knob("offset", [0, 8])
    cfg.define_knob("offsetCS", [0, 8])
    cfg.define_knob("vec", [1, 2, 4, 8])
    if data_dtype in ["float16", "int8", "uint8"]:
        # Ensure that the default parameters are applicable when autotvm is
        # not in use: the first knob candidate must divide (batch, out_dim).
        if batch % 32 == 0 and out_dim % 8 == 0:
            cfg.define_knob("wmma_m", [32, 16, 8])
        elif batch % 16 == 0 and out_dim % 16 == 0:
            cfg.define_knob("wmma_m", [16, 8, 32])
        elif batch % 8 == 0 and out_dim % 32 == 0:
            cfg.define_knob("wmma_m", [8, 16, 32])
        wmma_k = 16
        wmma_m = cfg["wmma_m"].val
        # wmma_n is determined by wmma_m: only (16,16), (8,32), (32,8)
        # fragment shapes are valid at wmma_k == 16.
        if wmma_m == 16:
            wmma_n = 16
        elif wmma_m == 8:
            wmma_n = 32
        elif wmma_m == 32:
            wmma_n = 8
    elif data_dtype in ["int4", "uint4"]:
        # 4-bit WMMA only supports the 8x8x32 fragment.
        wmma_m = wmma_n = 8
        wmma_k = 32
    else:
        raise ValueError(f"data dtype {data_dtype} is not yet supported")
    warp_size = 32
    block_row_warps = cfg["block_row_warps"].val
    block_col_warps = cfg["block_col_warps"].val
    warp_row_tiles = cfg["warp_row_tiles"].val
    warp_col_tiles = cfg["warp_col_tiles"].val
    chunk = cfg["chunk"].val
    offset = cfg["offset"].val
    offsetCS = cfg["offsetCS"].val
    vec = cfg["vec"].val
    # Define the stride of intrin functions (row strides of the padded
    # shared-memory buffers and of the fragment caches).
    AS_align = chunk * wmma_k + offset
    BS_align = chunk * wmma_k + offset
    CS_align = warp_col_tiles * block_col_warps * wmma_n + offsetCS
    AS_stride = [AS_align, 1]
    BS_stride = [BS_align, 1]
    AF_stride = [wmma_k, 1]
    BF_stride = [wmma_k, 1]
    CF_stride = [warp_col_tiles * wmma_n, 1]
    CS_stride = [CS_align, 1]
    block_x = te.thread_axis("blockIdx.x")
    block_y = te.thread_axis("blockIdx.y")
    thread_x = te.thread_axis("threadIdx.x")
    thread_y = te.thread_axis("threadIdx.y")
    thread_z = te.thread_axis("threadIdx.z")
    # Schedule for dense computation: one thread block covers a
    # block_factor_b x block_factor_o tile of the output.
    block_factor_b = wmma_m * warp_row_tiles * block_row_warps
    block_factor_o = wmma_n * warp_col_tiles * block_col_warps
    b, o = C.op.axis
    block_i, bc = s[C].split(b, factor=block_factor_b)
    block_j, oc = s[C].split(o, factor=block_factor_o)
    s[C].reorder(block_i, block_j, bc, oc)
    t = s[C].fuse(bc, oc)
    t, vi = s[C].split(t, factor=vec)
    t, tx = s[C].split(t, factor=warp_size)
    t, ty = s[C].split(t, factor=block_row_warps)
    t, tz = s[C].split(t, factor=block_col_warps)
    s[C].bind(block_i, block_x)
    s[C].bind(block_j, block_y)
    s[C].bind(tz, thread_z)
    s[C].bind(ty, thread_y)
    s[C].bind(tx, thread_x)
    s[C].vectorize(vi)
    # Schedule for wmma store: stage the accumulator through padded shared
    # memory before the vectorized global write above.
    s[CS].compute_at(s[C], block_j)
    bb, oo = CS.op.axis
    s[CS].storage_align(bb, CS_align - 1, CS_align)
    bb, bbi = s[CS].split(bb, factor=wmma_m)
    oo, ooi = s[CS].split(oo, factor=wmma_n)
    bb, bbii = s[CS].split(bb, factor=warp_row_tiles)
    oo, ooii = s[CS].split(oo, factor=warp_col_tiles)
    s[CS].reorder(bb, oo, bbii, ooii, bbi, ooi)
    s[CS].bind(bb, thread_y)
    s[CS].bind(oo, thread_z)
    # Schedule for wmma computation: split each warp's tile into fragment
    # sized pieces and split the reduction into chunked k-slices.
    s[CF].compute_at(s[CS], oo)
    warp_i, warp_j = CF.op.axis
    warp_i, _ii = s[CF].split(warp_i, factor=wmma_m)
    warp_j, _jj = s[CF].split(warp_j, factor=wmma_n)
    (k,) = CF.op.reduce_axis
    k, _k = s[CF].split(k, factor=wmma_k)
    ko, ki = s[CF].split(k, factor=chunk)
    s[CF].reorder(ko, ki, warp_i, warp_j, _ii, _jj, _k)
    # Schedule for wmma_matrix_a load
    s[AF].compute_at(s[CF], ki)
    b, i = AF.op.axis
    b, b_ii = s[AF].split(b, factor=wmma_m)
    i, i_jj = s[AF].split(i, factor=wmma_k)
    s[AF].reorder(b, i, b_ii, i_jj)
    # Schedule for wmma_matrix_b load
    s[BF].compute_at(s[CF], ki)
    o, i = BF.op.axis
    o, o_ii = s[BF].split(o, factor=wmma_n)
    i, i_ii = s[BF].split(i, factor=wmma_k)
    s[BF].reorder(o, i, o_ii, i_ii)
    # Schedule for A's(B's) shared memory load: cooperative, vectorized copy
    # performed by the whole thread block at the outer reduction loop.
    def shared_schedule(stage, strides):
        s[stage].compute_at(s[CF], ko)
        xo, yo = stage.op.axis
        s[stage].storage_align(xo, strides - 1, strides)
        t = s[stage].fuse(xo, yo)
        t, vi = s[stage].split(t, factor=vec)
        t, tx = s[stage].split(t, factor=warp_size)
        t, ty = s[stage].split(t, factor=block_row_warps)
        _, tz = s[stage].split(t, factor=block_col_warps)
        s[stage].bind(ty, thread_y)
        s[stage].bind(tz, thread_z)
        s[stage].bind(tx, thread_x)
        s[stage].vectorize(vi)
    shared_schedule(AS, AS_align)
    shared_schedule(BS, BS_align)
    # One fragment-sized gemm used as the tensorize pattern.
    shape = (wmma_m, wmma_n, wmma_k)
    AL_gemm = te.placeholder((wmma_m, wmma_k), name="AL_gemm", dtype=data_dtype)
    BL_gemm = te.placeholder((wmma_n, wmma_k), name="BL_gemm", dtype=data_dtype)
    k_gemm = te.reduce_axis((0, wmma_k), name="k_gemm")
    CL_compute = te.compute(
        (wmma_m, wmma_n),
        lambda ii, jj: te.sum(
            AL_gemm[ii, k_gemm].astype(out_dtype) * BL_gemm[jj, k_gemm].astype(out_dtype),
            axis=k_gemm,
        ),
        name="CL_compute",
    )
    # lower the computation loops down to TensorCore hardware intrinsics
    # by mapping the dense tensorcore to tensor intrinsics
    s[AF].tensorize(
        b_ii,
        intrin_wmma_load_matrix_A(
            AF_stride, AS_stride, shape, "row_major", (wmma_m, wmma_k), (wmma_m, wmma_k), data_dtype
        ),
    )
    s[BF].tensorize(
        o_ii,
        intrin_wmma_load_matrix_W(
            BF_stride, BS_stride, shape, "col_major", (wmma_n, wmma_k), (wmma_n, wmma_k), data_dtype
        ),
    )
    s[CF].tensorize(
        _ii, intrin_wmma_gemm(AL_gemm, BL_gemm, CL_compute, AF_stride, BF_stride, CF_stride, shape)
    )
    s[CS].tensorize(
        bbi,
        intrin_wmma_store_matrix(
            CS_stride, CF_stride, shape, out_dtype, (wmma_m, wmma_n), (wmma_m, wmma_n)
        ),
    )
| 10,714 | 35.569966 | 100 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv2d_int8.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
# pylint: disable=no-value-for-parameter
"""Int8 conv2d in NCHWc layout"""
import tvm
from tvm import te
from tvm import autotvm
from .injective import schedule_injective_from_existing
from .tensor_intrin import dp4a
from ..nn.pad import pad
from ..nn.conv2d import unpack_NCHWc_to_nchw
from ..nn.utils import get_pad_tuple
from ..utils import get_const_tuple, traverse_inline
def conv2d_nchw_int8(data, kernel, strides, padding, dilation, out_dtype="int32"):
    """Int8 NCHW conv2d: pack to NCHWc, run the NCHWc kernel, unpack back."""
    for operand in (data, kernel):
        assert operand.dtype in ("int8", "uint8")
    assert data.dtype == kernel.dtype
    nchwc_out = conv2d_NCHWc_int8(data, kernel, strides, padding, dilation, "NCHW", out_dtype)
    return unpack_NCHWc_to_nchw(nchwc_out, out_dtype)
def schedule_conv2d_nchw_int8(outs):
    """Schedule for conv2d_nchw_int8 — reuses the NCHWc int8 schedule."""
    return schedule_conv2d_NCHWc_int8(outs)
@autotvm.register_topi_compute("conv2d_NCHWc_int8.cuda")
def conv2d_NCHWc_int8(cfg, data, kernel, stride, padding, dilation, layout, out_dtype):
    """Convolution operator in NCHW[x]c layout for int8.
    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width] or
        5-D with shape [batch, in_channel_chunk, in_height, in_width, in_channel_block]
    kernel : tvm.te.Tensor
        4-D with shape [num_filter, in_channel, filter_height, filter_width] or
        6-D with shape [num_filter_chunk, in_channel_chunk, filter_height,
        filter_width, num_filter_block, in_channel_block]
    stride : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding: int or a list/tuple of two ints
        padding size, or [pad_height, pad_width]
    dilation: int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    layout : str
        layout of data
    out_dtype : str
        The output type. This is used for mixed precision.
    Returns
    -------
    output : tvm.te.Tensor
        5-D with shape [batch, out_channel_chunk, out_height, out_width, out_channel_block]
    """
    assert layout in ["NCHW", "NCHW4c"]
    # Channels are packed in groups of 4 to match the dp4a 4-element dot product.
    ic_block_factor = 4
    oc_block_factor = 4
    # 6-D kernel means the layout transform was already applied by alter_op_layout.
    pre_computed = len(kernel.shape) == 6
    if not pre_computed:
        batch, channels, height, width = get_const_tuple(data.shape)
        assert (
            channels % ic_block_factor == 0
        ), f"Number of input channels should be multiple of {ic_block_factor}"
        # NCHW -> NCHW4c: split the channel axis into (chunk, block-of-4).
        packed_data = te.compute(
            (batch, channels // ic_block_factor, height, width, ic_block_factor),
            lambda n, c, h, w, vc: data[n, c * ic_block_factor + vc, h, w],
            name="packed_data",
        )
        out_channels, in_channels, kernel_h, kernel_w = get_const_tuple(kernel.shape)
        assert (
            out_channels % oc_block_factor == 0
        ), f"Number of output channels should be multiple of {oc_block_factor}"
        # OIHW -> OIHW4o4i: split both channel axes into (chunk, block-of-4).
        packed_kernel = te.compute(
            (
                out_channels // oc_block_factor,
                in_channels // ic_block_factor,
                kernel_h,
                kernel_w,
                oc_block_factor,
                ic_block_factor,
            ),
            lambda oc_chunk, ic_chunk, kh, kw, oc_block, ic_block: kernel[
                oc_chunk * oc_block_factor + oc_block, ic_chunk * ic_block_factor + ic_block, kh, kw
            ],
            name="packed_kernel",
        )
    else:
        packed_data = data
        packed_kernel = kernel
    batch, ic_chunk, in_height, in_width, ic_block = get_const_tuple(packed_data.shape)
    oc_chunk, ic_chunk, kernel_h, kernel_w, oc_block, ic_block = get_const_tuple(
        packed_kernel.shape
    )
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(padding, (kernel_h, kernel_w))
    # compute graph: pad only the spatial axes of the 5-D packed layout
    pad_before = [0, 0, pad_top, pad_left, 0]
    pad_after = [0, 0, pad_down, pad_right, 0]
    pad_data = pad(packed_data, pad_before, pad_after, name="pad_data")
    # compute the output shape using the dilated kernel extent
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    out_height = (in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1
    out_width = (in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1
    oshape = (batch, oc_chunk, out_height, out_width, oc_block)
    icc = te.reduce_axis((0, ic_chunk), name="ic_chunk")
    icb = te.reduce_axis((0, ic_block), name="ic_block")
    kh = te.reduce_axis((0, kernel_h), name="kh")
    kw = te.reduce_axis((0, kernel_w), name="kw")
    # Accumulate in 32 bits (signedness follows the input dtype) to avoid
    # int8 overflow; the final cast to out_dtype happens in `output`.
    packed_kernel_dtype = packed_kernel.dtype
    packed_dtype = "int32" if packed_kernel_dtype == "int8" else "uint32"
    conv = te.compute(
        oshape,
        lambda n, oc_chunk, oh, ow, oc_block: te.sum(
            pad_data[
                n, icc, oh * stride_h + kh * dilation_h, ow * stride_w + kw * dilation_w, icb
            ].astype(packed_dtype)
            * packed_kernel[oc_chunk, icc, kh, kw, oc_block, icb].astype(packed_dtype),
            axis=[icc, kh, kw, icb],
        ),
    )
    output = te.compute(
        oshape,
        lambda n, oc_chunk, oh, ow, oc_block: conv[n, oc_chunk, oh, ow, oc_block].astype(out_dtype),
        tag="conv2d_NCHWc_int8",
    )
    # num flop: 2 ops (multiply + add) per MAC over all output elements
    # and reduction axes.
    num_flop = (
        batch
        * oc_chunk
        * oc_block
        * out_height
        * out_width
        * ic_chunk
        * ic_block
        * kernel_h
        * kernel_w
        * 2
    )
    cfg.add_flop(num_flop)
    return output
@autotvm.register_topi_schedule("conv2d_NCHWc_int8.cuda")
def schedule_conv2d_NCHWc_int8(cfg, outs):
    """Build the CUDA schedule for the int8 NCHWc conv2d template."""
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sched = te.create_schedule([out.op for out in outs])

    def _callback(op):
        # Only the stage tagged by conv2d_NCHWc_int8 needs the custom schedule.
        if op.tag == "conv2d_NCHWc_int8":
            _schedule_conv2d_NCHWc_int8(cfg, sched, op.output(0))

    traverse_inline(sched, outs[0].op, _callback)
    return sched
def _schedule_conv2d_NCHWc_int8(cfg, s, output):
    """Schedule the int8 NCHWc conv2d stage.

    Tiles the spatial/channel axes across blocks, virtual threads and
    threads, stages data/kernel through shared memory, and (when the target
    supports it) tensorizes the innermost 4-wide channel reduction with the
    dp4a dot-product intrinsic.
    """
    conv = output.op.input_tensors[0]
    packed_data, packed_kernel = conv.op.input_tensors
    # If a pad stage was inserted, peel it off to find the real packed data.
    if isinstance(packed_data.op, tvm.te.ComputeOp) and "pad" in packed_data.op.tag:
        pad_data = packed_data
        packed_data = pad_data.op.input_tensors[0]
    else:
        pad_data = packed_data
    if autotvm.GLOBAL_SCOPE.in_tuning:
        # skip this part during tuning to make records accurate
        # this part will be pre-computed during NNVM's pre-compute optimization pass
        s[packed_data].pragma(s[packed_data].op.axis[0], "debug_skip_region")
        s[packed_kernel].pragma(s[packed_kernel].op.axis[0], "debug_skip_region")
    else:
        if isinstance(packed_kernel.op, tvm.te.ComputeOp) and packed_kernel.name == "packed_kernel":
            # data and kernel are not pre-computed, schedule layout transform here
            schedule_injective_from_existing(s, packed_data)
            schedule_injective_from_existing(s, packed_kernel)
    if pad_data != packed_data:
        s[pad_data].compute_inline()
    # create cache stage: shared-memory staging for data and kernel,
    # register accumulation for the convolution itself
    AA = s.cache_read(pad_data, "shared", [conv])
    WW = s.cache_read(packed_kernel, "shared", [conv])
    s[conv].set_scope("local")
    # handle bias (or other fused elementwise ops): schedule the fused
    # output and inline the bare conv result into it
    if output.op not in s.outputs:
        s[output].compute_inline()
        output = s.outputs[0].output(0)
    # tile and bind spatial axes
    if len(s[output].op.axis) == 5:
        n, f, y, x, c = s[output].op.axis
    else:
        # For task extraction of auto-tuning, the expected output is 4D. Since auto-tuning tasks
        # are created from scratch, therefore the real auto-tuning will still happen on 5D output.
        n, f, y, x = s[output].op.axis
    cfg.define_split("tile_n", cfg.axis(n), num_outputs=4)
    cfg.define_split("tile_f", cfg.axis(f), num_outputs=4)
    cfg.define_split("tile_y", cfg.axis(y), num_outputs=4)
    cfg.define_split("tile_x", cfg.axis(x), num_outputs=4)
    # this is the scope to attach global config inside this kernel
    kernel_scope, n = s[output].split(n, nparts=1)
    bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n)
    bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
    by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
    s[output].reorder(bn, bf, by, bx, vn, vf, vy, vx, tn, tf, ty, tx, ni, fi, yi, xi)
    s[output].bind(bn, te.thread_axis("blockIdx.z"))
    s[output].bind(bf, te.thread_axis("blockIdx.y"))
    s[output].bind(s[output].fuse(by, bx), te.thread_axis("blockIdx.x"))
    s[output].bind(vn, te.thread_axis("vthread"))
    s[output].bind(vf, te.thread_axis("vthread"))
    s[output].bind(vy, te.thread_axis("vthread"))
    s[output].bind(vx, te.thread_axis("vthread"))
    # Tunable choice of which tile axes map to which thread dimensions.
    cfg.define_knob("fuse_yx", [0, 1])  # fuse ty,tx or tn,tf
    if cfg["fuse_yx"].val:
        s[output].bind(tn, te.thread_axis("threadIdx.z"))
        s[output].bind(tf, te.thread_axis("threadIdx.y"))
        tyx = s[output].fuse(ty, tx)
        s[output].bind(tyx, te.thread_axis("threadIdx.x"))
        s[conv].compute_at(s[output], tyx)
        # number of threads
        n_tz = cfg["tile_n"].size[2]
        n_ty = cfg["tile_f"].size[2]
        n_tx = cfg["tile_y"].size[2] * cfg["tile_x"].size[2]
    else:
        s[output].bind(s[output].fuse(tn, tf), te.thread_axis("threadIdx.z"))
        s[output].bind(ty, te.thread_axis("threadIdx.y"))
        s[output].bind(tx, te.thread_axis("threadIdx.x"))
        s[conv].compute_at(s[output], tx)
        # number of threads
        n_tz = cfg["tile_n"].size[2] * cfg["tile_f"].size[2]
        n_ty = cfg["tile_y"].size[2]
        n_tx = cfg["tile_x"].size[2]
    # tile and bind reduction axes
    n, f, y, x, c = s[conv].op.axis
    rc, ry, rx, rc_block = s[conv].op.reduce_axis
    cfg.define_split("tile_rc", cfg.axis(rc), num_outputs=2)
    cfg.define_split("tile_ry", cfg.axis(ry), num_outputs=2)
    cfg.define_split("tile_rx", cfg.axis(rx), num_outputs=2)
    rco, rci = cfg["tile_rc"].apply(s, conv, rc)
    ryo, ryi = cfg["tile_ry"].apply(s, conv, ry)
    rxo, rxi = cfg["tile_rx"].apply(s, conv, rx)
    s[conv].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x, c, rc_block)
    cfg.define_reorder("reorder_inner", [rco, ryo, rxo], policy="all")
    cfg["reorder_inner"].apply(s, conv, [rco, ryo, rxo])
    cfg["reorder_inner"].apply(s, conv, [rci, ryi, rxi])
    # Split out a 4-wide innermost reduction so it matches the dp4a intrinsic.
    _, rc_block = s[conv].split(rc_block, factor=4)
    target = tvm.target.Target.current(allow_none=False)
    do_tensorize = "+dotprod" in target.mattr or target.supports_integer_dot_product
    if do_tensorize:
        dtypes = (pad_data.dtype, packed_kernel.dtype)
        s[conv].tensorize(rc_block, dp4a("shared", "shared", "local", dtypes))
    # Attach the shared-memory loads at the outermost reduction loop chosen
    # by the reorder knob.
    cache_loc = [rco, ryo, rxo][cfg["reorder_inner"].perm[-1]]
    s[AA].compute_at(s[conv], cache_loc)
    s[WW].compute_at(s[conv], cache_loc)
    # cooperative fetching: all threads of the block copy the shared tiles,
    # vectorized over the 4-wide channel block
    for load in [AA, WW]:
        c = s[load].op.axis[-1]
        c_outer, c = s[load].split(c, factor=4)
        s[load].vectorize(c)
        fused = s[load].op.axis[:-1] + [c_outer]
        fused = s[load].fuse(*fused)
        fused, tx = s[load].split(fused, factor=n_tx)
        fused, ty = s[load].split(fused, factor=n_ty)
        fused, tz = s[load].split(fused, factor=n_tz)
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
    # double buffer (overlap shared-memory loads with compute)
    cfg.define_knob("AA_double_buffer", [0, 1])
    cfg.define_knob("WW_double_buffer", [0, 1])
    if cfg["AA_double_buffer"].val:
        s[AA].double_buffer()
    if cfg["WW_double_buffer"].val:
        s[WW].double_buffer()
    # unroll
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[output].pragma(kernel_scope, "unroll_explicit", False)
    return s
| 13,169 | 36.308782 | 100 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv3d_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Winograd template for cuda backend"""
import logging
import tvm
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import get_const_int, get_const_tuple, traverse_inline, simplify
from ..nn.winograd_util import winograd_transform_matrices
logger = logging.getLogger("conv3d_winograd")
def _infer_tile_size(data, kernel):
    """Choose the winograd output tile size: 4 when H is divisible by 8, else 2."""
    height = get_const_tuple(data.shape)[3]
    return 4 if height % 8 == 0 else 2
def winograd_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed):
    """Compute declaration for 3-D winograd convolution (NCDHW).

    Transforms all three spatial dimensions (depth, height, width).
    Pipeline: pad -> tile input -> data transform (B^T d B per dim) ->
    batched gemm against the transformed kernel -> inverse transform
    (A^T .. A per dim) -> write output tiles back.

    When ``pre_computed`` is True the kernel is already in transformed
    (alpha, alpha, alpha, CO, CI) layout (produced by alter_op_layout) and
    strides/dilation must all be 1.
    """
    tile_size = _infer_tile_size(data, kernel)
    N, CI, D, H, W = get_const_tuple(data.shape)
    if isinstance(dilation, int):
        dilation_d = dilation_h = dilation_w = dilation
    else:
        dilation_d, dilation_h, dilation_w = dilation
    DSTR, HSTR, WSTR = (strides, strides, strides) if isinstance(strides, int) else strides
    if not pre_computed:  # kernel tensor is raw tensor, do strict check
        if dilation_d != 1 or dilation_h != 1 or dilation_w != 1:
            kernel = nn.dilate(kernel, (1, 1, dilation_d, dilation_h, dilation_w))
        CO, CI, KD, KH, KW = get_const_tuple(kernel.shape)
        # alpha = input tile edge = kernel size + output tile size - 1
        alpha = KW + tile_size - 1
        assert DSTR == 1 and HSTR == 1 and WSTR == 1 and KD == KH and KH == KW
    else:
        # kernel tensor is pre-transformed. this op is created by alter op layout.
        # dilation is not supported
        alpha, _, _, CO, CI = get_const_tuple(kernel.shape)
        KD = KH = KW = alpha + 1 - tile_size
        assert (
            DSTR == 1
            and HSTR == 1
            and WSTR == 1
            and dilation_d == 1
            and dilation_h == 1
            and dilation_w == 1
        )
    pf, pt, pl, pb, pd, pr = nn.get_pad_tuple3d(padding, (KD, KH, KW))
    data_pad = nn.pad(data, (0, 0, pf, pt, pl), (0, 0, pb, pd, pr), name="data_pad")
    r = KW
    m = tile_size
    A, B, G = winograd_transform_matrices(m, r, out_dtype)
    # Output extents (stride is asserted to be 1 above).
    D = (D + pf + pb - KD) // DSTR + 1
    H = (H + pt + pd - KH) // HSTR + 1
    W = (W + pl + pr - KW) // WSTR + 1
    # Number of m-sized output tiles per dimension; P = total tiles.
    nD, nH, nW = (D + m - 1) // m, (H + m - 1) // m, (W + m - 1) // m
    P = N * nD * nH * nW
    # transform kernel: U = G g G^T applied along each spatial dimension
    if not pre_computed:
        # Check if we are currently tuning, if so we want to avoid counting
        # prepacking in time costs. Just use a placeholder with the packed shape instead.
        if autotvm.GLOBAL_SCOPE.in_tuning:
            kernel_pack = te.placeholder(
                (alpha, alpha, alpha, CO, CI), dtype=kernel.dtype, name="kernel_pack"
            )
        else:
            r_kd = te.reduce_axis((0, KD), name="r_kd")
            r_kh = te.reduce_axis((0, KH), name="r_kh")
            r_kw = te.reduce_axis((0, KW), name="r_kw")
            kernel_pack = te.compute(
                (alpha, alpha, alpha, CO, CI),
                lambda omg, eps, nu, co, ci: te.sum(
                    kernel[co][ci][r_kd][r_kh][r_kw] * G[omg][r_kd] * G[eps][r_kh] * G[nu][r_kw],
                    axis=[r_kd, r_kh, r_kw],
                ),
                name="kernel_pack",
            )
    else:
        kernel_pack = kernel
    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod
    # pack input tile: gather each alpha^3 input patch, indexed by the flat
    # tile index p decomposed as (n, d-tile, h-tile, w-tile)
    input_tile = te.compute(
        (CI, P, alpha, alpha, alpha),
        lambda c, p, omg, eps, nu: data_pad[idxdiv(p, (nD * nH * nW))][c][
            idxmod(idxdiv(p, nH * nW), nD) * m + omg
        ][idxmod(idxdiv(p, nW), nH) * m + eps][idxmod(p, nW) * m + nu],
        name="d",
    )
    # transform data: V = B^T d B applied along each spatial dimension
    r_a = te.reduce_axis((0, alpha), "r_a")
    r_b = te.reduce_axis((0, alpha), "r_b")
    r_c = te.reduce_axis((0, alpha), "r_c")
    data_pack = te.compute(
        (alpha, alpha, alpha, CI, P),
        lambda omg, eps, nu, ci, p: te.sum(
            input_tile[ci][p][r_a][r_b][r_c] * B[r_a][omg] * B[r_b][eps] * B[r_c][nu],
            axis=[r_a, r_b, r_c],
        ),
        name="data_pack",
    )
    # do batch gemm: one (CO x CI) x (CI x P) matmul per transform point
    ci = te.reduce_axis((0, CI), name="ci")
    bgemm = te.compute(
        (alpha, alpha, alpha, CO, P),
        lambda omg, eps, nu, co, p: te.sum(
            kernel_pack[omg][eps][nu][co][ci] * data_pack[omg][eps][nu][ci][p], axis=[ci]
        ),
        name="bgemm",
    )
    # inverse transform: Y = A^T M A applied along each spatial dimension
    r_a = te.reduce_axis((0, alpha), "r_a")
    r_b = te.reduce_axis((0, alpha), "r_b")
    r_c = te.reduce_axis((0, alpha), "r_c")
    inverse = te.compute(
        (CO, P, m, m, m),
        lambda co, p, vd, vh, vw: te.sum(
            bgemm[r_a][r_b][r_c][co][p] * A[r_a][vd] * A[r_b][vh] * A[r_c][vw], axis=[r_a, r_b, r_c]
        ),
        name="inverse",
    )
    # output: scatter the m^3 tiles back into the NCDHW result
    output = te.compute(
        (N, CO, D, H, W),
        lambda n, co, d, h, w: inverse[
            co,
            n * nD * nH * nW + idxdiv(d, m) * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m),
            idxmod(d, m),
            idxmod(h, m),
            idxmod(w, m),
        ],
        name="output",
        tag="conv3d_ncdhw_winograd",
    )
    # FLOP count of the equivalent direct convolution (for tuner reporting).
    cfg.add_flop(2 * N * CO * D * H * W * CI * KD * KH * KW)
    return output
def winograd_without_depth_cuda(
    cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed
):
    """Compute declaration for 3-D winograd that transforms only H and W.

    The depth dimension is handled as an extra direct reduction (rz) inside
    the batched gemm, so depth strides other than 1 remain supported, unlike
    the fully-transformed variant.
    """
    tile_size = _infer_tile_size(data, kernel)
    N, CI, D, H, W = get_const_tuple(data.shape)
    if isinstance(dilation, int):
        dilation_d = dilation_h = dilation_w = dilation
    else:
        dilation_d, dilation_h, dilation_w = dilation
    DSTR, HSTR, WSTR = (strides, strides, strides) if isinstance(strides, int) else strides
    if not pre_computed:  # kernel tensor is raw tensor, do strict check
        if dilation_d != 1 or dilation_h != 1 or dilation_w != 1:
            kernel = nn.dilate(kernel, (1, 1, dilation_d, dilation_h, dilation_w))
        CO, CI, KD, KH, KW = get_const_tuple(kernel.shape)
        alpha = KW + tile_size - 1
        # Only H/W are winograd-transformed, so only their strides must be 1.
        assert HSTR == 1 and WSTR == 1 and KH == KW
    else:
        # kernel tensor is pre-transformed. this op is created by alter op layout.
        # dilation is not supported
        alpha, _, KD, CO, CI = get_const_tuple(kernel.shape)
        KH = KW = alpha + 1 - tile_size
        assert HSTR == 1 and WSTR == 1 and dilation_h == 1 and dilation_w == 1
    pf, pt, pl, pb, pd, pr = nn.get_pad_tuple3d(padding, (KD, KH, KW))
    data_pad = nn.pad(data, (0, 0, pf, pt, pl), (0, 0, pb, pd, pr), name="data_pad")
    out_depth = simplify((D - KD + pf + pb) // DSTR + 1)
    D += pf + pb
    r = KW
    m = tile_size
    A, B, G = winograd_transform_matrices(m, r, out_dtype)
    H = (H + pt + pd - KH) // HSTR + 1
    W = (W + pl + pr - KW) // WSTR + 1
    # Tile only H and W; P = total number of (h, w) tiles across the batch.
    nH, nW = (H + m - 1) // m, (W + m - 1) // m
    P = N * nH * nW
    # transform kernel over H/W only; depth stays an untransformed axis
    if not pre_computed:
        # During autotuning dont count kernel packing as a time cost
        # as it will later be removed via alter_op_layout.
        if autotvm.GLOBAL_SCOPE.in_tuning:
            kernel_pack = te.placeholder(
                (alpha, alpha, KD, CO, CI), dtype=kernel.dtype, name="kernel_pack"
            )
        else:
            r_kh = te.reduce_axis((0, KH), name="r_kh")
            r_kw = te.reduce_axis((0, KW), name="r_kw")
            kernel_pack = te.compute(
                (alpha, alpha, KD, CO, CI),
                lambda eps, nu, d, co, ci: te.sum(
                    kernel[co][ci][d][r_kh][r_kw] * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
                ),
                name="kernel_pack",
            )
    else:
        kernel_pack = kernel
    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod
    # pack input tile: alpha x alpha patch per (channel, depth, tile)
    input_tile = te.compute(
        (CI, D, P, alpha, alpha),
        lambda c, d, p, eps, nu: data_pad[idxdiv(p, (nH * nW))][c][d][
            idxmod(idxdiv(p, nW), nH) * m + eps
        ][idxmod(p, nW) * m + nu],
        name="d",
    )
    # transform data: V = B^T d B over the two tiled dimensions
    r_a = te.reduce_axis((0, alpha), "r_a")
    r_b = te.reduce_axis((0, alpha), "r_b")
    data_pack = te.compute(
        (alpha, alpha, CI, D, P),
        lambda eps, nu, ci, d, p: te.sum(
            input_tile[ci][d][p][r_a][r_b] * B[r_a][eps] * B[r_b][nu], axis=[r_a, r_b]
        ),
        name="data_pack",
    )
    # do batch gemm; the depth kernel axis rz is reduced directly here,
    # with the depth stride DSTR applied to the data index
    ci = te.reduce_axis((0, CI), name="ci")
    rz = te.reduce_axis((0, KD), name="rz")
    bgemm = te.compute(
        (alpha, alpha, CO, out_depth, P),
        lambda eps, nu, co, d, p: te.sum(
            kernel_pack[eps][nu][rz][co][ci] * data_pack[eps][nu][ci][d * DSTR + rz][p],
            axis=[ci, rz],
        ),
        name="bgemm",
    )
    # inverse transform: Y = A^T M A over the two tiled dimensions
    r_a = te.reduce_axis((0, alpha), "r_a")
    r_b = te.reduce_axis((0, alpha), "r_b")
    inverse = te.compute(
        (CO, out_depth, P, m, m),
        lambda co, d, p, vh, vw: te.sum(
            bgemm[r_a][r_b][co][d][p] * A[r_a][vh] * A[r_b][vw], axis=[r_a, r_b]
        ),
        name="inverse",
    )
    # output: scatter the m x m tiles back into the NCDHW result
    output = te.compute(
        (N, CO, out_depth, H, W),
        lambda n, co, d, h, w: inverse[
            co, d, n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m), idxmod(h, m), idxmod(w, m)
        ],
        name="output",
        tag="conv3d_ncdhw_winograd_without_depth",
    )
    # FLOP count of the equivalent direct convolution (for tuner reporting).
    cfg.add_flop(2 * N * CO * D * H * W * CI * KD * KH * KW)
    return output
def schedule_winograd_cuda(cfg, s, output, pre_computed):
    """Schedule the fully-transformed 3-D winograd template.

    Schedules, in order: the data transform, the (optional) kernel
    transform, the batched gemm with shared-memory staging, and the inverse
    transform fused with the output write-back.
    """
    # get stages: walk producer edges back from the output tensor
    inverse = s[output].op.input_tensors[0]
    bgemm, A = s[inverse].op.input_tensors
    kernel_pack, data_pack = s[bgemm].op.input_tensors
    input_tile, B = s[data_pack].op.input_tensors
    pad_data = s[input_tile].op.input_tensors[0]
    # data transform
    s[B].compute_inline()
    data_l = s.cache_write(data_pack, "local")
    omg, eps, nu, c, p = s[data_l].op.axis
    r_a, r_b, r_c = s[data_l].op.reduce_axis
    # TODO unrolling by omg, eps, nu may improve performance but
    # in some cases causes extremely long build times due to imperfect tiling.
    for axis in [r_a, r_b, r_c]:
        s[data_l].unroll(axis)
    omg, eps, nu, c, p = s[data_pack].op.axis
    p, pi = s[data_pack].split(p, 1)
    fused = s[data_pack].fuse(c, p)
    bb, tt = s[data_pack].split(fused, 128)
    s[data_pack].reorder(bb, tt, pi, omg, eps, nu)
    s[data_pack].bind(bb, te.thread_axis("blockIdx.x"))
    s[data_pack].bind(tt, te.thread_axis("threadIdx.x"))
    s[data_l].compute_at(s[data_pack], pi)
    s[input_tile].compute_at(s[data_pack], pi)
    s[pad_data].compute_inline()
    # transform kernel (skipped when pre-transformed or during tuning,
    # where kernel_pack is just a placeholder)
    if not pre_computed and not autotvm.GLOBAL_SCOPE.in_tuning:
        kernel, G = s[kernel_pack].op.input_tensors
        omg, eps, nu, co, ci = s[kernel_pack].op.axis
        s[G].compute_inline()
        r_a, r_b, r_c = s[kernel_pack].op.reduce_axis
        # Could add additional unrolling by omg, eps, nu in the future.
        for axis in [r_a, r_b, r_c]:
            s[kernel_pack].unroll(axis)
        fused = s[kernel_pack].fuse(co, ci)
        bb, tt = s[kernel_pack].split(fused, 128)
        s[kernel_pack].reorder(bb, tt, omg, eps, nu, r_a, r_b, r_c)
        s[kernel_pack].bind(bb, te.thread_axis("blockIdx.x"))
        s[kernel_pack].bind(tt, te.thread_axis("threadIdx.x"))
    else:
        kernel = kernel_pack
    if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()
    ##### space definition begin #####
    b1, b2, b3, y, x = s[bgemm].op.axis
    rc = s[bgemm].op.reduce_axis[0]
    alpha = get_const_int(b1.dom.extent)
    # The three alpha axes are fused into one "batch" axis; force the inner
    # tile sizes to 1 so each thread handles a single transform point.
    cfg.define_split(
        "tile_b",
        cfg.axis(alpha * alpha * alpha),
        num_outputs=4,
        filter=lambda x: x.size[-3:] == [1, 1, 1],
    )
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_rc", rc, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 128, 1500])
    target = tvm.target.Target.current()
    if target.kind.name in ["nvptx", "rocm"]:
        cfg.define_knob("unroll_explicit", [1])
    else:
        cfg.define_knob("unroll_explicit", [0, 1])
    ##### space definition end #####
    # batch gemm: local accumulation with shared-memory staging of both operands
    C = bgemm
    A0, B0 = kernel_pack, data_pack
    OL = s.cache_write(C, "local")
    AA = s.cache_read(A0, "shared", [OL])
    BB = s.cache_read(B0, "shared", [OL])
    b = s[bgemm].fuse(b1, b2, b3)
    # tile and bind spatial axes
    bgemm_scope, b = s[bgemm].split(b, nparts=1)
    bz, vz, tz, zi = cfg["tile_b"].apply(s, C, b)
    by, vy, ty, yi = cfg["tile_y"].apply(s, C, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, C, x)
    s[C].bind(bz, te.thread_axis("blockIdx.z"))
    s[C].bind(by, te.thread_axis("blockIdx.y"))
    s[C].bind(bx, te.thread_axis("blockIdx.x"))
    s[C].bind(vz, te.thread_axis("vthread"))
    s[C].bind(vy, te.thread_axis("vthread"))
    s[C].bind(vx, te.thread_axis("vthread"))
    s[C].bind(tz, te.thread_axis("threadIdx.z"))
    s[C].bind(ty, te.thread_axis("threadIdx.y"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    s[C].reorder(bgemm_scope, bz, by, bx, vz, vy, vx, tz, ty, tx, zi, yi, xi)
    # tile reduction axes
    s[OL].compute_at(s[C], tx)
    b1, b2, b3, y, x = s[OL].op.axis
    b = s[OL].fuse(b1, b2, b3)
    (rc,) = s[OL].op.reduce_axis
    rco, rci = cfg["tile_rc"].apply(s, OL, rc)
    s[OL].reorder(rco, rci, b, y, x)
    s[AA].compute_at(s[OL], rco)
    s[BB].compute_at(s[OL], rco)
    # cooperative fetching: the whole thread block copies each shared tile
    for load in [AA, BB]:
        fused = s[load].fuse(*list(s[load].op.axis))
        fused, tx = s[load].split(fused, cfg["tile_x"].size[2])
        fused, ty = s[load].split(fused, cfg["tile_y"].size[2])
        fused, tz = s[load].split(fused, cfg["tile_b"].size[2])
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
    s[C].pragma(bgemm_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[C].pragma(bgemm_scope, "unroll_explicit", cfg["unroll_explicit"].val)
    # schedule inverse, output and fusion: if elementwise ops were fused
    # after the conv, compute the conv result locally inside them
    if output.op in s.outputs:
        OL = None
    else:
        OL = output
        s[OL].set_scope("local")
        output = s.outputs[0]
    m = alpha - 3 + 1
    n, co, d, h, w = s[output].op.axis
    do, di = s[output].split(d, m)
    ho, hi = s[output].split(h, m)
    wo, wi = s[output].split(w, m)
    s[output].reorder(n, co, do, ho, wo, di, hi, wi)
    inverse_scope, n = s[output].split(n, nparts=1)
    fused = s[output].fuse(n, co, do, ho, wo)
    bb, tt = s[output].split(fused, 128)
    s[output].bind(bb, te.thread_axis("blockIdx.x"))
    s[output].bind(tt, te.thread_axis("threadIdx.x"))
    if OL is not None:
        s[OL].compute_at(s[output], tt)
    s[A].compute_inline()
    co, p, vd, vh, vw = s[inverse].op.axis
    r_a, r_b, r_c = s[inverse].op.reduce_axis
    # Could add additional unrolling of vd, vh, vw, in the future
    for axis in [r_a, r_b, r_c]:
        s[inverse].unroll(axis)
    s[inverse].compute_at(s[output], tt)
    return s
def schedule_winograd_no_depth_cuda(cfg, s, output, pre_computed):
    """Schedule winograd template.

    Variant that applies the winograd transform only to the H/W dimensions;
    the depth dimension is handled as an extra reduction axis (``rz``).

    Parameters
    ----------
    cfg : autotvm ConfigSpace / ConfigEntity
        Tuning configuration; knobs are defined here when building the space.
    s : te.Schedule
        Schedule being mutated in place.
    output : te.Tensor
        Output tensor of the winograd conv3d compute.
    pre_computed : bool
        True when the kernel was already transformed into winograd domain
        (the ``without_weight_transform`` path), so no kernel-pack schedule
        is needed.
    """
    # get stages: walk the compute dag backwards from the output.
    inverse = s[output].op.input_tensors[0]
    bgemm, A = s[inverse].op.input_tensors
    kernel_pack, data_pack = s[bgemm].op.input_tensors
    input_tile, B = s[data_pack].op.input_tensors
    pad_data = s[input_tile].op.input_tensors[0]
    # data transform: write transformed tiles through a local cache stage.
    s[B].compute_inline()
    data_l = s.cache_write(data_pack, "local")
    eps, nu, c, d, p = s[data_l].op.axis
    r_a, r_b = s[data_l].op.reduce_axis
    for axis in [eps, nu, r_a, r_b]:
        s[data_l].unroll(axis)
    eps, nu, c, d, p = s[data_pack].op.axis
    p, pi = s[data_pack].split(p, 1)
    fused = s[data_pack].fuse(c, d, p)
    # 128 threads per block for the transform kernels.
    bb, tt = s[data_pack].split(fused, 128)
    s[data_pack].reorder(bb, tt, pi, eps, nu)
    s[data_pack].bind(bb, te.thread_axis("blockIdx.x"))
    s[data_pack].bind(tt, te.thread_axis("threadIdx.x"))
    s[data_l].compute_at(s[data_pack], pi)
    s[input_tile].compute_at(s[data_pack], pi)
    s[pad_data].compute_inline()
    # transform kernel (skipped while tuning to avoid timing the one-off
    # weight transform, and skipped when weights are pre-transformed).
    if not pre_computed and not autotvm.GLOBAL_SCOPE.in_tuning:
        kernel, G = s[kernel_pack].op.input_tensors
        eps, nu, kd, co, ci = s[kernel_pack].op.axis
        s[G].compute_inline()
        r_a, r_b = s[kernel_pack].op.reduce_axis
        for axis in [eps, nu, r_a, r_b]:
            s[kernel_pack].unroll(axis)
        fused = s[kernel_pack].fuse(kd, co, ci)
        bb, tt = s[kernel_pack].split(fused, 128)
        s[kernel_pack].reorder(bb, tt, eps, nu, r_a, r_b)
        s[kernel_pack].bind(bb, te.thread_axis("blockIdx.x"))
        s[kernel_pack].bind(tt, te.thread_axis("threadIdx.x"))
    else:
        kernel = kernel_pack
    if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()
    ##### space definition begin #####
    b1, b2, z, y, x = s[bgemm].op.axis
    # Combine channel and depth axes.
    rc = s[bgemm].op.reduce_axis[0]
    rz = s[bgemm].op.reduce_axis[1]
    alpha = get_const_int(b1.dom.extent)
    cfg.define_split(
        "tile_b", cfg.axis(alpha * alpha), num_outputs=4, filter=lambda x: x.size[-3:] == [1, 1, 1]
    )
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_rc", rc, num_outputs=2)
    cfg.define_split("tile_rz", rz, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 128, 1500])
    target = tvm.target.Target.current()
    if target.kind.name in ["nvptx", "rocm"]:
        cfg.define_knob("unroll_explicit", [1])
    else:
        cfg.define_knob("unroll_explicit", [0, 1])
    ##### space definition end #####
    # batch gemm
    C = bgemm
    A0, B0 = kernel_pack, data_pack
    OL = s.cache_write(C, "local")
    AA = s.cache_read(A0, "shared", [OL])
    BB = s.cache_read(B0, "shared", [OL])
    b = s[bgemm].fuse(b1, b2)
    # Allow two different tiling strategies as both seem
    # to work best in different cases.
    cfg.define_knob("unroll_axis", [0, 1])
    # tile and bind spatial axes
    bgemm_scope, b = s[bgemm].split(b, nparts=1)
    bz, vz, tz, zi = cfg["tile_b"].apply(s, C, b)
    # NOTE(review): "tile_y" is applied to axis `z` here, and "tile_x" is
    # applied to `y` or `x` depending on which axis gets unrolled below —
    # the names of the knobs do not match the axes 1:1; this appears
    # intentional given the `unroll_axis` knob, but confirm before renaming.
    by, vy, ty, yi = cfg["tile_y"].apply(s, C, z)
    if cfg["unroll_axis"].val:
        bx, vx, tx, xi = cfg["tile_x"].apply(s, C, y)
    else:
        bx, vx, tx, xi = cfg["tile_x"].apply(s, C, x)
    s[C].bind(bz, te.thread_axis("blockIdx.z"))
    s[C].bind(by, te.thread_axis("blockIdx.y"))
    s[C].bind(bx, te.thread_axis("blockIdx.x"))
    s[C].bind(vz, te.thread_axis("vthread"))
    s[C].bind(vy, te.thread_axis("vthread"))
    s[C].bind(vx, te.thread_axis("vthread"))
    s[C].bind(tz, te.thread_axis("threadIdx.z"))
    s[C].bind(ty, te.thread_axis("threadIdx.y"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    s[C].reorder(bgemm_scope, bz, by, bx, vz, vy, vx, tz, ty, tx, zi, yi, xi)
    # Unroll whichever spatial axis was NOT tiled above.
    if cfg["unroll_axis"].val:
        s[C].unroll(x)
    else:
        s[C].unroll(y)
    # tile reduction axes
    s[OL].compute_at(s[C], tx)
    b1, b2, y1, y2, x = s[OL].op.axis
    y = s[OL].fuse(y1, y2)
    b = s[OL].fuse(b1, b2)
    rc, rz = s[OL].op.reduce_axis
    rco, rci = cfg["tile_rc"].apply(s, OL, rc)
    rzo, rzi = cfg["tile_rz"].apply(s, OL, rz)
    s[OL].reorder(rco, rzo, rci, rzi, b, y, x)
    s[AA].compute_at(s[OL], rco)
    s[BB].compute_at(s[OL], rco)
    # cooperative fetching: all threads of the block load the shared tiles.
    for load in [AA, BB]:
        fused = s[load].fuse(*list(s[load].op.axis))
        fused, tx = s[load].split(fused, cfg["tile_x"].size[2])
        fused, ty = s[load].split(fused, cfg["tile_y"].size[2])
        fused, tz = s[load].split(fused, cfg["tile_b"].size[2])
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
    s[C].pragma(bgemm_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[C].pragma(bgemm_scope, "unroll_explicit", cfg["unroll_explicit"].val)
    # schedule inverse, output and fusion
    if output.op in s.outputs:
        OL = None
    else:
        # There is a fused epilogue; compute the conv output locally and
        # schedule the real graph output instead.
        OL = output
        s[OL].set_scope("local")
        output = s.outputs[0]
    # m is the output-tile size recovered from alpha (kernel size 3 here).
    m = alpha - 3 + 1
    n, co, d, h, w = s[output].op.axis
    do, di = s[output].split(d, m)
    ho, hi = s[output].split(h, m)
    wo, wi = s[output].split(w, m)
    s[output].reorder(n, co, do, ho, wo, di, hi, wi)
    inverse_scope, n = s[output].split(n, nparts=1)
    fused = s[output].fuse(n, co, do, ho, wo)
    bb, tt = s[output].split(fused, 128)
    s[output].bind(bb, te.thread_axis("blockIdx.x"))
    s[output].bind(tt, te.thread_axis("threadIdx.x"))
    if OL is not None:
        s[OL].compute_at(s[output], tt)
    s[A].compute_inline()
    co, d, p, vh, vw = s[inverse].op.axis
    r_a, r_b = s[inverse].op.reduce_axis
    for axis in [vh, vw, r_a, r_b]:
        s[inverse].unroll(axis)
    s[inverse].compute_at(s[output], tt)
    return s
@autotvm.register_topi_compute("conv3d_ncdhw_winograd.cuda")
def conv3d_ncdhw_winograd(cfg, data, kernel, strides, padding, dilation, groups, out_dtype):
    """Conv3d NCDHW using winograd optimization.

    Dispatches between the full winograd compute (depth transformed too) and
    the H/W-only variant based on the kernel's depth/height extents.
    """
    assert groups == 1, "conv3d_ncdhw_winograd only supports a single group"
    _, _, kd, kh, _ = get_const_tuple(kernel.shape)
    # Depth can only be transformed when it matches the height and lies in
    # the supported range (3..7).
    depth_transformable = kd == kh and 2 < kd < 8
    compute = winograd_cuda if depth_transformable else winograd_without_depth_cuda
    return compute(cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed=False)
@autotvm.register_topi_schedule("conv3d_ncdhw_winograd.cuda")
def schedule_conv3d_ncdhw_winograd(cfg, outs):
    """Dispatch to the appropriate schedule for the conv3d winograd variant used."""
    sch = te.create_schedule([t.op for t in outs])

    def _dispatch(op):
        # The "without_depth" tag contains the plain tag as a substring,
        # so it must be tested first.
        if "conv3d_ncdhw_winograd_without_depth" in op.tag:
            schedule_winograd_no_depth_cuda(cfg, sch, op.output(0), pre_computed=False)
        elif "conv3d_ncdhw_winograd" in op.tag:
            schedule_winograd_cuda(cfg, sch, op.output(0), pre_computed=False)

    traverse_inline(sch, outs[0].op, _dispatch)
    return sch
@autotvm.register_topi_compute("conv3d_ncdhw_winograd_without_weight_transform.cuda")
def conv3d_ncdhw_winograd_without_weight_transform(
    cfg, data, kernel, strides, padding, dilation, groups, out_dtype
):
    """Conv3d NCDHW winograd without weight transform.

    The kernel arrives already in winograd domain; when its three leading
    extents are equal, the depth dimension was transformed as well.
    """
    assert (
        groups == 1
    ), "conv3d_ncdhw_winograd_without_weight_transform does not support more than one group"
    dim0, dim1, dim2, _, _ = get_const_tuple(kernel.shape)
    depth_transformed = dim0 == dim1 == dim2
    compute = winograd_cuda if depth_transformed else winograd_without_depth_cuda
    return compute(cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed=True)
@autotvm.register_topi_schedule("conv3d_ncdhw_winograd_without_weight_transform.cuda")
def schedule_conv3d_ncdhw_winograd_without_weight_transform(cfg, outs):
    """TOPI schedule callback for pre-transformed winograd conv3d."""
    sch = te.create_schedule([t.op for t in outs])

    def _dispatch(op):
        # Check the more specific "without_depth" tag first: the generic
        # tag is a substring of it.
        if "conv3d_ncdhw_winograd_without_depth" in op.tag:
            schedule_winograd_no_depth_cuda(cfg, sch, op.output(0), pre_computed=True)
        elif "conv3d_ncdhw_winograd" in op.tag:
            schedule_winograd_cuda(cfg, sch, op.output(0), pre_computed=True)

    traverse_inline(sch, outs[0].op, _dispatch)
    return sch
| 24,547 | 34.784257 | 100 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv2d_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Conv2D alter op and legalize functions for cuda backend"""
import logging
import tvm
from tvm import autotvm, relay, te
from .. import nn
from ..nn import conv2d_legalize
from ..utils import get_const_tuple, is_target
from .conv2d_winograd import _infer_tile_size
from .tensorcore_alter_op import pad_to_tensorcore
logger = logging.getLogger("topi")
@nn.conv2d_alter_layout.register(["cuda", "gpu"])
def _alter_conv2d_layout(attrs, inputs, tinfos, out_type):
    """Rewrite a conv2d into a layout/form better suited to the chosen
    CUDA/ROCm/Vulkan implementation.

    Depending on the AutoTVM template that was selected for the workload,
    this either pre-computes the winograd weight transform, or switches the
    tensors into the blocked int8 / tensor-core layouts that template
    expects. Returns a new relay expression, or None to keep the op as-is.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of the current convolution.
    inputs : list of tvm.relay.Expr
        The args of the relay call being altered.
    tinfos : list of te.Tensor
        Placeholder tensors describing the input types (data, kernel).
    out_type : relay.Type
        Output type of the original convolution.
    """
    target = tvm.target.Target.current(allow_none=False)
    if not is_target(["vulkan", "rocm", "cuda"]):
        return None
    dispatch_ctx = autotvm.task.DispatchContext.current
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    strides = attrs.get_int_tuple("strides")
    padding = attrs.get_int_tuple("padding")
    dilation = attrs.get_int_tuple("dilation")
    groups = attrs.get_int("groups")
    data_layout = attrs["data_layout"]
    kernel_layout = attrs["kernel_layout"]
    data, kernel = tinfos
    out_dtype = out_type.dtype

    impl, outs = relay.backend.te_compiler.select_implementation(
        relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target
    )
    workload = autotvm.task.get_workload(outs)
    if workload is None:
        # The best implementation is not an AutoTVM template.
        # It may be from the auto-scheduler
        if impl.name.find("winograd") != -1:
            if dilation != (1, 1):
                logger.warning("Does not support weight pre-transform for dilated convolution.")
                return None

            if data_layout == "NHWC" and kernel_layout == "HWIO":
                N, H, W, CI = get_const_tuple(data.shape)
                KH, KW, _, CO = get_const_tuple(kernel.shape)
                # Pre-compute weight transformation in winograd
                tile_size = _infer_tile_size(tinfos[0], tinfos[1], layout="NHWC")
                # HWIO -> OIHW
                kernel_transform = relay.transpose(inputs[1], axes=[3, 2, 0, 1])
                # alpha, alpha, CO, CI
                weight = relay.nn.contrib_conv2d_winograd_weight_transform(
                    kernel_transform, tile_size=tile_size
                )
                new_attrs["tile_size"] = tile_size
                new_attrs["channels"] = CO
                return relay.nn.contrib_conv2d_winograd_without_weight_transform(
                    inputs[0], weight, **new_attrs
                )
            elif data_layout == "NCHW" and kernel_layout == "OIHW":
                N, CI, H, W = get_const_tuple(data.shape)
                CO, _, KH, KW = get_const_tuple(kernel.shape)
                # Pre-compute weight transformation in winograd
                tile_size = _infer_tile_size(tinfos[0], tinfos[1], layout="NCHW")
                # alpha, alpha, CO, CI
                weight = relay.nn.contrib_conv2d_winograd_weight_transform(
                    inputs[1], tile_size=tile_size
                )
                # alpha, alpha, CI, CO
                weight = relay.transpose(weight, axes=[0, 1, 3, 2])
                new_attrs["tile_size"] = tile_size
                new_attrs["channels"] = CO
                return relay.nn.contrib_conv2d_winograd_without_weight_transform(
                    inputs[0], weight, **new_attrs
                )
        return None

    cfg = dispatch_ctx.query(target, workload)
    if cfg.is_fallback:  # if is fallback, clear query cache and return None
        autotvm.task.clear_fallback_cache(target, workload)
        # On vulkan/rocm with integer dot-product support we still proceed
        # with the layout change even without a tuned config.
        do_new_layout = False
        if is_target(["vulkan", "rocm"]):
            do_new_layout = "+dotprod" in target.mattr or target.supports_integer_dot_product
        if not do_new_layout:
            return None

    topi_tmpl = workload[0]
    if topi_tmpl == "conv2d_NCHWc_int8.cuda":
        # int8: switch to the blocked NCHW4c / OIHW4o4i layout.
        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        N, CI, H, W = get_const_tuple(data.shape)
        CO, _, KH, KW = get_const_tuple(kernel.shape)
        assert CO % 4 == 0, "Number of output channels should be multiple of 4"
        new_layout = "NCHW4c"
        new_attrs["channels"] = CO
        new_attrs["data_layout"] = new_layout
        new_attrs["out_layout"] = new_layout
        new_attrs["kernel_layout"] = "OIHW4o4i"
        ic_block_factor = oc_block_factor = 4

        # Store the same config for the altered operator (workload)
        new_data = te.placeholder(
            (N, CI // ic_block_factor, H, W, ic_block_factor), dtype=data.dtype
        )
        new_kernel = te.placeholder(
            (
                CO // oc_block_factor,
                CI // ic_block_factor,
                KH,
                KW,
                oc_block_factor,
                ic_block_factor,
            ),
            dtype=kernel.dtype,
        )
        new_workload = autotvm.task.args_to_workload(
            [new_data, new_kernel, strides, padding, dilation, new_layout, out_dtype],
            "conv2d_NCHWc_int8.cuda",
        )
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.conv2d(*inputs, **new_attrs)

    if topi_tmpl == "conv2d_nchw_winograd.cuda":
        if dilation != (1, 1):
            logger.warning("Does not support weight pre-transform for dilated convolution.")
            return None

        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        N, CI, H, W = get_const_tuple(data.shape)
        CO, _, KH, KW = get_const_tuple(kernel.shape)

        # pre-compute weight transformation in winograd
        tile_size = _infer_tile_size(tinfos[0], tinfos[1])

        weight = relay.nn.contrib_conv2d_winograd_weight_transform(inputs[1], tile_size=tile_size)
        weight = relay.transpose(weight, axes=[0, 1, 3, 2])
        new_attrs["tile_size"] = tile_size
        new_attrs["channels"] = CO

        # Store the same config for the altered operator (workload)
        new_data = data
        new_weight = te.placeholder(
            (KH + tile_size - 1, KW + tile_size - 1, CI, CO), dtype=kernel.dtype
        )
        new_workload = autotvm.task.args_to_workload(
            [new_data, new_weight, strides, padding, dilation, out_dtype],
            "conv2d_nchw_winograd_without_weight_transform.cuda",
        )
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.contrib_conv2d_winograd_without_weight_transform(
            inputs[0], weight, **new_attrs
        )

    if topi_tmpl in ("conv2d_nhwc_winograd_direct.cuda", "conv2d_nhwc_winograd_tensorcore.cuda"):
        if dilation != (1, 1):
            logger.warning("Does not support weight pre-transform for dilated convolution.")
            return None

        assert data_layout == "NHWC" and kernel_layout == "HWIO"
        N, H, W, CI = get_const_tuple(data.shape)
        KH, KW, _, CO = get_const_tuple(kernel.shape)

        # Pre-compute weight transformation in winograd
        tile_size = _infer_tile_size(data, kernel, layout="NHWC")
        kernel_transform = relay.transpose(inputs[1], axes=[3, 2, 0, 1])
        weight = relay.nn.contrib_conv2d_winograd_weight_transform(
            kernel_transform, tile_size=tile_size
        )
        weight = relay.transpose(weight, axes=[0, 1, 3, 2])
        new_attrs["tile_size"] = tile_size
        new_attrs["channels"] = CO

        # Store the same config for the altered operator (workload)
        new_data = data
        new_weight = te.placeholder(
            (KH + tile_size - 1, KW + tile_size - 1, CI, CO), dtype=kernel.dtype
        )
        if topi_tmpl == "conv2d_nhwc_winograd_direct.cuda":
            new_workload = autotvm.task.args_to_workload(
                [new_data, new_weight, strides, padding, dilation, out_dtype],
                "conv2d_nhwc_winograd_direct_without_weight_transform.cuda",
            )
        elif topi_tmpl == "conv2d_nhwc_winograd_tensorcore.cuda":
            new_workload = autotvm.task.args_to_workload(
                [new_data, new_weight, strides, padding, dilation, out_dtype],
                "conv2d_nhwc_winograd_tensorcore_without_weight_transform.cuda",
            )
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.contrib_conv2d_winograd_without_weight_transform(
            inputs[0], weight, **new_attrs
        )

    if topi_tmpl == "group_conv2d_NCHWc_int8.cuda":
        # Grouped int8: same blocked layout, but input channels are per-group.
        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        N, CI, H, W = get_const_tuple(data.shape)
        CO, _, KH, KW = get_const_tuple(kernel.shape)

        new_layout = "NCHW4c"
        new_attrs["channels"] = CO
        new_attrs["data_layout"] = new_layout
        new_attrs["out_layout"] = new_layout
        new_attrs["kernel_layout"] = "OIHW4o4i"
        ic_block_factor = oc_block_factor = 4

        # Store the same config for the altered operator (workload)
        new_data = te.placeholder(
            (N, CI // ic_block_factor, H, W, ic_block_factor), dtype=data.dtype
        )
        new_kernel = te.placeholder(
            (
                CO // oc_block_factor,
                CI // ic_block_factor // groups,
                KH,
                KW,
                oc_block_factor,
                ic_block_factor,
            ),
            dtype=kernel.dtype,
        )
        new_workload = autotvm.task.args_to_workload(
            [new_data, new_kernel, strides, padding, dilation, groups, out_dtype],
            "group_conv2d_NCHWc_int8.cuda",
        )
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.conv2d(*inputs, **new_attrs)

    if topi_tmpl == "conv2d_HWNCnc_tensorcore.cuda":
        assert data_layout == "HWNC" and kernel_layout == "HWOI"
        assert float(tvm.cuda(0).compute_version) >= 7.5
        H, W, N, CI = get_const_tuple(data.shape)
        KH, KW, CO, _ = get_const_tuple(kernel.shape)

        # Channels must divide by the tensor-core block factors; otherwise
        # keep the original layout.
        if (
            kernel.dtype in ["int4", "uint4"]
            and (CI % 32 != 0 or CO % 8 != 0)
            or kernel.dtype in ["int8", "uint8"]
            and (CI % 16 != 0 or CO % 32 != 0)
        ):
            return relay.nn.conv2d(*inputs, **new_attrs)

        new_attrs["channels"] = CO
        if kernel.dtype in ["int4", "uint4"]:
            new_attrs["kernel_layout"] = "HWOI8o32i"
            ic_block_factor = 32
            oc_block_factor = 8
        else:
            new_attrs["kernel_layout"] = "HWOI32o16i"
            ic_block_factor = 16
            oc_block_factor = 32

        new_kernel = te.placeholder(
            (
                KH,
                KW,
                CO // oc_block_factor,
                CI // ic_block_factor,
                oc_block_factor,
                ic_block_factor,
            ),
            dtype=kernel.dtype,
        )
        new_workload = autotvm.task.args_to_workload(
            [data, new_kernel, strides, padding, dilation, out_dtype],
            "conv2d_HWNCnc_tensorcore.cuda",
        )
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.conv2d(*inputs, **new_attrs)

    return None
def _pad_conv2d_HWNC(db, di, do, data, kernel, out_channel, new_attrs, output_tensor):
    """Pad an HWNC conv2d's batch (db), input-channel (di) and output-channel
    (do) extents, run the convolution, and slice the result back down to its
    original shape."""
    if db != 0:
        # Batch is axis 2 of HWNC data.
        data = relay.nn.pad(data, pad_width=((0, 0), (0, 0), (0, db), (0, 0)))
    if di != 0:
        # Input channel: axis 3 of both HWNC data and HWOI kernel.
        data = relay.nn.pad(data, pad_width=((0, 0), (0, 0), (0, 0), (0, di)))
        kernel = relay.nn.pad(kernel, pad_width=((0, 0), (0, 0), (0, 0), (0, di)))
    if do != 0:
        # Output channel: axis 2 of the HWOI kernel; attrs must track it.
        kernel = relay.nn.pad(kernel, pad_width=((0, 0), (0, 0), (0, do), (0, 0)))
        new_attrs["channels"] = out_channel + do
    out = relay.nn.conv2d(data, kernel, **new_attrs)
    if db != 0 or do != 0:
        # Crop the padded batch/output-channel extents off the result.
        out = relay.strided_slice(
            out, begin=[0, 0, 0, 0], end=[x.value for x in output_tensor.shape]
        )
    return out
def _pad_conv2d_NHWC(db, di, do, data, kernel, out_channel, new_attrs, output_tensor):
    """Pad an NHWC conv2d's batch (db), input-channel (di) and output-channel
    (do) extents, run the convolution, and slice the result back down to its
    original shape."""
    if db != 0:
        # Batch is axis 0 of NHWC data.
        data = relay.nn.pad(data, pad_width=((0, db), (0, 0), (0, 0), (0, 0)))
    if di != 0:
        # Input channel: axis 3 of NHWC data, axis 2 of the HWIO kernel.
        data = relay.nn.pad(data, pad_width=((0, 0), (0, 0), (0, 0), (0, di)))
        kernel = relay.nn.pad(kernel, pad_width=((0, 0), (0, 0), (0, di), (0, 0)))
    if do != 0:
        # Output channel: axis 3 of the HWIO kernel; attrs must track it.
        kernel = relay.nn.pad(kernel, pad_width=((0, 0), (0, 0), (0, 0), (0, do)))
        new_attrs["channels"] = out_channel + do
    out = relay.nn.conv2d(data, kernel, **new_attrs)
    if db != 0 or do != 0:
        # Crop the padded batch/output-channel extents off the result.
        out = relay.strided_slice(
            out, begin=[0, 0, 0, 0], end=[x.value for x in output_tensor.shape]
        )
    return out
@conv2d_legalize.register(["cuda", "gpu"])
def _conv2d_legalize(attrs, inputs, arg_types):
    """Legalizes Conv2D op.

    Pads batch / input-channel / output-channel extents so that the int8
    vectorized schedules (multiple-of-4 channels) or the tensor-core
    schedules (8/16/32 block combinations) become applicable, then slices
    the result back to the original shape.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    if not is_target(["vulkan", "rocm", "cuda"]):
        return None
    # Dilation not supported yet. Return None if dilation is not (1, 1)
    dilation = attrs.get_int_tuple("dilation")
    if not (dilation[0] == 1 and dilation[1] == 1):
        return None

    # No legalization for depthwise convolutions yet.
    groups = attrs.get_int("groups")
    if groups != 1:
        return None

    # Collect the input tensors.
    data_tensor, kernel_tensor = arg_types[0], arg_types[1]
    data_dtype = data_tensor.dtype

    # Collect the output tensor.
    output_tensor = arg_types[2]

    # Collect the input exprs.
    data, kernel = inputs

    # Get the conv attrs
    new_attrs = {k: attrs[k] for k in attrs.keys()}

    # Get data layout. Return None if not NCHW
    data_layout = attrs["data_layout"]
    kernel_layout = attrs["kernel_layout"]

    # Pad input and output channels to use int8 schedule.
    if data_dtype in ["int8", "uint8"]:
        if data_layout == "NCHW" and kernel_layout == "OIHW":
            oc_modified = False
            in_channel = data_tensor.shape[1].value
            out_channel = kernel_tensor.shape[0].value

            # Pad input channel up to a multiple of 4 (pads both data and
            # kernel input-channel axes; extra zeros do not change the sum).
            if in_channel % 4 != 0:
                new_in_channel = ((in_channel + 4) // 4) * 4
                diff = new_in_channel - in_channel
                pad_width = ((0, 0), (0, diff), (0, 0), (0, 0))
                data = relay.nn.pad(data, pad_width=pad_width)
                kernel = relay.nn.pad(kernel, pad_width=pad_width)

            # Pad output channel
            new_out_channel = out_channel
            if out_channel % 4 != 0:
                new_out_channel = ((out_channel + 4) // 4) * 4
                diff = new_out_channel - out_channel
                kernel = relay.nn.pad(kernel, pad_width=((0, diff), (0, 0), (0, 0), (0, 0)))
                oc_modified = True

            if oc_modified:
                # Output channels changed: slice the result back afterwards.
                new_attrs["channels"] = new_out_channel
                out = tvm.relay.nn.conv2d(data, kernel, **new_attrs)
                original_out_shape = [x.value for x in output_tensor.shape]
                out = relay.strided_slice(out, begin=[0, 0, 0, 0], end=original_out_shape)
            else:
                out = relay.nn.conv2d(data, kernel, **new_attrs)
            return out

        if data_layout == "NHWC" and kernel_layout == "HWIO":
            batch = data_tensor.shape[0].value
            in_channel = data_tensor.shape[3].value
            out_channel = kernel_tensor.shape[3].value

            # Tensor-core friendly (batch, in, out) block combinations.
            if (
                (batch % 8 == 0 and in_channel % 16 == 0 and out_channel % 32 == 0)
                or (batch % 16 == 0 and in_channel % 16 == 0 and out_channel % 16 == 0)
                or (batch % 32 == 0 and in_channel % 16 == 0 and out_channel % 8 == 0)
            ):
                # no need to pad
                return None

            candidates = [(16, 16, 16), (32, 16, 8), (8, 16, 32)]
            (db, di, do), extra_flops = pad_to_tensorcore(
                batch, in_channel, out_channel, candidates
            )

            # Skip when padding would more than triple the work.
            if extra_flops > 2:
                logger.info("conv2d pad_to_tensorcore skipped, extra_flops %s", extra_flops)
                return None

            logger.info("conv2d pad_to_tensorcore, extra_flops %s", extra_flops)

            return _pad_conv2d_NHWC(db, di, do, data, kernel, out_channel, new_attrs, output_tensor)

        if data_layout == "HWNC" and kernel_layout == "HWOI":
            batch = data_tensor.shape[2].value
            in_channel = data_tensor.shape[3].value
            out_channel = kernel_tensor.shape[2].value

            if batch % 8 == 0 and in_channel % 16 == 0 and out_channel % 32 == 0:
                return None

            candidates = [(8, 16, 32)]
            (db, di, do), extra_flops = pad_to_tensorcore(
                batch, in_channel, out_channel, candidates
            )

            if extra_flops > 2:
                logger.info("conv2d pad_to_tensorcore skipped, extra_flops %s", extra_flops)
                return None

            logger.info("conv2d pad_to_tensorcore, extra_flops %s", extra_flops)

            return _pad_conv2d_HWNC(db, di, do, data, kernel, out_channel, new_attrs, output_tensor)
    elif data_dtype in ["float16"]:
        if data_layout == "NHWC" and kernel_layout == "HWIO":
            if isinstance(data_tensor.shape[0], tvm.tir.expr.Any):
                # Skip legalize when the batch size is dynamic
                return None
            batch = data_tensor.shape[0].value
            in_channel = data_tensor.shape[3].value
            out_channel = kernel_tensor.shape[3].value

            if (
                (batch % 8 == 0 and in_channel % 16 == 0 and out_channel % 32 == 0)
                or (batch % 16 == 0 and in_channel % 16 == 0 and out_channel % 16 == 0)
                or (batch % 32 == 0 and in_channel % 16 == 0 and out_channel % 8 == 0)
            ):
                # no need to pad
                return None

            candidates = [(16, 16, 16), (32, 16, 8), (8, 16, 32)]
            (db, di, do), extra_flops = pad_to_tensorcore(
                batch, in_channel, out_channel, candidates
            )

            if extra_flops > 2:
                logger.info("conv2d pad_to_tensorcore skipped, extra_flops %s", extra_flops)
                return None

            logger.info("conv2d pad_to_tensorcore, extra_flops %s", extra_flops)

            return _pad_conv2d_NHWC(db, di, do, data, kernel, out_channel, new_attrs, output_tensor)
    elif data_dtype in ["int4", "uint4"]:
        if data_layout == "NHWC" and kernel_layout == "HWIO":
            batch = data_tensor.shape[0].value
            in_channel = data_tensor.shape[3].value
            out_channel = kernel_tensor.shape[3].value

            if (
                (batch % 8 == 0 and in_channel % 16 == 0 and out_channel % 32 == 0)
                or (batch % 16 == 0 and in_channel % 16 == 0 and out_channel % 16 == 0)
                or (batch % 32 == 0 and in_channel % 16 == 0 and out_channel % 8 == 0)
            ):
                # no need to pad
                return None

            candidates = [(16, 16, 16), (32, 16, 8), (8, 16, 32)]
            (db, di, do), extra_flops = pad_to_tensorcore(
                batch, in_channel, out_channel, candidates
            )

            if extra_flops > 2:
                logger.info("conv2d pad_to_tensorcore skipped, extra_flops %s", extra_flops)
                return None

            logger.info("conv2d pad_to_tensorcore, extra_flops %s", extra_flops)

            return _pad_conv2d_NHWC(db, di, do, data, kernel, out_channel, new_attrs, output_tensor)

        if data_layout == "HWNC" and kernel_layout == "HWOI":
            batch = data_tensor.shape[2].value
            in_channel = data_tensor.shape[3].value
            out_channel = kernel_tensor.shape[2].value

            if batch % 8 == 0 and in_channel % 32 == 0 and out_channel % 8 == 0:
                return None

            candidates = [(8, 32, 8)]
            (db, di, do), extra_flops = pad_to_tensorcore(
                batch, in_channel, out_channel, candidates
            )

            if extra_flops > 2:
                logger.info("conv2d pad_to_tensorcore skipped, extra_flops %s", extra_flops)
                return None

            logger.info("conv2d pad_to_tensorcore, extra_flops %s", extra_flops)

            return _pad_conv2d_HWNC(db, di, do, data, kernel, out_channel, new_attrs, output_tensor)

    return None
| 21,672 | 38.333938 | 100 | py |
tvm | tvm-main/python/tvm/topi/cuda/nn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""scheduler functions for cuda backend"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from ..utils import traverse_inline
def schedule_lrn(outs):
    """Schedule for LRN.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of LRN
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    sch = te.create_schedule([t.op for t in outs])
    thread_cap = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    root = outs[0]

    def _visit(op):
        if "sqr_sum" in op.tag:
            # Inline the padding stage feeding the square-sum reduction.
            sch[op.input_tensors[0]].compute_inline()
            # Flatten the output, split across blocks/threads, and compute
            # the reduction at the thread level of the output.
            flat = sch[root].fuse(*sch[root].op.axis)
            blk, thr = sch[root].split(flat, factor=thread_cap)
            sch[root].bind(blk, te.thread_axis("blockIdx.x"))
            sch[root].bind(thr, te.thread_axis("threadIdx.x"))
            sch[op].compute_at(sch[root], thr)

    traverse_inline(sch, root.op, _visit)
    return sch
| 1,965 | 34.107143 | 82 | py |
tvm | tvm-main/python/tvm/topi/cuda/batch_matmul_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-locals,unused-variable,unused-argument
"""cuda batch_matmul operators"""
import tvm
from tvm import autotvm
from tvm import te
from ..utils import traverse_inline, get_const_tuple
from .tensor_intrin import (
intrin_wmma_load_matrix_A,
intrin_wmma_load_matrix_W,
intrin_wmma_store_matrix,
intrin_wmma_gemm,
)
@autotvm.register_topi_compute("batch_matmul_tensorcore.cuda")
def batch_matmul_tensorcore(
    cfg, x, y, out_shape=None, out_dtype=None, transpose_a=False, transpose_b=True
):
    """batch matmul tensorcore operator on cuda"""
    # Only the A (non-transposed) x B^T layout is implemented.
    # TODO(jcf94): Deal with different transpose combinations
    assert not transpose_a
    assert transpose_b
    # out_shape is accepted for interface compatibility only.
    # TODO(liuxin.ai): Deal with out_shape for broadcast
    del out_shape
    return batch_matmul_tensorcore_cuda(x, y, out_dtype)
@autotvm.register_topi_schedule("batch_matmul_tensorcore.cuda")
def schedule_batch_matmul_tensorcore(cfg, outs):
    """Schedule for batch_matmul operator using Tensorcore

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of batch_matmul
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])

    def _schedule(cfg, s, C):
        # Schedule one batch_matmul stage C = A x B^T through the WMMA
        # (tensor-core) pipeline: global -> shared -> wmma fragments ->
        # accumulator -> shared -> global.
        A, B = s[C].op.input_tensors
        if len(B.op.input_tensors) == 1 and B.op.input_tensors[0] == A:
            s[B].compute_inline()
        batch, m_dim, k_dim = get_const_tuple(A.shape)
        batch, n_dim, k_dim = get_const_tuple(B.shape)
        data_dtype = A.dtype
        out_dtype = C.dtype

        # Explicit memory access
        AS = s.cache_read(A, "shared", [C])
        BS = s.cache_read(B, "shared", [C])
        AF = s.cache_read(AS, "wmma.matrix_a", [C])
        BF = s.cache_read(BS, "wmma.matrix_b", [C])
        CF = s.cache_write(C, "wmma.accumulator")
        CS = s.cache_read(CF, "shared", [C])

        # fallback support
        target = tvm.target.Target.current()
        if cfg.is_fallback:
            ref_log = autotvm.tophub.load_reference_log(
                target.kind.name, target.model, "batch_matmul_tensorcore.cuda"
            )
            cfg.fallback_with_reference_log(ref_log)

        # Deal with op fusion, such as bias/relu and slice after padding
        if C.op not in s.outputs and "injective" in s.outputs[0].tag:
            s[C].compute_inline()
            C = s.outputs[0].output(0)

        # create tuning space
        cfg.define_knob("block_row_warps", [1, 2, 4])
        cfg.define_knob("block_col_warps", [1, 2, 4])
        cfg.define_knob("warp_row_tiles", [1, 2, 4])
        cfg.define_knob("warp_col_tiles", [1, 2, 4])
        cfg.define_knob("chunk", [1, 2, 4, 8])
        # offset/offsetCS pad shared-memory rows to avoid bank conflicts.
        cfg.define_knob("offset", [0, 8])
        cfg.define_knob("offsetCS", [0, 8])
        cfg.define_knob("vec", [1, 2, 4, 8])

        # Ensure that the default parameters are applicable when autotvm is not in use
        if data_dtype in ["float16", "uint8", "int8"]:
            # Pick the WMMA tile (m, n) pair compatible with the problem size;
            # supported fragment shapes are 32x8, 16x16 and 8x32 with k=16.
            if m_dim % 32 == 0 and n_dim % 8 == 0:
                cfg.define_knob("wmma_m", [32, 16, 8])
            elif m_dim % 16 == 0 and n_dim % 16 == 0:
                cfg.define_knob("wmma_m", [16, 8, 32])
            elif m_dim % 8 == 0 and n_dim % 32 == 0:
                cfg.define_knob("wmma_m", [8, 16, 32])
            wmma_k = 16
            wmma_m = cfg["wmma_m"].val
            if wmma_m == 16:
                wmma_n = 16
            elif wmma_m == 8:
                wmma_n = 32
            elif wmma_m == 32:
                wmma_n = 8
        elif data_dtype in ["int4", "uint4"]:
            # Sub-byte tensor cores only support the 8x8x32 fragment.
            wmma_m = wmma_n = 8
            wmma_k = 32
        else:
            raise ValueError(f"data dtype {data_dtype} is not yet supported")

        warp_size = 32
        block_row_warps = cfg["block_row_warps"].val
        block_col_warps = cfg["block_col_warps"].val
        warp_row_tiles = cfg["warp_row_tiles"].val
        warp_col_tiles = cfg["warp_col_tiles"].val
        chunk = cfg["chunk"].val
        offset = cfg["offset"].val
        offsetCS = cfg["offsetCS"].val
        vec = cfg["vec"].val

        # Define the stride of intrin functions
        AS_align = chunk * wmma_k + offset
        BS_align = chunk * wmma_k + offset
        CS_align = warp_col_tiles * block_col_warps * wmma_n + offsetCS
        AS_stride = [AS_align, 1]
        BS_stride = [BS_align, 1]
        AF_stride = [wmma_k, 1]
        BF_stride = [wmma_k, 1]
        CF_stride = [warp_col_tiles * wmma_n, 1]
        CS_stride = [CS_align, 1]

        block_x = te.thread_axis("blockIdx.x")
        block_y = te.thread_axis("blockIdx.y")
        block_z = te.thread_axis("blockIdx.z")
        thread_x = te.thread_axis("threadIdx.x")
        thread_y = te.thread_axis("threadIdx.y")
        thread_z = te.thread_axis("threadIdx.z")

        # Schedule for dense computation
        block_factor_m = wmma_m * warp_row_tiles * block_row_warps
        block_factor_n = wmma_n * warp_col_tiles * block_col_warps
        b, m, n = C.op.axis
        block_i, bc = s[C].split(m, factor=block_factor_m)
        block_j, oc = s[C].split(n, factor=block_factor_n)
        s[C].reorder(b, block_i, block_j, bc, oc)
        t = s[C].fuse(bc, oc)
        t, vi = s[C].split(t, factor=vec)
        t, tx = s[C].split(t, factor=warp_size)
        t, ty = s[C].split(t, factor=block_row_warps)
        t, tz = s[C].split(t, factor=block_col_warps)
        s[C].bind(block_i, block_x)
        s[C].bind(block_j, block_y)
        s[C].bind(b, block_z)
        s[C].bind(tz, thread_z)
        s[C].bind(ty, thread_y)
        s[C].bind(tx, thread_x)
        s[C].vectorize(vi)

        # Schedule for wmma store
        s[CS].compute_at(s[C], block_j)
        bs, bb, oo = CS.op.axis
        s[CS].storage_align(bb, CS_align - 1, CS_align)
        bb, bbi = s[CS].split(bb, factor=wmma_m)
        oo, ooi = s[CS].split(oo, factor=wmma_n)
        bb, bbii = s[CS].split(bb, factor=warp_row_tiles)
        oo, ooii = s[CS].split(oo, factor=warp_col_tiles)
        s[CS].reorder(bs, bb, oo, bbii, ooii, bbi, ooi)
        s[CS].bind(bb, thread_z)
        s[CS].bind(oo, thread_y)

        # Schedule for wmma computation
        s[CF].compute_at(s[CS], oo)
        bs, warp_i, warp_j = CF.op.axis
        warp_i, _ii = s[CF].split(warp_i, factor=wmma_m)
        warp_j, _jj = s[CF].split(warp_j, factor=wmma_n)
        (k,) = CF.op.reduce_axis
        k, _k = s[CF].split(k, factor=wmma_k)
        ko, ki = s[CF].split(k, factor=chunk)
        s[CF].reorder(bs, ko, ki, warp_i, warp_j, _ii, _jj, _k)

        # Schedule for wmma_matrix_a load
        s[AF].compute_at(s[CF], ki)
        bs, b, i = AF.op.axis
        b, b_ii = s[AF].split(b, factor=wmma_m)
        i, i_jj = s[AF].split(i, factor=wmma_k)
        s[AF].reorder(bs, b, i, b_ii, i_jj)

        # Schedule for wmma_matrix_b load
        s[BF].compute_at(s[CF], ki)
        bs, o, i = BF.op.axis
        o, o_ii = s[BF].split(o, factor=wmma_n)
        i, i_ii = s[BF].split(i, factor=wmma_k)
        s[BF].reorder(bs, o, i, o_ii, i_ii)

        # Schedule for A's(B's) shared memory load
        def shared_schedule(stage, strides):
            # Cooperative fetch: all threads of the block copy the shared tile.
            s[stage].compute_at(s[CF], ko)
            bs, xo, yo = stage.op.axis
            s[stage].storage_align(xo, strides - 1, strides)
            t = s[stage].fuse(xo, yo)
            t, vi = s[stage].split(t, factor=vec)
            t, tx = s[stage].split(t, factor=warp_size)
            t, ty = s[stage].split(t, factor=block_row_warps)
            _, tz = s[stage].split(t, factor=block_col_warps)
            s[stage].bind(ty, thread_y)
            s[stage].bind(tz, thread_z)
            s[stage].bind(tx, thread_x)
            s[stage].vectorize(vi)

        shared_schedule(AS, AS_align)
        shared_schedule(BS, BS_align)

        shape = (wmma_m, wmma_n, wmma_k)
        # Reference single-fragment gemm used to build the tensorize intrinsics.
        AL_gemm = te.placeholder((wmma_m, wmma_k), name="AL_gemm", dtype=data_dtype)
        BL_gemm = te.placeholder((wmma_n, wmma_k), name="BL_gemm", dtype=data_dtype)
        k_gemm = te.reduce_axis((0, wmma_k), name="k_gemm")
        CL_compute = te.compute(
            (wmma_m, wmma_n),
            lambda ii, jj: te.sum(
                AL_gemm[ii, k_gemm].astype(out_dtype) * BL_gemm[jj, k_gemm].astype(out_dtype),
                axis=k_gemm,
            ),
            name="CL_compute",
        )

        # lower the computation loops down to TensorCore hardware intrinsics
        # by mapping the dense tensorcore to tensor intrinsics
        s[AF].tensorize(
            b_ii,
            intrin_wmma_load_matrix_A(
                AF_stride,
                AS_stride,
                shape,
                "row_major",
                (wmma_m, wmma_k),
                (wmma_m, wmma_k),
                data_dtype,
            ),
        )
        s[BF].tensorize(
            o_ii,
            intrin_wmma_load_matrix_W(
                BF_stride,
                BS_stride,
                shape,
                "col_major",
                (wmma_n, wmma_k),
                (wmma_n, wmma_k),
                data_dtype,
            ),
        )
        s[CF].tensorize(
            _ii,
            intrin_wmma_gemm(AL_gemm, BL_gemm, CL_compute, AF_stride, BF_stride, CF_stride, shape),
        )
        s[CS].tensorize(
            bbi,
            intrin_wmma_store_matrix(
                CS_stride, CF_stride, shape, out_dtype, (wmma_m, wmma_n), (wmma_m, wmma_n)
            ),
        )

    def _callback(op):
        if "batch_matmul_tensorcore" in op.tag:
            _schedule(cfg, s, op.output(0))

    traverse_inline(s, outs[0].op, _callback)
    return s
def batch_matmul_tensorcore_cuda(x, y, out_dtype=None):
    """Declare a batched matmul (NT layout) suitable for TensorCore execution.

    Computes ``output[b, i, j] = sum_k x[b, i, k] * y[b, j, k]`` and checks
    that (M, K, N) fit one of the wmma fragment shapes.

    Parameters
    ----------
    x : tvm.te.Tensor
        3-D with shape [batch, M, K]
    y : tvm.te.Tensor
        3-D with shape [batch, N, K]
    out_dtype : Optional[str]
        Accumulation dtype; defaults to the input dtype.

    Returns
    -------
    output : tvm.te.Tensor
        3-D with shape [batch, M, N]
    """
    assert len(x.shape) == 3 and len(y.shape) == 3, "only support 3-dim batch_matmul"
    x_shape = get_const_tuple(x.shape)
    y_shape = get_const_tuple(y.shape)
    assert x_shape[0] == y_shape[0], "batch dimension doesn't match"
    assert x_shape[2] == y_shape[2], "shapes of x and y is inconsistent"
    batch, M, K = x.shape
    N = y.shape[1]
    if out_dtype is None:
        out_dtype = x.dtype
    assert x.dtype == y.dtype
    assert x.dtype in ["float16", "uint8", "int8", "uint4", "int4"]
    if x.dtype in ["float16", "uint8", "int8"]:
        # These dtypes use the 16x16x16 / 32x16x8 / 8x16x32 wmma shapes.
        shape_ok = (
            (M % 8 == 0 and K % 16 == 0 and N % 32 == 0)
            or (M % 16 == 0 and K % 16 == 0 and N % 16 == 0)
            or (M % 32 == 0 and K % 16 == 0 and N % 8 == 0)
        )
        assert (
            shape_ok
        ), "The shape of (M, K, N) must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32)"
    else:
        # 4-bit dtypes only support the 8x32x8 fragment shape.
        assert (
            M % 8 == 0 and K % 32 == 0 and N % 8 == 0
        ), "The shape of (M, K, N) must be multiple of (8, 32, 8)"
    k = te.reduce_axis((0, K), name="k")

    def _matmul(b, i, j):
        return te.sum(x[b, i, k].astype(out_dtype) * y[b, j, k].astype(out_dtype), axis=k)

    return te.compute((batch, M, N), _matmul, tag="batch_matmul_tensorcore")
| 12,197 | 35.963636 | 100 | py |
tvm | tvm-main/python/tvm/topi/cuda/batch_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-locals,unused-variable,unused-argument
"""cuda batch_matmul operators"""
import tvm
from tvm import autotvm
from tvm import te
from tvm.contrib import cublas
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from .. import nn, generic
from ..utils import traverse_inline, get_const_tuple, get_max_power2_factor
from .tensor_intrin import dp4a
@autotvm.register_topi_compute("batch_matmul.cuda")
def batch_matmul(cfg, x, y, out_shape=None, out_dtype=None, transpose_a=False, transpose_b=True):
    """Compute batch matrix multiplication of `x` and `y` on CUDA.

    This is a thin AutoTVM-registered wrapper over the generic
    :func:`topi.nn.batch_matmul` compute. Either operand may be transposed;
    for legacy reasons the default is NT format
    (``transpose_a=False``, ``transpose_b=True``).

    Parameters
    ----------
    cfg : ConfigSpace
        Autotvm tuning space config file (unused by the compute itself).
    x : tvm.te.Tensor
        3-D with shape [batch, M, K] or [batch, K, M].
    y : tvm.te.Tensor
        3-D with shape [batch, K, N] or [batch, N, K].
    out_shape : List[Optional]
        Explicit intended output shape; useful with dynamic input shapes.
    out_dtype : Optional[str]
        Output data type for mixed-precision batch matmul.
    transpose_a : Optional[bool] = False
        Whether the first tensor is in transposed format.
    transpose_b : Optional[bool] = True
        Whether the second tensor is in transposed format.

    Returns
    -------
    output : tvm.te.Tensor
        3-D with shape [batch, M, N]
    """
    # Delegate entirely to the generic compute; scheduling is done separately.
    return nn.batch_matmul(
        x,
        y,
        oshape=out_shape,
        out_dtype=out_dtype,
        transpose_a=transpose_a,
        transpose_b=transpose_b,
    )
@autotvm.register_topi_schedule("batch_matmul.cuda")
def schedule_batch_matmul(cfg, outs):
    """Schedule for batch_matmul
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of batch_matmul
        in the format of an array of tensors.
    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    def _schedule(cfg, op):
        """Tile one batch_matmul op and bind it to CUDA blocks/threads."""
        C = op.output(0)
        A, B = s[C].op.input_tensors
        # If B is a unary transform of A (e.g. a transpose created by the
        # compute), inline it so it is fused into the shared-memory load.
        if len(B.op.input_tensors) == 1 and B.op.input_tensors[0] == A:
            s[B].compute_inline()
        _, M, N = get_const_tuple(C.shape)
        # Memory hierarchy: global -> shared (AA/BB) -> registers (AL/BL),
        # with the accumulator CC held in registers.
        AA = s.cache_read(A, "shared", [C])
        AL = s.cache_read(AA, "local", [C])
        BB = s.cache_read(B, "shared", [C])
        BL = s.cache_read(BB, "local", [C])
        CC = s.cache_write(C, "local")
        # If the matmul is fused into a later op, schedule the final output
        # stage instead and inline the matmul result.
        if op not in s.outputs:
            s[C].compute_inline()
            C = s.outputs[0].output(0)
        b, y, x = s[C].op.axis
        (k,) = s[CC].op.reduce_axis
        # Tuning space: 3-way spatial tiles, 2-way reduction tile, unrolling.
        cfg.define_split("tile_y", y, num_outputs=3)
        cfg.define_split("tile_x", x, num_outputs=3)
        cfg.define_split("tile_k", k, num_outputs=2)
        cfg.define_knob("auto_unroll_max_step", [8, 16, 32, 64])
        target = tvm.target.Target.current()
        if target.kind.name in ["nvptx", "rocm"]:
            # llvm-based backends cannot do non-explicit unrolling
            cfg.define_knob("unroll_explicit", [1])
        else:
            cfg.define_knob("unroll_explicit", [0, 1])
        if cfg.is_fallback:
            # No tuned config: pick power-of-two tiles capped at 64 with at
            # most 8 threads per dimension.
            y_bn = get_max_power2_factor(M, 64)
            x_bn = get_max_power2_factor(N, 64)
            y_nthreads = min(y_bn, 8)
            x_nthreads = min(x_bn, 8)
            cfg["tile_x"] = SplitEntity([-1, x_nthreads, x_bn // x_nthreads])
            cfg["tile_y"] = SplitEntity([-1, y_nthreads, y_bn // y_nthreads])
            cfg["tile_k"] = SplitEntity([-1, 8])
            cfg["auto_unroll_max_step"] = OtherOptionEntity(16)
        # Bind batch to blockIdx.z and the spatial tiles to the 2-D grid.
        by, ty, yi = cfg["tile_y"].apply(s, C, y)
        bx, tx, xi = cfg["tile_x"].apply(s, C, x)
        thread_x = te.thread_axis("threadIdx.x")
        thread_y = te.thread_axis("threadIdx.y")
        s[C].reorder(b, by, bx, ty, tx, yi, xi)
        s[C].bind(b, te.thread_axis("blockIdx.z"))
        s[C].bind(by, te.thread_axis("blockIdx.y"))
        s[C].bind(bx, te.thread_axis("blockIdx.x"))
        s[C].bind(ty, thread_y)
        s[C].bind(tx, thread_x)
        s[C].pragma(yi, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
        s[C].pragma(yi, "unroll_explicit", cfg["unroll_explicit"].val)
        # Accumulate in registers per thread; tile the reduction axis.
        s[CC].compute_at(s[C], tx)
        _, yi, xi = s[CC].op.axis
        ko, ki = cfg["tile_k"].apply(s, CC, k)
        s[CC].reorder(ko, ki, yi, xi)
        s[CC].pragma(ki, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
        s[CC].pragma(ki, "unroll_explicit", cfg["unroll_explicit"].val)
        # Shared loads happen once per outer-k tile; register loads per inner-k.
        s[AA].compute_at(s[CC], ko)
        s[AL].compute_at(s[CC], ki)
        s[BB].compute_at(s[CC], ko)
        s[BL].compute_at(s[CC], ki)
        # Cooperative fetch of A into shared memory across the thread block.
        _, y, k = s[AA].op.axis
        ty, yi = s[AA].split(y, nparts=cfg["tile_y"].size[1])
        tx, ki = s[AA].split(k, nparts=cfg["tile_x"].size[1])
        s[AA].reorder(ty, tx, yi, ki)
        s[AA].bind(ty, thread_y)
        s[AA].bind(tx, thread_x)
        s[AA].pragma(yi, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
        s[AA].pragma(yi, "unroll_explicit", cfg["unroll_explicit"].val)
        # Cooperative fetch of B into shared memory.
        _, x, k = s[BB].op.axis
        ty, xi = s[BB].split(x, nparts=cfg["tile_y"].size[1])
        tx, ki = s[BB].split(k, nparts=cfg["tile_x"].size[1])
        s[BB].bind(ty, thread_y)
        s[BB].bind(tx, thread_x)
        s[BB].reorder(ty, tx, xi, ki)
        s[BB].pragma(xi, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
        s[BB].pragma(xi, "unroll_explicit", cfg["unroll_explicit"].val)
    def _callback(op):
        # Apply the custom schedule only to ops tagged as batch_matmul.
        if "batch_matmul" in op.tag:
            _schedule(cfg, op)
    traverse_inline(s, outs[0].op, _callback)
    return s
@autotvm.register_topi_compute("batch_matmul_cublas.cuda")
def batch_matmul_cublas(
    cfg, x, y, out_shape=None, out_dtype=None, transpose_a=False, transpose_b=True
):
    """Compute batch matrix multiplication of `x` and `y` using cuBLAS.
    Both `x` and `y` can be transposed. For legacy reason, we use NT format
    (transpose_a=False, transpose_b=True) by default.
    Parameters
    ----------
    cfg : ConfigSpace
        Autotvm tuning space config file.
    x : tvm.te.Tensor
        3-D with shape [batch, M, K] or [batch, K, M].
    y : tvm.te.Tensor
        3-D with shape [batch, K, N] or [batch, N, K].
    out_shape : List[Optional]
        Explicit intended output shape of the computation. Can be useful in cases
        with dynamic input shapes.
    out_dtype : Optional[str]
        Specifies the output data type for mixed precision batch matmul.
    transpose_a : Optional[bool] = False
        Whether the first tensor is in transposed format.
    transpose_b : Optional[bool] = True
        Whether the second tensor is in transposed format.
    Returns
    -------
    output : tvm.te.Tensor
        3-D with shape [batch, M, N]
    """
    # Recover (batch, M, K) from x's layout; transpose swaps the last two dims.
    if transpose_a:
        b, k, m = get_const_tuple(x.shape)
    else:
        b, m, k = get_const_tuple(x.shape)
    # Recover N (and re-read b, k) from y's layout.
    if transpose_b:
        b, n, k = get_const_tuple(y.shape)
    else:
        b, k, n = get_const_tuple(y.shape)
    # Record FLOPs only for fully static shapes; dynamic dims come back as
    # symbolic exprs, not ints. Generator form avoids building a throwaway list.
    if all(isinstance(s, int) for s in (b, m, n, k)):
        cfg.add_flop(b * m * k * n * 2)
    return cublas.batch_matmul(x, y, transa=transpose_a, transb=transpose_b, dtype=out_dtype)
@autotvm.register_topi_schedule("batch_matmul_cublas.cuda")
def schedule_batch_matmul_cublas(_, outs):
    """Create the default extern-op schedule for the cuBLAS batch_matmul.

    The heavy lifting happens inside the cuBLAS call itself, so the generic
    extern schedule is sufficient; the config argument is unused.
    """
    return generic.schedule_extern(outs)
@autotvm.register_topi_compute("batch_matmul_int8.cuda")
def batch_matmul_int8(
    cfg, x, y, out_shape=None, out_dtype=None, transpose_a=False, transpose_b=True
):
    """Batch Matmul operator for int8 on CUDA.
    Parameters
    ----------
    cfg : ConfigSpace
        Autotvm tuning space config file.
    x : tvm.te.Tensor
        3-D with shape [batch, M, K] or [batch, K, M].
    y : tvm.te.Tensor
        3-D with shape [batch, K, N] or [batch, N, K].
    out_shape : List[Optional]
        Explicit intended output shape of the computation. Can be useful in cases
        with dynamic input shapes.
    out_dtype : Optional[str]
        Specifies the output data type for mixed precision batch matmul.
    transpose_a : Optional[bool] = False
        Whether the first tensor is in transposed format.
    transpose_b : Optional[bool] = True
        Whether the second tensor is in transposed format.
    Returns
    -------
    output : tvm.te.Tensor
        3-D with shape [batch, M, N]
    """
    del out_shape
    # TODO(jcf94): Deal with different transpose combinations
    assert not transpose_a and transpose_b
    if out_dtype is None:
        out_dtype = x.dtype
    x_shape = get_const_tuple(x.shape)
    y_shape = get_const_tuple(y.shape)
    assert len(x_shape) == 3 and len(y_shape) == 3, "only support 3-dim batch_matmul"
    XB, M, XK = x.shape
    YB, N, YK = y.shape
    # A batch dim of 1 on either side broadcasts against the other.
    assert XB == YB or XB == 1 or YB == 1, "batch dimension doesn't match"
    assert XK == YK, "shapes of x and y is inconsistent"
    nB = tvm.te.max(XB, YB)
    # Round K up to a multiple of 4 so the inner product maps onto dp4a.
    nK = ((XK + 3) // 4) * 4
    reduce_k = te.reduce_axis((0, nK), name="k")
    # pad for _dp4a vectorize
    pad_x = te.compute(
        (XB, M, nK),
        lambda b, i, j: tvm.te.if_then_else(
            j >= XK, tvm.runtime.convert(0).astype(x.dtype), x[b, i, j]
        ),
    )
    pad_y = te.compute(
        (YB, N, nK),
        lambda b, i, j: tvm.te.if_then_else(
            j >= YK, tvm.runtime.convert(0).astype(y.dtype), y[b, i, j]
        ),
    )
    # NT matmul over the padded operands; a broadcast batch indexes slice 0.
    out = te.compute(
        (nB, M, N),
        lambda b, i, j: te.sum(
            pad_x[b if XB != 1 else 0, i, reduce_k].astype(out_dtype)
            * pad_y[b if YB != 1 else 0, j, reduce_k].astype(out_dtype),
            axis=[reduce_k],
        ),
        tag="batch_matmul_int8",
    )
    cfg.add_flop(XB * M * N * nK * 2)
    return out
@autotvm.register_topi_schedule("batch_matmul_int8.cuda")
def schedule_batch_matmul_int8(cfg, outs):
    """Batch Matmul schedule for int8 on CUDA.

    Walks the output graph and applies the dp4a-based schedule to every op
    tagged ``batch_matmul_int8``.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([out.op for out in outs])

    def _apply(op):
        # Only the int8 batch_matmul compute gets the custom schedule.
        if "batch_matmul_int8" in op.tag:
            _schedule_batch_matmul_int8(cfg, sch, op.output(0))

    traverse_inline(sch, outs[0].op, _apply)
    return sch
def _schedule_batch_matmul_int8(cfg, s, output):
    """Tile and bind one int8 batch_matmul op, tensorizing the inner product
    with dp4a when the target supports integer dot product."""
    input_x, input_y = s[output].op.input_tensors
    # If y is a unary transform of x, inline it into its consumer.
    if len(input_y.op.input_tensors) == 1 and input_y.op.input_tensors[0] == input_x:
        s[input_y].compute_inline()
    B, M, K = get_const_tuple(input_x.shape)
    _, N, _ = get_const_tuple(input_y.shape)
    # K was padded to a multiple of 4 by the compute; vectorize loads by 16
    # when possible for better memory throughput.
    k_factor = 4
    assert K % k_factor == 0, f"Input dimension must divide {k_factor}"
    if K % 16 == 0:
        k_factor = 16
    cfg.define_split("tile_f", B, num_outputs=4)
    cfg.define_split("tile_m", M, num_outputs=4)
    cfg.define_split("tile_n", N, num_outputs=4)
    cfg.define_split("tile_k", K // k_factor, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 256, 512, 1024])
    batch_matmul_op = s[output].op
    # Inline the zero-padding stages; their values are produced on the fly
    # while filling shared memory.
    s[input_x].compute_inline()
    s[input_y].compute_inline()
    x_cache = s.cache_read(input_x, "shared", [batch_matmul_op])
    y_cache = s.cache_read(input_y, "shared", [batch_matmul_op])
    batch_matmul_cache = s.cache_write(batch_matmul_op.output(0), "local")
    # tile reduce axis
    ko = batch_matmul_cache.op.reduce_axis[0]
    ko, ki = s[batch_matmul_cache].split(ko, factor=4)
    ko, kt = cfg["tile_k"].apply(s, batch_matmul_cache, ko)
    # dp4a tensorize
    target = tvm.target.Target.current(allow_none=False)
    do_tensorize = "+dotprod" in target.mattr or target.supports_integer_dot_product
    if do_tensorize:
        dtypes = (input_x.dtype, input_y.dtype)
        s[batch_matmul_cache].tensorize(ki, dp4a("shared", "shared", "local", dtypes))
    # If fused into a later op, schedule the final output stage instead.
    if batch_matmul_op not in s.outputs:
        s[output].compute_inline()
        batch_matmul_op = s.outputs[0]
    # tile axis
    f, m, n = batch_matmul_op.axis
    # kernel_scope is a 1-extent outer axis used to attach unroll pragmas.
    kernel_scope, f = s[batch_matmul_op].split(f, nparts=1)
    bf, vf, tf, fi = cfg["tile_f"].apply(s, batch_matmul_op, f)
    bm, vm, tm, mi = cfg["tile_m"].apply(s, batch_matmul_op, m)
    bn, vn, tn, ni = cfg["tile_n"].apply(s, batch_matmul_op, n)
    s[batch_matmul_op].reorder(bf, bm, bn, vf, vm, vn, tf, tm, tn, fi, mi, ni)
    # bind axis
    s[batch_matmul_op].bind(bf, tvm.te.thread_axis("blockIdx.z"))
    s[batch_matmul_op].bind(bm, tvm.te.thread_axis("blockIdx.y"))
    s[batch_matmul_op].bind(bn, tvm.te.thread_axis("blockIdx.x"))
    s[batch_matmul_op].bind(vf, tvm.te.thread_axis("vthread"))
    s[batch_matmul_op].bind(vm, tvm.te.thread_axis("vthread"))
    s[batch_matmul_op].bind(vn, tvm.te.thread_axis("vthread"))
    s[batch_matmul_op].bind(tf, tvm.te.thread_axis("threadIdx.z"))
    s[batch_matmul_op].bind(tm, tvm.te.thread_axis("threadIdx.y"))
    s[batch_matmul_op].bind(tn, tvm.te.thread_axis("threadIdx.x"))
    # cache compute at
    s[batch_matmul_cache].compute_at(s[batch_matmul_op], tn)
    fo, mo, no = batch_matmul_cache.op.axis[:3]
    s[batch_matmul_cache].reorder(ko, kt, fo, mo, no, ki)
    # for load in [splited_x_op, splited_y_op]
    for load in [x_cache, y_cache]:
        # Cooperative, vectorized fetch into shared memory per outer-k tile.
        s[load].compute_at(s[batch_matmul_cache], ko)
        outer, inner = s[load].split(s[load].op.axis[-1], factor=k_factor)
        s[load].vectorize(inner)
        fused = s[load].op.axis[:-1] + [outer]
        fused = s[load].fuse(*fused)
        fused, tx = s[load].split(fused, factor=cfg["tile_n"].size[2])
        fused, ty = s[load].split(fused, factor=cfg["tile_m"].size[2])
        fused, tz = s[load].split(fused, factor=cfg["tile_f"].size[2])
        s[load].bind(tz, tvm.te.thread_axis("threadIdx.z"))
        s[load].bind(ty, tvm.te.thread_axis("threadIdx.y"))
        s[load].bind(tx, tvm.te.thread_axis("threadIdx.x"))
    # max unroll
    s[batch_matmul_op].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[batch_matmul_op].pragma(kernel_scope, "unroll_explicit", False)
    return s
| 15,091 | 34.179487 | 100 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv2d_nhwc_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
# pylint: disable=too-many-arguments,too-many-locals
# pylint: disable=too-many-statements
"""Winograd template for cuda backend"""
import tvm
from tvm import autotvm, te
from .. import nn
from ..nn.winograd_util import winograd_transform_matrices
from ..utils import get_const_int, get_const_tuple, traverse_inline
from .tensor_intrin import (
intrin_wmma_gemm,
intrin_wmma_load_matrix_A,
intrin_wmma_load_matrix_W,
intrin_wmma_store_matrix,
)
def _infer_tile_size(data, kernel):
    """Pick the winograd output tile size for NHWC data.

    Returns 4 when the input height divides evenly by 8, otherwise 2.
    The kernel argument is accepted for interface symmetry but unused.
    """
    height = get_const_tuple(data.shape)[1]
    return 4 if height % 8 == 0 else 2
def schedule_bgemm_tensorcore(cfg, s, bgemm, data_pack, kernel_pack):
    """Schedule for bgemm tensorcore

    Maps the winograd batched GEMM onto wmma intrinsics: operands are staged
    global -> shared -> wmma fragments, accumulated in wmma.accumulator, and
    written back through shared memory.
    """
    A = data_pack
    B = kernel_pack
    C = bgemm
    _, _, P, out_dim = get_const_tuple(C.shape)
    out_dtype = C.dtype
    # Explicit memory access
    AS = s.cache_read(A, "shared", [C])
    BS = s.cache_read(B, "shared", [C])
    AF = s.cache_read(AS, "wmma.matrix_a", [C])
    BF = s.cache_read(BS, "wmma.matrix_b", [C])
    CF = s.cache_write(C, "wmma.accumulator")
    CS = s.cache_read(CF, "shared", [C])
    # Create tuning space
    cfg.define_knob("block_row_warps", [1, 2, 4])
    cfg.define_knob("block_col_warps", [1, 2, 4])
    cfg.define_knob("warp_row_tiles", [1, 2, 4, 8])
    cfg.define_knob("warp_col_tiles", [1, 2, 4, 8])
    cfg.define_knob("chunk", [1, 2, 4, 8])
    cfg.define_knob("offset", [0, 1, 2, 4, 8])
    cfg.define_knob("offsetCS", [0, 1, 2, 4, 8])
    cfg.define_knob("vec", [1, 2, 4, 8])
    # Ensure that the default parameters are applicable when autotvm is not in use
    # NOTE(review): "wmma_m" is only defined when one of these divisibility
    # cases holds — callers appear to guarantee this via the shape check in
    # nhwc_winograd_cuda; confirm if reusing elsewhere.
    if P % 16 == 0 and out_dim % 16 == 0:
        cfg.define_knob("wmma_m", [16, 8, 32])
    elif P % 32 == 0 and out_dim % 8 == 0:
        cfg.define_knob("wmma_m", [32, 16, 8])
    elif P % 8 == 0 and out_dim % 32 == 0:
        cfg.define_knob("wmma_m", [8, 16, 32])
    warp_size = 32
    wmma_k = 16
    block_row_warps = cfg["block_row_warps"].val
    block_col_warps = cfg["block_col_warps"].val
    warp_row_tiles = cfg["warp_row_tiles"].val
    warp_col_tiles = cfg["warp_col_tiles"].val
    chunk = cfg["chunk"].val
    offsetAB = cfg["offset"].val
    offsetCS = cfg["offsetCS"].val
    wmma_m = cfg["wmma_m"].val
    vec = cfg["vec"].val
    # wmma fragment shapes: (m, n) pairs are (16,16), (8,32) or (32,8).
    if wmma_m == 16:
        wmma_n = 16
    elif wmma_m == 8:
        wmma_n = 32
    elif wmma_m == 32:
        wmma_n = 8
    # Define the stride of intrin functions
    # The offsets pad shared-memory rows to reduce bank conflicts.
    AS_align = chunk * wmma_k + offsetAB
    BS_align = warp_col_tiles * block_col_warps * wmma_n + offsetAB
    CS_align = warp_col_tiles * block_col_warps * wmma_n + offsetCS
    AS_stride = [AS_align, 1]
    BS_stride = [BS_align, 1]
    AF_stride = [wmma_k, 1]
    BF_stride = [wmma_n * warp_col_tiles, 1]
    CF_stride = [warp_col_tiles * wmma_n, 1]
    CS_stride = [CS_align, 1]
    block_x = te.thread_axis("blockIdx.x")
    block_y = te.thread_axis("blockIdx.y")
    block_z = te.thread_axis("blockIdx.z")
    thread_x = te.thread_axis("threadIdx.x")
    thread_y = te.thread_axis("threadIdx.y")
    thread_z = te.thread_axis("threadIdx.z")
    # Schedule for computation
    block_factor_b = wmma_m * warp_row_tiles * block_row_warps
    block_factor_o = wmma_n * warp_col_tiles * block_col_warps
    alpha_1, alpha_2, b, o = C.op.axis
    # The fused (alpha, alpha) tile index becomes the GEMM batch dimension.
    block_k = s[C].fuse(alpha_1, alpha_2)
    block_i, bc = s[C].split(b, factor=block_factor_b)
    block_j, oc = s[C].split(o, factor=block_factor_o)
    s[C].reorder(block_k, block_i, block_j, bc, oc)
    t = s[C].fuse(bc, oc)
    t, vi = s[C].split(t, factor=vec)
    t, tx = s[C].split(t, factor=warp_size)
    t, ty = s[C].split(t, factor=block_row_warps)
    t, tz = s[C].split(t, factor=block_col_warps)
    s[C].bind(block_k, block_z)
    s[C].bind(block_i, block_x)
    s[C].bind(block_j, block_y)
    s[C].bind(tz, thread_z)
    s[C].bind(ty, thread_y)
    s[C].bind(tx, thread_x)
    s[C].vectorize(vi)
    # Schedule for wmma store
    s[CS].compute_at(s[C], block_j)
    _, _, bb, oo = CS.op.axis
    s[CS].storage_align(bb, CS_align - 1, CS_align)
    bb, bbi = s[CS].split(bb, factor=wmma_m)
    oo, ooi = s[CS].split(oo, factor=wmma_n)
    bb, bbii = s[CS].split(bb, factor=warp_row_tiles)
    oo, ooii = s[CS].split(oo, factor=warp_col_tiles)
    s[CS].reorder(bb, oo, bbii, ooii, bbi, ooi)
    # Schedule for wmma computation
    s[CF].compute_at(s[CS], oo)
    _, _, warp_i, warp_j = CF.op.axis
    warp_i, _ii = s[CF].split(warp_i, factor=wmma_m)
    warp_j, _jj = s[CF].split(warp_j, factor=wmma_n)
    (k,) = CF.op.reduce_axis
    k, _k = s[CF].split(k, factor=wmma_k)
    ko, ki = s[CF].split(k, factor=chunk)
    s[CF].reorder(ko, ki, warp_i, warp_j, _ii, _jj, _k)
    # Schedule for wmma_matrix_a load
    s[AF].compute_at(s[CF], ki)
    _, _, b, i = AF.op.axis
    b, b_ii = s[AF].split(b, factor=wmma_m)
    i, i_jj = s[AF].split(i, factor=wmma_k)
    s[AF].reorder(b, i, b_ii, i_jj)
    # Schedule for wmma_matrix_b load
    s[BF].compute_at(s[CF], ki)
    _, _, i, o = BF.op.axis
    o, o_ii = s[BF].split(o, factor=wmma_n)
    i, i_ii = s[BF].split(i, factor=wmma_k)
    s[BF].reorder(i, o, i_ii, o_ii)
    # Schedule for A's(B's) shared memory load
    def shared_schedule(stage, strides):
        """Cooperatively load one operand into padded shared memory."""
        s[stage].compute_at(s[CF], ko)
        _, _, xo, yo = stage.op.axis
        s[stage].storage_align(xo, strides - 1, strides)
        t = s[stage].fuse(xo, yo)
        t, vi = s[stage].split(t, factor=vec)
        t, tx = s[stage].split(t, factor=warp_size)
        t, ty = s[stage].split(t, factor=block_row_warps)
        _, tz = s[stage].split(t, factor=block_col_warps)
        s[stage].bind(ty, thread_y)
        s[stage].bind(tz, thread_z)
        s[stage].bind(tx, thread_x)
        s[stage].vectorize(vi)
    shared_schedule(AS, AS_align)
    shared_schedule(BS, BS_align)
    shape = (wmma_m, wmma_n, wmma_k)
    in_dtype = "float16"
    # Reference fragment-level compute used to derive the gemm intrinsic body.
    AL_gemm = te.placeholder((wmma_m, wmma_k), name="AL_gemm", dtype=in_dtype)
    BL_gemm = te.placeholder((wmma_k, wmma_n), name="BL_gemm", dtype=in_dtype)
    k_gemm = te.reduce_axis((0, wmma_k), name="k_gemm")
    CL_compute = te.compute(
        (wmma_m, wmma_n),
        lambda ii, jj: te.sum(
            AL_gemm[ii, k_gemm].astype(out_dtype) * BL_gemm[k_gemm, jj].astype(out_dtype),
            axis=k_gemm,
        ),
        name="CL_compute",
    )
    # Lower the computation loops down to TensorCore hardware intrinsics
    # by mapping the tensorcore to tensor intrinsics
    s[AF].tensorize(
        b_ii,
        intrin_wmma_load_matrix_A(
            AF_stride, AS_stride, shape, "row_major", (wmma_m, wmma_k), (wmma_m, wmma_k), "float16"
        ),
    )
    s[BF].tensorize(
        i_ii,
        intrin_wmma_load_matrix_W(
            BF_stride, BS_stride, shape, "row_major", (wmma_k, wmma_n), (wmma_k, wmma_n), "float16"
        ),
    )
    s[CF].tensorize(
        _ii, intrin_wmma_gemm(AL_gemm, BL_gemm, CL_compute, AF_stride, BF_stride, CF_stride, shape)
    )
    s[CS].tensorize(
        bbi,
        intrin_wmma_store_matrix(
            CS_stride, CF_stride, shape, out_dtype, (wmma_m, wmma_n), (wmma_m, wmma_n)
        ),
    )
def schedule_bgemm_direct(cfg, s, bgemm, data_pack, kernel_pack):
    """Schedule for bgemm direct

    Plain CUDA (non-tensorcore) schedule for the winograd batched GEMM:
    spatial tiling onto blocks/vthreads/threads with shared-memory staging
    of both operands.
    """
    b1, b2, y, x = s[bgemm].op.axis
    rc = s[bgemm].op.reduce_axis[0]
    alpha = get_const_int(b1.dom.extent)
    # Create tuning space
    # tile_b covers the fused (alpha, alpha) batch axis; the filter forces
    # the three inner factors to 1 so the batch is only split across blocks.
    cfg.define_split(
        "tile_b", cfg.axis(alpha * alpha), num_outputs=4, filter=lambda x: x.size[-3:] == [1, 1, 1]
    )
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_rc", rc, num_outputs=2)
    cfg.define_knob("offset_bgemm", [0, 1, 2, 4, 8])
    cfg.define_knob("vector_bgemm", [1, 2, 4, 8])
    offset_bgemm = cfg["offset_bgemm"].val
    vector_bgemm = cfg["vector_bgemm"].val
    C = bgemm
    A0, B0 = kernel_pack, data_pack
    # Designate the memory hierarchy
    OL = s.cache_write(C, "local")
    AA = s.cache_read(A0, "shared", [OL])
    BB = s.cache_read(B0, "shared", [OL])
    # Tile and bind spatial axes
    b = s[bgemm].fuse(b1, b2)
    bgemm_scope, b = s[bgemm].split(b, nparts=1)
    bz, vz, tz, zi = cfg["tile_b"].apply(s, C, b)
    by, vy, ty, yi = cfg["tile_y"].apply(s, C, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, C, x)
    s[C].bind(bz, te.thread_axis("blockIdx.z"))
    s[C].bind(by, te.thread_axis("blockIdx.y"))
    s[C].bind(bx, te.thread_axis("blockIdx.x"))
    s[C].bind(vz, te.thread_axis("vthread"))
    s[C].bind(vy, te.thread_axis("vthread"))
    s[C].bind(vx, te.thread_axis("vthread"))
    s[C].bind(tz, te.thread_axis("threadIdx.z"))
    s[C].bind(ty, te.thread_axis("threadIdx.y"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    s[C].reorder(bgemm_scope, bz, by, bx, vz, vy, vx, tz, ty, tx, zi, yi, xi)
    # Tile reduction axes
    s[OL].compute_at(s[C], tx)
    b1, b2, y, x = s[OL].op.axis
    b = s[OL].fuse(b1, b2)
    (rc,) = s[OL].op.reduce_axis
    rco, rci = cfg["tile_rc"].apply(s, OL, rc)
    s[OL].reorder(rco, b, y, x, rci)
    # Stage operands in shared memory once per outer reduction tile; pad
    # rows (storage_align) to avoid shared-memory bank conflicts.
    s[AA].compute_at(s[OL], rco)
    _, _, k, n = s[AA].op.axis
    AA_align = offset_bgemm + cfg["tile_x"].size[1] * cfg["tile_x"].size[2] * cfg["tile_x"].size[3]
    s[AA].storage_align(k, AA_align - 1, AA_align)
    s[BB].compute_at(s[OL], rco)
    _, _, m, k = s[BB].op.axis
    BB_align = offset_bgemm + cfg["tile_rc"].size[1]
    s[BB].storage_align(m, BB_align - 1, BB_align)
    # Schedule for A and B shared memory load
    for load in [AA, BB]:
        # Cooperative vectorized fetch across the whole thread block.
        fused = s[load].fuse(*list(s[load].op.axis))
        fused, ti = s[load].split(fused, factor=vector_bgemm)
        fused, tx = s[load].split(fused, cfg["tile_x"].size[2])
        fused, ty = s[load].split(fused, cfg["tile_y"].size[2])
        fused, tz = s[load].split(fused, cfg["tile_b"].size[2])
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
        s[load].vectorize(ti)
def nhwc_winograd_cuda(
    cfg, data, kernel, strides, padding, dilation, out_dtype, use_tensorcore, pre_computed
):
    """Compute declaration for winograd

    Builds the full NHWC winograd pipeline: pad -> tile -> input transform ->
    batched GEMM (optionally fp16 for tensorcore) -> inverse transform ->
    output. Stride must be 1; dilation is folded into the kernel.
    """
    tile_size = _infer_tile_size(data, kernel)
    N, H, W, CI = get_const_tuple(data.shape)
    if isinstance(N, tvm.tir.Any):
        N = tvm.te.size_var("n")
    if not isinstance(H, int) or not isinstance(W, int):
        raise RuntimeError(
            "cuda winograd nhwc conv2d doesn't support dynamic \
                   input height or width."
        )
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    HSTR, WSTR = (strides, strides) if isinstance(strides, int) else strides
    if not pre_computed:  # Kernel tensor is raw tensor, do strict check
        if dilation_h != 1 or dilation_w != 1:
            kernel = nn.dilate(kernel, (dilation_h, dilation_w, 1, 1))
        KH, KW, CI, CO = get_const_tuple(kernel.shape)
        alpha = KW + tile_size - 1
        assert HSTR == 1 and WSTR == 1 and KH == KW
    else:
        # Kernel tensor is pre-transfomred. This op is created by conv2d_alter_op.
        # Dilation is not supported
        alpha, _, CI, CO = get_const_tuple(kernel.shape)
        KH = KW = alpha + 1 - tile_size
        assert HSTR == 1 and WSTR == 1 and dilation_h == 1 and dilation_w == 1
    pt, pl, pb, pr = nn.get_pad_tuple(padding, (KH, KW))
    data_pad = nn.pad(
        data,
        (0, pt, pl, 0),
        (0, pb, pr, 0),
        name="data_pad",
        attrs={"schedule_rule": "None"},
    )
    r = KW
    m = tile_size
    # Output spatial extents and number of m-by-m tiles per image.
    H = (H + pt + pb - KH) // HSTR + 1
    W = (W + pl + pr - KW) // WSTR + 1
    nH, nW = (H + m - 1) // m, (W + m - 1) // m
    # P = total tile count; drops the batch factor when N is symbolic.
    P = N * nH * nW if isinstance(N, int) else nH * nW
    # Determine whether the shape is available with tensorcore
    shape_judge = (
        (P % 16 == 0 and CI % 16 == 0 and CO % 16 == 0)
        or (P % 8 == 0 and CI % 16 == 0 and CO % 32 == 0)
        or (P % 32 == 0 and CI % 16 == 0 and CO % 8 == 0)
    )
    # GEMM operands are cast to fp16 only when tensorcore is usable.
    if shape_judge and use_tensorcore:
        trans_type = "float16"
    else:
        trans_type = data.dtype
    # Compute transform matrix
    A, _, _ = winograd_transform_matrices(m, r, out_dtype)
    _, B, G = winograd_transform_matrices(m, r, data.dtype)
    # Transform kernel
    if not pre_computed:
        # Check if we are currently tuning, if so we want to avoid counting
        # prepacking in time costs. Just use a placeholder with the packed shape instead.
        if autotvm.GLOBAL_SCOPE.in_tuning:
            kernel_pack = te.placeholder(
                (alpha, alpha, CI, CO), dtype=kernel.dtype, name="kernel_pack"
            )
        else:
            r_kh = te.reduce_axis((0, KH), name="r_kh")
            r_kw = te.reduce_axis((0, KW), name="r_kw")
            kernel_pack = te.compute(
                (alpha, alpha, CI, CO),
                lambda eps, nu, ci, co: te.sum(
                    (kernel[r_kh][r_kw][ci][co]) * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
                ),
                name="kernel_pack",
            )
    else:
        kernel_pack = kernel
    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod
    # Pack input tile
    input_tile = te.compute(
        (P, CI, alpha, alpha),
        lambda p, c, eps, nu: data_pad[
            idxdiv(p, (nH * nW)), idxmod(idxdiv(p, nW), nH) * m + eps, idxmod(p, nW) * m + nu, c
        ],
        name="d",
        attrs={"schedule_rule": "None"},
    )
    # Transform data
    r_a = te.reduce_axis((0, alpha), "r_a")
    r_b = te.reduce_axis((0, alpha), "r_b")
    data_pack = te.compute(
        (alpha, alpha, P, CI),
        lambda eps, nu, p, ci: te.sum(
            input_tile[p][ci][r_a][r_b] * B[r_a][eps] * B[r_b][nu], axis=[r_a, r_b]
        ),
        name="data_pack",
    )
    # Convert data type of input feature maps and weights for tensorcore
    Transdata = te.compute(
        data_pack.shape, lambda eps, nu, p, ci: data_pack[eps, nu, p, ci].astype(trans_type)
    )
    TransFilter = te.compute(
        kernel_pack.shape, lambda eps, nu, ci, co: kernel_pack[eps, nu, ci, co].astype(trans_type)
    )
    # Do batch gemm
    ci = te.reduce_axis((0, CI), name="ci")
    bgemm = te.compute(
        (alpha, alpha, P, CO),
        lambda eps, nu, p, co: te.sum(
            (Transdata[eps][nu][p][ci]).astype(out_dtype)
            * (TransFilter[eps][nu][ci][co]).astype(out_dtype),
            axis=[ci],
        ),
        name="bgemm",
    )
    # Inverse transform
    r_a = te.reduce_axis((0, alpha), "r_a")
    r_b = te.reduce_axis((0, alpha), "r_b")
    inverse = te.compute(
        (P, CO, m, m),
        lambda p, co, vh, vw: te.sum(
            bgemm[r_a][r_b][p][co] * A[r_a][vh] * A[r_b][vw], axis=[r_a, r_b]
        ),
        name="inverse",
    )
    # Output
    # Unpack the m-by-m tiles back into the NHWC output layout.
    output = te.compute(
        (N, H, W, CO),
        lambda n, h, w, co: inverse[
            n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m), co, idxmod(h, m), idxmod(w, m)
        ],
        name="output",
        tag="conv2d_nhwc_winograd",
    )
    if isinstance(N, int):
        cfg.add_flop(2 * N * CO * H * W * CI * KH * KW)
    return output
def data_weight_transform(s, data_trans, input_tile, thread_num_trans, offset_trans, trans_tag):
    """Schedule for data or kernel transform

    Shared schedule for the winograd data transform (trans_tag == "data") and
    kernel transform (any other tag): stage the input tile in shared memory,
    compute the transform in registers, and bind channels across threads.
    """
    kernel_align = thread_num_trans + offset_trans
    indata_s = s.cache_read(input_tile, "shared", [data_trans])
    data_l = s.cache_write(data_trans, "local")
    # Schedule for data or kernel transform
    eps, nu, p, c = s[data_trans].op.axis
    block_x, thread_x = s[data_trans].split(c, thread_num_trans)
    block_x = s[data_trans].fuse(p, block_x)
    s[data_trans].reorder(block_x, thread_x, eps, nu)
    s[data_trans].bind(thread_x, te.thread_axis("threadIdx.x"))
    s[data_trans].bind(block_x, te.thread_axis("blockIdx.x"))
    # Compute the transform into registers at the thread level.
    s[data_l].compute_at(s[data_trans], thread_x)
    eps_l, nu_l, p_l, c_l = s[data_l].op.axis
    r_a, r_b = s[data_l].op.reduce_axis
    block_x_l, thread_x_l = s[data_l].split(c_l, thread_num_trans)
    block_x_l = s[data_l].fuse(p_l, block_x_l)
    s[data_l].reorder(block_x_l, thread_x_l, eps_l, nu_l, r_a, r_b)
    # The alpha x alpha transform loops are tiny; fully unroll them.
    for axis in [eps_l, nu_l, r_a, r_b]:
        s[data_l].unroll(axis)
    # Schedule for share memory load
    s[indata_s].compute_at(s[data_l], block_x_l)
    if trans_tag == "data":
        # Data tiles are laid out (P, CI, eps, nu); pad rows to avoid
        # shared-memory bank conflicts.
        p_is, c_is, eps_is, nu_is = s[indata_s].op.axis
        data_align = (
            get_const_int(eps_is.dom.extent) * get_const_int(nu_is.dom.extent) + offset_trans
        )
        s[indata_s].storage_align(c_is, data_align - 1, data_align)
        block_x_is, thread_x_is = s[indata_s].split(c_is, thread_num_trans)
        s[indata_s].bind(thread_x_is, te.thread_axis("threadIdx.x"))
    else:
        # Kernel tensors are laid out (eps, nu, CI, CO); threads span CO.
        eps_is, nu_is, ci_is, co_is = s[indata_s].op.axis
        s[indata_s].storage_align(nu_is, kernel_align - 1, kernel_align)
        block_x_is, thread_x_is = s[indata_s].split(co_is, thread_num_trans)
        s[indata_s].reorder(ci_is, block_x_is, eps_is, nu_is, thread_x_is)
        s[indata_s].bind(thread_x_is, te.thread_axis("threadIdx.x"))
def schedule_nhwc_winograd_cuda(cfg, s, output, use_tensorcore, pre_computed):
    """Schedule the NHWC Winograd conv2d pipeline.

    Walks backwards from ``output`` to recover the compute stages
    (inverse transform, batched GEMM, data/kernel transforms), then
    schedules each of them.  ``use_tensorcore`` selects the tensorcore
    bgemm schedule when the problem shape allows it; ``pre_computed``
    means the kernel was already transformed by alter-op-layout, so the
    kernel-transform stage is skipped.
    """
    # Get stages
    inverse = s[output].op.input_tensors[0]
    bgemm, A = s[inverse].op.input_tensors
    Transdata, TransFilter = s[bgemm].op.input_tensors
    data_pack = s[Transdata].op.input_tensors[0]
    kernel_pack = s[TransFilter].op.input_tensors[0]
    s[Transdata].compute_inline()
    s[TransFilter].compute_inline()
    input_tile, B = s[data_pack].op.input_tensors
    pad_data = s[input_tile].op.input_tensors[0]
    # Define the stride of intrin functions
    cfg.define_knob("thread_num_inverse", [1, 32, 64, 128, 256])
    cfg.define_knob("thread_num_data", [1, 32, 64, 128, 256])
    cfg.define_knob("thread_num_kernel", [1, 32, 64, 128, 256])
    cfg.define_knob("offset_inverse", [0, 2, 4])
    cfg.define_knob("offset_data", [0, 1, 2, 4])
    cfg.define_knob("offset_kernel", [0, 1, 2, 4])
    cfg.define_knob("inverse_in_vector", [1, 2, 4])
    thread_num_data = cfg["thread_num_data"].val
    thread_num_kernel = cfg["thread_num_kernel"].val
    thread_num_inverse = cfg["thread_num_inverse"].val
    offset_data = cfg["offset_data"].val
    offset_kernel = cfg["offset_kernel"].val
    offset_inverse = cfg["offset_inverse"].val
    inverse_in_vector = cfg["inverse_in_vector"].val
    # Data transform
    s[B].compute_inline()
    data_weight_transform(s, data_pack, input_tile, thread_num_data, offset_data, trans_tag="data")
    s[input_tile].compute_inline()
    s[pad_data].compute_inline()
    # Kernel transform: only scheduled when the kernel is raw and we are not
    # tuning (during tuning the transform is assumed pre-computed).
    if not pre_computed and not autotvm.GLOBAL_SCOPE.in_tuning:
        kernel, G = s[kernel_pack].op.input_tensors
        s[G].compute_inline()
        data_weight_transform(
            s, kernel_pack, kernel, thread_num_kernel, offset_kernel, trans_tag="kernel"
        )
    else:
        kernel = kernel_pack
        if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
            s[kernel].compute_inline()
    b1, b2, y, x = s[bgemm].op.axis
    alpha = get_const_int(b1.dom.extent)
    _, _, P, CI = get_const_tuple(Transdata.shape)
    _, _, _, CO = get_const_tuple(TransFilter.shape)
    # Determine whether the shape is available with tensorcore
    shape_judge = (
        (P % 16 == 0 and CI % 16 == 0 and CO % 16 == 0)
        or (P % 8 == 0 and CI % 16 == 0 and CO % 32 == 0)
        or (P % 32 == 0 and CI % 16 == 0 and CO % 8 == 0)
    )
    if shape_judge and use_tensorcore:
        schedule_bgemm_tensorcore(cfg, s, bgemm, Transdata, TransFilter)
    else:
        schedule_bgemm_direct(cfg, s, bgemm, Transdata, TransFilter)
    # Schedule inverse, output and fusion.  When `output` is not a final
    # output, a fused epilogue follows it: demote it to local scope and
    # schedule the real graph output instead.
    if output.op in s.outputs:
        OL = None
    else:
        OL = output
        s[OL].set_scope("local")
        output = s.outputs[0]
    s[A].compute_inline()
    inverse_s = s.cache_read(bgemm, "shared", [inverse])
    # alpha = m + r - 1 with r == 3, so the output tile size m is alpha - 2.
    m = alpha - 3 + 1
    offset_inverse_in = offset_inverse
    vector_width_inverse_in = inverse_in_vector
    # Schedule for output
    n, h, w, co = s[output].op.axis
    ho, wo, hi, wi = s[output].tile(h, w, m, m)
    s[output].reorder(n, ho, wo, co, hi, wi)
    fused = s[output].fuse(n, ho, wo)
    block_x_s, thread_x_s = s[output].split(co, thread_num_inverse)
    block_x_s = s[output].fuse(fused, block_x_s)
    s[output].reorder(block_x_s, thread_x_s, hi, wi)
    if OL is not None:
        s[OL].compute_inline()
    # Schedule for inverse
    s[inverse].compute_at(s[output], thread_x_s)
    p_inv, co_inv, eps_inv, nu_inv = s[inverse].op.axis
    block_x_inv, thread_x_inv = s[inverse].split(co_inv, thread_num_inverse)
    r_a, r_b = s[inverse].op.reduce_axis
    for axis in [eps_inv, nu_inv, r_a, r_b]:
        s[inverse].unroll(axis)
    # Schedule for share memory load
    s[inverse_s].compute_at(s[output], block_x_s)
    eps_inv_s, nu_inv_s, p_inv_s, co_inv_s = s[inverse_s].op.axis
    inverse_in_align = offset_inverse_in + thread_num_inverse
    s[inverse_s].storage_align(p_inv_s, inverse_in_align - 1, inverse_in_align)
    block_x_inv_s, thread_x_inv_s = s[inverse_s].split(co_inv_s, thread_num_inverse)
    block_x_inv_s = s[inverse_s].fuse(p_inv_s, block_x_inv_s)
    s[inverse_s].reorder(block_x_inv_s, eps_inv_s, nu_inv_s, thread_x_inv_s)
    t = s[inverse_s].fuse(eps_inv_s, nu_inv_s, thread_x_inv_s)
    t, ti = s[inverse_s].split(t, factor=vector_width_inverse_in)
    t, tx = s[inverse_s].split(t, factor=thread_num_inverse)
    s[inverse_s].bind(tx, te.thread_axis("threadIdx.x"))
    s[inverse_s].vectorize(ti)
    s[output].bind(thread_x_s, te.thread_axis("threadIdx.x"))
    s[output].bind(block_x_s, te.thread_axis("blockIdx.x"))
    return s
@autotvm.register_topi_compute("conv2d_nhwc_winograd_direct.cuda")
def conv2d_nhwc_winograd_direct(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Winograd conv2d compute for NHWC layout without tensorcores (raw kernel)."""
    return nhwc_winograd_cuda(
        cfg, data, kernel, strides, padding, dilation, out_dtype,
        use_tensorcore=False, pre_computed=False,
    )
@autotvm.register_topi_schedule("conv2d_nhwc_winograd_direct.cuda")
def schedule_conv2d_nhwc_winograd_direct(cfg, outs):
    """Create the CUDA schedule for conv2d_nhwc_winograd_direct."""
    sch = te.create_schedule([t.op for t in outs])

    def _winograd_callback(op):
        # Only the winograd output stage anchors the schedule.
        if "conv2d_nhwc_winograd" in op.tag:
            schedule_nhwc_winograd_cuda(
                cfg, sch, op.output(0), use_tensorcore=False, pre_computed=False
            )

    traverse_inline(sch, outs[0].op, _winograd_callback)
    return sch
@autotvm.register_topi_compute("conv2d_nhwc_winograd_tensorcore.cuda")
def conv2d_nhwc_winograd_tensorcore(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Winograd conv2d compute for NHWC layout using tensorcores (raw kernel)."""
    return nhwc_winograd_cuda(
        cfg, data, kernel, strides, padding, dilation, out_dtype,
        use_tensorcore=True, pre_computed=False,
    )
@autotvm.register_topi_schedule("conv2d_nhwc_winograd_tensorcore.cuda")
def schedule_conv2d_nhwc_winograd_tensorcore(cfg, outs):
    """Create the CUDA schedule for conv2d_nhwc_winograd_tensorcore."""
    sch = te.create_schedule([t.op for t in outs])

    def _winograd_callback(op):
        if "conv2d_nhwc_winograd" in op.tag:
            schedule_nhwc_winograd_cuda(
                cfg, sch, op.output(0), use_tensorcore=True, pre_computed=False
            )

    traverse_inline(sch, outs[0].op, _winograd_callback)
    return sch
@autotvm.register_topi_compute("conv2d_nhwc_winograd_direct_without_weight_transform.cuda")
def conv2d_nhwc_winograd_direct_without_weight_transform(
    cfg, data, kernel, strides, padding, dilation, out_dtype
):
    """Winograd NHWC conv2d (no tensorcores) taking an already-transformed kernel."""
    return nhwc_winograd_cuda(
        cfg, data, kernel, strides, padding, dilation, out_dtype,
        use_tensorcore=False, pre_computed=True,
    )
@autotvm.register_topi_schedule("conv2d_nhwc_winograd_direct_without_weight_transform.cuda")
def schedule_conv2d_nhwc_winograd_direct_without_weight_transform(cfg, outs):
    """Create the CUDA schedule for the pre-transformed-kernel direct variant."""
    sch = te.create_schedule([t.op for t in outs])

    def _winograd_callback(op):
        if "conv2d_nhwc_winograd" in op.tag:
            schedule_nhwc_winograd_cuda(
                cfg, sch, op.output(0), use_tensorcore=False, pre_computed=True
            )

    traverse_inline(sch, outs[0].op, _winograd_callback)
    return sch
@autotvm.register_topi_compute("conv2d_nhwc_winograd_tensorcore_without_weight_transform.cuda")
def conv2d_nhwc_winograd_tensorcore_without_weight_transform(
    cfg, data, kernel, strides, padding, dilation, out_dtype
):
    """Winograd NHWC conv2d on tensorcores taking an already-transformed kernel."""
    return nhwc_winograd_cuda(
        cfg, data, kernel, strides, padding, dilation, out_dtype,
        use_tensorcore=True, pre_computed=True,
    )
@autotvm.register_topi_schedule("conv2d_nhwc_winograd_tensorcore_without_weight_transform.cuda")
def schedule_conv2d_nhwc_winograd_tensorcore_without_weight_transform(cfg, outs):
    """Create the CUDA schedule for the pre-transformed-kernel tensorcore variant."""
    sch = te.create_schedule([t.op for t in outs])

    def _winograd_callback(op):
        if "conv2d_nhwc_winograd" in op.tag:
            schedule_nhwc_winograd_cuda(
                cfg, sch, op.output(0), use_tensorcore=True, pre_computed=True
            )

    traverse_inline(sch, outs[0].op, _winograd_callback)
    return sch
| 26,587 | 34.497997 | 99 | py |
tvm | tvm-main/python/tvm/topi/cuda/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""CUDA specific declaration and schedules."""
from .conv1d import *
from .conv1d_transpose_ncw import *
from .conv2d import *
from .conv2d_hwcn import *
from .conv2d_int8 import *
from .conv2d_winograd import *
from .conv2d_nhwc_winograd import *
from .depthwise_conv2d import *
from .group_conv2d_nchw import *
from . import conv2d_alter_op
from .conv2d_transpose import *
from .conv3d_transpose_ncdhw import *
from .deformable_conv2d import *
from .conv3d import *
from .conv3d_winograd import *
from . import conv3d_alter_op
from .reduction import schedule_reduce
from .softmax import *
from .injective import schedule_injective, schedule_elemwise, schedule_broadcast
from .dense import *
from .pooling import *
from .nn import schedule_lrn
from .batch_matmul import *
from .batch_matmul_tensorcore import *
from .vision import *
from .ssd import *
from .nms import get_valid_counts, non_max_suppression, all_class_non_max_suppression
from .rcnn import *
from .scatter import *
from .scatter_elements import *
from .sort import *
from .conv2d_nhwc_tensorcore import *
from .conv3d_ndhwc_tensorcore import *
from .dense_tensorcore import *
from .conv2d_hwnc_tensorcore import *
from .correlation import *
from .sparse import *
from . import tensorcore_alter_op
from .argwhere import *
from .scan import *
from .sparse_reshape import *
from .transform import *
from .unique import *
from .searchsorted import *
from .signal import *
| 2,274 | 34 | 85 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv2d_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Winograd template for cuda backend"""
import logging
import tvm
from tvm import autotvm, te
from .. import nn
from ..nn.conv2d import (
_conv2d_winograd_nchw_impl,
_conv2d_winograd_nhwc_impl,
conv2d_winograd_nchw,
conv2d_winograd_nhwc,
)
from ..nn.winograd_util import winograd_transform_matrices
from ..utils import get_const_int, get_const_tuple, traverse_inline
logger = logging.getLogger("conv2d_winograd")
def _infer_tile_size(data, kernel, layout="NCHW"):
    """Pick the Winograd output tile size: 4 when the input height is a
    multiple of 8, otherwise 2.  `kernel` is accepted for interface
    symmetry but not consulted."""
    if layout == "NCHW":
        _, _, height, _ = get_const_tuple(data.shape)
    else:
        assert layout == "NHWC"
        _, height, _, _ = get_const_tuple(data.shape)
    return 4 if height % 8 == 0 else 2
def winograd_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed):
    """Compute declaration for Winograd conv2d on CUDA (NCHW layout).

    Builds the full Winograd pipeline as TE stages: kernel transform
    (unless ``pre_computed``), input tiling, data transform, batched
    GEMM, inverse transform, and the final output gather.  Requires
    unit strides; static H and W; dilation only when the kernel is raw.
    """
    tile_size = _infer_tile_size(data, kernel)
    N, CI, H, W = get_const_tuple(data.shape)
    # Dynamic batch is allowed (replaced by a size var); dynamic H/W is not.
    if isinstance(N, tvm.tir.Any):
        N = tvm.te.size_var("n")
    if not isinstance(H, int) or not isinstance(W, int):
        raise RuntimeError(
            "cuda winograd conv2d doesn't support dynamic input\
                           height or width."
        )
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    HSTR, WSTR = (strides, strides) if isinstance(strides, int) else strides
    if not pre_computed:  # kernel tensor is raw tensor, do strict check
        if dilation_h != 1 or dilation_w != 1:
            kernel = nn.dilate(kernel, (1, 1, dilation_h, dilation_w))
        CO, CI, KH, KW = get_const_tuple(kernel.shape)
        alpha = KW + tile_size - 1
        assert HSTR == 1 and WSTR == 1 and KH == KW
    else:
        # kernel tensor is pre-transformed; this op is created by alter op layout.
        # dilation is not supported
        alpha, _, CI, CO = get_const_tuple(kernel.shape)
        KH = KW = alpha + 1 - tile_size
        assert HSTR == 1 and WSTR == 1 and dilation_h == 1 and dilation_w == 1
    pt, pl, pb, pr = nn.get_pad_tuple(padding, (KH, KW))
    data_pad = nn.pad(
        data,
        (0, 0, pt, pl),
        (0, 0, pb, pr),
        name="data_pad",
    )
    r = KW
    m = tile_size
    A, B, G = winograd_transform_matrices(m, r, out_dtype)
    # Output spatial extents after padding and (unit-stride) convolution.
    H = (H + pt + pb - KH) // HSTR + 1
    W = (W + pl + pr - KW) // WSTR + 1
    # Number of m x m output tiles per row/column, and total tiles P.
    nH, nW = (H + m - 1) // m, (W + m - 1) // m
    P = N * nH * nW if isinstance(N, int) else nH * nW
    # transform kernel
    if not pre_computed:
        r_kh = te.reduce_axis((0, KH), name="r_kh")
        r_kw = te.reduce_axis((0, KW), name="r_kw")
        kernel_pack = te.compute(
            (alpha, alpha, CI, CO),
            lambda eps, nu, ci, co: te.sum(
                kernel[co][ci][r_kh][r_kw] * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
            ),
            name="kernel_pack",
        )
    else:
        kernel_pack = kernel
    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod
    # pack input tile: gather each alpha x alpha input patch per (channel, tile).
    input_tile = te.compute(
        (CI, P, alpha, alpha),
        lambda c, p, eps, nu: data_pad[idxdiv(p, (nH * nW))][c][
            idxmod(idxdiv(p, nW), nH) * m + eps
        ][idxmod(p, nW) * m + nu],
        name="d",
    )
    # transform data: B^T d B per tile.
    r_a = te.reduce_axis((0, alpha), "r_a")
    # NOTE(review): this axis is bound to r_b but debug-named "r_a" —
    # looks like a copy-paste slip in the name string; behavior unaffected.
    r_b = te.reduce_axis((0, alpha), "r_a")
    data_pack = te.compute(
        (alpha, alpha, CI, P),
        lambda eps, nu, ci, p: te.sum(
            input_tile[ci][p][r_a][r_b] * B[r_a][eps] * B[r_b][nu], axis=[r_a, r_b]
        ),
        name="data_pack",
    )
    # do batch gemm: one (CO x P) gemm per (eps, nu) frequency.
    ci = te.reduce_axis((0, CI), name="ci")
    bgemm = te.compute(
        (alpha, alpha, CO, P),
        lambda eps, nu, co, p: te.sum(
            kernel_pack[eps][nu][ci][co] * data_pack[eps][nu][ci][p], axis=[ci]
        ),
        name="bgemm",
    )
    # inverse transform: A^T bgemm A, back to m x m spatial tiles.
    r_a = te.reduce_axis((0, alpha), "r_a")
    r_b = te.reduce_axis((0, alpha), "r_a")
    inverse = te.compute(
        (CO, P, m, m),
        lambda co, p, vh, vw: te.sum(
            bgemm[r_a][r_b][co][p] * A[r_a][vh] * A[r_b][vw], axis=[r_a, r_b]
        ),
        name="inverse",
    )
    # output: scatter the tiles back into the NCHW result.
    output = te.compute(
        (N, CO, H, W),
        lambda n, co, h, w: inverse[
            co, n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m), idxmod(h, m), idxmod(w, m)
        ],
        name="output",
        tag="conv2d_nchw_winograd",
    )
    # FLOP count only makes sense for a static batch.
    if isinstance(N, int):
        cfg.add_flop(2 * N * CO * H * W * CI * KH * KW)
    return output
def schedule_winograd_cuda(cfg, s, output, pre_computed):
    """Schedule the NCHW Winograd conv2d pipeline built by winograd_cuda.

    Recovers the stages by walking backwards from ``output`` and
    schedules: the data transform, the (optional) kernel transform, the
    tiled batched GEMM with shared/local caching, and the fused
    inverse-transform/output stage.
    """
    # get stages
    inverse = s[output].op.input_tensors[0]
    bgemm, A = s[inverse].op.input_tensors
    kernel_pack, data_pack = s[bgemm].op.input_tensors
    input_tile, B = s[data_pack].op.input_tensors
    pad_data = s[input_tile].op.input_tensors[0]
    # data transform
    s[B].compute_inline()
    data_l = s.cache_write(data_pack, "local")
    eps, nu, c, p = s[data_l].op.axis
    r_a, r_b = s[data_l].op.reduce_axis
    # Transform loops are tiny (alpha x alpha); unroll them fully.
    for axis in [eps, nu, r_a, r_b]:
        s[data_l].unroll(axis)
    eps, nu, c, p = s[data_pack].op.axis
    p, pi = s[data_pack].split(p, 1)
    fused = s[data_pack].fuse(c, p)
    bb, tt = s[data_pack].split(fused, 128)
    s[data_pack].reorder(bb, tt, pi, eps, nu)
    s[data_pack].bind(bb, te.thread_axis("blockIdx.x"))
    s[data_pack].bind(tt, te.thread_axis("threadIdx.x"))
    s[data_l].compute_at(s[data_pack], pi)
    s[input_tile].compute_at(s[data_pack], pi)
    s[pad_data].compute_inline()
    # transform kernel
    if not pre_computed:
        kernel, G = s[kernel_pack].op.input_tensors
        eps, nu, ci, co = s[kernel_pack].op.axis
        if autotvm.GLOBAL_SCOPE.in_tuning:
            # skip this part during tuning to make recrods accurate
            # this part will be pre-computed during pre-compute optimization pass
            s[G].pragma(s[G].op.axis[0], "debug_skip_region")
            s[kernel_pack].pragma(eps, "debug_skip_region")
        else:
            s[G].compute_inline()
            r_a, r_b = s[kernel_pack].op.reduce_axis
            for axis in [eps, nu, r_a, r_b]:
                s[kernel_pack].unroll(axis)
            fused = s[kernel_pack].fuse(ci, co)
            bb, tt = s[kernel_pack].split(fused, 128)
            s[kernel_pack].reorder(bb, tt, eps, nu, r_a, r_b)
            s[kernel_pack].bind(bb, te.thread_axis("blockIdx.x"))
            s[kernel_pack].bind(tt, te.thread_axis("threadIdx.x"))
    else:
        kernel = kernel_pack
    if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()
    ##### space definition begin #####
    b1, b2, y, x = s[bgemm].op.axis
    rc = s[bgemm].op.reduce_axis[0]
    alpha = get_const_int(b1.dom.extent)
    cfg.define_split(
        "tile_b", cfg.axis(alpha * alpha), num_outputs=4, filter=lambda x: x.size[-3:] == [1, 1, 1]
    )
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_rc", rc, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 128, 1500])
    target = tvm.target.Target.current()
    if target.kind.name in ["nvptx", "rocm"]:
        cfg.define_knob("unroll_explicit", [1])
    else:
        cfg.define_knob("unroll_explicit", [0, 1])
    ##### space definition end #####
    # batch gemm
    C = bgemm
    A0, B0 = kernel_pack, data_pack
    OL = s.cache_write(C, "local")
    AA = s.cache_read(A0, "shared", [OL])
    BB = s.cache_read(B0, "shared", [OL])
    b = s[bgemm].fuse(b1, b2)
    # tile and bind spatial axes
    bgemm_scope, b = s[bgemm].split(b, nparts=1)
    bz, vz, tz, zi = cfg["tile_b"].apply(s, C, b)
    by, vy, ty, yi = cfg["tile_y"].apply(s, C, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, C, x)
    s[C].bind(bz, te.thread_axis("blockIdx.z"))
    s[C].bind(by, te.thread_axis("blockIdx.y"))
    s[C].bind(bx, te.thread_axis("blockIdx.x"))
    s[C].bind(vz, te.thread_axis("vthread"))
    s[C].bind(vy, te.thread_axis("vthread"))
    s[C].bind(vx, te.thread_axis("vthread"))
    s[C].bind(tz, te.thread_axis("threadIdx.z"))
    s[C].bind(ty, te.thread_axis("threadIdx.y"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    s[C].reorder(bgemm_scope, bz, by, bx, vz, vy, vx, tz, ty, tx, zi, yi, xi)
    # tile reduction axes
    s[OL].compute_at(s[C], tx)
    b1, b2, y, x = s[OL].op.axis
    b = s[OL].fuse(b1, b2)
    (rc,) = s[OL].op.reduce_axis
    rco, rci = cfg["tile_rc"].apply(s, OL, rc)
    s[OL].reorder(rco, rci, b, y, x)
    s[AA].compute_at(s[OL], rco)
    s[BB].compute_at(s[OL], rco)
    # cooperative fetching
    for load in [AA, BB]:
        fused = s[load].fuse(*list(s[load].op.axis))
        fused, tx = s[load].split(fused, cfg["tile_x"].size[2])
        fused, ty = s[load].split(fused, cfg["tile_y"].size[2])
        fused, tz = s[load].split(fused, cfg["tile_b"].size[2])
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
    s[C].pragma(bgemm_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[C].pragma(bgemm_scope, "unroll_explicit", cfg["unroll_explicit"].val)
    # schedule inverse, output and fusion.  When `output` is not a final
    # output, a fused epilogue follows: demote it to local scope and
    # schedule the real graph output instead.
    if output.op in s.outputs:
        OL = None
    else:
        OL = output
        s[OL].set_scope("local")
        output = s.outputs[0]
    # alpha = m + r - 1 with r == 3, so the output tile size m is alpha - 2.
    m = alpha - 3 + 1
    n, co, h, w = s[output].op.axis
    ho, wo, hi, wi = s[output].tile(h, w, m, m)
    inverse_scope, n = s[output].split(n, nparts=1)
    fused = s[output].fuse(n, co, ho, wo)
    bb, tt = s[output].split(fused, 128)
    s[output].bind(bb, te.thread_axis("blockIdx.x"))
    s[output].bind(tt, te.thread_axis("threadIdx.x"))
    if OL is not None:
        s[OL].compute_at(s[output], tt)
    s[A].compute_inline()
    co, p, vh, vw = s[inverse].op.axis
    r_a, r_b = s[inverse].op.reduce_axis
    for axis in [vh, vw, r_a, r_b]:
        s[inverse].unroll(axis)
    s[inverse].compute_at(s[output], tt)
    return s
@autotvm.register_topi_compute("conv2d_nchw_winograd.cuda")
def conv2d_nchw_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Winograd conv2d compute for NCHW layout with a raw (untransformed) kernel."""
    return winograd_cuda(
        cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed=False
    )
@autotvm.register_topi_schedule("conv2d_nchw_winograd.cuda")
def schedule_conv2d_nchw_winograd(cfg, outs):
    """Create the CUDA schedule for conv2d_nchw_winograd."""
    sch = te.create_schedule([t.op for t in outs])

    def _winograd_callback(op):
        if "conv2d_nchw_winograd" in op.tag:
            schedule_winograd_cuda(cfg, sch, op.output(0), pre_computed=False)

    traverse_inline(sch, outs[0].op, _winograd_callback)
    return sch
@autotvm.register_topi_compute("conv2d_nchw_winograd_without_weight_transform.cuda")
def conv2d_nchw_winograd_without_weight_transform(
    cfg, data, kernel, strides, padding, dilation, out_dtype
):
    """Winograd NCHW conv2d compute taking an already-transformed kernel."""
    return winograd_cuda(
        cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed=True
    )
@autotvm.register_topi_schedule("conv2d_nchw_winograd_without_weight_transform.cuda")
def schedule_conv2d_nchw_winograd_without_weight_transform(cfg, outs):
    """Create the CUDA schedule for the pre-transformed-kernel variant."""
    sch = te.create_schedule([t.op for t in outs])

    def _winograd_callback(op):
        if "conv2d_nchw_winograd" in op.tag:
            schedule_winograd_cuda(cfg, sch, op.output(0), pre_computed=True)

    traverse_inline(sch, outs[0].op, _winograd_callback)
    return sch
@conv2d_winograd_nhwc.register(["cuda", "gpu"])
def conv2d_winograd_nhwc_cuda(
    data,
    weight,
    strides,
    padding,
    dilation,
    out_dtype,
    pre_computed=False,
    auto_scheduler_rewritten_layout="",
    meta_schedule_original_shape=None,
):
    """Conv2D Winograd in NHWC layout — the clean implementation consumed
    by the auto-scheduler on both CPU and GPU."""
    chosen_tile = _infer_tile_size(data, weight, layout="NHWC")
    return _conv2d_winograd_nhwc_impl(
        data, weight, strides, padding, dilation, out_dtype, chosen_tile, pre_computed
    )
@conv2d_winograd_nchw.register(["cuda", "gpu"])
def conv2d_winograd_nchw_cuda(
    data,
    weight,
    strides,
    padding,
    dilation,
    out_dtype,
    pre_computed=False,
    auto_scheduler_rewritten_layout="",
    meta_schedule_original_shape=None,
):
    """Conv2D Winograd in NCHW layout — the clean implementation consumed
    by the auto-scheduler on both CPU and GPU."""
    chosen_tile = _infer_tile_size(data, weight, layout="NCHW")
    return _conv2d_winograd_nchw_impl(
        data, weight, strides, padding, dilation, out_dtype, chosen_tile, pre_computed
    )
| 13,569 | 31.857143 | 99 | py |
tvm | tvm-main/python/tvm/topi/cuda/vision.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument, no-member, import-outside-toplevel
"""Schedule for vision operators"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from .. import cpp
from .. import tag
from .pooling import schedule_pool
from .injective import schedule_injective_from_existing
def _default_schedule(outs):
    """Fallback GPU schedule: apply the injective schedule to every
    eligible op reachable from the outputs, visiting each op once."""
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    sch = te.create_schedule([t.op for t in outs])
    visited = []

    def _visit(op):
        if tag.is_injective(op.tag) or op.tag in ["bbox_score", "sorted_bbox"]:
            schedule_injective_from_existing(sch, op.output(0))
        for inp in op.input_tensors:
            # Recurse only into non-placeholder producers we have not seen.
            if inp.op.input_tensors and inp.op not in visited:
                _visit(inp.op)
        visited.append(op)

    for out in outs:
        _visit(out.op)
    return sch
def schedule_reorg(outs):
    """Build an injective CUDA schedule for the reorg operator.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of reorg as an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for reorg.
    """
    current_target = tvm.target.Target.current(allow_none=False)
    return cpp.cuda.schedule_injective(
        cpp.TEST_create_target(current_target.kind.name), outs
    )
def schedule_nms(outs):
    """Build the fallback GPU schedule for non-maximum suppression.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of nms as an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs)
def schedule_multibox_prior(outs):
    """Build the fallback GPU schedule for the multibox_prior operator.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of multibox_prior as an array
        of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for multibox_prior.
    """
    return _default_schedule(outs)
def schedule_multibox_transform_loc(outs):
    """Build the fallback GPU schedule for multibox_transform_loc.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of multibox_transform_loc as
        an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs)
def schedule_multibox_detection(outs):
    """Build the fallback GPU schedule for the multibox_detection operator.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of multibox_detection as an
        array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for multibox_detection.
    """
    return _default_schedule(outs)
def schedule_roi_align(outs):
    """Schedule roi_align by reusing the NCHW pooling schedule."""
    return schedule_pool(outs, "NCHW")
def schedule_roi_pool(outs):
    """Schedule roi_pool by reusing the NCHW pooling schedule."""
    return schedule_pool(outs, "NCHW")
def schedule_proposal(outs):
    """Build the fallback GPU schedule for the proposal operator.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of proposal as an array of
        tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs)
def schedule_get_valid_counts(outs):
    """Build the fallback GPU schedule for the get_valid_counts operator.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of get_valid_counts as an
        array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs)
| 4,603 | 25.308571 | 100 | py |
tvm | tvm-main/python/tvm/topi/cuda/tensorcore_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Tensorcore alter op and legalize functions for cuda backend"""
import logging
import math
from tvm import relay, tir
from .. import nn
logger = logging.getLogger("topi")
@nn.batch_matmul_legalize.register("cuda")
def _batch_matmul_legalize(attrs, inputs, arg_types):
    """Legalizes batch_matmul op.

    Pads M/K/N up to a tensorcore-compatible granularity for fp16/int8
    (or int4) inputs, then slices the result back to the original output
    shape.  Returns None whenever no change is needed or possible.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    arg_types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    # Collect the input tensors.
    x_tensor, y_tensor = arg_types[0], arg_types[1]
    dtype = x_tensor.dtype
    # Axis order depends on the transpose flags.  Note B is re-bound from
    # the second operand below; the two batch extents are taken to agree.
    if attrs.transpose_a:
        B, K, M = x_tensor.shape
    else:
        B, M, K = x_tensor.shape
    if attrs.transpose_b:
        B, N, K = y_tensor.shape
    else:
        B, K, N = y_tensor.shape
    # Collect the output tensor.
    output_tensor = arg_types[2]
    # Collect the input exprs.
    x, y = inputs
    if (
        isinstance(B, tir.expr.Any)
        or isinstance(M, tir.expr.Any)
        or isinstance(K, tir.expr.Any)
        or isinstance(N, tir.expr.Any)
    ):
        # Dynamic shape do not support alter op layout now
        return None
    M = M.value
    K = K.value
    N = N.value
    # Pad input and output channels to use tensorcore schedule.
    if dtype in ["float16", "int8", "uint8"]:
        # The shape of (M, K, N) must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32)
        if (
            (M % 8 == 0 and K % 16 == 0 and N % 32 == 0)
            or (M % 16 == 0 and K % 16 == 0 and N % 16 == 0)
            or (M % 32 == 0 and K % 16 == 0 and N % 8 == 0)
        ):
            # no need to pad
            return None
        candidates = [(16, 16, 16), (32, 16, 8), (8, 16, 32)]
    elif dtype in ["int4", "uint4"]:
        if M % 8 == 0 and K % 32 == 0 and N % 8 == 0:
            # no need to pad
            return None
        candidates = [(8, 32, 8)]
    else:
        return None
    (dm, dk, dn), extra_flops = pad_to_tensorcore(M, K, N, candidates)
    # Bail out when padding would more than triple the work.
    if extra_flops > 2:
        logger.info("batch_matmul pad_to_tensorcore skipped, extra_flops %s", extra_flops)
        return None
    logger.info("batch_matmul pad_to_tensorcore, extra_flops %s", extra_flops)
    # Pad each operand on the axes matching its layout; skip the pad op
    # entirely when the deltas are all zero for that operand.
    if attrs.transpose_a:
        pad_width = ((0, 0), (0, dk), (0, dm))
    else:
        pad_width = ((0, 0), (0, dm), (0, dk))
    x_ = relay.nn.pad(x, pad_width=pad_width) if dm or dk else x
    if attrs.transpose_b:
        pad_width = ((0, 0), (0, dn), (0, dk))
    else:
        pad_width = ((0, 0), (0, dk), (0, dn))
    y_ = relay.nn.pad(y, pad_width=pad_width) if dn or dk else y
    out_ = relay.nn.batch_matmul(x_, y_, **attrs)
    # K-only padding does not change the output shape, so slicing is
    # needed only when M or N was padded.
    out = (
        relay.strided_slice(out_, begin=[0, 0, 0], end=[x.value for x in output_tensor.shape])
        if dm or dn
        else out_
    )
    return out
@nn.dense_legalize.register("cuda")
def _dense_legalize(attrs, inputs, arg_types):
    """Legalizes dense op.

    Pads (M, K, N) up to a tensorcore-compatible granularity for
    fp16/int8 (or int4) inputs and slices the result back to the
    original output shape.  For int8/uint8, a 4x4x4 fallback padding is
    tried when the tensorcore padding is too costly.  Returns None when
    no change is needed or possible.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    # Collect the input tensors.
    x_tensor, y_tensor = arg_types[0], arg_types[1]
    dtype = x_tensor.dtype
    # Collect the output tensor.
    output_tensor = arg_types[2]
    # Collect the input exprs.
    x, y = inputs
    # data is (M, K); the weight is (N, K) — K is shared by both.
    M, K = x_tensor.shape
    N, K = y_tensor.shape
    try:
        M = M.value
        K = K.value
        N = N.value
    except AttributeError:
        # todo: deal with unfixed shape when compiling wdl model
        return None
    # Pad input and output channels to use tensorcore schedule.
    if dtype in ["float16", "int8", "uint8"]:
        # The shape of (M, K, N) must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32)
        if (
            (M % 8 == 0 and K % 16 == 0 and N % 32 == 0)
            or (M % 16 == 0 and K % 16 == 0 and N % 16 == 0)
            or (M % 32 == 0 and K % 16 == 0 and N % 8 == 0)
        ):
            # no need to pad
            return None
        candidates = [(16, 16, 16), (32, 16, 8), (8, 16, 32)]
    elif dtype in ["int4", "uint4"]:
        if M % 8 == 0 and K % 32 == 0 and N % 8 == 0:
            # no need to pad
            return None
        candidates = [(8, 32, 8)]
    else:
        return None
    (dm, dk, dn), extra_flops_ratio = pad_to_tensorcore(M, K, N, candidates)
    skip_pad = extra_flops_ratio > 2
    if skip_pad and dtype in ["int8", "uint8"]:
        skip_pad = False
        # If tensorcore schedule padding fails, pad to nearest upward 4x4x4 as long as
        # the additional flops ratio isn't double or more.
        # Note that 4x4x4 is invalid for tensorcore scheduling, but padding upwards to 4x4x4
        # doesn't hurt if tensorcore padding has already failed.
        if M % 4 == 0 and K % 4 == 0 and N % 4 == 0:
            # No need to pad
            return None
        (dm, dk, dn) = _pad_to(M, K, N, (4, 4, 4))
        extra_flops_ratio = _extra_flops(M, K, N, dm, dk, dn) / (M * K * N)
        skip_pad = extra_flops_ratio > 2
    if skip_pad:
        logger.info("dense pad_to_tensorcore skipped, extra_flops_ratio %s", extra_flops_ratio)
        return None
    logger.info("dense pad_to_tensorcore, extra_flops_ratio %s", extra_flops_ratio)
    # Pad only the operands that actually need it.
    x_ = relay.nn.pad(x, pad_width=((0, dm), (0, dk))) if dm or dk else x
    y_ = relay.nn.pad(y, pad_width=((0, dn), (0, dk))) if dn or dk else y
    # If units is explicitly specified, it is used to compute the output shape.
    # We need to update units after padding to prevent a type error.
    if attrs["units"] is not None:
        new_attrs["units"] = N + dn
    out_ = relay.nn.dense(x_, y_, **new_attrs)
    # K-only padding keeps the output shape; slice only when M or N grew.
    out = (
        relay.strided_slice(out_, begin=[0, 0], end=[x.value for x in output_tensor.shape])
        if dm or dn
        else out_
    )
    return out
def pad_to_tensorcore(M, K, N, candidates):
    """Choose the candidate padding granularity that adds the fewest FLOPs.

    Returns ``((dm, dk, dn), ratio)`` where the deltas round (M, K, N) up
    to multiples of the best candidate and ``ratio`` is the extra FLOPs
    relative to the unpadded ``M * K * N``.
    """
    base_flops = M * K * N
    best_extra = math.inf
    best_pad = (0, 0, 0)
    for granularity in candidates:
        pad = _pad_to(M, K, N, granularity)
        extra = _extra_flops(M, K, N, *pad)
        if extra < best_extra:
            best_extra = extra
            best_pad = pad
    return best_pad, best_extra / base_flops
def _extra_flops(M, K, N, dm, dk, dn):
return (M + dm) * (N + dn) * (K + dk) - M * N * K
def _pad_to(M, K, N, PADDING):
dm, dk, dn = 0, 0, 0
if M % PADDING[0] != 0:
M_ = ((M + PADDING[0]) // PADDING[0]) * PADDING[0]
dm = M_ - M
if K % PADDING[1] != 0:
K_ = ((K + PADDING[1]) // PADDING[1]) * PADDING[1]
dk = K_ - K
if N % PADDING[2] != 0:
N_ = ((N + PADDING[2]) // PADDING[2]) * PADDING[2]
dn = N_ - N
return dm, dk, dn
| 8,084 | 29.741445 | 95 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv2d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Conv2d transpose template for cuda backend"""
import tvm
from tvm import te
from tvm.contrib import cudnn
from tvm import autotvm
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from .. import nn
from ..utils import get_const_tuple, traverse_inline
@autotvm.register_topi_compute("conv2d_transpose_nchw.cuda")
def conv2d_transpose_nchw(cfg, data, kernel, stride, padding, out_dtype, output_padding, groups=1):
    """Transposed 2D convolution nchw forward operator.

    Implemented as zero-insertion ("dilation" of the input by the stride),
    followed by an ordinary convolution with the spatially flipped kernel.

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    kernel : tvm.te.Tensor
        4-D with shape [in_channel, num_filter, filter_height, filter_width]
    stride : tuple of two ints
        The spatial stride along height and width
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    out_dtype: str
        The output type. This is used in mixed precision
    output_padding : tuple of two ints
        Used to disambiguate output shape.
    groups : int
        number of groups

    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    batch, inp_channels, inp_height, inp_width = get_const_tuple(data.shape)
    _, out_channels, kernel_height, kernel_width = get_const_tuple(kernel.shape)
    stride_height, stride_width = stride
    outpad_height, outpad_width = output_padding
    # output_padding only disambiguates the output shape; it must be
    # strictly smaller than the stride to be meaningful.
    assert outpad_height < stride_height and outpad_width < stride_width
    assert (
        inp_channels % groups == 0
    ), f"input channels {inp_channels} must divide group size {groups}"
    cfg.stride = stride
    pad_top, pad_left, pad_bottom, pad_right = nn.get_pad_tuple(
        padding, (kernel_height, kernel_width)
    )

    # NOTE: out_width/out_height must be computed BEFORE the pad_* variables
    # are rebased below; the rebasing converts the transposed-conv padding
    # into the equivalent forward-conv padding (kernel - 1 - pad).
    out_width = (inp_width - 1) * stride_width + kernel_width - pad_left - pad_right + outpad_width
    pad_left = kernel_width - 1 - pad_left
    pad_right = kernel_width - 1 - pad_right + outpad_width
    # width of the input after inserting (stride - 1) zeros between pixels
    dilated_width = stride_width * (inp_width - 1) + 1
    out_height = (
        (inp_height - 1) * stride_height + kernel_height - pad_top - pad_bottom + outpad_height
    )
    pad_top = kernel_height - 1 - pad_top
    pad_bottom = kernel_height - 1 - pad_bottom + outpad_height
    dilated_height = stride_height * (inp_height - 1) + 1

    # compute pad
    # Zero-insertion and border padding fused into one stage: a position maps
    # back to the input only when it lands exactly on a stride multiple
    # inside the dilated region; everything else reads as 0.
    data = te.compute(
        (
            batch,
            inp_channels,
            pad_top + dilated_height + pad_bottom,
            pad_left + dilated_width + pad_right,
        ),
        lambda n, c, y, x: tvm.tir.if_then_else(
            tvm.tir.all(
                x >= pad_left,
                x < pad_left + dilated_width,
                tvm.tir.indexmod(x - pad_left, stride_width).equal(0),
                y >= pad_top,
                y < pad_top + dilated_height,
                tvm.tir.indexmod(y - pad_top, stride_height).equal(0),
            ),
            data[
                n,
                c,
                tvm.tir.indexdiv(y - pad_top, stride_height),
                tvm.tir.indexdiv(x - pad_left, stride_width),
            ],
            tvm.tir.const(0.0, data.dtype),
        ),
        name="data_pad",
    )

    # compute transposed conv
    # Ordinary convolution over the padded/dilated data with the kernel
    # flipped along both spatial axes (kernel_* - 1 - dh/dw).
    dc = te.reduce_axis((0, inp_channels // groups), name="dc")
    dh = te.reduce_axis((0, kernel_height), name="dh")
    dw = te.reduce_axis((0, kernel_width), name="dw")
    data_out = te.compute(
        (batch, out_channels * groups, out_height, out_width),
        lambda b, c, h, w: te.sum(
            data[b, c // out_channels * (inp_channels // groups) + dc, h + dh, w + dw].astype(
                out_dtype
            )
            * kernel[
                c // out_channels * (inp_channels // groups) + dc,
                c % out_channels,
                kernel_height - 1 - dh,
                kernel_width - 1 - dw,
            ].astype(out_dtype),
            axis=[dc, dh, dw],
        ),
        tag="conv2d_transpose_nchw",
    )

    return data_out
@autotvm.register_topi_schedule("conv2d_transpose_nchw.cuda")
def schedule_conv2d_transpose_nchw(cfg, outs):
    """TOPI Schedule callback for conv2d transpose operator.

    Parameters
    ----------
    cfg: ConfigEntity
        The parameters for this template

    outs: Array of Tensor
        The computation graph description of conv2d transpose
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for conv2d transpose.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])

    def _fallback_schedule(N, F, Y, X):
        # Hand-picked tiling used when no tuned config is available.
        # pylint: disable=unused-argument
        # split N (batch dimension)
        if N > 1:
            cfg["tile_n"] = SplitEntity([-1, 1, 1, 4])
        else:
            cfg["tile_n"] = SplitEntity([1, 1, 1, 1])
        # split F (output channel dimension)
        if F > 1:
            cfg["tile_f"] = SplitEntity([-1, 1, 4, 1])
        # split Y (height dimension): smallest divisor in [5, 16]
        y_split_factor = 1
        for candidate in range(5, 17):
            if Y % candidate == 0:
                y_split_factor = candidate
                break
        cfg["tile_y"] = SplitEntity([-1, 1, 1, y_split_factor])
        # split X (width dimension): smallest divisor in [5, 16]
        x_split_factor = 1
        for candidate in range(5, 17):
            if X % candidate == 0:
                x_split_factor = candidate
                break
        cfg["tile_x"] = SplitEntity([-1, x_split_factor, 1, 1])
        # split RC (input channel dimension, which is a reduction axis)
        cfg["tile_rc"] = SplitEntity([-1, 1, 16])
        # other configurations
        cfg["fuse_yx"] = OtherOptionEntity(False)
        cfg["unroll_explicit"] = OtherOptionEntity(True)
        cfg["auto_unroll_max_step"] = OtherOptionEntity(1500)

    def _callback(op):
        if op.tag == "conv2d_transpose_nchw":
            pad_data = op.input_tensors[0]
            kernel = op.input_tensors[1]
            conv = op.output(0)

            ##### space definition begin #####
            n, f, y, x = s[conv].op.axis
            rc = s[conv].op.reduce_axis[0]
            # TODO(@kevinthesun): Support tuning/optimization for dynamic shape.
            bs = pad_data.shape[0]
            # A symbolic (dynamic) batch cannot be tiled; tune as if batch == 1.
            n_tuning_axis = n if isinstance(bs, tvm.tir.IntImm) else 1
            cfg.define_split("tile_n", cfg.axis(n_tuning_axis), num_outputs=4)
            cfg.define_split("tile_f", cfg.axis(f), num_outputs=4)
            cfg.define_split("tile_y", cfg.axis(y), num_outputs=4)
            cfg.define_split("tile_x", cfg.axis(x), num_outputs=4)
            cfg.define_split("tile_rc", cfg.axis(rc), num_outputs=3)
            cfg.define_knob("auto_unroll_max_step", [64, 512, 1500])
            target = tvm.target.Target.current()
            if target.kind.name in ["nvptx", "rocm"]:
                cfg.define_knob("unroll_explicit", [1])
            else:
                cfg.define_knob("unroll_explicit", [0, 1])
            if cfg.is_fallback:
                N, F, Y, X = get_const_tuple(conv.shape)
                if not isinstance(N, int):
                    N = 1
                _fallback_schedule(N, F, Y, X)
            ##### space definition end #####

            if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
                s[kernel].compute_inline()

            if conv.op in s.outputs:
                output = conv
                OL = s.cache_write(conv, "local")
            else:
                output = s.outputs[0].output(0)
                s[conv].set_scope("local")
                OL = conv

            # create cache stage
            s[pad_data].set_scope("shared")
            AA = pad_data
            WW = s.cache_read(kernel, "shared", [OL])

            # tile and bind spatial axes
            n, f, y, x = s[output].op.axis
            kernel_scope, n = s[output].split(n, nparts=1)
            bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n)
            bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
            by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
            bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
            s[output].reorder(bn, bf, by, bx, vn, vf, vy, vx, tn, tf, ty, tx, ni, fi, yi, xi)
            s[output].bind(bn, te.thread_axis("blockIdx.z"))
            s[output].bind(bf, te.thread_axis("blockIdx.y"))
            s[output].bind(s[output].fuse(by, bx), te.thread_axis("blockIdx.x"))
            s[output].bind(vn, te.thread_axis("vthread"))
            s[output].bind(vf, te.thread_axis("vthread"))
            s[output].bind(vy, te.thread_axis("vthread"))
            s[output].bind(vx, te.thread_axis("vthread"))

            cfg.define_knob("fuse_yx", [0, 1])  # fuse ty,tx or tn,tf
            if cfg["fuse_yx"].val:
                s[output].bind(tn, te.thread_axis("threadIdx.z"))
                s[output].bind(tf, te.thread_axis("threadIdx.y"))
                tyx = s[output].fuse(ty, tx)
                # BUGFIX: bind the already-fused axis. The previous code fused
                # ty/tx a second time, but those axes were consumed by the
                # fuse above and are no longer part of the stage's leaf axes.
                s[output].bind(tyx, te.thread_axis("threadIdx.x"))
                s[OL].compute_at(s[output], tyx)
                # number of threads
                n_tz = cfg["tile_n"].size[2]
                n_ty = cfg["tile_f"].size[2]
                n_tx = cfg["tile_y"].size[2] * cfg["tile_x"].size[2]
            else:
                s[output].bind(s[output].fuse(tn, tf), te.thread_axis("threadIdx.z"))
                s[output].bind(ty, te.thread_axis("threadIdx.y"))
                s[output].bind(tx, te.thread_axis("threadIdx.x"))
                s[OL].compute_at(s[output], tx)
                # number of threads
                n_tz = cfg["tile_n"].size[2] * cfg["tile_f"].size[2]
                n_ty = cfg["tile_y"].size[2]
                n_tx = cfg["tile_x"].size[2]

            # tile reduction axes
            n, f, y, x = s[OL].op.axis
            rc, ry, rx = s[OL].op.reduce_axis
            rco, rcm, rci = cfg["tile_rc"].apply(s, OL, rc)
            s[OL].reorder(rco, rcm, ry, rx, rci, n, f, y, x)

            s[AA].compute_at(s[OL], rx)
            s[WW].compute_at(s[OL], rx)

            # cooperative fetching: all threads of the block load the shared tiles
            for load in [AA, WW]:
                n, f, y, x = s[load].op.axis
                fused = s[load].fuse(f, y, x)
                tz, fused = s[load].split(fused, nparts=n_tz)
                ty, fused = s[load].split(fused, nparts=n_ty)
                tx, fused = s[load].split(fused, nparts=n_tx)
                s[load].bind(tz, te.thread_axis("threadIdx.z"))
                s[load].bind(ty, te.thread_axis("threadIdx.y"))
                s[load].bind(tx, te.thread_axis("threadIdx.x"))

            s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
            s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)

    traverse_inline(s, outs[0].op, _callback)
    return s
def conv2d_transpose_cudnn(
    x, w, stride, padding, out_dtype, output_padding=(0, 0), layout="NCHW", groups=1
):
    """Compute conv2d_transpose by delegating to the cuDNN dgrad kernel."""
    # cuDNN tensor-format codes: 0 = NCHW, 1 = NHWC.
    tensor_format = 0 if layout == "NCHW" else 1
    dilation = (1, 1)
    conv_mode = 1
    return cudnn.conv_backward_data(
        x,
        w,
        padding,
        stride,
        dilation,
        conv_mode,
        tensor_format,
        out_dtype,
        groups=groups,
        output_padding=output_padding,
    )
| 12,269 | 37.224299 | 99 | py |
tvm | tvm-main/python/tvm/topi/cuda/depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Schedule for depthwise_conv2d with auto fusion"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import traverse_inline
from .. import tag
from .. import nn
# register original implementation of depthwise_conv2d_nchw since we don't need to change this part
@autotvm.register_topi_compute("depthwise_conv2d_nchw.cuda")
def depthwise_conv2d_nchw(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute depthwise_conv2d with NCHW layout.

    Delegates to the generic TOPI implementation; ``cfg`` is unused here but
    required by the AutoTVM compute-registration signature.
    """
    return nn.depthwise_conv2d_nchw(
        data, kernel, strides, padding, dilation, out_dtype
    )
@autotvm.register_topi_schedule("depthwise_conv2d_nchw.cuda")
def schedule_depthwise_conv2d_nchw(cfg, outs):
    """Schedule for depthwise_conv2d nchw forward.

    Parameters
    ----------
    cfg: ConfigEntity
        The autotvm config for this template.

    outs: Array of Tensor
        The computation graph description of depthwise_conv2d
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for depthwise_conv2d nchw.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])

    def _callback(op):
        if op.tag == "depthwise_conv2d_nchw":
            pad_data = op.input_tensors[0]
            kernel = op.input_tensors[1]
            conv = op.output(0)

            ##### space definition begin #####
            n, f, y, x = s[conv].op.axis
            cfg.define_split("tile_f", f, num_outputs=4)
            cfg.define_split("tile_y", y, num_outputs=4)
            cfg.define_split("tile_x", x, num_outputs=4)
            cfg.define_knob("auto_unroll_max_step", [0, 256, 1500])
            target = tvm.target.Target.current()
            # nvptx/rocm require explicit unrolling; cuda may let nvcc decide
            if target.kind.name in ["nvptx", "rocm"]:
                cfg.define_knob("unroll_explicit", [1])
            else:
                cfg.define_knob("unroll_explicit", [0, 1])

            # fallback support: seed the config from the tophub reference log
            if cfg.is_fallback:
                ref_log = autotvm.tophub.load_reference_log(
                    target.kind.name, target.model, "depthwise_conv2d_nchw.cuda"
                )
                cfg.fallback_with_reference_log(ref_log)
                # TODO(lmzheng): A bug here, set unroll_explicit to False as workaround
                cfg["unroll_explicit"].val = 0
            ##### space definition end #####

            s[pad_data].compute_inline()
            if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
                s[kernel].compute_inline()

            # Write through a local accumulator; keep the real output stage.
            if conv.op in s.outputs:
                output = conv
                OL = s.cache_write(conv, "local")
            else:
                output = s.outputs[0].output(0)
                s[conv].set_scope("local")
                OL = conv

            # create cache stage: shared-memory tiles plus per-thread registers
            AA = s.cache_read(pad_data, "shared", [OL])
            WW = s.cache_read(kernel, "shared", [OL])
            AL = s.cache_read(AA, "local", [OL])
            WL = s.cache_read(WW, "local", [OL])

            # tile and bind spatial axes (block / vthread / thread / inner)
            n, f, y, x = s[output].op.axis
            bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
            by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
            bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)

            # kernel_scope: a dummy outer axis the unroll pragmas attach to
            kernel_scope, n = s[output].split(n, nparts=1)
            bf = s[output].fuse(n, bf)
            s[output].bind(bf, te.thread_axis("blockIdx.z"))
            s[output].bind(by, te.thread_axis("blockIdx.y"))
            s[output].bind(bx, te.thread_axis("blockIdx.x"))
            s[output].bind(vf, te.thread_axis("vthread"))
            s[output].bind(vy, te.thread_axis("vthread"))
            s[output].bind(vx, te.thread_axis("vthread"))
            s[output].bind(tf, te.thread_axis("threadIdx.z"))
            s[output].bind(ty, te.thread_axis("threadIdx.y"))
            s[output].bind(tx, te.thread_axis("threadIdx.x"))
            s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi)
            s[OL].compute_at(s[output], tx)

            # cooperative fetching: shared tiles per block, registers per thread
            s[AA].compute_at(s[output], bx)
            s[WW].compute_at(s[output], bx)
            s[AL].compute_at(s[output], tx)
            s[WL].compute_at(s[output], tx)

            for load in [AA, WW]:
                fused = s[load].fuse(*list(s[load].op.axis))
                fused, tx = s[load].split(fused, cfg["tile_x"].size[2])
                fused, ty = s[load].split(fused, cfg["tile_y"].size[2])
                fused, tz = s[load].split(fused, cfg["tile_f"].size[2])
                s[load].bind(tz, te.thread_axis("threadIdx.z"))
                s[load].bind(ty, te.thread_axis("threadIdx.y"))
                s[load].bind(tx, te.thread_axis("threadIdx.x"))

            s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
            s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)

    traverse_inline(s, outs[0].op, _callback)
    return s
def schedule_depthwise_conv2d_nhwc(outs):
    """Schedule for depthwise_conv2d nhwc forward.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of depthwise_conv2d
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for depthwise_conv2d nhwc.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])

    def _schedule(temp, Filter, DepthwiseConv2d):
        # temp is the padded input; inline it into the conv stage.
        s[temp].compute_inline()
        FS = s.cache_read(Filter, "shared", [DepthwiseConv2d])
        if DepthwiseConv2d.op in s.outputs:
            Output = DepthwiseConv2d
            CL = s.cache_write(DepthwiseConv2d, "local")
        else:
            Output = outs[0].op.output(0)
            s[DepthwiseConv2d].set_scope("local")

        block_x = te.thread_axis("blockIdx.x")
        thread_x = te.thread_axis("threadIdx.x")

        b, h, w, c = s[Output].op.axis

        # make sure the size of our parallelism is not larger than the number of threads
        num_thread = min(
            tvm.arith.Analyzer().simplify(temp.shape[3]).value,
            tvm.target.Target.current().max_num_threads,
        )
        xoc, xic = s[Output].split(c, factor=num_thread)
        s[Output].reorder(xoc, b, h, w, xic)
        # 2x2 spatial tile per block; inner tile axes stay sequential
        xo, yo, _, _ = s[Output].tile(h, w, x_factor=2, y_factor=2)
        fused = s[Output].fuse(yo, xo)
        fused = s[Output].fuse(fused, b)
        fused = s[Output].fuse(fused, xoc)

        s[Output].bind(fused, block_x)
        s[Output].bind(xic, thread_x)

        if DepthwiseConv2d.op in s.outputs:
            s[CL].compute_at(s[Output], xic)
        else:
            s[DepthwiseConv2d].compute_at(s[Output], xic)

        # cooperative load of the filter tile into shared memory
        _, _, ci, fi = s[FS].op.axis
        s[FS].compute_at(s[Output], fused)
        fused = s[FS].fuse(fi, ci)
        s[FS].bind(fused, thread_x)

    scheduled_ops = []

    def traverse(OP):
        """Internal traverse function"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(OP.tag):
            if OP not in s.outputs:
                s[OP].compute_inline()
            for tensor in OP.input_tensors:
                if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
                    traverse(tensor.op)
        # schedule depthwise_conv2d
        if OP.tag == "depthwise_conv2d_nhwc":
            PaddedInput = OP.input_tensors[0]
            Filter = OP.input_tensors[1]
            if isinstance(Filter.op, tvm.te.ComputeOp) and "dilate" in Filter.op.tag:
                s[Filter].compute_inline()
            DepthwiseConv2d = OP.output(0)
            _schedule(PaddedInput, Filter, DepthwiseConv2d)

        scheduled_ops.append(OP)

    traverse(outs[0].op)
    return s
def schedule_depthwise_conv2d_backward_input_nhwc(outs):
    """Schedule for depthwise_conv2d nhwc backward wrt input.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of depthwise_conv2d
        backward wrt input in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for depthwise_conv2d backward
        wrt input with layout nhwc.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])

    def _schedule(Padded_out_grad, In_grad):
        s[Padded_out_grad].compute_inline()

        block_x = te.thread_axis("blockIdx.x")
        thread_x = te.thread_axis("threadIdx.x")
        # Flatten all spatial/channel axes and distribute them over
        # 128-thread blocks; batch stays as the outer sequential axis.
        _, h, w, c = In_grad.op.axis
        fused_hwc = s[In_grad].fuse(h, w, c)
        xoc, xic = s[In_grad].split(fused_hwc, factor=128)

        s[In_grad].bind(xoc, block_x)
        s[In_grad].bind(xic, thread_x)

    def traverse(OP):
        # inline all one-to-one-mapping operators except the last stage (output)
        if OP.tag == "depthwise_conv2d_backward_input_nhwc":
            Padded_out_grad = OP.input_tensors[0]
            Dilated_out_grad = Padded_out_grad.op.input_tensors[0]
            s[Dilated_out_grad].compute_inline()
            In_grad = OP.output(0)
            _schedule(Padded_out_grad, In_grad)
        else:
            raise ValueError("Depthwise conv backward wrt input for non-NHWC is not supported.")

    traverse(outs[0].op)
    return s
def schedule_depthwise_conv2d_backward_weight_nhwc(outs):
    """Schedule for depthwise_conv2d nhwc backward wrt weight.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of depthwise_conv2d
        backward wrt weight in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for depthwise_conv2d backward
        wrt weight with layout nhwc.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])

    def _schedule(Weight_grad):
        block_x = te.thread_axis("blockIdx.x")
        thread_y = te.thread_axis("threadIdx.y")
        thread_x = te.thread_axis("threadIdx.x")

        # Split the fused (batch, dh, dw) reduction and rfactor it so the
        # partial sums can be computed in parallel across threadIdx.y.
        db, dh, dw = Weight_grad.op.reduce_axis
        fused_dbdhdw = s[Weight_grad].fuse(db, dh, dw)
        _, ki = s[Weight_grad].split(fused_dbdhdw, factor=8)
        BF = s.rfactor(Weight_grad, ki)

        # Distribute the output elements over 32-thread blocks.
        fused_fwcm = s[Weight_grad].fuse(*s[Weight_grad].op.axis)

        xo, xi = s[Weight_grad].split(fused_fwcm, factor=32)

        s[Weight_grad].bind(xi, thread_x)
        s[Weight_grad].bind(xo, block_x)

        s[Weight_grad].bind(s[Weight_grad].op.reduce_axis[0], thread_y)
        s[BF].compute_at(s[Weight_grad], s[Weight_grad].op.reduce_axis[0])

    def traverse(OP):
        # inline all one-to-one-mapping operators except the last stage (output)
        if OP.tag == "depthwise_conv2d_backward_weight_nhwc":
            Padded_in = OP.input_tensors[1]
            s[Padded_in].compute_inline()
            Weight_grad = OP.output(0)
            _schedule(Weight_grad)
        else:
            raise ValueError("Depthwise conv backward wrt weight for non-NHWC is not supported.")

    traverse(outs[0].op)
    return s
| 12,050 | 36.659375 | 99 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv2d_nhwc_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-function-args
# pylint: disable=too-many-statements, unused-argument, too-many-arguments
"""Tensorcore template for cuda backend"""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple, traverse_inline, simplify
from ..nn.pad import pad
from ..nn.utils import get_pad_tuple
from .tensor_intrin import intrin_wmma_load_matrix_A
from .tensor_intrin import intrin_wmma_load_matrix_W
from .tensor_intrin import intrin_wmma_store_matrix
from .tensor_intrin import intrin_wmma_gemm
def nhwc_tensorcore_cuda(cfg, Input, Filter, stride, padding, dilation, out_dtype):
    """Compute declaration for conv2d (NHWC) with tensorcore-compatible shapes.

    Input and filter are cast to float16 before the reduction so the matmul
    can later be tensorized with wmma intrinsics; accumulation is in
    ``out_dtype``.
    """
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilation, int) or len(dilation) == 2
    stride_h, stride_w = (stride, stride) if isinstance(stride, int) else stride
    dilation_h, dilation_w = (dilation, dilation) if isinstance(dilation, int) else dilation

    batch, in_height, in_width, in_channel = get_const_tuple(Input.shape)
    kernel_h, kernel_w, _, num_filter = get_const_tuple(Filter.shape)

    # wmma fragments exist only for (16, 16, 16), (32, 16, 8), (8, 16, 32);
    # (batch, in_channel, num_filter) must line up with one of them.
    assert (
        (batch % 16 == 0 and in_channel % 16 == 0 and num_filter % 16 == 0)
        or (batch % 8 == 0 and in_channel % 16 == 0 and num_filter % 32 == 0)
        or (batch % 32 == 0 and in_channel % 16 == 0 and num_filter % 8 == 0)
    ), (
        "The shape of (batch, in_channel, num_filter) "
        "must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32) for now"
    )

    # compute the output shape
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    out_channel = num_filter
    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)

    padded = pad(Input, [0, pad_top, pad_left, 0], [0, pad_down, pad_right, 0], name="PaddedInput")

    rc = te.reduce_axis((0, in_channel), name="rc")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")

    # convert data type of input feature maps and weights
    # TODO: add checking here, datatype casting may cause precision loss
    data_f16 = te.compute(padded.shape, lambda n, h, w, c: padded[n, h, w, c].astype("float16"))
    filter_f16 = te.compute(Filter.shape, lambda h, w, i, o: Filter[h, w, i, o].astype("float16"))

    return te.compute(
        (batch, out_height, out_width, out_channel),
        lambda n, y, x, f: te.sum(
            data_f16[
                n, y * stride_h + ry * dilation_h, x * stride_w + rx * dilation_w, rc
            ].astype(out_dtype)
            * filter_f16[ry, rx, rc, f].astype(out_dtype),
            axis=[ry, rx, rc],
        ),
        name="Conv2dOutput",
        tag="conv2d_nhwc_tensorcore",
    )
def schedule_nhwc_tensorcore_cuda(cfg, s, Conv):
    """Schedule tensorcore template.

    Parameters
    ----------
    cfg: ConfigEntity
        The autotvm config for this template.
    s: Schedule
        The schedule being mutated in place.
    Conv: te.Tensor
        The conv2d_nhwc_tensorcore output stage to schedule.
    """
    kh, kw, ic = s[Conv].op.reduce_axis
    out_dtype = Conv.dtype
    trans_paddata, kernel = s[Conv].op.input_tensors
    in_dtype = trans_paddata.dtype
    batch, _, _, _ = get_const_tuple(Conv.shape)
    _, _, _, out_channels = get_const_tuple(kernel.shape)
    paddata = s[trans_paddata].op.input_tensors

    # inline the pad and dtype transform
    s[trans_paddata].compute_inline()
    s[kernel].compute_inline()
    s[paddata[0]].compute_inline()

    # Designate the memory hierarchy: shared tiles -> wmma fragments -> accumulator
    AS = s.cache_read(trans_paddata, "shared", [Conv])
    WS = s.cache_read(kernel, "shared", [Conv])
    AF = s.cache_read(AS, "wmma.matrix_a", [Conv])
    WF = s.cache_read(WS, "wmma.matrix_b", [Conv])
    ConvF = s.cache_write(Conv, "wmma.accumulator")

    if Conv.op in s.outputs:
        output = Conv
        ConvS = s.cache_read(ConvF, "shared", [Conv])
        OL = ConvS
    else:
        output = s.outputs[0].output(0)
        s[Conv].set_scope("shared")
        OL = Conv

    # Schedule for autotvm
    cfg.define_knob("block_row_warps", [1, 2, 4])
    cfg.define_knob("block_col_warps", [1, 2, 4])
    cfg.define_knob("warp_row_tiles", [1, 2, 4])
    cfg.define_knob("warp_col_tiles", [1, 2, 4])
    cfg.define_knob("chunk", [1, 2, 4, 8])
    cfg.define_knob("offset", [0, 8])
    cfg.define_knob("vector_width", [1, 2, 4, 8])

    # wmma_m candidates depend on which fragment shapes (batch, out_channels) admit
    if batch % 16 == 0 and out_channels % 16 == 0:
        cfg.define_knob("wmma_m", [16, 8, 32])
    elif batch % 8 == 0 and out_channels % 32 == 0:
        cfg.define_knob("wmma_m", [8, 16, 32])
    elif batch % 32 == 0 and out_channels % 8 == 0:
        cfg.define_knob("wmma_m", [32, 16, 8])

    # fallback support
    target = tvm.target.Target.current()
    if cfg.is_fallback:
        ref_log = autotvm.tophub.load_reference_log(
            target.kind.name, target.model, "conv2d_nhwc_tensorcore.cuda"
        )
        cfg.fallback_with_reference_log(ref_log)

    block_row_warps = cfg["block_row_warps"].val
    block_col_warps = cfg["block_col_warps"].val
    warp_row_tiles = cfg["warp_row_tiles"].val
    warp_col_tiles = cfg["warp_col_tiles"].val
    chunk = cfg["chunk"].val
    offset = cfg["offset"].val
    wmma_m = cfg["wmma_m"].val
    vector_width = cfg["vector_width"].val

    # the only legal fragment shapes are (m, n, 16) with (m, n) below
    wmma_k = 16
    if wmma_m == 16:
        wmma_n = 16
    elif wmma_m == 8:
        wmma_n = 32
    elif wmma_m == 32:
        wmma_n = 8

    warp_size = 32

    block_x = te.thread_axis("blockIdx.x")
    block_y = te.thread_axis("blockIdx.y")
    block_z = te.thread_axis("blockIdx.z")
    thread_x = te.thread_axis("threadIdx.x")
    thread_y = te.thread_axis("threadIdx.y")
    thread_z = te.thread_axis("threadIdx.z")

    # Define the intrin strides (row-major strides for the given extents)
    def get_strides(extents):
        return [np.prod(extents[i:]).tolist() for i in range(len(extents))]

    # 'offset' pads shared-memory rows to dodge bank conflicts
    AS_align = chunk * wmma_k + offset
    WS_align = warp_col_tiles * block_col_warps * wmma_n + offset
    block_factor_n = wmma_m * warp_row_tiles * block_row_warps
    block_factor_o = wmma_n * warp_col_tiles * block_col_warps
    CS_align = block_factor_o + offset
    AS_strides = get_strides([1, 1, AS_align, 1])
    AL_strides = get_strides([1, 1, wmma_k, 1])
    WS_strides = get_strides([WS_align, 1])
    WL_strides = get_strides([wmma_n * warp_col_tiles, 1])
    CL_strides = get_strides([1, 1, wmma_n * warp_col_tiles, 1])
    CS_strides = get_strides([1, 1, CS_align, 1])

    # Schedule for output: spatial (h, w) over blockIdx.z, batch/channel tiles
    # over blockIdx.x/y, flattened elements over the thread hierarchy.
    nc, hc, wc, oc = output.op.axis
    block_k = s[output].fuse(hc, wc)
    s[output].bind(block_k, block_z)
    block_i, nc = s[output].split(nc, factor=block_factor_n)
    block_j, oc = s[output].split(oc, factor=block_factor_o)
    s[output].reorder(block_k, block_i, block_j, nc, oc)
    t = s[output].fuse(nc, oc)
    t, ti = s[output].split(t, factor=vector_width)
    t, tx = s[output].split(t, factor=warp_size)
    t, ty = s[output].split(t, factor=block_row_warps)
    t, tz = s[output].split(t, factor=block_col_warps)
    s[output].bind(block_i, block_x)
    s[output].bind(block_j, block_y)
    s[output].bind(tz, thread_z)
    s[output].bind(ty, thread_y)
    s[output].bind(tx, thread_x)
    s[output].vectorize(ti)

    # Schedule wmma store (accumulator fragments -> shared memory)
    s[OL].compute_at(s[output], block_j)
    nc, hc, wc, oc = OL.op.axis
    s[OL].reorder(hc, wc, nc, oc)
    s[OL].storage_align(wc, CS_align - 1, CS_align)
    oc, ooc = s[OL].split(oc, factor=wmma_n)
    oc, oci = s[OL].split(oc, factor=warp_col_tiles)
    _, oc = s[OL].split(oc, factor=block_col_warps)
    nc, nnc = s[OL].split(nc, factor=wmma_m)
    nc, nci = s[OL].split(nc, factor=warp_row_tiles)
    _, nc = s[OL].split(nc, factor=block_row_warps)
    s[OL].reorder(nc, oc, nci, oci, nnc, ooc)
    s[OL].bind(nc, thread_y)
    s[OL].bind(oc, thread_z)

    # Schedule wmma computation (one warp per (nci, oci) tile)
    s[ConvF].compute_at(s[OL], oc)
    n, h, w, o = ConvF.op.axis
    n, nnf = s[ConvF].split(n, factor=wmma_m)
    o, oof = s[ConvF].split(o, factor=wmma_n)
    ic, ii = s[ConvF].split(ic, factor=wmma_k)
    ko, ki = s[ConvF].split(ic, factor=chunk)
    s[ConvF].reorder(kh, kw, ko, ki, n, o, nnf, oof, ii)

    s[AF].compute_at(s[ConvF], ki)
    s[WF].compute_at(s[ConvF], ki)

    # Schedule wmma load (shared memory -> matrix_a / matrix_b fragments)
    n, h, w, i = AF.op.axis
    n, nn = s[AF].split(n, factor=wmma_m)
    i, ii = s[AF].split(i, factor=wmma_k)
    s[AF].reorder(n, i, nn, ii)

    kh, kw, i, o = WF.op.axis
    i, ii = s[WF].split(i, factor=wmma_k)
    o, oo = s[WF].split(o, factor=wmma_n)
    # NOTE(review): the first reorder is overridden by the second (which lists
    # all four axes) and looks redundant — kept as-is to preserve behavior.
    s[WF].reorder(o, i, oo)
    s[WF].reorder(i, o, ii, oo)

    s[WS].compute_at(s[ConvF], ko)
    s[AS].compute_at(s[ConvF], ko)

    # Schedule for data's share memory (cooperative, vectorized copy)
    n, h, w, i = AS.op.axis
    s[AS].reorder(h, w, n, i)
    s[AS].storage_align(w, AS_align - 1, AS_align)
    t = s[AS].fuse(n, i)
    t, ti = s[AS].split(t, factor=vector_width)
    t, tx = s[AS].split(t, factor=warp_size)
    t, ty = s[AS].split(t, factor=block_row_warps)
    _, tz = s[AS].split(t, factor=block_col_warps)
    s[AS].bind(ty, thread_y)
    s[AS].bind(tz, thread_z)
    s[AS].bind(tx, thread_x)
    s[AS].vectorize(ti)

    # Schedule for kernel's share memory
    kh, kw, ic, o = WS.op.axis
    t = s[WS].fuse(ic, o)
    s[WS].storage_align(ic, WS_align - 1, WS_align)
    t, ti = s[WS].split(t, factor=vector_width)
    t, tx = s[WS].split(t, factor=warp_size)
    t, ty = s[WS].split(t, factor=block_row_warps)
    _, tz = s[WS].split(t, factor=block_col_warps)
    s[WS].bind(ty, thread_y)
    s[WS].bind(tz, thread_z)
    s[WS].bind(tx, thread_x)
    s[WS].vectorize(ti)

    shape = (wmma_m, wmma_n, wmma_k)

    # tensorize the wmma process
    AS_shape = (wmma_m, 1, 1, wmma_k)
    AL_shape = (wmma_m, 1, 1, wmma_k)
    WS_shape = (wmma_k, wmma_n)
    WL_shape = (wmma_k, wmma_n)
    CL_shape = (wmma_m, 1, 1, wmma_n)
    CS_shape = (wmma_m, 1, 1, wmma_n)

    # stand-in gemm used to describe the fragment-level computation pattern
    AL_gemm = te.placeholder(AL_shape, name="A", dtype=in_dtype)
    WL_gemm = te.placeholder(WL_shape, name="B", dtype=in_dtype)
    k_gemm = te.reduce_axis((0, wmma_k), name="k")
    CL_compute = te.compute(
        CL_shape,
        lambda ii, t0, t1, jj: te.sum(
            AL_gemm[ii, t0, t1, k_gemm].astype(out_dtype) * WL_gemm[k_gemm, jj].astype(out_dtype),
            axis=k_gemm,
        ),
        name="C",
    )

    s[AF].tensorize(
        nn,
        intrin_wmma_load_matrix_A(
            AL_strides, AS_strides, shape, "row_major", AS_shape, AL_shape, in_dtype
        ),
    )
    s[WF].tensorize(
        ii,
        intrin_wmma_load_matrix_W(
            WL_strides, WS_strides, shape, "row_major", WS_shape, WL_shape, in_dtype
        ),
    )
    s[OL].tensorize(
        nnc, intrin_wmma_store_matrix(CS_strides, CL_strides, shape, out_dtype, CL_shape, CS_shape)
    )
    s[ConvF].tensorize(
        nnf,
        intrin_wmma_gemm(AL_gemm, WL_gemm, CL_compute, AL_strides, WL_strides, CL_strides, shape),
    )

    # report the flop count so autotvm can compute GFLOPS
    N, OH, OW, CO = get_const_tuple(output.shape)
    KH, KW, CI, _ = get_const_tuple(kernel.shape)
    cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW)
@autotvm.register_topi_compute("conv2d_nhwc_tensorcore.cuda")
def conv2d_nhwc_tensorcore(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute conv2d with tensorcore for NHWC layout.

    (The previous docstring incorrectly said NCHW; this template operates on
    NHWC data, as the compute and the task name indicate.)
    """
    return nhwc_tensorcore_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv2d_nhwc_tensorcore.cuda")
def schedule_conv2d_nhwc_tensorcore(cfg, outs):
    """TOPI schedule callback for conv2d_nhwc_tensorcore.

    Parameters
    ----------
    cfg: ConfigEntity
        The autotvm config for this template.
    outs: Tensor or Array of Tensor
        The computation graph description of the op.

    Returns
    -------
    s: Schedule
        The computation schedule.
    """
    # Normalize a bare Tensor into a list, matching every other cuda
    # schedule in this module; without it a single-tensor argument crashes
    # on the iteration below.
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])

    def _callback(op):
        if "conv2d_nhwc_tensorcore" in op.tag:
            schedule_nhwc_tensorcore_cuda(cfg, s, op.output(0))

    traverse_inline(s, outs[0].op, _callback)
    return s
| 12,782 | 36.159884 | 99 | py |
tvm | tvm-main/python/tvm/topi/cuda/deformable_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Schedule template of deformable conv2d with cuda backend"""
import tvm
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import traverse_inline
@autotvm.register_topi_compute("deformable_conv2d_nchw.cuda")
def deformable_conv2d_nchw(
    cfg, data, offset, kernel, strides, padding, dilation, deformable_groups, groups, out_dtype
):
    """Compute declaration for deformable conv2d in NCHW layout.

    Forwards to the generic ``nn.deformable_conv2d_nchw`` compute; ``cfg``
    is consumed by the matching schedule, not here.
    """
    out = nn.deformable_conv2d_nchw(
        data, offset, kernel, strides, padding, dilation, deformable_groups, groups, out_dtype
    )
    return out
@autotvm.register_topi_schedule("deformable_conv2d_nchw.cuda")
def schedule_deformable_conv2d_nchw(cfg, outs):
    """TOPI schedule callback of deformable conv2d for cuda gpu

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template

    outs: Array of Tensor
        The computation graph description of conv2d
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for conv2d.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _apply(op):
        # Dispatch the direct CUDA template on the deformable conv2d stage.
        if op.tag == "deformable_conv2d_nchw":
            _schedule_direct_cuda(cfg, sch, op.output(0))

    traverse_inline(sch, outs[0].op, _apply)
    return sch
def _schedule_direct_cuda(cfg, s, conv):
    """Schedule template of deformable conv2d (direct, non-winograd).

    Parameters
    ----------
    cfg : ConfigEntity
        Tuning configuration; split/knob entries are defined here and then
        applied below.
    s : Schedule
        The schedule being mutated in place.
    conv : Tensor
        Output tensor of the deformable conv2d compute stage.
    """
    n, f, y, x = s[conv].op.axis
    rc, ry, rx = s[conv].op.reduce_axis
    # Tuning space: 4-way splits on spatial/output-channel axes,
    # 2-way splits on the reduction axes.
    cfg.define_split("tile_f", f, num_outputs=4)
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_rc", rc, num_outputs=2)
    cfg.define_split("tile_ry", ry, num_outputs=2)
    cfg.define_split("tile_rx", rx, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    target = tvm.target.Target.current()
    # nvptx/rocm backends always use explicit unrolling.
    if target.kind.name in ["nvptx", "rocm"]:
        cfg.define_knob("unroll_explicit", [1])
    else:
        cfg.define_knob("unroll_explicit", [0, 1])
    data_deform, kernel = s[conv].op.input_tensors
    # Inline the deformed-data gather into the conv loop nest.
    s[data_deform].compute_inline()
    if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()
    # If conv itself is the graph output, write through a local cache;
    # otherwise the fused epilogue op is the output and conv stays local.
    if conv.op in s.outputs:
        output = conv
        OL = s.cache_write(conv, "local")
    else:
        output = s.outputs[0].output(0)
        s[conv].set_scope("local")
        OL = conv
    # create cache stage
    AA = s.cache_read(data_deform, "shared", [OL])
    WW = s.cache_read(kernel, "shared", [OL])
    # tile and bind spatial axes
    n, f, y, x = s[output].op.axis
    # kernel_scope: a trivial outer axis used only to attach unroll pragmas.
    kernel_scope, n = s[output].split(n, nparts=1)
    bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
    by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
    bf = s[output].fuse(n, bf)
    s[output].bind(bf, te.thread_axis("blockIdx.z"))
    s[output].bind(by, te.thread_axis("blockIdx.y"))
    s[output].bind(bx, te.thread_axis("blockIdx.x"))
    s[output].bind(vf, te.thread_axis("vthread"))
    s[output].bind(vy, te.thread_axis("vthread"))
    s[output].bind(vx, te.thread_axis("vthread"))
    s[output].bind(tf, te.thread_axis("threadIdx.z"))
    s[output].bind(ty, te.thread_axis("threadIdx.y"))
    s[output].bind(tx, te.thread_axis("threadIdx.x"))
    s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi)
    s[OL].compute_at(s[output], tx)
    # tile reduction axes
    n, f, y, x = s[OL].op.axis
    rc, ry, rx = s[OL].op.reduce_axis
    rco, rci = cfg["tile_rc"].apply(s, OL, rc)
    ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
    rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)
    s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x)
    # Let the tuner also permute the order of the outer reduction loops
    # (applied to the inner triplet with the same permutation).
    cfg.define_reorder("reorder_inner", [rco, ryo, rxo], "all")
    cfg["reorder_inner"].apply(s, OL, [rco, ryo, rxo])
    cfg["reorder_inner"].apply(s, OL, [rci, ryi, rxi])
    # Attach the shared-memory loads at the innermost of the outer
    # reduction loops after permutation.
    cache_loc = [rco, ryo, rxo][cfg["reorder_inner"].perm[-1]]
    s[AA].compute_at(s[OL], cache_loc)
    s[WW].compute_at(s[OL], cache_loc)
    # cooperative fetching
    for load in [AA, WW]:
        fused = s[load].fuse(*s[load].op.axis)
        tz, fused = s[load].split(fused, nparts=cfg["tile_f"].size[2])
        ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
        tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
    # unroll
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
| 5,572 | 36.153333 | 95 | py |
tvm | tvm-main/python/tvm/topi/cuda/tensor_intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unnecessary-lambda, too-many-arguments
"""Tensor intrinsics on CUDA."""
import tvm
from tvm import te
from ..utils import is_target
def dp4a(x_scope="local", y_scope="local", z_scope="local", dtypes=("int8", "int8")):
    """
    Int8 dot product reduced by every 4 elements using __dp4a

    Parameters
    ----------
    x_scope : str, optional
        The storage scope of buffer for lhs
    y_scope : str, optional
        The storage scope of buffer for rhs
    z_scope : str, optional
        The storage scope of buffer for result
    dtypes: tuple of strs, optional
        The dtype of x and y

    Returns
    -------
    intrin : TensorIntrin
        The dp4a TensorIntrin that can be used in tensorizing schedule.
    """
    n = 4  # dp4a requires operands packed by 4
    result_dtype = "int32" if dtypes[1] == "int8" else "uint32"

    x = te.placeholder((n,), name="x", dtype=dtypes[0])
    y = te.placeholder((n,), name="y", dtype=dtypes[1])

    k = te.reduce_axis((0, n), name="rc")

    z = te.compute(
        (1,), lambda i: te.sum(x[k].astype(result_dtype) * y[k].astype(result_dtype), axis=[k])
    )

    def _intrin_func(ins, outs):
        def _instr(index):
            # index: 0 = body, 1 = reset, 2 = update (tensorize convention).
            xx, yy = ins
            zz = outs[0]
            zz_dtype = zz.dtype

            if index == 1:
                # Reset: zero-initialize the accumulator.
                return zz.vstore(0, tvm.tir.const(0, zz_dtype))

            ib = tvm.tir.ir_builder.create()

            vec_x_dtype = "int8x4" if xx.dtype == "int8" else "uint8x4"
            vec_y_dtype = "int8x4" if yy.dtype == "int8" else "uint8x4"

            vec_x = xx.vload(0, dtype=vec_x_dtype)
            vec_y = yy.vload(0, dtype=vec_y_dtype)
            # Body (index 0) starts from zero; update (index 2) accumulates.
            prev_z = 0 if index == 0 else zz.vload(0)

            if is_target("rocm"):
                # TODO(masahi): Here we are assuming that we are compiling for gfx10 or later
                # We can refine the specification for dot product on rocm if needed later.
                # We can just use "llvm.amdgcn.udot4" for u8u8u32, but it is not tested.
                # Bug fix: the original condition checked dtypes[0] twice, so an
                # unsigned rhs dtype slipped past this guard; check both operands.
                assert (
                    dtypes[0] == "int8" and dtypes[1] == "int8"
                ), "u8u8u32 dot product for rocm not supported yet"

                new_z = tvm.tir.call_llvm_pure_intrin(
                    zz_dtype,
                    "llvm.amdgcn.sdot4",
                    tvm.tir.const(4, "uint32"),
                    tvm.tir.call_intrin("int32", "tir.reinterpret", vec_x),
                    tvm.tir.call_intrin("int32", "tir.reinterpret", vec_y),
                    prev_z,
                    True,
                )
            else:
                # CUDA path: __dp4a handles both signed and unsigned operands.
                new_z = tvm.tir.call_pure_extern(zz_dtype, "__dp4a", vec_x, vec_y, prev_z)

            ib.emit(zz.vstore(0, new_z))

            return ib.get()

        return _instr(0), _instr(1), _instr(2)  # body, reset, update

    default_buffer_params = {"data_alignment": 4, "offset_factor": 1}
    scopes = {x: x_scope, y: y_scope, z: z_scope}
    binds = {
        t: tvm.tir.decl_buffer(
            t.shape, t.dtype, t.op.name, scope=scopes[t], **default_buffer_params
        )
        for t in [x, y, z]
    }

    return te.decl_tensor_intrin(
        z.op, _intrin_func, binds=binds, default_buffer_params=default_buffer_params
    )
def intrin_wmma_load_matrix_A(strides_dst, strides_from, shape, layout, A_shape, C_shape, in_dtype):
    """Intrin function for loading data from shared memory to wmma.matrix_a

    Parameters
    ----------
    strides_dst : list of int
        Strides of the destination (wmma.matrix_a) buffer.
    strides_from : list of int
        Strides of the source (shared) buffer.
    shape : tuple of int
        (wmma_m, wmma_n, wmma_k) fragment shape.
    layout : str
        Fragment layout string forwarded to tvm_load_matrix_sync.
    A_shape, C_shape : tuple of int
        Shapes of the source and destination tensors of the copy.
    in_dtype : str
        Element dtype of the matrix.
    """
    wmma_m, wmma_n, wmma_k = shape
    A = te.placeholder(A_shape, name="A", dtype=in_dtype)
    BA = tvm.tir.decl_buffer(
        A.shape, A.dtype, scope="shared", strides=strides_from, data_alignment=32, offset_factor=8
    )
    # The intrinsic body is an identity copy; tensorize replaces it with a
    # single warp-level load_matrix_sync.
    C = te.compute(C_shape, lambda *i: A(*i), name="C")
    BC = tvm.tir.decl_buffer(
        C.shape,
        C.dtype,
        scope="wmma.matrix_a",
        strides=strides_dst,
        data_alignment=32,
        offset_factor=8,
    )

    def intrin_func(ins, outs):
        ib = tvm.tir.ir_builder.create()

        BA = ins[0]
        BC = outs[0]
        # One matrix_a fragment covers wmma_m * wmma_k elements; derive the
        # fragment index from the destination's element offset.
        row = wmma_m * wmma_k
        warp_index = BC.elem_offset // row + BC.elem_offset % row // wmma_k
        ib.emit(
            tvm.tir.call_intrin(
                "handle",
                "tir.tvm_load_matrix_sync",
                BC.data,
                wmma_m,
                wmma_n,
                wmma_k,
                warp_index,
                BA.access_ptr("r"),
                strides_from[0],
                layout,
            )
        )
        return ib.get()

    return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC})
def intrin_wmma_load_matrix_W(strides_dst, strides_from, shape, layout, A_shape, C_shape, in_dtype):
    """Intrin function for loading data from shared memory to wmma.matrix_b

    Mirrors ``intrin_wmma_load_matrix_A`` but targets the matrix_b fragment,
    whose size is wmma_n * wmma_k elements.
    """
    wmma_m, wmma_n, wmma_k = shape
    A = te.placeholder(A_shape, name="A", dtype=in_dtype)
    BA = tvm.tir.decl_buffer(
        A.shape, A.dtype, scope="shared", strides=strides_from, data_alignment=32, offset_factor=8
    )
    # Identity copy; replaced at tensorize time by one load_matrix_sync.
    C = te.compute(C_shape, lambda *i: A(*i), name="C")
    BC = tvm.tir.decl_buffer(
        C.shape,
        C.dtype,
        scope="wmma.matrix_b",
        strides=strides_dst,
        data_alignment=32,
        offset_factor=8,
    )

    def intrin_func(ins, outs):
        ib = tvm.tir.ir_builder.create()

        BA = ins[0]
        BC = outs[0]
        # matrix_b fragments hold wmma_n * wmma_k elements (note: wmma_n,
        # not wmma_m as in the matrix_a variant).
        row = wmma_n * wmma_k
        warp_index = BC.elem_offset // row + BC.elem_offset % row // wmma_n
        ib.emit(
            tvm.tir.call_intrin(
                "handle",
                "tir.tvm_load_matrix_sync",
                BC.data,
                wmma_m,
                wmma_n,
                wmma_k,
                warp_index,
                BA.access_ptr("r"),
                strides_from[0],
                layout,
            )
        )
        return ib.get()

    return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC})
def intrin_wmma_store_matrix(strides_dst, strides_from, shape, out_dtype, A_shape, C_shape):
    """Intrin function for storing the results from wmma.accumulator to shared

    The copy direction is accumulator -> shared; the fragment index is
    computed from the *source* (accumulator) offset here.
    """
    wmma_m, wmma_n, wmma_k = shape
    A = te.placeholder(A_shape, name="A", dtype=out_dtype)
    BA = tvm.tir.decl_buffer(
        A.shape,
        A.dtype,
        scope="wmma.accumulator",
        strides=strides_from,
        data_alignment=32,
        offset_factor=8,
    )
    # Identity copy; replaced at tensorize time by one store_matrix_sync.
    C = te.compute(C_shape, lambda *i: A(*i), name="C")
    BC = tvm.tir.decl_buffer(
        C.shape, C.dtype, scope="shared", strides=strides_dst, data_alignment=32, offset_factor=8
    )

    def intrin_func(ins, outs):
        ib = tvm.tir.ir_builder.create()

        BA = ins[0]
        BC = outs[0]
        # Accumulator fragments cover wmma_m * wmma_n elements.
        row = wmma_m * wmma_n
        warp_index = BA.elem_offset // row + BA.elem_offset % row // wmma_n
        ib.emit(
            tvm.tir.call_intrin(
                "handle",
                "tir.tvm_store_matrix_sync",
                BA.data,
                wmma_m,
                wmma_n,
                wmma_k,
                warp_index,
                BC.access_ptr("w"),
                strides_dst[0],
                "row_major",
            )
        )
        return ib.get()

    return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC})
def intrin_wmma_gemm(AL_gemm, WL_gemm, CL_compute, strides_A, strides_W, strides_Conv, shape):
    """Intrin for wmma fill_fragment and mma_sync

    Parameters
    ----------
    AL_gemm : tvm.te.placeholder
        wmma matrix A
    WL_gemm : tvm.te.placeholder
        wmma matrix B
    CL_compute : tvm.te.compute
        The definition of wmma gemm
    """
    wmma_m, wmma_n, wmma_k = shape
    A, B, C = AL_gemm, WL_gemm, CL_compute
    buf_a = tvm.tir.decl_buffer(
        A.shape,
        A.dtype,
        name="BA",
        scope="wmma.matrix_a",
        data_alignment=32,
        offset_factor=8,
        strides=strides_A,
    )
    buf_b = tvm.tir.decl_buffer(
        B.shape,
        B.dtype,
        name="BB",
        scope="wmma.matrix_b",
        data_alignment=32,
        offset_factor=8,
        strides=strides_W,
    )
    buf_c = tvm.tir.decl_buffer(
        C.shape,
        C.dtype,
        name="BC",
        scope="wmma.accumulator",
        data_alignment=32,
        offset_factor=8,
        strides=strides_Conv,
    )

    def _intrin_func(ins, outs):
        frag_a, frag_b = ins
        (frag_c,) = outs

        def _warp_index(offset, row, col):
            # A fragment spans row*col elements; map a flat element offset
            # to its fragment index.
            tile = row * col
            return offset // tile + offset % tile // col

        idx_a = _warp_index(frag_a.elem_offset, wmma_m, wmma_k)
        idx_b = _warp_index(frag_b.elem_offset, wmma_k, wmma_n)
        idx_c = _warp_index(frag_c.elem_offset, wmma_m, wmma_n)

        def _init():
            # Zero-fill the accumulator fragment.
            ib = tvm.tir.ir_builder.create()
            ib.emit(
                tvm.tir.call_intrin(
                    "handle",
                    "tir.tvm_fill_fragment",
                    frag_c.data,
                    wmma_m,
                    wmma_n,
                    wmma_k,
                    idx_c,
                    0.0,
                )
            )
            return ib.get()

        def _update():
            # C += A @ B on the warp's fragments.
            ib = tvm.tir.ir_builder.create()
            ib.emit(
                tvm.tir.call_intrin(
                    "handle",
                    "tir.tvm_mma_sync",
                    frag_c.data,
                    idx_c,
                    frag_a.data,
                    idx_a,
                    frag_b.data,
                    idx_b,
                    frag_c.data,
                    idx_c,
                )
            )
            return ib.get()

        # body, reset, update
        return _update(), _init(), _update()

    return te.decl_tensor_intrin(C.op, _intrin_func, binds={A: buf_a, B: buf_b, C: buf_c})
| 10,581 | 30.032258 | 100 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv2d_hwcn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements, unused-argument
"""Schedule for conv2d_hwcn with auto fusion"""
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import SplitEntity
from .. import nn, tag
@autotvm.register_topi_compute("conv2d_hwcn.cuda")
def conv2d_hwcn(cfg, data, kernel, strides, padding, dilation, out_dtype="float32"):
    """Compute declaration for conv2d in HWCN layout on CUDA.

    Forwards to the generic ``nn.conv2d_hwcn`` compute; ``cfg`` is only
    consumed by the matching schedule.
    """
    result = nn.conv2d_hwcn(data, kernel, strides, padding, dilation, out_dtype)
    return result
@autotvm.register_topi_schedule("conv2d_hwcn.cuda")
def schedule_conv2d_hwcn(cfg, outs):
    """Schedule for conv2d_hwcn and any element-wise operations.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of conv2d_hwcn in the format
        of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for conv2d_hwcn.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    sch = te.create_schedule([x.op for x in outs])

    def schedule(Apad, W, B):
        """Schedule conv2d_hwcn"""
        # Pad stage is folded into the consumers; data/weights are staged
        # through shared memory then registers ("local").
        sch[Apad].compute_inline()
        AA = sch.cache_read(Apad, "shared", [B])
        WW = sch.cache_read(W, "shared", [B])
        AL = sch.cache_read(AA, "local", [B])
        WL = sch.cache_read(WW, "local", [B])

        # If B is the graph output, accumulate through a local cache;
        # otherwise the fused epilogue op is the output and B stays local.
        if B.op in sch.outputs:
            Out = B
            BL = sch.cache_write(Out, "local")
        else:
            Out = sch.outputs[0].output(0)
            sch[B].set_scope("local")
            BL = B

        hi, wi, fi, ni = sch[Out].op.axis

        # Create tuning space
        n_thread_cand = [1, 2, 4, 8, 16, 32]
        vthread_cand = [1, 2, 4, 8]
        cfg.define_split(
            "tile_fi",
            fi,
            num_outputs=4,
            filter=lambda x: (x.size[1] in vthread_cand and x.size[2] in n_thread_cand),
        )
        cfg.define_split(
            "tile_ni",
            ni,
            num_outputs=4,
            filter=lambda x: (x.size[1] in vthread_cand and x.size[2] in n_thread_cand),
        )

        if cfg.is_fallback:
            cfg["tile_fi"] = SplitEntity([-1, 2, 8, 4])
            cfg["tile_ni"] = SplitEntity([-1, 2, 8, 4])

        # Scheduling
        step = 8  # reduction-channel chunk staged per shared-memory load

        bz = sch[Out].fuse(hi, wi)
        by, tyz, ty, fi = cfg["tile_fi"].apply(sch, Out, fi)
        bx, txz, tx, ni = cfg["tile_ni"].apply(sch, Out, ni)
        sch[Out].reorder(bz, by, bx, tyz, txz, ty, tx, fi, ni)

        sch[Out].bind(bz, te.thread_axis("blockIdx.z"))
        sch[Out].bind(by, te.thread_axis("blockIdx.y"))
        sch[Out].bind(bx, te.thread_axis("blockIdx.x"))
        sch[Out].bind(tyz, te.thread_axis("vthread"))
        sch[Out].bind(txz, te.thread_axis("vthread"))
        sch[Out].bind(ty, te.thread_axis("threadIdx.y"))
        sch[Out].bind(tx, te.thread_axis("threadIdx.x"))

        # Schedule BL local write
        sch[BL].compute_at(sch[Out], tx)
        yi, xi, fi, ni = sch[BL].op.axis
        ry, rx, rc = sch[BL].op.reduce_axis
        rco, rci = sch[BL].split(rc, factor=step)
        sch[BL].reorder(rco, ry, rx, rci, fi, ni)
        # Fuse the outer reduction loops into a single attachment point for
        # the shared-memory loads.
        fuse_index = sch[BL].fuse(ry, rx)
        fuse_index = sch[BL].fuse(fuse_index, rco)
        rx = fuse_index

        sch[AA].compute_at(sch[BL], rx)
        sch[WW].compute_at(sch[BL], rx)
        sch[AL].compute_at(sch[BL], rci)
        sch[WL].compute_at(sch[BL], rci)
        # Schedule for A's shared memory load
        yi, xi, ci, ni = sch[AA].op.axis
        ty, ci = sch[AA].split(ci, nparts=cfg["tile_fi"].size[2])
        tx, ni = sch[AA].split(ni, nparts=cfg["tile_ni"].size[2])
        _, ni = sch[AA].split(ni, factor=4)  # vectorized 4-wide load
        sch[AA].reorder(ty, tx, yi, xi, ci, ni)
        sch[AA].bind(ty, te.thread_axis("threadIdx.y"))
        sch[AA].bind(tx, te.thread_axis("threadIdx.x"))
        sch[AA].vectorize(ni)
        # Schedule for W's shared memory load
        yi, xi, ci, fi = sch[WW].op.axis
        ty, ci = sch[WW].split(ci, nparts=cfg["tile_fi"].size[2])
        tx, fi = sch[WW].split(fi, nparts=cfg["tile_ni"].size[2])
        _, fi = sch[WW].split(fi, factor=4)  # vectorized 4-wide load
        sch[WW].reorder(ty, tx, yi, xi, ci, fi)
        sch[WW].bind(ty, te.thread_axis("threadIdx.y"))
        sch[WW].bind(tx, te.thread_axis("threadIdx.x"))
        sch[WW].vectorize(fi)

    scheduled_ops = []

    def traverse(operator):
        """Traverse operators from computation graph"""
        # Inline broadcast/elementwise ops and recurse into their inputs;
        # dispatch the conv schedule when the tagged stage is found.
        if tag.is_broadcast(operator.tag):
            if operator not in sch.outputs:
                sch[operator].compute_inline()
            for tensor in operator.input_tensors:
                if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
                    traverse(tensor.op)
        elif operator.tag == "conv2d_hwcn":
            Apad = operator.input_tensors[0]
            W = operator.input_tensors[1]
            if isinstance(W.op, tvm.te.ComputeOp) and "dilate" in W.op.tag:
                sch[W].compute_inline()
            B = operator.output(0)
            schedule(Apad, W, B)
        else:
            raise RuntimeError(f"Unsupported operator: {operator.tag}")

        scheduled_ops.append(operator)

    traverse(outs[0].op)
    return sch
| 6,036 | 35.810976 | 97 | py |
tvm | tvm-main/python/tvm/topi/cuda/conv3d_ndhwc_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-function-args
# pylint: disable=too-many-statements, unused-argument, too-many-arguments
"""Tensorcore template for cuda backend"""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple, traverse_inline, simplify
from ..nn.pad import pad
from ..nn.utils import get_pad_tuple3d
from .tensor_intrin import intrin_wmma_load_matrix_A
from .tensor_intrin import intrin_wmma_load_matrix_W
from .tensor_intrin import intrin_wmma_store_matrix
from .tensor_intrin import intrin_wmma_gemm
def ndhwc_tensorcore_cuda(cfg, Input, Filter, stride, padding, dilation, out_dtype):
    """Compute declaration for conv3d tensorcore function.

    Input is NDHWC, Filter is DHWIO. Inputs are cast to float16 before the
    reduction so the matching schedule can map it onto wmma fragments.
    """
    assert isinstance(stride, int) or len(stride) == 3
    assert isinstance(dilation, int) or len(dilation) == 3

    if isinstance(stride, int):
        stride_d = stride_h = stride_w = stride
    else:
        stride_d, stride_h, stride_w = stride

    if isinstance(dilation, int):
        dilation_d = dilation_h = dilation_w = dilation
    else:
        dilation_d, dilation_h, dilation_w = dilation

    batch, in_depth, in_height, in_width, in_channel = get_const_tuple(Input.shape)
    kernel_d, kernel_h, kernel_w, _, num_filter = get_const_tuple(Filter.shape)
    # Shapes must match one of the supported wmma fragment combinations
    # (16x16x16, 32x16x8 or 8x16x32 for m/k/n respectively).
    assert (
        (batch % 16 == 0 and in_channel % 16 == 0 and num_filter % 16 == 0)
        or (batch % 8 == 0 and in_channel % 16 == 0 and num_filter % 32 == 0)
        or (batch % 32 == 0 and in_channel % 16 == 0 and num_filter % 8 == 0)
    ), (
        "The shape of (batch, in_channel, num_filter) "
        "must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32) for now"
    )

    # compute the output shape
    dilated_kernel_d = (kernel_d - 1) * dilation_d + 1
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_front, pad_top, pad_left, pad_back, pad_down, pad_right = get_pad_tuple3d(
        padding, (dilated_kernel_d, dilated_kernel_h, dilated_kernel_w)
    )
    out_channel = num_filter
    out_depth = simplify((in_depth - dilated_kernel_d + pad_front + pad_back) // stride_d + 1)
    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
    pad_before = [0, pad_front, pad_top, pad_left, 0]
    pad_after = [0, pad_back, pad_down, pad_right, 0]
    PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
    rc = te.reduce_axis((0, in_channel), name="rc")
    rz = te.reduce_axis((0, kernel_d), name="rz")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    # convert data type of input feature maps and weights
    # TODO: add checking here, datatype casting may cause precision loss
    TransPaddedInput = te.compute(
        PaddedInput.shape, lambda n, d, h, w, c: PaddedInput[n, d, h, w, c].astype("float16")
    )
    TransFilter = te.compute(
        Filter.shape, lambda d, h, w, i, o: Filter[d, h, w, i, o].astype("float16")
    )
    Output = te.compute(
        (batch, out_depth, out_height, out_width, out_channel),
        lambda nn, zz, yy, xx, ff: te.sum(
            TransPaddedInput[
                nn,
                zz * stride_d + rz * dilation_d,
                yy * stride_h + ry * dilation_h,
                xx * stride_w + rx * dilation_w,
                rc,
            ].astype(out_dtype)
            * TransFilter[rz, ry, rx, rc, ff].astype(out_dtype),
            axis=[rz, ry, rx, rc],
        ),
        name="Conv3dOutput",
        tag="conv3d_ndhwc_tensorcore",
    )
    return Output
def schedule_ndhwc_tensorcore_cuda(cfg, s, Conv):
    """Schedule tensorcore template for conv3d NDHWC.

    Mutates schedule ``s`` in place for the compute produced by
    ``ndhwc_tensorcore_cuda``: stages data/weights through shared memory
    and wmma fragments, then tensorizes load/gemm/store.
    """
    kd, kh, kw, ic = s[Conv].op.reduce_axis
    out_dtype = Conv.dtype
    trans_paddata, kernel = s[Conv].op.input_tensors
    in_dtype = trans_paddata.dtype
    batch, _, _, _, _ = get_const_tuple(Conv.shape)
    _, _, _, _, out_channels = get_const_tuple(kernel.shape)
    paddata = s[trans_paddata].op.input_tensors

    # inline the pad and dtype transform
    s[trans_paddata].compute_inline()
    s[kernel].compute_inline()
    s[paddata[0]].compute_inline()

    # Designate the memory hierarchy
    AS = s.cache_read(trans_paddata, "shared", [Conv])
    WS = s.cache_read(kernel, "shared", [Conv])
    AF = s.cache_read(AS, "wmma.matrix_a", [Conv])
    WF = s.cache_read(WS, "wmma.matrix_b", [Conv])
    ConvF = s.cache_write(Conv, "wmma.accumulator")

    # If Conv is the graph output, stage the accumulator through shared;
    # otherwise the fused epilogue op is the output and Conv stays shared.
    if Conv.op in s.outputs:
        output = Conv
        ConvS = s.cache_read(ConvF, "shared", [Conv])
        OL = ConvS
    else:
        output = s.outputs[0].output(0)
        s[Conv].set_scope("shared")
        OL = Conv

    # Schedule for autotvm
    cfg.define_knob("block_row_warps", [1, 2, 4])
    cfg.define_knob("block_col_warps", [1, 2, 4])
    cfg.define_knob("warp_row_tiles", [1, 2, 4])
    cfg.define_knob("warp_col_tiles", [1, 2, 4])
    cfg.define_knob("chunk", [1, 2, 4, 8])
    cfg.define_knob("offset", [0, 8])
    cfg.define_knob("vector_width", [1, 2, 4, 8])

    # Candidate wmma_m values depend on which shape constraint the input met.
    if batch % 16 == 0 and out_channels % 16 == 0:
        cfg.define_knob("wmma_m", [16, 8, 32])
    elif batch % 8 == 0 and out_channels % 32 == 0:
        cfg.define_knob("wmma_m", [8, 16, 32])
    elif batch % 32 == 0 and out_channels % 8 == 0:
        cfg.define_knob("wmma_m", [32, 16, 8])

    # fallback support
    target = tvm.target.Target.current()
    if cfg.is_fallback:
        ref_log = autotvm.tophub.load_reference_log(
            target.kind.name, target.model, "conv3d_ndhwc_tensorcore.cuda"
        )
        cfg.fallback_with_reference_log(ref_log)

    block_row_warps = cfg["block_row_warps"].val
    block_col_warps = cfg["block_col_warps"].val
    warp_row_tiles = cfg["warp_row_tiles"].val
    warp_col_tiles = cfg["warp_col_tiles"].val
    chunk = cfg["chunk"].val
    offset = cfg["offset"].val
    wmma_m = cfg["wmma_m"].val
    vector_width = cfg["vector_width"].val

    # wmma_n is fixed by wmma_m so that m*n = 256 with k = 16.
    wmma_k = 16
    if wmma_m == 16:
        wmma_n = 16
    elif wmma_m == 8:
        wmma_n = 32
    elif wmma_m == 32:
        wmma_n = 8

    warp_size = 32

    block_x = te.thread_axis("blockIdx.x")
    block_y = te.thread_axis("blockIdx.y")
    block_z = te.thread_axis("blockIdx.z")
    thread_x = te.thread_axis("threadIdx.x")
    thread_y = te.thread_axis("threadIdx.y")
    thread_z = te.thread_axis("threadIdx.z")

    # Define the intrin strides
    def get_strides(extents):
        # Row-major strides for the given extents.
        return [np.prod(extents[i:]).tolist() for i in range(len(extents))]

    # "offset" pads the shared-memory rows to reduce bank conflicts.
    AS_align = chunk * wmma_k + offset
    WS_align = warp_col_tiles * block_col_warps * wmma_n + offset
    block_factor_n = wmma_m * warp_row_tiles * block_row_warps
    block_factor_o = wmma_n * warp_col_tiles * block_col_warps
    CS_align = block_factor_o + offset
    AS_strides = get_strides([1, 1, 1, AS_align, 1])
    AL_strides = get_strides([1, 1, 1, wmma_k, 1])
    WS_strides = get_strides([WS_align, 1])
    WL_strides = get_strides([wmma_n * warp_col_tiles, 1])
    CL_strides = get_strides([1, 1, 1, wmma_n * warp_col_tiles, 1])
    CS_strides = get_strides([1, 1, 1, CS_align, 1])

    # Schedule for output
    nc, dc, hc, wc, oc = output.op.axis
    # One blockIdx.z per output voxel (d, h, w); blocks over batch/channels.
    block_k = s[output].fuse(dc, hc, wc)
    s[output].bind(block_k, block_z)
    block_i, nc = s[output].split(nc, factor=block_factor_n)
    block_j, oc = s[output].split(oc, factor=block_factor_o)
    s[output].reorder(block_k, block_i, block_j, nc, oc)
    t = s[output].fuse(nc, oc)
    t, ti = s[output].split(t, factor=vector_width)
    t, tx = s[output].split(t, factor=warp_size)
    t, ty = s[output].split(t, factor=block_row_warps)
    t, tz = s[output].split(t, factor=block_col_warps)
    s[output].bind(block_i, block_x)
    s[output].bind(block_j, block_y)
    s[output].bind(tz, thread_z)
    s[output].bind(ty, thread_y)
    s[output].bind(tx, thread_x)
    s[output].vectorize(ti)

    # Schedule wmma store
    s[OL].compute_at(s[output], block_j)
    nc, dc, hc, wc, oc = OL.op.axis
    s[OL].reorder(dc, hc, wc, nc, oc)
    s[OL].storage_align(wc, CS_align - 1, CS_align)
    oc, ooc = s[OL].split(oc, factor=wmma_n)
    oc, oci = s[OL].split(oc, factor=warp_col_tiles)
    _, oc = s[OL].split(oc, factor=block_col_warps)
    nc, nnc = s[OL].split(nc, factor=wmma_m)
    nc, nci = s[OL].split(nc, factor=warp_row_tiles)
    _, nc = s[OL].split(nc, factor=block_row_warps)
    s[OL].reorder(nc, oc, nci, oci, nnc, ooc)
    s[OL].bind(nc, thread_y)
    s[OL].bind(oc, thread_z)

    # Schedule wmma computation
    s[ConvF].compute_at(s[OL], oc)
    n, d, h, w, o = ConvF.op.axis
    n, nnf = s[ConvF].split(n, factor=wmma_m)
    o, oof = s[ConvF].split(o, factor=wmma_n)
    # Split input channels into (outer, chunk, wmma_k) for staged loading.
    ic, ii = s[ConvF].split(ic, factor=wmma_k)
    ko, ki = s[ConvF].split(ic, factor=chunk)
    s[ConvF].reorder(kd, kh, kw, ko, ki, n, o, nnf, oof, ii)

    s[AF].compute_at(s[ConvF], ki)
    s[WF].compute_at(s[ConvF], ki)

    # Schedule wmma load
    n, d, h, w, i = AF.op.axis
    n, nn = s[AF].split(n, factor=wmma_m)
    i, ii = s[AF].split(i, factor=wmma_k)
    s[AF].reorder(n, i, nn, ii)

    kd, kh, kw, i, o = WF.op.axis
    i, ii = s[WF].split(i, factor=wmma_k)
    o, oo = s[WF].split(o, factor=wmma_n)
    # NOTE(review): the second reorder immediately supersedes the first,
    # which looks redundant — confirm before removing.
    s[WF].reorder(o, i, oo)
    s[WF].reorder(i, o, ii, oo)

    s[WS].compute_at(s[ConvF], ko)
    s[AS].compute_at(s[ConvF], ko)

    # Schedule for data's share memory
    n, d, h, w, i = AS.op.axis
    s[AS].reorder(d, h, w, n, i)
    s[AS].storage_align(w, AS_align - 1, AS_align)
    t = s[AS].fuse(n, i)
    t, ti = s[AS].split(t, factor=vector_width)
    t, tx = s[AS].split(t, factor=warp_size)
    t, ty = s[AS].split(t, factor=block_row_warps)
    _, tz = s[AS].split(t, factor=block_col_warps)
    s[AS].bind(ty, thread_y)
    s[AS].bind(tz, thread_z)
    s[AS].bind(tx, thread_x)
    s[AS].vectorize(ti)

    # Schedule for kernel's share memory
    kd, kh, kw, ic, o = WS.op.axis
    t = s[WS].fuse(ic, o)
    s[WS].storage_align(ic, WS_align - 1, WS_align)
    t, ti = s[WS].split(t, factor=vector_width)
    t, tx = s[WS].split(t, factor=warp_size)
    t, ty = s[WS].split(t, factor=block_row_warps)
    _, tz = s[WS].split(t, factor=block_col_warps)
    s[WS].bind(ty, thread_y)
    s[WS].bind(tz, thread_z)
    s[WS].bind(tx, thread_x)
    s[WS].vectorize(ti)

    shape = (wmma_m, wmma_n, wmma_k)

    # tensorize the wmma process
    AS_shape = (wmma_m, 1, 1, 1, wmma_k)
    AL_shape = (wmma_m, 1, 1, 1, wmma_k)
    WS_shape = (wmma_k, wmma_n)
    WL_shape = (wmma_k, wmma_n)
    CL_shape = (wmma_m, 1, 1, 1, wmma_n)
    CS_shape = (wmma_m, 1, 1, 1, wmma_n)

    AL_gemm = te.placeholder(AL_shape, name="A", dtype=in_dtype)
    WL_gemm = te.placeholder(WL_shape, name="B", dtype=in_dtype)
    k_gemm = te.reduce_axis((0, wmma_k), name="k")
    # Reference gemm compute matched by the tensorize intrinsics below.
    CL_compute = te.compute(
        CL_shape,
        lambda ii, t0, t1, t2, jj: te.sum(
            AL_gemm[ii, t0, t1, t2, k_gemm].astype(out_dtype)
            * WL_gemm[k_gemm, jj].astype(out_dtype),
            axis=k_gemm,
        ),
        name="C",
    )

    s[AF].tensorize(
        nn,
        intrin_wmma_load_matrix_A(
            AL_strides, AS_strides, shape, "row_major", AS_shape, AL_shape, in_dtype
        ),
    )
    s[WF].tensorize(
        ii,
        intrin_wmma_load_matrix_W(
            WL_strides, WS_strides, shape, "row_major", WS_shape, WL_shape, in_dtype
        ),
    )
    s[OL].tensorize(
        nnc, intrin_wmma_store_matrix(CS_strides, CL_strides, shape, out_dtype, CL_shape, CS_shape)
    )
    s[ConvF].tensorize(
        nnf,
        intrin_wmma_gemm(AL_gemm, WL_gemm, CL_compute, AL_strides, WL_strides, CL_strides, shape),
    )

    # Report FLOPs (2 * MACs) to autotvm for throughput accounting.
    N, OD, OH, OW, CO = get_const_tuple(output.shape)
    KD, KH, KW, CI, _ = get_const_tuple(kernel.shape)
    cfg.add_flop(2 * N * OD * OH * OW * CO * CI * KD * KH * KW)
@autotvm.register_topi_compute("conv3d_ndhwc_tensorcore.cuda")
def conv3d_ndhwc_tensorcore(cfg, data, kernel, strides, padding, dilation, groups, out_dtype):
    """Compute declaration for conv3d with tensorcore (NDHWC layout).

    Grouped convolution is not implemented for the tensorcore path.
    """
    assert groups == 1, "tensorcore conv3d does not support groups"
    result = ndhwc_tensorcore_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype)
    return result
@autotvm.register_topi_schedule("conv3d_ndhwc_tensorcore.cuda")
def schedule_conv3d_ndhwc_tensorcore(cfg, outs):
    """TOPI schedule callback for the tensorcore conv3d NDHWC template."""
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _apply(op):
        # Only the tensorcore conv3d stage receives the dedicated schedule.
        if "conv3d_ndhwc_tensorcore" in op.tag:
            schedule_ndhwc_tensorcore_cuda(cfg, sch, op.output(0))

    traverse_inline(sch, outs[0].op, _apply)
    return sch
| 13,475 | 36.960563 | 99 | py |
tvm | tvm-main/python/tvm/topi/cuda/ssd/multibox.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, too-many-statements, too-many-function-args
"""SSD multibox operators"""
import math
import tvm
from tvm import te
from tvm.tir import if_then_else, exp
from tvm import topi
from ..nms import non_max_suppression
def multibox_prior_ir(data, out, sizes, ratios, steps, offsets):
    """Low level IR routine for the multibox_prior operator.

    Launches a 2-D grid of 2-D thread blocks, one thread per (row, col) cell
    of the input feature map. Each thread writes its cell's
    ``num_sizes + num_ratios - 1`` anchor boxes to ``out`` as consecutive
    [xmin, ymin, xmax, ymax] quadruples in normalized coordinates.

    Parameters
    ----------
    data : Buffer
        Input data buffer; only its height (shape[2]) and width (shape[3]) are read.
    out : Buffer
        Output buffer for the generated anchor boxes.
    sizes : tuple of float
        Tuple of sizes for anchor boxes.
    ratios : tuple of float
        Tuple of ratios for anchor boxes.
    steps : Tuple of float
        Priorbox step across y and x, -1 for auto calculation.
    offsets : tuple of int
        Priorbox center offsets, y and x respectively.

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    # Square thread block so both the row and the column dimension fit.
    max_threads = int(math.sqrt(tvm.target.Target.current(allow_none=False).max_num_threads))
    tx = te.thread_axis("threadIdx.x")
    ty = te.thread_axis("threadIdx.y")
    bx = te.thread_axis("blockIdx.x")
    by = te.thread_axis("blockIdx.y")
    ib = tvm.tir.ir_builder.create()
    p_out = ib.buffer_ptr(out)
    in_height = data.shape[2]
    in_width = data.shape[3]
    # (threadIdx.x, blockIdx.x) cover rows; (threadIdx.y, blockIdx.y) cover columns.
    nthread_tx = max_threads
    nthread_bx = in_height // max_threads + 1
    nthread_ty = max_threads
    nthread_by = in_width // max_threads + 1
    ib.scope_attr(tx, "thread_extent", nthread_tx)
    ib.scope_attr(ty, "thread_extent", nthread_ty)
    ib.scope_attr(bx, "thread_extent", nthread_bx)
    ib.scope_attr(by, "thread_extent", nthread_by)
    num_sizes = len(sizes)
    num_ratios = len(ratios)
    size_ratio_concat = sizes + ratios
    # A non-positive step means "auto": one step per input cell.
    steps_h = steps[0] if steps[0] > 0 else 1.0 / in_height
    steps_w = steps[1] if steps[1] > 0 else 1.0 / in_width
    offset_h = offsets[0]
    offset_w = offsets[1]
    i = bx * max_threads + tx
    j = by * max_threads + ty
    with ib.if_scope((i < in_height)):
        with ib.if_scope((j < in_width)):
            center_h = (i + offset_h) * steps_h
            center_w = (j + offset_w) * steps_w
            # Boxes k < num_sizes come from sizes[k]; the remaining boxes pair
            # sizes[0] with ratios[k - num_sizes + 1].
            for k in range(num_sizes + num_ratios - 1):
                w = if_then_else(
                    k < num_sizes,
                    float(size_ratio_concat[k]) * in_height / in_width / 2.0,
                    float(size_ratio_concat[0])
                    * in_height
                    / in_width
                    * math.sqrt(size_ratio_concat[k + 1])
                    / 2.0,
                )
                h = if_then_else(
                    k < num_sizes,
                    size_ratio_concat[k] / 2.0,
                    size_ratio_concat[0] / math.sqrt(size_ratio_concat[k + 1]) / 2.0,
                )
                # Flattened offset of box k of cell (i, j): 4 floats per box.
                count = (
                    i * in_width * (num_sizes + num_ratios - 1)
                    + j * (num_sizes + num_ratios - 1)
                    + k
                ) * 4
                p_out[count] = center_w - w
                p_out[count + 1] = center_h - h
                p_out[count + 2] = center_w + w
                p_out[count + 3] = center_h + h
    body = ib.get()
    return body
def multibox_prior(data, sizes=(1,), ratios=(1,), steps=(-1, -1), offsets=(0.5, 0.5), clip=False):
    """Generate prior (anchor) boxes from data, sizes and ratios.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, c_in, h_in, w_in]
    sizes : tuple of float
        Tuple of sizes for anchor boxes.
    ratios : tuple of float
        Tuple of ratios for anchor boxes.
    steps : Tuple of float
        Priorbox step across y and x, -1 for auto calculation.
    offsets : tuple of int
        Priorbox center offsets, y and x respectively.
    clip : boolean
        Whether to clip out-of-boundary boxes.

    Returns
    -------
    out : tvm.te.Tensor
        3-D tensor with shape [1, h_in * w_in * (num_sizes + num_ratios - 1), 4]
    """
    boxes_per_cell = len(sizes) + len(ratios) - 1
    out_shape = (1, data.shape[2] * data.shape[3] * boxes_per_cell, 4)
    anchors = te.extern(
        out_shape,
        [data],
        lambda ins, outs: multibox_prior_ir(ins[0], outs[0], sizes, ratios, steps, offsets),
        tag="multibox_prior",
    )
    # Optionally clamp boxes into the normalized [0, 1] image frame.
    return topi.clip(anchors, 0, 1) if clip else anchors
def transform_loc_pre(cls_prob, valid_count, temp_valid_count, temp_cls_id, temp_score, threshold):
    """Low level IR routine preparing data for transform location.

    One thread per (batch, anchor): pick the best non-background class and its
    score, zero the class when the score falls below ``threshold``, and emit a
    0/1 "is valid" flag. Threads with tid < batch_size then serially
    prefix-sum the flags of their batch row so that ``temp_valid_count``
    holds running counts and ``valid_count`` the per-batch totals.

    Parameters
    ----------
    cls_prob : Buffer
        Buffer of class probabilities, shape (batch, num_classes, num_anchors).
    valid_count : Buffer
        Buffer of number of valid output boxes, shape (batch,).
    temp_valid_count : Buffer
        Output intermediate result buffer (running count of valid anchors).
    temp_cls_id : Buffer
        Output intermediate result buffer (best class id per anchor; 0 means
        background / no detection).
    temp_score : Buffer
        Output buffer (best class score per anchor).
    threshold : float
        Threshold to be a positive prediction.

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    batch_size = cls_prob.shape[0]
    num_classes = cls_prob.shape[1]
    num_anchors = cls_prob.shape[2]
    ib = tvm.tir.ir_builder.create()
    cls_prob = ib.buffer_ptr(cls_prob)
    cls_id = ib.buffer_ptr(temp_cls_id)
    valid_count = ib.buffer_ptr(valid_count)
    temp_valid_count = ib.buffer_ptr(temp_valid_count)
    score = ib.buffer_ptr(temp_score)
    threshold = tvm.tir.FloatImm("float32", threshold)
    max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    nthread_tx = max_threads
    nthread_bx = (batch_size * num_anchors) // max_threads + 1
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", nthread_tx)
    ib.scope_attr(bx, "thread_extent", nthread_bx)
    tid = bx * max_threads + tx
    idxd = tvm.tir.indexdiv
    idxm = tvm.tir.indexmod
    with ib.if_scope(tid < batch_size * num_anchors):
        i = idxd(tid, num_anchors)  # batch index
        j = idxm(tid, num_anchors)  # anchor index
        valid_count[i] = 0
        score[tid] = -1.0
        cls_id[tid] = 0
        # Argmax over the non-background classes (class 0 is skipped; the
        # loop reads class k + 1 and stores the 1-based winner).
        with ib.for_range(0, num_classes - 1) as k:
            temp = cls_prob[i * num_classes * num_anchors + (k + 1) * num_anchors + j]
            cls_id[tid] = if_then_else(temp > score[tid], k + 1, cls_id[tid])
            score[tid] = tvm.te.max(temp, score[tid])
        # Demote sub-threshold detections back to background.
        with ib.if_scope(tvm.tir.all(cls_id[tid] > 0, score[tid] < threshold)):
            cls_id[tid] = 0
        with ib.if_scope(cls_id[tid] > 0):
            temp_valid_count[tid] = 1
        with ib.else_scope():
            temp_valid_count[tid] = 0
    # One thread per batch serially accumulates the 0/1 flags into a running
    # count; the last element is the batch's total number of valid anchors.
    with ib.if_scope(tid < batch_size):
        with ib.for_range(0, num_anchors) as k:
            with ib.if_scope(k > 0):
                temp_valid_count[tid * num_anchors + k] += temp_valid_count[
                    tid * num_anchors + k - 1
                ]
        valid_count[tid] = temp_valid_count[tid * num_anchors + num_anchors - 1]
    return ib.get()
def transform_loc_ir(
    loc_pred,
    anchor,
    temp_valid_count,
    temp_cls_id,
    temp_score,
    out,
    clip,
    variances,
    batch_size,
    num_anchors,
):
    """Low level IR routine for transform location in multibox_detection.

    One thread per (batch, anchor). Anchors with a positive class id have
    their location deltas decoded against the prior anchor box and are
    written, densely packed via the prefix counts in ``temp_valid_count``,
    into ``out`` as [class_id - 1, score, x1, y1, x2, y2] rows.

    Parameters
    ----------
    loc_pred : Buffer
        Buffer of location regression predictions.
    anchor : Buffer
        Buffer of prior anchor boxes.
    temp_valid_count : Buffer
        Intermediate result buffer; running count of valid anchors, used to
        compute each detection's packed output slot.
    temp_cls_id : Buffer
        Intermediate result buffer; best class id per anchor (0 = background).
    temp_score : Buffer
        Input buffer which stores intermediate results (best score per anchor).
    out : Buffer
        Output buffer.
    clip : boolean
        Whether to clip out-of-boundary boxes.
    variances : tuple of float
        Variances to be decoded from box regression output.
    batch_size : int
        Batch size
    num_anchors : int
        Number of anchors

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    def transform_loc(loc, loc_base_idx, anchor, anchor_base_idx, clip, vx, vy, vw, vh):
        """Transform prior anchor box to output box through location predictions."""
        # Anchor corners -> center/size form.
        al = anchor[anchor_base_idx]
        at = anchor[anchor_base_idx + 1]
        ar = anchor[anchor_base_idx + 2]
        ab = anchor[anchor_base_idx + 3]
        aw = ar - al
        ah = ab - at
        ax = (al + ar) / 2.0
        ay = (at + ab) / 2.0
        # Predicted deltas (dx, dy, dw, dh), each scaled by its variance.
        px = loc[loc_base_idx]
        py = loc[loc_base_idx + 1]
        pw = loc[loc_base_idx + 2]
        ph = loc[loc_base_idx + 3]
        ox = px * vx * aw + ax
        oy = py * vy * ah + ay
        ow = exp(pw * vw) * aw / 2.0
        oh = exp(ph * vh) * ah / 2.0
        # Back to corner form, optionally clipped into [0, 1].
        return (
            tvm.tir.if_then_else(clip, tvm.te.max(0.0, tvm.te.min(1.0, ox - ow)), ox - ow),
            tvm.tir.if_then_else(clip, tvm.te.max(0.0, tvm.te.min(1.0, oy - oh)), oy - oh),
            tvm.tir.if_then_else(clip, tvm.te.max(0.0, tvm.te.min(1.0, ox + ow)), ox + ow),
            tvm.tir.if_then_else(clip, tvm.te.max(0.0, tvm.te.min(1.0, oy + oh)), oy + oh),
        )
    ib = tvm.tir.ir_builder.create()
    loc_pred = ib.buffer_ptr(loc_pred)
    anchor = ib.buffer_ptr(anchor)
    temp_valid_count = ib.buffer_ptr(temp_valid_count)
    cls_id = ib.buffer_ptr(temp_cls_id)
    score = ib.buffer_ptr(temp_score)
    out_loc = ib.buffer_ptr(out)
    max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    nthread_tx = max_threads
    nthread_bx = (batch_size * num_anchors) // max_threads + 1
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", nthread_tx)
    ib.scope_attr(bx, "thread_extent", nthread_bx)
    tid = bx * max_threads + tx
    idxd = tvm.tir.indexdiv
    idxm = tvm.tir.indexmod
    with ib.if_scope(tid < batch_size * num_anchors):
        i = idxd(tid, num_anchors)  # batch index
        j = idxm(tid, num_anchors)  # anchor index
        with ib.if_scope(cls_id[tid] > 0):
            with ib.if_scope(j == 0):
                # The first anchor of a batch always writes to slot 0.
                out_base_idx = i * num_anchors * 6
                out_loc[out_base_idx] = cls_id[tid] - 1.0
                out_loc[out_base_idx + 1] = score[tid]
                (
                    out_loc[out_base_idx + 2],
                    out_loc[out_base_idx + 3],
                    out_loc[out_base_idx + 4],
                    out_loc[out_base_idx + 5],
                ) = transform_loc(
                    loc_pred,
                    tid * 4,
                    anchor,
                    j * 4,
                    clip,
                    variances[0],
                    variances[1],
                    variances[2],
                    variances[3],
                )
            with ib.else_scope():
                # Later anchors pack into the slot given by the number of
                # valid anchors preceding them in this batch row.
                out_base_idx = i * num_anchors * 6 + temp_valid_count[tid - 1] * 6
                out_loc[out_base_idx] = cls_id[tid] - 1.0
                out_loc[out_base_idx + 1] = score[tid]
                (
                    out_loc[out_base_idx + 2],
                    out_loc[out_base_idx + 3],
                    out_loc[out_base_idx + 4],
                    out_loc[out_base_idx + 5],
                ) = transform_loc(
                    loc_pred,
                    tid * 4,
                    anchor,
                    j * 4,
                    clip,
                    variances[0],
                    variances[1],
                    variances[2],
                    variances[3],
                )
    return ib.get()
def multibox_transform_loc(
    cls_prob, loc_pred, anchor, clip=True, threshold=0.01, variances=(0.1, 0.1, 0.2, 0.2)
):
    """Location transformation for multibox detection.

    Runs in two phases: phase one (``transform_loc_pre``) picks the best class
    per anchor and counts valid detections; phase two (``transform_loc_ir``)
    decodes the location predictions of the valid anchors into output boxes.

    Parameters
    ----------
    cls_prob : tvm.te.Tensor
        Class probabilities, shape (batch, num_classes, num_anchors).
    loc_pred : tvm.te.Tensor
        Location regression predictions.
    anchor : tvm.te.Tensor
        Prior anchor boxes.
    clip : boolean
        Whether to clip out-of-boundary boxes.
    threshold : float
        Threshold to be a positive prediction.
    variances : tuple of float
        Variances to be decoded from box regression output.

    Returns
    -------
    ret : tuple of tvm.te.Tensor composed of

    out : tvm.te.Tensor
        3-D tensor with shape (batch_size, num_anchors, 6)
    valid_count : tvm.te.Tensor
        1-D tensor with shape (batch_size,), number of valid anchor boxes.
    """
    batch_size = cls_prob.shape[0]
    num_anchors = cls_prob.shape[2]
    oshape = (batch_size, num_anchors, 6)
    # Define data alignment for intermediate buffers shared by the two phases.
    valid_count_dtype = "int32"
    out_loc_dtype = loc_pred.dtype
    valid_count_buf = tvm.tir.decl_buffer(
        (batch_size,), valid_count_dtype, "valid_count_buf", data_alignment=4
    )
    loc_pred_buf = tvm.tir.decl_buffer(
        loc_pred.shape, loc_pred.dtype, "loc_pred_buf", data_alignment=8
    )
    anchor_buf = tvm.tir.decl_buffer(anchor.shape, anchor.dtype, "anchor_buf", data_alignment=8)
    temp_valid_count_buf = tvm.tir.decl_buffer(
        (
            batch_size,
            num_anchors,
        ),
        valid_count_dtype,
        "temp_valid_count",
        data_alignment=8,
    )
    temp_cls_id_buf = tvm.tir.decl_buffer(
        (
            batch_size,
            num_anchors,
        ),
        valid_count_dtype,
        "temp_cls_id",
        data_alignment=8,
    )
    temp_score_buf = tvm.tir.decl_buffer(
        (
            batch_size,
            num_anchors,
        ),
        cls_prob.dtype,
        "temp_score",
        data_alignment=8,
    )
    # Phase one: per-anchor class selection, thresholding and valid counts.
    valid_count, temp_valid_count, temp_cls_id, temp_score = te.extern(
        [
            (batch_size,),
            (
                batch_size,
                num_anchors,
            ),
            (
                batch_size,
                num_anchors,
            ),
            (
                batch_size,
                num_anchors,
            ),
        ],
        [cls_prob],
        lambda ins, outs: transform_loc_pre(ins[0], outs[0], outs[1], outs[2], outs[3], threshold),
        dtype=[valid_count_dtype, valid_count_dtype, valid_count_dtype, cls_prob.dtype],
        out_buffers=[valid_count_buf, temp_valid_count_buf, temp_cls_id_buf, temp_score_buf],
        tag="multibox_transform_loc_phase_one",
    )
    # Phase two: decode location predictions of the valid anchors into boxes.
    out_loc = te.extern(
        [oshape],
        [loc_pred, anchor, temp_valid_count, temp_cls_id, temp_score],
        lambda ins, outs: transform_loc_ir(
            ins[0],
            ins[1],
            ins[2],
            ins[3],
            ins[4],
            outs[0],
            clip,
            variances,
            batch_size,
            num_anchors,
        ),
        in_buffers=[
            loc_pred_buf,
            anchor_buf,
            temp_valid_count_buf,
            temp_cls_id_buf,
            temp_score_buf,
        ],
        dtype=[out_loc_dtype],
        tag="multibox_transform_loc",
    )
    return [out_loc, valid_count]
def multibox_detection(
    cls_prob,
    loc_pred,
    anchor,
    clip=True,
    threshold=0.01,
    nms_threshold=0.5,
    force_suppress=False,
    variances=(0.1, 0.1, 0.2, 0.2),
    nms_topk=-1,
):
    """Convert multibox detection predictions.

    Decodes class scores and location regressions against the prior anchors
    (``multibox_transform_loc``), then removes overlapping detections with
    non-maximum suppression.

    Parameters
    ----------
    cls_prob : tvm.te.Tensor
        Class probabilities.
    loc_pred : tvm.te.Tensor
        Location regression predictions.
    anchor : tvm.te.Tensor
        Prior anchor boxes.
    clip : boolean
        Whether to clip out-of-boundary boxes.
    threshold : float
        Threshold to be a positive prediction.
    nms_threshold : float
        Non-maximum suppression threshold.
    force_suppress : boolean
        Whether to suppress all detections regardless of class_id.
    variances : tuple of float
        Variances to be decoded from box regression output.
    nms_topk : int
        Keep maximum top k detections before nms, -1 for no limit.

    Returns
    -------
    out : tvm.te.Tensor
        3-D tensor with shape (batch_size, num_anchors, 6)
    """
    transformed, valid_count = multibox_transform_loc(
        cls_prob, loc_pred, anchor, clip, threshold, variances
    )
    return non_max_suppression(
        transformed,
        valid_count,
        valid_count,
        max_output_size=-1,
        iou_threshold=nms_threshold,
        force_suppress=force_suppress,
        top_k=nms_topk,
        return_indices=False,
    )
| 17,262 | 28.866782 | 123 | py |
tvm | tvm-main/python/tvm/topi/cuda/ssd/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""VISION network operators"""
from __future__ import absolute_import as _abs
from .multibox import *
| 923 | 39.173913 | 62 | py |
tvm | tvm-main/python/tvm/topi/cuda/rcnn/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Faster R-CNN and Mask R-CNN operators"""
from .proposal import proposal
| 895 | 41.666667 | 62 | py |
tvm | tvm-main/python/tvm/topi/cuda/rcnn/proposal.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, singleton-comparison, bad-continuation
"""Proposal operator"""
import math
import tvm
from tvm import te
from ...vision.rcnn import generate_anchor, reg_bbox, reg_iou
from ...utils import get_const_tuple, get_const_int
def predict_bbox_ir(
    cls_prob_buf,
    bbox_pred_buf,
    im_info_buf,
    out_buf,
    scales,
    ratios,
    feature_stride,
    rpn_min_size,
    iou_loss,
):
    """Predict bounding boxes based on anchors, scores and deltas.

    One thread per (batch, h, w) feature-map cell; each thread decodes all of
    its anchors into image-space boxes, clamps them to the image, and writes
    [x1, y1, x2, y2, score] rows. Undersized boxes are inflated and given
    score -1 so downstream stages discard them.

    Parameters
    ----------
    cls_prob_buf : tvm.te.schedule.Buffer
        4-D with shape [batch, 2 * num_anchors, height, width]
    bbox_pred_buf : tvm.te.schedule.Buffer
        4-D with shape [batch, 4 * num_anchors, height, width]
    im_info_buf : tvm.te.schedule.Buffer
        2-D with shape [batch, 3]
    out_buf : tvm.te.schedule.Buffer
        3-D with shape [batch, num_bbox, 5]
        The last dimension is in format of [w_start, h_start, w_end, h_end, score]
    scales : list/tuple of float
        Scales of anchor windows.
    ratios : list/tuple of float
        Ratios of anchor windows.
    feature_stride : int
        The size of the receptive field each unit in the convolution layer of
        the rpn, for example the product of all stride's prior to this layer.
    rpn_min_size : int
        Minimum height or width in proposal.
    iou_loss : bool
        Usage of IoU loss.

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    batch, num_anchors, height, width = get_const_tuple(cls_prob_buf.shape)
    # cls_prob packs two channels (background, foreground) per anchor.
    num_anchors //= 2
    max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    nthread_tx = max_threads
    nthread_bx = (batch * height * width) // max_threads + 1
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    tid = bx * max_threads + tx
    ib = tvm.tir.ir_builder.create()
    ib.scope_attr(tx, "thread_extent", nthread_tx)
    ib.scope_attr(bx, "thread_extent", nthread_bx)
    p_score = ib.buffer_ptr(cls_prob_buf)
    p_delta = ib.buffer_ptr(bbox_pred_buf)
    p_im_info = ib.buffer_ptr(im_info_buf)
    p_out = ib.buffer_ptr(out_buf)
    idxm = tvm.tir.indexmod
    idxd = tvm.tir.indexdiv
    with ib.if_scope(tid < batch * height * width):
        # Decompose the linear thread id into (batch, row, col).
        w = idxm(tid, width)
        h = idxm(idxd(tid, width), height)
        b = idxd(idxd(tid, width), height)
        for k in range(num_anchors):
            out_index = tid * num_anchors + k
            # Anchor k enumerates all (ratio, scale) combinations.
            ratio = ratios[k // len(scales)]
            scale = scales[k % len(scales)]
            anchor = generate_anchor(ratio, scale, feature_stride)
            # im_info row: [im_height, im_width, scale]; the third entry
            # rescales rpn_min_size below.
            im_height = p_im_info[b * 3]
            im_width = p_im_info[b * 3 + 1]
            # Shift the template anchor to this cell's image-space position.
            x1 = anchor[0] + w * feature_stride
            y1 = anchor[1] + h * feature_stride
            x2 = anchor[2] + w * feature_stride
            y2 = anchor[3] + h * feature_stride
            delta = [
                p_delta[((((b * num_anchors + k) * 4 + i) * height + h) * width + w)]
                for i in range(4)
            ]
            regression_func = reg_iou if iou_loss else reg_bbox
            pred_x1, pred_y1, pred_x2, pred_y2 = regression_func(x1, y1, x2, y2, *delta)
            # Clamp the decoded box to the image boundaries.
            pred_x1 = tvm.te.max(tvm.te.min(pred_x1, im_width - 1.0), 0.0)
            pred_y1 = tvm.te.max(tvm.te.min(pred_y1, im_height - 1.0), 0.0)
            pred_x2 = tvm.te.max(tvm.te.min(pred_x2, im_width - 1.0), 0.0)
            pred_y2 = tvm.te.max(tvm.te.min(pred_y2, im_height - 1.0), 0.0)
            real_height = (im_height / feature_stride).astype("int32")
            real_width = (im_width / feature_stride).astype("int32")
            bbox_w = pred_x2 - pred_x1 + 1.0
            bbox_h = pred_y2 - pred_y1 + 1.0
            min_size = p_im_info[b * 3 + 2] * rpn_min_size
            # Foreground score of anchor k (second half of the channel dim).
            pred_score = p_score[((b * num_anchors * 2 + num_anchors + k) * height + h) * width + w]
            # Cells outside the valid (real) feature extent are marked invalid.
            pred_score = tvm.tir.Select(
                tvm.tir.any(h >= real_height, w >= real_width), -1.0, pred_score
            )
            p_out[out_index * 5 + 0] = pred_x1
            p_out[out_index * 5 + 1] = pred_y1
            p_out[out_index * 5 + 2] = pred_x2
            p_out[out_index * 5 + 3] = pred_y2
            p_out[out_index * 5 + 4] = pred_score
            # Undersized boxes are inflated and invalidated with score -1.
            with ib.if_scope(tvm.tir.any(bbox_w < min_size, bbox_h < min_size)):
                p_out[out_index * 5 + 0] -= min_size / 2.0
                p_out[out_index * 5 + 1] -= min_size / 2.0
                p_out[out_index * 5 + 2] += min_size / 2.0
                p_out[out_index * 5 + 3] += min_size / 2.0
                p_out[out_index * 5 + 4] = -1.0
    return ib.get()
def argsort_ir(data_buf, out_index_buf):
    """Batched odd-even transposition sort.

    Sorts each batch row of ``data_buf`` in place into descending order and
    writes the original positions of the sorted elements to
    ``out_index_buf``. Each (virtual) thread owns two adjacent elements;
    the classic odd/even phase alternation runs for ``num_bbox`` passes.

    Bug fix: the compare-exchange guard previously tested
    ``offset + 1 < num_bbox`` although ``offset`` already includes the batch
    row base ``start = b * num_bbox`` — for every batch row after the first
    the guard was always false and the row was never sorted. The bound is now
    taken relative to the row: ``offset + 1 < start + num_bbox`` (identical
    behavior for batch size 1).

    Parameters
    ----------
    data_buf : tvm.te.schedule.Buffer
        2-D with shape [batch, num_bbox]
    out_index_buf : tvm.te.schedule.Buffer
        2-D with shape [batch, num_bbox]. Indices of data in sorted order.

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    batch, num_bbox = get_const_tuple(data_buf.shape)
    max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
    ib = tvm.tir.ir_builder.create()
    p_data = ib.buffer_ptr(data_buf)
    index_out = ib.buffer_ptr(out_index_buf)
    nthread_tx = max_threads
    nthread_bx = (num_bbox + 1) // 2 // max_threads + 1
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("vthread")
    ib.scope_attr(tx, "thread_extent", nthread_tx)
    ib.scope_attr(bx, "virtual_thread", nthread_bx)
    tid = bx * nthread_tx + tx
    temp_data = ib.allocate("float32", (1,), name="temp_data", scope="local")
    temp_index = ib.allocate("int32", (1,), name="temp_index", scope="local")
    idxm = tvm.tir.indexmod
    with ib.for_range(0, batch, kind="unroll") as b:
        start = b * num_bbox
        # Initialize the index array of this row to the identity permutation.
        for i in range(2):
            bbox_id = tid * 2 + i
            with ib.if_scope(bbox_id < num_bbox):
                index_out[start + bbox_id] = bbox_id
        with ib.for_range(0, num_bbox) as k:
            # Even passes compare pairs (2t, 2t+1); odd passes (2t+1, 2t+2).
            offset = start + 2 * tid + idxm(k, 2)
            # Guard the pair against the end of THIS row (start + num_bbox),
            # not the end of the first row.
            with ib.if_scope(
                tvm.tir.all(offset + 1 < start + num_bbox, p_data[offset] < p_data[offset + 1])
            ):
                # Swap both the data and its index to keep them in sync.
                temp_data[0] = p_data[offset]
                p_data[offset] = p_data[offset + 1]
                p_data[offset + 1] = temp_data[0]
                temp_index[0] = index_out[offset]
                index_out[offset] = index_out[offset + 1]
                index_out[offset + 1] = temp_index[0]
            ib.emit(tvm.tir.Call(None, "tir.tvm_storage_sync", tvm.runtime.convert(["shared"])))
    return ib.get()
def nms_ir(sorted_bbox_buf, out_buf, nms_threshold):
    """Non-maximum suppression.

    Each thread owns one box ``i``; it walks over all higher-ranked boxes
    ``l`` and marks box ``i`` suppressed (``out_buf[...] = True``) when an
    unsuppressed higher-ranked box overlaps it beyond ``nms_threshold``.
    Input boxes must already be sorted by descending score.

    Parameters
    ----------
    sorted_bbox_buf : tvm.te.schedule.Buffer
        3-D with shape [batch, num_bbox, 5]. The last dimension is in format of
        [w_start, h_start, w_end, h_end, score].
    out_buf : tvm.te.schedule.Buffer
        2-D with shape [batch, num_bbox]. Boolean mask of whether a bounding box
        should be removed.
    nms_threshold : float
        Non-maximum suppression threshold.

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    def calculate_overlap(out_tensor, box_a_idx, box_b_idx):
        """Calculate overlap (IoU, inclusive +1 pixel convention) of two boxes."""
        w = tvm.te.max(
            0.0,
            tvm.te.min(out_tensor[box_a_idx + 2], out_tensor[box_b_idx + 2])
            - tvm.te.max(out_tensor[box_a_idx], out_tensor[box_b_idx])
            + 1.0,
        )
        h = tvm.te.max(
            0.0,
            tvm.te.min(out_tensor[box_a_idx + 3], out_tensor[box_b_idx + 3])
            - tvm.te.max(out_tensor[box_a_idx + 1], out_tensor[box_b_idx + 1])
            + 1.0,
        )
        i = w * h
        # Union = area(a) + area(b) - intersection.
        u = (
            (out_tensor[box_a_idx + 2] - out_tensor[box_a_idx] + 1.0)
            * (out_tensor[box_a_idx + 3] - out_tensor[box_a_idx + 1] + 1.0)
            + (out_tensor[box_b_idx + 2] - out_tensor[box_b_idx] + 1.0)
            * (out_tensor[box_b_idx + 3] - out_tensor[box_b_idx + 1] + 1.0)
            - i
        )
        return i / u
    batch, num_bbox = get_const_tuple(out_buf.shape)
    max_threads = int(math.sqrt(tvm.target.Target.current(allow_none=False).max_num_threads))
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib = tvm.tir.ir_builder.create()
    p_data = ib.buffer_ptr(sorted_bbox_buf)
    p_out = ib.buffer_ptr(out_buf)
    nthread_tx = max_threads
    nthread_bx = num_bbox // max_threads + 1
    ib.scope_attr(tx, "thread_extent", nthread_tx)
    ib.scope_attr(bx, "thread_extent", nthread_bx)
    i = bx * max_threads + tx
    with ib.for_range(0, batch, kind="unroll", name="n") as b:
        base_idx = b * num_bbox
        with ib.if_scope(i < num_bbox):
            # Start with every box kept.
            p_out[base_idx + i] = False
        with ib.for_range(0, num_bbox - 1) as l:
            # Only boxes ranked above i that are themselves still kept can
            # suppress box i.
            with ib.if_scope(tvm.tir.all(i < num_bbox, i > l, p_out[base_idx + l] == False)):
                iou = calculate_overlap(p_data, (base_idx + l) * 5, (base_idx + i) * 5)
                with ib.if_scope(iou > nms_threshold):
                    p_out[base_idx + i] = True
            ib.emit(tvm.tir.Call(None, "tir.tvm_storage_sync", tvm.runtime.convert(["shared"])))
    return ib.get()
def prepare_output_ir(sorted_bbox_buf, remove_mask_buf, out_buf):
    """Copy output after applying nms to continuous memory.

    One thread per batch (threadIdx.x is the batch index). Each thread counts
    its surviving boxes, then copies them into the output; when fewer than
    ``rpn_post_nms_top_n`` boxes survive, the copy loop repeats so the kept
    boxes are recycled until the output row count is filled.

    Parameters
    ----------
    sorted_bbox_buf : tvm.te.schedule.Buffer
        3-D with shape [batch, num_bbox, 5]. The last dimension is in format of
        [w_start, h_start, w_end, h_end, score].
    remove_mask_buf : tvm.te.schedule.Buffer
        2-D with shape [batch, num_bbox]. Boolean mask of whether a bounding box
        should be removed.
    out_buf : tvm.te.schedule.Buffer
        2-D with shape [batch * rpn_post_nms_top_n, 5]. The last dimension is in
        format of [batch_index, w_start, h_start, w_end, h_end].

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    batch, num_bbox, _ = get_const_tuple(sorted_bbox_buf.shape)
    rpn_post_nms_top_n = get_const_int(out_buf.shape[0]) // batch
    nthread_tx = batch
    tx = te.thread_axis("threadIdx.x")
    ib = tvm.tir.ir_builder.create()
    ib.scope_attr(tx, "thread_extent", nthread_tx)
    i = ib.allocate("int32", (1,), "i", scope="local")
    i[0] = 0
    p_sorted_bbox = ib.buffer_ptr(sorted_bbox_buf)
    p_remove = ib.buffer_ptr(remove_mask_buf)
    p_out = ib.buffer_ptr(out_buf)
    b = tx
    nkeep = ib.allocate("int32", (1,), "nkeep", scope="local")
    nkeep[0] = 0  # number of bbox after nms
    with ib.for_range(0, num_bbox) as j:
        with ib.if_scope(p_remove[b * num_bbox + j] == False):
            nkeep[0] += 1
    with ib.if_scope(nkeep[0] > 0):
        # Repeat ceil(top_n / nkeep) sweeps so the output is always filled to
        # exactly rpn_post_nms_top_n rows, duplicating kept boxes if needed.
        with ib.for_range(
            0, te.ceil(tvm.tir.const(rpn_post_nms_top_n, "float32") / nkeep[0]).astype("int32")
        ):
            with ib.for_range(0, num_bbox) as j:
                offset_j = (b * num_bbox + j) * 5
                offset_i = (b * rpn_post_nms_top_n + i[0]) * 5
                with ib.if_scope(
                    tvm.tir.all(i[0] < rpn_post_nms_top_n, p_remove[(b * num_bbox + j)] == False)
                ):
                    # Column 0 carries the batch index for ROI pooling.
                    p_out[offset_i] = tvm.tir.Cast("float32", b)
                    with ib.for_range(0, 4, kind="unroll") as k:
                        p_out[offset_i + k + 1] = p_sorted_bbox[offset_j + k]
                    i[0] = i[0] + 1
    body = ib.get()
    return body
def proposal(
    cls_prob,
    bbox_pred,
    im_info,
    scales,
    ratios,
    feature_stride,
    threshold,
    rpn_pre_nms_top_n,
    rpn_post_nms_top_n,
    rpn_min_size,
    iou_loss,
):
    """Proposal operator: turn RPN scores and box deltas into region proposals.

    Pipeline: decode and clip anchor boxes, sort them by objectness score,
    run non-maximum suppression, then pack the surviving top boxes into the
    output.

    Parameters
    ----------
    cls_prob : tvm.te.Tensor
        4-D with shape [batch, 2 * num_anchors, height, width]
    bbox_pred : tvm.te.Tensor
        4-D with shape [batch, 4 * num_anchors, height, width]
    im_info : tvm.te.Tensor
        2-D with shape [batch, 3]
    scales : list/tuple of float
        Scales of anchor windows.
    ratios : list/tuple of float
        Ratios of anchor windows.
    feature_stride : int
        The size of the receptive field each unit in the convolution layer of
        the rpn, for example the product of all stride's prior to this layer.
    threshold : float
        Non-maximum suppression threshold.
    rpn_pre_nms_top_n : int
        Number of top scoring boxes to apply NMS. -1 to use all boxes.
    rpn_post_nms_top_n : int
        Number of top scoring boxes to keep after applying NMS to RPN proposals.
    rpn_min_size : int
        Minimum height or width in proposal.
    iou_loss : bool
        Usage of IoU loss.

    Returns
    -------
    out : tvm.te.Tensor
        2-D tensor with shape [batch * rpn_post_nms_top_n, 5]. The last
        dimension is in format of [batch_index, w_start, h_start, w_end, h_end].
    """
    batch, _, height, width = get_const_tuple(cls_prob.shape)
    anchors_per_cell = len(scales) * len(ratios)
    total_bbox = height * width * anchors_per_cell
    # A non-positive value means "keep every box" before NMS.
    if rpn_pre_nms_top_n > 0:
        pre_nms_top_n = min(rpn_pre_nms_top_n, total_bbox)
    else:
        pre_nms_top_n = total_bbox
    decoded_bbox = te.extern(
        (batch, total_bbox, 5),
        [cls_prob, bbox_pred, im_info],
        lambda ins, outs: predict_bbox_ir(
            ins[0], ins[1], ins[2], outs[0], scales, ratios, feature_stride, rpn_min_size, iou_loss
        ),
        dtype=bbox_pred.dtype,
    )
    # The objectness score lives in the last column of each decoded box.
    obj_score = te.compute(
        (batch, total_bbox), lambda b, i: decoded_bbox[b, i, 4], tag="bbox_score"
    )
    order = te.extern(
        [obj_score.shape], [obj_score], lambda ins, outs: argsort_ir(ins[0], outs[0]), dtype="int32"
    )
    top_bbox = te.compute(
        (batch, pre_nms_top_n, 5),
        lambda b, i, j: decoded_bbox[b, order[b, i], j],
        tag="sorted_bbox",
    )
    suppressed = te.extern(
        (batch, pre_nms_top_n),
        [top_bbox],
        lambda ins, outs: nms_ir(ins[0], outs[0], threshold),
        dtype="bool",
    )
    return te.extern(
        (batch * rpn_post_nms_top_n, 5),
        [top_bbox, suppressed],
        lambda ins, outs: prepare_output_ir(ins[0], ins[1], outs[0]),
        dtype=top_bbox.dtype,
    )
| 15,163 | 34.512881 | 100 | py |
tvm | tvm-main/python/tvm/topi/x86/injective.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""x86 declaration and schedules."""
from tvm import te
from tvm.topi import tag
from tvm.tir import IntImm
from tvm.topi.generic.injective import (
schedule_injective_from_existing as schedule_injective_for_concat,
)
from ..utils import is_empty_shape
def schedule_injective_from_existing(sch, out):
    """Schedule for an injective op from an existing schedule.

    Parallelizes over the (possibly fused) leading axes and vectorizes the
    innermost axis in fixed 16-wide chunks.

    Parameters
    ----------
    sch: Schedule
        The schedule to update.
    out: Tensor
        The tensor representing the injective op.

    Returns
    -------
    sch: Schedule
        The updated schedule.
    """
    stage = sch[out]
    ndim = len(stage.op.axis)
    if ndim >= 5:
        stage.parallel(stage.fuse(stage.op.axis[0], stage.op.axis[1], stage.op.axis[2]))
    elif ndim >= 3:
        stage.parallel(stage.fuse(stage.op.axis[0], stage.op.axis[1]))
    elif ndim >= 1:
        stage.parallel(stage.op.axis[0])
    # Vectorize the innermost loop; split first so its extent is a constant 16.
    if ndim >= 1:
        outer, inner = stage.split(stage.op.axis[-1], factor=16)
        stage.vectorize(inner)
        # For a 1-D op the split just destroyed the parallel axis, so
        # re-parallelize the new outer loop.
        if ndim == 1:
            stage.parallel(outer)
    return sch
def schedule_injective(outs):
    """X86 schedule for injective ops.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of injective in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])
    te.schedule.AutoInlineInjective(sch)
    # Empty tensors have nothing to schedule.
    for tensor in outs:
        if not is_empty_shape(tensor.shape):
            schedule_injective_from_existing(sch, tensor)
    return sch
def schedule_concatenate(outs):
    """X86 schedule for concatenate op.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of injective in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """

    def _vectorize_inner(sch, tensor, limit):
        """Vectorize the innermost axis of ``tensor`` if its extent is static."""
        last_axis = sch[tensor].op.axis[len(sch[tensor].op.axis) - 1]
        extent = tensor.shape[len(tensor.shape) - 1]
        # Dynamic shapes cannot be vectorized; skip them.
        if not isinstance(extent, IntImm):
            return
        length = extent.value
        if length <= limit:
            sch[tensor].vectorize(last_axis)
            return
        # Otherwise split off the largest divisor not exceeding the limit.
        factor = next((i for i in range(limit, 1, -1) if length % i == 0), 1)
        if factor > 1:
            _, inner = sch[tensor].split(last_axis, factor)
            sch[tensor].vectorize(inner)

    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    out = outs[0]
    sch = te.create_schedule([t.op for t in outs])
    te.schedule.AutoInlineInjective(sch)
    axes = sch[out].op.axis
    if len(axes) >= 5:
        fused = sch[out].fuse(axes[0], axes[1], axes[2])
        _vectorize_inner(sch, out, 64)
        sch[out].parallel(fused)
    elif len(axes) >= 3:
        sch[out].parallel(sch[out].fuse(axes[0], axes[1]))
    else:
        sch[out].parallel(axes[0])
    return sch
def schedule_concatenate_cpu(outs):
    """X86 schedule for concatenate op.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    sch = te.create_schedule([t.op for t in outs])
    visited = []

    def _visit(op):
        # Injective stages (the concat and any fused elementwise ops) get the
        # generic injective-for-concat schedule.
        if tag.is_injective(op.tag):
            schedule_injective_for_concat(sch, op.output(0))
        for inp in op.input_tensors:
            if inp.op.input_tensors and inp.op not in visited:
                _visit(inp.op)
        visited.append(op)

    for out in outs:
        _visit(out.op)
    return sch
# Elementwise and broadcast ops share the generic injective schedule.
schedule_elemwise = schedule_injective
schedule_broadcast = schedule_injective
| 5,471 | 31.963855 | 92 | py |
tvm | tvm-main/python/tvm/topi/x86/pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable
"""Schedule for pooling operators"""
from tvm import te
from .. import tag
def _parallel_sch(sch, oshape, do_vectorize=False):
    """Parallelize (and optionally vectorize) a pooling-style stage.

    Parameters
    ----------
    sch : Stage
        The schedule stage to transform.
    oshape : tuple
        Static output shape; the last dimension drives the vector length.
    do_vectorize : bool
        When True, vectorize the innermost (channel) axis after fusing.
    """
    def vectorize(fused_axis, num_parallel_axis, vectorize_limit=64):
        """Internal vectorization utility function."""
        # Move the fused parallel axis outermost, keep remaining spatial axes,
        # then place the fused reduction and the innermost axis last.
        reorder_axis = [fused_axis]
        for i in range(num_parallel_axis, len(sch.op.axis) - 1):
            reorder_axis.append(sch.op.axis[i])
        k = sch.op.reduce_axis
        fuse_k = sch.fuse(*k)
        c = sch.op.axis[len(sch.op.axis) - 1]
        reorder_axis += [fuse_k, c]
        sch.reorder(*reorder_axis)
        inner_length = oshape[len(oshape) - 1].value
        if inner_length <= vectorize_limit:
            sch.vectorize(c)
        else:
            # Innermost axis is too long: split off the largest divisor not
            # exceeding the limit and vectorize that inner part.
            split_factor = 1
            for i in range(vectorize_limit, 1, -1):
                if inner_length % i == 0:
                    split_factor = i
                    break
            if split_factor > 1:
                _, c_i = sch.split(c, split_factor)
                sch.vectorize(c_i)
    # Fuse enough leading axes to expose parallelism, depending on rank.
    if len(sch.op.axis) >= 5:
        fused = sch.fuse(sch.op.axis[0], sch.op.axis[1], sch.op.axis[2])
        if do_vectorize:
            vectorize(fused, 3)
    elif len(sch.op.axis) >= 3:
        fused = sch.fuse(sch.op.axis[0], sch.op.axis[1])
        if do_vectorize:
            vectorize(fused, 2)
    else:
        # Rank < 3: nothing to fuse, parallelize the outermost axis directly.
        sch.parallel(sch.op.axis[0])
        return
    sch.parallel(fused)
def schedule_pool(outs, layout):
    """Schedule for pool
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of pool
        in the format of an array of tensors.
    layout: str
        Data layout.
    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    scheduled_ops = []
    def _schedule(PaddedInput, Pool):
        # The padding stage is pure data movement; inline it into the pool.
        if isinstance(PaddedInput.op, te.tensor.ComputeOp):
            s[PaddedInput].compute_inline()
        # Vectorize only when the innermost layout dim is not spatial
        # (e.g. the channel block of NCHWc / channel of NHWC).
        do_vectorize = layout[-1] not in "DHWdhw"
        _parallel_sch(s[Pool], outs[0].shape, do_vectorize)
    def traverse(OP):
        """Internal traverse function"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_injective(OP.tag):
            if OP not in s.outputs:
                s[OP].compute_inline()
            for tensor in OP.input_tensors:
                if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
                    traverse(tensor.op)
        # schedule pool
        elif OP.tag.startswith("pool"):
            # Average pool accumulation and division happens in different for loops (#3607).
            # To ensure good parallel support, apply multi-threading on the second loop.
            if OP != outs[0].op:
                output = outs[0]
                output_fused = s[output].fuse(output.op.axis[0], output.op.axis[1])
                s[output].parallel(output_fused)
            PaddedInput = OP.input_tensors[0]
            Pool = OP.output(0)
            _schedule(PaddedInput, Pool)
        else:
            raise RuntimeError(f"Unsupported operator: {OP.tag}")
        scheduled_ops.append(OP)
    traverse(outs[0].op)
    return s
def schedule_adaptive_pool(outs):
    """X86 schedule for adaptive pool.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of adaptive pool
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    s = te.create_schedule([t.op for t in outs])
    visited = []

    def _walk(operator):
        """Internal traverse function"""
        if tag.is_injective(operator.tag):
            # Inline every one-to-one mapping stage except the final output,
            # then keep walking up through its producers.
            if operator not in s.outputs:
                s[operator].compute_inline()
            for inp in operator.input_tensors:
                if isinstance(inp.op, te.tensor.ComputeOp) and inp.op not in visited:
                    _walk(inp.op)
        elif operator.tag.startswith("adaptive_pool"):
            # When the pool is not the final stage, parallelize the fused
            # leading axes of the real output as well.
            if operator != outs[0].op:
                out_t = outs[0]
                fused = s[out_t].fuse(out_t.op.axis[0], out_t.op.axis[1])
                s[out_t].parallel(fused)
            _parallel_sch(s[operator.output(0)], outs[0].shape)
        else:
            raise RuntimeError(f"Unsupported operator: {operator.tag}")
        visited.append(operator)

    _walk(outs[0].op)
    return s
| 5,570 | 33.388889 | 97 | py |
tvm | tvm-main/python/tvm/topi/x86/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
# pylint: disable=no-value-for-parameter,import-outside-toplevel
"""Conv2D schedule on x86"""
import logging
import tvm
from tvm import te
from tvm import autotvm
from tvm.contrib import dnnl
from .. import nn
from ..generic import schedule_extern
from ..nn.conv2d import conv2d_infer_layout, _get_workload as _get_conv2d_workload
from ..nn.conv2d import unpack_NCHWc_to_nchw
from ..nn.depthwise_conv2d import _get_workload as _get_depthwise_conv2d_workload
from ..nn.utils import get_pad_tuple
from ..utils import get_const_tuple, traverse_inline
from . import conv2d_avx_1x1, conv2d_avx_common
logger = logging.getLogger("topi")
def _get_default_config(
    cfg, data, kernel, strides, padding, dilation, out_dtype, is_depthwise=False, layout="NCHW"
):
    """
    Get default schedule config for the workload
    """
    # Replace any dynamic dimension with 1 so a static workload can be built.
    static_data_shape = [
        1 if isinstance(dim, tvm.tir.Var) else dim for dim in get_const_tuple(data.shape)
    ]
    data = te.placeholder(static_data_shape, dtype=data.dtype)
    if is_depthwise:
        wkl = _get_depthwise_conv2d_workload(data, kernel, strides, padding, dilation, out_dtype)
        from .depthwise_conv2d import _fallback_schedule

        _fallback_schedule(cfg, wkl)
    else:
        wkl = _get_conv2d_workload(data, kernel, strides, padding, dilation, out_dtype, layout)
        # 1x1 kernels have a dedicated fallback schedule.
        if wkl.kernel_h == 1 and wkl.kernel_w == 1:
            conv2d_avx_1x1._fallback_schedule(cfg, wkl)
        else:
            conv2d_avx_common._fallback_schedule(cfg, wkl)
@conv2d_infer_layout.register("cpu")
def _conv2d_infer_layout(workload, cfg):
    """Infer the blocked NCHWc input/output shapes and layouts for a workload."""
    _, data, kernel, strides, padding, dilation, layout, _, dtype = workload
    batch_size, in_channel, in_height, in_width = data[1]
    out_channel, _, k_height, k_width = kernel[1]
    idxdiv = tvm.tir.indexdiv

    # Effective (dilated) kernel extent determines the output spatial size.
    pt, pl, pb, pr = get_pad_tuple(padding, (k_height, k_width))
    if isinstance(dilation, (tuple, list)):
        hdilation, wdilation = dilation
    else:
        hdilation = wdilation = dilation
    dilated_kernel_h = (k_height - 1) * hdilation + 1
    dilated_kernel_w = (k_width - 1) * wdilation + 1
    out_height = idxdiv(in_height + pt + pb - dilated_kernel_h, strides[0]) + 1
    out_width = idxdiv(in_width + pl + pr - dilated_kernel_w, strides[1]) + 1

    # Channel blocking factors come from the tuned split configuration.
    tile_ic = cfg["tile_ic"].size[-1]
    tile_oc = cfg["tile_oc"].size[-1]
    in_shape = (batch_size, idxdiv(in_channel, tile_ic), in_height, in_width, tile_ic)
    out_shape = (batch_size, idxdiv(out_channel, tile_oc), out_height, out_width, tile_oc)
    return ((in_shape, f"NCHW{tile_ic}c"),), ((out_shape, f"NCHW{tile_oc}c"),)
def schedule_conv2d_nhwc(outs):
    """Create schedule for conv2d_nhwc"""
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    output_op = outs[0].op
    def _callback(op):
        if "conv2d_nhwc" in op.tag:
            conv = op.output(0)
            kernel = op.input_tensors[1]
            # Inline kernel dilation; it is pure data movement.
            if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
                s[kernel].compute_inline()
            data = op.input_tensors[0]
            data_pad = None
            # If the input goes through a pad stage, parallelize the padding
            # over the fused batch/height axes.
            if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
                data_pad = data
                data = data_pad.op.input_tensors[0]
                n_pad, h_pad, w_pad, c_pad = data_pad.op.axis
                pad_fused = s[data_pad].fuse(n_pad, h_pad)
                s[data_pad].parallel(pad_fused)
            C = conv
            n, h, w, c = C.op.axis
            # Channels are innermost in NHWC, so vectorize along them.
            s[C].vectorize(c)
            O = output_op.output(0)
            if len(O.op.axis) == 4:  # schedule bias + bn + relu
                n, h, w, c = O.op.axis
                fused = s[O].fuse(n, h, w)
                s[O].parallel(fused)
                channels = int(O.shape[-1])
                # Split and vectorize channels of the fused epilogue when the
                # count is a multiple of the 64-lane heuristic.
                if channels % 64 == 0:
                    c, ci = s[O].split(c, 64)
                    s[O].vectorize(ci)
            # When the conv is not the final op, compute it at the epilogue's
            # channel loop to keep the data hot.
            if C != O:
                s[C].compute_at(s[O], c)
    traverse_inline(s, output_op, _callback)
    return s
def conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype):
    """Compute conv2d in NCHW layout via the blocked NCHWc kernel, unpacking back."""
    packed = conv2d_NCHWc(data, kernel, strides, padding, dilation, "NCHW", "NCHW", out_dtype)
    return unpack_NCHWc_to_nchw(packed, out_dtype)
def schedule_conv2d_nchw(outs):
    """Create schedule for tensors"""
    # NCHW conv2d is computed through the NCHWc implementation, so its
    # schedule is reused directly.
    return schedule_conv2d_NCHWc(outs)
def _pack_data(cfg, data, kernel):
    """Relayout data to NCHWc and kernel to OIHWio using the tuned block sizes."""
    n, _, ih, iw = get_const_tuple(data.shape)
    oc, ic, kh, kw = get_const_tuple(kernel.shape)
    ic_bn = cfg["tile_ic"].size[-1]
    oc_bn = cfg["tile_oc"].size[-1]

    # Handle dynamic shape to pass tuning dispatch.
    if isinstance(n, tvm.tir.Any):
        n = tvm.te.size_var("n")
    if isinstance(ih, tvm.tir.Any):
        ih = tvm.te.size_var("ih")
    if isinstance(iw, tvm.tir.Any):
        iw = tvm.te.size_var("iw")
    if isinstance(ic, tvm.tir.Any):
        raise RuntimeError("Dynamic input channel is not supported for conv2d.")

    packed_data = te.compute(
        (n, ic // ic_bn, ih, iw, ic_bn),
        lambda bs, c, h, w, vc: data[bs, c * ic_bn + vc, h, w],
        name="data_vec",
    )
    packed_kernel = te.compute(
        (oc // oc_bn, ic // ic_bn, kh, kw, ic_bn, oc_bn),
        lambda occ, icc, k_h, k_w, icb, ocb: kernel[occ * oc_bn + ocb, icc * ic_bn + icb, k_h, k_w],
        name="kernel_vec",
    )
    return packed_data, packed_kernel
@autotvm.register_topi_compute("conv2d_NCHWc.x86")
def conv2d_NCHWc(cfg, data, kernel, strides, padding, dilation, layout, out_layout, out_dtype):
    """Compute conv2d with NCHWc layout.

    Accepts either already-packed 5-D data / 6-D kernel, or raw 4-D NCHW data
    and OIHW kernel (the latter only during autotuning), defines the autotvm
    tuning space, and dispatches to the generic NCHWc conv2d compute.
    """
    # layout and out_layout are not used here,
    # we keep them for debug convenience when dumping autotvm workload
    if len(data.shape) == 5:
        # Already packed: recover logical channel counts from the blocks.
        n, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape)
        oc_chunk, ic_chunk_group, kernel_height, kernel_width, _, oc_bn = get_const_tuple(
            kernel.shape
        )
        in_channel = ic_chunk * ic_bn
        num_filter = oc_chunk * oc_bn
    else:
        n, in_channel, ih, iw = get_const_tuple(data.shape)
        num_filter, _, kernel_height, kernel_width = get_const_tuple(kernel.shape)
    # Define autotvm tuning space
    is_kernel_1x1 = kernel_height == 1 and kernel_width == 1
    pt, pl, pb, pr = get_pad_tuple(padding, (kernel_height, kernel_width))
    sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    oh = (ih - kernel_height + pt + pb) // sh + 1
    ow = (iw - kernel_width + pl + pr) // sw + 1
    cfg.define_split("tile_ic", in_channel, num_outputs=2)
    cfg.define_split("tile_oc", num_filter, num_outputs=2)
    # Output-width split is only definable when the extent is static.
    if isinstance(ow, (tvm.tir.IntImm, int)):
        cfg.define_split(
            "tile_ow", ow, num_outputs=2, filter=lambda y: y.size[-1] <= 64, policy="verbose"
        )
    if is_kernel_1x1:
        if isinstance(oh, (tvm.tir.IntImm, int)):
            cfg.define_knob("tile_oh", [1, 2] if oh > 1 else [1])
    else:
        cfg.define_knob("unroll_kw", [True, False])
    # If no config was set, we can fallback to default config.
    if cfg.is_fallback:
        _get_default_config(
            cfg,
            te.placeholder((n, in_channel, ih, iw), dtype=data.dtype),
            te.placeholder(
                (num_filter, in_channel, kernel_height, kernel_width), dtype=kernel.dtype
            ),
            strides,
            padding,
            dilation,
            out_dtype,
        )
    # Pack data if raw 4-D data is provided.
    # This can only happen when autotuning.
    if len(data.shape) == 4:
        if autotvm.GLOBAL_SCOPE.in_tuning:
            # Directly use modified data layout placeholder.
            dshape = (n, in_channel // cfg["tile_ic"].size[-1], ih, iw, cfg["tile_ic"].size[-1])
            data = tvm.te.placeholder(dshape, data.dtype, name="data")
            kshape = (
                num_filter // cfg["tile_oc"].size[-1],
                in_channel // cfg["tile_ic"].size[-1],
                kernel_height,
                kernel_width,
                cfg["tile_ic"].size[-1],
                cfg["tile_oc"].size[-1],
            )
            kernel = tvm.te.placeholder(kshape, kernel.dtype, name="kernel")
        else:
            data, kernel = _pack_data(cfg, data, kernel)
    return nn.conv2d_NCHWc(data, kernel, strides, padding, dilation, layout, out_layout, out_dtype)
@autotvm.register_topi_schedule("conv2d_NCHWc.x86")
def schedule_conv2d_NCHWc(cfg, outs):
    """Create schedule for tensors"""
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    s = te.create_schedule([t.op for t in outs])

    def _callback(op):
        if "conv2d_NCHWc" not in op.tag:
            return
        conv_out = op.output(0)
        data_vec = conv_out.op.input_tensors[0]
        kernel_vec = conv_out.op.input_tensors[1]
        _, _, kh, kw, _, _ = get_const_tuple(kernel_vec.shape)
        args = [s, cfg, data_vec, kernel_vec, conv_out, outs[0]]
        # 1x1 kernels use the dedicated schedule; everything else the common one.
        if (kh, kw) == (1, 1):
            conv2d_avx_1x1._schedule_conv_NCHWc(*args)
        else:
            conv2d_avx_common._schedule_conv_NCHWc(*args)

    traverse_inline(s, outs[0].op, _callback)
    return s
@autotvm.register_topi_compute("conv2d_nchw_dnnl.x86")
def conv2d_nchw_dnnl(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute conv2d in NCHW format using dnnl."""
    # Non-grouped convolution (groups=1); channels_last flag is False for NCHW.
    return dnnl.dnnl_conv2d(data, kernel, strides, padding, dilation, 1, False, out_dtype)
@autotvm.register_topi_schedule("conv2d_nchw_dnnl.x86")
def schedule_conv2d_nchw_dnnl(_, outs):
    """Create schedule for conv2d_nchw_dnnl"""
    # dnnl runs the conv as an external call, so the generic extern schedule suffices.
    return schedule_extern(outs)
@autotvm.register_topi_compute("conv2d_nhwc_dnnl.x86")
def conv2d_nhwc_dnnl(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute conv2d in NHWC format using dnnl."""
    # Non-grouped convolution (groups=1); channels_last flag is True for NHWC.
    return dnnl.dnnl_conv2d(data, kernel, strides, padding, dilation, 1, True, out_dtype)
@autotvm.register_topi_schedule("conv2d_nhwc_dnnl.x86")
def schedule_conv2d_nhwc_dnnl(_, outs):
    """Create schedule for conv2d_nhwc_dnnl"""
    # dnnl runs the conv as an external call, so the generic extern schedule suffices.
    return schedule_extern(outs)
# FIXME - https://github.com/apache/tvm/issues/4122
# _declaration_conv_nhwc_pack expects kernel layout to be HWOI. However, the tests use HWIO
# layout. Commenting until we have clarity about the nhwc_pack implementation from the author.
# elif layout == 'NHWC' and kh == 1 and kw == 1 and kernel.dtype == "int8":
# if cfg.is_fallback:
# _get_default_config(cfg, data, kernel, strides, padding, out_dtype, False, layout)
# # specialize for INT8 1X1 conv on X86
# return conv2d_avx_1x1._declaration_conv_nhwc_pack(cfg, data, kernel, strides,
# padding, dilation, out_dtype)
| 11,899 | 38.144737 | 100 | py |
tvm | tvm-main/python/tvm/topi/x86/concat.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"concatenate related operators"
from typing import Optional
import numpy as np
import tvm
from tvm import te
from ..utils import get_const_int
def concatenate(data: tvm.te.Tensor, axis: Optional[int] = 0):
    """Join a sequence of arrays along an existing axis.
    Optimized for CPU execution.
    Parameters
    ----------
    data : tuple of tvm.te.Tensor
        The arrays to concatenate
    axis : int, optional
        The axis along which the arrays will be joined. Default is 0.
    Returns
    -------
    ret : tvm.te.Tensor
    """
    # Per-input flattened size from `axis` inward, and each input's starting
    # offset within one output slice. (A negative `axis` slices identically to
    # the normalized one here, since Python slicing handles negatives.)
    in_outers = [int(np.prod(i.shape[axis:])) for i in data]
    in_outers_cumsum = [0, *np.cumsum(in_outers, dtype="int64")[0:-1]]
    def gen_ir_1d(data_bufs, out_buf):
        """Custom concatenation execution."""
        i_b = tvm.tir.ir_builder.create()
        data_bufs1 = [i_b.buffer_ptr(data_buf) for data_buf in data_bufs]
        out_buf = i_b.buffer_ptr(out_buf)
        # Sequentially copy each input into its slot of the output.
        for i in range(len(data)):
            with i_b.for_range(0, in_outers[i], name="j") as j:
                out_buf[in_outers_cumsum[i] + j] = data_bufs1[i][j]
        return i_b.get()
    def gen_ir(data_bufs, out_buf, inner, outer):
        """Common case of concatenation execution."""
        i_b = tvm.tir.ir_builder.create()
        data_bufs1 = [i_b.buffer_ptr(data_buf) for data_buf in data_bufs]
        out_buf = i_b.buffer_ptr(out_buf)
        if inner > 1:
            # Parallelize over the `inner` leading slices; `outer` is the
            # flattened size of one output slice.
            with i_b.for_range(0, inner, name="inn", kind="parallel") as inn:
                pos = inn * outer
                for i in range(len(data)):
                    offset = inn * in_outers[i]
                    with i_b.for_range(0, in_outers[i], name="j") as j:
                        out_buf[pos + in_outers_cumsum[i] + j] = data_bufs1[i][offset + j]
        else:
            # Single slice: parallelize the per-input copy loops instead.
            for i in range(len(data)):
                with i_b.for_range(0, in_outers[i], name="j", kind="parallel") as j:
                    out_buf[in_outers_cumsum[i] + j] = data_bufs1[i][j]
        return i_b.get()
    if axis < 0:
        axis += len(data[0].shape)
    concat_axis_sizes = [int(t.shape[axis]) for t in data]
    join_size = int(np.sum(concat_axis_sizes))
    dtype = data[0].dtype
    out_shape = data[0].shape[:axis] + [join_size] + data[0].shape[axis + 1 :]
    # left_val = product of dims before `axis`, right_val = from `axis` on.
    right_val = np.prod(out_shape[axis:])
    left_val = np.prod(out_shape[:axis])
    if (
        len(data[0].shape) == 1
        or (left_val == 1 and axis == len(data[0].shape) - 1)
        or (left_val == 1 and right_val == 1)
    ):
        # badly parallelized case
        return te.extern(
            [out_shape],
            list(data),
            lambda ins, outs: gen_ir_1d(ins, outs[0]),
            dtype=dtype,
            name="concatenate_ext",
        )
    inner = get_const_int(int(left_val))
    outer = get_const_int(int(right_val))
    return te.extern(
        [out_shape],
        list(data),
        lambda ins, outs: gen_ir(ins, outs[0], inner, outer),
        dtype=dtype,
        name="concatenate_ext",
    )
| 3,799 | 34.514019 | 90 | py |
tvm | tvm-main/python/tvm/topi/x86/reduction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""x86 declaration and schedules."""
import tvm
from tvm import te
from .injective import schedule_injective_from_existing
from .. import tag
from ..utils import get_const_tuple
def _schedule_reduce(sch, op, is_idx_reduce=False):
    """Parallelize a reduction stage over its output axes.

    For index reductions (argmax/argmin style) the scheduled tensor is the
    op's first input; the visible output only has its axes fused.
    """
    if is_idx_reduce:
        real_out = op.output(0)
        fused = sch[real_out].fuse(*sch[real_out].op.axis)
        out = op.input_tensors[0]
    else:
        out = op.output(0)
    # Check whether every output dim is a static int.
    const_shape = True
    out_shape = get_const_tuple(out.shape)
    for d in out_shape:
        if not isinstance(d, int):
            const_shape = False
            break
    if const_shape:
        naxes = len(sch[out].op.axis)
        parallelism = 1
        fuse_axes = []
        # We choose a heuristic number 128 to limit the maximum parallelism
        while len(fuse_axes) < naxes and parallelism < 128:
            ivar = sch[out].op.axis[len(fuse_axes)]
            parallelism *= int(ivar.dom.extent)
            fuse_axes.append(ivar)
        fused = sch[out].fuse(*fuse_axes)
        sch[out].parallel(fused)
    else:
        # Dynamic shape: fall back to a fixed fusing strategy by rank.
        if len(sch[out].op.axis) >= 5:
            # avoid too many parallelism
            fused = sch[out].fuse(sch[out].op.axis[0], sch[out].op.axis[1], sch[out].op.axis[2])
            sch[out].parallel(fused)
        else:
            fused = sch[out].fuse(*sch[out].op.axis)
            sch[out].parallel(fused)
def schedule_reduce(outs):
    """X86 schedule for reduction op.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of injective in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([t.op for t in outs])
    visited = []

    def _before_reduce(operator):
        """Inline every injective producer feeding the reduction stage."""
        if isinstance(operator, tvm.te.PlaceholderOp):
            return
        if not tag.is_injective(operator.tag):
            raise RuntimeError(f"Unsupported operator: {operator.tag}")
        sch[operator].compute_inline()
        for inp in operator.input_tensors:
            if inp.op not in visited:
                _before_reduce(inp.op)
        visited.append(operator)

    def _after_reduce(operator):
        """Schedule the reduction itself and any consumers after it."""
        if tag.is_broadcast(operator.tag):
            if operator not in visited:
                schedule_injective_from_existing(sch, operator)
            for inp in operator.input_tensors:
                _after_reduce(inp.op)
        elif operator.tag == "comm_reduce":
            _schedule_reduce(sch, operator, is_idx_reduce=False)
            for inp in operator.input_tensors:
                if inp.op not in visited:
                    _before_reduce(inp.op)
        elif operator.tag == "comm_reduce_idx":
            # Index reductions wrap the real compute in their first input.
            _schedule_reduce(sch, operator, is_idx_reduce=True)
            for inp in operator.input_tensors[0].op.input_tensors:
                if inp.op not in visited:
                    _before_reduce(inp.op)
        elif isinstance(operator, tvm.te.PlaceholderOp):
            pass
        else:
            raise RuntimeError(f"Unsupported operator: {operator} (tag: {operator.tag})")
        visited.append(operator)

    _after_reduce(outs[0].op)
    return sch
| 4,378 | 35.190083 | 96 | py |
tvm | tvm-main/python/tvm/topi/x86/math_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Legalization transforms for math operations on x86"""
import logging
from tvm import relay
from ..math import erf_legalize
logger = logging.getLogger("topi")
@erf_legalize.register("cpu")
def _erf_legalize(attrs, inputs, arg_types):
    """Legalizes ERF op if needed.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    data = inputs[0]
    data_dtype = arg_types[0].dtype
    # fp32 input is already supported; returning None keeps the op unchanged.
    if data_dtype == "float32":
        return None
    # Otherwise cast up to fp32, evaluate erf, and cast the result back.
    casted = relay.cast(data, "float32")
    return relay.cast(relay.erf(casted), data_dtype)
| 1,887 | 31 | 72 | py |
tvm | tvm-main/python/tvm/topi/x86/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-locals,unused-argument
# pylint: disable=no-value-for-parameter,unused-variable
"""x86 dense operators"""
from __future__ import absolute_import as _abs
import tvm
from tvm import autotvm, te
from tvm.autotvm.task.space import SplitEntity
from tvm.contrib import cblas, dnnl, mkl
from tvm.target.x86 import get_simd_32bit_lanes, target_has_amx, target_has_avx512
from .. import generic, tag
from ..utils import get_const_tuple, traverse_inline
from .tensor_intrin import (
acc_32x32_int32_sapphirerapids,
dot_16x1x16_uint8_int8_int32,
dot_32x128x32_u8s8s32_sapphirerapids,
)
def _schedule_dense_pack_template(cfg, s, C, O):
    """Apply the tuned tiling to the packed dense GEMM.

    C is the dense compute stage, O the final (possibly fused) output; when
    they differ, O is tiled the same way and C is computed at its tile.
    """
    A, packedB = s[C].op.input_tensors
    # Accumulate into a cache stage so the inner GEMM writes to local memory.
    CC = s.cache_write(C, "global")
    y, x = s[C].op.axis
    (k,) = s[CC].op.reduce_axis
    # 3-level tiling of both output dims; outermost tiles are fused for parallelism.
    yt, yo, yi = cfg["tile_y"].apply(s, C, y)
    xt, xo, xi = cfg["tile_x"].apply(s, C, x)
    s[C].reorder(xt, yt, yo, xo, yi, xi)
    xyt = s[C].fuse(xt, yt)
    if C == O:
        s[C].parallel(xyt)
    xyo = s[C].fuse(yo, xo)
    s[C].unroll(yi)
    s[C].vectorize(xi)
    s[CC].compute_at(s[C], xyo)
    y, x = s[CC].op.axis
    ko, ki = cfg["tile_k"].apply(s, CC, k)
    s[CC].reorder(ko, ki, y, x)
    s[CC].vectorize(x)
    # Optionally block the accumulation over rows by tile_inner.
    tile_inner = cfg["tile_inner"].size[-1]
    if tile_inner > 1:
        yo, yi = s[CC].split(y, tile_inner)
        s[CC].reorder(ko, yo, ki, yi, x)
        s[CC].unroll(yo)
        s[CC].unroll(ki)
        s[CC].unroll(yi)
    else:
        s[CC].unroll(ki)
        s[CC].unroll(y)
    if C != O:
        # Mirror the tiling on the fused epilogue and anchor C inside it.
        y, x = s[O].op.axis
        yt, yo, yi = cfg["tile_y"].apply(s, O, y)
        xt, xo, xi = cfg["tile_x"].apply(s, O, x)
        s[O].reorder(xt, yt, yo, xo, yi, xi)
        xyt = s[O].fuse(xt, yt)
        s[C].compute_at(s[O], xyt)
        s[O].vectorize(xi)
        s[O].parallel(xyt)
    return s
def _schedule_dense_nopack_template(cfg, s, C):
    """Apply the tuned tiling to the non-packed dense compute stage C."""
    y, x = s[C].op.axis
    (kk,) = s[C].op.reduce_axis
    yo, yi = cfg["tile_y"].apply(s, C, y)
    xo, xi = cfg["tile_x"].apply(s, C, x)
    s[C].reorder(yo, xo, yi, xi)
    # Fused outer tiles carry the parallelism.
    xyo = s[C].fuse(yo, xo)
    s[C].parallel(xyo)
    s[C].unroll(kk)
    # CC is the intermediate partial-sum tensor produced by dense_nopack.
    (CC,) = s[C].op.input_tensors
    s[CC].compute_at(s[C], xyo)
    z, y, x = s[CC].op.axis
    (k,) = s[CC].op.reduce_axis
    yz = s[CC].fuse(z, y)
    s[CC].reorder(k, yz, x)
    s[CC].unroll(yz)
    s[CC].vectorize(x)
    return s
def _default_dense_pack_config(cfg, M, N, K):
    """Fill a fallback split configuration for the packed dense schedule."""
    # Generate default schedule for dynamic shape.
    if isinstance(M, (tvm.tir.Var, tvm.tir.Any)):
        M = 16
    if isinstance(N, (tvm.tir.Var, tvm.tir.Any)):
        N = 16
    if isinstance(K, (tvm.tir.Var, tvm.tir.Any)):
        K = 16

    vec_width = get_simd_32bit_lanes()

    # Widest factor of N not exceeding twice the SIMD width (1 always divides).
    tilex_ii = next((bn for bn in range(vec_width * 2, 0, -1) if N % bn == 0), 1)
    NN = N // tilex_ii
    # Double the middle tile while the outer extent stays even and > 4.
    tilex_oi = 1
    while NN // tilex_oi > 4 and (NN // tilex_oi) % 2 == 0:
        tilex_oi *= 2

    # Largest power-of-two row tile (<= 8) dividing M.
    tiley_ii = 8
    while M % tiley_ii != 0:
        tiley_ii //= 2
    MM = M // tiley_ii
    tiley_oi = 1
    while MM // tiley_oi > 4 and (MM // tiley_oi) % 2 == 0:
        tiley_oi *= 2

    cfg["tile_y"] = SplitEntity([MM // tiley_oi, tiley_oi, tiley_ii])
    cfg["tile_x"] = SplitEntity([NN // tilex_oi, tilex_oi, tilex_ii])
    cfg["tile_k"] = SplitEntity([K, 1])
    cfg["tile_inner"] = SplitEntity([M // tiley_ii, tiley_ii])
def _default_dense_nopack_config(cfg, M, N, K):
    """Fill a fallback split configuration for the non-packed dense schedule."""
    # Generate default schedule for dynamic shape.
    if isinstance(M, (tvm.tir.Var, tvm.tir.Any)):
        M = 16
    if isinstance(N, (tvm.tir.Var, tvm.tir.Any)):
        N = 16
    if isinstance(K, (tvm.tir.Var, tvm.tir.Any)):
        K = 16

    vec_width = get_simd_32bit_lanes()
    # Widest factor of K not exceeding twice the SIMD width (1 always divides).
    tilek_bn = next((bn for bn in range(vec_width * 2, 0, -1) if K % bn == 0), 1)
    cfg["tile_k"] = SplitEntity([K // tilek_bn, tilek_bn])
    cfg["tile_x"] = SplitEntity([N, 1])
    cfg["tile_y"] = SplitEntity([1, M])
@autotvm.register_topi_compute("dense_nopack.x86")
def dense_nopack(cfg, data, weight, bias=None, out_dtype=None):
    """Compute dense without packing"""
    if out_dtype is None:
        out_dtype = data.dtype
    M, K = get_const_tuple(data.shape)
    N, _ = get_const_tuple(weight.shape)
    # create tuning space
    cfg.define_split(
        "tile_y", 32 if isinstance(M, (tvm.tir.Var, tvm.tir.Any)) else M, num_outputs=2
    )
    cfg.define_split(
        "tile_x", 32 if isinstance(N, (tvm.tir.Var, tvm.tir.Any)) else N, num_outputs=2
    )
    cfg.define_split(
        "tile_k", 32 if isinstance(K, (tvm.tir.Var, tvm.tir.Any)) else K, num_outputs=2
    )
    if cfg.is_fallback:
        _default_dense_nopack_config(cfg, M, N, K)
    # Split the K reduction: CC accumulates vec-wide partial sums, then C
    # reduces those partials, which lets the inner loop vectorize.
    vec = cfg["tile_k"].size[-1]
    k = te.reduce_axis((0, K // vec), "k")
    CC = te.compute(
        (M, N, vec),
        lambda z, y, x: te.sum(
            data[z, k * vec + x].astype(out_dtype) * weight[y, k * vec + x].astype(out_dtype),
            axis=k,
        ),
    )
    kk = te.reduce_axis((0, vec), "kk")
    C = te.compute((M, N), lambda y, x: te.sum(CC[y, x, kk], axis=kk), tag="dense_nopack")
    if bias is not None:
        C = te.compute((M, N), lambda i, j: C[i, j] + bias[j].astype(out_dtype), tag=tag.BROADCAST)
    return C
@autotvm.register_topi_schedule("dense_nopack.x86")
def schedule_dense_nopack(cfg, outs):
    """Create the schedule for dense_nopack"""
    s = te.create_schedule([t.op for t in outs])

    def _callback(op):
        # Only the dense compute stage needs the tiling template.
        if "dense_nopack" in op.tag:
            _schedule_dense_nopack_template(cfg, s, op.output(0))

    traverse_inline(s, outs[0].op, _callback)
    return s
@autotvm.register_topi_compute("dense_pack.x86")
def dense_pack(cfg, data, weight, bias=None, out_dtype=None):
    """Compute dense with transformed weight."""
    if out_dtype is None:
        out_dtype = data.dtype
    M, K = get_const_tuple(data.shape)  # batch, in_dim
    # Weight may arrive pre-packed (3-D) or flat (2-D).
    if len(weight.shape) == 3:
        N, _, packw_bn = get_const_tuple(weight.shape)  # out_dim
        N = N * packw_bn
    else:
        N, _ = get_const_tuple(weight.shape)  # out_dim
    # create tuning space
    cfg.define_split(
        "tile_y", 32 if isinstance(M, (tvm.tir.Var, tvm.tir.Any)) else M, num_outputs=3
    )
    cfg.define_split(
        "tile_x", 32 if isinstance(N, (tvm.tir.Var, tvm.tir.Any)) else N, num_outputs=3
    )
    cfg.define_split(
        "tile_k", 32 if isinstance(K, (tvm.tir.Var, tvm.tir.Any)) else K, num_outputs=2
    )
    cfg.define_split(
        "tile_inner",
        32 if isinstance(M, (tvm.tir.Var, tvm.tir.Any)) else M,
        num_outputs=2,
        filter=lambda y: y.size[-1] <= 16,
    )
    if cfg.is_fallback:
        _default_dense_pack_config(cfg, M, N, K)
    # Flat weight: pack it as (N // bn, K, bn) using the tuned block size.
    if len(weight.shape) == 2:
        packw_bn = cfg["tile_x"].size[-1]
        packw_shape = (N // packw_bn, K, packw_bn)
        if autotvm.GLOBAL_SCOPE.in_tuning:
            # Directly use modified data layout placeholder.
            packw = tvm.te.placeholder(packw_shape, weight.dtype, name="packed_weight")
        else:
            packw = te.compute(
                packw_shape, lambda z, y, x: weight[z * packw_bn + x, y], name="packed_weight"
            )
    else:
        packw = weight
    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod
    k = te.reduce_axis((0, K), name="k")
    C = te.compute(
        (M, N),
        lambda y, x: te.sum(
            data[y, k].astype(out_dtype)
            * packw[idxdiv(x, packw_bn), k, idxmod(x, packw_bn)].astype(out_dtype),
            axis=k,
        ),
        tag="dense_pack",
    )
    if bias is not None:
        C = te.compute((M, N), lambda i, j: C[i, j] + bias[j].astype(out_dtype), tag=tag.BROADCAST)
    return C
@autotvm.register_topi_schedule("dense_pack.x86")
def schedule_dense_pack(cfg, outs):
    """Create the schedule for dense_pack"""
    s = te.create_schedule([t.op for t in outs])

    def _callback(op):
        # Only the packed dense stage needs the tiling template.
        if "dense_pack" in op.tag:
            _schedule_dense_pack_template(cfg, s, op.output(0), outs[0])

    traverse_inline(s, outs[0].op, _callback)
    return s
@autotvm.register_topi_compute("dense_int8.x86")
def dense_int8(cfg, data, weight, bias=None, out_dtype=None):
    """Compute for uint8 x int8 -> int32 dense"""
    out_dtype = data.dtype if out_dtype is None else out_dtype
    # Weight must be pre-packed 4-D with inner blocks (16, 4) for the
    # int8 dot-product intrinsics.
    assert len(weight.shape) == 4
    assert data.dtype == "uint8" and weight.dtype == "int8"
    packed_dims = get_const_tuple(weight.shape)  # out_dim
    assert packed_dims[2] == 16 and packed_dims[3] == 4
    return dense_int8_compute(cfg, data, weight, bias)
@autotvm.register_topi_schedule("dense_int8.x86")
def schedule_dense_int8(cfg, outs):
    """Create a schedule for dense_int8"""
    s = te.create_schedule([t.op for t in outs])
    mcpu = tvm.target.Target.current().mcpu

    def _callback(op):
        if "dense_int8" not in op.tag:
            return
        # Prefer AMX TMUL when the target supports it, otherwise fall back
        # to the avx512/VNNI schedule.
        if target_has_amx(mcpu):
            dense_amx_int8_schedule(cfg, s, op.output(0), outs[0])
        elif target_has_avx512(mcpu):
            dense_int8_schedule(cfg, s, op.output(0), outs[0])

    traverse_inline(s, outs[0].op, _callback)
    return s
def dense_int8_compute(cfg, X, packed_w, bias=None):
    """Compute for uint8 x int8 -> int32 dense"""
    m, k = X.shape
    n_o, _, n_i, _ = packed_w.shape
    ak = te.reduce_axis((0, k), name="k")
    mcpu = tvm.target.Target.current().mcpu
    # Tag the compute so meta_schedule can pick the int8 dense rule on avx512.
    if target_has_avx512(mcpu):
        target_attr = {"schedule_rule": "meta_schedule.x86.dense_int8"}
    else:
        target_attr = None
    # Weight is packed as (N//16, K//4, 16, 4); the index expression below
    # recovers element (j, ak) of the logical weight matrix.
    C = te.compute(
        (m, n_o * n_i),
        lambda i, j: te.sum(
            X[i, ak].astype("int32")
            * packed_w[tvm.tir.indexdiv(j, 16), tvm.tir.indexdiv(ak, 4), j % 16, ak % 4].astype(
                "int32"
            ),
            axis=ak,
        ),
        tag="dense_int8",
        attrs=target_attr,
    )
    if bias is not None:
        C = te.compute(C.shape, lambda i, j: C[i, j] + bias[j], tag=tag.BROADCAST)
    return C
def dense_int8_schedule(cfg, s, C, O, do_parallel=True):
    """Schedule dense compute using avx512 or lower instructions
    including VNNI vpdpbusd instruction if possible"""
    # C: The output of GEMM
    # O: The output of the fused op
    def split_y(out):
        # Split the row axis (second-to-last); use a fixed factor of 32 when
        # there is no tuned config.
        default_y_split_factor = 32
        a_y = out.op.axis[-2]
        if cfg.is_fallback:
            return s[out].split(a_y, factor=default_y_split_factor)
        cfg.define_split("tile_y", a_y, num_outputs=2)
        return cfg["tile_y"].apply(s, out, a_y)
    (a_k,) = C.op.reduce_axis
    a_yo, a_yi = split_y(C)
    # Factors 16 (output) x 4 (reduction) match the int8 dot-product tile.
    a_xo, a_xi = s[C].split(C.op.axis[-1], factor=16)
    a_ko, a_ki = s[C].split(a_k, factor=4)
    s[C].reorder(a_yo, a_xo, a_yi, a_ko, a_xi, a_ki)
    pc = dot_16x1x16_uint8_int8_int32()
    # Replace the innermost 16x4 tile with the uint8/int8 dot-product intrinsic.
    s[C].tensorize(a_xi, pc)
    if C == O:
        fused = s[O].fuse(a_yo, a_xo)
    else:
        # There is a fused epilogue: vectorize it and compute the GEMM
        # inside its row tile.
        a_yo, a_yi = split_y(O)
        a_xo, a_xi = s[O].split(O.op.axis[-1], factor=16)
        s[O].reorder(a_yo, a_xo, a_yi, a_xi)
        s[O].vectorize(a_xi)
        s[C].compute_at(s[O], a_yi)
        fused = s[O].fuse(a_yo, a_xo)
    if do_parallel:
        s[O].parallel(fused)
    return s, fused
def dense_amx_int8_schedule(cfg, s, C, O, do_parallel=True):
    """Schedule dense compute using AMX TMUL instruction"""
    # C: The output of GEMM
    # O: The output of the fused op
    def split_x(out):
        # 4-level split of the row axis; innermost factor must be 32 to
        # match the AMX tile height.
        default_x_split_factor1 = 32
        default_x_split_factor2 = 2
        default_x_split_factor3 = 2
        default_x_split_factor4 = 2
        a_x = s[out].op.axis[-2]
        if cfg.is_fallback:
            a_xo, a_xi = s[out].split(a_x, factor=default_x_split_factor1)
            a_xo2, a_xo1 = s[out].split(a_xo, factor=default_x_split_factor2)
            a_xo3, a_xo2 = s[out].split(a_xo2, factor=default_x_split_factor3)
            a_xo4, a_xo3 = s[out].split(a_xo3, factor=default_x_split_factor4)
            return [a_xo4, a_xo3, a_xo2, a_xo1, a_xi]
        cfg.define_split("tile_x", a_x, num_outputs=5, filter=lambda x: x.size[-1] == 32)
        return cfg["tile_x"].apply(s, out, a_x)
    def split_y(out):
        # Same 4-level scheme for the column axis (innermost factor 32).
        default_y_split_factor1 = 32
        default_y_split_factor2 = 4
        default_y_split_factor3 = 4
        default_y_split_factor4 = 4
        a_y = s[out].op.axis[-1]
        if cfg.is_fallback:
            a_yo1, a_yo = s[out].split(a_y, factor=default_y_split_factor1)
            a_yo2, a_yo1 = s[out].split(a_yo1, factor=default_y_split_factor2)
            a_yo3, a_yo2 = s[out].split(a_yo2, factor=default_y_split_factor3)
            a_yo4, a_yo3 = s[out].split(a_yo3, factor=default_y_split_factor4)
            return [a_yo4, a_yo3, a_yo2, a_yo1, a_yo]
        cfg.define_split("tile_y", a_y, num_outputs=5, filter=lambda y: y.size[-1] == 32)
        return cfg["tile_y"].apply(s, out, a_y)
    def split_k(out, rd_axis):
        # Reduction axis split; innermost factor must be 128 for the
        # 32x128x32 TMUL micro-kernel.
        default_k_split_factor1 = 128
        default_k_split_factor2 = 2
        default_k_split_factor3 = 2
        default_k_split_factor4 = 2
        if cfg.is_fallback:
            a_ko, a_ki = s[out].split(rd_axis, factor=default_k_split_factor1)
            a_ko2, a_ko1 = s[out].split(a_ko, factor=default_k_split_factor2)
            a_ko3, a_ko2 = s[out].split(a_ko2, factor=default_k_split_factor3)
            a_ko4, a_ko3 = s[out].split(a_ko3, factor=default_k_split_factor4)
            return [a_ko4, a_ko3, a_ko2, a_ko1, a_ki]
        cfg.define_split("tile_k", rd_axis, num_outputs=5, filter=lambda y: y.size[-1] == 128)
        return cfg["tile_k"].apply(s, out, rd_axis)
    a_x, a_y = C.op.axis[-2:]
    (a_k,) = C.op.reduce_axis
    # Accumulate in AMX tile registers, then copy back into C.
    CF = s.cache_write(C, "amx.tmm")
    a_x3, a_x2, a_x1, a_xo, a_xi = split_x(C)
    a_y3, a_y2, a_y1, a_yo, a_yi = split_y(C)
    s[C].reorder(a_x3, a_y3, a_x2, a_y2, a_x1, a_y1, a_xo, a_yo, a_xi, a_yi)
    s[CF].compute_at(s[C], a_yo)
    (a_k_f,) = CF.op.reduce_axis
    a_x_f, a_y_f = CF.op.axis[-2:]
    a_xo_f, a_xi_f = s[CF].split(a_x_f, factor=32)
    a_yo_f, a_yi_f = s[CF].split(a_y_f, factor=32)
    a_k3_f, a_k2_f, a_k1_f, a_ko_f, a_ki_f = split_k(CF, a_k_f)
    s[CF].reorder(a_k3_f, a_k2_f, a_k1_f, a_ko_f, a_xo_f, a_yo_f, a_ki_f, a_xi_f, a_yi_f)
    # Leading dimensions for the intrinsics come from the input shapes.
    (m, k) = CF.op.input_tensors[0].shape[-2:]
    (n, c, n_i, c_i) = CF.op.input_tensors[1].shape[-4:]
    n = n * n_i
    s[CF].tensorize(a_ki_f, dot_32x128x32_u8s8s32_sapphirerapids(LDA=int(k)))
    s[C].tensorize(a_xi, acc_32x32_int32_sapphirerapids(LDC=int(n)))
    if C == O:
        fused = s[O].fuse(a_x3, a_y3)
    else:
        # Schedule the fused epilogue with the same tiling and vectorize it.
        a_y3, a_y2, a_y1, a_yr, a_yi = split_y(O)
        a_x3, a_x2, a_x1, a_xr, a_xi = split_x(O)
        s[O].reorder(a_y3, a_x3, a_y2, a_x2, a_y1, a_x1, a_yr, a_xr, a_yi, a_xi)
        s[O].vectorize(a_xi)
        fused = s[O].fuse(a_x3, a_y3)
    if do_parallel:
        s[O].parallel(fused)
    return s, fused
def matmul_blas_common(cfg, tensor_a, tensor_b, bias, out_dtype, transpose_a, transpose_b, lib):
    """Compute matmul/dense using a BLAS library (cblas / mkl / dnnl).

    Parameters
    ----------
    cfg : ConfigSpace
        AutoTVM config; only used to record the FLOP count.
    tensor_a, tensor_b : te.Tensor
        2-D operands.
    bias : te.Tensor or None
        Optional 1-D bias added along the last output axis.
    out_dtype : str
        Output dtype; "int32" selects the u8s8s32 path.
    transpose_a, transpose_b : bool
        Whether each operand is transposed inside the BLAS call.
    lib : module
        Contrib wrapper module exposing ``matmul`` (and optionally
        ``matmul_u8s8s32``).

    Raises
    ------
    NotImplementedError
        If the library does not support the operand dtype combination.
    """
    M, K = get_const_tuple(tensor_a.shape)
    N, _ = get_const_tuple(tensor_b.shape)
    if isinstance(M, int) and isinstance(K, int) and isinstance(N, int):
        cfg.add_flop(M * K * N * 2)
    if tensor_a.dtype == "uint8" and tensor_b.dtype == "int8" and out_dtype == "int32":
        if not hasattr(lib, "matmul_u8s8s32"):
            # FIX: the message previously named a nonexistent "matmulu8s8s32"
            # and misspelled "implemented".
            raise NotImplementedError(
                f"Matmul/Dense with {lib.__name__} for {tensor_a.dtype} is not supported "
                "(matmul_u8s8s32 not implemented)"
            )
        C = lib.matmul_u8s8s32(tensor_a, tensor_b, transpose_a, transpose_b, dtype=out_dtype)
    elif tensor_a.dtype == "float32" or tensor_a.dtype == "float64":
        C = lib.matmul(tensor_a, tensor_b, transpose_a, transpose_b)
    else:
        raise NotImplementedError(
            f"Matmul/Dense with {lib.__name__} for {tensor_a.dtype} is not supported"
        )
    if bias is not None:
        C = te.compute(C.shape, lambda i, j: C[i, j] + bias[j].astype(out_dtype), tag=tag.BROADCAST)
    return C
@autotvm.register_topi_compute("dense_cblas.x86")
def dense_cblas(cfg, data, weight, bias=None, out_dtype=None):
    """Dense via the cblas library; alias of the non-transposed-A matmul (matmul_nt)."""
    # dense(data, weight) == data @ weight.T, hence transpose_b=True.
    return matmul_blas_common(
        cfg, data, weight, bias, out_dtype, transpose_a=False, transpose_b=True, lib=cblas
    )
@autotvm.register_topi_schedule("dense_cblas.x86")
def schedule_dense_cblas(_, outs):
    """Schedule for dense_cblas: the extern BLAS call only needs the generic extern schedule."""
    sched = generic.schedule_extern(outs)
    return sched
@autotvm.register_topi_compute("dense_mkl.x86")
def dense_mkl(cfg, data, weight, bias=None, out_dtype=None):
    """Dense via the mkl library; alias of the non-transposed-A matmul (matmul_nt)."""
    # dense(data, weight) == data @ weight.T, hence transpose_b=True.
    return matmul_blas_common(
        cfg, data, weight, bias, out_dtype, transpose_a=False, transpose_b=True, lib=mkl
    )
@autotvm.register_topi_schedule("dense_mkl.x86")
def schedule_dense_mkl(_, outs):
    """Schedule for dense_mkl: the extern BLAS call only needs the generic extern schedule."""
    sched = generic.schedule_extern(outs)
    return sched
@autotvm.register_topi_compute("dense_dnnl.x86")
def dense_dnnl(cfg, data, weight, bias=None, out_dtype=None):
    """Dense via the dnnl library; alias of the non-transposed-A matmul (matmul_nt)."""
    # dense(data, weight) == data @ weight.T, hence transpose_b=True.
    return matmul_blas_common(
        cfg, data, weight, bias, out_dtype, transpose_a=False, transpose_b=True, lib=dnnl
    )
@autotvm.register_topi_schedule("dense_dnnl.x86")
def schedule_dense_dnnl(_, outs):
    """Schedule for dense_dnnl: the extern BLAS call only needs the generic extern schedule."""
    sched = generic.schedule_extern(outs)
    return sched
@autotvm.register_topi_compute("matmul_cblas.x86")
def matmul_cblas(
    cfg, tensor_a, tensor_b, bias=None, out_dtype=None, transpose_a=False, transpose_b=False
):
    """Matmul backed by the cblas library."""
    result = matmul_blas_common(
        cfg, tensor_a, tensor_b, bias, out_dtype, transpose_a, transpose_b, lib=cblas
    )
    return result
@autotvm.register_topi_schedule("matmul_cblas.x86")
def schedule_matmul_cblas(_, outs):
    """Schedule for matmul_cblas: the extern call only needs the generic extern schedule."""
    sched = generic.schedule_extern(outs)
    return sched
@autotvm.register_topi_compute("matmul_mkl.x86")
def matmul_mkl(
    cfg, tensor_a, tensor_b, bias=None, out_dtype=None, transpose_a=False, transpose_b=False
):
    """Matmul backed by the mkl library."""
    result = matmul_blas_common(
        cfg, tensor_a, tensor_b, bias, out_dtype, transpose_a, transpose_b, lib=mkl
    )
    return result
@autotvm.register_topi_schedule("matmul_mkl.x86")
def schedule_matmul_mkl(_, outs):
    """Schedule for matmul_mkl: the extern call only needs the generic extern schedule."""
    sched = generic.schedule_extern(outs)
    return sched
@autotvm.register_topi_compute("matmul_dnnl.x86")
def matmul_dnnl(
    cfg, tensor_a, tensor_b, bias=None, out_dtype=None, transpose_a=False, transpose_b=False
):
    """Matmul backed by the dnnl library."""
    result = matmul_blas_common(
        cfg, tensor_a, tensor_b, bias, out_dtype, transpose_a, transpose_b, lib=dnnl
    )
    return result
@autotvm.register_topi_schedule("matmul_dnnl.x86")
def schedule_matmul_dnnl(_, outs):
    """Schedule for matmul_dnnl: the extern call only needs the generic extern schedule."""
    sched = generic.schedule_extern(outs)
    return sched
def dense_dynamic(A, B, bias, dtype):
    """Compute for dense with dynamic shape

    Parameters
    ----------
    A : te.Tensor
        LHS with shape [1, K] (batch must be 1 — see the assert below).
    B : te.Tensor
        Weight with shape [N, K]; the kernel computes A @ B.T.
    bias : te.Tensor or None
        Optional 1-D bias used to initialize each output element.
    dtype : str
        Output dtype.
    """
    assert A.shape[0] == 1, "Only dynamic matrix vector multiplication with vector LHS is supported"
    # Right now we only support matrix-vector multiplication with lhs as the
    # vector. We don't need to do much optimization here because the access
    # pattern and parallelization are straight forward.
    def gen_ir(a, b, c):
        # IR generator without bias; note A/B/C below deliberately shadow the
        # outer tensors with buffer pointers.
        ib = tvm.tir.ir_builder.create()
        A = ib.buffer_ptr(a)
        B = ib.buffer_ptr(b)
        C = ib.buffer_ptr(c)
        with ib.for_range(0, b.shape[0], name="j", kind="parallel") as j:
            C[0, j] = 0.0
            with ib.for_range(0, b.shape[1], name="k") as k:
                C[0, j] += A[0, k] * B[j, k]
        return ib.get()
    def gen_ir_bias(a, b, bias, c):
        # Same as gen_ir, but each output accumulator starts from bias[j].
        ib = tvm.tir.ir_builder.create()
        A = ib.buffer_ptr(a)
        B = ib.buffer_ptr(b)
        C = ib.buffer_ptr(c)
        with ib.for_range(0, b.shape[0], name="j", kind="parallel") as j:
            C[0, j] = bias[j]
            with ib.for_range(0, b.shape[1], name="k") as k:
                C[0, j] += A[0, k] * B[j, k]
        return ib.get()
    out_shape = (A.shape[0], B.shape[0])
    out_buf = tvm.tir.decl_buffer(out_shape, dtype, "out_buf")
    if bias is None:
        out = te.extern(
            [out_shape],
            [A, B],
            lambda ins, outs: gen_ir(*ins, *outs),
            dtype=dtype,
            out_buffers=[out_buf],
            name="dense_dynamic_cpu",
            tag="dense_dynamic_cpu",
        )
    else:
        out = te.extern(
            [out_shape],
            [A, B, bias],
            lambda ins, outs: gen_ir_bias(*ins, *outs),
            dtype=dtype,
            out_buffers=[out_buf],
            name="dense_dynamic_cpu",
            tag="dense_dynamic_cpu",
        )
    return out
def schedule_dense_dynamic(outs):
    """Schedule for dense_dynamic: the extern op only needs the generic extern schedule."""
    sched = generic.schedule_extern(outs)
    return sched
| 21,828 | 32.480061 | 100 | py |
tvm | tvm-main/python/tvm/topi/x86/bitserial_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,invalid-name
"""Bitserial conv2d schedule on x86"""
import tvm
from tvm import te
from tvm import autotvm
from .. import tag
from ..utils import get_const_int, get_const_tuple
from ..nn.pad import pad
from ..nn.utils import get_pad_tuple
from ..nn.bitserial_util import bitpack, binary_op_multiplier
@autotvm.register_topi_compute("bitserial_conv2d_nchw.x86")
def bitserial_conv2d_nchw(
    cfg,
    data,
    kernel,
    stride,
    padding,
    in_bits,
    weight_bits,
    pack_dtype="uint32",
    out_dtype="int16",
    unipolar=True,
):
    """Compute convolution with pack on spatial axes.

    ``data`` is NCHW with batch 1; ``kernel`` is either an unpacked 4-D tensor
    (then bitpacked here) or an already-packed 6-D tensor. Returns a 4-D
    [1, CO, OH, OW] tensor tagged "spatial_bitserial_conv_nchw".
    """
    assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1"
    data_q = bitpack(data, in_bits, pack_axis=1, bit_axis=0, pack_type=pack_dtype)
    # Check if kernel is already bitpacked
    if len(kernel.shape) == 4:
        kernel_q = bitpack(kernel, weight_bits, pack_axis=1, bit_axis=0, pack_type=pack_dtype)
        KB, CO, _, KH, KW = get_const_tuple(kernel_q.shape)
    else:
        kernel_vec = kernel
        OCO, _, KH, KW, KB, VC = get_const_tuple(kernel_vec.shape)
        CO = OCO * VC
    IB, N, CI, H, W = get_const_tuple(data_q.shape)
    # BUGFIX: an unconditional re-read of kernel_q.shape used to follow here;
    # kernel_q is only defined in the 4-D branch, so it raised NameError for
    # pre-bitpacked kernels and was redundant otherwise. It has been removed.
    if isinstance(padding, int) or (isinstance(padding, (tuple, list)) and len(padding) == 2):
        TPAD, LPAD, DPAD, RPAD = get_pad_tuple(padding, kernel)
    else:
        TPAD, LPAD, DPAD, RPAD = padding
    pad_before = [0, 0, 0, TPAD, LPAD]
    pad_after = [0, 0, 0, DPAD, RPAD]
    if isinstance(stride, (tuple, list)):
        HSTR, WSTR = stride
    else:
        HSTR, WSTR = stride, stride
    HCAT, WCAT = KH - 1, KW - 1
    TH = H + TPAD + DPAD
    TW = W + LPAD + RPAD
    OH = (H + TPAD + DPAD - KH) // HSTR + 1
    OW = (W + LPAD + RPAD - KW) // WSTR + 1
    # ==================== define configuration space ====================
    n, co, oh, ow = cfg.axis(N), cfg.axis(CO), cfg.axis(OH), cfg.axis(OW)
    ci, kh, kw = cfg.reduce_axis(CI), cfg.reduce_axis(KH), cfg.reduce_axis(KW)
    ib, kb = cfg.reduce_axis(in_bits), cfg.reduce_axis(weight_bits)
    co, vc = cfg.define_split("tile_co", co, num_outputs=2, filter=lambda x: max(x.size[1:]) <= 16)
    oh, vh = cfg.define_split("tile_oh", oh, num_outputs=2, filter=lambda x: max(x.size[1:]) <= 16)
    ow, vw = cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda x: max(x.size[1:]) <= 16)
    cfg.define_annotate("ann_reduce", [ib, kb, kh, kw], policy="try_unroll")
    cfg.define_reorder(
        "reorder_0",
        [n, co, oh, ow, vc, vh, vw, kh, kw, kb, ib, ci],
        policy="interval_all",
        interval=(6, 11),
    )
    # binary ops
    cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW * binary_op_multiplier(pack_dtype))
    # ====================
    VC = cfg["tile_co"].size[-1]
    VH = cfg["tile_oh"].size[-1]
    VW = cfg["tile_ow"].size[-1]
    dvshape = (1, TH // (VH * HSTR), TW // (VW * WSTR), CI, VH * HSTR + HCAT, VW * WSTR + WCAT, IB)
    kvshape = (CO // VC, CI, KH, KW, KB, VC)
    ovshape = (1, CO // VC, OH // VH, OW // VW, VH, VW, VC)
    oshape = (1, CO, OH, OW)
    # NOTE(review): padding is only materialized when BOTH TPAD and RPAD are
    # nonzero; asymmetric one-sided padding is silently skipped (kept as-is to
    # preserve existing behavior — confirm upstream intent).
    if TPAD != 0 and RPAD != 0:
        data_pad = pad(data_q, pad_before, pad_after, name="data_pad")
    else:
        data_pad = data_q
    data_vec = te.compute(
        dvshape,
        lambda n, h, w, ci, vh, vw, b: data_pad[b][n][ci][h * VH * HSTR + vh][w * VW * WSTR + vw],
        name="data_vec",
    )
    if len(kernel.shape) == 4:
        kernel_vec = te.compute(
            kvshape,
            lambda co, ci, dh, dw, b, vc: kernel_q[b][co * VC + vc][ci][dh][dw],
            name="kernel_vec",
        )
    ci = te.reduce_axis((0, CI), name="ci")
    dh = te.reduce_axis((0, KH), name="dh")
    dw = te.reduce_axis((0, KW), name="dw")
    b1 = te.reduce_axis((0, IB), name="ib")
    b2 = te.reduce_axis((0, KB), name="kb")
    def _conv(n, co, h, w, vh, vw, vc):
        # Bit-plane pair (b1, b2) contributes with weight 2^(b1+b2).
        b1b2 = (b1 + b2).astype(out_dtype)
        if unipolar:
            # Unipolar: popcount(x & w) - popcount(x & ~w) implements +1/-1 weights.
            return te.sum(
                (
                    tvm.tir.popcount(
                        data_vec[n, h, w, ci, vh * HSTR + dh, vw * WSTR + dw, b1].astype(out_dtype)
                        & kernel_vec[co, ci, dh, dw, b2, vc].astype(out_dtype)
                    )
                    - tvm.tir.popcount(
                        data_vec[n, h, w, ci, vh * HSTR + dh, vw * WSTR + dw, b1].astype(out_dtype)
                        & ~kernel_vec[co, ci, dh, dw, b2, vc]
                    ).astype(out_dtype)
                )
                << b1b2,
                axis=[ci, dh, dw, b1, b2],
            )
        return te.sum(
            (
                tvm.tir.popcount(
                    data_vec[n, h, w, ci, vh * HSTR + dh, vw * WSTR + dw, b1]
                    & kernel_vec[co, ci, dh, dw, b2, vc]
                )
            ).astype(out_dtype)
            << b1b2,
            axis=[ci, dh, dw, b1, b2],
        )
    conv = te.compute(ovshape, _conv, name="conv_out")
    idxd = tvm.tir.indexdiv
    idxm = tvm.tir.indexmod
    # Unpack the tiled [n, co/VC, oh/VH, ow/VW, vh, vw, vc] layout back to NCHW.
    return te.compute(
        oshape,
        lambda n, co, h, w: conv[
            n, idxd(co, VC), idxd(h, VH), idxd(w, VW), idxm(h, VH), idxm(w, VW), idxm(co, VC)
        ],
        name="conv_vec",
        tag="spatial_bitserial_conv_nchw",
    )
@autotvm.register_topi_compute("bitserial_conv2d_nhwc.x86")
def bitserial_conv2d_nhwc(
    cfg,
    data,
    kernel,
    stride,
    padding,
    in_bits,
    weight_bits,
    pack_dtype="uint32",
    out_dtype="int16",
    unipolar=True,
):
    """Compute convolution with pack on spatial axes.

    NHWC variant: ``data`` has batch 1; ``kernel`` is either unpacked 4-D
    (bitpacked here) or already packed 5-D. Returns a [1, OH, OW, CO] tensor
    tagged "spatial_bitserial_conv_nhwc".
    """
    assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1"
    data_q = bitpack(data, in_bits, pack_axis=3, bit_axis=4, pack_type=pack_dtype)
    pack_kernel = len(kernel.shape) == 4
    if pack_kernel:
        kernel_q = bitpack(kernel, weight_bits, pack_axis=2, bit_axis=4, pack_type=pack_dtype)
    else:
        kernel_q = kernel
    KH, KW, _, CO, KB = get_const_tuple(kernel_q.shape)
    N, H, W, CI, IB = get_const_tuple(data_q.shape)
    if isinstance(padding, int) or (isinstance(padding, (tuple, list)) and len(padding) == 2):
        TPAD, LPAD, DPAD, RPAD = get_pad_tuple(padding, kernel)
    else:
        TPAD, LPAD, DPAD, RPAD = padding
    pad_before = [0, TPAD, LPAD, 0, 0]
    pad_after = [0, DPAD, RPAD, 0, 0]
    if isinstance(stride, (tuple, list)):
        HSTR, WSTR = stride
    else:
        HSTR, WSTR = stride, stride
    HCAT, WCAT = KH - 1, KW - 1
    PAD_H = H + (TPAD + DPAD)
    PAD_W = W + (LPAD + RPAD)
    OH = (PAD_H - KH) // HSTR + 1
    OW = (PAD_W - KW) // WSTR + 1
    oshape = (1, OH, OW, CO)
    # ==================== define configuration space ====================
    n, oh, ow, co = cfg.axis(N), cfg.axis(OH), cfg.axis(OW), cfg.axis(CO)
    ci, kh, kw = cfg.reduce_axis(CI), cfg.reduce_axis(KH), cfg.reduce_axis(KW)
    ib, kb = cfg.reduce_axis(in_bits), cfg.reduce_axis(weight_bits)
    co, vc = cfg.define_split("tile_co", co, num_outputs=2, filter=lambda x: max(x.size[1:]) <= 16)
    oh, vh = cfg.define_split("tile_oh", oh, num_outputs=2, filter=lambda x: max(x.size[1:]) <= 16)
    ow, vw = cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda x: max(x.size[1:]) <= 16)
    cfg.define_annotate("ann_reduce", [ib, kb, kh, kw], policy="try_unroll")
    cfg.define_reorder(
        "reorder_0",
        [n, oh, ow, co, vh, vw, kh, kw, kb, ib, vc, ci],
        policy="interval_all",
        interval=(3, 7),
    )
    # binary ops
    cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW * binary_op_multiplier(pack_dtype))
    # ====================
    VC = cfg["tile_co"].size[-1]
    VH = cfg["tile_oh"].size[-1]
    VW = cfg["tile_ow"].size[-1]
    dvshape = (
        1,
        PAD_H // (VH * HSTR),
        PAD_W // (VW * WSTR),
        VH * HSTR + HCAT,
        VW * WSTR + WCAT,
        CI,
        IB,
    )
    kvshape = (CO, KH, KW, CI, VC, KB)
    ovshape = (1, OH, OW, CO, VH, VW, VC)
    oshape = (1, OH, OW, CO)
    # NOTE(review): padding is only materialized when BOTH DPAD and RPAD are
    # nonzero; one-sided padding is skipped — confirm upstream intent.
    if DPAD != 0 and RPAD != 0:
        data_pad = pad(data_q, pad_before, pad_after, name="data_pad")
    else:
        data_pad = data_q
    data_vec = te.compute(
        dvshape,
        lambda n, h, w, vh, vw, ci, b: data_pad[n][h * VH * HSTR + vh][w * VW * WSTR + vw][ci][b],
        name="data_vec",
    )
    kernel_vec = te.compute(
        kvshape,
        lambda co, dh, dw, ci, vc, b: kernel_q[dh][dw][ci][co * VC + vc][b],
        name="kernel_vec",
    )
    ci = te.reduce_axis((0, CI), name="ci")
    dh = te.reduce_axis((0, KH), name="dh")
    dw = te.reduce_axis((0, KW), name="dw")
    b1 = te.reduce_axis((0, IB), name="ib")
    b2 = te.reduce_axis((0, KB), name="kb")
    def _conv(n, h, w, co, vh, vw, vc):
        # Bit-plane pair (b1, b2) contributes with weight 2^(b1+b2).
        b1b2 = (b1 + b2).astype(out_dtype)
        if unipolar:
            # Unipolar: popcount(x & w) - popcount(x & ~w) implements +1/-1 weights.
            return te.sum(
                (
                    (
                        tvm.tir.popcount(
                            data_vec[n, h, w, vh * HSTR + dh, vw * WSTR + dw, ci, b1]
                            & kernel_vec[co, dh, dw, ci, vc, b2]
                        ).astype(out_dtype)
                        - tvm.tir.popcount(
                            data_vec[n, h, w, vh * HSTR + dh, vw * WSTR + dw, ci, b1]
                            & ~kernel_vec[co, dh, dw, ci, vc, b2]
                        ).astype(out_dtype)
                    )
                    << b1b2
                ),
                axis=[dh, dw, ci, b1, b2],
            )
        return te.sum(
            tvm.tir.popcount(
                data_vec[n, h, w, vh * HSTR + dh, vw * WSTR + dw, ci, b1]
                & kernel_vec[co, dh, dw, ci, vc, b2]
            ).astype(out_dtype)
            << b1b2,
            axis=[dh, dw, ci, b1, b2],
        )
    conv = te.compute(ovshape, _conv, name="conv")
    idxd = tvm.tir.indexdiv
    idxm = tvm.tir.indexmod
    # Unpack the tiled [n, oh/VH, ow/VW, co/VC, vh, vw, vc] layout back to NHWC.
    return te.compute(
        oshape,
        lambda n, h, w, co: conv[
            n, idxd(h, VH), idxd(w, VW), idxd(co, VC), idxm(h, VH), idxm(w, VW), idxm(co, VC)
        ],
        name="output_unpack",
        tag="spatial_bitserial_conv_nhwc",
    )
@autotvm.register_topi_schedule("bitserial_conv2d_nchw.x86")
def schedule_bitserial_conv2d_nchw(cfg, outs):
    """NCHW entry point; both layouts share one schedule implementation."""
    sched = _schedule_bitserial_conv2d(cfg, outs)
    return sched
@autotvm.register_topi_schedule("bitserial_conv2d_nhwc.x86")
def schedule_bitserial_conv2d_nhwc(cfg, outs):
    """NHWC entry point; both layouts share one schedule implementation."""
    sched = _schedule_bitserial_conv2d(cfg, outs)
    return sched
def _schedule_bitserial_conv2d(cfg, outs):
    """CPU schedule for bitserial convolutions NCHW and NHWC"""
    s = te.create_schedule([x.op for x in outs])
    scheduled_ops = []
    def traverse(op):
        """Traverse operators from computation graph"""
        output = op.output(0)
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(op.tag) or "elemwise" in op.tag:
            if op not in s.outputs:
                s[op].compute_inline()
            for tensor in op.input_tensors:
                if tensor.op.input_tensors and (tensor.op not in scheduled_ops):
                    if isinstance(tensor.op, tvm.te.ComputeOp):
                        traverse(tensor.op)
        elif "spatial_bitserial_conv_nchw" in op.tag or "spatial_bitserial_conv_nhwc" in op.tag:
            # Walk back through the compute DAG built by the compute functions:
            # unpack -> conv -> (data_vec, kernel_vec) -> bitpacked inputs.
            conv_out = op.input_tensors[0]
            kernel_vec = conv_out.op.input_tensors[1]
            kernel_q = kernel_vec.op.input_tensors[0]
            data_vec = conv_out.op.input_tensors[0]
            data_q = data_vec.op.input_tensors[0]
            data = data_q.op.input_tensors[0]
            data_pad = None
            if isinstance(data_q.op, tvm.te.ComputeOp) and "pad" in data_q.op.tag:
                # A pad stage was inserted between bitpack and data_vec.
                data_pad = data_q
                data_q = data
                data = data_q.op.input_tensors[0]
            if "QuantizeInput" in data.op.name:
                # Need to go up 1 further, from the combine in bitpack
                data = data.op.input_tensors[0]
            if "spatial_bitserial_conv_nchw" in op.tag:
                _schedule_bitserial_conv2d_nchw(
                    cfg,
                    s,
                    data_q,
                    data_pad,
                    data_vec,
                    kernel_q,
                    kernel_vec,
                    conv_out,
                    output,
                    outs[0],
                )
            elif "spatial_bitserial_conv_nhwc" in op.tag:
                _schedule_bitserial_conv2d_nhwc(
                    cfg,
                    s,
                    data_q,
                    data_pad,
                    data_vec,
                    kernel_q,
                    kernel_vec,
                    conv_out,
                    output,
                    outs[0],
                )
        scheduled_ops.append(op)
    traverse(outs[0].op)
    return s
def _schedule_bitserial_conv2d_nchw(
    cfg, s, data_q, data_pad, data_vec, kernel_q, kernel_vec, conv_out, output, last
):
    """Schedule the NCHW bitserial conv stages: padding/bitpack, kernel pack,
    the tensorized-free conv compute, and the tiled output unpack."""
    IB, _, CI, IH, IW = data_q.shape
    KB, CO, _, KH, KW = kernel_q.shape
    _, _, OH, OW = output.shape
    # Infer padding and stride
    if data_pad is None:
        padding = (0, 0)
        TH, TW = IH, IW
    else:
        _, _, _, TH, TW = data_pad.shape
        hpad = get_const_int((TH - IH) // 2)
        wpad = get_const_int((TW - IW) // 2)
        padding = (hpad, wpad)
        hstride = get_const_int((TH - KH) // (OH - 1))
        wstride = get_const_int((TW - KW) // (OW - 1))
        stride = (hstride, wstride)
    VC = cfg["tile_co"].size[-1]
    VH = cfg["tile_oh"].size[-1]
    VW = cfg["tile_ow"].size[-1]
    ##### Schedule Data padding, and bitpacking
    if data_pad is not None:
        s[data_pad].compute_inline()
    _, _, h, _, _, _, _ = s[data_vec].op.axis
    cfg.define_split("tile_ah", cfg.axis(h), num_outputs=2, max_factor=32)
    oh, ih = cfg["tile_ah"].apply(s, data_vec, h)
    if cfg["tile_ah"].size[1] == 1:
        oaxis = oh
        paxis = oh
    else:
        oaxis = oh
        paxis = ih
    s[data_vec].parallel(paxis)
    s[data_vec].pragma(oaxis, "parallel_launch_point")
    s[data_vec].pragma(paxis, "parallel_stride_pattern")
    s[data_vec].pragma(oaxis, "parallel_barrier_when_finish")
    ##### Schedule Kenerl bitpacking
    co, _, _, _, _, _ = s[kernel_vec].op.axis
    cfg.define_split("tile_bco", cfg.axis(co), num_outputs=2, max_factor=32)
    oco, ico = cfg["tile_bco"].apply(s, kernel_vec, co)
    if cfg["tile_bco"].size[1] == 1:
        oaxis = oco
        paxis = oco
    else:
        oaxis = oco
        paxis = ico
    s[kernel_vec].parallel(paxis)
    s[kernel_vec].pragma(oaxis, "parallel_launch_point")
    s[kernel_vec].pragma(paxis, "parallel_stride_pattern")
    s[kernel_vec].pragma(oaxis, "parallel_barrier_when_finish")
    ##### Schedule Convolution
    n, co, oh, ow, vh, vw, vc = s[conv_out].op.axis
    ci, dh, dw, ib, kb = s[conv_out].op.reduce_axis
    # s[conv_out].reorder(n, oh, ow, co, vh, vw, dh, dw, ci, vc, b1, b2)
    cfg["reorder_0"].apply(s, conv_out, [n, co, oh, ow, vc, vh, vw, dh, dw, kb, ib, ci])
    cfg["ann_reduce"].apply(
        s,
        conv_out,
        [kb, ib, dh, dw],
        axis_lens=[
            get_const_int(kb.dom.extent),
            get_const_int(ib.dom.extent),
            get_const_int(dh.dom.extent),
            get_const_int(dw.dom.extent),
        ],
        max_unroll=16,
        cfg=cfg,
    )
    s[conv_out].vectorize(vc)
    # # Schedule output
    n, co, h, w = s[last].op.axis
    co, vc = s[last].split(co, VC)
    oh, ow, vh, vw = s[last].tile(h, w, VH, VW)
    s[last].reorder(n, co, oh, ow, vh, vw, vc)
    if last != output:
        s[output].compute_inline()
        s[conv_out].compute_at(s[last], ow)
    oco, ico = cfg["tile_oh"].apply(s, last, co)  # reuse "tile_oh" for the parallel split
    # BUGFIX: the original code re-split `co` with an undefined variable `bc`
    # whenever the tuned "tile_oh" outer factor was > 1, raising NameError.
    # The split above already yields the axes needed; parallelize its outer
    # axis directly (identical behavior to the previously working branch).
    s[last].parallel(oco)
    return s
def _schedule_bitserial_conv2d_nhwc(
    cfg, s, data_q, data_pad, data_vec, kernel_q, kernel_vec, conv_out, output, last
):
    """Schedule the NHWC bitserial conv stages: padding/bitpack, kernel pack,
    the conv compute, and the tiled output unpack."""
    # no stride and padding info here
    _, IH, IW, CI, IB = data_q.shape
    KH, KW, _, CO, KB = kernel_q.shape
    _, OH, OW, _ = output.shape
    VC = cfg["tile_co"].size[-1]
    VH = cfg["tile_oh"].size[-1]
    VW = cfg["tile_ow"].size[-1]
    ##### Schedule data padding and packing
    if data_pad is not None:
        s[data_pad].compute_inline()
    _, h, _, _, _, _, _ = s[data_vec].op.axis
    cfg.define_split("tile_ah", cfg.axis(h), num_outputs=2, max_factor=32)
    oh, ih = cfg["tile_ah"].apply(s, data_vec, h)
    s[data_vec].parallel(oh)
    ##### Schedule kernel packing
    co, _, _, _, _, _ = s[kernel_vec].op.axis
    cfg.define_split("tile_bco", cfg.axis(co), num_outputs=2, max_factor=32)
    oco, ico = cfg["tile_bco"].apply(s, kernel_vec, co)
    s[kernel_vec].parallel(oco)
    ##### Schedule Convolution
    n, oh, ow, co, vh, vw, vc = s[conv_out].op.axis
    dh, dw, ci, b1, b2 = s[conv_out].op.reduce_axis
    # s[conv_out].reorder(n, oh, ow, co, vh, vw, dh, dw, ci, vc, b1, b2)
    cfg["reorder_0"].apply(s, conv_out, [n, oh, ow, co, vh, vw, dh, dw, ci, vc, b1, b2])
    cfg["ann_reduce"].apply(
        s,
        conv_out,
        [b1, b2, dh, dw],
        axis_lens=[
            get_const_int(b1.dom.extent),
            get_const_int(b2.dom.extent),
            get_const_int(dh.dom.extent),
            get_const_int(dw.dom.extent),
        ],
        max_unroll=16,
        cfg=cfg,
    )
    # Fully unroll the two bit-plane reduction axes.
    s[conv_out].unroll(b1)
    s[conv_out].unroll(b2)
    s[conv_out].vectorize(vc)
    # # Schedule output
    n, h, w, co = s[last].op.axis
    co, vc = s[last].split(co, VC)
    oh, ow, vh, vw = s[last].tile(h, w, VH, VW)
    s[last].reorder(n, oh, ow, co, vh, vw, vc)
    s[last].vectorize(vc)
    if last != output:
        s[output].compute_inline()
        s[conv_out].compute_at(s[last], ow)
    oho, iho = cfg["tile_oh"].apply(s, last, oh)  # reuse parameter
    s[last].parallel(oho)
    return s
| 18,896 | 32.564831 | 99 | py |
tvm | tvm-main/python/tvm/topi/x86/roi_align.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, undefined-variable, too-many-nested-blocks, too-many-branches, too-many-statements
"""Non-maximum suppression operator for intel cpu"""
import math
import tvm
from tvm.te import hybrid
from ..tensor import full
from ..utils import get_const_tuple
@hybrid.script
def roi_align_nchw_ir(
    data, rois, num_rois, w_pc, pos_pc, pooled_size, spatial_scale, sample_ratio, mode
):
    """Hybrid routing fo ROI align operator in NCHW layout.
    Parameters
    ----------
    data : tvm.te.Tensor or numpy NDArray
        4-D with shape [batch, channel, height, width]
    rois : tvm.te.Tensor or numpy NDArray
        2-D with shape [num_roi, 5]. The last dimension should be in format of
        [batch_index, w_start, h_start, w_end, h_end]
    num_rois : tvm.tir.IntImm or tvm.tir.Var
        Number of roi. We need to pass it in since hybrid script doesn't support
        binding variable to symbolic dim.
    w_pc : tvm.te.Tensor or numpy NDArray
        3-D weight pre-calculation buffer
    pos_pc : tvm.te.Tensor or numpy NDArray
        3-D position pre-calculation buffer
    pooled_size : tvm ConsExpr
        [out_height, out_width]
    spatial_scale : tvm.tir.const
        Ratio of input feature map height (or w) to raw image height (or w). Equals the reciprocal
        of total stride in convolutional layers, which should be in range (0.0, 1.0]
    sample_ratio : tvm.tir.const
        Sampling ratio of ROI align, using adaptive size by default.
    mode : tvm.tir.const
        Mode of RoiAlign. A value of 0 corrensponds to b'avg', while a value of 1 corresponds to
        b'max'.
    Returns
    -------
    output : tvm.te.Tensor or numpy NDArray
        4-D with shape [num_roi, channel, pooled_size, pooled_size]
    """
    channels = data.shape[1]
    height = data.shape[2]
    width = data.shape[3]
    pooled_size_h = pooled_size[0]
    pooled_size_w = pooled_size[1]
    output = output_tensor((num_rois, channels, pooled_size_h, pooled_size_w), data.dtype)
    for n in parallel(num_rois):
        # Map the ROI box from image coordinates into feature-map coordinates.
        roi_batch_index = int32(rois[n, 0])
        roi_start_w = rois[n, 1] * spatial_scale
        roi_start_h = rois[n, 2] * spatial_scale
        roi_end_w = rois[n, 3] * spatial_scale
        roi_end_h = rois[n, 4] * spatial_scale
        roi_h = max(roi_end_h - roi_start_h, 1.0)
        roi_w = max(roi_end_w - roi_start_w, 1.0)
        bin_h = roi_h / pooled_size_h
        bin_w = roi_w / pooled_size_w
        roi_bin_grid_h = sample_ratio
        roi_bin_grid_w = roi_bin_grid_h
        rounded_bin_h = int32(bin_h) * 1.0
        rounded_bin_w = int32(bin_w) * 1.0
        if sample_ratio <= 0:
            # Cannot use ceil function since hybrid script
            # doesn't support Call as indexing
            roi_bin_grid_h = int32(bin_h)
            roi_bin_grid_w = int32(bin_w)
            if rounded_bin_h < bin_h:
                roi_bin_grid_h += 1
            if rounded_bin_w < bin_w:
                roi_bin_grid_w += 1
        count = roi_bin_grid_h * roi_bin_grid_w
        # Pre-calculate indices and weights shared by all channels.
        # This is the key point of optimization.
        pre_calc_index = 0
        iy_upper = roi_bin_grid_h
        ix_upper = roi_bin_grid_w
        for ph in range(pooled_size_h):
            for pw in range(pooled_size_w):
                for iy in range(iy_upper):
                    yy = roi_start_h + ph * bin_h + (iy + 0.5) * bin_h / roi_bin_grid_h
                    for ix in range(ix_upper):
                        xx = roi_start_w + pw * bin_w + (ix + 0.5) * bin_w / roi_bin_grid_w
                        x = xx
                        y = yy
                        if y < -1.0 or y > height or x < -1.0 or x > width:
                            # Sample point falls outside the feature map:
                            # zero weights so it contributes nothing.
                            for i in range(4):
                                w_pc[n, pre_calc_index, i] = 0.0
                                pos_pc[n, pre_calc_index, i] = 0
                        else:
                            if y < 0.0:
                                y = 0.0
                            if x < 0.0:
                                x = 0.0
                            # Bilinear interpolation: 4 neighbor positions and
                            # their weights, clamped at the feature-map border.
                            y_low = int32(y)
                            x_low = int32(x)
                            x_high = x_low + 1
                            y_high = y_low + 1
                            if y_low >= height - 1:
                                y_high = height - 1
                                y_low = y_high
                                y = float32(y_low)
                            if x_low >= width - 1:
                                x_high = width - 1
                                x_low = x_high
                                x = float32(x_low)
                            ly = y - y_low
                            lx = x - x_low
                            hy = 1.0 - ly
                            hx = 1.0 - lx
                            w1 = hy * hx
                            w2 = hy * lx
                            w3 = ly * hx
                            w4 = ly * lx
                            pos_pc[n, pre_calc_index, 0] = x_low
                            pos_pc[n, pre_calc_index, 1] = x_high
                            pos_pc[n, pre_calc_index, 2] = y_low
                            pos_pc[n, pre_calc_index, 3] = y_high
                            w_pc[n, pre_calc_index, 0] = w1
                            w_pc[n, pre_calc_index, 1] = w2
                            w_pc[n, pre_calc_index, 2] = w3
                            w_pc[n, pre_calc_index, 3] = w4
                        pre_calc_index += 1
        for c in range(channels):
            # Reuse the precomputed weights/positions for every channel.
            pre_calc_index = 0
            for ph in range(pooled_size_h):
                for pw in range(pooled_size_w):
                    output_val = 0.0  # Avg mode
                    if mode == 1:  # Max mode
                        output_val = ninf("float32")
                    for iy in range(roi_bin_grid_h):
                        for ix in range(roi_bin_grid_w):
                            bilinear_val = (
                                w_pc[n, pre_calc_index, 0]
                                * data[
                                    roi_batch_index,
                                    c,
                                    pos_pc[n, pre_calc_index, 2],
                                    pos_pc[n, pre_calc_index, 0],
                                ]
                                + w_pc[n, pre_calc_index, 1]
                                * data[
                                    roi_batch_index,
                                    c,
                                    pos_pc[n, pre_calc_index, 2],
                                    pos_pc[n, pre_calc_index, 1],
                                ]
                                + w_pc[n, pre_calc_index, 2]
                                * data[
                                    roi_batch_index,
                                    c,
                                    pos_pc[n, pre_calc_index, 3],
                                    pos_pc[n, pre_calc_index, 0],
                                ]
                                + w_pc[n, pre_calc_index, 3]
                                * data[
                                    roi_batch_index,
                                    c,
                                    pos_pc[n, pre_calc_index, 3],
                                    pos_pc[n, pre_calc_index, 1],
                                ]
                            )
                            pre_calc_index += 1
                            if mode == 0:  # Avg mode
                                output_val += bilinear_val / count
                            if mode == 1:  # Max mode
                                output_val = max(output_val, bilinear_val)
                    output[n, c, ph, pw] = output_val
    return output
def roi_align_nchw(data, rois, pooled_size, spatial_scale, mode, sample_ratio=-1):
    """ROI align operator in NCHW layout.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, channel, height, width]

    rois : tvm.te.Tensor
        2-D with shape [num_roi, 5]. The last dimension should be in format of
        [batch_index, w_start, h_start, w_end, h_end]

    pooled_size : int or list/tuple of two ints
        output size, or [out_height, out_width]

    spatial_scale : float
        Ratio of input feature map height (or w) to raw image height (or w). Equals the reciprocal
        of total stride in convolutional layers, which should be in range (0.0, 1.0]

    mode : str or int
        Mode of RoiAlign. Should be b'max'/1 or b'avg'/0.

    sample_ratio : int
        Optional sampling ratio of ROI align, using adaptive size by default.

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [num_roi, channel, pooled_size, pooled_size]

    Raises
    ------
    ValueError
        If `mode` is not one of b'avg'/0 or b'max'/1.
    """
    if not isinstance(pooled_size, (tuple, list)):
        pooled_size = (pooled_size, pooled_size)
    # Pre-allocate intermediate buffers holding, for every sampling point, the
    # four bilinear weights (w_pc) and the four corner positions (pos_pc).
    if sample_ratio > 0:
        max_roi_bin_grid_w = max_roi_bin_grid_h = sample_ratio
    else:
        # Adaptive sampling: size the buffer for the worst-case grid per bin.
        _, _, height, width = get_const_tuple(data.shape)
        max_roi_bin_grid_h = math.ceil(height / pooled_size[0])
        max_roi_bin_grid_w = math.ceil(width / pooled_size[1])
    num_rois = rois.shape[0]
    max_pc_shape = (
        rois.shape[0],
        max_roi_bin_grid_h * max_roi_bin_grid_w * pooled_size[0] * pooled_size[1],
        4,
    )
    w_pc_buffer = full(max_pc_shape, data.dtype, 0)
    pos_pc_buffer = full(max_pc_shape, "int32", 0)
    pooled_size = tvm.runtime.convert(pooled_size)
    spatial_scale = tvm.tir.const(spatial_scale, "float32")
    sample_ratio = tvm.tir.const(sample_ratio, "int32")
    # Encode the mode as a float constant consumed by the IR builder.
    if mode in (b"avg", 0):
        mode = tvm.tir.const(0, dtype="float32")
    elif mode in (b"max", 1):
        mode = tvm.tir.const(1, dtype="float32")
    else:
        # BUGFIX: previously three separate arguments (value, format string,
        # value) were passed to ValueError, producing a garbled message.
        raise ValueError(f"Value {mode} passed in for mode not supported")
    return roi_align_nchw_ir(
        data,
        rois,
        num_rois,
        w_pc_buffer,
        pos_pc_buffer,
        pooled_size,
        spatial_scale,
        sample_ratio,
        mode,
    )
| 11,063 | 38.095406 | 162 | py |
tvm | tvm-main/python/tvm/topi/x86/conv3d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, too-many-locals
# pylint: disable=unused-argument, redefined-builtin, no-else-return
"""Conv3D operators"""
from collections import namedtuple
import tvm
from tvm import autotvm, te
from tvm.autotvm.task.space import OtherOptionEntity, SplitEntity
from tvm.target.x86 import get_simd_32bit_lanes
from ..nn.pad import pad
from ..nn.utils import get_pad_tuple3d, infer_pad3d
from ..utils import get_const_int, get_const_tuple, simplify, traverse_inline
# Plain record describing a conv3d workload: input/output dtypes, input
# spatial sizes (depth/height/width), channel counts, group count, kernel
# sizes, total padding per axis, and strides.  Produced by
# `_get_conv3d_workload` and consumed by `_fallback_schedule`.
Workload3D = namedtuple(
    "Workload",
    [
        "in_dtype",
        "out_dtype",
        "depth",
        "height",
        "width",
        "in_filter",
        "groups",
        "out_filter",
        "dkernel",
        "hkernel",
        "wkernel",
        "dpad",
        "hpad",
        "wpad",
        "dstride",
        "hstride",
        "wstride",
    ],
)
@autotvm.register_topi_compute("conv3d_ndhwc.x86")
def conv3d_ndhwc(cfg, data, kernel, strides, padding, dilation, groups, out_dtype):
    """3D convolution forward operator for the NDHWC layout.

    Parameters
    ----------
    data : tvm.te.Tensor
        5-D input with shape [batch, in_depth, in_height, in_width, in_channel].
    kernel : tvm.te.Tensor
        5-D filter with shape
        [kernel_depth, kernel_height, kernel_width, in_channels, out_channels].
    strides : int or a list/tuple of three ints
        Stride size, or [stride_depth, stride_height, stride_width].
    padding : int or a list/tuple of three ints
        Padding size, or [pad_depth, pad_height, pad_width].
    dilation : int or a list/tuple of three ints
        Dilation size, or [dilation_depth, dilation_height, dilation_width].
    groups : int
        Number of groups.

    Returns
    -------
    output : tvm.te.Tensor
        5-D with shape [batch, out_depth, out_height, out_width, out_channel].
    """
    layout = "NDHWC"
    if out_dtype is None:
        out_dtype = data.dtype
    # Normalize scalar strides/dilation to 3-tuples.
    if not isinstance(strides, (tuple, list)):
        strides = (strides, strides, strides)
    if not isinstance(dilation, (tuple, list)):
        dilation = (dilation, dilation, dilation)
    _create_tuning_space(cfg, data, kernel, strides, padding, dilation, groups, layout)
    if cfg.is_fallback:
        # No tuned record available: fill cfg with a heuristic default.
        _get_default_config(cfg, data, kernel, strides, padding, groups, out_dtype, layout)
    return _conv3d_ndhwc(cfg, data, kernel, strides, padding, dilation, groups, out_dtype)
@autotvm.register_topi_compute("conv3d_ncdhw.x86")
def conv3d_ncdhw(cfg, data, kernel, strides, padding, dilation, groups, out_dtype):
    """3D convolution forward operator for the NCDHW layout.

    Parameters
    ----------
    data : tvm.te.Tensor
        5-D input with shape [batch, in_channel, in_depth, in_height, in_width].
    kernel : tvm.te.Tensor
        5-D filter with shape
        [out_channels, in_channels, kernel_depth, kernel_height, kernel_width].
    strides : int or a list/tuple of three ints
        Stride size, or [stride_depth, stride_height, stride_width].
    padding : int or a list/tuple of three ints
        Padding size, or [pad_depth, pad_height, pad_width].
    dilation : int or a list/tuple of three ints
        Dilation size, or [dilation_depth, dilation_height, dilation_width].
    groups : int
        Number of groups.

    Returns
    -------
    output : tvm.te.Tensor
        5-D with shape [batch, out_channel, out_depth, out_height, out_width].
    """
    # NOTE(review): an "does not support groups" assert was previously present
    # but disabled here — grouped conv appears to be handled downstream.
    layout = "NCDHW"
    if out_dtype is None:
        out_dtype = data.dtype
    # Normalize scalar strides/dilation to 3-tuples.
    if not isinstance(strides, (tuple, list)):
        strides = (strides, strides, strides)
    if not isinstance(dilation, (tuple, list)):
        dilation = (dilation, dilation, dilation)
    _create_tuning_space(cfg, data, kernel, strides, padding, dilation, groups, layout)
    if cfg.is_fallback:
        # No tuned record available: fill cfg with a heuristic default.
        _get_default_config(cfg, data, kernel, strides, padding, groups, out_dtype, layout)
    return _conv3d_ncdhw(cfg, data, kernel, strides, padding, dilation, layout, groups, out_dtype)
@autotvm.register_topi_schedule("conv3d_ndhwc.x86")
def schedule_conv3d_ndhwc(cfg, outs):
    """TOPI schedule callback for conv3d in NDHWC layout.

    Parameters
    ----------
    outs : Array of Tensor
        The computation graph description of conv3d in the format of an
        array of tensors.

    Returns
    -------
    s : Schedule
        The computation schedule for conv3d.
    """
    sch = te.create_schedule([t.op for t in outs])

    def _callback(op):
        # Only the tagged conv3d output op is scheduled explicitly; other ops
        # are handled by traverse_inline.
        if "conv3d_ndhwc" not in op.tag:
            return
        output = op.output(0)
        conv_out = op.input_tensors[0]
        kernel_vec = conv_out.op.input_tensors[1]
        kernel = kernel_vec.op.input_tensors[0]
        if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
            sch[kernel].compute_inline()
        data_vec = conv_out.op.input_tensors[0]
        data = data_vec.op.input_tensors[0]
        data_pad = None
        if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
            data_pad = data
            data = data_pad.op.input_tensors[0]
        kd, kh, kw, i, o = get_const_tuple(kernel.shape)
        _schedule_conv3d_ndhwc(
            sch, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, outs[0]
        )

    traverse_inline(sch, outs[0].op, _callback)
    return sch
@autotvm.register_topi_schedule("conv3d_ncdhw.x86")
def schedule_conv3d_ncdhw(cfg, outs):
    """TOPI schedule callback for conv3d in NCDHW layout.

    Parameters
    ----------
    outs : Array of Tensor
        The computation graph description of conv3d in the format of an
        array of tensors.

    Returns
    -------
    s : Schedule
        The computation schedule for conv3d.
    """
    sch = te.create_schedule([t.op for t in outs])

    def _callback(op):
        # Only the tagged conv3d output op is scheduled explicitly; other ops
        # are handled by traverse_inline.
        if "conv3d_ncdhw" not in op.tag:
            return
        output = op.output(0)
        conv_out = op.input_tensors[0]
        kernel_vec = conv_out.op.input_tensors[1]
        kernel = kernel_vec.op.input_tensors[0]
        if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
            sch[kernel].compute_inline()
        data_vec = conv_out.op.input_tensors[0]
        data = data_vec.op.input_tensors[0]
        data_pad = None
        if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
            data_pad = data
            data = data_pad.op.input_tensors[0]
        kd, kh, kw, i, o = get_const_tuple(kernel.shape)
        _schedule_conv3d_ncdhw(
            sch, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, outs[0]
        )

    traverse_inline(sch, outs[0].op, _callback)
    return sch
def _conv3d_ndhwc(cfg, data, kernel, strides, padding, dilation, groups, out_dtype):
    """Compute conv3d (NDHWC) via channel-blocked packed layouts.

    Packs the (padded) input into a 6-D layout blocked by `tile_ic`, packs the
    kernel into a 7-D layout blocked by `tile_ic`/`tile_oc`, computes a blocked
    convolution, then unpacks back to NDHWC.  `cfg` supplies the block sizes.
    """
    out_dtype = data.dtype if out_dtype is None else out_dtype
    assert isinstance(dilation, int) or len(dilation) == 3
    if isinstance(dilation, int):
        dilation_d, dilation_h, dilation_w = (dilation, dilation, dilation)
    else:
        dilation_d, dilation_h, dilation_w = dilation
    DSTR, HSTR, WSTR = strides
    batch_size, in_depth, in_height, in_width, in_channel = get_const_tuple(data.shape)
    kernel_depth, kernel_height, kernel_width, _, num_filter = get_const_tuple(kernel.shape)
    assert in_channel % groups == 0, "input channels must be a multiple of group size"
    assert num_filter % groups == 0, "number of filters must be a multiple of group size"
    # Effective kernel extents once dilation is applied.
    dilated_kernel_d = (kernel_depth - 1) * dilation_d + 1
    dilated_kernel_h = (kernel_height - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_width - 1) * dilation_w + 1
    pad_front, pad_top, pad_left, pad_back, pad_down, pad_right = get_pad_tuple3d(
        padding, (dilated_kernel_d, dilated_kernel_h, dilated_kernel_w)
    )
    # Total padding per spatial axis.
    pad_d = pad_front + pad_back
    pad_h = pad_top + pad_down
    pad_w = pad_left + pad_right
    pad_depth = in_depth + pad_d
    pad_height = in_height + pad_h
    pad_width = in_width + pad_w
    out_depth = simplify((in_depth + pad_d - dilated_kernel_d) // DSTR + 1)
    out_height = simplify((in_height + pad_h - dilated_kernel_h) // HSTR + 1)
    out_width = simplify((in_width + pad_w - dilated_kernel_w) // WSTR + 1)
    # pack data: pad only when some padding is actually non-zero
    DOPAD = pad_d != 0 or pad_h != 0 or pad_w != 0
    if DOPAD:
        data_pad = pad(
            data,
            (0, pad_front, pad_top, pad_left, 0),
            (0, pad_back, pad_down, pad_right, 0),
            name="data_pad",
        )
    else:
        data_pad = data
    # fetch schedule: innermost block sizes for input/output channels
    ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
    assert groups == 1 or ic_bn <= groups
    assert groups == 1 or oc_bn <= groups
    # Blocked input layout: [N, C_chunk, D, H, c_block, W]
    shape = (batch_size, in_channel // ic_bn, pad_depth, pad_height, ic_bn, pad_width)
    data_vec = te.compute(
        shape, lambda n, C, d, h, c, w: data_pad[n, d, h, w, C * ic_bn + c], name="data_vec"
    )
    # Number of input-channel chunks per group, rounded up to cover all channels.
    ci_tile = in_channel // groups // ic_bn
    if ci_tile == 0 or ci_tile * ic_bn * groups < in_channel:
        ci_tile += 1
    # pack kernel: [O_chunk, I_chunk, KD, KH, KW, i_block, o_block]
    shape = (num_filter // oc_bn, ci_tile, kernel_depth, kernel_height, kernel_width, ic_bn, oc_bn)
    kernel_vec = te.compute(
        shape,
        lambda CO, CI, d, h, w, ci, co: kernel[d, h, w, CI * ic_bn + ci, CO * oc_bn + co],
        name="kernel_vec",
    )
    # convolution over the blocked layouts
    oshape = (batch_size, num_filter // oc_bn, out_depth, out_height, out_width, oc_bn)
    unpack_shape = (batch_size, out_depth, out_height, out_width, num_filter)
    ic = te.reduce_axis((0, in_channel // groups), name="ic")
    kh = te.reduce_axis((0, kernel_height), name="kh")
    kw = te.reduce_axis((0, kernel_width), name="kw")
    kd = te.reduce_axis((0, kernel_depth), name="kd")
    idxmod = tvm.tir.indexmod
    idxdiv = tvm.tir.indexdiv
    conv = te.compute(
        oshape,
        lambda n, oc_chunk, od, oh, ow, oc_block: te.sum(
            # The (// num_filter//groups * in_channel//groups) term maps the
            # output channel to the input-channel base of its group.
            data_vec[
                n,
                idxdiv(
                    (oc_chunk * oc_bn + oc_block) // (num_filter // groups) * (in_channel // groups)
                    + ic,
                    ic_bn,
                ),
                od * DSTR + kd * dilation_d,
                oh * HSTR + kh * dilation_h,
                idxmod(
                    (oc_chunk * oc_bn + oc_block) // (num_filter // groups) * (in_channel // groups)
                    + ic,
                    ic_bn,
                ),
                ow * WSTR + kw * dilation_w,
            ].astype(out_dtype)
            * kernel_vec[
                oc_chunk, idxdiv(ic, ic_bn), kd, kh, kw, idxmod(ic, ic_bn), oc_block
            ].astype(out_dtype),
            axis=[kd, kh, kw, ic],
        ),
        name="conv",
    )
    # Unpack the blocked result back to plain NDHWC.
    conv_unpacked = te.compute(
        unpack_shape,
        lambda n, d, h, w, c: conv[n, idxdiv(c, oc_bn), d, h, w, idxmod(c, oc_bn)].astype(
            out_dtype
        ),
        name="output_unpack",
        tag="conv3d_ndhwc",
    )
    return conv_unpacked
def _conv3d_ncdhw(cfg, data, kernel, strides, padding, dilation, layout, groups, out_dtype):
    """Compute conv3d (NCDHW) via channel-blocked packed layouts.

    Mirrors `_conv3d_ndhwc`: packs input and kernel into channel-blocked
    layouts using `cfg`'s `tile_ic`/`tile_oc` block sizes, computes a blocked
    convolution, then unpacks back to NCDHW.  `layout` is accepted for
    signature parity but not read here.
    """
    out_dtype = data.dtype if out_dtype is None else out_dtype
    assert isinstance(dilation, int) or len(dilation) == 3
    if isinstance(dilation, int):
        dilation_d, dilation_h, dilation_w = (dilation, dilation, dilation)
    else:
        dilation_d, dilation_h, dilation_w = dilation
    DSTR, HSTR, WSTR = strides
    batch_size, in_channel, in_depth, in_height, in_width = get_const_tuple(data.shape)
    num_filter, _, kernel_depth, kernel_height, kernel_width = get_const_tuple(kernel.shape)
    # Effective kernel extents once dilation is applied.
    dilated_kernel_d = (kernel_depth - 1) * dilation_d + 1
    dilated_kernel_h = (kernel_height - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_width - 1) * dilation_w + 1
    pad_front, pad_top, pad_left, pad_back, pad_down, pad_right = get_pad_tuple3d(
        padding, (dilated_kernel_d, dilated_kernel_h, dilated_kernel_w)
    )
    # Total padding per spatial axis.
    pad_d = pad_front + pad_back
    pad_h = pad_top + pad_down
    pad_w = pad_left + pad_right
    pad_depth = in_depth + pad_d
    pad_height = in_height + pad_h
    pad_width = in_width + pad_w
    out_depth = simplify((in_depth + pad_d - dilated_kernel_d) // DSTR + 1)
    out_height = simplify((in_height + pad_h - dilated_kernel_h) // HSTR + 1)
    out_width = simplify((in_width + pad_w - dilated_kernel_w) // WSTR + 1)
    # pack data: pad only when some padding is actually non-zero
    DOPAD = pad_d != 0 or pad_h != 0 or pad_w != 0
    if DOPAD:
        data_pad = pad(
            data,
            (0, 0, pad_front, pad_top, pad_left),
            (0, 0, pad_back, pad_down, pad_right),
            name="data_pad",
        )
    else:
        data_pad = data
    # fetch schedule: innermost block sizes for input/output channels
    ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
    # Blocked input layout: [N, C_chunk, D, H, c_block, W]
    shape = (batch_size, in_channel // ic_bn, pad_depth, pad_height, ic_bn, pad_width)
    data_vec = te.compute(
        shape, lambda n, C, d, h, c, w: data_pad[n, C * ic_bn + c, d, h, w], name="data_vec"
    )
    # Number of input-channel chunks per group, rounded up to cover all channels.
    ci_tile = in_channel // groups // ic_bn
    if ci_tile == 0 or ci_tile * ic_bn * groups < in_channel:
        ci_tile += 1
    # pack kernel: [O_chunk, I_chunk, KD, KH, KW, i_block, o_block]
    shape = (num_filter // oc_bn, ci_tile, kernel_depth, kernel_height, kernel_width, ic_bn, oc_bn)
    kernel_vec = te.compute(
        shape,
        lambda CO, CI, d, h, w, ci, co: kernel[CO * oc_bn + co, CI * ic_bn + ci, d, h, w],
        name="kernel_vec",
    )
    # convolution over the blocked layouts
    oshape = (batch_size, num_filter // oc_bn, out_depth, out_height, out_width, oc_bn)
    unpack_shape = (batch_size, num_filter, out_depth, out_height, out_width)
    ic = te.reduce_axis((0, in_channel // groups), name="ic")
    kh = te.reduce_axis((0, kernel_height), name="kh")
    kw = te.reduce_axis((0, kernel_width), name="kw")
    kd = te.reduce_axis((0, kernel_depth), name="kd")
    idxmod = tvm.tir.indexmod
    idxdiv = tvm.tir.indexdiv
    conv = te.compute(
        oshape,
        lambda n, oc_chunk, od, oh, ow, oc_block: te.sum(
            # The (// num_filter//groups * in_channel//groups) term maps the
            # output channel to the input-channel base of its group.
            data_vec[
                n,
                idxdiv(
                    (oc_chunk * oc_bn + oc_block) // (num_filter // groups) * (in_channel // groups)
                    + ic,
                    ic_bn,
                ),
                od * DSTR + kd * dilation_d,
                oh * HSTR + kh * dilation_h,
                idxmod(
                    (oc_chunk * oc_bn + oc_block) // (num_filter // groups) * (in_channel // groups)
                    + ic,
                    ic_bn,
                ),
                ow * WSTR + kw * dilation_w,
            ].astype(out_dtype)
            * kernel_vec[
                oc_chunk, idxdiv(ic, ic_bn), kd, kh, kw, idxmod(ic, ic_bn), oc_block
            ].astype(out_dtype),
            axis=[ic, kd, kh, kw],
        ),
        name="conv",
    )
    # Unpack the blocked result back to plain NCDHW.
    conv_unpacked = te.compute(
        unpack_shape,
        lambda n, c, d, h, w: conv[n, idxdiv(c, oc_bn), d, h, w, idxmod(c, oc_bn)].astype(
            out_dtype
        ),
        name="output_unpack",
        tag="conv3d_ncdhw",
    )
    return conv_unpacked
def _create_tuning_space(cfg, data, kernel, strides, padding, dilation, groups, layout):
    """Define the autotvm tuning knobs (channel/width splits, kw unroll)
    from the conv3d workload shapes.  `dilation` and `groups` are accepted
    for signature parity but not read here."""
    data_shape = get_const_tuple(data.shape)
    kernel_shape = get_const_tuple(kernel.shape)
    if layout == "NDHWC":
        _, in_d, in_h, in_w, in_c = data_shape
        k_d, k_h, k_w, _, out_c = kernel_shape
    elif layout == "NCDHW":
        _, in_c, in_d, in_h, in_w = data_shape
        out_c, _, k_d, k_h, k_w = kernel_shape
    else:
        raise ValueError(f"Not support this layout {layout} with schedule template.")
    # pad order: front, top, left, back, down(bottom), right
    pad_f, pad_t, pad_l, pad_b, pad_d, pad_r = get_pad_tuple3d(padding, (k_d, k_h, k_w))
    if isinstance(strides, (tuple, list)):
        s_d, s_h, s_w = strides
    else:
        s_d = s_h = s_w = strides
    # Output spatial extents (depth/height computed for completeness;
    # only the width feeds a tuning knob).
    out_d = (in_d - k_d + pad_f + pad_b) // s_d + 1
    out_h = (in_h - k_h + pad_t + pad_d) // s_h + 1
    out_w = (in_w - k_w + pad_l + pad_r) // s_w + 1
    # Create schedule config
    cfg.define_split("tile_ic", in_c, num_outputs=2)
    cfg.define_split("tile_oc", out_c, num_outputs=2)
    cfg.define_split("tile_ow", out_w, num_outputs=2, filter=lambda entity: entity.size[-1] <= 8)
    cfg.define_knob("unroll_kw", [True, False])
def _get_default_config(cfg, data, kernel, strides, padding, groups, out_dtype, layout):
    """Populate `cfg` with a heuristic fallback schedule for the workload.

    Dynamic (tvm.tir.Var) data dimensions are pinned to 1 so a static
    workload record can be built.
    """
    if layout not in ["NDHWC", "NCDHW"]:
        raise ValueError(f"Layout {layout} is not supported")
    static_data_shape = [
        1 if isinstance(dim, tvm.tir.Var) else dim for dim in get_const_tuple(data.shape)
    ]
    data = te.placeholder(static_data_shape, dtype=data.dtype)
    wkl = _get_conv3d_workload(data, kernel, strides, padding, groups, out_dtype, layout)
    _fallback_schedule(cfg, wkl)
def _get_conv3d_workload(data, kernel, stride, padding, groups, out_dtype, data_layout="NCDHW"):
    """Extract a `Workload3D` record from conv3d arguments.

    Parameters
    ----------
    data : tvm.te.Tensor
        5-D input tensor laid out per `data_layout`.
    kernel : tvm.te.Tensor
        5-D kernel tensor matching `data_layout`.
    stride : int or list/tuple of three ints
        Stride size, or [stride_depth, stride_height, stride_width].
    padding : int or list/tuple
        Padding specification understood by `get_pad_tuple3d`.
    groups : int
        Number of groups.
    out_dtype : str
        Output data type recorded in the workload.
    data_layout : str
        Either "NCDHW" or "NDHWC".  BUGFIX: the default used to be "NCHW",
        which is not an accepted value, so calling without an explicit
        layout always raised ValueError.

    Returns
    -------
    Workload3D
        The workload description consumed by `_fallback_schedule`.
    """
    if data_layout == "NCDHW":
        _, CI, ID, IH, IW = get_const_tuple(data.shape)
        CO, CIG, KD, KH, KW = get_const_tuple(kernel.shape)
    elif data_layout == "NDHWC":
        _, ID, IH, IW, CI = get_const_tuple(data.shape)
        KD, KH, KW, CIG, CO = get_const_tuple(kernel.shape)
    else:
        raise ValueError(f"not support this layout {data_layout} yet")
    pad_front, pad_top, pad_left, pad_back, pad_down, pad_right = get_pad_tuple3d(
        padding, (get_const_int(KD), get_const_int(KH), get_const_int(KW))
    )
    # Total padding per spatial axis.
    DPAD = pad_front + pad_back
    HPAD = pad_top + pad_down
    WPAD = pad_left + pad_right
    if isinstance(stride, (tuple, list)):
        DSTR, HSTR, WSTR = stride
    else:
        DSTR, HSTR, WSTR = stride, stride, stride
    # Mixed dtypes are only allowed for the uint8-data / int8-kernel pair.
    assert (data.dtype == kernel.dtype) or (
        data.dtype == "uint8" and kernel.dtype == "int8"
    ), f"Do not support inputs with different data types now. {data.dtype} vs. {kernel.dtype}"
    return Workload3D(
        data.dtype,
        out_dtype,
        ID,
        IH,
        IW,
        CI,
        groups,
        CO,
        KD,
        KH,
        KW,
        DPAD,
        HPAD,
        WPAD,
        DSTR,
        HSTR,
        WSTR,
    )
def _fallback_schedule(cfg, wkl):
    """Fill `cfg` with a reasonable default split when no tuned config exists.

    Picks the largest divisor of the output-channel count up to the SIMD
    width as the channel block, the largest divisor of that for the input
    block, and the largest divisor of the output width up to 7 as the
    register tile.  Each search always terminates because 1 divides anything.
    """
    simd_width = get_simd_32bit_lanes()
    out_width = (wkl.width + 2 * wkl.wpad - wkl.wkernel) // wkl.wstride + 1
    oc_bn = next(bn for bn in range(simd_width, 0, -1) if wkl.out_filter % bn == 0)
    ic_bn = next(bn for bn in range(oc_bn, 0, -1) if wkl.in_filter % bn == 0)
    reg_n = next(n for n in range(7, 0, -1) if out_width % n == 0)
    cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
    cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
    cfg["tile_ow"] = SplitEntity([out_width // reg_n, reg_n])
    cfg["unroll_kw"] = OtherOptionEntity(False)
def _schedule_conv3d_ndhwc(s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, last):
    """Apply the NDHWC conv3d schedule: parallelize the packing stages,
    tile/vectorize the blocked convolution, and fuse the unpack into the
    final output.  Mutates schedule `s` in place and returns it."""
    # fetch schedule knobs (channel blocks, width register tile, kw unroll)
    ic_bn, oc_bn, reg_n, unroll_kw = (
        cfg["tile_ic"].size[-1],
        cfg["tile_oc"].size[-1],
        cfg["tile_ow"].size[-1],
        cfg["unroll_kw"].val,
    )
    # get padding size
    padding = infer_pad3d(data, data_pad, "NDHWC")
    DPAD, HPAD, WPAD = padding
    DOPAD = DPAD != 0 or HPAD != 0 or WPAD != 0
    A, W = data, kernel_vec
    A0, A1 = data_pad, data_vec
    # schedule data: inline the pad stage and parallelize the data packing
    if DOPAD:
        s[A0].compute_inline()
    batch, ic_chunk, idd, ih, ic_block, iw = s[A1].op.axis
    parallel_axis = s[A1].fuse(batch, ic_chunk, idd, ih)
    s[A1].parallel(parallel_axis)
    # schedule kernel pack: vectorize the innermost oc block when it is wide
    oc_chunk, ic_chunk, od, oh, ow, ic_block, oc_block = s[W].op.axis
    s[W].reorder(oc_chunk, od, oh, ic_chunk, ow, ic_block, oc_block)
    if oc_bn > 1:
        s[W].vectorize(oc_block)
    parallel_axis = s[W].fuse(oc_chunk, od, oh)
    s[W].parallel(parallel_axis)
    # schedule conv: write through a global cache stage CC
    C, O0, O = conv_out, output, last
    CC = s.cache_write(C, "global")
    _, oc_chunk, od, oh, ow, oc_block = s[C].op.axis
    ow_chunk, ow_block = s[C].split(ow, factor=reg_n)
    s[C].reorder(oc_chunk, od, oh, ow_chunk, ow_block, oc_block)
    s[C].fuse(oc_chunk, od, oh)
    s[C].vectorize(oc_block)
    s[CC].compute_at(s[C], ow_chunk)
    _, oc_chunk, od, oh, ow, oc_block = s[CC].op.axis
    # NDHWC reduce-axis order is (kd, kh, kw, ic) — see _conv3d_ndhwc.
    kd, kh, kw, ic = s[CC].op.reduce_axis
    ow_chunk, ow_block = s[CC].split(ow, factor=reg_n)
    ic_chunk, ic_block = s[CC].split(ic, factor=ic_bn)
    if unroll_kw:
        # Hoist ic_block above kw so the unrolled kw loop is innermost-but-two.
        s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kd, kh, ic_block, kw, ow_block, oc_block)
        s[CC].unroll(kw)
    else:
        s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kd, kh, kw, ic_block, ow_block, oc_block)
    s[CC].fuse(oc_chunk, od, oh)
    s[CC].vectorize(oc_block)
    s[CC].unroll(ow_block)
    if O0 != O:
        s[O0].compute_inline()
    # unpacking: tile the final output the same way and compute conv at it
    batch, od, oh, ow, oc = s[O].op.axis
    ow_chunk, ow_block = s[O].split(ow, factor=reg_n)
    oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
    s[O].reorder(oc_chunk, od, oh, ow_chunk, ow_block, oc_block)
    parallel_axis = s[O].fuse(batch, oc_chunk, od, oh)
    s[C].compute_at(s[O], parallel_axis)
    s[O].vectorize(oc_block)
    s[O].parallel(parallel_axis)
    return s
def _schedule_conv3d_ncdhw(s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, last):
    """Apply the NCDHW conv3d schedule; mirrors `_schedule_conv3d_ndhwc`
    except for the reduce-axis order and the unpack axis layout.
    Mutates schedule `s` in place and returns it."""
    # fetch schedule knobs (channel blocks, width register tile, kw unroll)
    ic_bn, oc_bn, reg_n, unroll_kw = (
        cfg["tile_ic"].size[-1],
        cfg["tile_oc"].size[-1],
        cfg["tile_ow"].size[-1],
        cfg["unroll_kw"].val,
    )
    # get padding size
    padding = infer_pad3d(data, data_pad, "NCDHW")
    DPAD, HPAD, WPAD = padding
    DOPAD = DPAD != 0 or HPAD != 0 or WPAD != 0
    A, W = data, kernel_vec
    A0, A1 = data_pad, data_vec
    # schedule data: inline the pad stage and parallelize the data packing
    if DOPAD:
        s[A0].compute_inline()
    batch, ic_chunk, idd, ih, ic_block, iw = s[A1].op.axis
    parallel_axis = s[A1].fuse(batch, ic_chunk, idd, ih)
    s[A1].parallel(parallel_axis)
    # schedule kernel pack: vectorize the innermost oc block when it is wide
    oc_chunk, ic_chunk, od, oh, ow, ic_block, oc_block = s[W].op.axis
    s[W].reorder(oc_chunk, od, oh, ic_chunk, ow, ic_block, oc_block)
    if oc_bn > 1:
        s[W].vectorize(oc_block)
    parallel_axis = s[W].fuse(oc_chunk, od, oh)
    s[W].parallel(parallel_axis)
    # schedule conv: write through a global cache stage CC
    C, O0, O = conv_out, output, last
    CC = s.cache_write(C, "global")
    _, oc_chunk, od, oh, ow, oc_block = s[C].op.axis
    ow_chunk, ow_block = s[C].split(ow, factor=reg_n)
    s[C].reorder(oc_chunk, od, oh, ow_chunk, ow_block, oc_block)
    s[C].fuse(oc_chunk, od, oh)
    s[C].vectorize(oc_block)
    s[CC].compute_at(s[C], ow_chunk)
    _, oc_chunk, od, oh, ow, oc_block = s[CC].op.axis
    # NCDHW reduce-axis order is (ic, kd, kh, kw) — see _conv3d_ncdhw.
    ic, kd, kh, kw = s[CC].op.reduce_axis
    ow_chunk, ow_block = s[CC].split(ow, factor=reg_n)
    ic_chunk, ic_block = s[CC].split(ic, factor=ic_bn)
    if unroll_kw:
        # Hoist ic_block above kw so the unrolled kw loop is innermost-but-two.
        s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kd, kh, ic_block, kw, ow_block, oc_block)
        s[CC].unroll(kw)
    else:
        s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kd, kh, kw, ic_block, ow_block, oc_block)
    s[CC].fuse(oc_chunk, od, oh)
    s[CC].vectorize(oc_block)
    s[CC].unroll(ow_block)
    if O0 != O:
        s[O0].compute_inline()
    # unpacking: tile the final output the same way and compute conv at it
    batch, oc, od, oh, ow = s[O].op.axis
    ow_chunk, ow_block = s[O].split(ow, factor=reg_n)
    oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
    s[O].reorder(oc_chunk, od, oh, ow_chunk, ow_block, oc_block)
    parallel_axis = s[O].fuse(batch, oc_chunk, od, oh)
    s[C].compute_at(s[O], parallel_axis)
    s[O].vectorize(oc_block)
    s[O].parallel(parallel_axis)
    return s
| 24,797 | 33.731092 | 100 | py |
tvm | tvm-main/python/tvm/topi/x86/dense_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Dense alter op functions for x86"""
import tvm
from tvm import autotvm, relay, te
from tvm.target.x86 import target_has_amx, target_has_avx512
from .. import nn
from ..nn import dense_alter_layout
from ..utils import get_const_tuple
from .dense import _default_dense_pack_config
def check_int8_applicable(x, y, allow_padding=False):
    """Return whether the int8 dense/batch_matmul path can handle `x` and `y`.

    Requires an AVX-512 or AMX capable target, int8-family dtypes on both
    operands, and (unless `allow_padding`) weight dims divisible by the
    16x4 instruction tile.
    """
    mcpu = tvm.target.Target.current().mcpu
    # TODO(vvchernov): may be also target_has_avx2 or lower?
    if not (target_has_avx512(mcpu) or target_has_amx(mcpu)):
        return False
    if "int8" not in x.dtype or "int8" not in y.dtype:
        return False
    return allow_padding or (y.shape[-2] % 16 == 0 and y.shape[-1] % 4 == 0)
@dense_alter_layout.register(["cpu", "arm_cpu"])
def _alter_dense_layout(attrs, inputs, tinfos, out_type):
    """Rewrite nn.dense into contrib_dense_pack with a pre-packed weight
    layout when profitable; returns None to keep the original op."""
    target = tvm.target.Target.current(allow_none=False)
    dispatch_ctx = autotvm.task.DispatchContext.current
    data_tensor, weight_tensor = tinfos
    out_dtype = out_type.dtype
    M, K = get_const_tuple(data_tensor.shape)
    N, _ = get_const_tuple(weight_tensor.shape)
    # int8 fast path: fixed NC16n4c weight packing, no autotvm lookup needed.
    if check_int8_applicable(data_tensor, weight_tensor) and data_tensor.dtype == "uint8":
        weight_layout = "NC16n4c"
        return relay.nn.contrib_dense_pack(inputs[0], inputs[1], weight_layout, None, out_dtype)
    _, outs = relay.backend.te_compiler.select_implementation(
        relay.op.get("nn.dense"), attrs, tinfos, out_type, target
    )
    workload = autotvm.task.get_workload(outs)
    if workload:
        cfg = dispatch_ctx.query(target, workload)
        topi_impl = workload[0]
        if topi_impl == "dense_pack.x86":
            if cfg.is_fallback:
                _default_dense_pack_config(cfg, M, N, K)
            # Pack width comes from the tuned (or default) tile_x split.
            packw_bn = cfg["tile_x"].size[-1]
            weight_layout = f"NC{packw_bn}n"
            new_weight = te.placeholder((N // packw_bn, K, packw_bn), dtype=weight_tensor.dtype)
            # Relay dense doesn't have bias.
            new_workload = autotvm.task.args_to_workload(
                [data_tensor, new_weight, None, out_dtype], topi_impl
            )
            # Register the packed-weight workload so later queries hit this cfg.
            dispatch_ctx.update(target, new_workload, cfg)
            return relay.nn.contrib_dense_pack(inputs[0], inputs[1], weight_layout, None, out_dtype)
    return None
def int8_int8_legalize(inputs, arg_types, op, attrs, need_expand=False):
    """Legalizes s8, s8 -> s32 GEMM op for VNNI.

    Shifts the s8 activation into u8 by adding 128 and subtracts the
    compensation term (128 * row-sums of the weight) from the result, so the
    u8 x s8 instruction path produces the same s32 values.  Also zero-pads
    the operands up to the 16x4 instruction tile when their shapes are not
    divisible, slicing the padding back off the output.  Returns None when
    the int8 path does not apply.
    """
    if (
        check_int8_applicable(arg_types[0], arg_types[1], allow_padding=True)
        and arg_types[0].dtype == "int8"
    ):
        x, y = inputs
        # s8 -> u8 shift: x_u8 = x_s8 + 128.
        x = relay.cast(x, "int32")
        x = relay.add(x, relay.const(128, "int32"))
        x = relay.cast(x, "uint8")
        # Compensation: (x + 128) @ y^T = x @ y^T + 128 * sum(y, axis=-1).
        adjust_shift = relay.const(128, "int32") * relay.sum(relay.cast(y, "int32"), axis=[-1])
        if need_expand:
            # batch_matmul output has an extra M axis to broadcast over.
            adjust_shift = relay.expand_dims(adjust_shift, axis=1)
        analyzer = tvm.arith.Analyzer()
        x_shape = arg_types[0].shape
        y_shape = arg_types[1].shape
        # Instruction tile: 16 output rows x 4-wide reduction.
        inst_n = 16
        inst_k = 4
        pad_n = analyzer.simplify((inst_n - y_shape[-2] % inst_n) % inst_n)
        pad_k = analyzer.simplify((inst_k - y_shape[-1] % inst_k) % inst_k)
        if pad_k != 0 or pad_n != 0:
            ndim = len(x_shape)
            # NOTE(review): unpadded_dims is computed but never used.
            unpadded_dims = [(0, 0)] * (ndim - 2)
            padding_y = [(0, 0)] * (len(y_shape) - 2) + [(0, pad_n), (0, pad_k)]
            padded_y = relay.nn.pad(y, pad_width=padding_y, pad_value=0)
            if pad_k != 0:
                # Reduction axis padded on both operands; N padding only on y.
                padding_x = [(0, 0)] * (len(x_shape) - 1) + [(0, pad_k)]
                padded_x = relay.nn.pad(x, pad_width=padding_x, pad_value=0)
            else:
                padded_x = x
            out = op(padded_x, padded_y, **attrs)
            if pad_n != 0:
                # Slice the padded output rows back off (slice_mode="size").
                begin = [0] * len(x_shape)
                end = x_shape[:-2] + [x_shape[-2], y_shape[-2]]
                out = relay.strided_slice(out, begin, end, slice_mode="size")
        else:
            out = op(x, y, **attrs)
        return relay.subtract(out, adjust_shift)
    return None
@nn.dense_legalize.register("cpu")
def _dense_legalize(attrs, inputs, arg_types):
    """Legalizes s8, s8 -> s32 dense for VNNI.

    Thin wrapper delegating to `int8_int8_legalize`; returns None when the
    int8 path does not apply (keeping the original op).
    """
    return int8_int8_legalize(inputs, arg_types, relay.nn.dense, attrs)
@nn.batch_matmul_legalize.register("cpu")
def _batch_matmul_legalize(attrs, inputs, arg_types):
    """Legalizes s8, s8 -> s32 batch_matmul for VNNI.

    Only the NT layout (A not transposed, B transposed) is rewritten; any
    other layout is left untouched by returning None.
    """
    if not attrs["transpose_a"] and attrs["transpose_b"]:
        return int8_int8_legalize(inputs, arg_types, relay.nn.batch_matmul, attrs, need_expand=True)
    return None
| 5,433 | 38.664234 | 100 | py |
tvm | tvm-main/python/tvm/topi/x86/sparse.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""sparse_dense schedule on x86"""
from functools import partial, reduce
from tvm import autotvm, te, tir
from tvm.target.x86 import get_simd_32bit_lanes
from ..transform import reshape
from ..utils import get_const_int, traverse_inline
def schedule_sparse_dense(outs):
    """Create schedule for sparse dense.

    Parameters
    ----------
    outs : Array of Tensor
        The computation graph description of sparse_dense in the format of an
        array of tensors.

    Returns
    -------
    s : Schedule
        The computation schedule for sparse_dense.
    """
    s = te.create_schedule([x.op for x in outs])

    def _callback(op):
        simd_width = get_simd_32bit_lanes()
        # BUGFIX: each branch previously compared against the same tag twice
        # (sp_lhs_csrmm or sp_lhs_csrmm / sp_rhs_bsrmm or sp_rhs_bsrmm), so
        # the rhs-csrmm and lhs-bsrmm variants were never scheduled even
        # though the inner assert below expects both bsrmm block tags.
        if op.tag in ("sparse_dense_sp_lhs_csrmm", "sparse_dense_sp_rhs_csrmm"):
            (y_o, y_i) = s[op].split(s[op].op.axis[1], 2)
            fused = s[op].fuse(s[op].op.axis[0], y_o)
            s[op].parallel(fused)
            s[op].vectorize(y_i)
        elif op.tag in ("sparse_dense_sp_rhs_bsrmm", "sparse_dense_sp_lhs_bsrmm"):
            y_bsrmm = op.input_tensors[0]
            assert (
                y_bsrmm.op.tag == "sparse_dense_sp_rhs_bsrmm_block"
                or y_bsrmm.op.tag == "sparse_dense_sp_lhs_bsrmm_block"
            )
            y_reshape = op
            (m, num_blocks, b_r) = s[y_bsrmm].op.axis
            bs_r = get_const_int(b_r.dom.extent)
            (elem_idx, c) = s[y_bsrmm].op.reduce_axis
            s[y_bsrmm].reorder(num_blocks, m, elem_idx, b_r, c)
            s[y_bsrmm].vectorize(b_r)
            (m_o, n_o) = s[y_reshape].op.axis
            (noo, noi) = s[y_reshape].split(n_o, bs_r)
            s[y_bsrmm].compute_at(s[y_reshape], noi)
            s[y_reshape].vectorize(noi)
            if op != s[outs[0]].op:
                # Fuse the reshape into the consumer and parallelize there.
                (y_o, y_i) = s[outs[0].op].split(s[outs[0].op].op.axis[1], 2 * simd_width)
                s[y_reshape].compute_at(s[outs[0]], y_o)
                s[outs[0].op].parallel(y_o)
                s[outs[0].op].vectorize(y_i)
            else:
                m_o_noo = s[y_reshape].fuse(m_o, noo)
                s[y_reshape].parallel(m_o_noo)

    traverse_inline(s, outs[0].op, _callback)
    return s
@autotvm.register_topi_compute("conv3x3_spNHWC.x86")
def spconv2d_3x3_nhwc(cfg, data, wdat, wind, wptr, layout="NHWC"):
    """Sparse Conv2d 3x3 compute (NHWC).

    Lowers the convolution to im2col followed by a sparse (BSR-style,
    judging by the wdat/wind/wptr triple — values, column indices, row
    pointers) matrix product.  Assumes "same" padding (output has the input's
    spatial size; border taps are zero-filled).
    """
    assert layout == "NHWC"
    nsamples, imh, imw, chanin = [i.value for i in data.shape]
    nelems, bsrr, bsrc = [i.value for i in wdat.shape]
    # Number of block-rows * block size gives the output channel count.
    chanout = (wptr.shape[0].value - 1) * bsrr
    imglen, chanlen = nsamples * imh * imw, 9 * chanin
    cfg.define_split("tile_y", imglen, num_outputs=3)
    cfg.define_split("tile_x", chanout // bsrr, num_outputs=2)
    cfg.add_flop(imglen * (nelems * bsrc * bsrr * 2 - chanout))
    if cfg.is_fallback:
        cfg["tile_y"] = autotvm.task.space.SplitEntity([-1, 160, 8])
        cfg["tile_x"] = autotvm.task.space.SplitEntity([-1, 4])
    # Successively split a flat index by the given extents (inverse of ravel).
    idxsplit = lambda x, y: reduce(lambda a, b: a[:-1] + [a[-1] % b, a[-1] // b], y, [x])
    @partial(te.compute, (imglen, chanlen), name="Im2Col")
    def im2col(row, col):
        # row -> (n, h, w); col -> (kh, kw, c); taps outside the image are 0.
        j_w, j_h, j_n = idxsplit(row, [imw, imh])
        j_c, k_w, k_h = idxsplit(col, [chanin, 3])
        i_h, i_w = j_h + k_h - 1, j_w + k_w - 1
        return tir.if_then_else(
            tir.all(i_h >= 0, i_h < imh, i_w >= 0, i_w < imw), data[j_n, i_h, i_w, j_c], 0
        )
    @partial(te.compute, (imglen, chanout // bsrr, bsrr, bsrc), name="CC")
    def matmul(drow, wrow, brow, bcol):
        # Reduce over the nonzero blocks of weight block-row `wrow`.
        row_start, row_end = wptr[wrow], wptr[wrow + 1]
        elem_idx = te.reduce_axis((0, row_end - row_start), name="elem_idx")
        elem = row_start + elem_idx
        return te.sum(
            im2col[drow, wind[elem] * bsrc + bcol] * wdat[elem, brow, bcol], axis=elem_idx
        )
    # Sum the per-block-column partial products into the final output.
    sum_bsrc = te.reduce_axis((0, bsrc), name="k")
    ret = te.compute(
        (imglen, chanout),
        lambda y, x: te.sum(matmul[y, x // bsrr, x % bsrr, sum_bsrc], axis=sum_bsrc),
        name="C",
        tag="conv3x3_spNHWC",
    )
    return reshape(ret, (nsamples, imh, imw, chanout))
@autotvm.register_topi_schedule("conv3x3_spNHWC.x86")
def schedule_spconv2d_3x3_nhwc(cfg, outs):
    """Sparse Conv2d 3x3 schedule (NHWC).

    Tiles the output with cfg's tile_y/tile_x splits, unrolls/vectorizes the
    block dimensions, and computes the im2col and sparse-matmul stages at the
    corresponding tiles.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])

    def _callback(op):
        if op.tag == "conv3x3_spNHWC":
            (matmul,) = op.input_tensors
            # wptr, wind, im2col, wdat
            _, _, im2col, _ = matmul.op.input_tensors
            (data,) = im2col.op.input_tensors
            bsrr = matmul.shape[-2].value
            chanin = data.shape[-1].value
            # Tile the output: y by the tuned split, x by the block row size.
            mm_y, mm_x = s[op].op.axis
            y_t, y_o, y_i = cfg["tile_y"].apply(s, op, mm_y)
            x_o, x_i = s[op].split(mm_x, factor=bsrr)
            x_t, x_o = cfg["tile_x"].apply(s, op, x_o)
            (sum_ax,) = s[op].op.reduce_axis
            s[op].reorder(y_t, x_t, y_o, x_o, y_i, x_i, sum_ax)
            s[op].unroll(sum_ax)
            s[op].vectorize(x_i)
            s[op].unroll(y_i)
            # Sparse matmul stage computed per output x-tile.
            s[matmul].compute_at(s[op], x_o)
            y_i, x_i, bsrr, bsrc = s[matmul].op.axis
            (sum_ax,) = s[matmul].op.reduce_axis
            s[matmul].reorder(x_i, sum_ax, y_i, bsrr, bsrc)
            s[matmul].unroll(bsrc)
            s[matmul].vectorize(bsrr)
            s[matmul].unroll(y_i)
            # im2col computed per output y-tile; vectorize across channels.
            s[im2col].compute_at(s[op], y_o)
            y_i, sum_ax = s[im2col].op.axis
            _, k_i = s[im2col].split(sum_ax, factor=chanin)
            s[im2col].vectorize(k_i)

    traverse_inline(s, outs[0].op, _callback)
    return s
@autotvm.register_topi_compute("conv3x3_spNCHW.x86")
def spconv2d_3x3_nchw(cfg, data, wdat, wind, wptr, layout="NCHW"):
    """Sparse Conv2d 3x3 compute (NCHW).

    Parameters
    ----------
    cfg : ConfigSpace
        AutoTVM config entity; tiling splits are defined here.
    data : te.Tensor
        4-D input in NCHW layout.
    wdat, wind, wptr : te.Tensor
        Sparse weight in BSR-like form (values / column indices / row
        pointers); only block width ``bsrc == 1`` is supported here.
    layout : str
        Must be "NCHW".

    Returns
    -------
    te.Tensor
        4-D output of shape (nsamples, chanout, imgh, imgw).
    """
    nsamples, chanin, imgh, imgw = [i.value for i in data.shape]
    nelems, veclen, bsrc = [i.value for i in wdat.shape]
    # wptr has one entry per output-channel block plus a terminator.
    chanout = (wptr.shape[0].value - 1) * veclen
    assert bsrc == 1 and layout == "NCHW"

    cfg.add_flop(nsamples * imgh * imgw * (nelems * veclen * bsrc * 2 - chanout))
    cfg.define_split("tile_hw", imgh * imgw, num_outputs=3)
    cfg.define_split("tile_ckk", chanin * 9, num_outputs=3)

    @partial(te.compute, (nsamples, chanin * 3 * 3, imgh * imgw), name="im2col")
    def im2col(nsamples, ckk, imglen):
        # Unflatten the pixel index and the (channel, kh, kw) column index.
        j_h, j_w = imglen // imgw, imglen % imgw
        i_c, k_h, k_w = ckk // 9, ckk // 3 % 3, ckk % 3
        # -1 offset implements the implicit padding of a 3x3 same-conv.
        i_h, i_w = j_h + k_h - 1, j_w + k_w - 1
        return tir.if_then_else(
            tir.all(i_h >= 0, i_h < imgh, i_w >= 0, i_w < imgw), data[nsamples, i_c, i_h, i_w], 0
        )

    @partial(
        te.compute,
        (nsamples, chanout // veclen, veclen, bsrc, imgh * imgw),
        name="CC",
        tag="conv3x3_spNCHW",
    )
    def matmul(nsamples, f_o, f_i, bsrk, imglen):
        # CSR-style row walk over the non-zero blocks of filter row `f_o`.
        row_start, row_end = wptr[f_o], wptr[f_o + 1]
        elem_idx = te.reduce_axis((0, row_end - row_start), name="elem_idx")
        elem = row_start + elem_idx
        return te.sum(
            im2col[nsamples, wind[elem] * bsrc + bsrk, imglen] * wdat[elem, f_i, bsrk],
            axis=elem_idx,
        )

    return reshape(matmul, [nsamples, chanout, imgh, imgw])
@autotvm.register_topi_schedule("conv3x3_spNCHW.x86")
def schedule_spconv2d_3x3_nchw(cfg, outs):
    """Sparse Conv2d 3x3 schedule (NCHW).

    Tiles the pixel dimension of the sparse matmul and computes the im2col
    buffer at the outermost pixel tile.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])

    def _callback(op):
        if op.tag == "conv3x3_spNCHW":
            # wptr, wind, im2col, wdat
            _, _, im2col, _ = op.input_tensors

            # Tile the pixel axis in three levels; unroll the small filter
            # block axes and vectorize the innermost pixel tile.
            n_samples, f_o, f_i, b_c, imglen = s[op].op.axis
            (sum_ax,) = s[op].op.reduce_axis
            hw1, hw2, hw3 = cfg["tile_hw"].apply(s, op, imglen)
            s[op].reorder(n_samples, hw1, f_o, hw2, sum_ax, f_i, b_c, hw3)
            s[op].unroll(f_i)
            s[op].unroll(b_c)
            s[op].vectorize(hw3)

            # Materialize im2col per outer pixel tile, tiled over its
            # (channel * 3 * 3) axis with the same innermost pixel factor.
            s[im2col].compute_at(s[op], hw1)
            n_samples, ckk, imglen = s[im2col].op.axis
            ckk1, ckk2, ckk3 = cfg["tile_ckk"].apply(s, im2col, ckk)
            hw2, hw3 = s[im2col].split(imglen, factor=cfg["tile_hw"].size[-1])
            s[im2col].reorder(n_samples, ckk1, ckk2, hw2, ckk3, hw3)
            s[im2col].unroll(ckk3)
            s[im2col].vectorize(hw3)

    traverse_inline(s, outs[0].op, _callback)
    return s
| 8,988 | 39.129464 | 97 | py |
tvm | tvm-main/python/tvm/topi/x86/binarize_pack.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Schedule for binarization and bit-packing."""
from tvm import te
def schedule_binarize_pack(outs):
    """Build the x86 schedule for binarize_pack.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of binarize_pack
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for binarize_pack.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])

    root = outs[0].op
    if root.tag != "binarize_pack":
        raise RuntimeError(f"Unsupported operator: {root.tag}")

    # Parallelize the packed output over its outermost axis.
    packed = root.output(0)
    sch[packed].parallel(packed.op.axis[0])
    return sch
| 1,669 | 31.115385 | 65 | py |
tvm | tvm-main/python/tvm/topi/x86/conv1d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,invalid-name
"""Conv1D schedule on for Intel CPU"""
from tvm import te
from .. import tag
def schedule_conv1d_ncw(outs):
    """Create schedule for conv1d_ncw and any fused elementwise stages."""
    sch = te.create_schedule([t.op for t in outs])
    final_op = outs[0].op
    visited = []

    def _walk(op):
        """Recursively schedule ``op`` and, for elementwise ops, its producers."""
        if tag.is_broadcast(op.tag):
            # One-to-one mapping ops are folded into their consumer,
            # except the last stage (the graph output).
            if op not in sch.outputs:
                sch[op].compute_inline()
            elif len(op.axis) == 3:
                # Output stage (bias / bn / relu): parallel over N*C, vector over W.
                ax_n, ax_c, ax_w = op.axis
                joined = sch[op].fuse(ax_n, ax_c)
                sch[op].parallel(joined)
                sch[op].vectorize(ax_w)
            for producer in op.input_tensors:
                if isinstance(producer.op, te.tensor.ComputeOp) and producer.op not in visited:
                    _walk(producer.op)

        if "conv1d_ncw" in op.tag:
            conv = op.output(0)

            # Inline a dilated kernel so the dilation is folded into the conv.
            kernel = op.input_tensors[1]
            if isinstance(kernel.op, te.tensor.ComputeOp) and "dilate" in kernel.op.tag:
                sch[kernel].compute_inline()

            # Parallelize the padding stage, when present.
            data = op.input_tensors[0]
            if isinstance(data.op, te.tensor.ComputeOp) and "pad" in data.op.tag:
                padded = data
                data = padded.op.input_tensors[0]
                pad_n, pad_c, pad_w = padded.op.axis
                sch[padded].parallel(sch[padded].fuse(pad_n, pad_c))

            ax_n, ax_c, ax_w = conv.op.axis
            _red_c, _red_w = conv.op.reduce_axis
            _out_n, _out_c, out_w = final_op.axis
            sch[conv].vectorize(ax_w)
            if op != final_op:
                # fuse bias + bn + relu into conv
                sch[conv].compute_at(sch[final_op], out_w)
            else:
                sch[conv].parallel(sch[conv].fuse(ax_n, ax_c))

        visited.append(op)

    _walk(final_op)
    return sch
def schedule_conv1d_nwc(outs):
    """Create schedule for conv1d_nwc and any fused elementwise stages."""
    sch = te.create_schedule([t.op for t in outs])
    final_op = outs[0].op
    visited = []

    def _walk(op):
        """Recursively schedule ``op`` and, for elementwise ops, its producers."""
        if tag.is_broadcast(op.tag):
            # One-to-one mapping ops are folded into their consumer,
            # except the last stage (the graph output).
            if op not in sch.outputs:
                sch[op].compute_inline()
            elif len(op.axis) == 3:
                # Output stage (bias / bn / relu): parallel over N*W, vector over C.
                ax_n, ax_w, ax_c = op.axis
                joined = sch[op].fuse(ax_n, ax_w)
                sch[op].parallel(joined)
                sch[op].vectorize(ax_c)
            for producer in op.input_tensors:
                if isinstance(producer.op, te.tensor.ComputeOp) and producer.op not in visited:
                    _walk(producer.op)

        if "conv1d_nwc" in op.tag:
            conv = op.output(0)

            # Inline a dilated kernel so the dilation is folded into the conv.
            kernel = op.input_tensors[1]
            if isinstance(kernel.op, te.tensor.ComputeOp) and "dilate" in kernel.op.tag:
                sch[kernel].compute_inline()

            # Parallelize the padding stage, when present.
            data = op.input_tensors[0]
            if isinstance(data.op, te.tensor.ComputeOp) and "pad" in data.op.tag:
                padded = data
                data = padded.op.input_tensors[0]
                pad_n, pad_w, pad_c = padded.op.axis
                sch[padded].parallel(sch[padded].fuse(pad_n, pad_w))

            ax_n, ax_w, ax_c = conv.op.axis
            _red_c, _red_w = conv.op.reduce_axis
            _out_n, _out_w, out_c = final_op.axis
            sch[conv].vectorize(ax_c)
            if op != final_op:
                # fuse bias + bn + relu into conv
                sch[conv].compute_at(sch[final_op], out_c)
            else:
                sch[conv].parallel(sch[conv].fuse(ax_n, ax_w))

        visited.append(op)

    _walk(final_op)
    return sch
def schedule_group_conv1d_ncw(outs):
    """Create schedule for group_conv1d_ncw; delegates to schedule_conv1d_ncw."""
    return schedule_conv1d_ncw(outs)
def schedule_group_conv1d_nwc(outs):
    """Create schedule for group_conv1d_nwc; delegates to schedule_conv1d_nwc."""
    return schedule_conv1d_nwc(outs)
| 5,054 | 35.89781 | 97 | py |
tvm | tvm-main/python/tvm/topi/x86/conv2d_avx_common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,invalid-name
"""Conv2D schedule on for Intel CPU"""
import tvm
from tvm.autotvm.task.space import OtherOptionEntity, SplitEntity
from tvm.target.x86 import get_simd_32bit_lanes
from ..generic import conv2d as conv2d_generic
from ..utils import get_const_tuple
from .tensor_intrin import dot_16x1x16_uint8_int8_int32
def _fallback_schedule(cfg, wkl):
    """Fill ``cfg`` with a reasonable default schedule for workload ``wkl``.

    Channel blocks are the largest divisors bounded by the SIMD width; the
    output-width register block is the largest divisor not exceeding 31.
    """

    def _largest_divisor(value, upper):
        # Largest divisor of ``value`` in [1, upper]; 1 always divides.
        return next((cand for cand in range(upper, 0, -1) if value % cand == 0), 1)

    simd_width = get_simd_32bit_lanes()
    pad_left, pad_right = wkl.padl, wkl.padr
    stride_w = wkl.stride_w
    dilated_kernel_w = (wkl.kernel_w - 1) * wkl.dilation_w + 1
    out_width = (wkl.width + pad_left + pad_right - dilated_kernel_w) // stride_w + 1

    oc_bn = _largest_divisor(wkl.out_filter, simd_width)
    ic_bn = _largest_divisor(wkl.in_filter, oc_bn)
    reg_n = _largest_divisor(out_width, 31)

    cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
    cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
    cfg["tile_ow"] = SplitEntity([out_width // reg_n, reg_n])
    cfg["unroll_kw"] = OtherOptionEntity(False)
def _fallback_schedule_int8(cfg, wkl):
    """Fill ``cfg`` with a default int8 schedule for workload ``wkl``.

    The output-channel block is fixed at 16; the input-channel block is the
    largest candidate in {16, 12, 8, 4} dividing the input channels; the
    output-width register block is the largest divisor not exceeding 31.
    """
    pad_left, pad_right = wkl.padl, wkl.padr
    stride_w = wkl.stride_w
    out_width = (wkl.width + pad_left + pad_right - wkl.kernel_w) // stride_w + 1

    oc_bn = 16
    assert wkl.out_filter % oc_bn == 0

    # Step of -4 keeps the input-channel block a multiple of 4 int8 lanes.
    ic_bn = next((cand for cand in range(oc_bn, 0, -4) if wkl.in_filter % cand == 0), 1)
    assert wkl.in_filter % 4 == 0

    reg_n = next((cand for cand in range(31, 0, -1) if out_width % cand == 0), 1)

    cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
    cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
    cfg["tile_ow"] = SplitEntity([out_width // reg_n, reg_n])
    cfg["unroll_kw"] = OtherOptionEntity(False)
def _schedule_conv_NCHWc(s, cfg, data_vec, kernel_vec, conv_out, last):
    """Schedule the 5-D NCHW[x]c fp32 convolution for x86.

    Parameters
    ----------
    s : Schedule
        Schedule being mutated in place.
    cfg : ConfigEntity
        Tuned (or fallback) config providing tile_ow / tile_oc / unroll_kw.
    data_vec, kernel_vec : te.Tensor
        Blocked data / kernel (possibly with a pad or layout-transform stage).
    conv_out : te.Tensor
        The convolution stage.
    last : te.Tensor
        The final output stage (may equal conv_out, or be a fused
        elementwise stage following it).

    Returns
    -------
    s : Schedule
        The same schedule, for chaining.
    """
    # fetch schedule
    reg_n, unroll_kw = cfg["tile_ow"].size[-1], cfg["unroll_kw"].val
    _, _, _, _, ic_bn = get_const_tuple(data_vec.shape)

    # schedule pad: vectorize the channel block and parallelize the rest,
    # then continue scheduling on the unpadded producer.
    if isinstance(s[data_vec].op, tvm.te.ComputeOp) and "pad" in data_vec.op.tag:
        batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis
        s[data_vec].vectorize(ic_block)
        parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
        s[data_vec].parallel(parallel_axis)
        data_vec = data_vec.op.input_tensors[0]

    oc_bn = cfg["tile_oc"].size[-1]
    if isinstance(kernel_vec.op, tvm.te.ComputeOp) and kernel_vec.name == "kernel_vec":
        # data and kernel are not pre-computed, schedule layout transform here.
        # this should only be used by x86 conv2d_nchw, which is for
        # testing purpose.
        batch, ic_chunk, ih, ic_block, iw = s[data_vec].op.axis
        parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
        s[data_vec].parallel(parallel_axis)

        oc_chunk, ic_chunk, oh, ow, ic_block, oc_block = s[kernel_vec].op.axis
        s[kernel_vec].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block)
        if oc_bn > 1:
            s[kernel_vec].vectorize(oc_block)
        parallel_axis = s[kernel_vec].fuse(oc_chunk, oh)
        s[kernel_vec].parallel(parallel_axis)

    # schedule 5-D NCHW[x]c conv
    C, O = conv_out, last
    CC = s.cache_write(C, "global")

    # Split output width by the register block and vectorize the oc block.
    batch, oc_chunk, oh, ow, oc_block = s[C].op.axis
    ow_chunk, ow_block = s[C].split(ow, factor=reg_n)
    s[C].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
    parallel_axis = s[C].fuse(batch, oc_chunk, oh)
    s[C].vectorize(oc_block)
    if C == O:
        s[C].parallel(parallel_axis)

    # Accumulate into the cache-write stage inside each ow tile.
    s[CC].compute_at(s[C], ow_chunk)
    _, oc_chunk, oh, ow, oc_block = s[CC].op.axis
    ic, kh, kw = s[CC].op.reduce_axis

    ow_chunk, ow_block = s[CC].split(ow, factor=reg_n)
    ic_chunk, ic_block = s[CC].split(ic, factor=ic_bn)

    # unroll_kw hoists kw above the inner compute loops and unrolls it.
    if unroll_kw:
        s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kh, ic_block, kw, ow_block, oc_block)
        s[CC].unroll(kw)
    else:
        s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kh, kw, ic_block, ow_block, oc_block)

    s[CC].vectorize(oc_block)
    s[CC].unroll(ow_block)

    # When elementwise stages are fused after the conv, schedule the fused
    # output and attach the conv at its parallel axis.
    if C != O:
        out_ndim = len(s[O].op.axis)
        if out_ndim == 5:
            batch, oc_chunk, oh, ow, oc_block = s[O].op.axis
            ow_chunk, ow_block = s[O].split(ow, factor=reg_n)
            s[O].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
            parallel_axis = s[O].fuse(batch, oc_chunk, oh)
            s[C].compute_at(s[O], parallel_axis)
            s[O].vectorize(oc_block)
            s[O].parallel(parallel_axis)
        elif out_ndim == 4:
            batch, oc, oh, ow = s[O].op.axis
            ow_chunk, ow_block = s[O].split(ow, factor=reg_n)
            oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
            s[O].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
            parallel_axis = s[O].fuse(batch, oc_chunk, oh)
            s[C].compute_at(s[O], parallel_axis)
            s[O].vectorize(oc_block)
            s[O].parallel(parallel_axis)
        else:
            raise ValueError(f"Unsupported output ndim: {out_ndim}")

    return s
def _schedule_conv_NCHWc_int8(s, cfg, data_vec, kernel_vec, conv_out, last):
    """Schedule NCHW[x]c int8 conv by delegating to the generic CPU int8
    schedule, specialized with the target's 32-bit SIMD lane count and the
    x86 16x1x16 uint8*int8->int32 dot-product intrinsic."""
    return conv2d_generic.schedule_conv_NCHWc_cpu_common_int8(
        s,
        cfg,
        data_vec,
        kernel_vec,
        conv_out,
        last,
        int32_lanes=get_simd_32bit_lanes(),
        intrin=dot_16x1x16_uint8_int8_int32(),
        inline_fused=True,
    )
| 6,565 | 35.276243 | 93 | py |
tvm | tvm-main/python/tvm/topi/x86/conv2d_int8.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
# pylint: disable=no-value-for-parameter,import-outside-toplevel
"""Conv2D int8 schedule on x86"""
import tvm
from tvm import autotvm, te
from tvm.target.x86 import target_has_sse42
from .. import nn, tag
from ..generic import conv2d as conv2d_generic
from ..nn.conv2d import _get_workload as _get_conv2d_workload
from ..nn.conv2d import unpack_NCHWc_to_nchw
from ..nn.depthwise_conv2d import _get_workload as _get_depthwise_conv2d_workload
from ..nn.utils import get_pad_tuple
from ..utils import get_const_tuple, traverse_inline
from . import conv2d_avx_1x1, conv2d_avx_common
def _get_default_config_int8(
    cfg,
    data,
    kernel,
    strides,
    padding,
    dilation,
    out_dtype,
    is_depthwise=False,
    layout="NCHW",
    int32_lanes=4,
):
    """Populate ``cfg`` with a default schedule for the given int8 workload."""
    if is_depthwise:
        # Fallback to FP32 default config until a VNNI schedule is defined.
        wkl = _get_depthwise_conv2d_workload(data, kernel, strides, padding, out_dtype)
        from .depthwise_conv2d import _fallback_schedule

        _fallback_schedule(cfg, wkl)
        return

    wkl = _get_conv2d_workload(data, kernel, strides, padding, dilation, out_dtype, layout)
    # 1x1 kernels get the dedicated pointwise fallback; everything else the
    # common spatial one.
    fallback = (
        conv2d_generic.fallback_schedule_cpu_1x1_int8
        if wkl.kernel_h == 1 and wkl.kernel_w == 1
        else conv2d_generic.fallback_schedule_cpu_common_int8
    )
    fallback(cfg, wkl, int32_lanes=int32_lanes, num_int8_elements=4)
def is_int8_hw_support(data_dtype, kernel_dtype):
    """
    Checks to ensure that we can use Intel DLBoost instructions
    1) The datatypes are correct.
    2) LLVM version has support for the instructions.
    3) Target is skylake and above.
    """
    # 1) dtype pairing expected by the u8 x s8 dot-product instructions
    dtype_ok = (data_dtype, kernel_dtype) == ("uint8", "int8")
    # 2) LLVM 8+ can emit them
    llvm_ok = tvm.target.codegen.llvm_version_major() >= 8
    # 3) the current target CPU must expose at least SSE4.2
    target_ok = target_has_sse42(tvm.target.Target.current().mcpu)
    return dtype_ok and llvm_ok and target_ok
def conv2d_nchw_int8(data, kernel, strides, padding, dilation, out_dtype):
    """Compute conv2d with NCHW layout and int8 dtype"""
    # Run the packed NCHWc int8 kernel with NCHW in/out layouts, then
    # unpack the blocked result back to plain NCHW.
    nchw = "NCHW"
    packed = conv2d_NCHWc_int8(data, kernel, strides, padding, dilation, nchw, nchw, out_dtype)
    return unpack_NCHWc_to_nchw(packed, out_dtype)
def schedule_conv2d_nchw_int8(outs):
    """Create the schedule for conv2d_nchw_int8"""
    # conv2d_nchw_int8 computes through the packed NCHWc int8 op, so its
    # schedule is exactly the NCHWc int8 schedule.
    return schedule_conv2d_NCHWc_int8(outs)
def _pack_data(cfg, data, kernel):
    """Re-lay raw NCHW data / OIHW kernel into the blocked int8 layouts
    (NCHW[x]c and OIHW[x]i[y]o[4]i) dictated by the tile sizes in ``cfg``."""
    vec_elems = 4  # int8 lanes consumed per dot-product step
    batch, _, height, width = get_const_tuple(data.shape)
    out_chan, in_chan, k_h, k_w = get_const_tuple(kernel.shape)
    ic_bn = cfg["tile_ic"].size[-1]
    oc_bn = cfg["tile_oc"].size[-1]

    # NOTE: the input-channel count is taken from the kernel shape (the
    # data channel dim is ignored), matching the original behavior.
    data_vec = te.compute(
        (batch, in_chan // ic_bn, height, width, ic_bn),
        lambda b, co, y, x, ci: data[b, co * ic_bn + ci, y, x],
        name="data_vec",
    )
    kernel_vec = te.compute(
        (out_chan // oc_bn, in_chan // ic_bn, k_h, k_w, ic_bn // vec_elems, oc_bn, vec_elems),
        lambda oco, ico, y, x, ici, ocb, icb: kernel[
            oco * oc_bn + ocb, ico * ic_bn + ici * vec_elems + icb, y, x
        ],
        name="kernel_vec",
    )
    return data_vec, kernel_vec
@autotvm.register_topi_compute("conv2d_NCHWc_int8.x86")
def conv2d_NCHWc_int8(cfg, data, kernel, strides, padding, dilation, layout, out_layout, out_dtype):
    """Compute conv2d with NCHWc layout and int8 dtype.

    Accepts either already-packed 5-D data with a 7-D kernel, or raw 4-D
    data with a 4-D kernel (the latter only occurs during autotuning, in
    which case the inputs are packed here).
    """
    if len(data.shape) == 5:
        # Already packed: NCHW[x]c data, OIHW[x]i[y]o[4]i kernel.
        n, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape)
        in_channel = ic_chunk * ic_bn
        oc_chunk, ic_chunk_group, kernel_height, kernel_width, _, oc_bn, _ = get_const_tuple(
            kernel.shape
        )
        num_filter = oc_chunk * oc_bn
    else:
        # Raw NCHW data / OIHW kernel.
        n, in_channel, ih, iw = get_const_tuple(data.shape)
        num_filter, _, kernel_height, kernel_width = get_const_tuple(kernel.shape)

    # Define autotvm tuning space
    is_kernel_1x1 = kernel_height == 1 and kernel_width == 1
    pt, pl, pb, pr = get_pad_tuple(padding, (kernel_height, kernel_width))
    sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    dh, dw = dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation)
    dilated_kernel_h = (kernel_height - 1) * dh + 1
    dilated_kernel_w = (kernel_width - 1) * dw + 1
    oh = (ih - dilated_kernel_h + pt + pb) // sh + 1
    ow = (iw - dilated_kernel_w + pl + pr) // sw + 1

    # ic block must cover the 4 int8 lanes of the dot-product instruction;
    # oc block must cover its 16 int32 output lanes.
    cfg.define_split("tile_ic", in_channel, num_outputs=2, filter=lambda y: y.size[-1] % 4 == 0)
    cfg.define_split("tile_oc", num_filter, num_outputs=2, filter=lambda y: y.size[-1] % 16 == 0)
    cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda y: y.size[-1] <= 64)
    if is_kernel_1x1:
        cfg.define_knob("tile_oh", [1, 2] if oh > 1 else [1])
    else:
        cfg.define_knob("unroll_kw", [True, False])

    # If no config was set, we can fallback to default config.
    if cfg.is_fallback:
        _get_default_config_int8(
            cfg,
            te.placeholder((n, in_channel, ih, iw), dtype=data.dtype),
            te.placeholder(
                (num_filter, in_channel, kernel_height, kernel_width), dtype=kernel.dtype
            ),
            strides,
            padding,
            dilation,
            out_dtype,
            int32_lanes=16,
        )

    # Pack data if raw 4-D data is provided.
    # This can only happen when autotuning.
    if len(data.shape) == 4:
        data, kernel = _pack_data(cfg, data, kernel)

    return nn.conv2d_NCHWc_int8(
        data, kernel, strides, padding, dilation, layout, out_layout, out_dtype
    )
@autotvm.register_topi_schedule("conv2d_NCHWc_int8.x86")
def schedule_conv2d_NCHWc_int8(cfg, outs):
    """Create schedule for tensors"""
    sch = te.create_schedule([t.op for t in outs])

    def _callback(op):
        """Dispatch the conv stage to the 1x1 or the common int8 schedule."""
        if "conv2d_NCHWc_int8" not in op.tag:
            return
        conv = op.output(0)
        data_vec = conv.op.input_tensors[0]
        kernel_vec = conv.op.input_tensors[1]
        # int8 conv kernel is 7-dim; dims 2 and 3 are the spatial extent.
        _, _, kernel_h, kernel_w, _, _, _ = get_const_tuple(kernel_vec.shape)
        sched_args = [sch, cfg, data_vec, kernel_vec, conv, outs[0]]
        if kernel_h == 1 and kernel_w == 1:
            conv2d_avx_1x1._schedule_conv_NCHWc_int8(*sched_args)
        else:
            conv2d_avx_common._schedule_conv_NCHWc_int8(*sched_args)

    traverse_inline(sch, outs[0].op, _callback)
    return sch
@autotvm.register_topi_schedule("conv2d_nhwc_pack_int8.x86")
def schedule_conv2d_nhwc_pack_int8(cfg, outs):
    """Create schedule for tensors"""
    s = te.create_schedule([x.op for x in outs])
    output_op = outs[0].op
    scheduled_ops = []

    def traverse(op):
        """Traverse operators from computation graph"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(op.tag):
            if op not in s.outputs:
                s[op].compute_inline()
            else:  # inject custom schedule
                if len(op.axis) == 4:  # schedule bias + bn + relu
                    n, h, w, c = op.axis
                    fused = s[op].fuse(n, h, w)
                    s[op].parallel(fused)
                    s[op].vectorize(c)
            for tensor in op.input_tensors:
                if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
                    traverse(tensor.op)

        if "conv2d_nhwc_pack_int8" in op.tag:
            conv_out = op.output(0)
            kernel = conv_out.op.input_tensors[1]
            data_vec = conv_out.op.input_tensors[0]
            # Walk past the packing stage to the raw input (or the pad stage).
            data = (
                data_vec.op.input_tensors[0]
                if isinstance(data_vec.op, te.tensor.ComputeOp) and "pad" not in data_vec.op.tag
                else data_vec
            )
            if isinstance(data.op, te.tensor.ComputeOp) and "pad" in data.op.tag:
                data_pad = data
                data = data_pad.op.input_tensors[0]

            args = [s, cfg, data_vec, conv_out, outs[0]]
            # Only the uint8 1x1 pointwise case has a schedule implementation.
            if data.dtype == "uint8":
                kh, kw, _, _, _ = get_const_tuple(kernel.shape)
                if kh == 1 and kw == 1:
                    conv2d_avx_1x1._schedule_conv_nhwc_pack_int8(*args)
                else:
                    raise ValueError("Only support 1x1 kernel with schedule_conv2d_nhwc_pack.")
            else:
                raise ValueError(
                    f"Not support this data type {data.dtype} with "
                    f"schedule_conv2d_nhwc_pack. Only support int8"
                )

        scheduled_ops.append(op)

    traverse(output_op)
    return s
| 9,824 | 36.215909 | 100 | py |
tvm | tvm-main/python/tvm/topi/x86/conv2d_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D alter op and legalize functions for x86"""
import logging
import re
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from .conv2d import _get_default_config
from .conv2d_int8 import is_int8_hw_support, _get_default_config_int8
from ..utils import get_const_tuple
from ..nn import conv2d_legalize, conv2d_alter_layout
from ..generic.conv2d import conv2d_alter_int8_common
logger = logging.getLogger("topi")
_NCHWc_matcher = re.compile("^NCHW[0-9]+c$")
_OIHWio_matcher = re.compile("^OIHW[0-9]+i[0-9]+o$")
@conv2d_alter_layout.register("cpu")
def _alter_conv2d_layout(attrs, inputs, tinfos, out_type):
    """Rewrite a conv2d into its x86-preferred form.

    Queries the AutoTVM dispatch context for the template chosen for this
    workload and, depending on it, rewrites the op to the packed NCHWc
    variants (fp32 / int8 / depthwise) with blocked layouts, or pre-computes
    the winograd weight transform when the auto-scheduler picked a winograd
    implementation. Returns None when no rewrite applies.
    """
    target = tvm.target.Target.current(allow_none=False)
    dispatch_ctx = autotvm.task.DispatchContext.current
    new_attrs = {k: attrs[k] for k in attrs.keys()}

    # Parse the attributes.
    padding = attrs.get_int_tuple("padding")
    strides = attrs.get_int_tuple("strides")
    dilation = attrs.get_int_tuple("dilation")
    data_layout = attrs["data_layout"]
    kernel_layout = attrs["kernel_layout"]
    data_tensor, kernel_tensor = tinfos
    data_dtype = data_tensor.dtype
    kernel_dtype = kernel_tensor.dtype
    out_dtype = out_type.dtype

    if isinstance(dispatch_ctx, autotvm.task.ApplyGraphBest):
        cfg = dispatch_ctx.query(target, None)
        workload = cfg.workload
    else:
        impl, outs = relay.backend.te_compiler.select_implementation(
            relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target
        )
        workload = autotvm.task.get_workload(outs)
        if workload is None:
            # The best implementation is not an AutoTVM template.
            # It may be from the auto-scheduler
            if impl.name.find("winograd") != -1:
                if dilation != (1, 1):
                    logger.warning("Does not support weight pre-transform for dilated convolution.")
                    return None

                assert data_layout == "NHWC" and kernel_layout == "HWIO"
                N, H, W, CI = get_const_tuple(data_tensor.shape)
                KH, KW, _, CO = get_const_tuple(kernel_tensor.shape)

                # Pre-compute weight transformation in winograd
                tile_size = 4
                # HWIO -> OIHW
                kernel_transform = relay.transpose(inputs[1], axes=[3, 2, 0, 1])
                # alpha, alpha, CO, CI
                weight = relay.nn.contrib_conv2d_winograd_weight_transform(
                    kernel_transform, tile_size=tile_size
                )
                new_attrs["tile_size"] = tile_size
                new_attrs["channels"] = CO
                return relay.nn.contrib_conv2d_winograd_without_weight_transform(
                    inputs[0], weight, **new_attrs
                )
            return None

    cfg = dispatch_ctx.query(target, workload)

    topi_tmpl = workload[0]

    if topi_tmpl == "conv2d_NCHWc.x86":
        # we only convert conv2d_NCHW to conv2d_NCHWc for x86
        if data_layout == "NCHW" and kernel_layout == "OIHW":
            if cfg.is_fallback:
                _get_default_config(
                    cfg,
                    data_tensor,
                    kernel_tensor,
                    strides,
                    padding,
                    dilation,
                    out_dtype,
                    False,
                    data_layout,
                )
            batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape)
            out_channel, _, kh, kw = get_const_tuple(kernel_tensor.shape)
            ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]

            # update new attrs
            new_attrs["channels"] = out_channel
            new_attrs["data_layout"] = f"NCHW{ic_bn}c"
            # (oc, ic, h, w) -> (OC, IC, h, w, ic, oc)
            new_attrs["kernel_layout"] = f"OIHW{ic_bn}i{oc_bn}o"
            new_attrs["out_layout"] = f"NCHW{oc_bn}c"

            # Store altered operator's config
            new_data = te.placeholder(
                (batch_size, in_channel // ic_bn, height, width, ic_bn), dtype=data_dtype
            )
            new_kernel = te.placeholder(
                (out_channel // oc_bn, in_channel // ic_bn, kh, kw, ic_bn, oc_bn),
                dtype=kernel_tensor.dtype,
            )
            new_workload = autotvm.task.args_to_workload(
                [
                    new_data,
                    new_kernel,
                    strides,
                    padding,
                    dilation,
                    new_attrs["data_layout"],
                    new_attrs["out_layout"],
                    out_dtype,
                ],
                topi_tmpl,
            )
            dispatch_ctx.update(target, new_workload, cfg)
        else:
            # Inputs already blocked (e.g. second pass): just validate layouts.
            assert _NCHWc_matcher.match(data_layout)
            assert _OIHWio_matcher.match(kernel_layout)
        return relay.nn.contrib_conv2d_nchwc(*inputs, **new_attrs)

    if topi_tmpl == "conv2d_NCHWc_int8.x86":
        # TODO(@icemelon9, @anijain2305): Need to support data layout NHWC with kernel layout HWIO
        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        if cfg.is_fallback:
            _get_default_config_int8(
                cfg,
                data_tensor,
                kernel_tensor,
                strides,
                padding,
                dilation,
                out_dtype,
                False,
                data_layout,
                int32_lanes=16,
            )

        batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape)
        out_channel, channel_multiplier, kh, kw = get_const_tuple(kernel_tensor.shape)

        ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]

        # update new attrs
        # n_elems = 4 int8 lanes per dot-product step (innermost kernel dim).
        n_elems = 4
        new_attrs["channels"] = out_channel
        new_attrs["data_layout"] = f"NCHW{ic_bn}c"
        new_attrs["kernel_layout"] = f"OIHW{ic_bn // n_elems:n}i{oc_bn:n}o{n_elems:n}i"
        new_attrs["out_layout"] = f"NCHW{oc_bn}c"

        # Store altered operator's config.
        new_data = te.placeholder(
            (batch_size, in_channel // ic_bn, height, width, ic_bn), dtype=data_dtype
        )
        new_kernel = te.placeholder(
            (out_channel // oc_bn, in_channel // ic_bn, kh, kw, ic_bn // n_elems, oc_bn, n_elems),
            dtype=kernel_dtype,
        )
        new_workload = autotvm.task.args_to_workload(
            [
                new_data,
                new_kernel,
                strides,
                padding,
                dilation,
                new_attrs["data_layout"],
                new_attrs["out_layout"],
                out_dtype,
            ],
            topi_tmpl,
        )
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.contrib_conv2d_nchwc(*inputs, **new_attrs)

    if topi_tmpl == "depthwise_conv2d_NCHWc.x86":
        if data_layout == "NCHW" and kernel_layout == "OIHW":
            if cfg.is_fallback:
                _get_default_config(
                    cfg,
                    data_tensor,
                    kernel_tensor,
                    strides,
                    padding,
                    dilation,
                    out_dtype,
                    True,
                    data_layout,
                )

            batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape)
            out_channel, channel_multiplier, kh, kw = get_const_tuple(kernel_tensor.shape)
            ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
            assert channel_multiplier == 1

            # update new attrs
            new_attrs["channels"] = out_channel
            new_attrs["data_layout"] = f"NCHW{ic_bn}c"
            new_attrs["kernel_layout"] = f"OIHW1i{oc_bn}o"
            new_attrs["out_layout"] = f"NCHW{oc_bn}c"

            # Store altered operator's config.
            new_data = te.placeholder(
                (batch_size, in_channel // ic_bn, height, width, ic_bn), dtype=data_dtype
            )
            new_kernel = te.placeholder(
                (out_channel // oc_bn, 1, kh, kw, 1, oc_bn), dtype=kernel_dtype
            )
            new_workload = autotvm.task.args_to_workload(
                [
                    new_data,
                    new_kernel,
                    strides,
                    padding,
                    dilation,
                    new_attrs["data_layout"],
                    new_attrs["out_layout"],
                    out_dtype,
                ],
                topi_tmpl,
            )
            dispatch_ctx.update(target, new_workload, cfg)
        else:
            # Inputs already blocked: just validate layouts.
            assert _NCHWc_matcher.match(data_layout)
            assert _OIHWio_matcher.match(kernel_layout)
        return relay.nn.contrib_depthwise_conv2d_nchwc(*inputs, **new_attrs)

    return None
@conv2d_legalize.register("cpu")
def _conv2d_legalize(attrs, inputs, arg_types):
    """Legalizes Conv2D op.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    # Collect the input tensors.
    data_tensor, kernel_tensor = arg_types[0], arg_types[1]
    data_dtype = data_tensor.dtype
    kernel_dtype = kernel_tensor.dtype

    # Collect the output tensor.
    output_tensor = arg_types[2]

    # Collect the input exprs.
    data, kernel = inputs

    # Intel vector instructions require data and kernel to have different dtypes:
    # data must be uint8 and kernel int8. When both are int8, pretend the data
    # is uint8 so conv2d_alter_int8_common performs the promotion rewrite.
    if data_tensor.dtype == "int8" and kernel_tensor.dtype == "int8":
        data_dtype = "uint8"
    if is_int8_hw_support(data_dtype, kernel_dtype):
        # 4 int8 elements per dot-product step, 16 output lanes.
        return conv2d_alter_int8_common(
            data, data_tensor, kernel, kernel_tensor, output_tensor, attrs, data_dtype, 4, 16
        )
    return None
| 10,908 | 36.232082 | 100 | py |
tvm | tvm-main/python/tvm/topi/x86/conv2d_avx_1x1.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,invalid-name
"""1x1 Conv2D schedule on for Intel CPU"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm.autotvm.task.space import OtherOptionEntity, SplitEntity
from tvm.target.x86 import get_simd_32bit_lanes
from ..generic import conv2d as conv2d_generic
from ..nn.pad import pad
from ..nn.utils import get_pad_tuple
from ..utils import get_const_tuple, simplify
from .tensor_intrin import dot_16x1x16_uint8_int8_int32
def _fallback_schedule(cfg, wkl):
    """Fill ``cfg`` with a default 1x1 NCHWc schedule for workload ``wkl``.

    Used when autotvm has no tuned record: picks the largest SIMD-friendly
    channel block sizes and the first output tile whose area is below 32.
    """
    simd_width = get_simd_32bit_lanes()
    pad_t, pad_l, pad_b, pad_r = wkl.padt, wkl.padl, wkl.padb, wkl.padr
    str_h, str_w = wkl.stride_h, wkl.stride_w
    dil_kh = (wkl.kernel_h - 1) * wkl.dilation_h + 1
    dil_kw = (wkl.kernel_w - 1) * wkl.dilation_w + 1
    out_height = (wkl.height + pad_t + pad_b - dil_kh) // str_h + 1
    out_width = (wkl.width + pad_l + pad_r - dil_kw) // str_w + 1

    # Largest divisor of out_filter not exceeding the SIMD width.
    oc_bn = next((bn for bn in range(simd_width, 0, -1) if wkl.out_filter % bn == 0), 1)
    # Largest divisor of in_filter not exceeding oc_bn.
    ic_bn = next((bn for bn in range(oc_bn, 0, -1) if wkl.in_filter % bn == 0), 1)

    # First (largest-ow) factor pair dividing the output extents with area < 32.
    for ow_factor in range(out_width, 0, -1):
        if out_width % ow_factor != 0:
            continue
        for oh_factor in range(out_height, 0, -1):
            if out_height % oh_factor == 0 and ow_factor * oh_factor < 32:
                cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
                cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
                cfg["tile_oh"] = OtherOptionEntity(oh_factor)
                cfg["tile_ow"] = SplitEntity([out_width // ow_factor, ow_factor])
                return
    raise ValueError(f"cannot decide default schedule for workload: {wkl}")
def _schedule_conv_NCHWc(s, cfg, data_vec, kernel_vec, conv_out, last):
    """Schedule a 1x1 conv2d in NCHWc layout on x86.

    Tiles the output over oh/ow with the tuned factors, vectorizes the
    oc-block axis and parallelizes the fused (batch, oc_chunk, oh_outer)
    loop.  ``last`` is the final op of the (possibly fused) group; when it
    differs from ``conv_out`` the convolution is computed at the output's
    parallel axis.
    """
    # fetch schedule
    oh_factor, ow_factor = cfg["tile_oh"].val, cfg["tile_ow"].size[-1]
    _, _, _, _, ic_bn = get_const_tuple(data_vec.shape)

    # schedule pad: vectorize the inner channel block, parallelize the rest,
    # then step past the pad stage so later code sees the raw data tensor.
    if isinstance(s[data_vec].op, tvm.te.ComputeOp) and "pad" in data_vec.op.tag:
        batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis
        s[data_vec].vectorize(ic_block)
        parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
        s[data_vec].parallel(parallel_axis)
        data_vec = data_vec.op.input_tensors[0]

    oc_bn = cfg["tile_oc"].size[-1]
    if isinstance(kernel_vec.op, tvm.te.ComputeOp) and kernel_vec.name == "kernel_vec":
        # data and kernel are not pre-computed, schedule layout transform here.
        # this should only be used by x86 conv2d_nchw, which is for
        # testing purpose.
        batch, ic_chunk, ih, ic_block, iw = s[data_vec].op.axis
        parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
        s[data_vec].parallel(parallel_axis)

        oc_chunk, ic_chunk, oh, ow, ic_block, oc_block = s[kernel_vec].op.axis
        s[kernel_vec].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block)
        if oc_bn > 1:
            s[kernel_vec].vectorize(oc_block)
        parallel_axis = s[kernel_vec].fuse(oc_chunk, oh)
        s[kernel_vec].parallel(parallel_axis)

    # C: conv result stage; O: final (fused) output stage; CC: global cache
    # stage used to accumulate the reduction.
    C, O = conv_out, last
    CC = s.cache_write(C, "global")

    batch, oc_chunk, oh, ow, oc_block = s[C].op.axis
    oh_outer, oh_inner = s[C].split(oh, factor=oh_factor)
    ow_outer, ow_inner = s[C].split(ow, factor=ow_factor)
    s[C].reorder(oc_chunk, oh_outer, ow_outer, oh_inner, ow_inner, oc_block)
    s[C].vectorize(oc_block)

    parallel_axis = s[C].fuse(batch, oc_chunk, oh_outer)
    s[CC].compute_at(s[C], parallel_axis)
    # Only parallelize C here when it is also the final output; otherwise
    # parallelism is applied on O below.
    if C == O:
        s[C].parallel(parallel_axis)

    _, oc_chunk, oh, ow, oc_block = s[CC].op.axis
    ic, _, _ = s[CC].op.reduce_axis

    ic_chunk, ic_block = s[CC].split(ic, factor=ic_bn)

    oh_outer, oh_inner = s[CC].split(oh, factor=oh_factor)
    ow_outer, ow_inner = s[CC].split(ow, factor=ow_factor)

    s[CC].reorder(oc_chunk, oh_outer, ow_outer, ic_chunk, ic_block, oh_inner, ow_inner, oc_block)
    s[CC].fuse(oc_chunk, oh_outer)

    s[CC].vectorize(oc_block)

    s[CC].unroll(ow_inner)
    s[CC].unroll(oh_inner)

    if C != O:
        # The fused output may be in NCHWc (5-D) or NCHW (4-D) layout.
        out_ndim = len(s[O].op.axis)
        if out_ndim == 5:
            batch, oc_chunk, oh, ow, oc_block = s[O].op.axis
            oh_outer, oh_inner = s[O].split(oh, factor=oh_factor)
            ow_outer, ow_inner = s[O].split(ow, factor=ow_factor)
            s[O].reorder(oc_chunk, oh_outer, ow_outer, oh_inner, ow_inner, oc_block)
            parallel_axis = s[O].fuse(batch, oc_chunk, oh_outer)
            s[C].compute_at(s[O], parallel_axis)
            s[O].vectorize(oc_block)
            s[O].parallel(parallel_axis)
        elif out_ndim == 4:
            batch, oc, oh, ow = s[O].op.axis
            oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
            oh_outer, oh_inner = s[O].split(oh, factor=oh_factor)
            ow_outer, ow_inner = s[O].split(ow, factor=ow_factor)
            s[O].reorder(oc_chunk, oh_outer, ow_outer, oh_inner, ow_inner, oc_block)
            parallel_axis = s[O].fuse(batch, oc_chunk, oh_outer)
            s[C].compute_at(s[O], parallel_axis)
            s[O].vectorize(oc_block)
            s[O].parallel(parallel_axis)
        else:
            raise ValueError(f"Unsupported output ndim: {out_ndim}")
    return s
def _schedule_conv_NCHWc_int8(s, cfg, data_vec, kernel_vec, conv_out, last):
    """Schedule an int8 1x1 NCHWc conv2d by delegating to the generic CPU
    int8 helper with the x86 SIMD width and dot-product intrinsic."""
    lanes = get_simd_32bit_lanes()
    intrinsic = dot_16x1x16_uint8_int8_int32()
    return conv2d_generic.schedule_conv_NCHWc_cpu_1x1_int8(
        s,
        cfg,
        data_vec,
        kernel_vec,
        conv_out,
        last,
        int32_lanes=lanes,
        intrin=intrinsic,
    )
def _declaration_conv_nhwc_pack(cfg, Input, Filter, stride, padding, dilation, out_dtype):
    """Declare an int8 NHWC convolution whose filter is packed for the
    AVX512 dot-product intrinsic.

    The filter (kh, kw, num_filter, channel) is repacked so that 16 output
    channels x 4 input channels are contiguous, matching the 16x4 intrinsic
    access pattern.

    NOTE(review): the packing arithmetic below assumes num_filter % 16 == 0
    and channel % 4 == 0 — confirm at call sites.
    """
    # more assertion for the shapes
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilation, int) or len(dilation) == 2
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride

    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation

    batch, in_height, in_width, in_channel = Input.shape
    kernel_h, kernel_w, num_filter, channel = Filter.shape

    # compute the output shape
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    out_channel = num_filter
    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
    pad_before = [0, pad_top, pad_left, 0]
    pad_after = [0, pad_down, pad_right, 0]
    PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")

    # todo: padding filter to accommodate the intrinsic

    # packing the Filter to let memory access be consecutive for AVX512 intrinsic
    # Done in pre-compute stage
    idxd = tvm.tir.indexdiv
    idxm = tvm.tir.indexmod
    # Packed shape: (kh, kw, oc//16, 16 * (ic//4), 4); axis d interleaves the
    # oc lane (d % 16) with the ic outer index (d // 16).
    packw_shape = (kernel_h, kernel_w, idxd(num_filter, 16), 16 * idxd(channel, 4), 4)
    PackW = te.compute(
        packw_shape,
        lambda a, b, c, d, e: Filter[a, b, c * 16 + idxm(d, 16), idxd(d, 16) * 4 + e],
        name="packed_filter",
    )

    rc = te.reduce_axis((0, in_channel), name="rc")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    # Reduction over (kh, kw, ic), reading the filter through the packed layout.
    Output = te.compute(
        (batch, out_height, out_width, out_channel),
        lambda nn, yy, xx, ff: te.sum(
            PaddedInput[
                nn, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rc
            ].astype(out_dtype)
            * PackW[ry, rx, idxd(ff, 16), idxd(rc, 4) * 16 + idxm(ff, 16), idxm(rc, 4)].astype(
                out_dtype
            ),
            axis=[ry, rx, rc],
        ),
        name="Conv2d_1x1_Output_int8",
        tag="conv2d_nhwc_pack_int8",
    )
    return Output
def _schedule_conv_nhwc_pack_int8(s, cfg, data, conv_out, last):
"""
Defines the schedule for the int8 nhwc layout. For 1x1 conv, it
is a matrix-multiply operation by using nhwc layout. We will do
packing of weight to make the address access be friendly to int8
intrinsic
"""
# FIXME - https://github.com/apache/tvm/issues/3598
# pylint: disable=unreachable
return s
int32_lanes = 16
# assertion to fail the unhandled case
_, _, _, ic_num = get_const_tuple(data.shape)
_, _, _, oc_num = get_const_tuple(conv_out.shape)
assert ic_num % 4 == 0
assert oc_num % 16 == 0
ic_factor, oc_factor = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
# schedule data
A = data
if isinstance(s[A].op, tvm.te.ComputeOp):
batch, ih, iw, ic = s[A].op.axis
d_ic_chunk, d_ic_block = s[A].split(ic, factor=4)
s[A].vectorize(d_ic_block)
C, O = conv_out, last
batch, oh, ow, oc = s[C].op.axis
kh, kw, ic = s[C].op.reduce_axis
# match the x86 intrinsic
ic_outer, ic_inner = s[C].split(ic, factor=4)
oc_outer, oc_inner = s[C].split(oc, factor=int32_lanes)
ic_f_outer, ic_s_outer = s[C].split(ic_outer, factor=ic_factor)
s[C].reorder(oc_outer, oh, ow, ic_f_outer, ic_s_outer, kh, kw, oc_inner, ic_inner)
pc = dot_16x1x16_uint8_int8_int32()
s[C].tensorize(oc_inner, pc)
if C != O:
batch, last_oh, last_ow, last_oc = s[O].op.axis
oc_chunk, oc_block = s[O].split(ochannel, 16)
# not saw perf improvement to split oh/ow here
s[O].vectorize(oc_block)
return s
| 10,572 | 37.169675 | 97 | py |
tvm | tvm-main/python/tvm/topi/x86/nn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-locals,unused-variable
"""x86 nn operators"""
from tvm import te
from ..utils import traverse_inline
from .injective import schedule_injective_from_existing
def _schedule_softmax(softmax_op, s, outs):
    """Schedule one softmax-family op (softmax / fast_softmax / log_softmax).

    Unpacks the intermediate stages from the op's tag-specific structure,
    then parallelizes the axes *before* the softmax axis and anchors all
    intermediates (max_elem, expsum, exp) under that fused parallel loop.
    """
    op_tag = softmax_op.tag
    # The input-tensor layout differs per tag; pull out the stages that
    # must be computed under the common parallel root.
    if op_tag == "softmax_output":
        exp = softmax_op.input_tensors[0]
        expsum = softmax_op.input_tensors[1]
        max_elem = s[exp].op.input_tensors[1]
        delta = None
        axis = int(softmax_op.attrs["axis"])
    elif op_tag == "fast_softmax_output":
        exp = softmax_op.input_tensors[0]
        expsum = softmax_op.input_tensors[1]
        # fast_softmax has an extra x - max(x) stage feeding exp.
        delta = s[exp].op.input_tensors[0]
        max_elem = s[delta].op.input_tensors[1]
        axis = int(softmax_op.attrs["axis"])
    elif op_tag == "log_softmax_output":
        exp = None
        delta = None
        max_elem = softmax_op.input_tensors[1]
        expsum = softmax_op.input_tensors[2]
        axis = int(softmax_op.attrs["axis"])
    else:
        raise ValueError(
            f"Tag is expected to be softmax_output or log_softmax_output. Got {op_tag}"
        )

    output = outs[0]

    def _schedule(output_op, softmax_op):
        # only parallelize outer dimensions up to axis
        outer_axes = [output_op.axis[i] for i in range(0, axis)]
        fused_outer_axes = s[output_op].fuse(*outer_axes)
        s[output_op].parallel(fused_outer_axes)

        if softmax_op != output_op:
            # fuse softmax output with following elemwise ops.
            s[softmax_op].compute_at(s[output_op], fused_outer_axes)

        # move computations with the same outer dimensions under the same root
        s[max_elem].compute_at(s[output_op], fused_outer_axes)
        s[expsum].compute_at(s[output_op], fused_outer_axes)

        if delta is not None:
            s[exp].compute_inline()
            s[delta].compute_inline()
        if exp is not None:
            s[exp].compute_at(s[output_op], fused_outer_axes)

    if list(output.shape) == list(softmax_op.output(0).shape):
        _schedule(output.op, softmax_op)
    else:
        # This case can happen, for example, if the 4D input to softmax
        # is in the NCHW layout while the fused elemwise op takes the NCHWc layout.
        # Since we parallelize over outer axes up to the "axis" parameter of softmax,
        # softmax and the fused op need to be in the same layout if we want to
        # fuse them under the same parallel loop.
        # This case can be removed if softmax supported AlterLayout.
        schedule_injective_from_existing(s, output)
        _schedule(softmax_op, softmax_op)
def schedule_softmax(outs):
    """Schedule for softmax on x86.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of softmax
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([out.op for out in outs])

    def _visit(op):
        # Any softmax-family op (softmax / fast_softmax / log_softmax).
        if "softmax" in op.tag:
            _schedule_softmax(op, sch, outs)

    traverse_inline(sch, outs[0].op, _visit)
    return sch
def schedule_batch_norm(outs):
    """Schedule for batch_norm on x86.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of batch_norm
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    sch = te.create_schedule([t.op for t in outs])
    root_op = outs[0].op
    # Fuse and parallelize every axis except the innermost one.
    num_axes = len(root_op.axis)
    fused = sch[root_op].fuse(*[root_op.axis[i] for i in range(num_axes - 1)])
    sch[root_op].parallel(fused)
    # when scale or center is enabled
    if "divide" not in root_op.name:
        div = root_op.input_tensors[0]
        sub = sch[div].op.input_tensors[0]
        sch[div].compute_inline()
        sch[sub].compute_inline()
    return sch
| 4,895 | 34.737226 | 87 | py |
tvm | tvm-main/python/tvm/topi/x86/group_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
# pylint: disable=no-value-for-parameter,import-outside-toplevel
"""Grouped Spatial Pack Convolution (Group Conv2D) schedule on x86"""
import tvm
from tvm import autotvm, te
from tvm.autotvm.task.space import OtherOptionEntity, SplitEntity
from tvm.target.x86 import get_simd_32bit_lanes
from .. import tag
from ..nn.conv2d import _get_workload as _get_conv2d_workload
from ..nn.pad import pad
from ..utils import get_const_tuple
def group_conv2d_nchw(data, kernel, strides, padding, dilation, groups, out_dtype):
    """Compute group_conv2d with NCHW layout via the spatial-pack implementation."""
    args = (data, kernel, strides, padding, dilation, groups, out_dtype)
    return group_conv2d_nchw_spatial_pack(*args)
def schedule_group_conv2d_nchw(outs):
    """Create a schedule for group_conv2d with NCHW layout (delegates to NCHWc)."""
    return schedule_group_conv2d_nchwc(outs)
def _get_default_config(
    cfg, data, kernel, strides, padding, dilation, groups, out_dtype, layout="NCHW"
):
    """Get default schedule config for the workload.

    Dynamic (tvm.tir.Var) data dimensions are replaced by 1 so that a
    static workload can be derived before filling the fallback schedule.
    """
    static_data_shape = [
        1 if isinstance(dim, tvm.tir.Var) else dim for dim in get_const_tuple(data.shape)
    ]
    data = te.placeholder(static_data_shape, dtype=data.dtype)
    wkl = _get_conv2d_workload(data, kernel, strides, padding, dilation, out_dtype, layout)
    _fallback_schedule(cfg, wkl)
def _fallback_schedule(cfg, wkl):
    """Fill ``cfg`` with a default GSPC schedule derived from workload ``wkl``.

    Used when autotvm falls back without a tuned record: picks the largest
    SIMD-friendly per-group channel block sizes and an output-width
    register-tiling factor.

    Fix: the original assigned ``oc_bn = 1`` twice in a row; the redundant
    duplicate statement is removed.
    """
    simd_width = get_simd_32bit_lanes()
    pad_left, pad_right = wkl.padl, wkl.padr
    stride_w = wkl.stride_w
    out_width = (wkl.width + pad_left + pad_right - wkl.kernel_w) // stride_w + 1
    groups = wkl.groups
    kernels_per_group = wkl.out_filter // groups
    kernel_depth = wkl.in_filter // groups

    # Largest divisor of kernels_per_group that fits in the SIMD width.
    oc_bn = 1
    for bn in range(simd_width, 0, -1):
        if kernels_per_group % bn == 0:
            oc_bn = bn
            break
    if oc_bn > kernels_per_group:
        oc_bn = kernels_per_group

    # Largest divisor of kernel_depth not exceeding oc_bn.
    ic_bn = 1
    for bn in range(oc_bn, 0, -1):
        if kernel_depth % bn == 0:
            ic_bn = bn
            break
    if ic_bn > kernel_depth:
        ic_bn = kernel_depth

    # Largest output-width tiling factor below 32 that divides out_width.
    reg_n = 1
    for n in range(31, 0, -1):
        if out_width % n == 0:
            reg_n = n
            break

    cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
    cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
    cfg["tile_ow"] = SplitEntity([out_width // reg_n, reg_n])
    cfg["unroll_kw"] = OtherOptionEntity(False)
@autotvm.register_topi_compute("group_conv2d_nchw.x86")
def group_conv2d_nchw_spatial_pack(
    cfg, data, kernel, strides, padding, dilation, groups, out_dtype="float32"
):
    """
    Compute group conv2d with NCHW layout, using GSPC algorithm.
    https://arxiv.org/abs/2006.09791

    Packs data into (g, n, ic_chunk, h, ic_block, w) and the kernel into
    (g, oc_chunk, ic_chunk, kh, kw, ic_block, oc_block), convolves in that
    blocked layout, then unpacks back to NCHW.
    """
    # Normalize dilation to (dilation_h, dilation_w).
    assert isinstance(dilation, int) or len(dilation) == 2
    if isinstance(dilation, int):
        dilation_h, dilation_w = dilation, dilation
    else:
        dilation_h, dilation_w = dilation

    # Normalize padding to (top, left, bottom, right).
    assert isinstance(padding, int) or len(padding) == 2 or len(padding) == 4
    if isinstance(padding, int):
        pad_top, pad_left, pad_bottom, pad_right = padding, padding, padding, padding
    elif len(padding) == 2:
        hpad, wpad = padding
        pad_top, pad_bottom = hpad, hpad
        pad_left, pad_right = wpad, wpad
    else:
        pad_top, pad_left, pad_bottom, pad_right = padding

    hpad = pad_top + pad_bottom
    wpad = pad_left + pad_right

    # Normalize strides to (stride_h, stride_w).
    assert isinstance(strides, int) or len(strides) == 2
    if isinstance(strides, int):
        stride_h, stride_w = strides, strides
    else:
        stride_h, stride_w = strides

    batch_size, in_channel, in_height, in_width = get_const_tuple(data.shape)
    out_channel, kernel_depth, k_height, k_width = get_const_tuple(kernel.shape)

    pad_height = in_height + pad_top + pad_bottom
    pad_width = in_width + pad_left + pad_right

    dilated_kernel_h = (k_height - 1) * dilation_h + 1
    dilated_kernel_w = (k_width - 1) * dilation_w + 1
    out_height = (in_height + pad_top + pad_bottom - dilated_kernel_h) // stride_h + 1
    out_width = (in_width + pad_left + pad_right - dilated_kernel_w) // stride_w + 1

    kernels_per_group = out_channel // groups

    # Tuning knobs: channel block sizes, output-width tiling, kw unrolling.
    cfg.define_split("tile_ic", in_channel, num_outputs=2)
    cfg.define_split("tile_oc", out_channel, num_outputs=2)
    cfg.define_split("tile_ow", out_width, num_outputs=2, filter=lambda y: y.size[-1] <= 64)
    cfg.define_knob("unroll_kw", [True, False])

    # If no config was set, we can fallback to default config.
    if cfg.is_fallback:
        _get_default_config(
            cfg,
            te.placeholder((batch_size, in_channel, in_height, in_width), dtype=data.dtype),
            te.placeholder(
                (out_channel, in_channel // groups, k_height, k_width), dtype=kernel.dtype
            ),
            strides,
            padding,
            dilation,
            groups,
            out_dtype,
        )

    oc_bn = cfg["tile_oc"].size[-1]
    ic_bn = cfg["tile_ic"].size[-1]

    # pack data
    DOPAD = hpad != 0 or wpad != 0
    if DOPAD:
        data_pad = pad(
            data, (0, 0, pad_top, pad_left), (0, 0, pad_bottom, pad_right), name="data_pad"
        )
    else:
        data_pad = data

    shape = (groups, batch_size, kernel_depth // ic_bn, pad_height, ic_bn, pad_width)
    data_vec = te.compute(
        shape,
        lambda g, n, C, h, c, w: data_pad[n, C * ic_bn + c + kernel_depth * g, h, w],
        name="data_vec",
    )

    # pack kernel
    shape = (
        groups,
        kernels_per_group // oc_bn,
        kernel_depth // ic_bn,
        k_height,
        k_width,
        ic_bn,
        oc_bn,
    )
    kernel_vec = te.compute(
        shape,
        lambda g, out_channel, in_channel, h, w, ci, co: kernel[
            (out_channel * oc_bn + co + g * kernels_per_group), in_channel * ic_bn + ci, h, w
        ],
        name="kernel_vec",
    )

    # convolution
    oshape = (groups, batch_size, kernels_per_group // oc_bn, out_height, out_width, oc_bn)
    unpack_shape = (batch_size, out_channel, out_height, out_width)

    # Reduction axes: per-group input depth and the kernel window.
    ic = te.reduce_axis((0, (kernel_depth)), name="ic")
    kh = te.reduce_axis((0, k_height), name="kh")
    kw = te.reduce_axis((0, k_width), name="kw")

    idxmod = tvm.tir.indexmod
    idxdiv = tvm.tir.indexdiv

    conv = te.compute(
        oshape,
        lambda g, n, oc_chunk, oh, ow, oc_block: te.sum(
            data_vec[
                g,
                n,
                idxdiv(ic, ic_bn),
                oh * stride_h + kh * dilation_h,
                idxmod(ic, ic_bn),
                ow * stride_w + kw * dilation_w,
            ].astype(out_dtype)
            * kernel_vec[
                g, oc_chunk, idxdiv(ic, ic_bn), kh, kw, idxmod(ic, ic_bn), oc_block
            ].astype(out_dtype),
            axis=[ic, kh, kw],
        ),
        name="conv",
    )

    # Unpack the blocked result back into NCHW.
    unpack = te.compute(
        unpack_shape,
        lambda n, c, h, w: conv[
            idxdiv(c, kernels_per_group),
            n,
            idxmod(idxdiv(c, oc_bn), (kernels_per_group // oc_bn)),
            h,
            w,
            idxmod(idxmod(c, oc_bn), kernels_per_group),
        ].astype(out_dtype),
        name="output_unpack",
        tag="group_conv2d_nchw",
    )
    return unpack
@autotvm.register_topi_schedule("group_conv2d_nchw.x86")
def schedule_group_conv2d_nchwc(cfg, outs):
    """Create schedule for group_conv2d_nchw (GSPC) output tensors."""
    s = te.create_schedule([x.op for x in outs])
    # Ops already visited, to avoid re-scheduling shared producers.
    scheduled_ops = []

    def traverse(op):
        """Traverse operators from computation graph"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(op.tag):
            if op not in s.outputs:
                s[op].compute_inline()
            for tensor in op.input_tensors:
                if isinstance(tensor.op, tvm.te.ComputeOp) and tensor.op not in scheduled_ops:
                    traverse(tensor.op)

        if "group_conv2d_nchw" in op.tag:
            output = op.output(0)

            # Bail out if the tuning config does not carry the GSPC knobs.
            if "tile_ic" not in cfg:
                return
            # Walk back through the compute graph produced by
            # group_conv2d_nchw_spatial_pack: unpack -> conv -> packed
            # data/kernel -> (optional) pad/dilate stages.
            conv_out = op.input_tensors[0]
            kernel_vec = conv_out.op.input_tensors[1]
            kernel = kernel_vec.op.input_tensors[0]
            if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
                s[kernel].compute_inline()
            data_vec = conv_out.op.input_tensors[0]
            data = data_vec.op.input_tensors[0]
            data_pad = None
            if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
                data_pad = data
                data = data_pad.op.input_tensors[0]

            args = [s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, outs[0]]
            _schedule_gspc_nchw(*args)

        scheduled_ops.append(op)

    traverse(outs[0].op)
    return s
def _schedule_gspc_nchw(s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, last):
    """Schedule GSPC (grouped spatial-pack conv2d) stages.

    Inlines padding, parallelizes the data/kernel packing stages, tiles
    the convolution with the tuned (ic, oc, ow) block sizes, and schedules
    the unpack/output stage.
    """
    ic_bn, oc_bn, reg_n, unroll_kw = (
        cfg["tile_ic"].size[-1],
        cfg["tile_oc"].size[-1],
        cfg["tile_ow"].size[-1],
        cfg["unroll_kw"].val,
    )

    # W: packed kernel stage; A0: pad stage (may be None); A1: packed data.
    _, W = data, kernel_vec
    A0, A1 = data_pad, data_vec

    # schedule data
    if (
        data_pad is not None
        and isinstance(data_pad.op, tvm.te.ComputeOp)
        and "pad" in data_pad.op.tag
    ):
        s[A0].compute_inline()

    groups, batch, ic_chunk, ih, ic_block, _ = s[A1].op.axis

    parallel_axis = s[A1].fuse(batch, ic_chunk, ih)
    s[A1].parallel(parallel_axis)

    # schedule kernel pack
    groups, oc_chunk, ic_chunk, oh, ow, ic_block, oc_block = s[W].op.axis
    s[W].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block)

    if oc_bn > 1:
        s[W].vectorize(oc_block)

    parallel_axis = s[W].fuse(groups, oc_chunk, oh)
    s[W].parallel(parallel_axis)

    # schedule conv: C is the conv stage, O0 the unpack stage, O the final
    # (possibly fused) output; CC is a global cache for the accumulation.
    C, O0, O = conv_out, output, last
    CC = s.cache_write(C, "global")

    _, _, oc_chunk, oh, ow, oc_block = s[C].op.axis

    ow_chunk, ow_block = s[C].split(ow, factor=reg_n)

    s[C].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
    s[C].fuse(oc_chunk, oh)
    s[C].vectorize(oc_block)

    groups, batch, oc_chunk, oh, ow, oc_block = s[CC].op.axis

    ic, kh, kw = s[CC].op.reduce_axis
    ow_chunk, ow_block = s[CC].split(ow, factor=reg_n)
    ic_chunk, ic_block = s[CC].split(ic, factor=ic_bn)

    # Optionally hoist ic_block above kw and unroll the kernel-width loop.
    if unroll_kw:
        s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kh, ic_block, kw, ow_block, oc_block)
        s[CC].unroll(kw)
    else:
        s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kh, kw, ic_block, ow_block, oc_block)

    parallel_axis = s[CC].fuse(groups, batch, oc_chunk, oh)
    s[CC].parallel(parallel_axis)

    s[CC].vectorize(oc_block)

    s[CC].unroll(ow_block)

    if O0 != O:
        s[O0].compute_inline()

    # Tile the output stage to match the conv blocking.
    batch, oc, oh, ow = s[O].op.axis
    ow_chunk, ow_block = s[O].split(ow, factor=reg_n)
    oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)

    s[O].reorder(batch, oc_chunk, oh, ow_chunk, ow_block, oc_block)
    parallel_axis = s[O].fuse(oc_chunk, oh)
    s[O].vectorize(oc_block)
    s[O].parallel(parallel_axis)
    return s
| 12,077 | 31.380697 | 94 | py |
tvm | tvm-main/python/tvm/topi/x86/batch_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-locals,unused-variable
# pylint: disable=unused-argument
"""x86 batch_matmul operators"""
import tvm
from tvm import autotvm, te
from tvm.autotvm.task.space import SplitEntity
from tvm.contrib import cblas, mkl
from tvm.target.x86 import target_has_amx, target_has_avx512
from .. import generic, nn
from ..transform import layout_transform
from ..utils import get_const_tuple, get_max_power2_factor, traverse_inline
from .dense import dense_amx_int8_schedule, dense_int8_schedule
from .injective import schedule_injective_from_existing
@autotvm.register_topi_compute("batch_matmul_int8.x86")
def batch_matmul_int8_compute(cfg, x, y, *_):
    """Compute for uint8 x int8 -> int32 batch_matmul.

    The second operand is repacked to the BNK16n4k layout so that 16 output
    columns x 4 reduction elements are contiguous, matching the x86 int8
    dot-product access pattern.
    """
    batch, m, k = x.shape
    packed_y_layout = "BNK16n4k"
    packed_y = layout_transform(y, "BNK", packed_y_layout)
    _, n_o, _, n_i, _ = packed_y.shape
    ak = te.reduce_axis((0, k), name="k")
    mcpu = tvm.target.Target.current().mcpu
    # Attach the meta-schedule rule only on targets with AVX512.
    if target_has_avx512(mcpu):
        attrs_info = {"schedule_rule": "batch_matmul_int8"}
    else:
        attrs_info = None

    # z[b, i, j] = sum_k x[b, i, k] * y[b, k, j], with y read through the
    # packed layout: block index j//16, lane j%16, reduction split k//4, k%4.
    z = te.compute(
        (batch, m, n_o * n_i),
        lambda b, i, j: te.sum(
            x[b, i, ak].astype("int32")
            * packed_y[b, tvm.tir.indexdiv(j, 16), tvm.tir.indexdiv(ak, 4), j % 16, ak % 4].astype(
                "int32"
            ),
            axis=ak,
        ),
        tag="batch_matmul_int8",
        attrs=attrs_info,
    )
    return z
def batch_matmul_int8_schedule(cfg, s, C, O, layout_trans):
    """Schedule batch_matmul compute using avx512 or lower instructions
    including VNNI vpdpbusd instruction if possible.

    C is the output of the batched GEMM, O the output of the fused op;
    the GEMM itself is scheduled by the dense int8 helper, and batch-level
    parallelism is added here.
    """
    s, inner = dense_int8_schedule(cfg, s, C, O, do_parallel=False)
    batch_fused = s[O].fuse(O.op.axis[0], inner)
    s[O].parallel(batch_fused)

    # Tunable: compute the weight layout transform at the root or keep it
    # close to its consumer.
    cfg.define_knob("layout_trans_compute_root", [0, 1])
    if not cfg["layout_trans_compute_root"].val:
        s[layout_trans].compute_at(s[O], batch_fused)
        _, _, _, ni, ki = s[layout_trans].op.axis
        s[layout_trans].vectorize(ki)
        s[layout_trans].unroll(ni)
    else:
        s[layout_trans].compute_root()
        schedule_injective_from_existing(s, layout_trans)
    return s
def batch_matmul_amx_schedule(cfg, s, C, O, layout_trans):
    """Schedule batch_matmul compute using the AMX tdpbusd instruction.

    C is the output of the batched GEMM, O the output of the fused op;
    the GEMM itself is scheduled by the dense AMX int8 helper, and the
    outer (batch-fused) loop is parallelized here.
    """
    s, inner = dense_amx_int8_schedule(cfg, s, C, O, do_parallel=False)
    batch_fused = s[O].fuse(O.op.axis[0], inner)
    s[O].parallel(batch_fused)

    # Tunable: compute the weight layout transform at the root, or just
    # vectorize/unroll it in place.
    cfg.define_knob("layout_trans_compute_root", [0, 1])
    if not cfg["layout_trans_compute_root"].val:
        _, _, _, ni, ki = s[layout_trans].op.axis
        s[layout_trans].vectorize(ki)
        s[layout_trans].unroll(ni)
    else:
        s[layout_trans].compute_root()
        schedule_injective_from_existing(s, layout_trans)
    return s
@autotvm.register_topi_compute("batch_matmul.x86")
def batch_matmul(
    cfg, tensor_a, tensor_b, out_shape=None, out_dtype=None, transpose_a=False, transpose_b=True
):
    """Compute batch matrix multiplication of `tensor_a` and `tensor_b`.

    Both `tensor_a` and `tensor_b` can be transposed. For legacy reason, we use NT format
    (transpose_a=False, transpose_b=True) by default.

    Parameters
    ----------
    cfg : ConfigSpace
        Autotvm tuning space config file.
    tensor_a : tvm.te.Tensor
        3-D with shape [batch, M, K] or [batch, K, M].
    tensor_b : tvm.te.Tensor
        3-D with shape [batch, K, N] or [batch, N, K].
    out_shape : List[Optional]
        Explicit intended output shape of the computation. Can be useful in cases
        with dynamic input shapes.
    out_dtype : Optional[str]
        Specifies the output data type for mixed precision batch matmul.
    transpose_a : Optional[bool] = False
        Whether the first tensor is in transposed format.
    transpose_b : Optional[bool] = True
        Whether the second tensor is in transposed format.

    Returns
    -------
    output : tvm.te.Tensor
        3-D with shape [batch, M, N]
    """
    if cfg.is_fallback:
        # Derive (M, N, K) from the operand shapes, honoring transposition,
        # and seed a default tiling.
        a_shape = get_const_tuple(tensor_a.shape)
        b_shape = get_const_tuple(tensor_b.shape)
        M = a_shape[2] if transpose_a else a_shape[1]
        K = a_shape[1] if transpose_a else a_shape[2]
        N = b_shape[1] if transpose_b else b_shape[2]
        _default_batch_matmul_config(cfg, M, N, K)
    return nn.batch_matmul(
        tensor_a,
        tensor_b,
        out_shape,
        out_dtype,
        transpose_a,
        transpose_b,
    )
@autotvm.register_topi_schedule("batch_matmul.x86")
def schedule_batch_matmul(cfg, outs):
    """Schedule for batch_matmul

    Parameters
    ----------
    cfg : ConfigSpace
        AutoTVM tuning space config file.
    outs : Array of Tensor
        The computation graph description of batch_matmul
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    s = te.create_schedule([x.op for x in outs])

    def _callback(op):
        if "batch_matmul" in op.tag:
            C = op.output(0)
            A, B = op.input_tensors
            # When B is just a rewrite of A (e.g. a transpose of the same
            # tensor), inline it instead of materializing it.
            if len(B.op.input_tensors) == 1 and B.op.input_tensors[0] == A:
                s[B].compute_inline()
            _, M, K = get_const_tuple(A.shape)
            _, _, N = get_const_tuple(C.shape)

            # O is the final output stage; inline C if it is fused away.
            if op not in s.outputs:
                s[C].compute_inline()
                O = outs[0]
            else:
                O = C

            CC = s.cache_write(C, "global")

            # create tuning space
            cfg.define_split("tile_y", M, num_outputs=2)
            cfg.define_split("tile_x", N, num_outputs=2)
            cfg.define_split("tile_k", K, num_outputs=2)

            # Tile the output over (y, x) and parallelize the fused
            # (batch, y_outer, x_outer) loop.
            b, y, x = s[O].op.axis
            yo, yi = cfg["tile_y"].apply(s, O, y)
            xo, xi = cfg["tile_x"].apply(s, O, x)
            s[O].reorder(b, yo, xo, yi, xi)
            bxyo = s[O].fuse(b, yo, xo)
            s[O].parallel(bxyo)

            s[CC].compute_at(s[O], bxyo)
            (k,) = s[CC].op.reduce_axis
            ko, ki = cfg["tile_k"].apply(s, CC, k)

            # rfactor over the inner reduction chunk so it can be vectorized.
            Crf = s.rfactor(CC, ki)
            s[Crf].compute_at(s[CC], s[CC].op.axis[0])
            _, _, y, x = s[Crf].op.axis
            s[Crf].fuse(y, x)
            s[Crf].vectorize(s[Crf].op.axis[0])
            s[O].pragma(bxyo, "auto_unroll_max_step", 16)

    traverse_inline(s, outs[0].op, _callback)
    return s
@autotvm.register_topi_schedule("batch_matmul_int8.x86")
def schedule_batch_matmul_int8(cfg, outs):
    """Schedule for batch_matmul_int8: dispatch to the AMX or AVX512
    int8 schedule depending on the current target CPU."""
    sch = te.create_schedule([out.op for out in outs])
    mcpu = tvm.target.Target.current().mcpu

    def _callback(op):
        if "batch_matmul_int8" not in op.tag:
            return
        layout_trans = op.input_tensors[1]
        if target_has_amx(mcpu):
            batch_matmul_amx_schedule(cfg, sch, op.output(0), outs[0], layout_trans)
        elif target_has_avx512(mcpu):
            batch_matmul_int8_schedule(cfg, sch, op.output(0), outs[0], layout_trans)

    traverse_inline(sch, outs[0].op, _callback)
    return sch
def _default_batch_matmul_config(cfg, M, N, K):
    """Seed ``cfg`` with a default tiling for an M x K by K x N batch matmul."""
    y_bn = get_max_power2_factor(M, 8)
    x_bn = get_max_power2_factor(N, 8)
    cfg["tile_y"] = SplitEntity([M // y_bn, y_bn])
    cfg["tile_x"] = SplitEntity([N // x_bn, x_bn])
    cfg["tile_k"] = SplitEntity([K // 16, 16])
def batch_matmul_blas_common(cfg, tensor_a, tensor_b, out_shape, trans_a, trans_b, lib):
    """Computes batch matrix multiplication of `tensor_a` and `tensor_b` when `tensor_a` and
    `tensor_b` are data in batch, using one of BLAS libraries. Supports broadcasting in batch
    dimension.

    Parameters
    ----------
    cfg : ConfigSpace
        Autotvm tuning space config file
    tensor_a : tvm.te.Tensor
        3-D with shape [batch, M, K] or [batch, K, M].
    tensor_b : tvm.te.Tensor
        3-D with shape [batch, K, N] or [batch, N, K].
    out_shape : List[Optional]
        Explicit intended output shape of the computation. Can be useful in cases
        with dynamic input shapes.
    trans_a : Optional[bool] = False
        Whether the first tensor is in transposed format.
    trans_b : Optional[bool] = True
        Whether the second tensor is in transposed format.
    lib : A contrib module which implements batch_matmul function
        cblas and mkl are supported

    Returns
    -------
    output : tvm.te.Tensor
        3-D with shape [batch, M, N]
    """
    assert len(tensor_a.shape) == 3 and len(tensor_b.shape) == 3, "only support 3-dim batch_matmul"
    if trans_a:
        XB, XK, M = get_const_tuple(tensor_a.shape)
    else:
        XB, M, XK = get_const_tuple(tensor_a.shape)
    if trans_b:
        YB, N, YK = get_const_tuple(tensor_b.shape)
    else:
        # Bug fix: this branch previously read tensor_a.shape, so a
        # non-transposed B got its (YB, YK, N) from the wrong operand.
        YB, YK, N = get_const_tuple(tensor_b.shape)
    assert (XB == YB) or (YB == 1) or (XB == 1), "batch dimension doesn't match"
    assert XK == YK, "shapes of x and y is inconsistent"
    if out_shape is not None:
        assert out_shape[0] in (XB, YB), "got invalid output shape"
        assert out_shape[1] == M, "got invalid output shape"
        assert out_shape[2] == N, "got invalid output shape"
    # 2*M*N*K flops per batch element.
    cfg.add_flop(XB * M * N * XK * 2)
    return lib.batch_matmul(tensor_a, tensor_b, trans_a, trans_b)
@autotvm.register_topi_compute("batch_matmul_cblas.x86")
def batch_matmul_cblas(
    cfg, tensor_a, tensor_b, out_shape=None, out_dtype=None, transpose_a=False, transpose_b=True
):
    """Compute batch_matmul using the cblas library."""
    del out_dtype  # unused: cblas computes in the input dtype
    return batch_matmul_blas_common(
        cfg, tensor_a, tensor_b, out_shape, transpose_a, transpose_b, cblas
    )
@autotvm.register_topi_schedule("batch_matmul_cblas.x86")
def schedule_batch_matmul_cblas(_, outs):
    """Create schedule for batch_matmul_cblas"""
    # The cblas kernel is an opaque extern call; the generic extern
    # schedule (no loop transformations) is all that is needed.
    return generic.schedule_extern(outs)
@autotvm.register_topi_compute("batch_matmul_mkl.x86")
def batch_matmul_mkl(
    cfg, tensor_a, tensor_b, out_shape=None, out_dtype=None, transpose_a=False, transpose_b=True
):
    """Compute batch_matmul using mkl.

    Thin wrapper forwarding to :func:`batch_matmul_blas_common` with the
    mkl contrib module as the BLAS backend.
    """
    # out_dtype exists only for signature uniformity; BLAS determines
    # the accumulation type itself.
    del out_dtype
    return batch_matmul_blas_common(
        cfg,
        tensor_a,
        tensor_b,
        out_shape,
        trans_a=transpose_a,
        trans_b=transpose_b,
        lib=mkl,
    )
@autotvm.register_topi_schedule("batch_matmul_mkl.x86")
def schedule_batch_matmul_mkl(_, outs):
    """Create schedule for batch_matmul_mkl"""
    # The mkl kernel is an opaque extern call; the generic extern
    # schedule (no loop transformations) is all that is needed.
    return generic.schedule_extern(outs)
| 11,550 | 32.676385 | 99 | py |
tvm | tvm-main/python/tvm/topi/x86/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""x86 specific declaration and schedules."""
from __future__ import absolute_import as _abs
from .conv1d import *
from .conv2d import *
from .conv3d import *
from .binarize_pack import schedule_binarize_pack
from .binary_dense import schedule_binary_dense
from .nn import *
from .conv2d_int8 import *
from .injective import *
from .reduction import *
from .pooling import schedule_pool, schedule_adaptive_pool
from .bitserial_conv2d import *
from .bitserial_dense import *
from .depthwise_conv2d import *
from .dense import *
from .batch_matmul import *
from .roi_align import roi_align_nchw
from .conv2d_transpose import *
from .conv3d_transpose import *
from .sparse import *
from .conv2d_alter_op import *
from .dense_alter_op import *
from .group_conv2d import *
from .math_alter_op import *
from .concat import *
| 1,659 | 35.086957 | 62 | py |
tvm | tvm-main/python/tvm/topi/x86/conv3d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
# pylint: disable=no-value-for-parameter
"""Conv3D Transpose schedule on x86"""
from tvm import te
from ..utils import traverse_inline
from .. import nn
from .conv3d import conv3d_ncdhw, schedule_conv3d_ncdhw
def conv3d_transpose_ncdhw(data, kernel, strides, padding, out_dtype, output_padding):
    """Transposed 3D convolution in NCDHW layout.

    The shared preprocessing helper dilates/pads the input and transforms
    the kernel, after which the problem reduces to a plain unit-stride,
    zero-padding conv3d_ncdhw.
    """
    padded, transformed_kernel = nn.conv3d_transpose_ncdhw_preprocess(
        data, kernel, strides, padding, out_dtype, output_padding
    )
    return conv3d_ncdhw(
        padded,
        transformed_kernel,
        (1, 1, 1),
        (0, 0, 0),
        (1, 1, 1),
        1,
        out_dtype,
    )
def schedule_conv3d_transpose_ncdhw(outs):
    """Create schedule for conv3d_transpose_ncdhw tensors.

    Reuses the regular conv3d_ncdhw schedule, additionally inlining the
    dilate/pad stages of the input and the kernel-transform stage.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = schedule_conv3d_ncdhw(outs)

    def _inline_transpose_stages(op):
        if "unpack_ncdhwc" not in op.tag:
            return
        conv = op.input_tensors[0]
        # Inline the dilate and pad stages feeding the packed data tensor.
        data_vec = conv.op.input_tensors[0]
        padded = data_vec.op.input_tensors[0]
        dilated = padded.op.input_tensors[0]
        sch[dilated].compute_inline()
        sch[padded].compute_inline()
        # Inline the kernel transform stage as well.
        kernel_vec = conv.op.input_tensors[1]
        transformed = kernel_vec.op.input_tensors[0]
        sch[transformed].compute_inline()

    traverse_inline(sch, outs[0].op, _inline_transpose_stages)
    return sch
| 2,257 | 38.614035 | 98 | py |
tvm | tvm-main/python/tvm/topi/x86/binary_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Schedule for binary dense operator."""
from tvm import te
from .. import tag
def schedule_binary_dense(outs):
    """Schedule for binary_dense.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of binary_dense
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for binary_dense.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([t.op for t in outs])
    visited = []

    def _schedule_dense(out_tensor):
        # Split the popcount reduction and parallelize over output rows.
        sch[out_tensor].split(sch[out_tensor].op.reduce_axis[0], factor=8)
        sch[out_tensor].parallel(sch[out_tensor].op.axis[0])
        # Vectorize the innermost columns of the final stage (the dense op
        # itself when it is the graph output, otherwise the fused output).
        final = out_tensor if out_tensor.op in sch.outputs else outs[0].op.output(0)
        _, inner = sch[final].split(final.op.axis[1], factor=8)
        sch[final].vectorize(inner)

    def _traverse(op):
        """Walk producers: inline broadcasts, schedule the dense stage."""
        if tag.is_broadcast(op.tag):
            if op not in sch.outputs:
                sch[op].compute_inline()
            for inp in op.input_tensors:
                if isinstance(inp.op, te.tensor.ComputeOp) and inp.op not in visited:
                    _traverse(inp.op)
        elif op.tag == "binary_dense":
            _schedule_dense(op.output(0))
        else:
            raise RuntimeError(f"Unsupported operator: {op.tag}")
        visited.append(op)

    _traverse(outs[0].op)
    return sch
| 2,528 | 33.643836 | 97 | py |
tvm | tvm-main/python/tvm/topi/x86/conv2d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D Transpose schedule on x86"""
from tvm import te
from ..utils import traverse_inline
from .. import nn
from .conv2d import conv2d_nchw, schedule_conv2d_nchw
def conv2d_transpose_nchw(data, kernel, strides, padding, out_dtype, output_padding):
    """Transposed 2D convolution in NCHW layout.

    The shared preprocessing helper dilates/pads the input and transforms
    the kernel, reducing the problem to a plain unit-stride, zero-padding
    conv2d_nchw.
    """
    padded, transformed_kernel = nn.conv2d_transpose_nchw_preprocess(
        data, kernel, strides, padding, out_dtype, output_padding
    )
    return conv2d_nchw(
        padded,
        transformed_kernel,
        strides=(1, 1),
        padding=(0, 0),
        dilation=(1, 1),
        out_dtype=out_dtype,
    )
def schedule_conv2d_transpose_nchw(outs):
    """Create schedule for conv2d_transpose_nchw tensors.

    Reuses the regular conv2d_nchw schedule and additionally inlines the
    dilate/pad stages of the input and the kernel-transform stage. The
    isinstance guards skip placeholders (e.g. pre-packed inputs while
    autotuning), mirroring the unconditional inlining done in the 3-D
    counterpart.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = schedule_conv2d_nchw(outs)

    def _callback(op):
        if "unpack_nchwc" in op.tag:
            conv_out = op.input_tensors[0]
            # retrieve data
            data_vec = conv_out.op.input_tensors[0]
            # BUGFIX: the guard must inspect the producing op, not the tensor
            # itself — a Tensor is never a ComputeOp, so the inlining below
            # was previously unreachable.
            if isinstance(data_vec.op, te.tensor.ComputeOp):
                data_pad = data_vec.op.input_tensors[0]
                data_dilate = data_pad.op.input_tensors[0]
                s[data_dilate].compute_inline()
                s[data_pad].compute_inline()
            # retrieve kernel
            kernel_vec = conv_out.op.input_tensors[1]
            if isinstance(kernel_vec.op, te.tensor.ComputeOp):
                kernel_transform = kernel_vec.op.input_tensors[0]
                s[kernel_transform].compute_inline()

    traverse_inline(s, outs[0].op, _callback)
    return s
| 2,411 | 37.285714 | 85 | py |
tvm | tvm-main/python/tvm/topi/x86/depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
# pylint: disable=no-value-for-parameter
"""Depthwise Conv2D schedule on x86"""
import tvm
from tvm import autotvm, te
from tvm.autotvm.task.space import OtherOptionEntity, SplitEntity
from tvm.target.x86 import get_simd_32bit_lanes
from ..nn.conv2d import unpack_NCHWc_to_nchw
from ..nn.depthwise_conv2d import _get_workload, depthwise_conv2d_infer_layout
from ..nn.pad import pad
from ..nn.utils import get_pad_tuple
from ..utils import get_const_tuple, traverse_inline
def _fallback_schedule(cfg, wkl):
    """
    Get default schedule for the workload

    Parameters
    ----------
    cfg : tvm.autotvm.task.space.FallbackConfigEntity
        Fallback config to be updated
    wkl : topi.nn.depthwise_conv2d.Workload
        Convolution workload
    """
    simd_width = get_simd_32bit_lanes()
    dilated_kernel_w = (wkl.kernel_w - 1) * wkl.dilation_w + 1
    out_width = (wkl.width - dilated_kernel_w + wkl.padl + wkl.padr) // wkl.stride_w + 1

    # Largest divisor of out_filter not exceeding the SIMD width
    # (1 always divides, so next() cannot raise).
    oc_bn = next(bn for bn in range(simd_width, 0, -1) if wkl.out_filter % bn == 0)
    # Largest divisor of in_filter not exceeding oc_bn.
    ic_bn = next(bn for bn in range(oc_bn, 0, -1) if wkl.in_filter % bn == 0)
    # Largest divisor of out_width below 32, used as register tile.
    reg_n = next(n for n in range(31, 0, -1) if out_width % n == 0)

    cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
    cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
    cfg["tile_ow"] = SplitEntity([out_width // reg_n, reg_n])
    cfg["unroll_kw"] = OtherOptionEntity(False)
def depthwise_conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype):
    """Compute depthwise conv2d with NCHW layout.

    Computes in the packed NCHWc layout internally and unpacks the
    result back to NCHW.
    """
    packed = depthwise_conv2d_NCHWc(
        data, kernel, strides, padding, dilation, "NCHW", "NCHW", out_dtype
    )
    return unpack_NCHWc_to_nchw(packed, out_dtype)
def schedule_depthwise_conv2d_nchw(outs):
    """Create schedule for depthwise_conv2d_nchw."""
    # The NCHW compute is implemented on top of the NCHWc one (see
    # depthwise_conv2d_nchw above), so the NCHWc schedule applies directly.
    return schedule_depthwise_conv2d_NCHWc(outs)
def _pack_data(cfg, data, kernel):
    """Pack 4-D NCHW data and the depthwise filter into blocked layouts.

    Channel block sizes are taken from the ``tile_ic``/``tile_oc``
    entries of *cfg*.
    """
    batch, in_ch, height, width = get_const_tuple(data.shape)
    n_filters, ch_mult, k_height, k_width = get_const_tuple(kernel.shape)
    out_ch = n_filters * ch_mult
    ic_bn = cfg["tile_ic"].size[-1]
    oc_bn = cfg["tile_oc"].size[-1]

    packed_data = te.compute(
        (batch, in_ch // ic_bn, height, width, ic_bn),
        lambda b, co, h, w, ci: data[b, co * ic_bn + ci, h, w],
        name="data_vec",
    )
    # Flatten the packed output channel back to (filter, multiplier) indices.
    packed_kernel = te.compute(
        (out_ch // oc_bn, 1, k_height, k_width, 1, oc_bn),
        lambda co, ci, r_h, r_w, cib, cb: kernel[
            (co * oc_bn + cb) // ch_mult, (co * oc_bn + cb) % ch_mult, r_h, r_w
        ],
        name="kernel_vec",
    )
    return packed_data, packed_kernel
@autotvm.register_topi_compute("depthwise_conv2d_NCHWc.x86")
def depthwise_conv2d_NCHWc(
    cfg, data, kernel, strides, padding, dilation, layout, out_layout, out_dtype=None
):
    """Compute depthwise conv2d with NCHWc layout

    Accepts either pre-packed inputs (5-D NCHWc data, 6-D kernel) or raw
    4-D NCHW data with an (out_channel, multiplier, kh, kw) filter; raw
    inputs are packed here. Returns a 5-D NCHWc output tensor.
    """
    out_dtype = data.dtype if out_dtype is None else out_dtype
    if len(data.shape) == 5:
        # Pre-packed path: recover logical channel counts from the blocks.
        batch, in_channel_chunk, in_height, in_width, in_channel_block = get_const_tuple(data.shape)
        (
            out_channel_chunk,
            cm_chunk,
            filter_height,
            filter_width,
            cm_block,
            out_channel_block,
        ) = get_const_tuple(kernel.shape)
        in_channel = in_channel_chunk * in_channel_block
        out_channel = out_channel_chunk * out_channel_block
        channel_multiplier = cm_chunk * cm_block
        assert channel_multiplier * in_channel == out_channel
    else:
        # Raw 4-D path; only channel_multiplier == 1 is supported here.
        batch, in_channel, in_height, in_width = get_const_tuple(data.shape)
        out_channel, channel_multiplier, filter_height, filter_width = get_const_tuple(kernel.shape)
        assert channel_multiplier == 1
    strides = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    HSTR, WSTR = strides
    dh, dw = dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation)
    dilated_kernel_h = (filter_height - 1) * dh + 1
    dilated_kernel_w = (filter_width - 1) * dw + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    HPAD = pad_top + pad_down
    WPAD = pad_left + pad_right
    out_height = (in_height + HPAD - dilated_kernel_h) // HSTR + 1
    out_width = (in_width + WPAD - dilated_kernel_w) // WSTR + 1
    # Tuning knobs: channel block sizes, output-width register tile, and
    # whether to unroll the kernel-width reduction.
    cfg.define_split("tile_ic", in_channel, num_outputs=2)
    cfg.define_split("tile_oc", out_channel, num_outputs=2)
    cfg.define_split("tile_ow", out_width, num_outputs=2, filter=lambda y: y.size[-1] <= 64)
    cfg.define_knob("unroll_kw", [True, False])
    # get workload and related schedule config
    wkl = _get_workload(
        te.placeholder((batch, in_channel, in_height, in_width), dtype=data.dtype),
        te.placeholder(
            (out_channel, channel_multiplier, filter_height, filter_width), dtype=kernel.dtype
        ),
        strides,
        (pad_top, pad_down),
        dilation,
        out_dtype,
    )
    if cfg.is_fallback:
        _fallback_schedule(cfg, wkl)
    # Pack data if raw 4-D data is provided.
    # This can only happen when autotuning.
    if len(data.shape) == 4:
        if autotvm.GLOBAL_SCOPE.in_tuning:
            # Directly use modified data layout placeholder.
            in_channel_block = cfg["tile_ic"].size[-1]
            in_channel_chunk = in_channel // in_channel_block
            out_channel_block = cfg["tile_oc"].size[-1]
            out_channel_chunk = out_channel // out_channel_block
            dshape = (batch, in_channel_chunk, in_height, in_width, in_channel_block)
            data = tvm.te.placeholder(dshape, data.dtype, name="data")
            kshape = (out_channel_chunk, 1, filter_height, filter_width, 1, out_channel_block)
            kernel = tvm.te.placeholder(kshape, kernel.dtype, name="kernel")
        else:
            data, kernel = _pack_data(cfg, data, kernel)
            _, _, _, _, in_channel_block = get_const_tuple(data.shape)
            out_channel_chunk, _, _, _, _, out_channel_block = get_const_tuple(kernel.shape)
    # padding stage
    DOPAD = pad_top != 0 or pad_left != 0 or pad_down != 0 or pad_right != 0
    if DOPAD:
        pad_before = [0, 0, pad_top, pad_left, 0]
        pad_after = [0, 0, pad_down, pad_right, 0]
        data_pad = pad(data, pad_before, pad_after, name="PaddedInput")
    else:
        data_pad = data
    # depthconv stage
    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod
    kh = te.reduce_axis((0, filter_height), name="kh")
    kw = te.reduce_axis((0, filter_width), name="kw")
    Output = te.compute(
        (batch, out_channel_chunk, out_height, out_width, out_channel_block),
        lambda b, oco, oh, ow, oci: te.sum(
            (
                # Map the packed output channel back to its input channel:
                # input channel index = output channel // channel_multiplier,
                # then split into (chunk, block) for the packed layout.
                data_pad[
                    b,
                    idxdiv(
                        idxdiv(oco * out_channel_block + oci, channel_multiplier), in_channel_block
                    ),
                    oh * HSTR + kh * dh,
                    ow * WSTR + kw * dw,
                    idxmod(
                        idxdiv(oco * out_channel_block + oci, channel_multiplier), in_channel_block
                    ),
                ].astype(out_dtype)
                * kernel[oco, 0, kh, kw, 0, oci].astype(out_dtype)
            ),
            axis=[kh, kw],
        ),
        name="DepthwiseConv2d",
        tag="depthwise_conv2d_NCHWc",
    )
    return Output
@autotvm.register_topi_schedule("depthwise_conv2d_NCHWc.x86")
def schedule_depthwise_conv2d_NCHWc(cfg, outs):
    """CPU schedule for depthwise conv2d in NCHW[x]c layout"""
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([t.op for t in outs])

    def _visit(op):
        """Apply the NCHWc schedule when the depthwise conv op is reached."""
        if "depthwise_conv2d_NCHWc" not in op.tag:
            return
        conv = op.output(0)
        data_in = conv.op.input_tensors[0]
        filt = conv.op.input_tensors[1]
        _schedule_depthwise_conv2d_NCHWc_impl(sch, cfg, data_in, filt, conv, outs[0])

    traverse_inline(sch, outs[0].op, _visit)
    return sch
def _schedule_depthwise_conv2d_NCHWc_impl(s, cfg, data_vec, kernel_vec, conv_out, output):
    """Schedule one depthwise_conv2d_NCHWc stage in-place on schedule ``s``.

    Vectorizes over the channel block, tiles the output width by
    ``cfg["tile_ow"]``, and parallelizes the fused (channel-chunk, height)
    axis. When ``conv_out`` is not the graph output, the consumer stage is
    tiled the same way and the conv is computed at its parallel axis.
    """
    tile_ow, oc_bn = cfg["tile_ow"].size[-1], cfg["tile_oc"].size[-1]
    unroll_kw = cfg["unroll_kw"].val
    # schedule pad
    if isinstance(s[data_vec].op, tvm.te.ComputeOp) and "pad" in data_vec.op.tag:
        batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis
        s[data_vec].vectorize(ic_block)
        parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
        s[data_vec].parallel(parallel_axis)
    C, O = conv_out, output
    # Accumulate through a cache stage so the reduction stays register-resident.
    CC = s.cache_write(C, "global")
    _, ic_chunk, oh, ow, ic_block = s[C].op.axis
    ow_chunk, ow_block = s[C].split(ow, factor=tile_ow)
    s[C].reorder(ic_chunk, oh, ow_chunk, ow_block, ic_block)
    s[C].vectorize(ic_block)
    parallel_axis = s[C].fuse(ic_chunk, oh)
    s[C].parallel(parallel_axis)
    s[CC].compute_at(s[C], ow_chunk)
    # the ow axis in the cached block CC is the ow_block in C
    _, ic_chunk, oh, ow, ic_block = s[CC].op.axis
    kh, kw = s[CC].op.reduce_axis
    s[CC].reorder(ic_chunk, oh, kh, kw, ow, ic_block)
    if unroll_kw:
        s[CC].unroll(kw)
    s[CC].vectorize(ic_block)
    s[CC].unroll(ow)
    if C != O:
        out_ndim = len(s[O].op.axis)
        if out_ndim == 5:
            # Consumer still in packed NCHWc layout.
            batch, oc_chunk, oh, ow, oc_block = s[O].op.axis
            ow_chunk, ow_block = s[O].split(ow, factor=tile_ow)
            s[O].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
            parallel_axis = s[O].fuse(oc_chunk, oh)
            s[C].compute_at(s[O], parallel_axis)
            s[O].vectorize(oc_block)
            s[O].parallel(parallel_axis)
        elif out_ndim == 4:
            # Consumer in unpacked NCHW layout; re-create the channel block.
            batch, oc, oh, ow = s[O].op.axis
            ow_chunk, ow_block = s[O].split(ow, factor=tile_ow)
            oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
            s[O].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
            parallel_axis = s[O].fuse(oc_chunk, oh)
            s[C].compute_at(s[O], parallel_axis)
            s[O].vectorize(oc_block)
            s[O].parallel(parallel_axis)
        else:
            raise ValueError(f"Unsupported output ndim: {out_ndim}")
    return s
@depthwise_conv2d_infer_layout.register("cpu")
def _depthwise_conv2d_infer_layout(workload, cfg):
    """Infer the packed NCHWc input/output shapes and layout strings
    for a depthwise conv2d workload given a tuned config."""
    _, data, kernel, strides, padding, dilation, _, _, dtype = workload
    batch, in_ch, in_h, in_w = data[1]
    filt_ch, ch_mult, k_h, k_w = kernel[1]
    out_ch = filt_ch * ch_mult
    # padding is (top, left, bottom, right); strides is (stride_h, stride_w).
    out_h = (in_h + padding[0] + padding[2] - k_h) // strides[0] + 1
    out_w = (in_w + padding[1] + padding[3] - k_w) // strides[1] + 1
    ic_bn = cfg["tile_ic"].size[-1]
    oc_bn = cfg["tile_oc"].size[-1]
    in_desc = ((batch, in_ch // ic_bn, in_h, in_w, ic_bn), f"NCHW{ic_bn}c")
    out_desc = ((batch, out_ch // oc_bn, out_h, out_w, oc_bn), f"NCHW{oc_bn}c")
    return (in_desc,), (out_desc,)
| 12,259 | 37.193146 | 100 | py |
tvm | tvm-main/python/tvm/topi/x86/tensor_intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Core kernel of dot product of 4 Int8 operations"""
# pylint: disable=invalid-name,unused-variable
import tvm
from tvm import te
import tvm.target.codegen
from tvm.target.x86 import target_has_sse42, target_has_vnni, get_simd_32bit_lanes
def dot_16x1x16_uint8_int8_int32():
    """Dispatch the most optimized intrin depending on the target"""
    mcpu = tvm.target.Target.current().mcpu
    assert target_has_sse42(mcpu), "An old Intel machine that does not have fast Int8 support."
    if not target_has_vnni(mcpu):
        # vpmaddubsw/vpmaddwd fallback
        return dot_16x1x16_uint8_int8_int32_skylake()
    # VNNI capable platform
    return dot_16x1x16_uint8_int8_int32_cascadelake()
def dot_16x1x16_uint8_int8_int32_skylake():
    """
    Int8 dot product by every 4 elements using AVX512 Skylake instructions.
    This function takes two arrays of uint8 and int8 datatype -- data[4] and
    kernel[16][4] -- and computes a dot product of data[4] with every
    4 elements of kernels, resulting in output[16] of int32 datatype.
    The pseudo code is as follows.

    .. code-block:: c

        void dot_16x1x16_uint8_int8_int32(uint8 data[4], int8 kernel[16][4],
                int32 output[16]){
            for (int i = 0; i < 16; i++){
                output[i] = 0;
                for (int k = 0; k < 4; k++){
                    output[i] += data[k] * kernel[i][k]
                }
            }
        }

    Physically, the kernel array sits in an AVX512 vector register and
    the data[4] is broadcasted to another AVX512 vector register. This
    function returns a TensorIntrin that can be used to tensorize
    a schedule.

    Returns
    -------
    intrin : TensorIntrin
        The Skylake int8 TensorIntrin that can be used in tensorizing schedule
    """
    int32_lanes = get_simd_32bit_lanes()
    num_int8_elements = 4  # 4 int8 elements in int32
    data = te.placeholder((num_int8_elements,), dtype="uint8", name="data")
    kernel = te.placeholder((int32_lanes, num_int8_elements), dtype="int8", name="kernel")
    k = te.reduce_axis((0, num_int8_elements), name="k")
    C = te.compute(
        (int32_lanes,),
        lambda i: te.sum(data[k].astype("int32") * kernel[i, k].astype("int32"), axis=k),
        name="C",
    )
    a_buffer = tvm.tir.decl_buffer(
        data.shape, dtype="uint8", name="a_buffer", offset_factor=1, strides=[1]
    )
    b_buffer = tvm.tir.decl_buffer(
        kernel.shape, dtype="int8", name="b_buffer", offset_factor=1, strides=[te.var("ldw"), 1]
    )

    def _intrin_func(ins, outs):
        def _instr(index):
            # index: 0 = body, 1 = reset (zero the accumulator), 2 = update.
            # int_lx32 - output datatype after pmaddubs - 16 bits to number of lanes
            # int_8xl - input datatype to pmaddubs - 8 bits to number of lanes
            # int_32xl - output datatype after pmaddw - 32 bits per number of lanes
            if int32_lanes == 4:
                int_lx32 = "int16x8"
                int_8xl = "int8x16"
                int_32xl = "int32x4"
                pmaddubs = "llvm.x86.ssse3.pmadd.ub.sw.128"
                pmaddw = "llvm.x86.sse2.pmadd.wd"
            elif int32_lanes == 8:
                int_lx32 = "int16x16"
                int_8xl = "int8x32"
                int_32xl = "int32x8"
                pmaddubs = "llvm.x86.avx2.pmadd.ub.sw"
                pmaddw = "llvm.x86.avx2.pmadd.wd"
            elif int32_lanes == 16:
                int_lx32 = "int16x32"
                int_8xl = "int8x64"
                int_32xl = "int32x16"
                pmaddubs = "llvm.x86.avx512.pmaddubs.w.512"
                pmaddw = "llvm.x86.avx512.pmaddw.d.512"
            ib = tvm.tir.ir_builder.create()
            if index == 1:
                ib.emit(outs[0].vstore(0, tvm.tir.const(0, int_32xl)))
                return ib.get()
            # Reinterpret the 4 uint8 values as one int32, broadcast it across
            # all lanes, then view that as a vector of int8 quadruples.
            a_int8 = ins[0].vload([0], "uint8x4")
            re_int32 = tvm.tir.call_intrin("int32", "tir.reinterpret", a_int8)
            vec_ai32 = re_int32.astype(int_32xl)
            vec_a = tvm.tir.call_intrin(int_8xl, "tir.reinterpret", vec_ai32)
            vec_b = ins[1].vload([0, 0], int_8xl)
            vec_one = tvm.tir.const(1, int_lx32)
            # pmaddubs: pairwise u8*i8 multiply-add -> int16 lanes.
            pair_reduction = tvm.tir.call_llvm_pure_intrin(
                int_lx32,
                pmaddubs,
                tvm.tir.const(2, "uint32"),
                vec_a,
                vec_b,
            )
            # pmaddw with an all-ones vector sums adjacent int16 pairs -> int32.
            quad_reduction = tvm.tir.call_llvm_pure_intrin(
                int_32xl,
                pmaddw,
                tvm.tir.const(2, "uint32"),
                pair_reduction,
                vec_one,
            )
            if index == 0:
                ib.emit(outs[0].vstore(0, quad_reduction))
            else:
                ib.emit(outs[0].vstore(0, quad_reduction + outs[0].vload([0], int_32xl)))
            return ib.get()

        # body, reset, update
        return _instr(0), _instr(1), _instr(2)

    buffer_params = {"offset_factor": 1}
    return te.decl_tensor_intrin(
        C.op,
        _intrin_func,
        binds={data: a_buffer, kernel: b_buffer},
        default_buffer_params=buffer_params,
    )
def dot_16x1x16_uint8_int8_int16():
    """
    Int8 dot product by every 2 elements using AVX512 Skylake instructions.
    This function takes two arrays of uint8 and int8 datatype -- data[2] and
    kernel[4][32][2] -- and computes a dot product of data[2] with every
    2 elements of kernels, resulting in output[4][32] of int16 datatype.
    The pseudo code is as follows.

    .. code-block:: c

        void dot_16x1x16_uint8_int8_int16(uint8 data[2], int8 kernel[32*4][2],
                int16 output[32*4]){
            for (int i = 0; i< 4; i++){
                for (int j = 0; j < 32; j++){
                    output[i][j] = 0;
                    for (int k = 0; k < 2; k++){
                        output[i][j] += data[k] * kernel[i][j][k]
                    }
                }
            }
        }

    Physically, the kernel array sits in four AVX512 vector registers and
    the data[2] is broadcasted to another AVX512 vector register. This
    function returns a TensorIntrin that can be used to tensorize
    a schedule.

    Returns
    -------
    intrin : TensorIntrin
        The Skylake int8 TensorIntrin that can be used in tensorizing schedule
    """
    int16_lanes = 4 * 32  # 4*32 int32 lanes in 4 AVX512 vector registers
    num_int8_elements = 2  # 2 int8 elements in int16
    data = te.placeholder((num_int8_elements,), dtype="uint8", name="data")
    kernel = te.placeholder((int16_lanes, num_int8_elements), dtype="int8", name="kernel")
    k = te.reduce_axis((0, num_int8_elements), name="k")
    C = te.compute(
        (int16_lanes,),
        lambda i: te.sum(data[k].astype("int16") * kernel[i, k].astype("int16"), axis=k),
        name="C",
    )
    a_buffer = tvm.tir.decl_buffer(
        data.shape, dtype="uint8", name="a_buffer", offset_factor=1, strides=[1]
    )
    b_buffer = tvm.tir.decl_buffer(kernel.shape, dtype="int8", name="b_buffer", offset_factor=1)
    # strides=[te.var('ldw'), 1, 1])

    def _intrin_func(ins, outs):
        def _instr(index):
            # index: 0 = body, 1 = reset (zero all 4 registers), 2 = update.
            ib = tvm.tir.ir_builder.create()
            if index == 1:
                for i in range(4):
                    ib.emit(outs[0].vstore([i * 32], tvm.tir.const(0, "int16x32")))
                return ib.get()
            # Reinterpret the 2 uint8 values as one int16 and broadcast it
            # across all 32 int16 lanes, then view as 64 int8 lanes.
            a_int8 = ins[0].vload([0], "uint8x2")
            re_int16 = tvm.tir.call_intrin("int16", "tir.reinterpret", a_int8)
            vec_ai16 = re_int16.astype("int16x32")
            vec_a = tvm.tir.call_intrin("int8x64", "tir.reinterpret", vec_ai16)
            # One pmaddubs per 32-lane slice of the 128-lane output.
            for i in range(4):
                vec_b = ins[1].vload([i * 32, 0], "int8x64")
                pair_reduction = tvm.tir.call_llvm_pure_intrin(
                    "int16x32",
                    "llvm.x86.avx512.pmaddubs.w.512",
                    tvm.tir.const(2, "uint32"),
                    vec_a,
                    vec_b,
                )
                if index == 0:
                    ib.emit(outs[0].vstore([i * 32], pair_reduction))
                else:
                    ib.emit(
                        outs[0].vstore(
                            [i * 32], pair_reduction + outs[0].vload([i * 32], "int16x32")
                        )
                    )
            return ib.get()

        # body, reset, update
        return _instr(0), _instr(1), _instr(2)

    buffer_params = {"offset_factor": 1}
    return te.decl_tensor_intrin(
        C.op,
        _intrin_func,
        binds={data: a_buffer, kernel: b_buffer},
        default_buffer_params=buffer_params,
    )
def dot_16x1x16_uint8_int8_int32_cascadelake():
    """
    Int8 dot product by every 4 elements using AVX512VNNI Cascade Lake instructions.
    This function takes two arrays of uint8 and int8 datatype -- data[4] and
    kernel[16][4] -- and computes a dot product of data[4] with every
    4 elements of kernels, resulting in output[16] of int32 datatype.
    The pseudo code is as follows.

    .. code-block:: c

        void dot_16x1x16_uint8_int8_int32_cascadelake(uint8 data[4], int8 kernel[16][4],
                int32 output[16]){
            for (int i = 0; i < 16; i++){
                output[i] = 0;
                for (int k = 0; k < 4; k++){
                    output[i] += data[k] * kernel[i][k]
                }
            }
        }

    Physically, the kernel array sits in an AVX512 vector register and
    the data[4] is broadcasted to another AVX512 vector register. This
    function returns a TensorIntrin that can be used to tensorize
    a schedule.

    Returns
    -------
    intrin : TensorIntrin
        The Cascade Lake int8 TensorIntrin that can be used in tensorizing schedule
    """
    int32_lanes = 16  # 16 int32 lanes in AVX512
    num_int8_elements = 4  # 4 int8 elements in int32
    data = te.placeholder((num_int8_elements,), dtype="uint8", name="data")
    kernel = te.placeholder((int32_lanes, num_int8_elements), dtype="int8", name="kernel")
    k = te.reduce_axis((0, num_int8_elements), name="k")
    C = te.compute(
        (int32_lanes,),
        lambda i: te.sum(data[k].astype("int32") * kernel[i, k].astype("int32"), axis=k),
        name="C",
    )
    a_buffer = tvm.tir.decl_buffer(
        data.shape, dtype="uint8", name="a_buffer", offset_factor=1, strides=[1]
    )
    b_buffer = tvm.tir.decl_buffer(
        kernel.shape, dtype="int8", name="b_buffer", offset_factor=1, strides=[te.var("ldw"), 1]
    )

    def _intrin_func(ins, outs):
        def _instr(index):
            # index: 0 = body, 1 = reset (zero the accumulator), 2 = update.
            ib = tvm.tir.ir_builder.create()
            if index == 1:
                ib.emit(outs[0].vstore(0, tvm.tir.const(0, "int32x16")))
                return ib.get()
            # Broadcast the 4 uint8 values (as one int32) across all 16 lanes.
            a_int8 = ins[0].vload([0], "uint8x4")
            re_int32 = tvm.tir.call_intrin("int32", "tir.reinterpret", a_int8)
            vec_ai32 = re_int32.astype("int32x16")
            vec_b = ins[1].vload([0, 0], "int8x64")
            vnni_inst_name = "llvm.x86.avx512.vpdpbusd.512"
            llvm_id = tvm.target.codegen.llvm_lookup_intrinsic_id(vnni_inst_name)
            if llvm_id != 0:  # VNNI is available for current LLVM version
                # vpdpbusd fuses multiply and accumulate, so it reads the
                # current output value (vec_c) in every mode.
                vec_bi32 = tvm.tir.call_intrin("int32x16", "tir.reinterpret", vec_b)
                vec_c = outs[0].vload([0], "int32x16")
                quad_reduction = tvm.tir.call_llvm_pure_intrin(
                    "int32x16",
                    "llvm.x86.avx512.vpdpbusd.512",
                    tvm.tir.const(3, "uint32"),
                    vec_c,
                    vec_ai32,
                    vec_bi32,
                )
                ib.emit(outs[0].vstore(0, quad_reduction))
            else:  # Fall back to the normal AVX512
                # Two-step reduction: pmaddubs (u8*i8 -> int16 pairs) then
                # pmaddw with all-ones (sum adjacent int16 -> int32).
                vec_a = tvm.tir.call_intrin("int8x64", "tir.reinterpret", vec_ai32)
                vec_one = tvm.tir.const(1, "int16x32")
                pair_reduction = tvm.tir.call_llvm_pure_intrin(
                    "int16x32",
                    "llvm.x86.avx512.pmaddubs.w.512",
                    tvm.tir.const(2, "uint32"),
                    vec_a,
                    vec_b,
                )
                quad_reduction = tvm.tir.call_llvm_pure_intrin(
                    "int32x16",
                    "llvm.x86.avx512.pmaddw.d.512",
                    tvm.tir.const(2, "uint32"),
                    pair_reduction,
                    vec_one,
                )
                if index == 0:
                    ib.emit(outs[0].vstore(0, quad_reduction))
                else:
                    ib.emit(outs[0].vstore(0, quad_reduction + outs[0].vload([0], "int32x16")))
            return ib.get()

        # body, reset, update
        return _instr(0), _instr(1), _instr(2)

    buffer_params = {"offset_factor": 1}
    return te.decl_tensor_intrin(
        C.op,
        _intrin_func,
        binds={data: a_buffer, kernel: b_buffer},
        default_buffer_params=buffer_params,
    )
def dot_32x128x32_u8s8s32_sapphirerapids(LDA):
    """
    Int8 dot product by every 16x64 elements using AMX-TMUL Sapphire Rapids instructions.
    The tdpxxd instruction takes two tile of uint8 and int8 datatype -- data[16][64] and
    kernel[1][16][16][4] -- and computes a dot product of data[16][16] in int32 datatype.
    (Physically, to efficiently leveraging the tile register, we constructing a 2x2 tiles
    matmul which performs 32x128x32 in total)

    The pseudo code is as follows:

    .. code-block:: c

        for(k=0; k<2; k++){
            for(n=0; n<2; n++){
                tileload64(tmm_b, B)
                for(m=0; m<2; m++){
                    if(n==0)
                        tileload64(tmm_a, A)
                    tdpbusd(tmm_c, tmm_a, tmm_b)
                }
            }
        }

    Args:
        LDA (int): the stride of the matrix A, which is uint8 type and use it to determine
                    memory strides of macro reduce axis.

    Returns
    -------
    intrin : TensorIntrin
        The Sapphire Rapids AMX-TMUL int8 tdpbusd TensorIntrin that can be used in tensorizing
        schedule
    """
    # A is 32x128 uint8; B is pre-packed int8 in (n_tile, k/4, 16, 4) layout.
    A = te.placeholder((32, 128), name="A", dtype="uint8")
    B = te.placeholder((2, 32, 16, 4), name="B", dtype="int8")
    k = te.reduce_axis((0, 128), name="k")
    C = te.compute(
        (32, 32),
        lambda i, j: te.sum(
            A[i, k].astype("int32")
            * B[tvm.tir.indexdiv(j, 16), tvm.tir.indexdiv(k, 4), j % 16, k % 4].astype("int32"),
            axis=k,
        ),
        name="C",
    )
    BA = tvm.tir.decl_buffer(
        A.shape, A.dtype, offset_factor=1, strides=[te.var("ldw"), 1], name="BA"
    )
    BB = tvm.tir.decl_buffer(
        B.shape,
        B.dtype,
        offset_factor=1,
        strides=[te.var("ldw"), te.var("ldw"), te.var("ldw"), 1],
        name="BB",
    )
    # The output lives in AMX tile registers (tmm0-tmm3), see scope below.
    BC = tvm.tir.decl_buffer(
        C.shape, C.dtype, offset_factor=1, strides=[te.var("ldw"), 1], name="BC", scope="amx.tmm"
    )

    def intrin_func(ins, outs):  # pylint: disable=unused-variable
        bufA = ins[0]
        bufB = ins[1]
        bufC = outs[0]
        assert LDA
        _strides_A = tvm.tir.const(LDA, dtype="uint64")
        _strides_B_tile = tvm.tir.const(LDA / 128, dtype="uint64")

        def init():
            # Zero the four accumulator tiles tmm0..tmm3 (one per 16x16
            # quadrant of the 32x32 output).
            ib = tvm.tir.ir_builder.create()
            ib.emit(
                tvm.tir.call_llvm_intrin(
                    "int32",
                    "llvm.x86.tilezero",
                    tvm.tir.const(1, "uint8"),
                    tvm.tir.const(0, dtype="uint8"),
                )
            )  # tile C 0
            ib.emit(
                tvm.tir.call_llvm_intrin(
                    "int32",
                    "llvm.x86.tilezero",
                    tvm.tir.const(1, "uint8"),
                    tvm.tir.const(1, dtype="uint8"),
                )
            )  # tile C 1
            ib.emit(
                tvm.tir.call_llvm_intrin(
                    "int32",
                    "llvm.x86.tilezero",
                    tvm.tir.const(1, "uint8"),
                    tvm.tir.const(2, dtype="uint8"),
                )
            )  # tile C 2
            ib.emit(
                tvm.tir.call_llvm_intrin(
                    "int32",
                    "llvm.x86.tilezero",
                    tvm.tir.const(1, "uint8"),
                    tvm.tir.const(3, dtype="uint8"),
                )
            )  # tile C 3
            return ib.get()

        def body():  # load A, load B, dpbusd, store C
            # Tile register assignment: tmm0-3 = C accumulators,
            # tmm4-5 = A tiles, tmm6-7 = B tiles.
            ib = tvm.tir.ir_builder.create()
            for k_tile in range(2):  # reduced data blocks
                for n_acc in range(2):  # broadcast data blocks
                    tmm_B_ = tvm.tir.const(n_acc + 6, dtype="uint8")
                    ib.emit(
                        tvm.tir.call_llvm_intrin(
                            "int32",
                            "llvm.x86.tileloaddt164",  # load B: tmm6, tmm7
                            tvm.tir.const(3, "uint8"),
                            tmm_B_,
                            bufB.access_ptr(
                                "r", offset=64 * 16 * (n_acc * 2 * _strides_B_tile + k_tile)
                            ),
                            tvm.tir.const(64, dtype="uint64"),
                        )
                    )
                    for m_acc in range(2):  # loaded data blocks
                        tmm_A_ = tvm.tir.const(m_acc + 4, dtype="uint8")
                        # A tiles are reused for both n_acc values; only
                        # load them on the first pass.
                        if n_acc == 0:
                            ib.emit(
                                tvm.tir.call_llvm_intrin(
                                    "int32",
                                    "llvm.x86.tileloaddt164",  # load A: , tmm4, tmm5
                                    tvm.tir.const(3, "uint8"),
                                    tmm_A_,
                                    bufA.access_ptr(
                                        "r", offset=m_acc * 16 * _strides_A + k_tile * 64
                                    ),
                                    _strides_A,
                                )
                            )
                        tmm_C_ = tvm.tir.const(m_acc * 2 + n_acc, dtype="uint8")
                        ib.emit(
                            tvm.tir.call_llvm_intrin(
                                "int32",
                                "llvm.x86.tdpbusd",
                                tvm.tir.const(3, "uint8"),
                                tmm_C_,
                                tmm_A_,
                                tmm_B_,
                            )
                        )  # tdpxxd
            return ib.get()

        # body, reset, store
        return (
            body(),
            init(),
            body(),
        )

    return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, B: BB, C: BC})
def acc_32x32_int32_sapphirerapids(LDC):
    """Store accumulated tiles from scope ``amx.tmm`` to global memory.

    Copies the four accumulator tile registers (tmm0..tmm3) out to the four
    16x16 quadrants of a 32x32 int32 destination via ``tilestored64``.

    Parameters
    ----------
    LDC : int
        Stride of the int32 matrix C, used to determine memory strides.

    Returns
    -------
    intrin : TensorIntrin
        The Sapphire Rapids AMX-TMUL ``tilestored64`` TensorIntrin that can be
        used in a tensorize schedule.
    """
    # Identity copy: the tensorized stage just moves A (tile scope) to C (global).
    A = te.placeholder((32, 32), name="A", dtype="int32")
    bufA = tvm.tir.decl_buffer(
        A.shape,
        A.dtype,
        scope="amx.tmm",
        name="a_buffer",
        offset_factor=1,
        strides=[te.var("ldw"), 1],
    )
    C = te.compute((32, 32), lambda i, j: A[i, j], name="C")
    bufC = tvm.tir.decl_buffer(
        C.shape,
        C.dtype,
        scope="global",
        name="c_buffer",
        offset_factor=1,
        strides=[te.var("ldw"), 1],
    )
    assert LDC
    # Byte stride of a row of C (4 bytes per int32 element).
    _strides_C = tvm.tir.const(4 * LDC, dtype="uint64")
    def intrin_func(ins, outs):  # pylint: disable=unused-variable
        ib = tvm.tir.ir_builder.create()
        # Note: these locals intentionally shadow the outer bufA/bufC buffers.
        bufA = ins[0]
        bufC = outs[0]
        for n_acc in range(2):  # broadcast data blocks
            for m_acc in range(2):  # loaded data blocks
                ib.emit(
                    tvm.tir.call_llvm_intrin(
                        "int32",
                        "llvm.x86.tilestored64",
                        tvm.tir.const(3, "uint8"),
                        # Tile index matches the m*2+n layout used when computing.
                        tvm.tir.const(m_acc * 2 + n_acc, dtype="uint8"),
                        # Offset is in int32 elements, hence the /4 on the byte stride.
                        bufC.access_ptr("w", offset=n_acc * 16 + m_acc * 16 * _strides_C / 4),
                        _strides_C,
                    )
                )
        return ib.get()
    return te.decl_tensor_intrin(C.op, intrin_func, binds={A: bufA, C: bufC})
| 21,451 | 36.307826 | 97 | py |
tvm | tvm-main/python/tvm/topi/x86/bitserial_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments, condition-evals-to-constant
"""Schedule for bitserial dense operator."""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import autotvm
from tvm.topi.utils import get_const_int, get_const_tuple
from .. import tag
from ..nn.bitserial_util import bitpack, binary_op_multiplier
@autotvm.register_topi_compute("bitserial_dense.x86")
def bitserial_dense(
    cfg, data, weight, data_bits, weight_bits, pack_dtype="uint32", out_dtype="int16", unipolar=True
):
    """Bitserial dense implementation for x86.

    Parameters
    ----------
    cfg : ConfigEntity
        AutoTVM config; tiling/reorder/annotation knobs are defined here.
    data : tvm.te.Tensor
        2-D with shape [batch, in_dim]
    weight : tvm.te.Tensor
        2-D with shape [out_dim, in_dim] or
        3-D with shape [out_dim, weight_bits, in_dim] (already bit-packed)
    data_bits : int
        Number of bits used to quantize the activations.
    weight_bits : int
        Number of bits used to quantize the weights.
    pack_dtype : str
        Integer dtype used to pack bit planes.
    out_dtype : str
        Output accumulation dtype.
    unipolar : bool
        When True, use the unipolar (xnor-style) popcount formulation.

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [batch, out_dim]
    """
    # Pack bit planes of both operands; weight may already be packed (3-D).
    data_packed = bitpack(data, data_bits, pack_axis=1, bit_axis=1, pack_type=pack_dtype)
    if len(weight.shape) == 2:
        weight_packed = bitpack(weight, weight_bits, pack_axis=1, bit_axis=1, pack_type=pack_dtype)
    else:
        weight_packed = weight
    Y, DB, K = get_const_tuple(data_packed.shape)
    X, WB, _ = get_const_tuple(weight_packed.shape)
    ######## Search space
    x, y = cfg.axis(X), cfg.axis(Y)
    db, wb, k = cfg.reduce_axis(DB), cfg.reduce_axis(WB), cfg.reduce_axis(K)
    ko, ki = cfg.define_split("tile_k", k, num_outputs=2)
    yo, yi = cfg.define_split("tile_y", y, num_outputs=2)
    xo, xi = cfg.define_split("tile_x", x, num_outputs=2)
    cfg.define_reorder(
        "reorder_0",
        [yo, xo, ko, yi, wb, db, ki, xi],
        policy="candidate",
        candidate=[[yo, xo, ko, yi, wb, db, ki, xi], [yo, xo, yi, ko, wb, db, ki, xi]],
    )
    cfg.define_annotate("ann_reduce", [db, wb], policy="try_unroll")
    cfg.define_annotate("ann_spatial", [yi, xi], policy="try_unroll_vec")
    ###### Compute rule
    # VX: innermost output-channel vector width chosen by the tuner.
    VX = cfg["tile_x"].size[-1]
    wvshape = (X // VX, WB, VX, K)
    oshape = (Y, X)
    k = te.reduce_axis((0, K), name="k")
    db = te.reduce_axis((0, DB), name="db")
    wb = te.reduce_axis((0, WB), name="wb")
    # Tile data and weights
    weight_vec = te.compute(
        wvshape, lambda xo, wb, vx, k: weight_packed[xo * VX + vx][wb][k], name="weight_vec"
    )
    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod
    # Unipolar: (popcount(w & d) - popcount(~w & d)) treats weight bits as +/-1.
    matmul_unipolar = te.compute(
        oshape,
        lambda i, j: te.sum(
            (
                tvm.tir.popcount(
                    weight_vec[idxdiv(j, VX), wb, idxmod(j, VX), k] & data_packed[i, db, k]
                )
                - tvm.tir.popcount(
                    ~weight_vec[idxdiv(j, VX), wb, idxmod(j, VX), k] & data_packed[i, db, k]
                )
            ).astype(out_dtype)
            << (db + wb).astype(out_dtype),
            axis=[wb, db, k],
        ),
        tag="bitserial_dense_unipolar",
    )
    # Bipolar/plain variant: popcount of the AND only, shifted by bit-plane weight.
    matmul = te.compute(
        oshape,
        lambda i, j: te.sum(
            tvm.tir.popcount(
                weight_vec[idxdiv(j, VX), wb, idxmod(j, VX), k] & data_packed[i, db, k]
            ).astype(out_dtype)
            << (db + wb).astype(out_dtype),
            axis=[wb, db, k],
        ),
        tag="bitserial_dense",
    )
    # binary ops
    cfg.add_flop(2 * Y * X * K * binary_op_multiplier(pack_dtype))
    if unipolar:
        return matmul_unipolar
    return matmul
@autotvm.register_topi_schedule("bitserial_dense.x86")
def schedule_bitserial_dense(cfg, outs):
    """Schedule for bitserial_dense.

    Parameters
    ----------
    cfg : ConfigEntity
        AutoTVM config holding the knobs defined by the matching compute
        (``bitserial_dense.x86``).
    outs : Array of Tensor
        The computation graph description of bitserial dense operator
        in the format of an array of tensors.

    Returns
    -------
    s : Schedule
        The computation schedule for bitserial_dense.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])

    def _schedule(cfg, s, data_vec, weight_vec, output):
        # Parallelize both packing stages over their outermost axis.
        s[data_vec].parallel(s[data_vec].op.axis[0])
        s[weight_vec].parallel(s[weight_vec].op.axis[0])
        y, x = s[output].op.axis
        wb, db, k = s[output].op.reduce_axis
        # Apply the tuner-selected tiling, loop order and unroll annotations.
        yo, yi = cfg["tile_y"].apply(s, output, y)
        xo, xi = cfg["tile_x"].apply(s, output, x)
        ko, ki = cfg["tile_k"].apply(s, output, k)
        cfg["reorder_0"].apply(s, output, [yo, xo, ko, yi, wb, db, ki, xi])
        cfg["ann_reduce"].apply(
            s,
            output,
            [db, wb],
            axis_lens=[get_const_int(db.dom.extent), get_const_int(wb.dom.extent)],
            max_unroll=8,
            cfg=cfg,
        )
        cfg["ann_spatial"].apply(
            s,
            output,
            [yi, xi],
            axis_lens=[cfg["tile_y"].size[-1], cfg["tile_x"].size[-1]],
            max_unroll=8,
            cfg=cfg,
        )
        s[output].vectorize(xi)
        s[output].parallel(yo)
        return s

    def traverse(op):
        """Internal traverse function"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(op.tag) or "elemwise" in op.tag:
            if op not in s.outputs:
                s[op].compute_inline()
            for tensor in op.input_tensors:
                if isinstance(tensor.op, tvm.te.ComputeOp):
                    traverse(tensor.op)
        # BUGFIX: the original condition was
        #     op.tag == "bitserial_dense" or "bitserial_dense_unipolar"
        # which is always truthy (the bare string literal short-circuits the
        # comparison), so the RuntimeError below was unreachable and unrelated
        # compute tags were silently scheduled as bitserial dense.
        elif op.tag in ("bitserial_dense", "bitserial_dense_unipolar"):
            output = op.output(0)
            weight_vec = op.input_tensors[0]
            data_vec = op.input_tensors[1]
            data = data_vec.op.input_tensors[0]
            if "QuantizeInput" in data.op.name:
                # Skip the fused quantization stage to reach the real input.
                data = data.op.input_tensors[0]
            _schedule(cfg, s, data_vec, weight_vec, output)
        else:
            raise RuntimeError(f"Unsupported operator: {op.tag}")

    traverse(outs[0].op)
    return s
| 6,750 | 32.924623 | 100 | py |
tvm | tvm-main/python/tvm/topi/hexagon/injective.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Schedule for injective operators"""
import numpy as np
import tvm
def schedule_injective(outs):
    """Schedule for injective ops on Hexagon.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of injective in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, tvm.te.tensor.Tensor):
        outs = [outs]
    sch = tvm.te.create_schedule([tensor.op for tensor in outs])
    tvm.te.schedule.AutoInlineInjective(sch)
    # Fuse all axes, then split so the innermost loop spans one 128-byte
    # HVX vector; vectorize the inner loop and parallelize the outer one.
    for tensor in outs:
        flattened = sch[tensor].fuse(*tensor.op.axis)
        lanes = 128 // np.dtype(tensor.dtype).itemsize
        outer, inner = sch[tensor].split(flattened, factor=lanes)
        sch[tensor].vectorize(inner)
        sch[tensor].parallel(outer)
    return sch
def schedule_softmax(outs):
    """Schedule softmax on Hexagon by delegating to the injective schedule."""
    return schedule_injective(outs)
def schedule_elemwise(outs):
    """Schedule elementwise ops on Hexagon by delegating to the injective schedule."""
    return schedule_injective(outs)
def schedule_broadcast(outs):
    """Schedule broadcast ops on Hexagon by delegating to the injective schedule."""
    return schedule_injective(outs)
| 1,799 | 29.508475 | 82 | py |
tvm | tvm-main/python/tvm/topi/hexagon/pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Schedule for pooling operators"""
import tvm
def schedule_pool(outs, layout="NHWC"):  # pylint: disable=unused-argument
    """Schedule for pooling op.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of the pooling op in the format
        of an array of tensors.
    layout: str
        The tensor layout (accepted for interface compatibility; unused).

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, tvm.te.tensor.Tensor):
        outs = [outs]
    sch = tvm.te.create_schedule([tensor.op for tensor in outs])
    tvm.te.schedule.AutoInlineInjective(sch)
    return sch
def schedule_adaptive_pool(outs):
    """Schedule adaptive pooling by delegating to the generic pool schedule."""
    return schedule_pool(outs)
| 1,488 | 30.020833 | 74 | py |
tvm | tvm-main/python/tvm/topi/hexagon/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Schedule for conv2d"""
import tvm
from tvm import te
from .. import nn
from ..utils import traverse_inline
from .tensor_intrin import dot_vrmpy
from ..generic import conv2d as conv2d_generic
def schedule_conv2d_nhwc(outs):
    """Schedule for conv2d NHWC operator.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of conv2d in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, tvm.te.tensor.Tensor):
        outs = [outs]
    sch = tvm.te.create_schedule([tensor.op for tensor in outs])
    tvm.te.schedule.AutoInlineInjective(sch)
    return sch
def schedule_conv2d_nchw(outs):
    """Schedule conv2d NCHW; same generic schedule as NHWC on Hexagon."""
    return schedule_conv2d_nhwc(outs)
def schedule_conv2d(outs, layout="NHWC"):
    """Dispatch the conv2d schedule by layout string (case-insensitive)."""
    normalized = layout.casefold()
    if normalized == "nhwc":
        return schedule_conv2d_nhwc(outs)
    if normalized == "nchw":
        return schedule_conv2d_nchw(outs)
    raise ValueError(f"Unexpected layout={layout}")
def schedule_depthwise_conv2d_nchw(outs):
    """Schedule depthwise conv2d NCHW; reuses the generic conv2d NCHW schedule."""
    return schedule_conv2d_nchw(outs)
def schedule_depthwise_conv2d_nhwc(out):
    """Schedule depthwise conv2d NHWC; reuses the generic conv2d NHWC schedule."""
    return schedule_conv2d_nhwc(out)
def schedule_conv2d_transpose_nchw(outs):
    """Create schedule for conv2d_transpose NCHW: base conv2d schedule plus
    inlining of the dilate/pad/transform stages found under "unpack_nchwc"."""
    outs = [outs] if isinstance(outs, tvm.te.tensor.Tensor) else outs
    s = schedule_conv2d_nchw(outs)
    def _callback(op):
        if "unpack_nchwc" in op.tag:
            conv_out = op.input_tensors[0]
            # retrieve data
            data_vec = conv_out.op.input_tensors[0]
            # NOTE(review): data_vec is a te.Tensor, so isinstance(data_vec,
            # tvm.te.ComputeOp) looks always-False (dead branch) — probably
            # meant data_vec.op; confirm before changing.
            if isinstance(data_vec, tvm.te.ComputeOp):
                data_pad = data_vec.op.input_tensors[0]
                data_dilate = data_pad.op.input_tensors[0]
                s[data_dilate].compute_inline()
                s[data_pad].compute_inline()
            # retrieve kernel
            kernel_vec = conv_out.op.input_tensors[1]
            # NOTE(review): same Tensor-vs-Op concern as above.
            if isinstance(kernel_vec, tvm.te.ComputeOp):
                kernel_transform = kernel_vec.op.input_tensors[0]
                s[kernel_transform].compute_inline()
    traverse_inline(s, outs[0].op, _callback)
    return s
def conv2d_NCHWc_int8(
    data, kernel, stride, padding, dilation, layout, out_layout, out_dtype="int32"
):
    """Compute definition for int8 conv2d in NCHWc layout.

    Thin wrapper over ``nn.conv2d_NCHWc_int8`` that derives ``n_elems`` (the
    innermost packed-kernel factor) from the kernel's last dimension.
    """
    n_elems = int(kernel.shape[-1])
    return nn.conv2d_NCHWc_int8(
        data, kernel, stride, padding, dilation, layout, out_layout, out_dtype, n_elems=n_elems
    )
def schedule_conv2d_NCHWc_int8(outs):
    """Schedule for int8 conv2d in NCHWc layout using vrmpy tensorization"""
    s = te.create_schedule([x.op for x in outs])
    def _callback(op):
        if "conv2d_NCHWc_int8" in op.tag:
            conv_out = op.output(0)
            kernel_vec = conv_out.op.input_tensors[1]
            data_vec = conv_out.op.input_tensors[0]
            out_width = conv_out.shape[3]
            # Pick the largest output-width register-tiling factor (< 32)
            # that evenly divides the output width.
            reg_n = 1
            for n in range(31, 0, -1):
                if out_width % n == 0:
                    reg_n = n
                    break
            # Minimal stand-in for an autotvm config expected by the generic
            # NCHWc int8 scheduler.
            cfg = {"tile_ow": reg_n, "unroll_kw": False}
            args = [s, cfg, data_vec, kernel_vec, conv_out, outs[0]]
            intrin = dot_vrmpy(data_vec.dtype, kernel_vec.dtype)
            conv2d_generic.schedule_conv_NCHWc_cpu_common_int8(
                *args,
                int32_lanes=32,
                int8_elems=4,
                intrin=intrin,
                inline_fused=True,
            )
    traverse_inline(s, outs[0].op, _callback)
    return s
| 4,408 | 31.419118 | 95 | py |
tvm | tvm-main/python/tvm/topi/hexagon/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Schedule for dense operator"""
import tvm
from tvm.topi.utils import traverse_inline
from tvm import te
from .. import tag
from .tensor_intrin import dot_vrmpy
def schedule_dense(outs):
    """Schedule for dense op.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of dense in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, tvm.te.tensor.Tensor):
        outs = [outs]
    sch = tvm.te.create_schedule([tensor.op for tensor in outs])
    tvm.te.schedule.AutoInlineInjective(sch)
    return sch
def dense_u8u8i32_vrmpy_compute(X, packed_w, bias, out_dtype):
    """Compute for uint8 x uint8 -> int32 dense using vrmpy.

    ``packed_w`` is expected in the NC32n4c layout: (n_outer, k_outer, 32, 4),
    i.e. 32 output channels x 4 reduction elements per inner block, matching
    the vrmpy intrinsic's operand shape.
    """
    assert X.dtype == "uint8" and packed_w.dtype == "uint8" and out_dtype == "int32"
    m, k = X.shape
    n_o, _, n_i, _ = packed_w.shape
    assert n_i == 32
    ak = te.reduce_axis((0, k), name="k")
    C = te.compute(
        (m, n_o * n_i),
        lambda i, j: te.sum(
            X[i, ak].astype("int32")
            * packed_w[tvm.tir.indexdiv(j, 32), tvm.tir.indexdiv(ak, 4), j % 32, ak % 4].astype(
                "int32"
            ),
            axis=ak,
        ),
        tag="dense_u8u8i32_vrmpy",
        name="compute",
    )
    # Optionally fuse the bias add as a broadcast stage.
    if bias is not None:
        C = te.compute(C.shape, lambda i, j: C[i, j] + bias[j], tag=tag.BROADCAST)
    return C
def dense_u8u8i32_vrmpy_schedule(outs):
    """Schedule for vrmpy dense: tile 32x32 output blocks, split the reduction
    by 4, and tensorize the inner output axis with the vrmpy intrinsic."""
    s = te.create_schedule([x.op for x in outs])
    # O: The output of the fused op
    O = outs[0]
    def _schedule_dense(s, C, O):
        (a_k,) = C.op.reduce_axis
        a_y = C.op.axis[-2]
        a_yo, a_yi = s[C].split(a_y, factor=32)
        a_xo, a_xi = s[C].split(C.op.axis[-1], factor=32)
        a_ko, a_ki = s[C].split(a_k, factor=4)
        s[C].reorder(a_yo, a_xo, a_yi, a_ko, a_xi, a_ki)
        # Tensorize the 32-wide output axis with the uint8 vrmpy intrinsic.
        pc = dot_vrmpy("uint8", "uint8")
        s[C].tensorize(a_xi, pc)
        s[C].parallel(s[C].fuse(a_yo, a_xo))
        if C != O:
            # A fused epilogue (e.g. bias add) exists; tile it the same way,
            # vectorize it, and compute the GEMM at its inner row loop.
            a_y = O.op.axis[-2]
            a_yo, a_yi = s[O].split(a_y, factor=32)
            a_xo, a_xi = s[O].split(O.op.axis[-1], factor=32)
            s[O].reorder(a_yo, a_xo, a_yi, a_xi)
            s[O].vectorize(a_xi)
            s[C].compute_at(s[O], a_yi)
            s[O].parallel(s[O].fuse(a_yo, a_xo))
    def _callback(op):
        if "u8u8i32_vrmpy" in op.tag:
            # C: The output of GEMM
            C = op.output(0)
            _schedule_dense(s, C, O)
    traverse_inline(s, outs[0].op, _callback)
    return s
| 3,434 | 29.669643 | 96 | py |
tvm | tvm-main/python/tvm/topi/hexagon/dense_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Dense alter op functions for ARM"""
import tvm
from tvm import relay
from .. import nn
from ..nn import dense_alter_layout
def check_vrmpy_applicable(x, y):
    """Return whether the dense inputs can be lowered with the vrmpy intrinsic.

    Requires both dtypes to be an 8-bit integer flavor (the substring test
    matches both "int8" and "uint8") and the weight's last two dimensions to
    be multiples of 32 and 4 respectively.
    """
    if "int8" not in x.dtype or "int8" not in y.dtype:
        return False
    return y.shape[-2] % 32 == 0 and y.shape[-1] % 4 == 0
@dense_alter_layout.register(["hexagon"])
def _alter_dense_layout(attrs, inputs, tinfos, out_type):
    """Rewrite dense to contrib_dense_pack (weight layout NC32n4c) when the
    inputs are vrmpy-compatible; return None to keep the op unchanged."""
    data_tensor, weight_tensor = tinfos
    out_dtype = out_type.dtype
    if check_vrmpy_applicable(data_tensor, weight_tensor):
        weight_layout = "NC32n4c"
        return relay.nn.contrib_dense_pack(inputs[0], inputs[1], weight_layout, None, out_dtype)
    else:
        return None
def vrmpy_legalize(x, w, arg_types, op, attrs):
    """
    Legalizes int8 inputs to dense for vrmpy.

    vrmpy wants uint8 operands; signed operands are shifted by +128 and the
    product is corrected with the algebraic identities below.

    X'_u8 = X_s8 + 128
    X_s8 * W_s8 = (X'_u8 - 128) * (W'_u8 - 128)
                = X'_u8 * W'_u8 - X'_u8 * 128 - 128 * W'_u8 + 128 * 128
    X_u8 * W_s8 = X_u8 * (W'_u8 - 128)
                = X'_u8 * W'_u8 - X_u8 * 128

    Returns the corrected expression, or None when vrmpy does not apply.
    """
    if not check_vrmpy_applicable(arg_types[0], arg_types[1]):
        return None
    def cast_to_uint8(x):
        # x_u8 = cast(x + 128) — shifts the signed range [-128,127] to [0,255].
        x = relay.cast(x, "int32")
        x = relay.add(x, relay.const(128, "int32"))
        return relay.cast(x, "uint8")
    if arg_types[0].dtype == "int8" and arg_types[1].dtype == "int8":
        x = cast_to_uint8(x)
        w = cast_to_uint8(w)
        # Correction terms: -128 * row-sums of each shifted operand.
        W_u8x128 = relay.const(-128, "int32") * relay.sum(relay.cast(w, "int32"), axis=[-1])
        X_u8x128 = relay.const(-128, "int32") * relay.sum(relay.cast(x, "int32"), axis=[-1])
        X_u8x128 = relay.expand_dims(X_u8x128, axis=1)
        out = op(x, w, **attrs)
        out += W_u8x128
        out += X_u8x128
        # Constant term 128 * 128 * K from the expansion above.
        k_dim = int(arg_types[0].shape[-1])
        return out + relay.const(128 * 128 * k_dim, "int32")
    if arg_types[0].dtype == "uint8" and arg_types[1].dtype == "int8":
        w = cast_to_uint8(w)
        X_u8x128 = relay.expand_dims(
            relay.const(-128, "int32") * relay.sum(relay.cast(x, "int32"), axis=[-1]), axis=1
        )
        out = op(x, w, **attrs)
        return out + X_u8x128
    return None
@nn.dense_legalize.register("hexagon")
def _dense_legalize(attrs, inputs, arg_types):
    """Legalize dense op for HVX vectorization and vrmpy tensorization.

    Given a workload with a matrix X of shape (M, K) and a matrix Y of (N, K),
    we first pad the N dimension to be a multiple of the output vector length.
    And if the inputs are signed or unsigned int8 and the Y matrix can be packed
    into the NK32n4k layout, we convert both inputs to uint8 to apply the most
    efficient variant of vrmpy.  Returns None when no legalization applies.
    """
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    # Collect the input tensors.
    x_tensor, y_tensor = arg_types[0], arg_types[1]
    dtype = x_tensor.dtype
    # Collect the output tensor.
    output_tensor = arg_types[2]
    # Collect the input exprs.
    x, y = inputs
    N, _ = y_tensor.shape
    # Output vector length: 64 fp16 lanes or 32 int32 lanes per HVX vector.
    if dtype == "float16":
        vec_len = 64
    elif "int8" in dtype:
        vec_len = 32
    else:
        return None
    if N % vec_len != 0:
        # Round N up to the next multiple of vec_len (N % vec_len != 0 here).
        N_padded = ((N + vec_len) // vec_len) * vec_len
        dn = N_padded - N
        y_ = relay.nn.pad(y, pad_width=((0, dn), (0, 0)))
        # If units is explicitly specified, it is used to compute the output shape.
        # We need to update units after padding to prevent a type error.
        if attrs["units"] is not None:
            new_attrs["units"] = N + dn
        arg_types = [
            arg_types[0],
            tvm.ir.tensor_type.TensorType([N + dn, arg_types[1].shape[1]], arg_types[1].dtype),
        ]
        vrmpy_out = vrmpy_legalize(x, y_, arg_types, relay.nn.dense, new_attrs)
        if vrmpy_out is None:
            out_ = relay.nn.dense(x, y_, **new_attrs)
        else:
            out_ = vrmpy_out
        # Slice the padded result back to the original output shape.
        out = relay.strided_slice(out_, begin=[0, 0], end=[x.value for x in output_tensor.shape])
        return out
    return vrmpy_legalize(inputs[0], inputs[1], arg_types, relay.nn.dense, attrs)
| 4,910 | 32.182432 | 98 | py |
tvm | tvm-main/python/tvm/topi/hexagon/reduce.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Schedule for composition of reduction operator"""
import tvm
def schedule_reduce(outs):
    """Schedule for reduction op.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of reduction in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, tvm.te.tensor.Tensor):
        outs = [outs]
    sch = tvm.te.create_schedule([tensor.op for tensor in outs])
    tvm.te.schedule.AutoInlineInjective(sch)
    return sch
| 1,347 | 31.878049 | 69 | py |
tvm | tvm-main/python/tvm/topi/hexagon/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Common hexagon specific utilities"""
import math
import struct
from typing import Dict, Tuple, Union
import tvm
from tvm import IRModule, te, tir
from tvm.tir import IndexMap, PrimFunc
def is_scalar(expr):
    """Return True if expr is a scalar: a 0-d te.Tensor whose body is a
    float/int immediate, or a bare tir.FloatImm/tir.IntImm."""
    if isinstance(expr, te.Tensor):
        return expr.ndim == 0 and (isinstance(expr.op.body[0], (tir.FloatImm, tir.IntImm)))
    return isinstance(expr, (tir.FloatImm, tir.IntImm))
def get_const_int_value(expr):
    """Extract the Python int from a constant te.Tensor body or int expression."""
    if isinstance(expr, te.Tensor):
        assert isinstance(expr.op.body[0], tir.IntImm)
        return expr.op.body[0].value
    return tvm.topi.utils.get_const_int(expr)
def get_const_float_value(expr):
    """Extract the Python float from a constant te.Tensor body or float expression."""
    if isinstance(expr, te.Tensor):
        assert isinstance(expr.op.body[0], tir.FloatImm)
        return expr.op.body[0].value
    return tvm.topi.utils.get_const_float(expr)
# Index-map helpers: each maps logical tensor coordinates to a Hexagon blocked
# physical layout.  The "-2d" variants insert IndexMap.AXIS_SEPARATOR to mark
# the start of the innermost (crouton) chunk; "-1d" variants are flat.
def n11c_1024c_2d(n, h, w, c):
    """Return index map for n11c_1024 2d layout"""
    return [n, h, w, c // 1024, IndexMap.AXIS_SEPARATOR, c % 1024]
def n11c_1024c_1d(n, h, w, c):
    """Return index map for n11c_1024 1d layout"""
    return [n, h, w, c // 1024, c % 1024]
def nc11_1024c_2d(n, c, h, w):
    """Return index map for nc11_1024 2d layout"""
    return [n, c // 1024, IndexMap.AXIS_SEPARATOR, c % 1024, h, w]
def nhwc_8h2w32c2w_2d(n, h, w, c):
    """Return index map for nhwc_8h2w32c2w 2d layout"""
    return [n, h // 8, w // 4, c // 32, IndexMap.AXIS_SEPARATOR, h % 8, (w % 4) // 2, c % 32, w % 2]
def nhwc_8h2w32c2w_1d(n, h, w, c):
    """Return index map for nhwc_8h2w32c2w 1d layout"""
    # Block coordinates first, then the intra-block coordinates.
    block = [n, h // 8, w // 4, c // 32]
    intra = [h % 8, (w % 4) // 2, c % 32, w % 2]
    return block + intra
def nchw_8h2w32c2w_2d(n, c, h, w):
    """Return index map for nchw_8h2w32c2w 2d layout"""
    return [n, c // 32, h // 8, w // 4, IndexMap.AXIS_SEPARATOR, h % 8, (w % 4) // 2, c % 32, w % 2]
def nhw_32h16w_2d(n, h, w):
    """Return index map for nhw_32h16w 2d layout"""
    return [n, h // 32, w // 16, IndexMap.AXIS_SEPARATOR, h % 32, w % 16]
def nhwc_4h4w32c_1d(n, h, w, c):
    """Return index map for nhwc_4h4232c 1d layout"""
    return [n, h // 4, w // 4, c // 32, h % 4, w % 4, c % 32]
def nhwc_4h4w32c_2d(n, h, w, c):
    """Return index map for nhwc_4h4w32c 2d layout"""
    return [n, h // 4, w // 4, c // 32, IndexMap.AXIS_SEPARATOR, h % 4, w % 4, c % 32]
def nc_512c_1d(n, c):
    """Return index map for nc_512c 1d layout"""
    return [n, c // 512, c % 512]
def nc_512c_2d(n, c):
    """Return index map for nc_512c 2d layout"""
    return [n, c // 512, IndexMap.AXIS_SEPARATOR, c % 512]
def nc_1024c_2d(n, c):
    """Return index map for nc_1024c 2d layout"""
    return [n, c // 1024, IndexMap.AXIS_SEPARATOR, c % 1024]
def nc_2048c_1d(n, c):
    """Return index map for nc_2048c 1d layout"""
    return [n, c // 2048, c % 2048]
def nc_2048c_2d(n, c):
    """Return index map for nc_2048c 2d layout"""
    return [n, c // 2048, IndexMap.AXIS_SEPARATOR, c % 2048]
def nc11_2048c_2d(n, c, h, w):
    """Return index map for nc11_2048c 2d layout"""
    return [n, c // 2048, IndexMap.AXIS_SEPARATOR, h, w, c % 2048]
def nc_1024c_1d(n, c):
    """Return index map for nc_1024c 1d layout"""
    return [n, c // 1024, c % 1024]
def nhwc_4h2w32c2w_2d(n, h, w, c):
    """Return index map for nhwc_4h2w32c2w 2d layout"""
    return [n, h // 4, w // 4, c // 32, IndexMap.AXIS_SEPARATOR, h % 4, (w % 4) // 2, c % 32, w % 2]
def nhwc_1024c_2d(n, h, w, c):
    """Return index map for nhwc_1024 2d layout"""
    return [n, h, w, c // 1024, IndexMap.AXIS_SEPARATOR, c % 1024]
def nc_1024_2d(n, c):
    """Return index map for nc_1024 2d layout"""
    return [n, c // 1024, IndexMap.AXIS_SEPARATOR, c % 1024]
def nhwc_2048c_2d(n, h, w, c):
    """Return index map for nhwc_2048 2d layout"""
    return [n, h, w, c // 2048, IndexMap.AXIS_SEPARATOR, c % 2048]
def nc_2048_2d(n, c):
    """Return index map for nc_2048 2d layout"""
    return [n, c // 2048, IndexMap.AXIS_SEPARATOR, c % 2048]
def nhwc_8h8w32c_2d(n, h, w, c):
    """Return index map for nhwc_8h8w32c 2d layout"""
    return [n, h // 8, w // 8, c // 32, IndexMap.AXIS_SEPARATOR, h % 8, w % 8, c % 32]
def nhwc_8h8w32c_1d(n, h, w, c):
    """Return index map for nhwc_8h8w32c 1d layout"""
    return [n, h // 8, w // 8, c // 32, h % 8, w % 8, c % 32]
def nchw_8h8w32c_2d(n, c, h, w):
    """Return index map for nchw_8h8w32c 2d layout"""
    return [n, c // 32, h // 8, w // 8, IndexMap.AXIS_SEPARATOR, h % 8, w % 8, c % 32]
def n11c_2048c_2d(n, h, w, c):
    """Return index map for n11c_2048c 2d layout"""
    return [n, h, w, c // 2048, IndexMap.AXIS_SEPARATOR, c % 2048]
def n11c_2048c_1d(n, h, w, c):
    """Return index map for n11c_2048c 1 layout"""
    return [n, h, w, c // 2048, c % 2048]
def iohw_16i32o2i_1d(height, width, in_channel, out_channel):
    """Return index map for iohw_16i32o2i 1d layout"""
    ic_within_block = in_channel % 32
    return [
        in_channel // 32,
        out_channel // 32,
        height,
        width,
        ic_within_block // 2,
        out_channel % 32,
        in_channel % 2,
    ]
def ohwi32o_1d(height, width, in_channel, out_channel):
    """Return index map for ohwi32o 1d layout"""
    return [out_channel // 32, height, width, in_channel, out_channel % 32]
def ncw_32c64w_2d(n, c, w):
    """Return index map for ncw_32c64w 2d layout"""
    return [n, c // 32, w // 64, IndexMap.AXIS_SEPARATOR, c % 32, w % 64]
def nchw_32c8h8w_2d(n, c, h, w):
    """Return index map for nchw_32c8h8w 2d layout"""
    return [n, c // 32, h // 8, w // 8, IndexMap.AXIS_SEPARATOR, c % 32, h % 8, w % 8]
def nchw_32c8h4w_2d(n, c, h, w):
    """Return index map for nchw_32c8h4w 2d layout"""
    return [n, c // 32, h // 8, w // 4, IndexMap.AXIS_SEPARATOR, c % 32, h % 8, w % 4]
def get_layout_transform_fn(layout):
    """Return the index-map function registered for the given layout string.

    Parameters
    ----------
    layout : str
        Layout identifier such as "nhwc-8h2w32c2w-2d".

    Returns
    -------
    function
        The index-map function corresponding to ``layout``.

    Raises
    ------
    RuntimeError
        If ``layout`` is not a known layout string.
    """
    # Dispatch table replacing the original if-chain.  The original listed
    # "nc-2048c-2d" and "nchw-8h8w32c-2d" twice; the duplicate (dead) branches
    # are dropped here without changing behavior.
    transform_fns = {
        "nhwc-8h2w32c2w-2d": nhwc_8h2w32c2w_2d,
        "nhwc-8h2w32c2w-1d": nhwc_8h2w32c2w_1d,
        "nchw-8h2w32c2w-2d": nchw_8h2w32c2w_2d,
        "n11c-1024c-2d": n11c_1024c_2d,
        "n11c-1024c-1d": n11c_1024c_1d,
        "nhwc-1024c-2d": nhwc_1024c_2d,
        "nc11-1024c-2d": nc11_1024c_2d,
        "nc-1024-2d": nc_1024_2d,
        "nhw-32h16w-2d": nhw_32h16w_2d,
        "nhwc-4h4w32c-2d": nhwc_4h4w32c_2d,
        "nhwc-4h4w32c-1d": nhwc_4h4w32c_1d,
        "nc-512c-2d": nc_512c_2d,
        "nc-512c-1d": nc_512c_1d,
        "nhwc-4h2w32c2w-2d": nhwc_4h2w32c2w_2d,
        "nc-2048c-1d": nc_2048c_1d,
        "nc-2048c-2d": nc_2048c_2d,
        "nc-1024c-2d": nc_1024c_2d,
        "nc-1024c-1d": nc_1024c_1d,
        "iohw-16i32o2i-1d": iohw_16i32o2i_1d,
        "nhwc-2048c-2d": nhwc_2048c_2d,
        "nc-2048-2d": nc_2048_2d,
        "nhwc-8h8w32c-2d": nhwc_8h8w32c_2d,
        "nhwc-8h8w32c-1d": nhwc_8h8w32c_1d,
        "nchw-8h8w32c-2d": nchw_8h8w32c_2d,
        "n11c-2048c-2d": n11c_2048c_2d,
        "n11c-2048c-1d": n11c_2048c_1d,
        "ohwi32o-1d": ohwi32o_1d,
        "nc11-2048c-2d": nc11_2048c_2d,
        "ncw-32c64w-2d": ncw_32c64w_2d,
        "nchw-32c8h8w-2d": nchw_32c8h8w_2d,
        "nchw-32c8h4w-2d": nchw_32c8h4w_2d,
    }
    try:
        return transform_fns[layout]
    except KeyError:
        raise RuntimeError(f"Unexpected layout '{layout}'") from None
def get_fixed_point_value(flp: float, dtype: str = "int16") -> Tuple[int, int]:
    """Convert ``flp`` into a fixed-point integer plus the log2 of its scale factor.

    The float is multiplied by a power-of-two scale factor and the product is
    rounded to the nearest integer.  Per IEEE-754, a normalized single-precision
    value is (-1)^S * M * 2^(E - 127) with mantissa M in [1, 2).  For "int16"
    the largest safe scale exponent is 14 - (E - 127): with M < 2 the rounded
    product then stays within +/-2^15 for almost all inputs.  When the mantissa
    is close enough to 2 that rounding still overflows, the scale exponent is
    decremented once and the rounding redone, which is always sufficient.

    Parameters
    ----------
    flp : float
        Floating-point value to be converted.  Must be finite and must not be
        denormalized when viewed as a single-precision float.
    dtype : str
        Type of the resulting fixed-point value. By default, it's set to "int16".

    Returns
    -------
    fixed_point_value : int
        Fixed-point value for the given floating-point value.
    exp_scale_factor : int
        log2 of the scale factor.

    Raises
    ------
    RuntimeError
        If ``flp`` is NaN/INF or denormalized (as float32), if ``dtype`` is
        unsupported, or if the value is too small for fixed-point conversion.
    """

    def _fits(value: int) -> bool:
        # Range check against the target integer type.
        if dtype == "int16":
            return -32768 <= value <= 32767
        raise RuntimeError(f"Unsupported dtype, {dtype}'")

    def _pow2_as_f32(exp: int) -> float:
        # Build 2**exp by assembling the IEEE-754 single-precision bit pattern
        # (biased exponent field, zero mantissa) and reinterpreting the bits as
        # a float via struct pack/unpack.
        bits = ((exp + 127) & 0xFF) << 23
        return struct.unpack("f", struct.pack("I", bits))[0]

    # NaN and INF have no fixed-point representation.
    if math.isnan(flp) or math.isinf(flp):
        raise RuntimeError("NaN or INF can not be represented as fixed-point")

    # Reinterpret the value as single-precision bits to read its exponent field.
    f32_bits = struct.unpack("I", struct.pack("f", flp))[0]
    exp_stored = (f32_bits >> 23) & 0xFF
    if exp_stored == 0:
        # Denormals would need a scale factor >= 2^127, which itself cannot be
        # represented as a normal float32, so they are rejected.
        raise RuntimeError(
            "Denormalized values are not considered for float -> fixed-point conversion!"
        )
    exp_value = exp_stored - 127  # remove the IEEE-754 exponent bias

    if dtype == "int16":
        max_bits = 14
    else:
        raise RuntimeError(f"Unsupported dtype, {dtype}'")

    exp_scale_factor = max_bits - exp_value  # log2 of the scale_factor
    if exp_scale_factor > 127:
        raise RuntimeError("Value too small for fixed-point conversion!")

    fixed_point_value = int(round(flp * _pow2_as_f32(exp_scale_factor)))
    if not _fits(fixed_point_value):
        # Mantissa was close enough to 2 that rounding overflowed the range;
        # halve the scale factor and redo the rounding.
        exp_scale_factor -= 1
        fixed_point_value = int(round(flp * _pow2_as_f32(exp_scale_factor)))
    return fixed_point_value, exp_scale_factor
def saturate(x: te.Tensor, dtype: str):
    """Clamp ``x`` into the representable range of ``dtype``."""
    lower_bound = te.min_value(dtype)
    upper_bound = te.max_value(dtype)
    return te.max(lower_bound, te.min(x, upper_bound))
def get_vtcm_allocation_sizes(
    func_or_mod: Union[PrimFunc, IRModule], compacted=True
) -> Dict[str, int]:
    """Calculate the VTCM allocation size for every function.

    Parameters
    ----------
    func_or_mod : Union[PrimFunc, IRModule]
        PrimFunc or IRModule for which VTCM allocation size is to be calculated.
        A PrimFunc is wrapped into a single-function IRModule first.
    compacted :
        Whether to calculate the sizes after applying the VTCM lowering passes
        for buffer compaction, i.e. the size that would actually get allocated
        after lowering.

    Returns
    -------
    result : Dict[str, int]
        Maps each function name to the number of bytes it allocates in the
        "global.vtcm" scope (0 when the function allocates none).
    """
    if not isinstance(func_or_mod, (PrimFunc, IRModule)):
        raise TypeError(
            f"Expected argument to be PrimFunc or IRModule, but received {type(func_or_mod)}"
        )
    mod = (
        tvm.IRModule.from_expr(func_or_mod)
        if isinstance(func_or_mod, tvm.tir.PrimFunc)
        else func_or_mod
    )
    if compacted:
        # Run the buffer-compaction lowering so the reported sizes match what
        # would really be allocated.
        compaction_passes = tvm.tir.analysis.get_vtcm_compaction_passes()
        mod = tvm.transform.Sequential(list(compaction_passes))(mod)
    all_sizes = tvm.tir.analysis.calculate_allocated_bytes(mod)
    return {
        func_name: (sizes["global.vtcm"] if "global.vtcm" in sizes else 0)
        for func_name, sizes in all_sizes.items()
    }
| 16,525 | 34.463519 | 100 | py |
tvm | tvm-main/python/tvm/topi/hexagon/resize2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Compute and schedule for resize2d
Please note the following assumptions made by the implementation:
1) The input and output data will be multiple of crouton layout
2) And the supported layout is NHWC"""
from tvm import te
from tvm import tir
from tvm import topi
from .utils import get_layout_transform_fn
def resize2d_compute(
    data,
    roi,
    size,
    layout,
    method="linear",
    coordinate_transformation_mode="half_pixel",
    rounding_method="",
    bicubic_alpha=-0.5,
    bicubic_exclude=0,
    extrapolation_value=0.0,
    out_dtype=None,
    output_shape=None,
):
    """Thin wrapper around topi.image.resize2d; all arguments are forwarded unchanged."""
    forwarded_args = (
        data,
        roi,
        size,
        layout,
        method,
        coordinate_transformation_mode,
        rounding_method,
        bicubic_alpha,
        bicubic_exclude,
        extrapolation_value,
        out_dtype,
        output_shape,
    )
    return topi.image.resize2d(*forwarded_args)
def tir_resize2d_schedule(
    out_m,
    input_a,
    input_layout: str,
    output_layout: str,
):
    """Schedule for input/output layouts nhwc-8h2w32c2w-2d and nhwc-8h8w32c-2d."""
    prim_func = te.create_prim_func([input_a, out_m])
    sch = tir.Schedule(prim_func)
    block = sch.get_block("resize")

    if input_layout in ("nhwc-8h2w32c2w-2d", "nhwc-8h8w32c-2d"):
        sch.transform_layout(
            block, buffer=("read", 0), index_map=get_layout_transform_fn(input_layout)
        )
    sch.transform_layout(
        block, buffer=("write", 0), index_map=get_layout_transform_fn(output_layout)
    )

    # Both supported output layouts tile into fixed 2048-byte chunks of
    # 8h x w x 32c, differing only in the w-split: 4 for the fp16 layout
    # (8h2w32c2w, 2-byte elements) and 8 for the uint8 layout (8h8w32c,
    # 1-byte elements).  Split and reorder so the inner loops walk one chunk.
    w_split = {"nhwc-8h2w32c2w-2d": 4, "nhwc-8h8w32c-2d": 8}.get(output_layout)
    if w_split is not None:
        n, h, w, c = sch.get_loops(block)
        h_outer, h_inner = sch.split(h, [None, 8])
        w_outer, w_inner = sch.split(w, [None, w_split])
        c_outer, c_inner = sch.split(c, [None, 32])
        sch.reorder(n, h_outer, w_outer, c_outer, h_inner, w_inner, c_inner)
    return sch
| 3,642 | 30.136752 | 89 | py |
tvm | tvm-main/python/tvm/topi/hexagon/conv2d_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2d alter op functions for Hexagon"""
from tvm import relay
from ..utils import get_const_tuple
from .. import nn
from ..nn import conv2d_alter_layout
from ..generic.conv2d import conv2d_alter_int8_common
@conv2d_alter_layout.register("hexagon")
def _alter_conv2d_layout(attrs, inputs, tinfos, out_type):
    """Convert nn.conv2d into nn.contrib_conv2d_nchwc if vrmpy is applicable."""
    data_tensor, kernel_tensor = tinfos
    out_channel, in_channel, _, _ = get_const_tuple(kernel_tensor.shape)

    # vrmpy requires (u)int8 operands, a 4-channel inner reduction, 32-channel
    # output vectors, and NCHW/OIHW starting layouts.
    vrmpy_applicable = (
        "int8" in data_tensor.dtype
        and "int8" in kernel_tensor.dtype
        and out_channel % 32 == 0
        and in_channel % 4 == 0
        and attrs["data_layout"] == "NCHW"
        and attrs["kernel_layout"] == "OIHW"
    )
    if not vrmpy_applicable:
        return None

    n_elems = 4
    oc_bn = 32
    ic_bn = min(in_channel, 32)
    new_attrs = {key: attrs[key] for key in attrs.keys()}
    new_attrs["channels"] = out_channel
    new_attrs["data_layout"] = f"NCHW{ic_bn}c"
    new_attrs["kernel_layout"] = f"OIHW{ic_bn // n_elems:n}i{oc_bn:n}o{n_elems:n}i"
    new_attrs["out_layout"] = f"NCHW{oc_bn}c"
    return relay.nn.contrib_conv2d_nchwc(*inputs, **new_attrs)
@nn.conv2d_legalize.register("hexagon")
def _conv2d_legalize(attrs, inputs, arg_types):
    """Legalize conv2d op for vrmpy tensorization.

    If the inputs are signed or unsigned int8, the input and output channels are padded to be
    a multiple of 4 and 32 respectively.

    If the input data types are (int8, int8), they are converted to (uint8, int8) and
    the vector-by-vector variant of vrmpy is applied.
    If the input data types are (uint8, uint8), the more efficient vector-by-scalar variant of vrmpy
    is applied.

    Unlike the nn.dense case (see dense_alter_op.py), we do not convert (uint8, int8) to
    (uint8, uint8). That would introduce another convolution by a constant (128 or 1) filter,
    to compensate for the dtype legalization. In the nn.dense case, such compensation factor is
    just a sum over the K axis.
    """
    # Only the NCHW/OIHW layout combination is handled here.
    if attrs["data_layout"] != "NCHW" or attrs["kernel_layout"] != "OIHW":
        return None
    data_tensor, kernel_tensor = arg_types[0], arg_types[1]
    # BUG FIX: the original condition tested data_tensor.dtype twice, so a
    # non-int8 kernel paired with int8 data would wrongly be sent down the
    # int8 vrmpy path.  Both operands must be (u)int8, mirroring the check in
    # _alter_conv2d_layout.
    if "int8" in data_tensor.dtype and "int8" in kernel_tensor.dtype:
        output_tensor = arg_types[2]
        data, kernel = inputs
        desired_data_dtype = "uint8"
        in_channel_vector_length = 4
        out_channel_vector_length = 32
        return conv2d_alter_int8_common(
            data,
            data_tensor,
            kernel,
            kernel_tensor,
            output_tensor,
            attrs,
            desired_data_dtype,
            in_channel_vector_length,
            out_channel_vector_length,
        )
    return None
| 3,961 | 34.375 | 100 | py |
tvm | tvm-main/python/tvm/topi/hexagon/batch_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Schedule for composition of batch_matmul operator"""
import tvm
def schedule_batch_matmul(outs):
    """Schedule for batch_matmul op.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of batch_matmul in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, tvm.te.tensor.Tensor):
        outs = [outs]
    sch = tvm.te.create_schedule([tensor.op for tensor in outs])
    tvm.te.schedule.AutoInlineInjective(sch)
    return sch
| 1,362 | 32.243902 | 71 | py |
tvm | tvm-main/python/tvm/topi/hexagon/compute_poolarea.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument, too-many-locals
"""Compute PoolArea size which is used to exclude the zero-padding elements in the averaging
calculation.
"""
from tvm import te, tir
def compute_PoolArea(i, j, ih, iw, kh, kw, sh, sw, dh, dw, pad_top, pad_left):
    """Compute the number of kernel taps of a pooling window that land on real
    (non-padded) input data, so average pooling can exclude the zero padding
    from the divisor.

    Parameters
    ----------
    i, j:
        Output-tensor indices along H and W; they locate the sliding window.
    ih, iw:
        Input data size along H and W.
    kh, kw:
        Kernel size along H and W.
    sh, sw:
        Stride along H and W.
    dh, dw:
        Dilation along H and W.
    pad_top, pad_left:
        Padding on the top and left side of the input data.

    Notes
    -----
    The window for output element (i, j) starts at (i*sh, j*sw) in the padded
    coordinate system and spans (k - 1) * d + 1 positions per axis.  The window
    edges are clipped to the input data region:

    * bottom/right edge: min(start + (k - 1) * d + 1, data_size + pad) —
      the window may run past the input on the bottom/right.
    * top/left edge: when the window starts inside the padding, the edge is
      advanced not merely to the data boundary but to the first *dilated
      kernel tap* inside the data.  With num_prev = (pad - start - 1) // d
      taps falling in the padding, that tap sits at start + (num_prev + 1) * d.
      Advancing only to the boundary would over-count taps when the dilation
      stride skips over the boundary itself.

    The tap count is then
    PoolArea = ceil((bottom - top) / dh) * ceil((right - left) / dw).
    """
    row_start = i * sh
    col_start = j * sw
    # Advance the top/left edge to the first dilated tap inside the data when
    # the window starts inside the padding; otherwise keep the window start.
    top_edge = tir.if_then_else(
        tir.all(row_start - pad_top < 0),
        row_start + ((pad_top - row_start - 1) // dh + 1) * dh,
        row_start,
    )
    bottom_edge = te.min(row_start + (kh - 1) * dh + 1, ih + pad_top)
    left_edge = tir.if_then_else(
        tir.all(col_start - pad_left < 0),
        col_start + ((pad_left - col_start - 1) // dw + 1) * dw,
        col_start,
    )
    right_edge = te.min(col_start + (kw - 1) * dw + 1, iw + pad_left)
    # Ceil-division expressed through floor-division on negated operands.
    pool_height = -((bottom_edge - top_edge) // -dh)
    pool_width = -((right_edge - left_edge) // -dw)
    return pool_height * pool_width
| 7,557 | 51.486111 | 100 | py |
tvm | tvm-main/python/tvm/topi/hexagon/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Schedules for Hexagon. """
# pylint: disable=wildcard-import
from .batch_matmul import *
from .conv2d import *
from .dense import *
from .injective import *
from .pad import *
from .pooling import *
from .reduce import *
from .resize2d import *
from .tensor_intrin import *
from .qnn import *
from .dense_alter_op import *
from .conv2d_alter_op import *
| 1,146 | 32.735294 | 62 | py |
tvm | tvm-main/python/tvm/topi/hexagon/pad.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Schedule for nn.pad operator"""
import numpy as np
import tvm
def schedule_pad(outs):
    """Schedule for pad op.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of injective in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, tvm.te.tensor.Tensor):
        outs = [outs]
    sch = tvm.te.create_schedule([tensor.op for tensor in outs])
    tvm.te.schedule.AutoInlineInjective(sch)

    out = outs[0]
    # Number of elements per 128-byte HVX vector for this dtype.
    vector_elems = 128 // np.dtype(out.dtype).itemsize
    last_dim = out.shape[-1]
    # Fuse all axes and vectorize only when the innermost dimension is an
    # exact multiple of the vector width.  NOTE(review): the second operand is
    # vacuously true for non-negative sizes; kept as-is to preserve behavior.
    if last_dim % vector_elems == 0 and last_dim // vector_elems >= 0:
        fused_axis = sch[out].fuse(*out.op.axis)
        _, inner = sch[out].split(fused_axis, factor=vector_elems)
        sch[out].vectorize(inner)
    return sch
| 1,723 | 32.803922 | 92 | py |
tvm | tvm-main/python/tvm/topi/hexagon/tensor_intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Optimized implementation of q_multiply_shift based on LLVM intrinsics"""
import tvm
from tvm.ir import register_intrin_lowering
from tvm import te
def get_lanes(dtype: str):
    """Return the vector lane count encoded in a dtype string, e.g. 32 for "int32x32"."""
    _, separator, lanes = dtype.partition("x")
    return int(lanes) if separator else 1
def is_vector_type(dtype: str):
    """Return True when ``dtype`` is a vector type (lane count other than 1)."""
    lanes = get_lanes(dtype)
    return lanes != 1
def is_power_of_2(n: int):
    """Return True when ``n`` is a positive power of two."""
    if n == 0:
        return False
    # A power of two has exactly one set bit, so clearing the lowest set bit
    # yields zero.  Negative numbers always keep their sign bit set.
    return n & (n - 1) == 0
def _adapt_to_highest_lanes(*args, intrinsic=None, intrinsic_lanes: int = 0):
    """Apply the provided lowering intrinsic to arguments of a longer vector type.

    The wrapper:
    * recursively halves each argument until the chunks have ``intrinsic_lanes``
      lanes,
    * applies ``intrinsic`` to each tuple of argument chunks,
    * concatenates the partial results back into a single full-width vector.

    Parameters
    ----------
    args: List[PrimExpr]
        List of arguments. Each arg expression should have vector type with lanes
        equal `intrinsic_lanes * 2**n`.
    intrinsic: callable
        Intrinsic implementation to apply.
    intrinsic_lanes: int
        Vector length required by intrinsic implementation.

    Returns
    -------
    res : PrimExpr
        Resulting expression.
    """

    def split_args(args_set):
        # Halve every argument of every chunk: each N-lane expression becomes a
        # low half and a high half of N//2 lanes via vectorlow/vectorhigh.  The
        # output order (low chunk, then high chunk) must mirror concat_args.
        res_args_set = []
        for args_chunk in args_set:
            res_args_chunk_l = []
            res_args_chunk_h = []
            for arg_chunk in args_chunk:
                element, lanes = arg_chunk.dtype.split("x")
                res_arg_chunk_dtype = f"{element}x{int(lanes) // 2}"
                res_args_chunk_l.append(tvm.tir.op.vectorlow(res_arg_chunk_dtype, arg_chunk))
                res_args_chunk_h.append(tvm.tir.op.vectorhigh(res_arg_chunk_dtype, arg_chunk))
            res_args_set += [res_args_chunk_l, res_args_chunk_h]
        return res_args_set

    def concat_args(res_chunks):
        # Inverse of split_args: pairwise recombine adjacent (low, high)
        # results into vectors of twice the lane count via vectorcombine.
        merged_res_chunks = []
        for i in range(0, len(res_chunks), 2):
            arg_chunk_l = res_chunks[i]
            arg_chunk_h = res_chunks[i + 1]
            element, lanes = arg_chunk_l.dtype.split("x")
            res_arg_chunk_dtype = f"{element}x{int(lanes) * 2}"
            merged_res_chunks.append(
                tvm.tir.op.vectorcombine(res_arg_chunk_dtype, arg_chunk_l, arg_chunk_h)
            )
        return merged_res_chunks

    # All arguments must share one lane count, a power-of-two multiple of
    # intrinsic_lanes; num_chunks is how many intrinsic-sized pieces result.
    num_chunks = None
    for arg in args:
        _, lanes = arg.dtype.split("x")
        lanes = int(lanes)
        assert lanes % intrinsic_lanes == 0
        if num_chunks is None:
            assert is_power_of_2(lanes // intrinsic_lanes)
            num_chunks = lanes // intrinsic_lanes
        assert num_chunks == lanes // intrinsic_lanes

    # Split arguments (each halving doubles the chunk count).
    lowered_args = [args]
    while len(lowered_args) != num_chunks:
        lowered_args = split_args(lowered_args)

    # Intrinsic application, one call per chunk of arguments.
    lowered_res = []
    for l_arg in lowered_args:
        res = intrinsic(*l_arg)
        lowered_res.append(res)

    # Result concatenation back to the original lane count.
    while len(lowered_res) != 1:
        lowered_res = concat_args(lowered_res)
    return lowered_res[0]
def _q_multiply_shift_hexagon(op):
    """
    Implementation of q_multiply_shift through hexagon intrinsics vmpyewuh and vmpyowh when q == 31.

    Returns ``op`` unchanged (falling back to the generic lowering) whenever
    the HVX-specific path does not apply.
    """
    arg_x = op.args[0]
    arg_fractional_bits = op.args[2]
    # Don't use this intrinsic if we are not multiplying q31 numbers
    if arg_fractional_bits.value != 31:
        return op
    # The lowering below works on 32-lane int32 vectors; the lane count must
    # be 32 * 2**n so _adapt_to_highest_lanes can split the args evenly.
    x_lanes = get_lanes(arg_x.dtype)
    if x_lanes % 32 != 0 or not is_power_of_2(x_lanes // 32):
        return op

    # pylint: disable=unused-argument
    def intrinsic_lowering_32(x, y, fractional_bits, shift):
        # Lower one 32-lane chunk.  Both shift signs are computed and the
        # result selected at the end, since `shift` is a runtime value.
        lowered_dtype = "int32x32"
        # Case 1, shift is negative
        # vmpyewuh + vmpyowh.sacc accumulate the even/odd 32x32 multiplies —
        # presumably forming the high word of the 64-bit product
        # (the q31 multiply); TODO confirm against the HVX manual.
        mul_e_1 = tvm.tir.call_llvm_intrin(
            lowered_dtype, "llvm.hexagon.V6.vmpyewuh.128B", tvm.tir.const(2, "uint32"), x, y
        )
        mul_o_1 = tvm.tir.call_llvm_intrin(
            lowered_dtype,
            "llvm.hexagon.V6.vmpyowh.sacc.128B",
            tvm.tir.const(3, "uint32"),
            mul_e_1,
            x,
            y,
        )
        # Rounding term for the subsequent arithmetic shift by `shift` (< 0).
        fixup = 1 << (-shift - 1)
        round_mul = mul_o_1 + fixup
        # vaslwv: per-lane arithmetic shift left by a (negative) vector amount.
        out_negative_shift = tvm.tir.call_llvm_intrin(
            lowered_dtype,
            "llvm.hexagon.V6.vaslwv.128B",
            tvm.tir.const(2, "uint32"),
            round_mul,
            shift,
        )
        # Case 2, shift is positive
        # Pre-shift x, then use the rounding variant of vmpyowh.
        x = x * (1 << (shift))
        mul_e_2 = tvm.tir.call_llvm_intrin(
            lowered_dtype, "llvm.hexagon.V6.vmpyewuh.128B", tvm.tir.const(2, "uint32"), x, y
        )
        mul_o_2 = tvm.tir.call_llvm_intrin(
            lowered_dtype,
            "llvm.hexagon.V6.vmpyowh.rnd.sacc.128B",
            tvm.tir.const(3, "uint32"),
            mul_e_2,
            x,
            y,
        )
        # Select depending on the shift
        return tvm.tir.Select(shift < 0, out_negative_shift, mul_o_2)

    return _adapt_to_highest_lanes(*op.args, intrinsic=intrinsic_lowering_32, intrinsic_lanes=32)
# Register the HVX-specific lowering for tir.q_multiply_shift on Hexagon;
# level 99 gives it precedence over the generic lowering.
register_intrin_lowering(
    "tir.q_multiply_shift", target="hexagon", f=_q_multiply_shift_hexagon, level=99
)
def _q_multiply_shift_per_axis_hexagon(op):
    """
    Implementation of q_multiply_shift_per_axis through hexagon intrinsics vmpyewuh and vmpyowh when
    q == 31.

    Returns ``op`` unchanged (falling back to the generic lowering) whenever
    the HVX-specific path does not apply.
    """
    arg_x = op.args[0]
    arg_fractional_bits = op.args[4]
    arg_is_lshift_required = op.args[5]
    arg_is_rshift_required = op.args[6]
    # Don't use this intrinsic if we are not multiplying q31 numbers
    if arg_fractional_bits.value != 31:
        return op
    # The lowering below works on 32-lane int32 vectors; the lane count must
    # be 32 * 2**n so _adapt_to_highest_lanes can split the args evenly.
    x_lanes = get_lanes(arg_x.dtype)
    if x_lanes % 32 != 0 or not is_power_of_2(x_lanes // 32):
        return op
    # Don't use this intrinsic when we need do both: left and right shifts.
    # For now it is not clear how to implement this case through vector HVX instructions without
    # accuracy drop.
    if arg_is_rshift_required.value and arg_is_lshift_required.value:
        return op

    # pylint: disable=unused-argument
    def intrinsic_impl_32(
        x, y, left_shift, right_shift, fractional_bits, is_lshift_required, is_rshift_required
    ):
        # Lower one 32-lane chunk.  All three shift variants are computed and
        # the final value chosen with nested Selects.
        lowered_dtype = "int32x32"
        # Case 1: do the left shift
        shifted_x = x << left_shift
        mul_e_1 = tvm.tir.call_llvm_intrin(
            lowered_dtype, "llvm.hexagon.V6.vmpyewuh.128B", tvm.tir.const(2, "uint32"), shifted_x, y
        )
        left_shift_out = tvm.tir.call_llvm_intrin(
            lowered_dtype,
            "llvm.hexagon.V6.vmpyowh.rnd.sacc.128B",
            tvm.tir.const(3, "uint32"),
            mul_e_1,
            shifted_x,
            y,
        )
        # Case 2: do the right shift
        mul_e_2 = tvm.tir.call_llvm_intrin(
            lowered_dtype, "llvm.hexagon.V6.vmpyewuh.128B", tvm.tir.const(2, "uint32"), x, y
        )
        mul_o_2 = tvm.tir.call_llvm_intrin(
            lowered_dtype,
            "llvm.hexagon.V6.vmpyowh.sacc.128B",
            tvm.tir.const(3, "uint32"),
            mul_e_2,
            x,
            y,
        )
        # Rounding term added before the arithmetic right shift (vasrwv).
        fixup = 1 << (right_shift - 1)
        round_mul = mul_o_2 + fixup
        right_shift_out = tvm.tir.call_llvm_intrin(
            lowered_dtype,
            "llvm.hexagon.V6.vasrwv.128B",
            tvm.tir.const(2, "uint32"),
            round_mul,
            right_shift,
        )
        # Case 3: do neither right nor left shift
        mul_e_3 = tvm.tir.call_llvm_intrin(
            lowered_dtype, "llvm.hexagon.V6.vmpyewuh.128B", tvm.tir.const(2, "uint32"), x, y
        )
        no_shift_out = tvm.tir.call_llvm_intrin(
            lowered_dtype,
            "llvm.hexagon.V6.vmpyowh.rnd.sacc.128B",
            tvm.tir.const(3, "uint32"),
            mul_e_3,
            x,
            y,
        )
        return tvm.tir.Select(
            tvm.tir.Not(tvm.tir.Or(is_lshift_required, is_rshift_required)),
            no_shift_out,
            tvm.tir.Select(is_lshift_required, left_shift_out, right_shift_out),
        )

    return _adapt_to_highest_lanes(*op.args, intrinsic=intrinsic_impl_32, intrinsic_lanes=32)
# Register the HVX-specific lowering for tir.q_multiply_shift_per_axis on
# Hexagon; level 99 gives it precedence over the generic lowering.
register_intrin_lowering(
    "tir.q_multiply_shift_per_axis",
    target="hexagon",
    f=_q_multiply_shift_per_axis_hexagon,
    level=99,
)
def dot_vrmpy(x_ty, y_ty):
    """Generates vrmpy instruction for tensorization.

    Declares a tensor intrinsic that computes a (32,)-shaped int32 output
    where each lane i is the dot product of a 4-element int8/uint8 `data`
    vector with row i of a (32, 4) `kernel`, mapped onto the Hexagon HVX
    128-byte vrmpy LLVM intrinsics.

    Parameters
    ----------
    x_ty : str
        dtype of the data operand; must be "uint8".
    y_ty : str
        dtype of the kernel operand; "uint8" or "int8".

    Returns
    -------
    TensorIntrin
        The declared tensor intrinsic usable with te tensorize.
    """
    int32_lanes = 32
    num_int8_elements = 4 # 4 int8 elements in int32
    data = te.placeholder((num_int8_elements,), dtype=x_ty, name="data")
    kernel = te.placeholder((int32_lanes, num_int8_elements), dtype=y_ty, name="kernel")
    k = te.reduce_axis((0, num_int8_elements), name="k")
    # Reference (scalar) semantics the intrinsic must match.
    C = te.compute(
        (int32_lanes,),
        lambda i: te.sum(data[k].astype("int32") * kernel[i, k].astype("int32"), axis=k),
        name="C",
    )
    a_buffer = tvm.tir.decl_buffer(
        data.shape, dtype=x_ty, name="a_buffer", offset_factor=1, strides=[1]
    )
    b_buffer = tvm.tir.decl_buffer(
        kernel.shape, dtype=y_ty, name="b_buffer", offset_factor=1, strides=[te.var("ldw"), 1]
    )
    def _intrin_func(ins, outs):
        # Emit one of three statement variants: body (0), reset (1), update (2).
        def _instr(index):
            ib = tvm.tir.ir_builder.create()
            if index == 1:
                # Reset: zero-initialize the 32-lane accumulator.
                ib.emit(outs[0].vstore(0, tvm.tir.const(0, "int32x32")))
                return ib.get()
            vec_zero = tvm.tir.const(0, "int32x32")
            if x_ty == "uint8" and y_ty == "uint8":
                # Pack the 4 uint8 data bytes into a single int32 scalar operand.
                a_uint8 = ins[0].vload([0], "uint8x4")
                re_int32 = tvm.tir.call_intrin("int32", "tir.reinterpret", a_uint8)
                vec_b = ins[1].vload([0, 0], "uint8x128")
                vrmpy_inst_name = "llvm.hexagon.V6.vrmpyub.acc.128B"
                vec_bi32 = tvm.tir.call_intrin("int32x32", "tir.reinterpret", vec_b)
                quad_reduction = tvm.tir.call_llvm_pure_intrin(
                    "int32x32",
                    vrmpy_inst_name,
                    tvm.tir.const(3, "uint32"),
                    vec_zero,
                    vec_bi32,
                    re_int32,
                )
            elif x_ty == "uint8" and y_ty == "int8":
                a_uint8 = ins[0].vload([0], "uint8x4")
                re_int32 = tvm.tir.call_intrin("int32", "tir.reinterpret", a_uint8)
                vec_b = ins[1].vload([0, 0], "int8x128")
                # NOTE(review): the vector-by-vector variant is used here, so the
                # packed data scalar is broadcast to 32 lanes below — confirm this
                # matches the llvm.hexagon.V6.vrmpybusv.acc.128B operand order.
                vrmpy_inst_name = "llvm.hexagon.V6.vrmpybusv.acc.128B"
                vec_bi32 = tvm.tir.call_intrin("int32x32", "tir.reinterpret", vec_b)
                quad_reduction = tvm.tir.call_llvm_pure_intrin(
                    "int32x32",
                    vrmpy_inst_name,
                    tvm.tir.const(3, "uint32"),
                    vec_zero,
                    re_int32.astype("int32x32"),
                    vec_bi32,
                )
            else:
                raise ValueError("Only (u8, u8) or (u8, i8) dtype pairs are supported by vrmpy.")
            if index == 0:
                # Body: overwrite the output with the fresh reduction.
                ib.emit(outs[0].vstore(0, quad_reduction))
            else:
                # Update: accumulate into the existing output.
                ib.emit(outs[0].vstore(0, quad_reduction + outs[0].vload([0], "int32x32")))
            return ib.get()
        # body, reset, update
        return _instr(0), _instr(1), _instr(2)
    buffer_params = {"offset_factor": 1}
    return te.decl_tensor_intrin(
        C.op,
        _intrin_func,
        binds={data: a_buffer, kernel: b_buffer},
        default_buffer_params=buffer_params,
    )
| 12,176 | 32 | 100 | py |
tvm | tvm-main/python/tvm/topi/hexagon/qnn/qadd_qsub_qmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Compute and schedule for quantized add, multiply, subtract op
Please note the following assumptions made by the implementation:
1) The inputs will be multiple of crouton layout except for the axis that needs broadcasting."""
from tvm import te
from tvm import tir
from ..utils import get_layout_transform_fn, get_fixed_point_value
def broadcast_axis(tensor_A, tensor_B):
    """Find out the indices that will have broadcasting.

    For each axis, a multiplier of 1 means the tensor is indexed normally
    along that axis; 0 means the axis is broadcast (only index 0 is valid,
    so the output index is multiplied away by the caller).

    Parameters
    ----------
    tensor_A, tensor_B : te.Tensor
        Tensors of the same rank with broadcast-compatible shapes.

    Returns
    -------
    (list, list)
        Per-axis 0/1 multiplier lists for tensor_A and tensor_B.

    Raises
    ------
    ValueError
        If a pair of extents is neither equal nor broadcastable (differ and
        neither is 1). Previously this case silently fell through, producing
        multiplier lists shorter than the tensor rank.
    """
    A_broadcast = []
    B_broadcast = []
    for i in range(len(tensor_A.shape)):
        if tensor_A.shape[i] == tensor_B.shape[i]:
            # Equal extents: both tensors indexed normally on this axis.
            A_broadcast.append(1)
            B_broadcast.append(1)
        elif tensor_A.shape[i] == 1:
            # tensor_A broadcast along this axis.
            A_broadcast.append(0)
            B_broadcast.append(1)
        elif tensor_B.shape[i] == 1:
            # tensor_B broadcast along this axis.
            A_broadcast.append(1)
            B_broadcast.append(0)
        else:
            raise ValueError(
                f"Shapes {tensor_A.shape} and {tensor_B.shape} are not "
                f"broadcastable at axis {i}"
            )
    return A_broadcast, B_broadcast
def saturate(x: te.Tensor, dtype: str):
    """Clamp ``x`` into the representable range of ``dtype``."""
    lower_bound = te.min_value(dtype)
    upper_bound = te.max_value(dtype)
    return te.max(lower_bound, te.min(x, upper_bound))
def get_int_scale(
    scale_A: float,
    scale_B: float,
    scale_M: float,
    zero_point_A: int,
    zero_point_B: int,
    zero_point_M: int,
    op: str,
):
    """
    Get fixed-point number and exp_scale_factor from topi.hexagon.utils.get_fixed_point_value.
    Also, depending on the op, this function uses exp_scale_factor(log2 of the scale factor)
    to adjust the output's zero_point.

    Returns
    -------
    For op == "qmul": (scale_fixed_point, rsh, corr) — a 3-tuple.
    For op == "qadd"/"qsub": (scale_fixed_point_a, scale_fixed_point_b, rsh, corr)
    — a 4-tuple with one fixed-point scale per input.
    """
    C_recip = 1 / scale_M
    if op == "qmul":
        scale = scale_A * scale_B * C_recip
        scale_fixed_point, rsh = get_fixed_point_value(scale, "int16")
        # We need to adjust output's zero point value since the compute for the op is multiplied
        # by a scaling factor.
        # The scaling factor is 2^x where x is the exp_scale_factor which is assigned to rsh here.
        # Since zero_point_M is multipled by 2^rsh while converting floating-point scale value
        # into fixed-point number, we left shift it by rsh in our compute to reflect that.
        corr = zero_point_M << rsh
        return scale_fixed_point, rsh, corr
    # qadd/qsub path: each input gets its own fixed-point scale.
    a_scale_f = scale_A * C_recip
    b_scale_f = scale_B * C_recip
    scale_fixed_point_a, rsh_a = get_fixed_point_value(a_scale_f, "int16")
    scale_fixed_point_b, rsh_b = get_fixed_point_value(b_scale_f, "int16")
    # Here we have two exp_scale_factors rsh_a and rsh_b.
    # To avoid complexity, we want to use a common exp_scale_factor and
    # we want to use the lowest of the two.
    # Since, either of scale_fixed_point_a or scale_fixed_point_b has already been multiplied
    # by 2^max(rsh_a, rsh_b) in topi.hexagon.utils.get_fixed_point_value,
    # we want to undo that by right shifting that scale_fixed_point value
    # by the difference of rsh_a and rsh_b.
    # This results into having a common exp_scale_factor for both scale_fixed_point_a
    # and scale_fixed_point_b.
    # We also set rsh here which is used to adjust the zero_point_M and compute the corr value,
    # computation of which comes from the original equation of the op's compute.
    if rsh_a > rsh_b:
        scale_fixed_point_a = scale_fixed_point_a >> (rsh_a - rsh_b)
        rsh = rsh_b
    else:
        scale_fixed_point_b = scale_fixed_point_b >> (rsh_b - rsh_a)
        rsh = rsh_a
    if op == "qadd":
        corr = (zero_point_M << rsh) - (
            zero_point_A * scale_fixed_point_a + zero_point_B * scale_fixed_point_b
        )
    else:
        # qsub: the B zero-point contribution enters with the opposite sign.
        corr = (zero_point_M << rsh) - (
            zero_point_A * scale_fixed_point_a - zero_point_B * scale_fixed_point_b
        )
    return scale_fixed_point_a, scale_fixed_point_b, rsh, corr
def qadd_broadcast_compute(
    tensor_A: te.Tensor,
    tensor_B: te.Tensor,
    output_shape: list,
    zero_point_A: int,
    scale_A: float,
    zero_point_B: int,
    scale_B: float,
    zero_point_M: int,
    scale_M: float,
    dtype: str,
):
    """Compute quantized add with broadcasting"""
    bcast_A, bcast_B = broadcast_axis(tensor_A, tensor_B)
    na, ha, wa, ca = bcast_A
    nb, hb, wb, cb = bcast_B
    fp_a, fp_b, shift, offset = get_int_scale(
        scale_A, scale_B, scale_M, zero_point_A, zero_point_B, zero_point_M, "qadd"
    )

    # Fixed-point add: scale each input, add the precomputed zero-point
    # correction, shift back down, then saturate to the output dtype.
    def _qadd(n, h, w, c):
        lhs = tensor_A[n * na, h * ha, w * wa, c * ca] * fp_a
        rhs = tensor_B[n * nb, h * hb, w * wb, c * cb] * fp_b
        acc = (lhs + rhs + offset) >> shift
        return saturate(acc, dtype).astype(dtype)

    return te.compute(output_shape, _qadd)
def qsubtract_broadcast_compute(
    tensor_A: te.Tensor,
    tensor_B: te.Tensor,
    output_shape: list,
    zero_point_A: int,
    scale_A: float,
    zero_point_B: int,
    scale_B: float,
    zero_point_M: int,
    scale_M: float,
    dtype: str,
):
    """Compute quantized subtract with broadcasting"""
    bcast_A, bcast_B = broadcast_axis(tensor_A, tensor_B)
    na, ha, wa, ca = bcast_A
    nb, hb, wb, cb = bcast_B
    fp_a, fp_b, shift, offset = get_int_scale(
        scale_A, scale_B, scale_M, zero_point_A, zero_point_B, zero_point_M, "qsub"
    )

    # Fixed-point subtract: scale each input, subtract, add the zero-point
    # correction, shift back down, then saturate to the output dtype.
    def _qsub(n, h, w, c):
        lhs = tensor_A[n * na, h * ha, w * wa, c * ca] * fp_a
        rhs = tensor_B[n * nb, h * hb, w * wb, c * cb] * fp_b
        acc = (lhs - rhs + offset) >> shift
        return saturate(acc, dtype).astype(dtype)

    return te.compute(output_shape, _qsub)
def qmultiply_broadcast_compute(
    tensor_A: te.Tensor,
    tensor_B: te.Tensor,
    output_shape: list,
    zero_point_A: int,
    scale_A: float,
    zero_point_B: int,
    scale_B: float,
    zero_point_M: int,
    scale_M: float,
    dtype: str,
):
    """Compute quantized multiply with broadcasting"""
    bcast_A, bcast_B = broadcast_axis(tensor_A, tensor_B)
    na, ha, wa, ca = bcast_A
    nb, hb, wb, cb = bcast_B
    fp_scale, shift, offset = get_int_scale(
        scale_A, scale_B, scale_M, zero_point_A, zero_point_B, zero_point_M, "qmul"
    )

    # Fixed-point multiply: subtract the zero points, multiply by the combined
    # fixed-point scale, add the correction, shift down, then saturate.
    def _qmul(n, h, w, c):
        lhs = tensor_A[n * na, h * ha, w * wa, c * ca] - zero_point_A
        rhs = tensor_B[n * nb, h * hb, w * wb, c * cb] - zero_point_B
        acc = (fp_scale * lhs * rhs + offset) >> shift
        return saturate(acc, dtype).astype(dtype)

    return te.compute(output_shape, _qmul)
def tir_schedule_quant(
    out_M: te.Tensor,
    tensor_A: te.Tensor,
    tensor_B: te.Tensor,
    output_layout: str,
    tensor_A_layout: str,
    tensor_B_layout: str,
):
    """Schedule for output layout nhwc-8h8w32c-2d

    Builds a TIR schedule for the binary qnn op: applies crouton layout
    transforms to any operand declared as "nhwc-8h8w32c-2d" and to the
    output, then tiles the loop nest into 8h x 8w x 32c chunks.
    """
    func = te.create_prim_func([tensor_A, tensor_B, out_M])
    s = tir.Schedule(func)
    block = s.get_block("compute")
    # Only transform inputs that are in the crouton layout; other layouts
    # (e.g. an axis being broadcast) are left untouched.
    if tensor_A_layout == "nhwc-8h8w32c-2d":
        tensor_A_transformed_layout = get_layout_transform_fn(tensor_A_layout)
        s.transform_layout(block, buffer=tensor_A.name, index_map=tensor_A_transformed_layout)
    if tensor_B_layout == "nhwc-8h8w32c-2d":
        tensor_B_transformed_layout = get_layout_transform_fn(tensor_B_layout)
        s.transform_layout(block, buffer=tensor_B.name, index_map=tensor_B_transformed_layout)
    output_transformed_layout = get_layout_transform_fn(output_layout)
    s.transform_layout(block, buffer=out_M.name, index_map=output_transformed_layout)
    # Tile to the 8h8w32c chunk shape; w is further split by 4 so the
    # innermost extent matches the layout's inner-w factor.
    n, h, w, c = s.get_loops(block)
    h_o, h_i = s.split(h, [None, 8])
    w_o, w_i = s.split(w, [None, 8])
    c_o, c_i = s.split(c, [None, 32])
    wio, wii = s.split(w_i, [None, 4])
    s.reorder(n, h_o, w_o, c_o, h_i, wio, wii, c_i)
    return s
| 8,713 | 31.154982 | 98 | py |
tvm | tvm-main/python/tvm/topi/hexagon/qnn/dense_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""QNN Dense alter op functions for Hexagon"""
from tvm import relay
from ..dense_alter_op import check_vrmpy_applicable
from ...nn import qnn_dense_alter_layout
@qnn_dense_alter_layout.register("hexagon")
def _alter_qnn_dense_layout(_attrs, inputs, tinfos, out_type):
    """Rewrite qnn.dense to the packed contrib_dense_pack form when the
    vrmpy tensorization check passes; otherwise leave the op unchanged."""
    data_tensor = tinfos[0]
    weight_tensor = tinfos[1]
    if not check_vrmpy_applicable(data_tensor, weight_tensor):
        return None
    return relay.qnn.op.contrib_dense_pack(*inputs, "NC32n4c", None, out_type.dtype)
| 1,332 | 38.205882 | 92 | py |
tvm | tvm-main/python/tvm/topi/hexagon/qnn/dequantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
""" Hexagon qnn.dequantize slice op compute and schedule"""
from tvm import te
from tvm import tir
from ..utils import get_layout_transform_fn
def dequantize_compute(tensor_A, scale_A, zero_point_A):
    """Compute for qnn.dequantize: fp32_out = scale_A * (q_in - zero_point_A)."""
    return te.compute(
        tensor_A.shape,
        lambda *indices: (scale_A * (tensor_A[indices] - zero_point_A)).astype("float32"),
        name="dequantize",
    )
def dequantize_stir_schedule_nhwc_8h8w32c(
    _in,
    _out,
    in_layout,
    out_layout,
):
    """Schedule for nhwc int8/uint8 to f32 : nhwc layout

    Tiles the dequantize loop nest, applies the crouton input layout and the
    requested output layout, and vectorizes the fused innermost (w-inner, c)
    loop pair.
    """
    func = te.create_prim_func([_in, _out])
    sch = tir.Schedule(func, debug_mask="all")
    block_name = "dequantize"
    n, h, w, c = sch.get_loops(sch.get_block(block_name))
    # NOTE(review): h is split by 4 although the input layout name suggests
    # 8h chunks — confirm this matches the "nhwc-4h2w32c2w-2d" output layout
    # dispatched to this schedule.
    ho, hi = sch.split(h, [None, 4])
    wo, wi = sch.split(w, [None, 8])
    wio, wii = sch.split(wi, [None, 4])
    co, ci = sch.split(c, [None, 32])
    sch.transform_layout(block_name, "A", in_layout)
    sch.transform_layout(block_name, block_name, out_layout)
    sch.reorder(n, ho, wo, co, hi, wio, wii, ci)
    # Fuse the innermost w and c loops to get a wide vector lane.
    wii_ci = sch.fuse(wii, ci)
    sch.vectorize(wii_ci)
    return sch
def dequantize_stir_schedule_nc(
    _in,
    _out,
    in_layout,
    out_layout,
):
    """Schedule for nc int8/uint8 to f32 : nc layout"""
    prim = te.create_prim_func([_in, _out])
    schedule = tir.Schedule(prim, debug_mask="all")
    dequant = "dequantize"
    _, channel = schedule.get_loops(schedule.get_block(dequant))
    # Split channels into 512-wide chunks and vectorize the inner chunk.
    _, channel_inner = schedule.split(channel, [None, 512])
    schedule.transform_layout(dequant, "A", in_layout)
    schedule.transform_layout(dequant, dequant, out_layout)
    schedule.vectorize(channel_inner)
    return schedule
def dequantize_schedule(_in, _output, in_layout_str, out_layout_str):
    """Schedule for int8/uint8 to f32 : top level function

    Dispatches to the layout-specific STIR schedule based on out_layout_str.

    Raises
    ------
    RuntimeError
        If out_layout_str is not one of the supported output layouts.
    """
    f32_layout_transform_func = get_layout_transform_fn(out_layout_str)
    in_layout_transform_func = get_layout_transform_fn(in_layout_str)
    if out_layout_str == "nhwc-4h2w32c2w-2d":
        return dequantize_stir_schedule_nhwc_8h8w32c(
            _in,
            _output,
            in_layout_transform_func,
            f32_layout_transform_func,
        )
    if out_layout_str == "nc-512c-2d":
        return dequantize_stir_schedule_nc(
            _in,
            _output,
            in_layout_transform_func,
            f32_layout_transform_func,
        )
    # Bug fix: the message previously interpolated an undefined name `layout`,
    # so reaching this line raised NameError instead of the intended error.
    raise RuntimeError(f"Unexpected layout '{out_layout_str}'")
| 3,207 | 32.768421 | 90 | py |
tvm | tvm-main/python/tvm/topi/hexagon/qnn/conv2d_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""QNN Conv2d alter op functions for Hexagon"""
from tvm import relay
from ...nn import qnn_conv2d_alter_layout
from ...utils import get_const_tuple
@qnn_conv2d_alter_layout.register("hexagon")
def _alter_qnn_conv2d_layout(attrs, inputs, tinfos, _out_type):
    """Rewrite an int8 NCHW/OIHW qnn.conv2d into the blocked NCHWc form,
    or return None to keep the original op."""
    data_tensor, kernel_tensor, _, _, _, _ = tinfos
    # Guard clauses: only int8 data/kernel in NCHW/OIHW is rewritten.
    if attrs["data_layout"] != "NCHW" or attrs["kernel_layout"] != "OIHW":
        return None
    if "int8" not in data_tensor.dtype or "int8" not in kernel_tensor.dtype:
        return None
    out_channel, in_channel, _, _ = get_const_tuple(kernel_tensor.shape)
    if out_channel % 32 != 0 or in_channel % 4 != 0:
        return None
    n_elems = 4
    oc_bn = 32
    ic_bn = min(in_channel, 32)
    new_attrs = dict(attrs)
    new_attrs["channels"] = out_channel
    new_attrs["data_layout"] = f"NCHW{ic_bn}c"
    new_attrs["kernel_layout"] = f"OIHW{ic_bn // n_elems:n}i{oc_bn:n}o{n_elems:n}i"
    new_attrs["out_layout"] = f"NCHW{oc_bn}c"
    return relay.qnn.op.conv2d(*inputs, **new_attrs)
| 1,921 | 34.592593 | 87 | py |
tvm | tvm-main/python/tvm/topi/hexagon/qnn/avg_pool2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument, too-many-locals
""" Compute and schedule for quantized avg_pool2d op """
import tvm
from tvm import te
from tvm import tir
from ..utils import (
get_layout_transform_fn,
get_fixed_point_value,
is_scalar,
get_const_int_value,
get_const_float_value,
)
from ...utils import get_const_tuple
from ...nn.utils import get_pad_tuple
from ...nn.pad import pad
from ..compute_poolarea import compute_PoolArea
def saturate(x: te.Tensor, dtype: str):
    """Saturate value for the specified data type"""
    clipped_from_above = te.min(x, te.max_value(dtype))
    return te.max(te.min_value(dtype), clipped_from_above)
def qnn_avg_pool2d_NCHW(
    data: te.Tensor,
    kernel: list,
    stride: list,
    padding: list,
    dilation: list,
    count_include_pad: bool,
    oshape: list,
    odtype: str,
    # quantization params:
    input_scale: float,
    input_zero_point: int,
    output_scale: float,
    output_zero_point: int,
):
    """Compute for quantized avg_pool2d (NCHW layout).

    Accumulates window sums in 16-bit, then rescales with a fixed-point
    multiplier derived from input_scale/output_scale and a zero-point
    correction term.

    Fix: guard the per-window scale against PoolArea == 0 (window lying
    entirely in padding) with tir.if_then_else, matching the NHWC variant;
    previously this divided by zero.
    """
    kh, kw = kernel
    rh = te.reduce_axis((0, kh), name="rh")
    rw = te.reduce_axis((0, kw), name="rw")
    # 16-bit accumulator dtype matching the signedness of the output.
    if odtype == "uint8":
        temp_dtype = "uint16"
    elif odtype == "int8":
        temp_dtype = "int16"
    else:
        raise RuntimeError(f"Unsupported output dtype, {odtype}'")
    sh, sw = stride
    dh, dw = dilation
    # Fold the two float scales into one int16 fixed-point multiplier plus a
    # right-shift amount; corr pre-combines both zero points at that shift.
    scale = input_scale / output_scale
    scale_fixed_point, rsh = get_fixed_point_value(scale, "int16")
    corr = (output_zero_point << rsh) - input_zero_point * scale_fixed_point
    dilated_kh = (kh - 1) * dh + 1
    dilated_kw = (kw - 1) * dw + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        get_const_tuple(padding), (dilated_kh, dilated_kw)
    )
    # DOPAD
    if pad_top != 0 or pad_down != 0 or pad_left != 0 or pad_right != 0:
        pad_before = (0, 0, pad_top, pad_left)
        pad_after = (0, 0, pad_down, pad_right)
        data_pad = pad(data, pad_before, pad_after, pad_value=input_zero_point, name="data_pad")
    else:
        # By definition when True, zero-padding will be included in the averaging calculation
        # This is equivalent to PoolArea = (kh * kw)
        count_include_pad = True
        data_pad = data
    Sum = te.compute(
        oshape,
        lambda b, c, h, w: te.sum(
            data_pad[b, c, h * sh + dh * rh, w * sw + dw * rw].astype(temp_dtype), axis=[rh, rw]
        ),
        name="pool_sum",
    )
    if not count_include_pad:
        # Compute PoolArea using unpadded input tensor
        _, _, oh, ow = oshape
        _, _, ih, iw = data.shape
        PoolArea = te.compute(
            (oh, ow),
            lambda i, j: compute_PoolArea(i, j, ih, iw, kh, kw, sh, sw, dh, dw, pad_top, pad_left),
            name="pool_area",
        )
        # Guard against PoolArea == 0 (consistent with qnn_avg_pool2d_NHWC);
        # emit a 0 scale for such windows instead of dividing by zero.
        ScaleWithArea = te.compute(
            (oh, ow),
            lambda i, j: tir.if_then_else(
                tir.all(PoolArea[i, j] > 0),
                (scale_fixed_point // PoolArea[i, j]).astype("int32"),
                0,
            ),
            name="scale_with_area",
        )
        Avg = te.compute(
            oshape,
            lambda b, c, h, w: saturate(
                ((Sum[b, c, h, w] * ScaleWithArea[h, w]) + corr + (1 << (rsh - 1))) >> rsh, odtype
            ).astype(odtype),
            name="pool_avg",
        )
    else:
        # Constant window area: fold it into the fixed-point scale once.
        ScaleWithArea = scale_fixed_point // (kh * kw)
        Avg = te.compute(
            oshape,
            lambda b, c, h, w: saturate(
                ((Sum[b, c, h, w] * ScaleWithArea) + corr + (1 << (rsh - 1))) >> rsh, odtype
            ).astype(odtype),
            name="pool_avg",
        )
    return Avg
def qnn_avg_pool2d_NHWC(
    data: te.Tensor,
    kernel: list,
    stride: list,
    padding: list,
    dilation: list,
    count_include_pad: bool,
    oshape: list,
    odtype: str,
    # quantization params:
    input_scale: float,
    input_zero_point: int,
    output_scale: float,
    output_zero_point: int,
):
    """Compute for quantized avg_pool2d (NHWC layout).

    Accumulates window sums in 16-bit, then rescales with an int16
    fixed-point multiplier derived from input_scale/output_scale plus a
    precomputed zero-point correction term.
    """
    kh, kw = kernel
    rh = te.reduce_axis((0, kh), name="rh")
    rw = te.reduce_axis((0, kw), name="rw")
    # 16-bit accumulator dtype matching the signedness of the output.
    if odtype == "uint8":
        temp_dtype = "uint16"
    elif odtype == "int8":
        temp_dtype = "int16"
    else:
        raise RuntimeError(f"Unsupported output dtype, {odtype}'")
    sh, sw = stride
    dh, dw = dilation
    # Fold both float scales into one fixed-point multiplier + right shift;
    # corr pre-combines both zero points at that shift.
    scale = input_scale / output_scale
    scale_fixed_point, rsh = get_fixed_point_value(scale, "int16")
    corr = (output_zero_point << rsh) - input_zero_point * scale_fixed_point
    dilated_kh = (kh - 1) * dh + 1
    dilated_kw = (kw - 1) * dw + 1
    # Compute Area
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        get_const_tuple(padding), (dilated_kh, dilated_kw)
    )
    # DOPAD
    if pad_top != 0 or pad_down != 0 or pad_left != 0 or pad_right != 0:
        pad_before = (0, pad_top, pad_left, 0)
        pad_after = (0, pad_down, pad_right, 0)
        data_pad = pad(data, pad_before, pad_after, pad_value=input_zero_point, name="data_pad")
    else:
        # By definition when True, zero-padding will be included in the averaging calculation
        # This is equivalent to PoolArea = (kh * kw)
        count_include_pad = True
        data_pad = data
    Sum = te.compute(
        oshape,
        lambda b, h, w, c: te.sum(
            data_pad[b, h * sh + dh * rh, w * sw + dw * rw, c].astype(temp_dtype), axis=[rh, rw]
        ),
        name="pool_sum",
    )
    if not count_include_pad:
        # Compute PoolArea using unpadded input tensor
        _, oh, ow, _ = oshape
        _, ih, iw, _ = data.shape
        PoolArea = te.compute(
            (oh, ow),
            lambda i, j: compute_PoolArea(i, j, ih, iw, kh, kw, sh, sw, dh, dw, pad_top, pad_left),
            name="pool_area",
        )
        # Guard PoolArea == 0 (window entirely in padding) to avoid a
        # division by zero; such windows get a scale of 0.
        ScaleWithArea = te.compute(
            (oh, ow),
            lambda i, j: tir.if_then_else(
                tir.all(PoolArea[i, j] > 0),
                (scale_fixed_point // PoolArea[i, j]).astype("int32"),
                0,
            ),
            name="scale_with_area",
        )
        Avg = te.compute(
            oshape,
            lambda b, h, w, c: saturate(
                ((Sum[b, h, w, c] * ScaleWithArea[h, w]) + corr + (1 << (rsh - 1))) >> rsh, odtype
            ).astype(odtype),
            name="pool_avg",
        )
    else:
        # Constant window area: fold it into the fixed-point scale once.
        ScaleWithArea = scale_fixed_point // (kh * kw)
        Avg = te.compute(
            oshape,
            lambda b, h, w, c: saturate(
                ((Sum[b, h, w, c] * ScaleWithArea) + corr + (1 << (rsh - 1))) >> rsh, odtype
            ).astype(odtype),
            name="pool_avg",
        )
    return Avg
def qnn_avg_pool2d_wrapper_compute_NCHW(
    data: te.Tensor,
    kernel: list,
    stride: list,
    padding: list,
    dilation: list,
    count_include_pad: bool,
    oshape: list,
    odtype: str,
    # quantization params:
    input_scale: float,
    input_zero_point: int,
    output_scale: float,
    output_zero_point: int,
):
    """Extract qnn params"""
    # All four quantization parameters must be scalar constants.
    all_scalar = (
        is_scalar(input_scale)
        and is_scalar(output_scale)
        and is_scalar(input_zero_point)
        and is_scalar(output_zero_point)
    )
    if not all_scalar:
        raise RuntimeError("quantization parameters should be scalar tensors")
    in_scale = get_const_float_value(input_scale)
    out_scale = get_const_float_value(output_scale)
    in_zp = get_const_int_value(input_zero_point)
    out_zp = get_const_int_value(output_zero_point)
    return qnn_avg_pool2d_NCHW(
        data,
        kernel,
        stride,
        padding,
        dilation,
        count_include_pad,
        oshape,
        odtype,
        in_scale,
        in_zp,
        out_scale,
        out_zp,
    )
def qnn_avg_pool2d_wrapper_compute_NHWC(
    data: te.Tensor,
    kernel: list,
    stride: list,
    padding: list,
    dilation: list,
    count_include_pad: bool,
    oshape: list,
    odtype: str,
    # quantization params:
    input_scale: float,
    input_zero_point: int,
    output_scale: float,
    output_zero_point: int,
):
    """Extract qnn params"""
    # All four quantization parameters must be scalar constants.
    all_scalar = (
        is_scalar(input_scale)
        and is_scalar(output_scale)
        and is_scalar(input_zero_point)
        and is_scalar(output_zero_point)
    )
    if not all_scalar:
        raise RuntimeError("quantization parameters should be scalar tensors")
    in_scale = get_const_float_value(input_scale)
    out_scale = get_const_float_value(output_scale)
    in_zp = get_const_int_value(input_zero_point)
    out_zp = get_const_int_value(output_zero_point)
    return qnn_avg_pool2d_NHWC(
        data,
        kernel,
        stride,
        padding,
        dilation,
        count_include_pad,
        oshape,
        odtype,
        in_scale,
        in_zp,
        out_scale,
        out_zp,
    )
def schedule_qnn_avg_pool2d(outs):
    """Default TE schedule for qnn.avg_pool2d.

    Parameters
    ----------
    outs: Array of Tensor
        Output tensors of the qnn.avg_pool2d compute graph.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, tvm.te.tensor.Tensor):
        outs = [outs]
    sch = tvm.te.create_schedule([tensor.op for tensor in outs])
    tvm.te.schedule.AutoInlineInjective(sch)
    return sch
def schedule_8h8w32c(outs: te.Tensor, ins: te.Tensor, output_layout: str, input_layout: str):
    """Schedule for input and output layout 8h8w32c"""
    sched = tir.Schedule(te.create_prim_func([ins, outs]))
    sum_block = sched.get_block("pool_sum")
    avg_block = sched.get_block("pool_avg")
    vtcm = "global.vtcm"
    # Stage buffers through VTCM; the returned cache-stage handles are unused.
    sched.cache_read(sum_block, 0, vtcm)
    sched.cache_read(avg_block, 0, vtcm)
    sched.cache_write(avg_block, 0, vtcm)
    input_fn = get_layout_transform_fn(input_layout)
    output_fn = get_layout_transform_fn(output_layout)
    sched.transform_layout(sum_block, ("read", 0), input_fn, pad_value=0)
    sched.transform_layout(avg_block, ("read", 0), input_fn, pad_value=0)
    sched.transform_layout(avg_block, ("write", 0), output_fn, pad_value=0)
    return sched
def schedule_2048c(outs: te.Tensor, ins: te.Tensor, output_layout: str, input_layout: str):
    """Schedule for output layout: 2048c, input layout: 8h8w32c

    Stages the pooling sum input and average output through VTCM, applies
    the layout transforms, and vectorizes 128-channel chunks of the output.
    """
    func = te.create_prim_func([ins, outs])
    s = tir.Schedule(func)
    Sum = s.get_block("pool_sum")
    Avg = s.get_block("pool_avg")
    mem_scope = "global.vtcm"
    # Cache-stage handles are intentionally unused (module disables
    # unused-variable); the calls mutate the schedule in place.
    sum_read = s.cache_read(Sum, 0, mem_scope)
    avg_write = s.cache_write(Avg, 0, mem_scope)
    input_transform_fn = get_layout_transform_fn(input_layout)
    output_transform_fn = get_layout_transform_fn(output_layout)
    s.transform_layout(Sum, ("read", 0), input_transform_fn, pad_value=0)
    s.transform_layout(Avg, ("write", 0), output_transform_fn, pad_value=0)
    # Schedule 'Avg'
    # Split and reorder the axes to iterate over the output tensor chunks.
    # Each chunk consists for 2048 bytes. For n11c-2048c tensor layout, each chunk
    # only contains 2048 channels which get split by a factor of 128 to be vectorized.
    # NOTE: These schedules are a work in progress and may require
    # adjustments in future as some of the missing features for 2-d tensors
    # become available.
    if output_layout == "n11c-2048c-2d":
        _, _, _, c = s.get_loops(Avg)
    else:
        # nc11-2048c-2d: channels are the second loop.
        _, c, _, _ = s.get_loops(Avg)
    # n, h, w, c = s.get_loops(Avg)
    co, ci = s.split(c, [None, 2048])
    cio, cii = s.split(ci, [None, 128])
    s.vectorize(cii)
    # Schedule 'Sum'
    # Compute for 'Sum' includes reduction along height and width. The axes are being
    # reordered so that 128 channels become the inner-most loop and can be vectorized.
    # However, vectorization of the 2-d tensors doesn't work when reduction is
    # involved and requires codegen support that is yet to be added.
    s.compute_at(Sum, cio)
    Sum_axis = s.get_loops(Sum)
    s.reorder(Sum_axis[-2], Sum_axis[-1], Sum_axis[-3])
    # s.vectorize(Sum_axis[-3]) # Doesn't work
    return s
def qnn_avg_pool2d_schedule(outs: te.Tensor, ins: te.Tensor, output_layout: str, input_layout: str):
"""Quantized avg_pool2d schedule"""
if output_layout == "nhwc-8h8w32c-2d" or output_layout == "nchw-8h8w32c-2d":
return schedule_8h8w32c(outs, ins, output_layout, input_layout)
if output_layout == "n11c-2048c-2d" or output_layout == "nc11-2048c-2d":
return schedule_2048c(outs, ins, output_layout, input_layout)
raise RuntimeError(f"Unexpected layout '{output_layout}'")
| 13,456 | 31.19378 | 100 | py |
tvm | tvm-main/python/tvm/topi/hexagon/qnn/nn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hexagon QNN operators"""
# pylint: disable=invalid-name
from typing import Union
import numpy as np
import tvm
from tvm import te, topi
from ..utils import (
saturate,
is_scalar,
get_const_int_value,
get_const_float_value,
get_fixed_point_value,
)
from ...utils import get_const_tuple
from ...nn.utils import get_pad_tuple
from ...nn.pad import pad
from ... import tag, nn
from ..conv2d import conv2d_NCHWc_int8
from ...transform import concatenate
def clip_cast(val, dtype):
    """Clamp ``val`` into the representable range of ``dtype``, then cast."""
    lo = tvm.tir.min_value(dtype)
    hi = tvm.tir.max_value(dtype)
    clamped = tvm.te.min(val, hi)
    return te.max(clamped, lo).astype(dtype)
def get_qnn_param(param, indices, axis):
    """Return the quantization parameter for the given indices.

    Scalar (rank-0) params are returned as-is; 1D params are indexed along
    ``axis`` with the index wrapped modulo the param's length.
    """
    if len(param.shape) == 0:
        return param
    wrapped_idx = tvm.tir.indexmod(indices[axis], topi.shape(param)[0])
    return param[wrapped_idx]
def subtract_zero_point(tensor: te.Tensor, zero_point: Union[te.Tensor, tvm.tir.IntImm], name: str):
    """
    Subtract zero point from given tensor. If zero point is scalar constant and is equal to 0, then
    it can be optimized and return tensor as it is.
    This new block is marked with 'meta_schedule.inline_rule = disable' attribute to disable inline.
    Otherwise, inline prevents from tensorization and leveraging vrmpy intrinsic
    """
    if is_scalar(zero_point) and get_const_int_value(zero_point) == 0:
        # Constant zero point of 0: subtraction is a no-op, skip the stage.
        return tensor

    def _subtract(*idx):
        return te.subtract(tensor(*idx), zero_point).astype(tensor.dtype)

    return te.compute(
        tensor.shape,
        _subtract,
        name=name,
        attrs={"meta_schedule.inline_rule": "disable"},
    )
def default_schedule(outs):
    """Simple default schedule for QNN ops.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of dense in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, tvm.te.tensor.Tensor):
        outs = [outs]
    sch = tvm.te.create_schedule([tensor.op for tensor in outs])
    tvm.te.schedule.AutoInlineInjective(sch)
    vector_bytes = 128
    for tensor in outs:
        # Fuse all axes, then split so the inner loop spans 128 bytes worth
        # of elements; vectorize inner, parallelize outer.
        flat = sch[tensor].fuse(*tensor.op.axis)
        outer, inner = sch[tensor].split(
            flat, factor=vector_bytes // np.dtype(tensor.dtype).itemsize
        )
        sch[tensor].vectorize(inner)
        sch[tensor].parallel(outer)
    return sch
def qnn_quantize(data, output_scale, output_zero_point, axis=-1, out_dtype="int8"):
    """Compute for qnn.quantize

    Q_output = clamp((round(input_tensor/output_scale) + output_zero_point),
                     out_dtype::min,
                     out_dtype::max)
    """
    assert len(output_scale.shape) in (0, 1)
    assert len(output_zero_point.shape) in (0, 1)

    def _quantize(*idx):
        scale = get_qnn_param(output_scale, idx, axis)
        zero_point = get_qnn_param(output_zero_point, idx, axis)
        rounded = te.round(te.div(data(*idx), scale))
        return clip_cast(te.add(rounded, zero_point), out_dtype)

    return te.compute(data.shape, _quantize, tag=tag.ELEMWISE)
def schedule_qnn_quantize(outs):
    """Schedule for qnn.quantize.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of qnn.quantize
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op (the shared default schedule).
    """
    return default_schedule(outs)
def qnn_dequantize(data, input_scale, input_zero_point, axis=-1):
    """Compute for qnn.dequantize

    fp_output = input_scale * (Q_input - input_zero_point)
    """

    def _dequantize(*idx):
        scale = get_qnn_param(input_scale, idx, axis)
        zero_point = get_qnn_param(input_zero_point, idx, axis)
        return te.multiply(scale, te.subtract(data(*idx), zero_point))

    return te.compute(data.shape, _dequantize, tag=tag.ELEMWISE)
def schedule_qnn_dequantize(outs):
    """Schedule for qnn.dequantize.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of qnn.dequantize
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op (the shared default schedule).
    """
    return default_schedule(outs)
def qnn_requantize(
    data: te.Tensor,
    input_scale: te.Tensor,
    input_zp: te.Tensor,
    output_scale: te.Tensor,
    output_zp: te.Tensor,
    axis=-1,
    out_dtype="int8",
):
    """Compute for qnn.requantize
    If both input and output scales are constant scalars then we convert scale to fixed point value
    and use integer arithmetic only for performance optimization purpose.
    But this is a tradeoff between performance and accuracy, since we use int16 data type to
    represent fixed point values (against QNN lowering approach where we use int32 for that).
    if input and/or output scales are not constant scalars then we use the following formula:
        Q_output = zp_output + round((scale_input)/(scale_output) * (Q_input - zp_input))
    TODO: support 'rounding' and 'compute_dtype' arguments.
    """
    if is_scalar(input_scale) and is_scalar(output_scale):
        # Fast path: both scales are compile-time constants, so fold them into a
        # single int16 fixed-point multiplier plus a right-shift amount.
        iscale = get_const_float_value(input_scale)
        oscale = get_const_float_value(output_scale)
        scale = iscale / oscale
        scale_fixed_point, rsh = get_fixed_point_value(scale, "int16")
        def _compute(*indices):
            value = data(*indices)
            # Subtract input zero point:
            sub = te.subtract(value, input_zp)
            # Fixed point multiply + roundup delta:
            # (1 << (rsh - 1)) adds 0.5 ulp so the shift rounds to nearest.
            mul = (sub * scale_fixed_point + (1 << (rsh - 1))) >> rsh
            # Add output zero point + clip + cast:
            return saturate(te.add(mul, output_zp), out_dtype).astype(out_dtype)
        return te.compute(data.shape, _compute, name="requantize")
    else:
        # General path: scales are tensors (runtime or per-channel along `axis`),
        # so compute in floating point and round explicitly.
        def _compute(*indices):
            value = data(*indices)
            iscale = get_qnn_param(input_scale, indices, axis)
            oscale = get_qnn_param(output_scale, indices, axis)
            # Subtract input zero point:
            sub = te.subtract(value, input_zp)
            mul = te.div(iscale, oscale)
            val = te.add(te.round(te.multiply(mul, sub)), output_zp)
            # clip + cast:
            return saturate(val, out_dtype).astype(out_dtype)
        return te.compute(data.shape, _compute, name="requantize")
def schedule_qnn_requantize(outs):
    """Schedule for qnn.requantize

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of qnn.requantize
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # Elementwise op: the shared default injective schedule is sufficient.
    return default_schedule(outs)
def compute_qnn_binary_op(
    lhs, rhs, lhs_scale, lhs_zp, rhs_scale, rhs_zp, output_scale, output_zp, func
):
    """Compute for QNN binary operation
    If rhs/lhs/output scales are constant scalars then we convert scale to fixed point value
    and use integer arithmetic only for performance optimization purpose.
    But this is a tradeoff between performance and accuracy, since we use int16 data type to
    represent fixed point values (against QNN lowering approach where we use int32 for that).
    if rhs/lhs/output scales are not constant scalars then we use the following formula:
        Q_output = output_zp + round((lhs_scale)/(output_scale) * (lhs_input - lhs_zp))
                   _OP_ round((rhs_scale)/(output_scale) * (rhs_input - rhs_zp))
        where _OP_ is add/subtract
    """
    assert lhs.dtype == rhs.dtype
    dtype = lhs.dtype
    def _compute_const(x: te.Tensor, iscale, input_zp):
        # Scalar operand: rescale into the output quantization domain in
        # floating point and round to int32.
        return te.round(te.multiply(te.div(iscale, output_scale), te.subtract(x, input_zp))).astype(
            "int32"
        )
    def _compute_tensor(x: te.Tensor, input_scale, input_zp):
        # Tensor operand: prefer integer fixed-point math when scales are constant.
        if is_scalar(input_scale) and is_scalar(output_scale):
            # Read the constant value straight out of the scalar compute op.
            iscale = input_scale.op.body[0].value
            oscale = output_scale.op.body[0].value
            scale = iscale / oscale
            scale_fixed_point, rsh = get_fixed_point_value(scale, "int16")
            return te.compute(
                x.shape,
                lambda *i: (te.subtract(x(*i), input_zp) * scale_fixed_point + (1 << (rsh - 1)))
                >> rsh,
            )
        else:
            return te.compute(
                x.shape,
                lambda *i: te.round(
                    te.multiply(te.div(input_scale, output_scale), te.subtract(x(*i), input_zp))
                ).astype("int32"),
            )
    if is_scalar(lhs):
        lhs_tensor = _compute_const(lhs, lhs_scale, lhs_zp)
    else:
        lhs_tensor = _compute_tensor(lhs, lhs_scale, lhs_zp)
    if is_scalar(rhs):
        rhs_tensor = _compute_const(rhs, rhs_scale, rhs_zp)
    else:
        rhs_tensor = _compute_tensor(rhs, rhs_scale, rhs_zp)
    # Binary op with broadcasting
    tensor = func(lhs_tensor, rhs_tensor)
    # Add output zero point and clip+cast.
    def _compute(*indices):
        return saturate(te.add(tensor(*indices), output_zp), dtype).astype(dtype)
    return te.compute(tensor.shape, _compute)
def qnn_add(lhs, rhs, lhs_scale, lhs_zp, rhs_scale, rhs_zp, output_scale, output_zp):
    """Compute for qnn.add

    Thin wrapper over compute_qnn_binary_op with topi.add as the combiner.
    TODO: support 'axis' argument.
    """
    return compute_qnn_binary_op(
        lhs, rhs, lhs_scale, lhs_zp, rhs_scale, rhs_zp, output_scale, output_zp, topi.add
    )
def schedule_qnn_add(outs):
    """Schedule for qnn.add

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of qnn.add
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # Elementwise op: the shared default injective schedule is sufficient.
    return default_schedule(outs)
def qnn_subtract(lhs, rhs, lhs_scale, lhs_zp, rhs_scale, rhs_zp, output_scale, output_zp):
    """Compute for qnn.subtract

    Thin wrapper over compute_qnn_binary_op with topi.subtract as the combiner.
    """
    return compute_qnn_binary_op(
        lhs, rhs, lhs_scale, lhs_zp, rhs_scale, rhs_zp, output_scale, output_zp, topi.subtract
    )
def schedule_qnn_subtract(outs):
    """Schedule for qnn.subtract

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of qnn.subtract
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # Elementwise op: the shared default injective schedule is sufficient.
    return default_schedule(outs)
def qnn_mul(
    lhs: te.Tensor,
    rhs: te.Tensor,
    lhs_scale: te.Tensor,
    lhs_zp: te.Tensor,
    rhs_scale: te.Tensor,
    rhs_zp: te.Tensor,
    output_scale: te.Tensor,
    output_zp: te.Tensor,
):
    """Compute for qnn.mul
    mul = (lhs_input - lhs_zp) * (rhs_input - rhs_zp)
    Q_output = requantize(mul, lhs_scale * rhs_scale, 0, output_scale, output_zp)
    """
    assert lhs.dtype == rhs.dtype
    odtype = lhs.dtype
    def _compute_tensor(tensor, zero_point):
        # Subtract the zero point; scalars are handled symbolically, tensors elementwise.
        if is_scalar(tensor):
            return tensor - zero_point
        else:
            return te.compute(tensor.shape, lambda *i: te.subtract(tensor(*i), zero_point))
    lhs_tensor = _compute_tensor(lhs, lhs_zp)
    rhs_tensor = _compute_tensor(rhs, rhs_zp)
    # Multiply with broadcasting.
    mul = topi.multiply(lhs_tensor, rhs_tensor)
    if is_scalar(lhs_scale) and is_scalar(rhs_scale):
        # Both scales are scalar compute ops: fold their product at compile time
        # so qnn_requantize can take its constant fixed-point fast path.
        assert isinstance(lhs_scale, te.Tensor)
        assert isinstance(rhs_scale, te.Tensor)
        iscale = lhs_scale.op.body[0] * rhs_scale.op.body[0]
    else:
        iscale = lhs_scale * rhs_scale
    # The product is already zero-centered, hence input_zp = 0 for requantize.
    return qnn_requantize(mul, iscale, tvm.tir.const(0), output_scale, output_zp, out_dtype=odtype)
def schedule_qnn_mul(outs):
    """Schedule for qnn.mul

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of qnn.mul
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # Elementwise op: the shared default injective schedule is sufficient.
    return default_schedule(outs)
def qnn_tanh(data, input_scale, input_zp, output_scale, output_zp):
    """Compute for qnn.tanh

    Q_output = quantize(tanh(dequantize(data)))
    """
    # Dequantize to float, apply tanh elementwise, then quantize back to the
    # input's integer dtype.
    dequantized = qnn_dequantize(data, input_scale, input_zp)
    activated = te.compute(dequantized.shape, lambda *idx: te.tanh(dequantized(*idx)))
    return qnn_quantize(activated, output_scale, output_zp, out_dtype=data.dtype)
def schedule_qnn_tanh(outs):
    """Schedule for qnn.tanh

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of qnn.tanh
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # Elementwise op: the shared default injective schedule is sufficient.
    return default_schedule(outs)
def qnn_concatenate(data, axis, out_dtype):
    """Compute for qnn.concatenate

    Parameters
    ----------
    data: Array of Tensor
        The computation graph description of qnn.concatenate
        in the format of an array of tensors.

    axis: int
        The axis along which the tensors are concatenated.

    out_dtype: string
        Data type of output tensor

    Returns
    -------
    out: Tensor
        The computation for the op.
    """
    # 'data' flattens three equally sized groups — input tensors, their scales,
    # their zero points — followed by the output scale and output zero point.
    out_scale = data[-2]
    out_zp = data[-1]
    num_of_tuples = 3
    assert (len(data) - 2) % num_of_tuples == 0
    num_inputs = (len(data) - 2) // num_of_tuples
    # Requantize every input into the common output quantization domain.
    requantized = [
        qnn_requantize(
            data[i],
            data[i + num_inputs],
            data[i + 2 * num_inputs],
            out_scale,
            out_zp,
            out_dtype=out_dtype,
        )
        for i in range(num_inputs)
    ]
    # Call generic implementation of concatenate.
    return concatenate(requantized, axis)
def schedule_qnn_concatenate(outs):
    """Schedule for qnn.concatenate

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of qnn.concatenate
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # Injective op: the shared default schedule is sufficient.
    return default_schedule(outs)
def qnn_conv2d( # Conv2d inputs
    data,
    weight,
    # Conv2d quantization params:
    input_zero_point,
    kernel_zero_point,
    _input_scale,
    _kernel_scale,
    # bias
    bias,
    # Requantization params:
    rq_input_scale,
    rq_input_zero_point,
    rq_output_scale,
    rq_output_zero_point,
    # Conv2d attributes:
    strides,
    padding,
    dilation,
    oshape,
    odtype,
):
    """Compute for qnn.conv2d with NCHW layout.

    Output data type should be specified through the 'odtype' parameter. qnn.conv2d leverages int32
    type to store intermediate results. If 'odtype' differs from int32, you need to specify
    requantization parameters.
    """
    in_channel = data.shape[1]  # NCHW layout
    kernel_height = weight.shape[2]  # OIHW layout
    kernel_width = weight.shape[3]  # OIHW layout
    height_stride, width_stride = strides
    dilation_h, dilation_w = dilation
    # Effective kernel extent once dilation holes are accounted for.
    dilated_kernel_h = (kernel_height - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_width - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        get_const_tuple(padding), (dilated_kernel_h, dilated_kernel_w)
    )
    # Subtract zero point from weights. axis=0 in get_qnn_param means 'O' dimension in "OIHW"
    # weights layout.
    weight = te.compute(
        weight.shape,
        lambda *indices: te.subtract(
            weight(*indices), get_qnn_param(kernel_zero_point, indices, axis=0)
        ),
    )
    # Subtract zero point from input and then do padding with 0 value
    # (order matters: padding after the subtraction keeps the pad region neutral).
    data = te.compute(data.shape, lambda *indices: te.subtract(data(*indices), input_zero_point))
    # DOPAD
    if pad_top != 0 or pad_down != 0 or pad_left != 0 or pad_right != 0:
        pad_before = (0, 0, pad_top, pad_left)
        pad_after = (0, 0, pad_down, pad_right)
        data_pad = pad(data, pad_before, pad_after, name="data_pad")
    else:
        data_pad = data
    # Reduction over input channels and the kernel window; accumulate in int32.
    ic = te.reduce_axis((0, in_channel), name="ic")
    kh = te.reduce_axis((0, kernel_height), name="kh")
    kw = te.reduce_axis((0, kernel_width), name="kw")
    out = te.compute(
        oshape,
        lambda n, oc, oh, ow: te.sum(
            data_pad[
                n, ic, oh * height_stride + kh * dilation_h, ow * width_stride + kw * dilation_w
            ].astype("int32")
            * weight[oc, ic, kh, kw].astype("int32"),
            axis=[ic, kh, kw],
        ),
    )
    # Add bias
    # NOTE(review): bias is expected as a 4-D tensor broadcast over H/W
    # (shape[2] == shape[3] == 1) — enforced by the asserts below.
    if bias is not None:
        assert len(out.shape) == len(bias.shape)
        assert bias.shape[2] == 1 and bias.shape[3] == 1
        out = te.compute(out.shape, lambda n, c, h, w: out[n, c, h, w] + bias[n, c, 0, 0])
    # Requantize output of convolution
    # Q_output = zp_output + round((scale_input)/(scale_output) * (Q_input - zp_input))
    if rq_input_scale is not None and rq_output_scale is not None:
        # Now supported only scalar and 1D quantization parameters
        assert len(rq_input_scale.shape) == 0 or len(rq_input_scale.shape) == 1
        assert len(rq_output_scale.shape) == 0 or len(rq_output_scale.shape) == 1
        axis = -1
        if len(rq_input_scale.shape) == 1 or len(rq_output_scale.shape) == 1:
            axis = 1  # Axis param should correspond to 'C' dimension.
        return qnn_requantize(
            out,
            rq_input_scale,
            rq_input_zero_point,
            rq_output_scale,
            rq_output_zero_point,
            axis,
            odtype,
        )
    # No requantization requested: return the raw int32 accumulator.
    return out
def schedule_qnn_conv2d(outs):
    """Schedule for qnn.conv2d

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of qnn.conv2d
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # Falls back to the generic default schedule; no conv-specific tiling here.
    return default_schedule(outs)
def qnn_conv2d_NCHWc_int8( # Conv2d inputs
    data,
    weight,
    # Conv2d quantization params:
    input_zero_point,
    kernel_zero_point,
    _input_scale,
    _kernel_scale,
    # bias
    bias,
    # Requantization params:
    rq_input_scale,
    rq_input_zero_point,
    rq_output_scale,
    rq_output_zero_point,
    # Conv2d attributes:
    strides,
    padding,
    dilation,
    _oshape,
    odtype,
):
    """Compute for qnn.conv2d with NCHWc layout.

    Zero points are folded into the operands up front, then the generic
    conv2d_NCHWc_int8 kernel does the heavy lifting; optional bias add and
    requantization follow.
    """
    # Subtract zero point from input and weights.
    weight = subtract_zero_point(weight, kernel_zero_point, "weight_zp")
    data = subtract_zero_point(data, input_zero_point, "data_zp")
    strides = get_const_tuple(strides)
    padding = get_const_tuple(padding)
    dilation = get_const_tuple(dilation)
    # Fixed to the 32-channel-chunk physical layout on both sides.
    out = conv2d_NCHWc_int8(data, weight, strides, padding, dilation, "NCHW32c", "NCHW32c")
    # Add bias
    # NOTE(review): bias is expected as a 5-D NCHWc tensor broadcast over H/W.
    if bias is not None:
        assert len(out.shape) == len(bias.shape)
        assert bias.shape[2] == 1 and bias.shape[3] == 1
        out = te.compute(
            out.shape,
            lambda n, c, h, w, ci: out[n, c, h, w, ci] + bias[n, c, 0, 0, ci],
            name="bias_add",
        )
    # Requantize output of convolution
    # Q_output = zp_output + round((scale_input)/(scale_output) * (Q_input - zp_input))
    if rq_input_scale is not None and rq_output_scale is not None:
        # Now supported only scalar and 1D quantization parameters
        assert len(rq_input_scale.shape) == 0 or len(rq_input_scale.shape) == 1
        assert len(rq_output_scale.shape) == 0 or len(rq_output_scale.shape) == 1
        axis = -1
        if len(rq_input_scale.shape) == 1 or len(rq_output_scale.shape) == 1:
            axis = 1  # Axis param should correspond to 'C' dimension.
        return qnn_requantize(
            out,
            rq_input_scale,
            rq_input_zero_point,
            rq_output_scale,
            rq_output_zero_point,
            axis,
            odtype,
        )
    # No requantization requested: return the raw accumulator.
    return out
def schedule_qnn_conv2d_NCHWc_int8(outs):
    """Schedule for qnn.conv2d with NCHWc layout.

    Falls back to the generic default schedule.
    """
    return default_schedule(outs)
def qnn_depthwise_conv2d( # Conv2d inputs
    data,
    weight,
    # Conv2d quantization params:
    input_zero_point,
    kernel_zero_point,
    _input_scale,
    _kernel_scale,
    # bias
    bias,
    # Requantization params:
    rq_input_scale,
    rq_input_zero_point,
    rq_output_scale,
    rq_output_zero_point,
    # Conv2d attributes:
    strides,
    padding,
    dilation,
    oshape,
    odtype,
):
    """Compute for qnn.conv2d with NCHW layout (depthwise, channel_multiplier == 1)

    Output data type should be specified through the 'odtype' parameter. qdepthwise nn.conv2d
    leverages int32 type to store intermediate results. If 'odtype' differs from int32, you need to
    specify requantization parameters.
    """
    kernel_height = weight.shape[2]  # OIHW layout
    kernel_width = weight.shape[3]  # OIHW layout
    height_stride, width_stride = strides
    dilation_h, dilation_w = dilation
    # Effective kernel extent once dilation holes are accounted for.
    dilated_kernel_h = (kernel_height - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_width - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        get_const_tuple(padding), (dilated_kernel_h, dilated_kernel_w)
    )
    # Subtract zero point from input and then do padding with 0 value
    # (padding after the subtraction keeps the pad region neutral).
    data = te.compute(data.shape, lambda *indices: te.subtract(data(*indices), input_zero_point))
    # DOPAD
    if pad_top != 0 or pad_down != 0 or pad_left != 0 or pad_right != 0:
        pad_before = (0, 0, pad_top, pad_left)
        pad_after = (0, 0, pad_down, pad_right)
        data_pad = pad(data, pad_before, pad_after, name="data_pad")
    else:
        data_pad = data
    # Depthwise: reduce only over the kernel window; each output channel reads
    # its own input channel (weight index [oc, 0, kh, kw]).
    kh = te.reduce_axis((0, kernel_height), name="kh")
    kw = te.reduce_axis((0, kernel_width), name="kw")
    out = te.compute(
        oshape,
        lambda n, oc, oh, ow: te.sum(
            data_pad[
                n, oc, oh * height_stride + kh * dilation_h, ow * width_stride + kw * dilation_w
            ].astype("int32")
            * te.subtract(weight[oc, 0, kh, kw], kernel_zero_point).astype("int32"),
            axis=[kh, kw],
        ),
    )
    # Add bias
    # NOTE(review): bias is expected as a 4-D tensor broadcast over H/W.
    if bias is not None:
        assert len(out.shape) == len(bias.shape)
        assert bias.shape[2] == 1 and bias.shape[3] == 1
        out = te.compute(out.shape, lambda n, c, h, w: out[n, c, h, w] + bias[n, c, 0, 0])
    # Requantize output of convolution
    # Q_output = zp_output + round((scale_input)/(scale_output) * (Q_input - zp_input))
    if rq_input_scale is not None and rq_output_scale is not None:
        # Now supported only scalar and 1D quantization parameters
        assert len(rq_input_scale.shape) == 0 or len(rq_input_scale.shape) == 1
        assert len(rq_output_scale.shape) == 0 or len(rq_output_scale.shape) == 1
        axis = -1
        if len(rq_input_scale.shape) == 1 or len(rq_output_scale.shape) == 1:
            axis = 1  # Axis param should correspond to 'C' dimension.
        return qnn_requantize(
            out,
            rq_input_scale,
            rq_input_zero_point,
            rq_output_scale,
            rq_output_zero_point,
            axis,
            odtype,
        )
    # No requantization requested: return the raw int32 accumulator.
    return out
def schedule_qnn_depthwise_conv2d(outs):
    """Schedule for depthwise qnn.conv2d

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of qnn.conv2d
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # Falls back to the generic default schedule.
    return default_schedule(outs)
def qnn_dense(
    data,
    weight,
    # Dense quantization params:
    input_zero_point,
    kernel_zero_point,
    _input_scale,
    _kernel_scale,
    # bias
    bias,
    # Requantization params:
    rq_input_scale,
    rq_input_zero_point,
    rq_output_scale,
    rq_output_zero_point,
    out_dtype,
):
    """Compute for qnn.dense

    Output data type should be specified through the 'odtype' parameter. qnn.dense leverages int32
    type to store intermediate results. If 'odtype' differs from int32, you need to specify
    requantization parameters.
    """
    M, K = get_const_tuple(data.shape)  # data is (M, K)
    N, _ = get_const_tuple(weight.shape)  # weight is (N, K)
    k = te.reduce_axis((0, K), "k")
    # This implementation uses "int32" dense output data type.
    # axis=0 in get_qnn_param mean 'N' dimension in "NK" weights layout.
    out = te.compute(
        (M, N),
        lambda m, n: te.sum(
            te.subtract(data[m, k], input_zero_point).astype("int32")
            * te.subtract(weight[n, k], get_qnn_param(kernel_zero_point, (n, k), axis=0)).astype(
                "int32"
            ),
            axis=k,
        ),
    )
    # Add bias
    # NOTE(review): bias is indexed as bias[0, c], i.e. assumed shape (1, N) — confirm at callers.
    if bias is not None:
        out = te.compute(out.shape, lambda n, c: out[n, c] + bias[0, c])
    # Requantize output of dense
    # Q_output = zp_output + round((scale_input)/(scale_output) * (Q_input - zp_input))
    if rq_input_scale is not None and rq_output_scale is not None:
        # Now supported only scalar and 1D quantization parameters
        assert len(rq_input_scale.shape) == 0 or len(rq_input_scale.shape) == 1
        assert len(rq_output_scale.shape) == 0 or len(rq_output_scale.shape) == 1
        axis = -1
        if len(rq_input_scale.shape) == 1 or len(rq_output_scale.shape) == 1:
            axis = 1  # Axis param should correspond to 'N' dimension.
        return qnn_requantize(
            out,
            rq_input_scale,
            rq_input_zero_point,
            rq_output_scale,
            rq_output_zero_point,
            axis,
            out_dtype,
        )
    # No requantization requested: return the raw int32 accumulator.
    return out
def schedule_qnn_dense(outs):
    """Schedule for qnn.dense

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of qnn.dense
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # Falls back to the generic default schedule.
    return default_schedule(outs)
def qnn_dense_pack_vrmpy(
    data: te.Tensor,
    weight: te.Tensor,
    # Dense quantization params:
    input_zero_point: te.Tensor,
    kernel_zero_point: te.Tensor,
    _input_scale: te.Tensor,
    _kernel_scale: te.Tensor,
    # bias
    bias: te.Tensor,
    # Requantization params:
    rq_input_scale: te.Tensor,
    rq_input_zero_point: te.Tensor,
    rq_output_scale: te.Tensor,
    rq_output_zero_point: te.Tensor,
    out_dtype: str,
):
    """Compute for qnn.contrib_dense_pack

    Output data type should be specified through the 'odtype' parameter. qnn.dense leverages int32
    type to store intermediate results. If 'odtype' differs from int32, you need to specify
    requantization parameters.
    """
    # Subtract zero point from input and weights.
    weight = subtract_zero_point(weight, kernel_zero_point, "weight_zp")
    data = subtract_zero_point(data, input_zero_point, "data_zp")
    # Required for vrmpy intrinsic
    assert "int8" in weight.dtype and "int8" in data.dtype
    M, K = get_const_tuple(data.shape)
    N_O, _, N_I, _ = get_const_tuple(weight.shape)
    k = te.reduce_axis((0, K), "k")
    # Weight is pre-packed; the 32/4 factors below match the vrmpy packing
    # (32 output channels x 4 reduction lanes per chunk).
    out = te.compute(
        (M, N_O * N_I),
        lambda m, n: te.sum(
            data[m, k].astype("int32")
            * weight[
                tvm.tir.indexdiv(n, 32),
                tvm.tir.indexdiv(k, 4),
                tvm.tir.indexmod(n, 32),
                tvm.tir.indexmod(k, 4),
            ].astype("int32"),
            axis=k,
        ),
        name="qnn_dense_pack",
    )
    # Add bias
    # NOTE(review): bias is indexed as bias[0, c], i.e. assumed shape (1, N) — confirm at callers.
    if bias is not None:
        assert bias.ndim == 2
        out = te.compute(out.shape, lambda n, c: out[n, c] + bias[0, c])
    # Requantize output of qnn.contrib_dense_pack
    if rq_input_scale is not None and rq_output_scale is not None:
        # Now supported only scalar and 1D quantization parameters
        assert rq_input_scale.ndim == 0 or rq_input_scale.ndim == 1
        assert rq_output_scale.ndim == 0 or rq_output_scale.ndim == 1
        axis = -1
        if rq_input_scale.ndim == 1 or rq_output_scale.ndim == 1:
            axis = 1  # Axis param should correspond to 'C' dimension.
        return qnn_requantize(
            out,
            rq_input_scale,
            rq_input_zero_point,
            rq_output_scale,
            rq_output_zero_point,
            axis,
            out_dtype,
        )
    # No requantization requested: return the raw int32 accumulator.
    return out
def schedule_qnn_dense_pack_vrmpy(outs):
    """Schedule for qnn.contrib_dense_pack

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of qnn.contrib_dense_pack
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # Falls back to the generic default schedule.
    return default_schedule(outs)
def qnn_batch_matmul(
    tensor_a,
    tensor_b,
    # batch_matmul quantization params:
    a_zero_point,
    b_zero_point,
    _a_scale,
    _b_scale,
    # Attributes
    transpose_a,
    transpose_b,
    out_dtype,
):
    """Compute for qnn.batch_matmul

    Both operands are recentered by their zero points, then the generic
    nn.batch_matmul handles transposition and accumulation.
    """

    def _shift(tensor, zero_point):
        # Elementwise zero-point subtraction.
        return te.compute(tensor.shape, lambda *idx: te.subtract(tensor(*idx), zero_point))

    return nn.batch_matmul(
        _shift(tensor_a, a_zero_point),
        _shift(tensor_b, b_zero_point),
        None,
        out_dtype,
        transpose_a,
        transpose_b,
    )
def schedule_qnn_batch_matmul(outs):
    """Schedule for qnn.batch_matmul

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of qnn.batch_matmul
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # Falls back to the generic default schedule.
    return default_schedule(outs)
| 31,077 | 29.231518 | 100 | py |
tvm | tvm-main/python/tvm/topi/hexagon/qnn/global_avg_pool2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Assumptions:
1) The input is in NCHW layout. Squeezenet is the only model that calls
nn.global_avg_pool2d and the only layout it uses is 'NCHW'.
2) Both input and output dtype is uint8 and
quantization parameter is provided to the op.
3) Input is assumed to always be multiple of fixed chunk 32c8h8w.
"""
from tvm import te
from tvm import tir
from ..utils import get_layout_transform_fn, get_fixed_point_value, saturate
def global_avg_pool2d_u8(
    data: te.Tensor,
    odtype: str,
    input_zero_point: int,
    input_scale: float,
    output_zero_point: int,
    output_scale: float,
):
    """global_avg_pool2d

    Averages each NCHW channel plane to a single value, folding the
    requantization (input scale/zp -> output scale/zp) into one fixed-point
    multiply-add so the whole op stays in integer arithmetic.
    """
    input_b, input_c, input_h, input_w = data.shape
    oshape = (input_b, input_c) + (1, 1)
    # Pick the narrowest accumulator that can hold the plane sum:
    # a u8/i8 sum over fewer than 256 elements fits in 16 bits.
    if input_h * input_w < 256:
        bits = "16"
    else:
        bits = "32"
    if odtype == "uint8":
        temp_dtype = "uint" + bits
    elif odtype == "int8":
        temp_dtype = "int" + bits
    else:
        raise RuntimeError(f"Unsupported output dtype, {odtype}'")
    pool_area = input_h * input_w
    rh_r = te.reduce_axis((0, input_h), name="rh_r")
    rw_r = te.reduce_axis((0, input_w), name="rw_r")
    # Combined averaging + requantization factor as an int16 fixed-point value.
    scale_with_area = input_scale / (output_scale * int(pool_area))
    scale_fixed_point, rsh = get_fixed_point_value(scale_with_area, "int16")
    # Constant correction: output zp (pre-shifted) minus the contribution of
    # the input zp summed over the whole pooling area.
    corr = (output_zero_point << rsh) - input_zero_point * pool_area * scale_fixed_point
    sum_compute = te.compute(
        oshape,
        lambda n, c, h, w: te.sum(
            data[n, c, h + rh_r, w + rw_r].astype(temp_dtype), axis=[rh_r, rw_r]
        ),
        name="sum",
    )
    avg_compute = te.compute(
        oshape,
        lambda n, c, h, w: saturate(
            ((sum_compute[n, c, h, w] * scale_fixed_point) + corr) >> rsh, odtype
        ).astype(odtype),
        name="global_avg_pool2d",
    )
    return avg_compute
def stir_global_avg_pool2d_u8_schedule(outs: te.Tensor, ins: te.Tensor, input_layout: str):
    """Schedule"""
    prim = te.create_prim_func([ins, outs])
    sched = tir.Schedule(prim)
    block = sched.get_block("sum")
    # Output is NxCx1x1 (not chunked), so only the input buffer gets the
    # physical-layout transform.
    index_map = get_layout_transform_fn(input_layout)
    sched.transform_layout(block, buffer=("read", 0), index_map=index_map)
    return sched
| 3,136 | 31.677083 | 91 | py |
tvm | tvm-main/python/tvm/topi/hexagon/qnn/qdepthwise_conv2d_slice.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument, too-many-locals
"""
Please note the following assumptions made by the implementation:
1) The input must be padded in advance to account for 'padding'. In addition,
both input and output must be padded as per the physical buffer layout.
2) 'padding' is ignored. It must be handled outside of the sliced op.
3) The weights are expected to be as per physical layout
The initial compute for quantized depthwise conv2d is as follows
where cm = channel_multiplier; assumed to be 1,
zp_a = Activation_zero_point,
zp_w = Weight_zero_point,
Qa = Quantized Activation,
Qw = Quantized Weights.
a) Qc(n, oh, ow, oc) = (Sigma(r, s) (Qw(r, s, oc%cm, oc/cm) - zp_w)
* (Qa(n, oh + r, ow + s, oc/cm) - zp_a))
* scale_value
where scale_value = (activation_scale * weight_scale) / output_scale
This can be written as
b) Qc(n, oh, ow, oc) = (t1 - t2 - t3 + t4) * scale_value
where t1 = Sigma(r, s) Qw(r, s, oc%cm, oc/cm) * Qa(n, oh + r, ow + s, oc/cm)
t2 = Sigma(r, s) zp_w * Qa(n, oh + r, ow + s, oc/cm)
t3 = Sigma(r, s) zp_a * Qw(r, s, oc%cm, oc/cm)
t4 = Sigma(r, s) zp_a * zp_w
    c) Qc(n, oh, ow, oc) = saturate(((t1 - t2 - t3 + t4) * fixed_scale_value) >> rsh)
where fixed_scale_value, rsh are fixed point values for scale_value.
Compute and schedule for quantized depthwise conv2d slice op"""
import typing
import tvm
from tvm import te
from ..utils import get_layout_transform_fn, get_fixed_point_value, saturate
def qdepthwise_conv2d_compute(
    activations: te.Tensor,
    weights: te.Tensor,
    out_shape: typing.Tuple,
    stride: typing.Tuple,
    dilation: typing.Tuple,
    dtype: str,
    # quantization params:
    activation_zero_point,
    activation_scale,
    weight_zero_point,
    weight_scale,
    output_zero_point,
    output_scale,
):
    """Compute for quantized depthwise conv2d (NHWC activations, HWIO weights).

    Implements the decomposition from the module docstring:
        Qc = saturate((((t1 - t2 - t3 + t4) * fixed_scale_value) >> rsh) + zp_out)
    where the four partial terms avoid materializing zero-point-subtracted
    copies of the activations and weights.

    Parameters
    ----------
    activations, weights : te.Tensor
        Quantized inputs; 'padding' must have been applied to activations already.
    out_shape : tuple
        Output NHWC shape.
    stride, dilation : tuple
        Spatial strides and dilations (height, width).
    dtype : str
        Output dtype; only "uint8" and "int8" are supported.

    Returns
    -------
    te.Tensor named "output" with dtype 'dtype'.
    """
    filt_shape = weights.shape
    ob, oh, ow, oc = out_shape
    # Accumulate partial sums in int32; the final rescale widens to int64 to
    # avoid overflow. Both supported output dtypes use the same widths, so the
    # previously duplicated uint8/int8 branches are merged.
    if dtype in ("uint8", "int8"):
        temp_dtype = "int32"
        big_dtype = "int64"
    else:
        # Bug fix: this message previously interpolated the undefined name
        # 'odtype' (a latent NameError); the parameter is called 'dtype'.
        raise RuntimeError(f"Unsupported output dtype, {dtype}'")
    reduce_height = tvm.te.reduce_axis((0, filt_shape[0]), name="reduce_height")
    reduce_width = tvm.te.reduce_axis((0, filt_shape[1]), name="reduce_width")
    stride_height, stride_width = stride
    dilation_height, dilation_width = dilation
    # Combined rescale factor as an int16 fixed-point multiplier plus shift.
    scale_value = (activation_scale * weight_scale) / output_scale
    fixed_scale_value, rsh = get_fixed_point_value(scale_value, "int16")
    # t1 = Sigma(r, s) Qw(r, s, 0, c) * Qa(n, h', w', c)
    t1 = tvm.te.compute(
        out_shape,
        lambda n, h, w, c: tvm.te.sum(
            (
                (
                    activations[
                        n,
                        h * stride_height + reduce_height * dilation_height,
                        w * stride_width + reduce_width * dilation_width,
                        c,
                    ].astype(temp_dtype)
                )
                * (weights[reduce_height, reduce_width, 0, c].astype(temp_dtype))
            ).astype(temp_dtype),
            axis=[reduce_height, reduce_width],
        ),
        name="t1",
    )
    # t2 = Sigma(r, s) zp_w * Qa(n, h', w', c)
    t2 = tvm.te.compute(
        out_shape,
        lambda n, h, w, c: tvm.te.sum(
            (
                (
                    activations[
                        n,
                        h * stride_height + reduce_height * dilation_height,
                        w * stride_width + reduce_width * dilation_width,
                        c,
                    ].astype(temp_dtype)
                )
                * weight_zero_point
            ).astype(temp_dtype),
            axis=[reduce_height, reduce_width],
        ),
        name="t2",
    )
    # t3 = Sigma(r, s) zp_a * Qw(r, s, 0, c) — depends only on the channel.
    t3 = tvm.te.compute(
        (oc,),
        lambda c: tvm.te.sum(
            (
                ((weights[reduce_height, reduce_width, 0, c].astype(temp_dtype)))
                * activation_zero_point
            ).astype(temp_dtype),
            axis=[reduce_height, reduce_width],
        ),
        name="t3",
    )
    # t4 = Sigma(r, s) zp_a * zp_w — a constant over the kernel area.
    t4 = activation_zero_point * weight_zero_point * reduce_height * reduce_width
    # Combine the terms, apply the fixed-point rescale (in int64), add the
    # output zero point, and saturate into the target dtype.
    output = tvm.te.compute(
        out_shape,
        lambda n, h, w, c: saturate(
            (
                (
                    (
                        ((t1[n, h, w, c]).astype(big_dtype) - t2[n, h, w, c] - t3[c] + t4)
                        * fixed_scale_value
                    )
                    >> rsh
                )
                + (output_zero_point).astype(big_dtype)
            ),
            dtype,
        ).astype(dtype),
        name="output",
    )
    return output
def qdepthwise_conv2d_schedule(
    outs: te.Tensor,
    ins: typing.List[te.Tensor],
    transform_activation_layout: str,
    transform_weights: str,
):
    """
    Schedule for quantized depthwise conv2d for input layout nhwc-8h8w32c
    assert len(ins) == 2, "This schedule expects only 2 inputs - Activations and Weights
    """
    source_expr = ins + [outs]
    prim_func = tvm.te.create_prim_func(source_expr)
    sch = tvm.tir.Schedule(prim_func)
    compute = sch.get_block("output")
    compute1 = sch.get_block("t1")
    transform_layout_fn = get_layout_transform_fn(transform_activation_layout)
    transform_layout_weights = get_layout_transform_fn(transform_weights)
    # Apply layout_transform for activation
    sch.transform_layout(compute1, ins[0].name, transform_layout_fn)
    # Apply layout_transform for weights
    sch.transform_layout(compute1, ins[1].name, transform_layout_weights)
    # Apply layout_transform for output
    sch.transform_layout(compute, outs.name, transform_layout_fn)
    # This returns the original 6d loop
    batch, height, width, channel, reduce_height, reduce_width = sch.get_loops(compute1)
    # Split spatial/channel loops to match the 8h8w32c physical chunk.
    h_outer, h_inner = sch.split(height, [None, 8])
    w_outer, w_inner = sch.split(width, [None, 8])
    c_outer, c_inner = sch.split(channel, [None, 32])
    # Loop order: all chunk-outer loops first, then the intra-chunk loops with
    # the reduction sandwiched so w_inner/c_inner stay innermost.
    sch.reorder(
        batch,
        h_outer,
        w_outer,
        c_outer,
        h_inner,
        reduce_height,
        reduce_width,
        w_inner,
        c_inner,
    )
    # Separate the accumulator initialization from the reduction update.
    sch.decompose_reduction(compute1, reduce_height)
    # Vectorization of the fused innermost loops is left disabled for now:
    # wi_ci = sch.fuse(w_inner,c_inner)
    # sch.vectorize(wi_ci)
    return sch
| 7,240 | 32.215596 | 90 | py |
tvm | tvm-main/python/tvm/topi/hexagon/qnn/adaptive_avg_pool1d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Compute and schedule for adaptive_avg_pool1d slice op
Following are few notes and assumptions made by the implementation:
Assumptions:
1) The input is in NCW layout. Distilbert is the only model that calls
nn.adaptive_avg_pool1d and the only layout it uses is 'NCW'.
2) The op takes output_size as an argument and
only handles the specialized case where output_size is 1.
The argument output_size is used as the value of output_width.
3) Both input and output dtype is uint8/int8 and
quantization parameter is provided to the op.
4) Input is assumed to always be multiple of fixed chunk 32c64w.
Notes:
1) If input width is used as output width, there can be two cases:
a. If the quantization parameters of input and output are same,
it can return the input as output so the op will be a no-op.
b. If the quantization parameters of input and output are different,
it will essentially be a requantize op.
2) If output_size is a value besides 1 or input_width,
adaptive_avg_pool1d may use dynamic stride and kernel for each output element.
When this case occurs, kernel won't be known at compile time. We want to use
the generic implementation nn.adaptive_avg_pool1d() for this case.
"""
from tvm import te
from tvm import tir
from ..utils import get_layout_transform_fn, get_fixed_point_value, saturate
def adaptive_avg_pool1d(
    data: te.Tensor,
    output_size: list,
    odtype: str,
    input_zero_point: int,
    input_scale: float,
    output_zero_point: int,
    output_scale: float,
):
    """adaptive_avg_pool1d compute.

    Parameters
    ----------
    data : te.Tensor
        Input activations in NCW layout.
    output_size : list
        Requested output width; only ``[1]`` is handled by this slice op.
    odtype : str
        Output dtype, either "uint8" or "int8".
    input_zero_point : int
        Quantization zero point of the input.
    input_scale : float
        Quantization scale of the input.
    output_zero_point : int
        Quantization zero point of the output.
    output_scale : float
        Quantization scale of the output.

    Returns
    -------
    te.Tensor
        Quantized average-pooled tensor of shape (n, c, 1).

    Raises
    ------
    RuntimeError
        If ``output_size[0]`` is not 1 or ``odtype`` is not uint8/int8.
    """
    _, _, inw = data.shape
    out_width = output_size[0]
    n, c = data.shape[:2]
    oshape = (n, c) + (out_width,)
    # Kernel is same as input_width since output_width is assumed to be 1
    if out_width == 1:
        kw_r = inw
    else:
        # Fixed: removed stray trailing apostrophe from the error message.
        raise RuntimeError(f"Unsupported output_size, {out_width}")
    # Accumulate the sum in a wider type so the uint8/int8 inputs cannot overflow.
    if odtype == "uint8":
        temp_dtype = "uint32"
    elif odtype == "int8":
        temp_dtype = "int32"
    else:
        # Fixed: removed stray trailing apostrophe from the error message.
        raise RuntimeError(f"Unsupported output dtype, {odtype}")
    # Fold the 1/kernel-area averaging factor into the requantization scale,
    # then express it as a fixed-point multiplier plus a right-shift amount.
    scale_with_area = input_scale / (output_scale * int(kw_r))
    scale_fixed_point, rsh = get_fixed_point_value(scale_with_area, "int16")
    # Correction term: pre-shifted output zero point minus the contribution of
    # the input zero point summed over the entire kernel window.
    corr = (output_zero_point << rsh) - input_zero_point * kw_r * scale_fixed_point
    rw_r = te.reduce_axis((0, kw_r), name="rw_r")
    # Stage 1: widened sum over the full input width (w is always 0 here).
    sum_compute = te.compute(
        oshape,
        lambda n, c, w: te.sum(data[n, c, w + rw_r].astype(temp_dtype), axis=[rw_r]),
        name="sum",
    )
    # Stage 2: scale, add the correction, shift back, and saturate to odtype.
    avg_compute = te.compute(
        oshape,
        lambda n, c, w: saturate(
            ((sum_compute[n, c, w] * scale_fixed_point) + corr) >> rsh, odtype
        ).astype(odtype),
        name="adaptive_avg_1d",
    )
    return avg_compute
def stir_schedule_ncw_32c64w(outs, ins, input_layout: str):
    """Schedule for input layout ncw-32c64w and output layout ncw."""
    sched = tir.Schedule(te.create_prim_func([ins, outs]))
    reduction_block = sched.get_block("sum")
    # Only the input is laid out in fixed 32c64w chunks; the output is NxCx1,
    # so the layout transform is applied to the read buffer alone.
    sched.transform_layout(
        reduction_block,
        buffer=("read", 0),
        index_map=get_layout_transform_fn(input_layout),
    )
    return sched
def tir_adaptive_avg_pool1d_schedule(outs, ins, output_layout: str, input_layout: str):
    """STIR based schedule"""
    # Only the plain ncw output layout is supported; reject anything else early.
    if output_layout != "ncw":
        raise RuntimeError(f"Unexpected layout '{output_layout}'")
    return stir_schedule_ncw_32c64w(outs, ins, input_layout)
| 4,364 | 35.07438 | 89 | py |
tvm | tvm-main/python/tvm/topi/hexagon/qnn/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Computes and schedules for Hexagon quantized ops """
from .adaptive_avg_pool1d import *
from .avg_pool2d import *
from .conv2d_alter_op import *
from .dense_alter_op import *
from .dequantize import dequantize_compute, dequantize_schedule
from .global_avg_pool2d import *
from .nn import *
from .qadd_qsub_qmul import *
from .qdense import *
from .qdepthwise_conv2d_slice import qdepthwise_conv2d_compute, qdepthwise_conv2d_schedule
from .quantize import quantize_compute, tir_quantize_schedule
| 1,286 | 40.516129 | 90 | py |
tvm | tvm-main/python/tvm/topi/hexagon/qnn/qdense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Schedule for dense operator"""
from tvm import te, tir
from tvm.topi import tag
from ..utils import get_layout_transform_fn
def qdense_compute(
    tensor_a,
    tensor_b,
    zero_a,
    scale_a,
    zero_b,
    scale_b,
    zero_out=None,
    scale_out=None,
    bias=None,
    q_dtype=None,
):
    """Hexagon's implementation of a sliced dense operator in Topi.
    Uses matmul.
    Parameters
    ----------
    tensor_a : tvm.te.Tensor
        data 2-D with shape [batch, in_dim]
    tensor_b : tvm.te.Tensor
        weight 2-D with shape [in_dim, out_dim]
    zero_a : integer
        quantization zero point for tensor a.
    scale_a : float
        quantization scale for tensor a.
    zero_b : integer
        quantization zero point for tensor b.
    scale_b : float
        quantization scale for tensor b.
    zero_out : Optional[integer]
        quantization zero point for output.
    scale_out : Optional[float]
        quantization scale for output.
    bias : Optional[tvm.te.Tensor]
        1-D with shape [out_dim]
    q_dtype : Optional[str]
        The output type.
    Returns
    -------
    mat : tvm.te.Tensor
        2-D with shape [batch, out_dim]
    """
    if bias is not None:
        assert len(bias.shape) == 1
    # Default the requantized output dtype to the input's dtype.
    if q_dtype is None:
        q_dtype = tensor_a.dtype
    batch, in_dim = tensor_a.shape
    # NOTE(review): the unpack names axis 0 "out_dim" and axis 1 "red_dim",
    # while the docstring says tensor_b is [in_dim, out_dim] and the compute
    # below indexes tensor_b[k, m]. The assert below only holds when both
    # axes equal in_dim — confirm the intended weight layout.
    out_dim, red_dim = tensor_b.shape
    # cmp should be done by values
    assert int(in_dim) == int(red_dim)
    k = te.reduce_axis((0, in_dim), name="k")
    # Dequantize both operands to float32 and accumulate the matmul:
    #   sum_k  scale_a * (a - zero_a) * scale_b * (b - zero_b)
    compute_lambda = lambda n, m: te.sum(
        scale_a
        * (tensor_a[n, k].astype("float32") - zero_a)
        * scale_b
        * (tensor_b[k, m].astype("float32") - zero_b),
        axis=k,
    )
    compute_name = "qmatmul_sliced"
    out = te.compute(
        (batch, out_dim),
        compute_lambda,
        name=compute_name,
        attrs={"layout_free_placeholders": [tensor_b]},
    )
    # Optional bias add, broadcast over the batch dimension.
    if bias is not None:
        out = te.compute(
            (batch, out_dim),
            lambda i, j: out[i, j] + bias[j],
            tag=tag.BROADCAST,
            name="bias",
        )
    # Requantization of dense
    if scale_out is not None:
        out = te.compute(
            (batch, out_dim),
            lambda *i: (out[i] / scale_out + zero_out).astype(q_dtype),
            name="requantize",
        )
    return out
def qdense_schedule(outs, ins, output_layout: str, input_layout: str):
    """Schedule for dense op.
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of dense in the format
        of an array of tensors.
    ins: Array of Tensor
        Input tensors into graph.
    output_layout: str
        Descriptor string for physical layout
    input_layout: str
        Descriptor string for physical layout
    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # Accept single tensors as well as lists.
    if not isinstance(ins, list):
        ins = [ins]
    if not isinstance(outs, list):
        outs = [outs]
    func = te.create_prim_func([*ins, *outs])
    s = tir.Schedule(func)
    matmul = s.get_block("qmatmul_sliced")
    # The "requantize" and "bias" blocks exist only when qdense_compute was
    # given scale_out / bias respectively; probe for them via ScheduleError.
    try:
        requantize = s.get_block("requantize")
    except tir.schedule.schedule.ScheduleError:
        requantize = None
    try:
        bias = s.get_block("bias")
    except tir.schedule.schedule.ScheduleError:
        bias = None
    input_transform_fn = get_layout_transform_fn(input_layout)
    output_transform_fn = get_layout_transform_fn(output_layout)
    # Transform input and output buffer
    s.transform_layout(matmul, ("read", 0), input_transform_fn)
    # The output transform goes on whichever block writes the final buffer.
    if requantize is not None:
        s.transform_layout(requantize, ("write", 0), output_transform_fn)
    elif bias is not None:
        s.transform_layout(bias, ("write", 0), output_transform_fn)
    else:
        s.transform_layout(matmul, ("write", 0), output_transform_fn)
    # Vectorize
    _, matmul_c, _ = s.get_loops(matmul)
    _, matmul_c_inner = s.split(matmul_c, [None, 128])
    s.vectorize(matmul_c_inner)
    # Compute everything inline
    if bias is not None and requantize is not None:
        _, bias_c = s.get_loops(bias)
        s.compute_at(matmul, bias_c)
        _, out_c = s.get_loops(requantize)
        s.compute_at(bias, out_c)
    elif bias is not None and requantize is None:
        _, out_c = s.get_loops(bias)
        s.compute_at(matmul, out_c)
    return s
| 5,202 | 25.819588 | 73 | py |
tvm | tvm-main/python/tvm/topi/hexagon/qnn/quantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Compute and schedule for hexagon quantize
Please note the following assumptions made by the implementation:
1) The input and output data will be multiple of crouton layout
2) And the supported layout is NHWC
3) The input layout will be nhwc-4h2w32c2w-2d and
output layout will be nhwc-8h8w32c-2d"""
from tvm import te
from tvm import tir
from ..utils import get_layout_transform_fn, saturate
def quantize_compute(tensor_A: te.Tensor, scale: float, zero_point: int, dtype: str):
    """Compute for quantize."""
    inv_scale = 1 / scale

    def _quantized(n, h, w, c):
        # q = saturate(int32(x / scale) + zero_point), then cast to dtype.
        scaled = (tensor_A[n, h, w, c] * inv_scale).astype("int32")
        return saturate(scaled + zero_point, dtype).astype(dtype)

    return te.compute(tensor_A.shape, _quantized, name="quantize")
def tir_quantize_schedule(
    out_M: te.Tensor,
    tensor_A: te.Tensor,
    input_layout: str,
    output_layout: str,
):
    """Schedule for output layout nhwc-8h8w32c-2d."""
    sched = tir.Schedule(te.create_prim_func([tensor_A, out_M]))
    quant_block = sched.get_block("quantize")
    sched.transform_layout(
        quant_block, buffer=tensor_A.name, index_map=get_layout_transform_fn(input_layout)
    )
    sched.transform_layout(
        quant_block, buffer=out_M.name, index_map=get_layout_transform_fn(output_layout)
    )
    # A fixed chunk is 2048 bytes: for uint8 (1 byte/element) that is an
    # 8h x 8w x 32c tile. Split height/width by 8 and channel by 32, then
    # reorder so one full chunk is iterated by the inner loops.
    batch, height, width, chan = sched.get_loops(quant_block)
    h_out, h_in = sched.split(height, [None, 8])
    w_out, w_in = sched.split(width, [None, 8])
    c_out, c_in = sched.split(chan, [None, 32])
    w_in_out, w_in_in = sched.split(w_in, [None, 4])
    sched.reorder(batch, h_out, w_out, c_out, h_in, w_in_out, w_in_in, c_in)
    return sched
| 2,777 | 33.296296 | 87 | py |
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long
"""Hexagon slice conv2d compute and schedule"""
import typing
import tvm
from tvm import te
from ..utils import get_layout_transform_fn
def conv2d_compute(
    activations: te.Tensor,
    weights: te.Tensor,
    out_shape: typing.Tuple,
    stride: typing.Tuple,
    dilation: typing.Tuple,
    dtype: str,
    output_name: str,
    weights_width_reversed: bool = True,
) -> te.Tensor:
    """Compute for slice conv2d op for hexagon.

    Assumptions made by this op:
    1. Written for a sliced convolution with 2d physical buffers.
    2. Activations are in NHWC layout, filter in HWIO layout.
    3. Grouped convolutions are not supported here; depthwise has its own
       compute. Grouping is achieved by slicing per group and calling this
       compute once per slice.

    Parameters
    ----------
    activations : te.Tensor
        Input activations padded for inner dimension size.
    weights : te.Tensor
        Weights without dilation.
    out_shape : typing.Tuple
        Logical output shape, not counting input padding.
    stride : typing.Tuple
        (stride_h, stride_w).
    dilation : typing.Tuple
        (dilation_h, dilation_w).
    dtype : str
        Accumulation/output dtype.
    output_name : str
        Name for the output compute (becomes the STIR block name).
    weights_width_reversed : bool
        When True, the weight width axis is indexed back-to-front.

    Returns
    -------
    te.Tensor
        Result of the 2D convolution.
    """
    kernel_shape = weights.shape
    rc = tvm.te.reduce_axis((0, kernel_shape[2]), name="reduce_channel")
    rh = tvm.te.reduce_axis((0, kernel_shape[0]), name="reduce_height")
    rw = tvm.te.reduce_axis((0, kernel_shape[1]), name="reduce_width")
    stride_h, stride_w = stride
    dilation_h, dilation_w = dilation
    # Optionally walk the weight width axis in reverse order.
    if weights_width_reversed:
        w_idx = kernel_shape[1] - rw - 1
    else:
        w_idx = rw

    def _conv(n, h, w, c):
        return tvm.te.sum(
            (
                activations[
                    n,
                    h * stride_h + rh * dilation_h,
                    w * stride_w + rw * dilation_w,
                    rc,
                ]
                * weights[rh, w_idx, rc, c]
            ).astype(dtype),
            axis=[rc, rh, rw],
        )

    return tvm.te.compute(out_shape, _conv, name=output_name)
def conv2d_te_schedule(
    out: te.Tensor,
    ins: typing.List[te.Tensor],
    transform_activation_layout: str,
    transform_weights_layout: str,
    transform_output_layout: str,
) -> te.Schedule:
    """TE Schedule for the sliced conv2d op
    This schedule makes the following assumptions:
    1. There is only one output tensor
    2. The activations and weights have specific layouts defined by the last 2 arguments
    3. All transformation functions are expected to be a bijection for now
    Parameters
    ----------
    out : te.Tensor
        The output tensor returned by a call to conv2d_compute
    ins : typing.List[te.Tensor]
        The list of 2 Tensors which would be the input activations and weights
    transform_activation_layout : str
        The expected activations layout
    transform_weights_layout : str
        String representing the weights layout as defined in get_layout_transform_fn
    transform_output_layout: str
        String representing the output layout as defined in get_layout_transform_fn
    Returns
    -------
    sch : te.Schedule
        The TE schedule for slice conv2d
    """
    activations, weights = ins
    output = out
    sch = tvm.te.create_schedule(output.op)
    reduce_channel, reduce_height, reduce_width = sch[output].op.reduce_axis
    # Apply the physical layout transforms to both inputs and to the output.
    sch[activations].transform_layout(get_layout_transform_fn(transform_activation_layout))
    sch[weights].transform_layout(get_layout_transform_fn(transform_weights_layout))
    transformed_axis = sch[output].transform_layout(
        get_layout_transform_fn(transform_output_layout)
    )
    # Fuse the two innermost transformed axes so they can be iterated as one.
    fused_out_axis = sch[output].fuse(transformed_axis[-1], transformed_axis[-2])
    # Place the reduction axes just outside the fused innermost axis.
    sch[output].reorder(
        *[*transformed_axis[:-2], reduce_height, reduce_width, reduce_channel, fused_out_axis]
    )
    # The below code doesn't work yet as vectorization across 2D boundary is not yet supported
    # s[output].vectorize(fused_out_axis)
    return sch
def conv2d_schedule(
    outs: te.Tensor,
    ins: typing.List[te.Tensor],
    transform_activation_layout: str,
    transform_weights_layout: str,
    transform_output_layout: str,
    output_name: str,
) -> tvm.tir.Schedule:
    """STIR schedule definition for the compute defined above by conv2d_compute.
    - Auto-generated prim_func before applying schedule primitives for reference
    - The below TVMScript code is for conv2d with padded input dimensions and a stride of 1x1
    # from tvm.script import tir as T
    @T.prim_func
    def func(InputTensor: T.Buffer((1, 24, 12, 32), "float16"), Weights: T.Buffer((3, 3, 32, 32), "float16"), compute: T.Buffer((1, 16, 8, 32), "float16")) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 16, 8, 32, 32, 3, 3):
            with T.block("compute"):
                n, h, w, c, rc, rh, rw = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
                T.reads(InputTensor[n, h + rh, w + rw, rc], Weights[rh, rw, rc, c])
                T.writes(compute[n, h, w, c])
                with T.init():
                    compute[n, h, w, c] = T.float16(0)
                compute[n, h, w, c] = compute[n, h, w, c] + InputTensor[n, h + rh, w + rw, rc] * Weights[rh, rw, rc, c]
    Parameters
    ----------
    outs : te.Tensor
        The output Tensor as returned by a call to conv2d_compute
    ins : typing.List[te.Tensor]
        This is a list of 2 tensors - Input activations and Weights
    transform_activation_layout : str
        String representing the activations layout as defined in get_layout_transform_fn
    transform_weights_layout : str
        String representing the weights layout as defined in get_layout_transform_fn
    transform_output_layout: str
        String representing the output layout as defined in get_layout_transform_fn
    output_name : str
        The name that was given to the output compute and which can be used to get the block name
    Returns
    -------
    sch : tvm.tir.Schedule
        The STIR schedule for slice conv2d compute
    """
    assert len(ins) == 2, "This schedule expects only 2 inputs - Activations and Weights"
    source_expr = ins + [outs]
    prim_func = tvm.te.create_prim_func(source_expr)
    sch = tvm.tir.Schedule(prim_func)
    compute = sch.get_block(output_name)
    # Apply layout_transform for activation
    sch.transform_layout(compute, ins[0].name, get_layout_transform_fn(transform_activation_layout))
    # Apply layout_transform for weights
    sch.transform_layout(compute, ins[1].name, get_layout_transform_fn(transform_weights_layout))
    # Apply layout_transform for output
    sch.transform_layout(compute, outs.name, get_layout_transform_fn(transform_output_layout))
    batch, height, width, channel, reduce_channel, reduce_height, reduce_width = sch.get_loops(
        compute
    ) # This still returns the original 7d loop
    # Tile the spatial and channel axes; the [2, 2] inner-width split yields
    # the w_inner_outer/w_inner_inner pair used in the reorder below.
    h_outer, h_inner = sch.split(height, [None, 8])
    w_outer, w_inner = sch.split(width, [None, 4])
    w_inner_outer, w_inner_inner = sch.split(w_inner, [2, 2])
    c_outer, c_inner = sch.split(channel, [None, 32])
    # Reduction axes sit between the tile loops and the innermost data loops.
    sch.reorder(
        batch,
        h_outer,
        w_outer,
        c_outer,
        h_inner,
        w_inner_outer,
        reduce_height,
        reduce_width,
        reduce_channel,
        c_inner,
        w_inner_inner,
    )
    # Split the zero-init block out of the reduction update loop nest.
    sch.decompose_reduction(compute, reduce_height)
    # ci_wii = s.fuse(ci, wii)
    # s.vectorize(ci_wii)
    return sch
| 9,197 | 36.851852 | 164 | py |
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Schedule for dense operator"""
from tvm import te, tir
from tvm.topi import tag
from ..utils import get_layout_transform_fn
def dense_compute(tensor_a, tensor_b, bias=None, out_dtype=None):
    """Hexagon's implementation of a sliced dense operator in Topi.
    Uses matmul.
    Parameters
    ----------
    tensor_a : tvm.te.Tensor
        data 2-D with shape [batch, in_dim]
    tensor_b : tvm.te.Tensor
        weight 2-D with shape [in_dim, out_dim]
    bias : Optional[tvm.te.Tensor]
        1-D with shape [out_dim]
    out_dtype : Optional[str]
        The output type. This is used for mixed precision.
    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [batch, out_dim]
    """
    if bias is not None:
        assert len(bias.shape) == 1
    # Default to the input dtype when no mixed-precision dtype is requested.
    if out_dtype is None:
        out_dtype = tensor_a.dtype
    batch, in_dim = tensor_a.shape
    # NOTE(review): the unpack names axis 0 "out_dim" and axis 1 "red_dim",
    # while the docstring says tensor_b is [in_dim, out_dim] and the compute
    # below indexes tensor_b[k, m]. The assert below only holds when both
    # axes equal in_dim — confirm the intended weight layout.
    out_dim, red_dim = tensor_b.shape
    # cmp should be done by values
    assert int(in_dim) == int(red_dim)
    k = te.reduce_axis((0, in_dim), name="k")
    # Cast both operands to out_dtype before accumulating (mixed precision).
    compute_lambda = lambda n, m: te.sum(
        tensor_a[n, k].astype(out_dtype) * tensor_b[k, m].astype(out_dtype), axis=k
    )
    compute_name = "matmul_sliced"
    compute_tag = "matmul"
    mat = te.compute(
        (batch, out_dim),
        compute_lambda,
        name=compute_name,
        tag=compute_tag,
        attrs={"layout_free_placeholders": [tensor_b]},
    )
    # Optional bias add, broadcast over the batch dimension.
    if bias is not None:
        mat = te.compute(
            (batch, out_dim),
            lambda i, j: mat[i, j] + bias[j],
            tag=tag.BROADCAST,
            name="bias",
        )
    return mat
def dense_schedule(outs, ins, output_layout: str, input_layout: str):
    """Schedule for dense op.
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of dense in the format
        of an array of tensors.
    ins: Array of Tensor
        Input tensors into graph.
    output_layout: str
        Descriptor string for physical layout
    input_layout: str
        Descriptor string for physical layout
    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # Accept single tensors as well as lists.
    if not isinstance(ins, list):
        ins = [ins]
    if not isinstance(outs, list):
        outs = [outs]
    func = te.create_prim_func([*ins, *outs])
    s = tir.Schedule(func)
    matmul = s.get_block("matmul_sliced")
    # The "bias" block only exists when dense_compute was given a bias tensor.
    try:
        bias = s.get_block("bias")
    except tir.schedule.schedule.ScheduleError:
        bias = None
    input_transform_fn = get_layout_transform_fn(input_layout)
    output_transform_fn = get_layout_transform_fn(output_layout)
    # No bias
    if bias is None:
        s.transform_layout(matmul, ("read", 0), input_transform_fn)
        # s.transform_layout(matmul, ("read", 1), input_transform_fn)
        s.transform_layout(matmul, ("write", 0), output_transform_fn)
    else:
        # With bias, the final write is produced by the bias block.
        s.transform_layout(matmul, ("read", 0), input_transform_fn)
        s.transform_layout(bias, ("write", 0), output_transform_fn)
    # Vectorize the innermost 64 elements of the output channel axis.
    _, matmul_c, _ = s.get_loops(matmul)
    _, matmul_c_inner = s.split(matmul_c, [None, 64])
    s.vectorize(matmul_c_inner)
    if bias is not None:
        _, bias_c = s.get_loops(bias)
        _, bias_c_inner = s.split(bias_c, [None, 64])
        s.vectorize(bias_c_inner)
    return s
| 4,122 | 27.434483 | 83 | py |
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/clip.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""
Clip the elements in `A` between `A_min` and `A_max`.
"""
from tvm import te, tir, topi
from ..utils import get_layout_transform_fn
def clip_compute(A, A_min, A_max):
    """Clamp every element of `A` into the closed range [A_min, A_max].

    Thin wrapper that delegates to the stock topi clip implementation.
    """
    return topi.clip(A, A_min, A_max)
def clip_schedule(outs, ins, output_layout: str, input_layout: str):
    """Hexagon clip schedule."""
    sched = tir.Schedule(te.create_prim_func([ins, outs]))
    clip_block = sched.get_block("compute")
    sched.transform_layout(
        clip_block, buffer=("read", 0), index_map=get_layout_transform_fn(input_layout)
    )
    sched.transform_layout(
        clip_block, buffer=("write", 0), index_map=get_layout_transform_fn(output_layout)
    )
    # Tile into 8h x 4w x 32c chunks, then fuse the innermost channel and
    # width pieces into a single vectorizable axis.
    batch, height, width, chan = sched.get_loops(clip_block)
    h_out, h_in = sched.split(height, [None, 8])
    w_out, w_in = sched.split(width, [None, 4])
    c_out, c_in = sched.split(chan, [None, 32])
    w_in_out, w_in_in = sched.split(w_in, [None, 2])
    sched.reorder(batch, h_out, w_out, c_out, h_in, w_in_out, c_in, w_in_in)
    vec_axis = sched.fuse(c_in, w_in_in)
    sched.vectorize(vec_axis)
    return sched
| 1,933 | 27.865672 | 87 | py |
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/batch_flatten.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hexagon slice batch flatten compute and schedule"""
from tvm import te, tir, topi
from ..utils import get_layout_transform_fn
def batch_flatten_compute(inp: te.Tensor) -> te.Tensor:
    """Sliced batch-flatten compute for hexagon.

    Assumes a sliced operation with NHWC input; collapses all non-batch
    axes into one by delegating to topi's flatten.

    Parameters
    ----------
    inp : te.Tensor
        Input activations padded for inner dimension size.

    Returns
    -------
    te.Tensor
        The flattened 2-D tensor.
    """
    return topi.nn.flatten(inp)
def batch_flatten_stir_schedule(
    out: te.Tensor,
    inp: te.Tensor,
    out_layout: str,
    in_layout: str,
) -> tir.Schedule:
    """STIR schedule for the sliced batch-flatten compute.

    Parameters
    ----------
    out : te.Tensor
        The output tensor returned by batch_flatten_compute.
    inp : te.Tensor
        Input tensor to batch_flatten.
    out_layout : str
        Layout descriptor for the output transform.
    in_layout : str
        Layout descriptor for the input transform.

    Returns
    -------
    tir.Schedule
        The scheduled prim_func for slice batch flatten.
    """
    sched = tir.Schedule(te.create_prim_func([inp, out]), debug_mask="all")
    flatten_block = sched.get_block("compute")
    sched.transform_layout(flatten_block, inp.name, get_layout_transform_fn(in_layout))
    sched.transform_layout(flatten_block, out.name, get_layout_transform_fn(out_layout))
    batch_axis, flat_axis = sched.get_loops(flatten_block)
    # Recover the original H/W/C structure from the flattened axis, then
    # carve out a 64-wide innermost channel piece to vectorize.
    hw_axis, chan_axis = sched.split(flat_axis, [None, inp.shape[3]])
    h_axis, w_axis = sched.split(hw_axis, [inp.shape[1], inp.shape[2]])
    chan_outer, chan_inner = sched.split(chan_axis, [None, 1024])
    chan_mid, vec_axis = sched.split(chan_inner, [None, 64])
    sched.reorder(batch_axis, h_axis, w_axis, chan_outer, chan_mid, vec_axis)
    sched.vectorize(vec_axis)
    return sched
| 2,869 | 35.794872 | 80 | py |
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/softmax_slice.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hexagon slice softmax compute and schedule"""
import typing
from tvm import te, tir, topi
from ..utils import get_layout_transform_fn
def softmax_compute(in_tensor):
    """Sliced softmax compute for hexagon.

    Assumes the input is a slice in NC layout; softmax is taken over the
    channel (second) axis via the stock topi implementation.
    """
    return topi.nn.softmax(in_tensor, axis=1)
def softmax_stir_schedule(
    out: te.Tensor, inp: te.Tensor, out_layout: typing.Callable, in_layout: typing.Callable
):
    """
    STIR schedule definition for the compute of softmax
    """
    # NOTE(review): parameters are annotated Callable but are passed through
    # get_layout_transform_fn, which elsewhere in this package takes layout
    # descriptor strings — confirm the intended argument type.
    in_layout = get_layout_transform_fn(in_layout)
    out_layout = get_layout_transform_fn(out_layout)
    func = te.create_prim_func([inp, out])
    sch = tir.Schedule(func, debug_mask="all")
    # The four blocks produced by topi.nn.softmax: running max, exp,
    # exp-sum, and the final normalization.
    max_tensor = sch.get_block("T_softmax_maxelem")
    exp_tensor = sch.get_block("T_softmax_exp")
    sum_tensor = sch.get_block("T_softmax_expsum")
    out_tensor = sch.get_block("T_softmax_norm")
    sch.transform_layout(max_tensor, inp.name, in_layout)
    sch.transform_layout(out_tensor, out.name, out_layout)
    # Vectorize the max reduction by rfactor-ing out a 64-wide inner axis.
    _, c_inner = sch.get_loops(max_tensor)
    _, c_inner_i = sch.split(c_inner, [None, 64])
    rf_max = sch.rfactor(c_inner_i, 0)
    _, _, max_inner = sch.get_loops(rf_max)
    sch.vectorize(max_inner)
    # Vectorize the elementwise exp over 512-wide chunks.
    _, loopi = sch.get_loops(exp_tensor)
    _, loopi_i = sch.split(loopi, [None, 512])
    sch.vectorize(loopi_i)
    # Same rfactor treatment for the exp-sum reduction.
    _, c_sum_inner = sch.get_loops(sum_tensor)
    _, c_sum_inner_i = sch.split(c_sum_inner, [None, 64])
    rf_sum = sch.rfactor(c_sum_inner_i, 0)
    _, _, sum_inner = sch.get_loops(rf_sum)
    sch.vectorize(sum_inner)
    # Vectorize the final normalization over 512-wide chunks.
    _, c_out_inner = sch.get_loops(out_tensor)
    _, c_out_inner_i = sch.split(c_out_inner, [None, 512])
    sch.vectorize(c_out_inner_i)
    return sch
| 2,628 | 33.142857 | 91 | py |
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/depth_to_space.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Compute and schedule for depth to space slice op
"""
from tvm import te, tir, topi
from ..utils import get_layout_transform_fn
def d2s_compute(inp, block_size, layout, mode):
    """depth_to_space compute: thin wrapper over the stock topi op."""
    return topi.nn.depth_to_space(inp, block_size=block_size, layout=layout, mode=mode)
def d2s_schedule(inp, out, input_layout, output_layout):
    """Schedule for depth to space: top level function"""
    # Input and output layouts must match and be one of the supported croutons.
    supported_layouts = ("nhwc-8h2w32c2w-2d", "nhwc-8h8w32c-2d")
    if input_layout != output_layout or output_layout not in supported_layouts:
        raise RuntimeError(
            f"Unexpected input_layout, output_layout '{input_layout, output_layout}'"
        )
    sched = tir.Schedule(te.create_prim_func([inp, out]), debug_mask="all")
    d2s_block = sched.get_block("depth_to_space")
    sched.transform_layout(d2s_block, inp.name, get_layout_transform_fn(input_layout))
    sched.transform_layout(d2s_block, out.name, get_layout_transform_fn(output_layout))
    return sched
| 1,777 | 39.409091 | 87 | py |
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/cast.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Hexagon slice cast op compute and schedule"""
from tvm import te
from tvm import tir
from ..utils import get_layout_transform_fn
def get_layout_transform_for_f32(f32_layout_string):
    """
    Given f32 layout string, return transform_layout function and
    channel/height split factor to be used for scheduling
    """
    # Look up the transform first so unknown-but-registered layouts behave
    # exactly as before (get_layout_transform_fn decides their fate).
    layout_transform_fn = get_layout_transform_fn(f32_layout_string)
    split_factors = {
        "nhwc-8h2w32c2w-2d": 8,
        "nhwc-4h2w32c2w-2d": 4,
        "nc-1024c-2d": 1024,
        "nc-512c-2d": 512,
    }
    if f32_layout_string in split_factors:
        return [layout_transform_fn, split_factors[f32_layout_string]]
    raise RuntimeError(f"Unexpected f32_layout '{f32_layout_string}'")
def cast_f16_f32_compute(in_tensor):
    """Elementwise cast of a float16 tensor to float32."""

    def _to_f32(*indices):
        return in_tensor[indices].astype("float32")

    return te.compute(in_tensor.shape, _to_f32, name="CastF16F32")
def cast_f16_f32_stir_schedule_nhwc(func, in_layout, out_layout, h_split_factor):
    """STIR schedule for the nhwc f16-to-f32 cast."""
    block = "CastF16F32"
    schedule = tir.Schedule(func, debug_mask="all")
    axis_n, axis_h, axis_w, axis_c = schedule.get_loops(schedule.get_block(block))
    # Tile into the crouton shape: h_split x 4w x 32c, with 2-wide inner w.
    h_out, h_in = schedule.split(axis_h, [None, h_split_factor])
    w_out, w_in = schedule.split(axis_w, [None, 4])
    c_out, c_in = schedule.split(axis_c, [None, 32])
    w_in_out, w_in_in = schedule.split(w_in, [None, 2])
    schedule.reorder(axis_n, h_out, w_out, c_out, h_in, w_in_out, c_in, w_in_in)
    schedule.transform_layout(block, "A", in_layout)
    schedule.transform_layout(block, block, out_layout)
    # Fuse the two innermost loops so one contiguous loop can be vectorized.
    schedule.vectorize(schedule.fuse(c_in, w_in_in))
    return schedule
def cast_f16_f32_stir_schedule_nc(func, in_layout, out_layout, c_split_factor):
    """STIR schedule for the nc f16-to-f32 cast."""
    block = "CastF16F32"
    schedule = tir.Schedule(func, debug_mask="all")
    _, chan = schedule.get_loops(schedule.get_block(block))
    _, chan_inner = schedule.split(chan, [None, c_split_factor])
    # 64-lane inner loop for HVX vectorization.
    _, lanes = schedule.split(chan_inner, [None, 64])
    schedule.transform_layout(block, "A", in_layout)
    schedule.transform_layout(block, block, out_layout)
    schedule.vectorize(lanes)
    return schedule
def cast_f16_f32_schedule(cast_func, in_layout_str, out_layout_str):
    """Schedule for f16 to f32 cast: top level function.

    Dispatches to the nhwc or nc schedule based on the input layout string.

    Parameters
    ----------
    cast_func
        PrimFunc created from the cast compute.
    in_layout_str : str
        f16 input layout string.
    out_layout_str : str
        f32 output layout string.

    Raises
    ------
    RuntimeError
        If the layout combination is unsupported.
    """
    f32_layout_transform_func, split_factor = get_layout_transform_for_f32(out_layout_str)
    f16_layout_transform_func = get_layout_transform_fn(in_layout_str)
    if in_layout_str == "nhwc-8h2w32c2w-2d":
        return cast_f16_f32_stir_schedule_nhwc(
            cast_func,
            f16_layout_transform_func,
            f32_layout_transform_func,
            split_factor,
        )
    if in_layout_str == "nc-1024c-2d":
        return cast_f16_f32_stir_schedule_nc(
            cast_func, f16_layout_transform_func, f32_layout_transform_func, split_factor
        )
    # Bug fix: this message previously interpolated undefined names
    # `input_layout`/`output_layout`, raising NameError instead of RuntimeError.
    raise RuntimeError(f"Unexpected input_layout, output_layout '{in_layout_str, out_layout_str}'")
def cast_f32_f16_compute(in_tensor):
    """Elementwise cast of a float32 tensor to float16."""

    def _to_f16(*indices):
        return in_tensor[indices].astype("float16")

    return te.compute(in_tensor.shape, _to_f16, name="CastF32F16")
def cast_f32_f16_stir_schedule_nhwc(func, in_layout, out_layout, h_split_factor):
    """STIR schedule for the nhwc f32-to-f16 cast."""
    block = "CastF32F16"
    schedule = tir.Schedule(func, debug_mask="all")
    axis_n, axis_h, axis_w, axis_c = schedule.get_loops(schedule.get_block(block))
    # Tile into the crouton shape: h_split x 4w x 32c, with 2-wide inner w.
    h_out, h_in = schedule.split(axis_h, [None, h_split_factor])
    w_out, w_in = schedule.split(axis_w, [None, 4])
    c_out, c_in = schedule.split(axis_c, [None, 32])
    w_in_out, w_in_in = schedule.split(w_in, [None, 2])
    schedule.reorder(axis_n, h_out, w_out, c_out, h_in, w_in_out, c_in, w_in_in)
    schedule.transform_layout(block, "A", in_layout)
    schedule.transform_layout(block, block, out_layout)
    # Fuse the two innermost loops so one contiguous loop can be vectorized.
    schedule.vectorize(schedule.fuse(c_in, w_in_in))
    return schedule
def cast_f32_f16_stir_schedule_nc(func, in_layout, out_layout, c_split_factor):
    """STIR schedule for the nc f32-to-f16 cast."""
    block = "CastF32F16"
    schedule = tir.Schedule(func, debug_mask="all")
    _, chan = schedule.get_loops(schedule.get_block(block))
    _, chan_inner = schedule.split(chan, [None, c_split_factor])
    # 64-lane inner loop for HVX vectorization.
    _, lanes = schedule.split(chan_inner, [None, 64])
    schedule.transform_layout(block, "A", in_layout)
    schedule.transform_layout(block, block, out_layout)
    schedule.vectorize(lanes)
    return schedule
def cast_f32_f16_schedule(cast_func, in_layout_str, out_layout_str):
    """Schedule for f32 to f16 cast: top level dispatcher on the output layout."""
    f32_transform, split_factor = get_layout_transform_for_f32(in_layout_str)
    f16_transform = get_layout_transform_fn(out_layout_str)
    dispatch = {
        "nhwc-8h2w32c2w-2d": cast_f32_f16_stir_schedule_nhwc,
        "nc-1024c-2d": cast_f32_f16_stir_schedule_nc,
    }
    if out_layout_str in dispatch:
        return dispatch[out_layout_str](cast_func, f32_transform, f16_transform, split_factor)
    raise RuntimeError(f"Unexpected input_layout, output_layout '{in_layout_str, out_layout_str}'")
| 6,273 | 41.972603 | 99 | py |
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/add_subtract_multiply.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Compute and schedule for add, multiply, subtract slice op
Please note the following assumptions made by the implementation:
1) The inputs will be multiple of crouton layout except for the axis that needs broadcasting."""
from tvm import te
from tvm import tir
from tvm import topi
from ..utils import get_layout_transform_fn
def add_broadcast_compute(input_a, input_b):
    """Broadcast-aware elementwise add, delegating to topi."""
    summed = topi.add(input_a, input_b)
    return summed
def subtract_broadcast_compute(input_a, input_b):
    """Broadcast-aware elementwise subtract, delegating to topi."""
    difference = topi.subtract(input_a, input_b)
    return difference
def multiply_broadcast_compute(input_a, input_b):
    """Broadcast-aware elementwise multiply, delegating to topi."""
    product = topi.multiply(input_a, input_b)
    return product
def tir_broadcast_schedule(
    out_m,
    input_a,
    input_b,
    output_layout: str,
    input_a_layout: str,
    input_b_layout: str,
    op_name: str,
):
    """Schedule for input and output layout nhwc-8h2w32c2w-2d considering broadcast"""
    prim = te.create_prim_func([input_a, input_b, out_m])
    sch = tir.Schedule(prim)
    # topi names the op's block "T_<op>"; look it up from the op name.
    block_names = {"add": "T_add", "subtract": "T_subtract", "multiply": "T_multiply"}
    op_block = sch.get_block(block_names[op_name])
    crouton_layout = "nhwc-8h2w32c2w-2d"
    # Only transform operands that are actually in the crouton layout; a
    # broadcast operand may use a different (smaller) layout.
    if input_a_layout == crouton_layout:
        sch.transform_layout(
            op_block, buffer=("read", 0), index_map=get_layout_transform_fn(input_a_layout)
        )
    if input_b_layout == crouton_layout:
        sch.transform_layout(
            op_block, buffer=("read", 1), index_map=get_layout_transform_fn(input_b_layout)
        )
    sch.transform_layout(
        op_block, buffer=("write", 0), index_map=get_layout_transform_fn(output_layout)
    )
    # Tile into 8h x 4w x 32c croutons with a 2-wide inner w loop.
    axis_n, axis_h, axis_w, axis_c = sch.get_loops(op_block)
    h_out, h_in = sch.split(axis_h, [None, 8])
    w_out, w_in = sch.split(axis_w, [None, 4])
    c_out, c_in = sch.split(axis_c, [None, 32])
    w_in_out, w_in_in = sch.split(w_in, [None, 2])
    sch.reorder(axis_n, h_out, w_out, c_out, h_in, w_in_out, c_in, w_in_in)
    # Fuse the innermost loops and vectorize for HVX.
    sch.vectorize(sch.fuse(c_in, w_in_in))
    return sch
| 2,910 | 32.079545 | 96 | py |
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/reshape.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hexagon slice reshape compute and schedule"""
from tvm import te, tir, topi
from ..utils import get_layout_transform_fn
def reshape_compute(inp: te.Tensor, new_shape: tuple) -> te.Tensor:
    """Compute for the sliced reshape op on Hexagon.

    Assumptions:
    1. This is written for a sliced reshape operation.
    2. The input is in NHWC layout.

    Parameters
    ----------
    inp : te.Tensor
        Input tensor.
    new_shape : tuple
        Target output shape.

    Returns
    -------
    te.Tensor
        Result of reshaping the input.
    """
    reshaped = topi.transform.reshape(inp, new_shape)
    return reshaped
def stir_sched_nhwc_2d_op(
    out: te.Tensor,
    inp: te.Tensor,
    out_layout: str,
    in_layout: str,
    c_split: int,
) -> tir.Schedule:
    """Schedule for output layout: nc-1024-2d, nc-2048-2d"""
    sch = tir.Schedule(te.create_prim_func([inp, out]), debug_mask="all")
    reshape_block = sch.get_block("T_reshape")
    sch.transform_layout(reshape_block, inp.name, get_layout_transform_fn(in_layout))
    sch.transform_layout(reshape_block, out.name, get_layout_transform_fn(out_layout))
    # Recover the original NHWC structure from the flattened output loop,
    # then split channels into 1024-wide chunks with a c_split-lane inner loop.
    batch, flat = sch.get_loops(reshape_block)
    spatial, chan = sch.split(flat, [None, inp.shape[3]])
    height, width = sch.split(spatial, [inp.shape[1], inp.shape[2]])
    chan_outer, chan_inner = sch.split(chan, [None, 1024])
    chan_inner_outer, lanes = sch.split(chan_inner, [None, c_split])
    sch.reorder(batch, height, width, chan_outer, chan_inner_outer, lanes)
    sch.vectorize(lanes)
    return sch
def stir_schedule_nhwc_8h2w32c2w(
    out: te.Tensor,
    inp: te.Tensor,
    out_layout: str,
    in_layout: str,
) -> tir.Schedule:
    """Schedule for input and output layout nhwc-8h2w32c2w"""
    sch = tir.Schedule(te.create_prim_func([inp, out]), debug_mask="all")
    reshape_block = sch.get_block("T_reshape")
    # Only layout transforms are needed; no loop restructuring for this case.
    sch.transform_layout(reshape_block, inp.name, get_layout_transform_fn(in_layout))
    sch.transform_layout(reshape_block, out.name, get_layout_transform_fn(out_layout))
    return sch
def reshape_stir_schedule(
out: te.Tensor,
inp: te.Tensor,
output_layout: str,
input_layout: str,
) -> tir.Schedule:
"""STIR schedule definition for the compute of reshape compute.
Parameters
----------
outputs : te.Tensor
The output tensor as returned by a call to reshape_compute
input : te.Tensor
Input tensor to reshape
out_layout: str
The transformation function definition for the expected output layout
in_layout: str
The transformation function definition for the input layout
Returns
-------
sch : tvm.tir.Schedule
The STIR schedule for slice reshape compute
"""
if output_layout in ["nhwc-8h2w32c2w-2d", "nhwc-8h8w32c-2d"]:
return stir_schedule_nhwc_8h2w32c2w(out, inp, output_layout, input_layout)
if output_layout == "nc-1024-2d":
return stir_sched_nhwc_2d_op(out, inp, output_layout, input_layout, 64)
if output_layout == "nc-2048-2d":
return stir_sched_nhwc_2d_op(out, inp, output_layout, input_layout, 128)
raise RuntimeError(f"Unexpected layout '{output_layout}'")
| 4,005 | 34.767857 | 82 | py |
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/max_pool2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument, too-many-locals, condition-evals-to-constant
""" Compute and schedule for max_pool2d slice op
Please note the following assumptions made by the implementation:
1) The input must be padded in advance to account for 'padding'. In addition,
both input and output must be padded as per the physical buffer layout.
2) The current implementation assumes 'count_include_pad' to be 'True'. It can be
modified to support 'False' case but the element count for the pooling window
must be pre-computed and provided as an input to reduce the run-time overhead.
3) 'padding' is ignored. It must be handled outside of the sliced op.
4) This implementation will not work if the output includes any physical layout
related padding, as it can result into out-of-bound access for the input.
"""
from tvm import te
from tvm import tir
from ..utils import get_layout_transform_fn
def validate_out_shape(out_shape, in_shape, kernel, stride, dilation):
    """Raise if the NHWC output shape requires more input than in_shape provides."""
    _, out_height, out_width, _ = out_shape
    _, in_height, in_width, _ = in_shape
    kernel_h, kernel_w = kernel
    stride_h, stride_w = stride
    dilation_h, dilation_w = dilation
    # Input extent consumed along each axis when sweeping the pooling window
    # across the full output.
    needed_h = (out_height - 1) * stride_h + dilation_h * (kernel_h - 1) + 1
    needed_w = (out_width - 1) * stride_w + dilation_w * (kernel_w - 1) + 1
    if in_height < needed_h:
        raise RuntimeError("Output height is too large")
    if in_width < needed_w:
        raise RuntimeError("Output width is too large")
def max_pool2d_compute(A, out_shape, kernel, stride, dilation):
    """max_pool2d compute"""
    kernel_h, kernel_w = kernel
    reduce_h = te.reduce_axis((0, kernel_h), name="rh")
    reduce_w = te.reduce_axis((0, kernel_w), name="rw")
    batch, _, _, _ = out_shape
    # Shapes can only be validated when the batch extent is a concrete int
    # (i.e. not a symbolic variable).
    if isinstance(batch, int):
        validate_out_shape(out_shape, A.shape, kernel, stride, dilation)
    stride_h, stride_w = stride
    dilation_h, dilation_w = dilation

    def _window_max(b, h, w, c):
        return te.max(
            A[
                b,
                h * stride_h + dilation_h * reduce_h,
                w * stride_w + dilation_w * reduce_w,
                c,
            ].astype(A.dtype),
            axis=[reduce_h, reduce_w],
        )

    return te.compute(out_shape, _window_max, name="max")
def STIR_schedule_nhwc_8h2w32c2w_nhwc_8h8w32c(
    outs: te.Tensor, ins: te.Tensor, output_layout: str, input_layout: str
):
    """Schedule for input and output layout nhwc-8h2w32c2w and nhwc-8h8w32c.

    Builds a PrimFunc from the max_pool2d compute ("max" block), applies the
    requested two-level layout transforms, and tiles the loop nest into
    128-byte crouton chunks.
    """
    func = te.create_prim_func([ins, outs])
    s = tir.Schedule(func)
    # NOTE!!! This scheduling logic is a work in progress.
    # It is not known to ultimately result in near-optimal Hexagon performance.
    # The schedule below strives to implement these heuristics:
    #
    # (1) For mathematical operations on tensor values, prefer HVX SIMD operations
    # over per-element scalar operations.
    #
    # (2) Minimize the number of memory transfers used to operate on tensor values:
    # host-memory <--> Hexagon DDR <--> VTCM <--> HVX registers
    #
    # As a consequence of (1) + (2), prefer TIR schedules that load each value
    # into an HVX SIMD tensor exactly once.
    Max = s.get_block("max")
    # NOTE(review): input layouts other than these two 2-d crouton layouts are
    # left untransformed — presumably already in a plain layout; confirm with callers.
    if input_layout in (
        "nhwc-8h2w32c2w-2d",
        "nhwc-8h8w32c-2d",
    ):
        input_transform_fn = get_layout_transform_fn(input_layout)
        s.transform_layout(Max, ("read", 0), input_transform_fn)
    output_transform_fn = get_layout_transform_fn(output_layout)
    s.transform_layout(Max, ("write", 0), output_transform_fn)
    # pylint: disable=line-too-long
    #
    # Restructure the loop nestings to have this overall structure:
    # (loop over different 128-byte output-tensor chunks) : n, ho, wo, co  }- the first level of a two-level tensor layout
    # (loop within one 128-byte output-tensor chunk) : hi, wio, ci, wii    }- the second level of a two-level tensor layout
    # (loop over reduction axes) : rh, rw                                  }- loop over multiple elements of the input tensor
    #
    # Note: This schedule is a work in progress. We *expect* that it's
    # crucially important for the loops to have this relative ordering:
    # n ... ho ... wo ... co ... hi ... wio ... ci ... wii
    # because it lets us visit each of the 128-byte output chunks precisely once.
    (
        n,
        h,
        w,
        c,
        rh,
        rw,
    ) = s.get_loops(Max)
    # Restructure the loops from NHWC to nhwc_8h2w32c2w or nhwc_8h8w32c, with loops for 'max's reduction
    # axes at the very end.
    # nhwc_8h2w32c2w layout is for float16 and nhwc-8h8w32c-2d layout is for uint8/int8
    if output_layout == "nhwc-8h2w32c2w-2d":
        ho, hi = s.split(h, [None, 8])
        wo, wi = s.split(w, [None, 4])
        wio, wii = s.split(wi, [None, 2])
        co, ci = s.split(c, [None, 32])
        s.reorder(n, ho, wo, co, hi, wio, ci, wii, rh, rw)
    elif output_layout == "nhwc-8h8w32c-2d":
        ho, hi = s.split(h, [None, 8])
        wo, wi = s.split(w, [None, 8])
        co, ci = s.split(c, [None, 32])
        s.reorder(n, ho, wo, co, hi, wi, ci, rh, rw)
    # TODO: Enable vectorization.
    # Hexagon v69's HVX units support SIMD operations on 64-element float16 vectors.
    #
    # TVM's 'vectorize' schedule primitive is the idiomatic way to encourage lower layers of the
    # compiler to generate this kind of SIMD object code.
    #
    # Several requirements must be met to use 'vectorize':
    #
    #    1) It can only be applied to a schedule's innermost loop variable.
    #
    #    2) Any block-iterator(s) bound to that innermost loop variable must be
    #       *data-parallel* block iterators.
    #
    #    3) Ideally, the innermost loop variable will iterate only over the output
    #       tensor's fastest-changing indices and nothing else.  But in our case,
    #       our two innermost loops correspond to the the max operator's reduction axes.
    #
    # Finding a good way to satisfy all of these requirements at the same time is
    # left for future work.
    # ci_wii = s.fuse(ci, wii)
    # s.vectorize(ci_wii_rh_rw)
    return s
def STIR_schedule_n11c(outs, ins, output_layout: str, input_layout: str):
    """Schedule for output layout: n11c-1024c, n11c-2048c-2d;"""
    # A variation of STIR_schedule_nhwc_8h2w32c2w_nhwc_8h8w32c; see that
    # function's comments for the overall scheduling heuristics.
    # The "n11c-1024c" output layout is shorthand for the axis mapping
    # [n, h, w, c // 1024, te.AXIS_SEPARATOR, c % 1024].
    prim = te.create_prim_func([ins, outs])
    sch = tir.Schedule(prim)
    max_block = sch.get_block("max")
    sch.transform_layout(max_block, ("read", 0), get_layout_transform_fn(input_layout))
    sch.transform_layout(max_block, ("write", 0), get_layout_transform_fn(output_layout))
    loops = sch.get_loops(max_block)
    channel = loops[3]
    chunk = 1024 if output_layout == "n11c-1024c-2d" else 2048
    sch.split(channel, [None, chunk])
    # TODO: vectorize the inner channel loop once the reduction-axis
    # constraints described in the sibling schedule are resolved.
    return sch
def max_pool2d_STIR_schedule(outs, ins, output_layout: str, input_layout: str):
    """STIR based schedule: dispatch on the requested output layout.

    Bug fix: the conditions were previously written as
    `output_layout == "..." or "..."`, which is always truthy (a non-empty
    string literal), so every layout — including invalid ones — fell into the
    first branch and the RuntimeError was unreachable.
    """
    if output_layout in ("nhwc-8h2w32c2w-2d", "nhwc-8h8w32c-2d"):
        return STIR_schedule_nhwc_8h2w32c2w_nhwc_8h8w32c(outs, ins, output_layout, input_layout)
    if output_layout in ("n11c-1024c-2d", "n11c-2048c-2d"):
        return STIR_schedule_n11c(outs, ins, output_layout, input_layout)
    raise RuntimeError(f"Unexpected layout '{output_layout}'")
| 8,187 | 37.261682 | 126 | py |
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/avg_pool2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument, too-many-locals, pointless-exception-statement
""" Compute and schedule for avg_pool2d slice op """
from tvm import te
from tvm import tir
from ..utils import get_layout_transform_fn
from ...utils import get_const_tuple
from ...nn.utils import get_pad_tuple
from ...nn.pad import pad
from ..compute_poolarea import compute_PoolArea
def avg_pool2d_NCHW(
    data, kernel, stride, padding, dilation, count_include_pad, oshape, odtype="float16"
):
    """avg_pool2d compute for NCHW-layout input.

    Sums each (dilated, strided) pooling window in float32, then multiplies by
    the inverse window area and casts back to `odtype`.

    Parameters
    ----------
    data : te.Tensor
        Input tensor in NCHW layout.
    kernel : tuple of int
        Pooling window extents (kh, kw).
    stride : tuple of int
        Strides (sh, sw).
    padding
        Padding spec, as accepted by get_pad_tuple.
    dilation : tuple of int
        Window dilation (dh, dw).
    count_include_pad : bool
        When True, zero padding contributes to the averaging denominator.
    oshape : tuple
        Output shape (n, c, oh, ow).
    odtype : str
        Output dtype; only "float16" is supported.

    Raises
    ------
    RuntimeError
        If `odtype` is not "float16".
    """
    if odtype != "float16":
        raise RuntimeError(f"Unsupported output dtype '{odtype}'")
    kh, kw = kernel
    rh = te.reduce_axis((0, kh), name="rh")
    rw = te.reduce_axis((0, kw), name="rw")
    sh, sw = stride
    dh, dw = dilation
    # Effective window extents once dilation is applied.
    dilated_kh = (kh - 1) * dh + 1
    dilated_kw = (kw - 1) * dw + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        get_const_tuple(padding), (dilated_kh, dilated_kw)
    )
    # DOPAD
    if pad_top != 0 or pad_down != 0 or pad_left != 0 or pad_right != 0:
        pad_before = (0, 0, pad_top, pad_left)
        pad_after = (0, 0, pad_down, pad_right)
        data_pad = pad(data, pad_before, pad_after, name="data_pad")
    else:
        # By definition when True, zero-padding will be included in the averaging calculation
        # This is equivalent to PoolArea = (kh * kw)
        # With no padding every window covers only real elements, so forcing
        # count_include_pad=True skips building the per-position PoolArea tensor.
        count_include_pad = True
        data_pad = data
    # Accumulate in float32 for precision; the average is cast to odtype below.
    Sum = te.compute(
        oshape,
        lambda b, c, h, w: te.sum(
            data_pad[b, c, h * sh + dh * rh, w * sw + dw * rw].astype("float32"), axis=[rh, rw]
        ),
        name="pool_sum",
    )
    if not count_include_pad:
        # Compute PoolArea using unpadded input tensor
        _, _, oh, ow = oshape
        _, _, ih, iw = data.shape
        PoolArea = te.compute(
            (oh, ow),
            lambda i, j: compute_PoolArea(i, j, ih, iw, kh, kw, sh, sw, dh, dw, pad_top, pad_left),
            name="pool_area",
        )
        # Guard against PoolArea == 0 (window entirely over padding) to avoid
        # a divide-by-zero; such positions yield 0.
        InvArea = te.compute(
            (oh, ow),
            lambda i, j: tir.if_then_else(
                tir.all(PoolArea[i, j] > 0), (float(1) / PoolArea[i, j]), 0
            ),
            name="inverse_area",
        )
        Avg = te.compute(
            oshape,
            lambda b, c, h, w: (Sum[b, c, h, w] * InvArea[h, w]).astype(odtype),
            name="pool_avg",
        )
    else:
        # Constant denominator: every window nominally covers kh * kw elements.
        InvArea = float(1) / (kh * kw)
        Avg = te.compute(
            oshape, lambda b, c, h, w: (Sum[b, c, h, w] * InvArea).astype(odtype), name="pool_avg"
        )
    return Avg
def avg_pool2d_NHWC(
    data, kernel, stride, padding, dilation, count_include_pad, oshape, odtype="float16"
):
    """avg_pool2d compute for NHWC-layout input.

    Sums each (dilated, strided) pooling window in float32, then multiplies by
    the inverse window area and casts back to `odtype`.

    Fix: removed a dead `InvArea = float(1) / (kh * kw)` assignment that was
    unconditionally overwritten in both branches below (and absent from the
    otherwise-parallel avg_pool2d_NCHW).

    Parameters
    ----------
    data : te.Tensor
        Input tensor in NHWC layout.
    kernel : tuple of int
        Pooling window extents (kh, kw).
    stride : tuple of int
        Strides (sh, sw).
    padding
        Padding spec, as accepted by get_pad_tuple.
    dilation : tuple of int
        Window dilation (dh, dw).
    count_include_pad : bool
        When True, zero padding contributes to the averaging denominator.
    oshape : tuple
        Output shape (n, oh, ow, c).
    odtype : str
        Output dtype; only "float16" is supported.

    Raises
    ------
    RuntimeError
        If `odtype` is not "float16".
    """
    if odtype != "float16":
        raise RuntimeError(f"Unsupported output dtype '{odtype}'")
    kh, kw = kernel
    rh = te.reduce_axis((0, kh), name="rh")
    rw = te.reduce_axis((0, kw), name="rw")
    sh, sw = stride
    dh, dw = dilation
    # Effective window extents once dilation is applied.
    dilated_kh = (kh - 1) * dh + 1
    dilated_kw = (kw - 1) * dw + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        get_const_tuple(padding), (dilated_kh, dilated_kw)
    )
    # DOPAD
    if pad_top != 0 or pad_down != 0 or pad_left != 0 or pad_right != 0:
        pad_before = (0, pad_top, pad_left, 0)
        pad_after = (0, pad_down, pad_right, 0)
        data_pad = pad(data, pad_before, pad_after, name="data_pad")
    else:
        # By definition when True, zero-padding will be included in the averaging calculation
        # This is equivalent to PoolArea = (kh * kw)
        # With no padding every window covers only real elements, so forcing
        # count_include_pad=True skips building the per-position PoolArea tensor.
        count_include_pad = True
        data_pad = data
    # Accumulate in float32 for precision; the average is cast to odtype below.
    Sum = te.compute(
        oshape,
        lambda b, h, w, c: te.sum(
            data_pad[b, h * sh + dh * rh, w * sw + dw * rw, c].astype("float32"), axis=[rh, rw]
        ),
        name="pool_sum",
    )
    if not count_include_pad:
        # Compute PoolArea using unpadded input tensor
        _, oh, ow, _ = oshape
        _, ih, iw, _ = data.shape
        PoolArea = te.compute(
            (oh, ow),
            lambda i, j: compute_PoolArea(i, j, ih, iw, kh, kw, sh, sw, dh, dw, pad_top, pad_left),
            name="pool_area",
        )
        # Guard against PoolArea == 0 (window entirely over padding) to avoid
        # a divide-by-zero; such positions yield 0.
        InvArea = te.compute(
            (oh, ow),
            lambda i, j: tir.if_then_else(
                tir.all(PoolArea[i, j] > 0), (float(1) / PoolArea[i, j]), 0
            ),
            name="inverse_area",
        )
        Avg = te.compute(
            oshape,
            lambda b, h, w, c: (Sum[b, h, w, c] * InvArea[h, w]).astype(odtype),
            name="pool_avg",
        )
    else:
        # Constant denominator: every window nominally covers kh * kw elements.
        InvArea = float(1) / (kh * kw)
        Avg = te.compute(
            oshape, lambda b, h, w, c: (Sum[b, h, w, c] * InvArea).astype(odtype), name="pool_avg"
        )
    return Avg
def schedule_8h2w32c2w(outs, ins, output_layout: str, input_layout: str):
    """Schedule for input and output layout 8h2w32c2w.

    Fix: removed a leftover debug `print(func)` and dropped unused local
    bindings for the cache_read/cache_write results (the calls themselves are
    kept — they mutate the schedule).
    """
    func = te.create_prim_func([ins, outs])
    s = tir.Schedule(func)
    Sum = s.get_block("pool_sum")
    Avg = s.get_block("pool_avg")
    # Stage the pooled input and output through VTCM, Hexagon's fast
    # on-chip memory.
    mem_scope = "global.vtcm"
    s.cache_read(Sum, 0, mem_scope)
    s.cache_write(Avg, 0, mem_scope)
    input_transform_fn = get_layout_transform_fn(input_layout)
    output_transform_fn = get_layout_transform_fn(output_layout)
    s.transform_layout(Sum, ("read", 0), input_transform_fn, pad_value=0.0)
    s.transform_layout(Avg, ("write", 0), output_transform_fn, pad_value=0.0)
    return s
def schedule_1024c(outs, ins, output_layout: str, input_layout: str):
    """Schedule for output layout: 1024c, input layout: 8h2w32c2w"""
    sch = tir.Schedule(te.create_prim_func([ins, outs]))
    sum_block = sch.get_block("pool_sum")
    avg_block = sch.get_block("pool_avg")
    # Stage reads/writes through VTCM, Hexagon's fast on-chip memory.
    vtcm = "global.vtcm"
    sch.cache_read(sum_block, 0, vtcm)
    sch.cache_write(avg_block, 0, vtcm)
    sch.transform_layout(sum_block, ("read", 0), get_layout_transform_fn(input_layout), pad_value=0.0)
    sch.transform_layout(avg_block, ("write", 0), get_layout_transform_fn(output_layout), pad_value=0.0)
    # Schedule 'pool_avg': split channels into 1024-wide chunks and vectorize
    # over a 64-lane inner loop.  The channel axis position depends on layout.
    if output_layout == "n11c-1024c-2d":
        _, _, _, chan = sch.get_loops(avg_block)
    else:
        _, chan, _, _ = sch.get_loops(avg_block)
    _, chan_inner = sch.split(chan, [None, 1024])
    _, lanes = sch.split(chan_inner, [None, 64])
    sch.vectorize(lanes)
    # Schedule 'pool_sum': rotate the last three axes so the reduction axes
    # run ahead of the innermost spatial axis.
    sum_axes = sch.get_loops(sum_block)
    sch.reorder(sum_axes[-2], sum_axes[-1], sum_axes[-3])
    return sch
def avg_pool2d_schedule(outs, ins, output_layout: str, input_layout: str):
    """avg_pool2d schedule: dispatch on the requested output layout."""
    if output_layout in ("nhwc-8h2w32c2w-2d", "nchw-8h2w32c2w-2d"):
        return schedule_8h2w32c2w(outs, ins, output_layout, input_layout)
    if output_layout in ("n11c-1024c-2d", "nc11-1024c-2d"):
        return schedule_1024c(outs, ins, output_layout, input_layout)
    raise RuntimeError(f"Unexpected layout '{output_layout}'")
| 7,872 | 33.230435 | 112 | py |
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/argmax.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Hexagon slice argmax compute and schedule"""
from tvm import tir
from tvm import topi
from ..utils import get_layout_transform_fn
def argmax_compute(in_tensor, axis):
    """Index of the maximum element along `axis`, via topi."""
    return topi.argmax(in_tensor, axis)
def argmax_stir_schedule_nhwc(func, in_layout, out_layout):
    """Schedule for nhwc argmax"""
    schedule = tir.Schedule(func, debug_mask="all")
    # "A_red_temp" reads the input tensor "A"; "A_red" writes the final indices.
    schedule.transform_layout("A_red_temp", "A", in_layout)
    schedule.transform_layout("A_red", "A_red", out_layout)
    return schedule
def argmax_schedule(argmax_func, in_layout_str, out_layout_str):
    """Schedule for argmax: top level function"""
    # The fp16 and int8 input layouts take the same scheduling path; only the
    # layout-transform functions differ, and those are derived from the strings.
    valid_in_layouts = ("nhwc-8h2w32c2w-2d", "nhwc-8h8w32c-2d")
    if in_layout_str in valid_in_layouts and out_layout_str == "nhw-32h16w-2d":
        in_transform = get_layout_transform_fn(in_layout_str)
        out_transform = get_layout_transform_fn(out_layout_str)
        return argmax_stir_schedule_nhwc(argmax_func, in_transform, out_transform)
    raise RuntimeError(f"Unexpected input_layout, output_layout '{in_layout_str, out_layout_str}'")
| 2,263 | 40.925926 | 99 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.