repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tvm | tvm-main/python/tvm/relay/testing/densenet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long
"""
Port of MxNet version of Densenet to Relay.
https://github.com/apache/incubator-mxnet/blob/master/python/mxnet/gluon/model_zoo/vision/densenet.py
"""
# pylint: enable=line-too-long
from tvm import relay
from . import layers
from .init import create_workload
def _make_dense_layer(data, growth_rate, bn_size, index):
    """Single densenet layer: BN -> ReLU -> 1x1 conv -> BN -> ReLU -> 3x3 conv.

    Parameters
    ----------
    data : relay.Expr
        Input feature map.

    growth_rate : int
        Number of output channels of the final 3x3 convolution.

    bn_size : int
        Width multiplier for the bottleneck 1x1 convolution.

    index : str
        Unique suffix used to name this layer's parameters.

    Returns
    -------
    result : relay.Expr
        Output of the 3x3 convolution.
    """
    bn1 = layers.batch_norm_infer(data, name=f"batch_1_{index}")
    relu1 = relay.nn.relu(bn1)
    conv1 = layers.conv2d(
        relu1, channels=bn_size * growth_rate, kernel_size=(1, 1), name=f"conv2d_1_{index}"
    )
    # f-string for consistency with every other name in this function
    # (was the lone `"batch_2_" + index` concatenation).
    bn2 = layers.batch_norm_infer(conv1, name=f"batch_2_{index}")
    relu2 = relay.nn.relu(bn2)
    conv2 = layers.conv2d(
        relu2, channels=growth_rate, kernel_size=(3, 3), padding=(1, 1), name=f"conv2d_2_{index}"
    )
    return conv2
def _make_dense_block(data, num_layers, bn_size, growth_rate, index):
    """Makes a block of dense layers of the specified size."""
    # NOTE(review): each layer here consumes only the previous layer's output,
    # and the concatenation collects the per-layer outputs (not the block
    # input); confirm against the reference DenseNet if exact parity matters.
    outputs = []
    current = data
    for layer_idx in range(num_layers):
        current = _make_dense_layer(current, growth_rate, bn_size, f"{index}_{layer_idx}")
        outputs.append(current)
    # Join every layer's output along the channel axis (axis 1, NCHW).
    return relay.concatenate(outputs, 1)
def _make_transition(data, num_output_features, index):
    """Transition between layers."""
    # BN -> ReLU -> 1x1 conv shrinks the channel count, then a 2x2/2 average
    # pool halves the spatial resolution.
    normed = layers.batch_norm_infer(data, name=f"batch_t_{index}")
    activated = relay.nn.relu(normed)
    projected = layers.conv2d(
        activated, channels=num_output_features, kernel_size=(1, 1), name=f"conv_t_{index}"
    )
    return relay.nn.avg_pool2d(projected, pool_size=(2, 2), strides=(2, 2))
def _make_dense_net(
    num_init_features, growth_rate, block_config, data_shape, data_dtype, bn_size=4, classes=1000
):
    """Builds up a densenet."""
    # Input variable, e.g. (batch_size, 3, 224, 224).
    data = relay.Var("data", relay.TensorType(data_shape, data_dtype))
    # Stem: 7x7/2 conv -> BN -> ReLU -> 3x3/2 max pool.
    stem_conv = layers.conv2d(
        data,
        channels=num_init_features,
        kernel_size=(7, 7),
        strides=(2, 2),
        padding=(3, 3),
        name="conv1",
    )
    stem_bn = layers.batch_norm_infer(stem_conv, name="batch1")
    stem_relu = relay.nn.relu(stem_bn)
    body = relay.nn.max_pool2d(stem_relu, pool_size=(3, 3), strides=(2, 2), padding=(1, 1))
    num_features = num_init_features
    last_block = len(block_config) - 1
    for block_idx, num_layers in enumerate(block_config):
        body = _make_dense_block(body, num_layers, bn_size, growth_rate, block_idx)
        num_features += num_layers * growth_rate
        # Every block except the last is followed by a transition that halves
        # the channel count and the spatial resolution.
        if block_idx != last_block:
            num_features //= 2
            body = _make_transition(body, num_features, block_idx)
    # Classifier head: BN -> ReLU -> 7x7 avg pool -> flatten -> dense.
    head_bn = layers.batch_norm_infer(body, name="batch2")
    head_relu = relay.nn.relu(head_bn)
    pooled = relay.nn.avg_pool2d(head_relu, pool_size=(7, 7))
    flattened = relay.nn.batch_flatten(pooled)
    logits = layers.dense_add_bias(flattened, units=classes, name="dense")
    return relay.Function(relay.analysis.free_vars(logits), logits)
def get_workload(
    densenet_size=121, classes=1000, batch_size=4, image_shape=(3, 224, 224), dtype="float32"
):
    """Gets benchmark workload for densenet.

    Parameters
    ----------
    densenet_size : int, optional (default 121)
        Parameter for the network size. The supported sizes
        are 121, 161, 169, and 201.

    classes : int, optional (default 1000)
        The number of classes.

    batch_size : int, optional (default 4)
        The batch size for the network.

    image_shape : shape, optional (default (3, 224, 224))
        The shape of the input data.

    dtype : data type, optional (default 'float32')
        The data type of the input data.

    Returns
    -------
    mod: tvm.IRModule
        The relay module that contains a DenseNet network.

    params : dict of str to NDArray
        The benchmark parameters.
    """
    # (num_init_features, growth_rate, block_config) per model size, following
    # the MxNet gluon model zoo definitions.
    specs = {
        121: (64, 32, [6, 12, 24, 16]),
        161: (96, 48, [6, 12, 36, 24]),
        # Fixed: was (69, ...); DenseNet-169 uses 64 initial features.
        169: (64, 32, [6, 12, 32, 32]),
        201: (64, 32, [6, 12, 48, 32]),
    }
    bn_size = 4
    num_init_features, growth_rate, block_config = specs[densenet_size]
    data_shape = tuple([batch_size] + list(image_shape))
    net = _make_dense_net(
        num_init_features, growth_rate, block_config, data_shape, dtype, bn_size, classes
    )
    return create_workload(net)
| 5,127 | 34.123288 | 101 | py |
tvm | tvm-main/python/tvm/relay/testing/dqn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Net of Nature DQN
Reference:
Mnih, Volodymyr, et al. "Human-level control through deep reinforcement learning."
Nature 518.7540 (2015): 529.
"""
from tvm import relay
from . import layers
from .init import create_workload
def get_net(batch_size, num_actions=18, image_shape=(4, 84, 84), dtype="float32", layout="NCHW"):
    """get symbol of nature dqn

    Three conv -> bias -> relu stages followed by two dense layers; the final
    dense layer emits one Q-value per action.
    """

    def _conv_bias_relu(data, name, kernel_size, strides, channels, bias_axis):
        # One stage: conv2d -> bias_add -> relu.  The bias is a fresh relay
        # variable named "<name>_bias", matching the original per-stage code.
        bias = relay.var(name + "_bias")
        conv = layers.conv2d(
            data,
            kernel_size=kernel_size,
            strides=strides,
            padding=(0, 0),
            channels=channels,
            name=name,
            data_layout=layout,
            kernel_layout=layers.conv_kernel_layout(layout),
        )
        conv = relay.nn.bias_add(conv, bias, bias_axis)
        return relay.nn.relu(conv)

    data_shape = (batch_size,) + image_shape
    data = relay.var("data", shape=data_shape, dtype=dtype)
    # Bias is added along the channel axis of whatever layout was requested.
    bias_axis = layout.index("C")
    relu1 = _conv_bias_relu(data, "conv1", (8, 8), (4, 4), 32, bias_axis)
    relu2 = _conv_bias_relu(relu1, "conv2", (4, 4), (2, 2), 64, bias_axis)
    relu3 = _conv_bias_relu(relu2, "conv3", (3, 3), (1, 1), 64, bias_axis)
    bf1 = relay.nn.batch_flatten(relu3)
    dense1 = layers.dense_add_bias(bf1, units=512, name="dense1")
    relu4 = relay.nn.relu(dense1)
    dense2 = layers.dense_add_bias(relu4, units=num_actions, name="dense2")
    args = relay.analysis.free_vars(dense2)
    return relay.Function(args, dense2)
def get_workload(
    batch_size, num_actions=18, image_shape=(4, 84, 84), dtype="float32", layout="NCHW"
):
    """Get benchmark workload for a Deep Q Network

    Parameters
    ----------
    batch_size : int
        The batch size used in the model

    num_actions : int, optional
        Number of actions

    image_shape : tuple, optional
        The input image shape

    dtype : str, optional
        The data type

    layout : str, optional
        The data layout of the convolutions, e.g. "NCHW"

    Returns
    -------
    mod : tvm.IRModule
        The relay module that contains a DQN network.

    params : dict of str to NDArray
        The parameters.
    """
    # Build the dataflow graph, then let create_workload attach initialized
    # parameters and wrap everything in an IRModule.
    dqn_net = get_net(
        batch_size, num_actions=num_actions, image_shape=image_shape, dtype=dtype, layout=layout
    )
    return create_workload(dqn_net)
| 3,497 | 29.955752 | 97 | py |
tvm | tvm-main/python/tvm/relay/testing/synthetic.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Synthetic networks for testing purposes. Ideally, these networks are similar in
structure to real world networks, but are much smaller in order to make testing
faster.
"""
from __future__ import absolute_import
from tvm import relay
from .init import create_workload, Constant
from . import layers
def get_net(input_shape=(1, 3, 24, 12), dtype="float32", wtype=None):
    """Get synthetic testing network.

    Parameters
    ----------
    input_shape : tuple, optional
        The input shape as (batch_size, channels, height, width).

    dtype : str, optional
        The data type for the input.

    wtype : str, optional
        The data type for weights. Defaults to `dtype`.

    Returns
    -------
    net : relay.Function
        The dataflow.
    """
    if wtype is None:
        wtype = dtype
    data = relay.var("data", shape=input_shape, dtype=dtype)
    # Collapse all but the last axis so the input can pass through a dense
    # layer.  dense_shape[1] equals input_shape[3], so the dense layer maps
    # the last axis onto itself and the output can be reshaped back to the
    # input's shape with reshape_like below.
    dense_shape = [-1, input_shape[3]]
    dense = relay.nn.relu(
        relay.nn.dense(
            relay.reshape(data, dense_shape),
            relay.var("dense_weight", shape=[input_shape[3], dense_shape[1]], dtype=wtype),
        )
    )
    dense = relay.reshape_like(dense, data)
    # 3x3 same-padding convolution that preserves the channel count.
    conv_shape = [input_shape[1], input_shape[1], 3, 3]
    conv = relay.nn.softmax(
        relay.nn.conv2d(
            data,
            relay.var("conv_weight", shape=conv_shape, dtype=wtype),
            padding=1,
            kernel_size=3,
        )
    )
    # Merge the two parallel branches, then bias and batch-normalize.
    added = relay.add(dense, conv)
    biased = layers.batch_norm_infer(
        relay.nn.bias_add(added, relay.var("bias", dtype=wtype)), name="batch_norm"
    )
    # Second dense/conv pair, fed from the normalized tensor.
    dense = relay.nn.relu(
        relay.nn.dense(
            relay.reshape(biased, dense_shape),
            relay.var("dense2_weight", shape=[input_shape[3], dense_shape[1]], dtype=wtype),
        )
    )
    dense = relay.reshape_like(dense, data)
    conv = relay.nn.softmax(
        relay.nn.conv2d(
            biased,
            relay.var("conv2_weight", shape=conv_shape, dtype=wtype),
            padding=1,
            kernel_size=3,
        )
    )
    added = relay.add(dense, conv)
    args = relay.analysis.free_vars(added)
    return relay.Function(args, added)
def get_workload(input_shape=(1, 3, 24, 12), dtype="float32", wtype=None):
    """Get benchmark workload for the synthetic net.

    Parameters
    ----------
    input_shape : tuple, optional
        The input shape as (batch_size, channels, height, width).

    dtype : str, optional
        The data type for the input.

    wtype : str, optional
        The data type for weights. Defaults to `dtype`.

    Returns
    -------
    mod : tvm.IRModule
        The relay module that contains a synthetic network.

    params : dict of str to NDArray
        The parameters.
    """
    # Constant() keeps the generated parameters deterministic for testing.
    net = get_net(input_shape=input_shape, dtype=dtype, wtype=wtype)
    return create_workload(net, initializer=Constant())
| 3,672 | 30.393162 | 92 | py |
tvm | tvm-main/python/tvm/relay/testing/dcgan.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""
Net of the generator of DCGAN
Adopted from:
https://github.com/tqchen/mxnet-gan/blob/main/mxgan/generator.py
Reference:
Radford, Alec, Luke Metz, and Soumith Chintala.
"Unsupervised representation learning with deep convolutional generative adversarial networks."
arXiv preprint arXiv:1511.06434 (2015).
"""
from tvm import relay
from . import layers
from .init import create_workload
def deconv2d(data, ishape, oshape, kshape, layout, name, stride=(2, 2)):
    """a deconv layer that enlarges the feature map"""
    out_h, out_w = oshape[-2], oshape[-1]
    # "Same"-style padding derived from the kernel size.
    pad_h = (kshape[0] - 1) // 2
    pad_w = (kshape[1] - 1) // 2
    # Output padding chosen so the transposed convolution reaches the target
    # spatial size exactly.
    adj_h = (out_h + 2 * pad_h - kshape[0]) % stride[0]
    adj_w = (out_w + 2 * pad_w - kshape[1]) % stride[1]
    if layout == "NCHW":
        kernel_layout = "IOHW"
    elif layout == "NHWC":
        kernel_layout = "HWOI"
    else:
        raise ValueError("Invalid layout: " + layout)
    return layers.conv2d_transpose(
        data,
        kernel_size=kshape,
        strides=stride,
        channels=oshape[0],
        padding=(pad_h, pad_w),
        output_padding=(adj_h, adj_w),
        data_layout=layout,
        kernel_layout=kernel_layout,
        name=name,
    )
def deconv2d_bn_relu(data, prefix, **kwargs):
    """a block of deconv + batch norm + relu"""
    # Epsilon is nudged just above 1e-5, matching the reference implementation.
    bn_eps = 1e-5 + 1e-12
    out = deconv2d(data, name=f"{prefix}_deconv", **kwargs)
    # Normalize along the channel axis of whichever layout was requested.
    channel_axis = kwargs.get("layout", "NCHW").index("C")
    out = layers.batch_norm_infer(
        out, epsilon=bn_eps, scale=False, axis=channel_axis, name=f"{prefix}_batch_norm"
    )
    return relay.nn.relu(out)
def get_net(
    batch_size,
    random_len=100,
    oshape=(3, 64, 64),
    ngf=128,
    code=None,
    layout="NCHW",
    dtype="float32",
):
    """get net of dcgan generator

    Maps a length-`random_len` noise vector to a 64x64 image through a dense
    layer and four stride-2 transposed-convolution stages
    (4x4 -> 8x8 -> 16x16 -> 32x32 -> 64x64).
    """
    assert oshape[-1] == 64, "Only support 64x64 image"
    assert oshape[-2] == 64, "Only support 64x64 image"
    # Use the caller-provided latent code if any, else create a "data" input.
    code = relay.var("data", dtype=dtype, shape=(batch_size, random_len)) if code is None else code
    dense_weight = relay.var("dense_weight")
    # Project the latent vector to enough features for a 4x4 x (ngf*8) map.
    dense = relay.nn.dense(code, weight=dense_weight, units=4 * 4 * ngf * 8)
    relu = relay.nn.relu(dense)
    # 4 x 4
    if layout == "NCHW":
        reshape = relay.reshape(relu, newshape=(-1, ngf * 8, 4, 4))
    elif layout == "NHWC":
        reshape = relay.reshape(relu, newshape=(-1, 4, 4, ngf * 8))
    else:
        raise ValueError("Invalid layout: " + layout)
    # 8 x 8
    dc8 = deconv2d_bn_relu(
        reshape,
        ishape=(ngf * 8, 4, 4),
        oshape=(ngf * 4, 8, 8),
        kshape=(4, 4),
        layout=layout,
        prefix="g2",
    )
    # 16x16
    dc16 = deconv2d_bn_relu(
        dc8,
        ishape=(ngf * 4, 8, 8),
        oshape=(ngf * 2, 16, 16),
        kshape=(4, 4),
        layout=layout,
        prefix="g3",
    )
    # 32x32
    dc32 = deconv2d_bn_relu(
        dc16,
        ishape=(ngf * 2, 16, 16),
        oshape=(ngf, 32, 32),
        kshape=(4, 4),
        layout=layout,
        prefix="g4",
    )
    # 64x64: final stage has no batch norm / relu; tanh maps to [-1, 1].
    dc64 = deconv2d(
        dc32,
        ishape=(ngf, 32, 32),
        oshape=oshape[-3:],
        kshape=(4, 4),
        layout=layout,
        name="g5_deconv",
    )
    tanh = relay.tanh(dc64)
    args = relay.analysis.free_vars(tanh)
    return relay.Function(args, tanh)
def get_workload(
    batch_size, oshape=(3, 64, 64), ngf=128, random_len=100, layout="NCHW", dtype="float32"
):
    """Get benchmark workload for a DCGAN generator

    Parameters
    ----------
    batch_size : int
        The batch size used in the model

    oshape : tuple, optional
        The shape of output image, layout="CHW"

    ngf: int, optional
        The number of final feature maps in the generator

    random_len : int, optional
        The length of random input

    layout: str, optional
        The layout of conv2d transpose

    dtype : str, optional
        The data type

    Returns
    -------
    mod : tvm.IRModule
        The relay module that contains a DCGAN network.

    params : dict of str to NDArray
        The parameters.
    """
    # Build the generator graph and wrap it with initialized parameters.
    generator = get_net(batch_size, random_len, oshape=oshape, ngf=ngf, layout=layout, dtype=dtype)
    return create_workload(generator)
| 5,048 | 28.354651 | 99 | py |
tvm | tvm-main/python/tvm/relay/testing/layers.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Simple Layer DSL wrapper to ease creation of neural nets."""
from tvm import relay
def batch_norm_infer(data, gamma=None, beta=None, moving_mean=None, moving_var=None, **kwargs):
    """Wrapper of batch_norm.

    This function automatically creates weights and return
    the first output(normalized result).

    Parameters
    ----------
    data : relay.Expr
        The input expression.

    gamma : relay.Expr
        The gamma scale factor.

    beta : relay.Expr
        The beta offset factor.

    moving_mean : relay.Expr
        Running mean of input,

    moving_var : relay.Expr
        Running variance of input.

    kwargs : dict
        Additional arguments.

    Returns
    -------
    result : relay.Expr
        The result.
    """
    # "name" is consumed here (it names the auto-created parameters) and must
    # not be forwarded to relay.nn.batch_norm.  pop() in one step instead of
    # the previous get()-then-pop() double lookup.
    name = kwargs.pop("name")
    if gamma is None:
        gamma = relay.var(name + "_gamma")
    if beta is None:
        beta = relay.var(name + "_beta")
    if moving_mean is None:
        moving_mean = relay.var(name + "_moving_mean")
    if moving_var is None:
        moving_var = relay.var(name + "_moving_var")
    # batch_norm returns a tuple; [0] selects the normalized output, which is
    # all that inference needs.
    return relay.nn.batch_norm(
        data, gamma=gamma, beta=beta, moving_mean=moving_mean, moving_var=moving_var, **kwargs
    )[0]
def conv2d(data, weight=None, **kwargs):
    """Wrapper of conv2d which automatically creates weights if not given.

    Parameters
    ----------
    data : relay.Expr
        The input expression.

    weight : relay.Expr
        The weight to conv2d.

    kwargs : dict
        Additional arguments.

    Returns
    -------
    result : relay.Expr
        The result.
    """
    # "name" is consumed here and must not be forwarded to relay.nn.conv2d.
    name = kwargs.pop("name")
    if weight is None:
        weight = relay.var(name + "_weight")
    return relay.nn.conv2d(data, weight, **kwargs)
def conv3d(data, weight=None, **kwargs):
    """Wrapper of conv3d which automatically creates weights if not given.

    Parameters
    ----------
    data : relay.Expr
        The input expression.

    weight : relay.Expr
        The weight to conv3d.

    kwargs : dict
        Additional arguments.

    Returns
    -------
    result : relay.Expr
        The result.
    """
    # "name" is consumed here and must not be forwarded to relay.nn.conv3d.
    name = kwargs.pop("name")
    if weight is None:
        weight = relay.var(name + "_weight")
    return relay.nn.conv3d(data, weight, **kwargs)
def conv2d_transpose(data, weight=None, **kwargs):
    """Wrapper of conv2d_transpose which automatically creates weights if not given.

    Parameters
    ----------
    data : relay.Expr
        The input expression.

    weight : relay.Expr
        The weight to conv2d_transpose.

    kwargs : dict
        Additional arguments.

    Returns
    -------
    result : relay.Expr
        The result.
    """
    # "name" is consumed here and must not be forwarded to the relay op.
    name = kwargs.pop("name")
    if weight is None:
        weight = relay.var(name + "_weight")
    return relay.nn.conv2d_transpose(data, weight, **kwargs)
def dense_add_bias(data, weight=None, bias=None, units=None, **kwargs):
    """Wrapper of dense which automatically creates weights if not given.

    Parameters
    ----------
    data : relay.Expr
        The input expression.

    weight : relay.Expr
        The weight for the dense layer.

    bias : relay.Expr
        The bias.

    units : int
        The number of output units.

    kwargs : dict
        Additional arguments.

    Returns
    -------
    result : relay.Expr
        The result.
    """
    # "name" is consumed here (it names the auto-created parameters) and must
    # not be forwarded to the relay ops.
    name = kwargs.pop("name")
    if weight is None:
        weight = relay.var(name + "_weight")
    if bias is None:
        bias = relay.var(name + "_bias")
    data = relay.nn.dense(data, weight, units, **kwargs)
    # The bias is added along the last axis (the units axis).
    data = relay.nn.bias_add(data, bias, axis=-1)
    return data
def conv_kernel_layout(data_layout, is_depthwise=False):
    """Map the data layout to corresponding kernel layout.

    Arbitrary layout is not fully supported in TOPI yet.

    Parameters
    ----------
    data_layout : str
        The data_layout, can be 'NCHW', 'NHWC'.

    is_depthwise : bool, optional
        Whether the conv is a depthwise convolution.

    Returns
    -------
    result : str
        The corresponding kernel layout.
    """
    # (regular, depthwise) kernel layouts keyed by data layout.
    layout_table = {
        "NCHW": ("OIHW", "OIHW"),
        "NHWC": ("HWIO", "HWOI"),
    }
    assert data_layout in layout_table, f"Unknown data layout {data_layout}"
    regular, depthwise = layout_table[data_layout]
    return depthwise if is_depthwise else regular
| 5,202 | 25.411168 | 95 | py |
tvm | tvm-main/python/tvm/relay/testing/inception_v3.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Inception V3, suitable for images with around 299 x 299
Reference:
Szegedy, Christian, et al. "Rethinking the Inception Architecture for Computer Vision."
arXiv preprint arXiv:1512.00567 (2015).
Adopted from https://github.com/apache/incubator-mxnet/blob/master/
example/image-classification/symbols/inception-v3.py
"""
# pylint: disable=invalid-name,missing-docstring,unused-argument, superfluous-parens
from tvm import relay
from .init import create_workload
from . import layers
def Conv(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=None, suffix=""):
    """Convolution -> batch norm (no scale) -> relu, with mxnet-style names."""
    conv = layers.conv2d(
        data=data,
        channels=int(num_filter),
        kernel_size=kernel,
        strides=stride,
        padding=pad,
        name=f"{name}{suffix}_conv1",
    )
    normed = layers.batch_norm_infer(
        data=conv, epsilon=2e-5, scale=False, name=f"{name}{suffix}_bn"
    )
    return relay.nn.relu(data=normed)
def Pooling(data, kernel, stride, pad, pool_type, name):
    """2-D pooling helper dispatching on pool_type ('max' or 'avg')."""
    common = dict(data=data, pool_size=kernel, strides=stride, padding=pad)
    if pool_type == "max":
        return relay.nn.max_pool2d(**common)
    if pool_type == "avg":
        # Average pooling counts padded elements, matching the mxnet reference.
        return relay.nn.avg_pool2d(count_include_pad=True, **common)
    raise ValueError("Invalid pooling type: " + pool_type)
def Inception7A(
    data, num_1x1, num_3x3_red, num_3x3_1, num_3x3_2, num_5x5_red, num_5x5, pool, proj, name
):
    """Inception block A: 1x1, 5x5, double-3x3 and pooling towers,
    concatenated along the channel axis."""
    tower_1x1 = Conv(data, num_1x1, name=f"{name}_conv")
    # 5x5 tower: 1x1 reduction followed by a padded 5x5 convolution.
    tower_5x5 = Conv(data, num_5x5_red, name=f"{name}_tower", suffix="_conv")
    tower_5x5 = Conv(
        tower_5x5, num_5x5, kernel=(5, 5), pad=(2, 2), name=f"{name}_tower", suffix="_conv_1"
    )
    # Double-3x3 tower: 1x1 reduction then two padded 3x3 convolutions.
    tower_3x3 = Conv(data, num_3x3_red, name=f"{name}_tower_1", suffix="_conv")
    tower_3x3 = Conv(
        tower_3x3, num_3x3_1, kernel=(3, 3), pad=(1, 1), name=f"{name}_tower_1", suffix="_conv_1"
    )
    tower_3x3 = Conv(
        tower_3x3, num_3x3_2, kernel=(3, 3), pad=(1, 1), name=f"{name}_tower_1", suffix="_conv_2"
    )
    # Pooling tower: 3x3 stride-1 pooling followed by a 1x1 projection.
    pooling = Pooling(
        data=data,
        kernel=(3, 3),
        stride=(1, 1),
        pad=(1, 1),
        pool_type=pool,
        name=f"{pool}_pool_{name}_pool",
    )
    cproj = Conv(pooling, proj, name=f"{name}_tower_2", suffix="_conv")
    concat = relay.concatenate((tower_1x1, tower_5x5, tower_3x3, cproj), axis=1)
    return concat
# First Downsample
def Inception7B(data, num_3x3, num_d3x3_red, num_d3x3_1, num_d3x3_2, pool, name):
    """Inception block B (first downsample): stride-2 conv towers plus a
    stride-2 max pool, halving the spatial resolution."""
    tower_3x3 = Conv(data, num_3x3, kernel=(3, 3), pad=(0, 0), stride=(2, 2), name=f"{name}_conv")
    # Double-3x3 tower: 1x1 reduction, padded 3x3, then stride-2 3x3.
    tower_d3x3 = Conv(data, num_d3x3_red, name=f"{name}_tower", suffix="_conv")
    tower_d3x3 = Conv(
        tower_d3x3,
        num_d3x3_1,
        kernel=(3, 3),
        pad=(1, 1),
        stride=(1, 1),
        name=f"{name}_tower",
        suffix="_conv_1",
    )
    tower_d3x3 = Conv(
        tower_d3x3,
        num_d3x3_2,
        kernel=(3, 3),
        pad=(0, 0),
        stride=(2, 2),
        name=f"{name}_tower",
        suffix="_conv_2",
    )
    # Max pool branch always downsamples; `pool` is unused by the pooling op
    # itself here (max is hard-coded, matching the reference implementation).
    pooling = Pooling(
        data=data,
        kernel=(3, 3),
        stride=(2, 2),
        pad=(0, 0),
        pool_type="max",
        name=f"max_pool_{name}_pool",
    )
    concat = relay.concatenate((tower_3x3, tower_d3x3, pooling), axis=1)
    return concat
def Inception7C(
    data,
    num_1x1,
    num_d7_red,
    num_d7_1,
    num_d7_2,
    num_q7_red,
    num_q7_1,
    num_q7_2,
    num_q7_3,
    num_q7_4,
    pool,
    proj,
    name,
):
    """Inception block C: factorized 7x7 convolutions (1x7 / 7x1 pairs) in a
    double and a quadruple tower, plus 1x1 and pooling towers, concatenated
    along the channel axis."""
    tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=f"{name}_conv")
    # Double-7 tower: 1x1 reduction, then 1x7 followed by 7x1.
    tower_d7 = Conv(data=data, num_filter=num_d7_red, name=f"{name}_tower", suffix="_conv")
    tower_d7 = Conv(
        data=tower_d7,
        num_filter=num_d7_1,
        kernel=(1, 7),
        pad=(0, 3),
        name=f"{name}_tower",
        suffix="_conv_1",
    )
    tower_d7 = Conv(
        data=tower_d7,
        num_filter=num_d7_2,
        kernel=(7, 1),
        pad=(3, 0),
        name=f"{name}_tower",
        suffix="_conv_2",
    )
    # Quadruple-7 tower: 1x1 reduction, then alternating 7x1 / 1x7 pairs.
    tower_q7 = Conv(data=data, num_filter=num_q7_red, name=f"{name}_tower_1", suffix="_conv")
    tower_q7 = Conv(
        data=tower_q7,
        num_filter=num_q7_1,
        kernel=(7, 1),
        pad=(3, 0),
        name=f"{name}_tower_1",
        suffix="_conv_1",
    )
    tower_q7 = Conv(
        data=tower_q7,
        num_filter=num_q7_2,
        kernel=(1, 7),
        pad=(0, 3),
        name=f"{name}_tower_1",
        suffix="_conv_2",
    )
    tower_q7 = Conv(
        data=tower_q7,
        num_filter=num_q7_3,
        kernel=(7, 1),
        pad=(3, 0),
        name=f"{name}_tower_1",
        suffix="_conv_3",
    )
    tower_q7 = Conv(
        data=tower_q7,
        num_filter=num_q7_4,
        kernel=(1, 7),
        pad=(0, 3),
        name=f"{name}_tower_1",
        suffix="_conv_4",
    )
    # Pooling tower: 3x3 stride-1 pooling followed by a 1x1 projection.
    pooling = Pooling(
        data=data,
        kernel=(3, 3),
        stride=(1, 1),
        pad=(1, 1),
        pool_type=pool,
        name=f"{pool}_pool_{name}_pool",
    )
    cproj = Conv(
        data=pooling, num_filter=proj, kernel=(1, 1), name=f"{name}_tower_2", suffix="_conv"
    )
    # concat
    concat = relay.concatenate((tower_1x1, tower_d7, tower_q7, cproj), axis=1)
    return concat
def Inception7D(
    data, num_3x3_red, num_3x3, num_d7_3x3_red, num_d7_1, num_d7_2, num_d7_3x3, pool, name
):
    """Inception block D (second downsample): stride-2 conv towers plus a
    stride-2 pool, halving the spatial resolution."""
    # 3x3 tower: 1x1 reduction then stride-2 3x3.
    tower_3x3 = Conv(data=data, num_filter=num_3x3_red, name=f"{name}_tower", suffix="_conv")
    tower_3x3 = Conv(
        data=tower_3x3,
        num_filter=num_3x3,
        kernel=(3, 3),
        pad=(0, 0),
        stride=(2, 2),
        name=f"{name}_tower",
        suffix="_conv_1",
    )
    # 7x7-factorized tower: 1x1 reduction, 1x7, 7x1, then stride-2 3x3.
    tower_d7_3x3 = Conv(
        data=data, num_filter=num_d7_3x3_red, name=f"{name}_tower_1", suffix="_conv"
    )
    tower_d7_3x3 = Conv(
        data=tower_d7_3x3,
        num_filter=num_d7_1,
        kernel=(1, 7),
        pad=(0, 3),
        name=f"{name}_tower_1",
        suffix="_conv_1",
    )
    tower_d7_3x3 = Conv(
        data=tower_d7_3x3,
        num_filter=num_d7_2,
        kernel=(7, 1),
        pad=(3, 0),
        name=f"{name}_tower_1",
        suffix="_conv_2",
    )
    tower_d7_3x3 = Conv(
        data=tower_d7_3x3,
        num_filter=num_d7_3x3,
        kernel=(3, 3),
        stride=(2, 2),
        name=f"{name}_tower_1",
        suffix="_conv_3",
    )
    pooling = Pooling(
        data=data,
        kernel=(3, 3),
        stride=(2, 2),
        pool_type=pool,
        pad=(0, 0),
        name=f"{pool}_pool_{name}_pool",
    )
    # concat
    concat = relay.concatenate((tower_3x3, tower_d7_3x3, pooling), axis=1)
    return concat
def Inception7E(
    data,
    num_1x1,
    num_d3_red,
    num_d3_1,
    num_d3_2,
    num_3x3_d3_red,
    num_3x3,
    num_3x3_d3_1,
    num_3x3_d3_2,
    pool,
    proj,
    name,
):
    """Inception block E: towers whose 3x3 convolutions are split into
    parallel 1x3 and 3x1 branches, all concatenated along the channel axis."""
    tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=f"{name}_conv")
    # Split-3 tower: 1x1 reduction, then parallel 1x3 and 3x1 branches.
    tower_d3 = Conv(data=data, num_filter=num_d3_red, name=f"{name}_tower", suffix="_conv")
    tower_d3_a = Conv(
        data=tower_d3,
        num_filter=num_d3_1,
        kernel=(1, 3),
        pad=(0, 1),
        name=f"{name}_tower",
        suffix="_mixed_conv",
    )
    tower_d3_b = Conv(
        data=tower_d3,
        num_filter=num_d3_2,
        kernel=(3, 1),
        pad=(1, 0),
        name=f"{name}_tower",
        suffix="_mixed_conv_1",
    )
    # 3x3-then-split tower: 1x1 reduction, padded 3x3, then the same
    # parallel 1x3 / 3x1 split.
    tower_3x3_d3 = Conv(
        data=data, num_filter=num_3x3_d3_red, name=f"{name}_tower_1", suffix="_conv"
    )
    tower_3x3_d3 = Conv(
        data=tower_3x3_d3,
        num_filter=num_3x3,
        kernel=(3, 3),
        pad=(1, 1),
        name=f"{name}_tower_1",
        suffix="_conv_1",
    )
    tower_3x3_d3_a = Conv(
        data=tower_3x3_d3,
        num_filter=num_3x3_d3_1,
        kernel=(1, 3),
        pad=(0, 1),
        name=f"{name}_tower_1",
        suffix="_mixed_conv",
    )
    tower_3x3_d3_b = Conv(
        data=tower_3x3_d3,
        num_filter=num_3x3_d3_2,
        kernel=(3, 1),
        pad=(1, 0),
        name=f"{name}_tower_1",
        suffix="_mixed_conv_1",
    )
    # Pooling tower: 3x3 stride-1 pooling followed by a 1x1 projection.
    pooling = Pooling(
        data=data,
        kernel=(3, 3),
        stride=(1, 1),
        pad=(1, 1),
        pool_type=pool,
        name=f"{pool}_pool_{name}_pool",
    )
    cproj = Conv(
        data=pooling, num_filter=proj, kernel=(1, 1), name=f"{name}_tower_2", suffix="_conv"
    )
    # concat
    concat = relay.concatenate(
        (tower_1x1, tower_d3_a, tower_d3_b, tower_3x3_d3_a, tower_3x3_d3_b, cproj), axis=1
    )
    return concat
def get_net(batch_size, num_classes, image_shape, dtype):
    """Get network a Inception v3 network.

    batch_size : int
        The batch size used in the model

    num_classes : int, optional
        Number of classes

    image_shape : tuple, optional
        The input image shape

    dtype : str, optional
        The data type

    Returns
    -------
    net : relay.Function
        The dataflow.
    """
    data_shape = (batch_size,) + image_shape
    data = relay.var("data", shape=data_shape, dtype=dtype)
    # stage 1
    conv = Conv(data, 32, kernel=(3, 3), stride=(2, 2), name="conv")
    conv_1 = Conv(conv, 32, kernel=(3, 3), name="conv_1")
    conv_2 = Conv(conv_1, 64, kernel=(3, 3), pad=(1, 1), name="conv_2")
    pool = Pooling(
        data=conv_2, kernel=(3, 3), stride=(2, 2), pool_type="max", pad=(0, 0), name="pool"
    )
    # stage 2
    conv_3 = Conv(pool, 80, kernel=(1, 1), name="conv_3")
    conv_4 = Conv(conv_3, 192, kernel=(3, 3), name="conv_4")
    pool1 = Pooling(
        data=conv_4, kernel=(3, 3), stride=(2, 2), pool_type="max", pad=(0, 0), name="pool1"
    )
    # stage 3: three A blocks then the first downsample (B block).
    in3a = Inception7A(pool1, 64, 64, 96, 96, 48, 64, "avg", 32, "mixed")
    in3b = Inception7A(in3a, 64, 64, 96, 96, 48, 64, "avg", 64, "mixed_1")
    in3c = Inception7A(in3b, 64, 64, 96, 96, 48, 64, "avg", 64, "mixed_2")
    in3d = Inception7B(in3c, 384, 64, 96, 96, "max", "mixed_3")
    # stage 4: four C blocks then the second downsample (D block).
    in4a = Inception7C(in3d, 192, 128, 128, 192, 128, 128, 128, 128, 192, "avg", 192, "mixed_4")
    in4b = Inception7C(in4a, 192, 160, 160, 192, 160, 160, 160, 160, 192, "avg", 192, "mixed_5")
    in4c = Inception7C(in4b, 192, 160, 160, 192, 160, 160, 160, 160, 192, "avg", 192, "mixed_6")
    in4d = Inception7C(in4c, 192, 192, 192, 192, 192, 192, 192, 192, 192, "avg", 192, "mixed_7")
    in4e = Inception7D(in4d, 192, 320, 192, 192, 192, 192, "max", "mixed_8")
    # stage 5: two E blocks.
    in5a = Inception7E(in4e, 320, 384, 384, 384, 448, 384, 384, 384, "avg", 192, "mixed_9")
    in5b = Inception7E(in5a, 320, 384, 384, 384, 448, 384, 384, 384, "max", 192, "mixed_10")
    # Global 8x8 average pool then the classifier head.
    pool = Pooling(
        data=in5b, kernel=(8, 8), stride=(1, 1), pool_type="avg", pad=(0, 0), name="global_pool"
    )
    flatten = relay.nn.batch_flatten(pool)
    fc1 = relay.nn.dense(flatten, relay.var("fc1_weight"), units=num_classes)
    # NOTE(review): the bias is named "fc2_bias" while the weight is
    # "fc1_weight" — looks like a naming slip; renaming would change the
    # params-dict key, so confirm with downstream users before fixing.
    fc1 = relay.nn.bias_add(fc1, relay.var("fc2_bias"), axis=-1)
    inception_v3 = relay.nn.softmax(data=fc1)
    args = relay.analysis.free_vars(inception_v3)
    return relay.Function(args, inception_v3)
def get_workload(batch_size=1, num_classes=1000, image_shape=(3, 299, 299), dtype="float32"):
    """Get benchmark workload for InceptionV3

    Parameters
    ----------
    batch_size : int
        The batch size used in the model

    num_classes : int, optional
        Number of classes

    image_shape : tuple, optional
        The input image shape

    dtype : str, optional
        The data type

    Returns
    -------
    mod : tvm.IRModule
        The relay module that contains an Inception V3 network.

    params : dict of str to NDArray
        The parameters.
    """
    # Build the dataflow graph then wrap it with initialized parameters.
    inception = get_net(batch_size, num_classes, image_shape, dtype)
    return create_workload(inception)
| 12,575 | 28.521127 | 98 | py |
tvm | tvm-main/python/tvm/relay/testing/init.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Initializer of parameters."""
from functools import reduce
import numpy as np
import tvm
from tvm import relay
class Initializer(object):
    """The base class of an initializer."""

    def __init__(self, **kwargs):
        self._kwargs = kwargs

    def __call__(self, desc, arr):
        """Initialize an array

        Parameters
        ----------
        desc : str
            Initialization pattern descriptor.

        arr : NDArray
            The array to be initialized.
        """
        # Dispatch on the descriptor suffix; the tuple order mirrors the
        # original if/elif chain, so e.g. "moving_var" hits the "var" rule.
        dispatch = (
            ("weight", self._init_weight),
            ("bias", self._init_bias),
            ("gamma", self._init_gamma),
            ("beta", self._init_beta),
            ("mean", self._init_mean),
            ("var", self._init_var),
        )
        for suffix, handler in dispatch:
            if desc.endswith(suffix):
                handler(desc, arr)
                return
        self._init_default(desc, arr)

    def _init_bias(self, _, arr):
        arr[:] = 0.0

    def _init_gamma(self, _, arr):
        arr[:] = 1.0

    def _init_beta(self, _, arr):
        arr[:] = 0.0

    def _init_mean(self, _, arr):
        arr[:] = 0.0

    def _init_var(self, _, arr):
        arr[:] = 1.0

    def _init_weight(self, name, arr):
        """Abstract method to Initialize weight."""
        raise NotImplementedError("Must override it")

    def _init_default(self, name, _):
        raise ValueError(
            f"Unknown initialization pattern for {name}. "
            f"Default initialization is now limited to "
            f'"weight", "bias", "gamma" (1.0), and "beta" (0.0).'
            f"Please use mx.sym.Variable(init=mx.init.*) to set initialization pattern"
        )
class Xavier(Initializer):
    """ "Xavier" initialization for weights

    Parameters
    ----------
    rnd_type: str, optional
        Random generator type, can be ``'gaussian'`` or ``'uniform'``.

    factor_type: str, optional
        Can be ``'avg'``, ``'in'``, or ``'out'``.

    magnitude: float, optional
        Scale of random number.
    """

    def __init__(self, rnd_type="uniform", factor_type="avg", magnitude=3):
        super(Xavier, self).__init__(
            rnd_type=rnd_type, factor_type=factor_type, magnitude=magnitude
        )
        self.rnd_type = rnd_type
        self.factor_type = factor_type
        self.magnitude = float(magnitude)

    def _init_weight(self, name, arr):
        shape = arr.shape
        if len(shape) < 2:
            raise ValueError(
                f"Xavier initializer cannot be applied to vector {name}. It requires at least 2D."
            )
        # Spatial extent: product of all dims beyond the first two (1.0 for 2D).
        hw_scale = np.prod(shape[2:]) if len(shape) > 2 else 1.0
        fan_in = shape[1] * hw_scale
        fan_out = shape[0] * hw_scale
        if self.factor_type == "avg":
            factor = (fan_in + fan_out) / 2.0
        elif self.factor_type == "in":
            factor = fan_in
        elif self.factor_type == "out":
            factor = fan_out
        else:
            raise ValueError("Incorrect factor type")
        # Hack for mobilenet, because there is less connectivity
        if "depthwise" in name:
            factor = hw_scale
        scale = np.sqrt(self.magnitude / factor)
        if self.rnd_type != "uniform":
            raise ValueError("Unknown random type")
        arr[:] = np.random.uniform(-scale, scale, size=arr.shape)
class Constant(Initializer):
    """Constant initialization of weights. Sum of weights in the matrix is 1."""

    def _init_weight(self, name, arr):
        # Give every element an equal share so the whole array sums to one.
        total = 1
        for dim in arr.shape:
            total *= dim
        arr[:] = 1.0 / total
def create_workload(net, initializer=None, seed=0):
    """Helper function to create benchmark image classification workload.

    Parameters
    ----------
    net : tvm.relay.Function
        The selected function of the network.

    initializer : Initializer
        The initializer used

    seed : int
        The seed used in initialization.

    Returns
    -------
    mod : tvm.IRModule
        The created relay module.

    params : dict of str to NDArray
        The parameters.
    """
    mod = tvm.IRModule.from_expr(net)
    mod = relay.transform.InferType()(mod)
    shape_dict = {v.name_hint: v.checked_type for v in mod["main"].params}
    np.random.seed(seed)
    init = initializer if initializer else Xavier()
    params = {}
    for name, ty in shape_dict.items():
        # The "data" placeholder is supplied at run time, not a parameter.
        if name == "data":
            continue
        value = np.zeros(ty.concrete_shape).astype(ty.dtype)
        init(name, value)
        params[name] = tvm.nd.array(value, device=tvm.cpu(0))
    return mod, params
| 5,566 | 29.927778 | 98 | py |
tvm | tvm-main/python/tvm/relay/testing/tflite.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common utilities for creating TFLite models"""
from distutils.version import LooseVersion
import numpy as np
import pytest
import tflite.Model # pylint: disable=wrong-import-position
import tensorflow as tf # pylint: disable=wrong-import-position
import tvm
pytest.importorskip("tflite")
pytest.importorskip("tensorflow")
class TFLiteModel:
    """Creates TFLite Model and facilitates reference data generation.

    Holds the serialized TFLite flatbuffer (``serial_model``) together with
    the per-input shape/dtype dictionaries needed to import the model into
    Relay and to generate reference input/output data.
    """

    def __init__(self, dtype):
        self.serial_model = None  # This is what TFLite convert() provides
        self.dtype = dtype  # This is the dtype of graph inputs
        self.shape_dict = {}
        self.dtype_dict = {}

    def create_conv2d_single(self, kernel_shape, strides, padding, dilation, activation):
        """Returns tf.function that creates TFLite Conv2d layer"""

        @tf.function
        def conv2d_single_function(ifm_tensor):
            """Returns TFLite Conv2d layer"""
            op = tf.nn.conv2d(
                ifm_tensor,
                filters=tf.constant(
                    np.random.uniform(size=[kernel_shape[0], kernel_shape[1], 3, 3]),
                    dtype=tf.float32,
                ),
                strides=[1, strides[0], strides[1], 1],
                padding=padding,
                dilations=dilation,
            )
            if activation == "RELU":
                op = tf.nn.relu(op)
            elif activation == "NONE":
                pass
            else:
                assert False, f"Unsupported activation {activation}"
            return op

        return conv2d_single_function

    def load_from_file(self, model_file, shapes):
        """Load tflite model from a tflite file"""
        for i, shape in enumerate(shapes):
            input_name = "input_" + str(i)
            self.shape_dict.update({input_name: shape})
            self.dtype_dict.update({input_name: self.dtype})
        with open(model_file, "rb") as f:
            self.serial_model = f.read()

    def create_tflite_model(self, tfl_function, shapes, ranges=None):
        """Creates TFLite serial graph"""
        tensor_specs = []
        for i, shape in enumerate(shapes):
            input_name = "input_" + str(i)
            self.shape_dict.update({input_name: shape})
            self.dtype_dict.update({input_name: self.dtype})
            tensor_specs.append(tf.TensorSpec(shape, dtype=tf.float32, name=input_name))
        concrete_func = tfl_function.get_concrete_function(*tensor_specs)

        # Default calibration range is [0, 1) for every input.
        if not ranges:
            ranges = [(0, 1) for _ in shapes]

        def representative_dataset():
            for _ in range(100):
                inputs = []
                for i, shape in enumerate(shapes):
                    data = np.random.uniform(
                        low=ranges[i][0], high=ranges[i][1], size=tuple(shape)
                    ).astype("float32")
                    inputs.append(data)
                yield inputs

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        self.serial_model = converter.convert()

    def convert_to_relay(self):
        """Converts TFLite serialized graph into Relay"""
        assert self.serial_model is not None, "TFLite model is empty!"
        tflite_model = tflite.Model.Model.GetRootAsModel(self.serial_model, 0)
        relay_module, relay_params = tvm.relay.frontend.from_tflite(
            tflite_model, self.shape_dict, self.dtype_dict
        )
        return relay_module, relay_params

    def generate_randomized_input_data(self, seed, shape, dtype):
        """Generates randomized input numpy arrays based on shape and dtype."""
        random_state = np.random.RandomState(seed)
        random_data = None
        if dtype == np.float32:
            # Bug fix: this branch previously referenced an undefined name
            # ``size`` (NameError); the requested ``shape`` must be used.
            random_data = random_state.uniform(-1, 1, shape).astype(dtype)
        else:
            low = np.iinfo(dtype).min
            high = np.iinfo(dtype).max + 1
            random_data = random_state.randint(low, high, shape, dtype)
        return random_data

    # pylint: disable=import-outside-toplevel
    def generate_reference_data(self):
        """
        This method uses TFLite reference kernels to generate reference output.
        It returns randomized inputs and reference outputs.
        """
        assert self.serial_model is not None, "TFLite model was not created."

        output_tolerance = None
        if tf.__version__ < LooseVersion("2.5.0"):
            output_tolerance = 1
            interpreter = tf.lite.Interpreter(model_content=self.serial_model)
        else:
            output_tolerance = 0
            interpreter = tf.lite.Interpreter(
                model_content=self.serial_model,
                experimental_op_resolver_type=tf.lite.experimental.OpResolverType.BUILTIN_REF,
                experimental_preserve_all_tensors=False,
            )

        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()

        # Generate predictable randomized input
        seed = 0
        input_data = {}
        for input_detail in input_details:
            input_values = self.generate_randomized_input_data(
                seed, input_detail["shape"], input_detail["dtype"]
            )
            interpreter.set_tensor(input_detail["index"], input_values)
            input_data.update({input_detail["name"]: input_values})

        interpreter.invoke()

        # Obtain the expected output from interpreter
        expected_output_data = {}
        for output_detail in output_details:
            expected_output_data.update(
                {output_detail["name"]: interpreter.get_tensor(output_detail["index"])}
            )

        return input_data, expected_output_data, output_tolerance
| 6,889 | 39.05814 | 94 | py |
tvm | tvm-main/python/tvm/relay/testing/temp_op_attr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Defines a TempOpAttr class that allows temporarily changing an attr of the
operator to allow unit testing. This is useful for AlterOpLayout and Legalize
tests."""
from tvm import relay
class TempOpAttr(object):
    """Temporarily changes the attr of an op."""

    def __init__(self, op_name, attr_key, attr_value):
        """Saves the required info for RAII pattern usage.

        Parameters
        ----------
        op_name : str
            The op name.

        attr_key : str
            The attribute name.

        attr_value : object
            The attribute value.

        Examples
        --------
        .. code-block:: python

            # Temporarily update FTVMAlterOpLayout to a user-defined packed function.
            # After the test is finished, the attr value will be set back to the original value.

            with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
                my_mod = relay.transform.AlterOpLayout()(my_mod)

        """
        self.op = relay.op.get(op_name)
        self.attr_key = attr_key
        self.attr_value = attr_value

    def __enter__(self):
        key = self.attr_key
        # Remember the current value so it can be reinstated on exit.
        self.older_attr = self.op.get_attr(key)
        self.op.reset_attr(key)
        self.op.set_attr(key, self.attr_value)
        return self

    def __exit__(self, ptype, value, trace):
        key = self.attr_key
        # Drop the temporary value; restore the saved one if there was any.
        self.op.reset_attr(key)
        if self.older_attr:
            self.op.set_attr(key, self.older_attr)
| 2,251 | 33.121212 | 92 | py |
tvm | tvm-main/python/tvm/relay/testing/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Utilities for testing and benchmarks"""
from __future__ import absolute_import as _abs
import collections
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm.relay import op
from tvm.relay.prelude import Prelude
from tvm.testing import enabled_targets
from . import mlp
from . import resnet
from . import resnet_3d
from . import dqn
from . import dcgan
from . import mobilenet
from . import lstm
from . import inception_v3
from . import squeezenet
from . import vgg
from . import densenet
from . import yolo_detection
from . import temp_op_attr
from . import synthetic
from .init import create_workload
from .nat import count, make_nat_value, make_nat_expr
from .py_converter import to_python, run_as_python
from ..transform import gradient
def run_opt_pass(expr, opt_pass, import_prelude=False):
    """Wrap ``expr`` in a module, run ``opt_pass`` on it, and return the result.

    Returns the transformed "main" function when ``expr`` is a
    relay.Function, otherwise the body of the transformed "main".
    """
    assert isinstance(opt_pass, tvm.transform.Pass)
    mod = tvm.IRModule.from_expr(expr)
    if import_prelude:
        Prelude(mod)
    mod = relay.transform.InferType()(mod)
    mod = opt_pass(mod)
    entry = mod["main"]
    if isinstance(expr, relay.Function):
        return entry
    return entry.body
def run_infer_type(expr):
    """Run type inference on ``expr`` and return the typed result."""
    infer = relay.transform.InferType()
    return run_opt_pass(expr, infer)
def _np_randn_from_type(t, scale=1, mean=0):
res = mean + (scale * np.random.randn(*(int(d) for d in t.shape)))
# if t.shape == (), then randn returns a scalar so we need to wrap for dtype conversion
if np.isscalar(res):
res = np.array(res)
return res.astype(t.dtype)
def check_grad(
    func,
    inputs=None,
    test_inputs=None,
    eps=1e-6,
    atol=1e-5,
    rtol=1e-3,
    scale=None,
    mean=0,
    mode="higher_order",
    target_devices=None,
    executor_kind="debug",
):
    """Perform numerical gradient checking given a relay function.

    Compare analytical gradients to numerical gradients derived from two-sided approximation. Note
    that this test may fail if your function input types are not of high enough precision.

    Parameters
    ----------
    func : tvm.relay.Function
        The relay function to test.

    inputs: List[np.array]
        Optional user-provided input parameters to use. If not given, will generate random normal
        inputs scaled to be close to the chosen epsilon value to avoid numerical precision loss.

    test_inputs: List[np.array]
        The inputs to test for gradient matching. Useful in cases where some inputs are not
        differentiable, such as symbolic inputs to dynamic ops. If not given, all inputs are
        tested. Entries must alias (be the same objects as) entries of ``inputs``,
        since the matching below is done by identity.

    eps: float
        The epsilon value to use for computing numerical gradient approximation.

    atol: float
        The absolute tolerance on difference between numerical and analytical gradients. Note that
        this needs to be scaled appropriately relative to the chosen eps and inputs.

    rtol: float
        The relative tolerance on difference between numerical and analytical gradients. Note that
        this needs to be scaled appropriately relative to the chosen eps.

    scale: float
        The standard deviation of the inputs.

    mean: float
        The mean of the inputs.

    mode: str
        The mode passed to ``relay.transform.gradient`` when building the backward
        function ("higher_order" by default).

    target_devices: Optional[List[Tuple[tvm.target.Target, tvm.runtime.Device]]]
        A list of targets/devices on which the gradient should be
        tested. If not specified, will default to `tvm.testing.enabled_targets()`.

    executor_kind: str
        The kind of executor used to evaluate the forward and backward functions
        ("debug" by default).
    """
    fwd_func = run_infer_type(func)
    bwd_func = run_infer_type(gradient(fwd_func, mode=mode))
    bwd_func = run_opt_pass(bwd_func, relay.transform.Legalize())
    if scale is None:
        scale = 10 * eps
    if inputs is None:
        params = fwd_func.params
        # Generate random inputs on the same scale as epsilon to avoid numerical precision loss.
        inputs = [_np_randn_from_type(x.checked_type, scale=scale, mean=mean) for x in params]
    if test_inputs is None:
        test_inputs = inputs
    if target_devices is None:
        target_devices = enabled_targets()
    for target, dev in target_devices:
        # Eval the backward and forward functions
        # TODO(mbs): Evaluate a pair of functions so can share preparation between them.
        bwd_func_compiled = relay.create_executor(
            executor_kind, device=dev, target=target
        ).evaluate(bwd_func)
        fwd_func_compiled = relay.create_executor(
            executor_kind, device=dev, target=target
        ).evaluate(fwd_func)
        # Get analytic gradients.
        _, grads = bwd_func_compiled(*inputs)
        grads = [grad.numpy().astype("float64") for grad in grads]
        # Throw out gradients we aren't testing
        if inputs != test_inputs:
            tmp = []
            # find the gradient that corresponds to every test input
            for test_input in test_inputs:
                for i, grad in enumerate(grads):
                    # identity comparison: test inputs are expected to alias
                    # entries of `inputs` (see docstring)
                    if inputs[i] is test_input:
                        tmp.append(grad)
                        break
            grads = tmp
        assert len(grads) > 0, "You must test at least one gradient."
        # Get numeric gradients for each dimension of each param, using two-sided approximation.
        approx_grads = []
        for x in test_inputs:
            approx_grad = np.zeros(x.shape)
            for i in np.ndindex(*x.shape):
                # perturb one element in place, evaluate, then restore it
                x_i = x[i]
                x[i] = x_i + eps
                fwd_plus = fwd_func_compiled(*inputs).numpy().astype("float64")
                x[i] = x_i - eps
                fwd_minus = fwd_func_compiled(*inputs).numpy().astype("float64")
                x[i] = x_i
                approx_grad[i] = np.sum((fwd_plus - fwd_minus) / (2 * eps))
            approx_grads.append(approx_grad)
        # Compare gradients by checking that relative difference is below tolerance.
        for grad, approx_grad in zip(grads, approx_grads):
            np.testing.assert_allclose(grad, approx_grad, atol=atol, rtol=rtol)
def rand(dtype, *shape):
    """Return a tvm.nd.array of the given dtype/shape filled with uniform randoms."""
    values = np.random.rand(*shape).astype(dtype)
    return tvm.nd.array(values)
def count_ops(expr):
    """count number of times a given op is called in the graph"""

    class _OpCounter(tvm.relay.ExprVisitor):
        """Visitor that tallies the name of every call operator it encounters."""

        def visit_call(self, call):
            if hasattr(call, "op"):
                self.node_counter[call.op.name] += 1
            return super().visit_call(call)

        def count(self, expr):
            self.node_set = {}
            self.node_counter = collections.Counter()
            self.visit(expr)
            return self.node_counter

    return _OpCounter().count(expr)
| 7,367 | 33.429907 | 98 | py |
tvm | tvm-main/python/tvm/relay/testing/resnet_3d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Network definition of 3D ResNet for Action Recognition (CVPR 2018)
Reference : https://github.com/kenshohara/3D-ResNets-PyTorch
"""
# pylint: disable=unused-argument
from tvm import relay
from .init import create_workload
from . import layers
def residual_unit(
    data,
    num_filter,
    stride,
    dim_match,
    name,
    bottle_neck=True,
    data_layout="NCDHW",
    kernel_layout="OIDHW",
):
    """Return ResNet Unit symbol for building ResNet

    Parameters
    ----------
    data : str
        Input data

    num_filter : int
        Number of output channels

    stride : tuple
        Stride used in convolution

    dim_match : bool
        True means channel number between input and output is the same,
        otherwise means differ

    name : str
        Base name of the operators

    bottle_neck : bool
        If True, use the 1x1x1 -> 3x3x3 -> 1x1x1 bottleneck form of the
        unit; otherwise use the plain two-convolution form.

    data_layout : str
        Layout of the data tensors (e.g. "NCDHW").

    kernel_layout : str
        Layout of the convolution kernels (e.g. "OIDHW").

    Returns
    -------
    result : relay.Expr
        Sum of the residual branch and the shortcut branch.
    """
    if bottle_neck:
        bn1 = layers.batch_norm_infer(data=data, epsilon=2e-5, name=name + "_bn1")
        act1 = relay.nn.relu(data=bn1)
        # 1x1x1 conv reduces channels to a quarter before the 3x3x3 conv
        conv1 = layers.conv3d(
            data=act1,
            channels=int(num_filter * 0.25),
            kernel_size=(1, 1, 1),
            strides=stride,
            padding=(0, 0, 0),
            name=name + "_conv1",
            data_layout=data_layout,
            kernel_layout=kernel_layout,
        )
        bn2 = layers.batch_norm_infer(data=conv1, epsilon=2e-5, name=name + "_bn2")
        act2 = relay.nn.relu(data=bn2)
        conv2 = layers.conv3d(
            data=act2,
            channels=int(num_filter * 0.25),
            kernel_size=(3, 3, 3),
            strides=(1, 1, 1),
            padding=(1, 1, 1),
            name=name + "_conv2",
            data_layout=data_layout,
            kernel_layout=kernel_layout,
        )
        bn3 = layers.batch_norm_infer(data=conv2, epsilon=2e-5, name=name + "_bn3")
        act3 = relay.nn.relu(data=bn3)
        # 1x1x1 conv expands back to the full channel count
        conv3 = layers.conv3d(
            data=act3,
            channels=num_filter,
            kernel_size=(1, 1, 1),
            strides=(1, 1, 1),
            padding=(0, 0, 0),
            name=name + "_conv3",
            data_layout=data_layout,
            kernel_layout=kernel_layout,
        )
        if dim_match:
            shortcut = data
        else:
            # project the (post BN/ReLU) input to the new channel count/stride
            shortcut = layers.conv3d(
                data=act1,
                channels=num_filter,
                kernel_size=(1, 1, 1),
                strides=stride,
                name=name + "_sc",
                data_layout=data_layout,
                kernel_layout=kernel_layout,
            )
        return relay.add(conv3, shortcut)
    # plain (non-bottleneck) unit: two 3x3x3 convolutions
    bn1 = layers.batch_norm_infer(data=data, epsilon=2e-5, name=name + "_bn1")
    act1 = relay.nn.relu(data=bn1)
    conv1 = layers.conv3d(
        data=act1,
        channels=num_filter,
        kernel_size=(3, 3, 3),
        strides=stride,
        padding=(1, 1, 1),
        name=name + "_conv1",
        data_layout=data_layout,
        kernel_layout=kernel_layout,
    )
    bn2 = layers.batch_norm_infer(data=conv1, epsilon=2e-5, name=name + "_bn2")
    act2 = relay.nn.relu(data=bn2)
    conv2 = layers.conv3d(
        data=act2,
        channels=num_filter,
        kernel_size=(3, 3, 3),
        strides=(1, 1, 1),
        padding=(1, 1, 1),
        name=name + "_conv2",
        data_layout=data_layout,
        kernel_layout=kernel_layout,
    )
    if dim_match:
        shortcut = data
    else:
        shortcut = layers.conv3d(
            data=act1,
            channels=num_filter,
            kernel_size=(1, 1, 1),
            strides=stride,
            name=name + "_sc",
            data_layout=data_layout,
            kernel_layout=kernel_layout,
        )
    return relay.add(conv2, shortcut)
def resnet(
    units,
    num_stages,
    filter_list,
    num_classes,
    data_shape,
    bottle_neck=True,
    layout="NCDHW",
    dtype="float32",
):
    """Return ResNet Program.

    Parameters
    ----------
    units : list
        Number of units in each stage

    num_stages : int
        Number of stages

    filter_list : list
        Channel size of each stage

    num_classes : int
        Output size of symbol

    data_shape : tuple of int.
        The shape of input data.

    bottle_neck : bool
        Whether apply bottleneck transformation.

    layout: str
        The data layout for conv3d

    dtype : str
        The global data type.

    Returns
    -------
    net : relay.Function
        The constructed network, ending in a softmax classifier.
    """
    data_layout = layout
    kernel_layout = "OIDHW" if layout == "NCDHW" else "DHWIO"
    num_unit = len(units)
    assert num_unit == num_stages
    data = relay.var("data", shape=data_shape, dtype=dtype)
    data = layers.batch_norm_infer(data=data, epsilon=2e-5, scale=False, name="bn_data")
    # `data_shape` includes the batch dim; pick the H axis for the layout.
    if layout == "NCDHW":
        (_, _, _, height, _) = data_shape
    else:
        (_, _, height, _, _) = data_shape
    if height <= 32:  # such as cifar10
        body = layers.conv3d(
            data=data,
            channels=filter_list[0],
            kernel_size=(3, 3, 3),
            strides=(1, 1, 1),
            padding=(1, 1, 1),
            name="conv0",
            data_layout=data_layout,
            kernel_layout=kernel_layout,
        )
    else:  # often expected to be 224 such as imagenet
        body = layers.conv3d(
            data=data,
            channels=filter_list[0],
            kernel_size=(3, 7, 7),
            strides=(1, 2, 2),
            padding=(1, 3, 3),
            name="conv0",
            data_layout=data_layout,
            kernel_layout=kernel_layout,
        )
        body = layers.batch_norm_infer(data=body, epsilon=2e-5, name="bn0")
        body = relay.nn.relu(data=body)
        # body = relay.nn.max_pool3d(data=body, pool_size=(3, 3), strides=(2, 2), padding=(1, 1),
        #                            layout=data_layout)
    # Each stage starts with a (possibly) strided, non-dim-matching unit,
    # followed by units[i]-1 dim-matching units.
    for i in range(num_stages):
        body = residual_unit(
            body,
            filter_list[i + 1],
            (1 if i == 0 else 2, 1 if i == 0 else 2, 1 if i == 0 else 2),
            False,
            name=f"stage{i + 1}_unit1",
            bottle_neck=bottle_neck,
            data_layout=data_layout,
            kernel_layout=kernel_layout,
        )
        for j in range(units[i] - 1):
            body = residual_unit(
                body,
                filter_list[i + 1],
                (1, 1, 1),
                True,
                name=f"stage{i + 1}_unit{j + 2}",
                bottle_neck=bottle_neck,
                data_layout=data_layout,
                kernel_layout=kernel_layout,
            )
    bn1 = layers.batch_norm_infer(data=body, epsilon=2e-5, name="bn1")
    relu1 = relay.nn.relu(data=bn1)
    # Although kernel is not used here when global_pool=True, we should put one
    pool1 = relay.nn.global_avg_pool3d(data=relu1, layout=data_layout)
    flat = relay.nn.batch_flatten(data=pool1)
    fc1 = layers.dense_add_bias(data=flat, units=num_classes, name="fc1")
    net = relay.nn.softmax(data=fc1)
    return relay.Function(relay.analysis.free_vars(net), net)
def get_net(
    batch_size,
    num_classes,
    num_layers=50,
    image_shape=(3, 16, 112, 112),
    layout="NCDHW",
    dtype="float32",
    **kwargs,
):
    """
    Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
    Original author Wei Wu
    """
    # Pick the height axis out of the (batch-less) image shape for the layout.
    if layout == "NCDHW":
        (_, _, height, _) = image_shape
    else:
        (_, height, _, _) = image_shape
    data_shape = (batch_size,) + image_shape
    if height <= 28:
        # Small-image (e.g. cifar10) configurations.
        num_stages = 3
        if (num_layers - 2) % 9 == 0 and num_layers >= 164:
            per_unit = [(num_layers - 2) // 9]
            filter_list = [16, 64, 128, 256]
            bottle_neck = True
        elif (num_layers - 2) % 6 == 0 and num_layers < 164:
            per_unit = [(num_layers - 2) // 6]
            filter_list = [16, 16, 32, 64]
            bottle_neck = False
        else:
            raise ValueError(f"no experiments done on num_layers {num_layers}")
        units = per_unit * num_stages
    else:
        # Large-image (e.g. imagenet-style) configurations.
        num_stages = 4
        bottle_neck = num_layers >= 50
        filter_list = [64, 256, 512, 1024, 2048] if bottle_neck else [64, 64, 128, 256, 512]
        units_by_depth = {
            18: [2, 2, 2, 2],
            34: [3, 4, 6, 3],
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3],
            200: [3, 24, 36, 3],
            269: [3, 30, 48, 8],
        }
        if num_layers not in units_by_depth:
            raise ValueError(f"no experiments done on num_layers {num_layers}")
        units = units_by_depth[num_layers]
    return resnet(
        units=units,
        num_stages=num_stages,
        filter_list=filter_list,
        num_classes=num_classes,
        data_shape=data_shape,
        bottle_neck=bottle_neck,
        layout=layout,
        dtype=dtype,
    )
def get_workload(
    batch_size=1,
    num_classes=1000,
    num_layers=18,
    image_shape=(3, 16, 112, 112),
    layout="NCDHW",
    dtype="float32",
    **kwargs,
):
    """Get benchmark workload for resnet

    Parameters
    ----------
    batch_size : int
        The batch size used in the model

    num_classes : int, optional
        Number of classes

    num_layers : int, optional
        Number of layers

    image_shape : tuple, optional
        The input image shape

    layout: str
        The data layout for conv3d

    dtype : str, optional
        The data type

    kwargs : dict
        Extra arguments

    Returns
    -------
    mod : tvm.IRModule
        The relay module that contains a ResNet network.

    params : dict of str to NDArray
        The parameters.
    """
    network = get_net(
        batch_size=batch_size,
        num_classes=num_classes,
        num_layers=num_layers,
        image_shape=image_shape,
        dtype=dtype,
        layout=layout,
        **kwargs,
    )
    return create_workload(network)
| 10,913 | 27.570681 | 97 | py |
tvm | tvm-main/python/tvm/relay/testing/py_converter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return
"""Utility for converting Relay code into a Python script with equivalent semantics"""
import sys
import ast
from ast import alias, Assign, Load, Name, NameConstant, Num, Return, Store, Str
import re
import tvm
from tvm import relay
from tvm.relay.adt import Pattern
from tvm.relay.backend import te_compiler
from tvm.relay.expr import Expr, GlobalVar, Var
from tvm.relay.function import Function
from tvm.relay.expr_functor import ExprFunctor
# Interpreter version components; used by PythonConverter.convert to decide
# whether ast.Module accepts a ``type_ignores`` argument (Python >= 3.8).
__MAJOR__, __MINOR__, _, _, _ = sys.version_info
# Name of the variable the generated program assigns its final result to,
# so the result can be read back after running the AST with exec().
OUTPUT_VAR_NAME = "_py_out"
# corresponds to:
#     import numpy
#     import tvm
#     from tvm import relay
#     from tvm import nd
#     from tvm.runtime import container as _container
#     from tvm.relay.backend.interpreter import RefValue, ConstructorValue
PROLOGUE = [
    ast.Import([alias("numpy", None)]),
    ast.Import([alias("tvm", None)]),
    ast.ImportFrom("tvm", [alias("relay", None)], 0),
    ast.ImportFrom("tvm", [alias("nd", None)], 0),
    ast.ImportFrom("tvm.runtime", [alias("container", "_container")], 0),
    ast.ImportFrom(
        "tvm.relay.backend.interpreter",
        [alias("RefValue", None), alias("ConstructorValue", None)],
        0,
    ),
]
class PythonConverter(ExprFunctor):
"""Functor for translating Relay programs into Python ASTs."""
def __init__(self, mod, target) -> None:
super().__init__()
self.mod = mod
self.tgt = target if isinstance(target, tvm.target.Target) else tvm.target.Target(target)
self.tec = te_compiler.get()
self.fun_no = 0
self.var_no = 0
self.var_map = {}
def convert(self, prog: Expr):
"""This method converts the passed Relay expression into a Python
AST object with equivalent semantics.
The Python AST can be executed using exec(); it can be turned
into text and inspected using astor.
"""
optimized = self.optimize(prog)
# start with conversion prelude (imports) and convert global defs
body = []
body += PROLOGUE
body += self.convert_module()
prog_body, extra_defs = self.visit(optimized)
body += extra_defs
# we finally must assign the final expression to the output var
# so it can be read after running EXEC
body.append(Assign([Name(OUTPUT_VAR_NAME, Store())], prog_body))
global __MAJOR__, __MINOR__
if __MAJOR__ == 3 and __MINOR__ >= 8:
return ast.fix_missing_locations(ast.Module(body=body, type_ignores=[]))
else:
return ast.fix_missing_locations(ast.Module(body=body))
    def optimize(self, prog: Expr):
        """Performs optimizations necessary to be able to generate code for prog.

        Returns a Function when ``prog`` was a Function, otherwise the
        optimized body expression.
        """
        # unwrap tuple wrappers (some op calls produce them)
        unwrapped = prog.astuple() if isinstance(prog, relay.TupleWrapper) else prog
        assert relay.analysis.well_formed(unwrapped)
        # For a lone global var, there is nothing we need to do
        if isinstance(unwrapped, relay.GlobalVar):
            return unwrapped
        # main might be in the mod already and from_expr will not override it if it's there,
        # so we need a new name
        target_name = self.generate_function_name("target")
        wrapped = unwrapped
        if not isinstance(unwrapped, relay.Function):
            wrapped = relay.Function(relay.analysis.free_vars(unwrapped), unwrapped)
        # easiest way to make a deep copy -- note that main will not be overridden if it's present
        copy_mod = tvm.IRModule.from_expr(
            relay.Tuple([]), self.mod.functions, self.mod.type_definitions
        )
        copy_mod[target_name] = wrapped
        # necessary pass: SimplifyInference (otherwise we can't generate code for some operators)
        # and fusion (to get primitive functions)
        opts = tvm.transform.Sequential(
            [relay.transform.SimplifyInference(), relay.transform.FuseOps(fuse_opt_level=0)]
        )
        copy_mod = opts(copy_mod)
        optimized = copy_mod[target_name]
        # hand back the same "shape" the caller gave us: Function in, Function out
        return optimized if isinstance(unwrapped, Function) else optimized.body
def sanitize(self, name: str) -> str:
"""Removes any invalid characters (only underscores, numbers, and letters permitted)
from the given name. Since we append a number and underscore to var names anyway,
it doesn't matter if the name is the empty string."""
return re.sub(r"\W", "", name)
def generate_var_name(self, name_hint: str) -> str:
"""Generates a unique variable name starting from the hint."""
name = f"{self.sanitize(name_hint)}_var_{self.var_no}"
self.var_no += 1
return name
def generate_function_name(self, name_hint: str) -> str:
"""Generates a unique function name starting from the hint."""
name = f"{self.sanitize(name_hint)}_fun_{self.fun_no}"
self.fun_no += 1
return name
def get_var_name(self, var: Expr) -> str:
"""Returns the var name for the given Realy variable."""
if var in self.var_map:
return self.var_map[var]
name = self.generate_var_name(var.name_hint)
self.var_map[var] = name
return name
def include_var(self, var: Expr, assign=False):
"""Returns a variable AST node for the given Relay var depending on
whether it must appear in an assignment or not."""
name = self.get_var_name(var)
return Name(name, Store() if assign else Load())
def parse_name(self, name: str):
    """Translate a dotted Python name (e.g., 'relay.var') into an AST
    expression that resolves it: a Name node followed by a chain of
    Attribute accesses."""
    root, *rest = name.split(".")
    node = Name(root, Load())
    for attr in rest:
        node = ast.Attribute(node, attr, Load())
    return node
def parse_numpy_array(self, arr):
    """Convert a Numpy array into a Python AST literal: a numeric or
    boolean constant for rank 0, and (possibly nested) list literals
    for higher ranks."""

    def scalar_node(value):
        # booleans must become NameConstant, not Num
        return NameConstant(value) if isinstance(value, bool) else Num(value)

    if arr.ndim == 0:
        return scalar_node(arr.item())
    if arr.ndim == 1:
        return ast.List([scalar_node(elem.item()) for elem in arr], Load())
    # rank >= 2: recurse row by row
    rows = [self.parse_numpy_array(row) for row in arr]
    return ast.List(rows, Load())
def convert_fields(self, fields: [Expr]):
    """Visit each field (call argument or tuple member) in order and
    return their converted ASTs plus the concatenated defs lists."""
    converted = []
    all_defs = []
    for member in fields:
        member_ast, member_defs = self.visit(member)
        converted.append(member_ast)
        all_defs.extend(member_defs)
    return (converted, all_defs)
def convert_to_thunk(self, name_hint: str, expr: Expr):
    """Wrap the converted expression in a zero-argument function (thunk)
    and return the thunk's definition AST and its generated name."""
    converted, defs = self.visit(expr)
    fresh_name = self.generate_function_name(name_hint)
    thunk_def = self.create_def(fresh_name, [], defs + [Return(converted)])
    return (thunk_def, fresh_name)
def convert_func_node(self, func: Function, name_var=None):
    """Converts the given Relay function into a Python function, with
    special handling for named functions (local or global).

    func: the Relay function to convert.
    name_var: optional GlobalVar or Var naming the function. When given,
        the generated Python function is named after it so recursive
        references resolve; when None, a fresh anonymous name is used.
    Returns (Python function definition AST, chosen function name).
    """
    # Fix: the original used three independent `if`s, so a non-None
    # name_var that was neither a GlobalVar nor a Var left func_name
    # unbound (UnboundLocalError). An if/elif/else makes dispatch total:
    # unexpected values fall back to an anonymous name.
    if isinstance(name_var, GlobalVar):
        func_name = str(name_var.name_hint)
    elif isinstance(name_var, Var):
        func_name = self.get_var_name(name_var)
    else:
        func_name = self.generate_function_name("_anon_func")
    var_names = [self.get_var_name(var) for var in func.params]
    body, defs = self.visit(func.body)
    # register_packed so the result is a PackedFunc (usable as ObjectRef)
    ret = self.create_def(func_name, var_names, defs + [Return(body)], register_packed=True)
    return (ret, func_name)
def convert_module(self):
    """Convert every global function defined in the module and return
    the resulting list of Python function definitions."""
    converted = []
    for gvar, func in self.mod.functions.items():
        # lower any operators the function uses before converting
        optimized = self.optimize(func)
        try:
            fn_def, _ = self.convert_func_node(optimized, gvar)
            converted.append(fn_def)
        except TypeError:
            # TODO(wweic): fix conversion for Any
            pass
    return converted
def create_call(self, func_name: str, arguments):
    """Build an AST call of the (possibly dotted) function name with the
    given positional argument nodes and no keyword arguments."""
    callee = self.parse_name(func_name)
    return ast.Call(callee, arguments, [])
def create_def(self, func_name: str, arguments: [str], body, register_packed: bool = False):
    """
    Wrapper over function definition AST node, whose constructor is inconvenient.
    register_packed includes a tvm.register_func decorator on the generated function if true.
    This option should be used for Relay functions (warning: clobbers registry!)
    """
    inner_args = [ast.arg(argument, None) for argument in arguments]
    # add a decorator to register as a PackedFunc so the function will be an ObjectRef
    # and will allow for putting functions into tuples or refs
    decorator_list = [
        ast.Call(
            self.parse_name("tvm.register_func"),
            [ast.Constant(value=func_name)],
            [ast.keyword(arg="override", value=ast.Constant(value=True))],
        )
    ]
    # Python 3.8 added the positional-only parameter list as the first
    # field of ast.arguments; build the node to match the running version.
    # Fixes: the previous `global` declaration was unnecessary (the
    # version globals are only read, never assigned), and the old check
    # `__MAJOR__ == 3 and __MINOR__ >= 8` would wrongly select the
    # pre-3.8 layout on any future major version; a tuple comparison
    # handles both correctly.
    if (__MAJOR__, __MINOR__) >= (3, 8):
        arguments = ast.arguments([], inner_args, None, [], [], None, [])
    else:
        arguments = ast.arguments(inner_args, None, [], [], None, [])
    return ast.FunctionDef(
        func_name, arguments, body, decorator_list if register_packed else [], None
    )
def create_tuple(self, fields):
    """Emit an AST call that constructs a TVM tuple object from the
    given field ASTs.

    The FFI constructor is used directly because it converts PackedFuncs
    into ObjectRefs correctly; tvm.runtime.container.tuple_object fails
    to convert PackedFuncs from Python.
    """
    return self.create_call("_container._ffi_api.Tuple", fields)
def create_op_call(self, op: Function, relay_args, py_args):
    """Lowers the passed primitive function, registers it in TVM's
    global compiler, and produces a call to the lowered function in
    the generated Python code.

    op: the primitive (fused) Relay function being called.
    relay_args: the Relay argument expressions (used for their checked types).
    py_args: the already-converted Python ASTs for those arguments.
    Returns (wrapper function definition AST, AST call of that wrapper).
    """
    # compile the function and register globally
    # (name is keyed on the structural hash so each structurally-distinct
    # op is JITed and registered only once)
    cc_key = te_compiler.CCacheKey(op, self.tgt)
    func_hash = tvm.ir.structural_hash(op)
    op_name = f"_lowered_op_{func_hash}"
    if not tvm.get_global_func(op_name, allow_missing=True):
        jitted = self.tec.jit(cc_key, self.tgt)
        tvm.register_func(op_name, jitted)

    def convert_input(py_input, arg_type):
        """Use the types of the function arguments to determine whether we expect
        a tensor or tuple (returns list of inputs to the lowered op call)"""
        # equivalent: input.data
        if isinstance(arg_type, relay.TensorType):
            return [py_input]
        assert isinstance(arg_type, relay.TupleType)
        # convert each input.fields[i] (tuples are flattened recursively)
        ret = []
        for i in range(len(arg_type.fields)):
            ret += convert_input(
                ast.Subscript(py_input, ast.Index(Num(i)), Load()), arg_type.fields[i]
            )
        return ret

    def convert_output(ret_type):
        """Use the function return type to produce auxiliary variables to store outputs.
        Returns ([assignments of output vars], [extra arguments to pass to op call],
        expression collecting output)"""
        if isinstance(ret_type, relay.TensorType):
            output_var_name = self.generate_var_name("_out")
            output_var = Name(output_var_name, Load())
            shape = ast.Tuple([Num(dim) for dim in ret_type.concrete_shape], Load())
            # create a new NDArray of the right shape and dtype
            # (lowered ops write their results into preallocated outputs)
            assign_output = Assign(
                [Name(output_var_name, Store())],
                self.create_call(
                    "nd.array", [self.create_call("numpy.empty", [shape, Str(ret_type.dtype)])]
                ),
            )
            return ([assign_output], [output_var], output_var)
        assert isinstance(ret_type, relay.TupleType)
        assignments = []
        extra_args = []
        fields = []
        for t in ret_type.fields:
            inner_assignments, inner_args, inner_output = convert_output(t)
            assignments += inner_assignments
            extra_args += inner_args
            fields.append(inner_output)
        return (assignments, extra_args, self.create_tuple(fields))

    # create a function to wrap the call of the lowered op and return
    # a call to that function
    wrap_name = self.generate_function_name(f"_{op_name}_wrapper")
    wrap_args = [self.generate_var_name(f"_arg_{i}") for i in range(len(py_args))]
    inner_call_args = []
    for i in range(len(py_args)):
        inner_call_args += convert_input(Name(wrap_args[i], Load()), relay_args[i].checked_type)
    output_assignments, aux_args, output = convert_output(op.checked_type.ret_type)
    # equiv: _op = tvm.get_global_func(op_name)
    op_var = self.generate_var_name("_op")
    op_call = self.create_call("tvm.get_global_func", [Str(op_name)])
    op_assign = Assign([Name(op_var, Store())], op_call)
    # equiv: _op(args) -- results land in the preallocated output vars
    inner_call = self.create_call(op_var, inner_call_args + aux_args)
    body = output_assignments + [op_assign, ast.Expr(inner_call), Return(output)]
    wrap_def = self.create_def(wrap_name, wrap_args, body)
    return wrap_def, self.create_call(wrap_name, py_args)
def create_match_check(self, pattern: Pattern, data):
    """Generate a Python boolean expression that reports whether the
    ADT value produced by the AST `data` matches the given Relay
    pattern (evaluating to True or False)."""
    # vars and wildcards match anything, so the check is trivially True
    if isinstance(pattern, (relay.PatternWildcard, relay.PatternVar)):
        return NameConstant(True)
    checks = []
    if isinstance(pattern, relay.PatternConstructor):
        # constructor patterns compare tags and also check nested patterns
        # equiv: (arg.tag == pattern_constructor.tag)
        tag_of_data = ast.Attribute(data, "tag", Load())
        checks.append(
            ast.Compare(tag_of_data, [ast.Eq()], [ast.Num(pattern.constructor.tag)])
        )
    assert isinstance(pattern, (relay.PatternConstructor, relay.PatternTuple))
    # recurse into nested patterns; var/wildcard subpatterns can never
    # fail, so only constructor subpatterns need a check
    # NOTE(review): nested PatternTuple subpatterns are also skipped here,
    # so constructor patterns buried inside a nested tuple pattern are
    # never checked -- confirm whether such patterns can occur
    for idx, sub_pattern in enumerate(pattern.patterns):
        if not isinstance(sub_pattern, relay.PatternConstructor):
            continue
        # index into the value corresponding to the subpattern
        sub_value = ast.Subscript(
            ast.Attribute(data, "fields", Load()), ast.Index(Num(idx)), Load()
        )
        checks.append(self.create_match_check(sub_pattern, sub_value))
    # a single condition needs no conjunction
    if len(checks) == 1:
        return checks[0]
    # otherwise AND together all the collected checks
    return ast.BoolOp(ast.And(), checks)
def create_match_clause_body(self, pattern: Pattern, body: Expr):
    """Given a match clause pattern and a clause body,
    generates a Python function that when called with an ADT
    that matches the pattern, returns the result of evaluating
    the clause body. This function returns a function definition
    and the name of the generated function.

    The generated function assumes its argument already matched the
    pattern (the check is produced separately by create_match_check).
    """

    def collect_var_assignments(pat, val):
        """This helper function ensures that the pattern is used to
        properly assign all subfields of the given AST for use
        in the clause body
        E.g., for PatternConstructor(A, PatternVar(v), PatternWildcard(),
        PatternConstructor(B, PatternVar(w)))
        we would want to have
        v = a.fields[0]
        w = a.fields[2].fields[0]
        """
        if isinstance(pat, relay.PatternWildcard):
            # wildcards bind nothing
            return []
        if isinstance(pat, relay.PatternVar):
            # bind the pattern var to the current subvalue
            return [Assign([self.include_var(pat.var, assign=True)], val)]
        # constructor pattern: assign each field of the value
        # based on subpatterns
        assignments = []
        for i in range(len(pat.patterns)):
            # we want the assignments for val.fields[i]
            field = ast.Subscript(
                ast.Attribute(val, "fields", Load()), ast.Index(Num(i)), Load()
            )
            assignments += collect_var_assignments(pat.patterns[i], field)
        return assignments

    func_name = self.generate_function_name("_match_clause_body")
    arg_name = self.generate_var_name("_match_clause_body")
    # convert the clause body first so pattern-var names are registered
    clause_body, defs = self.visit(body)
    assignments = collect_var_assignments(pattern, Name(arg_name, Load()))
    func_def = self.create_def(
        func_name, [arg_name], defs + assignments + [Return(clause_body)]
    )
    return (func_def, func_name)
# Convention for the expr visitor: Each visit function returns a tuple of two members.
#
# The first is a Python AST comprised of a single *expression* that evaluates to an equivalent
# result to the desired Relay expression (and executes all effects in the right order).
#
# The second is a list of function definition *statements* defining thunks and other
# auxiliary functions needed in the translated AST object. The defs in the second object
# will always have unique names and will never perform any effects, so as long as they
# appear in the Python program before the first statement is executed, there should not
# be any problems.
def visit_var(self, var: Expr):
    """A variable becomes a plain name lookup; no auxiliary defs needed."""
    return (self.include_var(var, assign=False), [])
def visit_global_var(self, gvar: Expr):
    """Global vars are fetched by name from TVM's PackedFunc registry.

    No numeric suffix is appended because the module already guarantees
    that global *names* are unique.
    """
    packed_name = str(gvar.name_hint)
    lookup = self.create_call("tvm.get_global_func", [ast.Constant(value=packed_name)])
    return (lookup, [])
def visit_let(self, letexp: Expr):
    """Translate `let var = value in body` into a function call so the
    whole node stays a single expression while still scoping the bound
    variable correctly (somewhat ugly, but it works):

        def _let_func(var):
            return body
        _let_func(value)
    """
    body_ast, body_defs = self.visit(letexp.body)
    let_name = self.generate_function_name("_let_func")
    let_def = self.create_def(
        let_name, [self.get_var_name(letexp.var)], body_defs + [Return(body_ast)]
    )
    # Special case: a function literal bound by the let must be converted
    # under the let var's name so it can refer to itself recursively.
    if isinstance(letexp.value, Function):
        fn_def, fn_name = self.convert_func_node(letexp.value, letexp.var)
        call = self.create_call(let_name, [Name(fn_name, Load())])
        return (call, [fn_def, let_def])
    # otherwise just call the binding func with the converted value
    value_ast, value_defs = self.visit(letexp.value)
    value_defs.append(let_def)
    return (self.create_call(let_name, [value_ast]), value_defs)
def visit_tuple(self, tup: Expr):
    """Convert each field, then emit a TVM tuple constructor call."""
    field_asts, defs = self.convert_fields(tup.fields)
    return (self.create_tuple(field_asts), defs)
def visit_tuple_getitem(self, tgi: Expr):
    """Tuple projection becomes a Python subscript on the tuple value."""
    tup_ast, defs = self.visit(tgi.tuple_value)
    projection = ast.Subscript(tup_ast, ast.Index(Num(tgi.index)), Load())
    return (projection, defs)
def visit_if(self, if_block: Expr):
    """A Relay conditional maps onto Python's ternary `IfExp`."""
    cond_ast, cond_defs = self.visit(if_block.cond)
    then_ast, then_defs = self.visit(if_block.true_branch)
    else_ast, else_defs = self.visit(if_block.false_branch)
    # the condition is an NDArray, so extract its value to test it
    # (equivalent to: cond.numpy())
    unwrapped_cond = ast.Call(ast.Attribute(cond_ast, "numpy", Load()), [], [])
    ternary = ast.IfExp(unwrapped_cond, then_ast, else_ast)
    return (ternary, cond_defs + then_defs + else_defs)
def visit_constant(self, constant: Expr):
    """A constant is materialized as numpy.array(<literal>, dtype=...)
    wrapped in nd.array so the generated code produces an NDArray."""
    np_value = constant.data.numpy()
    literal = self.parse_numpy_array(np_value)
    np_array_ast = ast.Call(
        ast.Attribute(Name("numpy", Load()), "array", Load()),
        [literal],
        [ast.keyword("dtype", Str(constant.checked_type.dtype))],
    )
    return (self.create_call("nd.array", [np_array_ast]), [])
def visit_function(self, func: Expr):
    """Function literals are given generated names (Python's lambdas
    are too restrictive), registered as PackedFuncs via the def's
    decorator, and then loaded back by name."""
    fn_def, fn_name = self.convert_func_node(func)
    lookup = self.create_call("tvm.get_global_func", [ast.Constant(value=fn_name)])
    return (lookup, [fn_def])
def visit_call(self, call: Expr):
    """Dispatch on the callee: constructor calls build ADT values,
    lowered primitive functions go through the operator registry, and
    everything else is an ordinary function call."""
    callee = call.op
    arg_asts, arg_defs = self.convert_fields(call.args)
    if isinstance(callee, tvm.ir.Op):
        raise Exception("Operators should have been lowered and eliminated")
    if isinstance(callee, relay.Constructor):
        # produce a constructor value
        ctor = self.create_call(
            "ConstructorValue",
            [ast.Num(callee.tag), ast.List(arg_asts, Load()), NameConstant(None)],
        )
        return (ctor, arg_defs)
    # lowered operator: generate a call to a wrapper that fetches the
    # PackedFunc from TVM's registry
    if isinstance(callee, Function) and callee.attrs and callee.attrs.Primitive.value == 1:
        wrapper_def, wrapper_call = self.create_op_call(callee, call.args, arg_asts)
        return (wrapper_call, arg_defs + [wrapper_def])
    # ordinary function
    callee_ast, defs = self.visit(callee)
    return (ast.Call(callee_ast, arg_asts, []), defs + arg_defs)
def visit_ref_create(self, ref: Expr):
    """A new ref wraps its initial value in a RefValue object."""
    value_ast, defs = self.visit(ref.value)
    return (self.create_call("RefValue", [value_ast]), defs)
def visit_ref_read(self, read: Expr):
    """Reading a ref is an attribute access on its `value` field."""
    ref_ast, defs = self.visit(read.ref)
    return (ast.Attribute(ref_ast, "value", Load()), defs)
def visit_ref_write(self, write: Expr):
    """Assignment is a statement in Python but an expression in Relay,
    so the update is wrapped in a thunk that runs at the right time and
    nests properly; it returns an empty tuple to match Relay's
    semantics."""
    ref_ast, ref_defs = self.visit(write.ref)
    value_ast, value_defs = self.visit(write.value)
    thunk_name = self.generate_function_name("_ref_write_thunk")
    update_stmt = Assign([ast.Attribute(ref_ast, "value", Store())], value_ast)
    thunk = self.create_def(
        thunk_name,
        [],
        ref_defs + value_defs + [update_stmt, Return(self.create_tuple([]))],
    )
    return (self.create_call(thunk_name, []), [thunk])
def visit_match(self, match: Expr):
    """A match becomes a thunk containing a chain of if-statements: for
    each clause a generated check decides whether its pattern applies,
    and a generated body function binds the pattern vars and evaluates
    the clause."""
    data_ast, defs = self.visit(match.data)
    data_name = self.generate_var_name("_match_data")
    # bind the scrutinee once so it is evaluated exactly one time
    thunk_stmts = [Assign([Name(data_name, Store())], data_ast)]
    for clause in match.clauses:
        check = self.create_match_check(clause.lhs, Name(data_name, Load()))
        clause_def, clause_fn = self.create_match_clause_body(clause.lhs, clause.rhs)
        defs.append(clause_def)
        # equiv: if check(data): return clause_body(data)
        ret_stmt = Return(self.create_call(clause_fn, [Name(data_name, Load())]))
        thunk_stmts.append(ast.If(check, [ret_stmt], []))
    # exhaustiveness should be guaranteed upstream; assert just in case
    thunk_stmts.append(ast.Assert(NameConstant(False), Str("Match was not exhaustive")))
    thunk_name = self.generate_function_name("_match_thunk")
    thunk_def = self.create_def(thunk_name, [], defs + thunk_stmts)
    return (self.create_call(thunk_name, []), [thunk_def])
# these are both handled in the "call" case
def visit_constructor(self, _):
    """Constructors are handled at their call sites (visit_call)."""
def visit_op(self, _):
    """Operators are handled at their call sites (visit_call)."""
def to_python(expr: Expr, mod=None, target=tvm.target.Target("llvm")):
    """Converts the given Relay expression into a Python script (as a Python AST object).
    For easiest debugging, import the astor package and use to_source()."""
    if mod is None:
        mod = tvm.IRModule()
    # conversion relies on checked types, so run type inference first
    typed_mod = relay.transform.InferType()(mod)
    converted = PythonConverter(typed_mod, target).convert(expr)
    assert converted
    return converted
def run_as_python(expr: Expr, mod=None, target=tvm.target.Target("llvm")):
    """Converts the given Relay expression into a Python script and
    executes it.
    Note that closures will be returned as PackedFuncs
    """
    if mod is None:
        mod = tvm.IRModule()
    program = to_python(expr, mod, target)
    compiled = compile(program, "<string>", "exec")
    # the generated script stores its result under OUTPUT_VAR_NAME
    namespace = {OUTPUT_VAR_NAME: None}
    # pylint: disable=exec-used
    exec(compiled, namespace, namespace)
    return namespace[OUTPUT_VAR_NAME]
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Port of NNVM version of MobileNet to Relay.
"""
# pylint: disable=invalid-name
from tvm import relay
from . import layers
from .init import create_workload
def conv_block(
    data,
    name,
    channels,
    kernel_size=(3, 3),
    strides=(1, 1),
    padding=(1, 1),
    epsilon=1e-5,
    layout="NCHW",
):
    """Helper that builds a convolution -> batch norm -> ReLU block."""
    convolved = layers.conv2d(
        data=data,
        channels=channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_layout=layout,
        kernel_layout=layers.conv_kernel_layout(layout),
        name=name + "_conv",
    )
    normalized = layers.batch_norm_infer(data=convolved, epsilon=epsilon, name=name + "_bn")
    return relay.nn.relu(data=normalized)
def separable_conv_block(
    data,
    name,
    depthwise_channels,
    pointwise_channels,
    kernel_size=(3, 3),
    downsample=False,
    padding=(1, 1),
    epsilon=1e-5,
    layout="NCHW",
    dtype="float32",
):
    """Helper that builds a depthwise-separable convolution block:
    depthwise conv + bn + relu followed by pointwise (1x1) conv + bn + relu."""
    stride = (2, 2) if downsample else (1, 1)
    # the depthwise kernel's weight shape depends on the data layout
    if layout == "NCHW":
        wshape = (depthwise_channels, 1) + kernel_size
    elif layout == "NHWC":
        wshape = kernel_size + (depthwise_channels, 1)
    else:
        raise ValueError("Invalid layout: " + layout)
    bn_axis = layout.index("C")
    dw_weight = relay.var(name + "_weight", shape=wshape, dtype=dtype)
    # depthwise convolution + bn + relu
    depthwise = layers.conv2d(
        data=data,
        weight=dw_weight,
        channels=depthwise_channels,
        groups=depthwise_channels,
        kernel_size=kernel_size,
        strides=stride,
        padding=padding,
        data_layout=layout,
        kernel_layout=layers.conv_kernel_layout(layout, True),
        name=name + "_depthwise_conv1",
    )
    depthwise = layers.batch_norm_infer(
        data=depthwise, epsilon=epsilon, axis=bn_axis, name=name + "_bn1"
    )
    depthwise = relay.nn.relu(data=depthwise)
    # pointwise convolution + bn + relu
    pointwise = layers.conv2d(
        data=depthwise,
        channels=pointwise_channels,
        kernel_size=(1, 1),
        strides=(1, 1),
        padding=(0, 0),
        data_layout=layout,
        kernel_layout=layers.conv_kernel_layout(layout),
        name=name + "_conv2",
    )
    pointwise = layers.batch_norm_infer(
        data=pointwise, epsilon=epsilon, axis=bn_axis, name=name + "_bn2"
    )
    return relay.nn.relu(data=pointwise)
def mobile_net(
    num_classes=1000,
    data_shape=(1, 3, 224, 224),
    dtype="float32",
    alpha=1.0,
    is_shallow=False,
    layout="NCHW",
):
    """Function to construct a MobileNet"""
    data = relay.var("data", shape=data_shape, dtype=dtype)
    body = conv_block(data, "conv_block_1", int(32 * alpha), strides=(2, 2), layout=layout)

    def sep_block(prev, index, in_channels, out_channels, downsample=False):
        # shorthand over separable_conv_block with the shared args filled in
        return separable_conv_block(
            prev,
            f"separable_conv_block_{index}",
            int(in_channels * alpha),
            int(out_channels * alpha),
            downsample=downsample,
            layout=layout,
            dtype=dtype,
        )

    body = sep_block(body, 1, 32, 64)
    body = sep_block(body, 2, 64, 128, downsample=True)
    body = sep_block(body, 3, 128, 128)
    body = sep_block(body, 4, 128, 256, downsample=True)
    body = sep_block(body, 5, 256, 256)
    body = sep_block(body, 6, 256, 512, downsample=True)
    if is_shallow:
        # shallow variant: jump straight up to 1024 channels
        body = sep_block(body, 7, 512, 1024, downsample=True)
        body = sep_block(body, 8, 1024, 1024, downsample=True)
    else:
        # five stride-1 blocks at 512 channels, then widen to 1024
        for index in range(7, 12):
            body = sep_block(body, index, 512, 512)
        body = sep_block(body, 12, 512, 1024, downsample=True)
        body = sep_block(body, 13, 1024, 1024)
    # classifier head: global pooling + fully-connected + softmax
    pool = relay.nn.global_avg_pool2d(data=body, layout=layout)
    flattened = relay.nn.batch_flatten(data=pool)
    fc_weight = relay.var("fc_weight")
    fc_bias = relay.var("fc_bias")
    logits = relay.nn.dense(data=flattened, weight=fc_weight, units=num_classes)
    logits = relay.nn.bias_add(logits, fc_bias)
    probs = relay.nn.softmax(data=logits)
    return relay.Function(relay.analysis.free_vars(probs), probs)
def get_workload(
    batch_size=1, num_classes=1000, image_shape=(3, 224, 224), dtype="float32", layout="NCHW"
):
    """Get benchmark workload for mobilenet

    Parameters
    ----------
    batch_size : int, optional
        The batch size used in the model
    num_classes : int, optional
        Number of classes
    image_shape : tuple, optional
        The input image shape, cooperating with layout
    dtype : str, optional
        The data type
    layout : str, optional
        The data layout of image_shape and the operators,
        cooperating with image_shape

    Returns
    -------
    mod : tvm.IRModule
        The relay module that contains a MobileNet network.

    params : dict of str to NDArray
        The parameters.
    """
    # prepend the batch dimension to the per-image shape
    data_shape = (batch_size,) + tuple(image_shape)
    net = mobile_net(
        num_classes=num_classes,
        data_shape=data_shape,
        dtype=dtype,
        alpha=1.0,
        is_shallow=False,
        layout=layout,
    )
    return create_workload(net)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument, no-init
"""
Compile DarkNet Models
====================
DarkNet helper functions for darknet model parsing and image loading.
This functions will not be loaded by default.
These are utility functions used for testing and tutorial file.
"""
from __future__ import division
from cffi import FFI
import numpy as np
import cv2
def convert_image(image):
    """Convert an OpenCV BGR image into darknet's expected array form:
    channel-first layout with values scaled into [0, 1]."""
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # HWC -> CHW
    chw = np.array(rgb).transpose((2, 0, 1))
    scaled = np.divide(chw, 255.0)
    # NOTE(review): flipping axis 0 reverses the channel order again
    # (undoing the BGR->RGB conversion above) -- presumably intentional
    # for darknet compatibility; confirm before simplifying
    return np.flip(scaled, 0)
def load_image_color(test_image):
    """Load an image from disk with OpenCV and run the standard
    darknet preprocessing on it."""
    raw = cv2.imread(test_image)
    return convert_image(raw)
def _letterbox_image(img, w_in, h_in):
    """Resize the image to fit inside a (w_in, h_in) box while keeping
    its aspect ratio, centering it and padding the remainder with 0.5."""
    imh, imw, imc = img.shape
    # pick the scale that makes the image fit entirely inside the box
    if (w_in / imw) < (h_in / imh):
        new_w = w_in
        new_h = imh * w_in // imw
    else:
        new_h = h_in
        new_w = imw * h_in // imh
    # Default interpolation method is INTER_LINEAR
    # Other methods are INTER_AREA, INTER_NEAREST, INTER_CUBIC and INTER_LANCZOS4
    # For more information see:
    # https://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html#resize
    resized = cv2.resize(src=img, dsize=(new_w, new_h), interpolation=cv2.INTER_CUBIC)
    resized = convert_image(resized)
    # 0.5 (mid gray) fills the letterbox borders
    boxed = np.full((imc, h_in, w_in), 0.5, dtype=float)
    _, resized_h, resized_w = resized.shape
    top = int((h_in - new_h) / 2)
    left = int((w_in - new_w) / 2)
    boxed[:, top : top + resized_h, left : left + resized_w] = resized
    return boxed
def load_image(img, resize_width, resize_height):
    """Load the image and convert to the darknet model format.
    The image processing of darknet is different from normal:
    the image is letterboxed rather than stretched.

    Parameters
    ----------
    img : string
        The image file name with path

    resize_width : integer
        The width to which the image needs to be resized

    resize_height : integer
        The height to which the image needs to be resized

    Returns
    -------
    img : Float array
        Array of processed image
    """
    raw = cv2.imread(img)
    return _letterbox_image(raw, resize_width, resize_height)
class LAYERTYPE(object):
    """Darknet LAYERTYPE Class constant.

    Integer codes for darknet layer kinds; the values follow the order
    of the LAYERTYPE enum in the darknet C header declared via cffi
    below, and must stay in sync with it.
    """

    CONVOLUTIONAL = 0
    DECONVOLUTIONAL = 1
    CONNECTED = 2
    MAXPOOL = 3
    SOFTMAX = 4
    DETECTION = 5
    DROPOUT = 6
    CROP = 7
    ROUTE = 8
    COST = 9
    NORMALIZATION = 10
    AVGPOOL = 11
    LOCAL = 12
    SHORTCUT = 13
    ACTIVE = 14
    RNN = 15
    GRU = 16
    LSTM = 17
    CRNN = 18
    BATCHNORM = 19
    NETWORK = 20
    XNOR = 21
    REGION = 22
    YOLO = 23
    REORG = 24
    UPSAMPLE = 25
    LOGXENT = 26
    L2NORM = 27
    BLANK = 28
class ACTIVATION(object):
    """Darknet ACTIVATION Class constant.

    Integer codes for darknet activation functions; the values follow
    the order of the ACTIVATION enum in the darknet C header declared
    via cffi below, and must stay in sync with it.
    """

    LOGISTIC = 0
    RELU = 1
    RELIE = 2
    LINEAR = 3
    RAMP = 4
    TANH = 5
    PLSE = 6
    LEAKY = 7
    ELU = 8
    LOGGY = 9
    STAIR = 10
    HARDTAN = 11
    LHTAN = 12
__darknetffi__ = FFI()
__darknetffi__.cdef(
"""
typedef struct network network;
typedef struct layer layer;
typedef struct{
int *leaf;
int n;
int *parent;
int *child;
int *group;
char **name;
int groups;
int *group_size;
int *group_offset;
} tree;
typedef enum{
LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN
} ACTIVATION;
typedef enum {
CONVOLUTIONAL,
DECONVOLUTIONAL,
CONNECTED,
MAXPOOL,
SOFTMAX,
DETECTION,
DROPOUT,
CROP,
ROUTE,
COST,
NORMALIZATION,
AVGPOOL,
LOCAL,
SHORTCUT,
ACTIVE,
RNN,
GRU,
LSTM,
CRNN,
BATCHNORM,
NETWORK,
XNOR,
REGION,
YOLO,
REORG,
UPSAMPLE,
LOGXENT,
L2NORM,
BLANK
} LAYERTYPE;
typedef enum{
SSE, MASKED, L1, SEG, SMOOTH, WGAN
} COSTTYPE;
struct layer{
LAYERTYPE type;
ACTIVATION activation;
COSTTYPE cost_type;
void (*forward);
void (*backward);
void (*update);
void (*forward_gpu);
void (*backward_gpu);
void (*update_gpu);
int batch_normalize;
int shortcut;
int batch;
int forced;
int flipped;
int inputs;
int outputs;
int nweights;
int nbiases;
int extra;
int truths;
int h,w,c;
int out_h, out_w, out_c;
int n;
int max_boxes;
int groups;
int size;
int side;
int stride;
int reverse;
int flatten;
int spatial;
int pad;
int sqrt;
int flip;
int index;
int binary;
int xnor;
int steps;
int hidden;
int truth;
float smooth;
float dot;
float angle;
float jitter;
float saturation;
float exposure;
float shift;
float ratio;
float learning_rate_scale;
float clip;
int softmax;
int classes;
int coords;
int background;
int rescore;
int objectness;
int joint;
int noadjust;
int reorg;
int log;
int tanh;
int *mask;
int total;
float alpha;
float beta;
float kappa;
float coord_scale;
float object_scale;
float noobject_scale;
float mask_scale;
float class_scale;
int bias_match;
int random;
float ignore_thresh;
float truth_thresh;
float thresh;
float focus;
int classfix;
int absolute;
int onlyforward;
int stopbackward;
int dontload;
int dontsave;
int dontloadscales;
float temperature;
float probability;
float scale;
char * cweights;
int * indexes;
int * input_layers;
int * input_sizes;
int * map;
float * rand;
float * cost;
float * state;
float * prev_state;
float * forgot_state;
float * forgot_delta;
float * state_delta;
float * combine_cpu;
float * combine_delta_cpu;
float * concat;
float * concat_delta;
float * binary_weights;
float * biases;
float * bias_updates;
float * scales;
float * scale_updates;
float * weights;
float * weight_updates;
float * delta;
float * output;
float * loss;
float * squared;
float * norms;
float * spatial_mean;
float * mean;
float * variance;
float * mean_delta;
float * variance_delta;
float * rolling_mean;
float * rolling_variance;
float * x;
float * x_norm;
float * m;
float * v;
float * bias_m;
float * bias_v;
float * scale_m;
float * scale_v;
float *z_cpu;
float *r_cpu;
float *h_cpu;
float * prev_state_cpu;
float *temp_cpu;
float *temp2_cpu;
float *temp3_cpu;
float *dh_cpu;
float *hh_cpu;
float *prev_cell_cpu;
float *cell_cpu;
float *f_cpu;
float *i_cpu;
float *g_cpu;
float *o_cpu;
float *c_cpu;
float *dc_cpu;
float * binary_input;
struct layer *input_layer;
struct layer *self_layer;
struct layer *output_layer;
struct layer *reset_layer;
struct layer *update_layer;
struct layer *state_layer;
struct layer *input_gate_layer;
struct layer *state_gate_layer;
struct layer *input_save_layer;
struct layer *state_save_layer;
struct layer *input_state_layer;
struct layer *state_state_layer;
struct layer *input_z_layer;
struct layer *state_z_layer;
struct layer *input_r_layer;
struct layer *state_r_layer;
struct layer *input_h_layer;
struct layer *state_h_layer;
struct layer *wz;
struct layer *uz;
struct layer *wr;
struct layer *ur;
struct layer *wh;
struct layer *uh;
struct layer *uo;
struct layer *wo;
struct layer *uf;
struct layer *wf;
struct layer *ui;
struct layer *wi;
struct layer *ug;
struct layer *wg;
tree *softmax_tree;
size_t workspace_size;
};
typedef enum {
CONSTANT, STEP, EXP, POLY, STEPS, SIG, RANDOM
} LEARNINGRATEPOLICY;
typedef struct network{
int n;
int batch;
size_t *seen;
int *t;
float epoch;
int subdivisions;
layer *layers;
float *output;
LEARNINGRATEPOLICY policy;
float learning_rate;
float momentum;
float decay;
float gamma;
float scale;
float power;
int time_steps;
int step;
int max_batches;
float *scales;
int *steps;
int num_steps;
int burn_in;
int adam;
float B1;
float B2;
float eps;
int inputs;
int outputs;
int truths;
int notruth;
int h, w, c;
int max_crop;
int min_crop;
float max_ratio;
float min_ratio;
int center;
float angle;
float aspect;
float exposure;
float saturation;
float hue;
int random;
int gpu_index;
tree *hierarchy;
float *input;
float *truth;
float *delta;
float *workspace;
int train;
int index;
float *cost;
float clip;
} network;
typedef struct {
int w;
int h;
int c;
float *data;
} image;
network *load_network(char *cfg, char *weights, int clear);
image letterbox_image(image im, int w, int h);
int resize_network(network *net, int w, int h);
void top_predictions(network *net, int n, int *index);
void free_image(image m);
image load_image_color(char *filename, int w, int h);
float *network_predict_image(network *net, image im);
float *network_predict(network *net, float *input);
network *make_network(int n);
layer make_convolutional_layer(
int batch,
int h, int w, int c, int n,
int groups, int size, int stride, int padding,
ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam);
layer make_connected_layer(int batch, int inputs, int outputs,
ACTIVATION activation, int batch_normalize, int adam);
layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding);
layer make_avgpool_layer(int batch, int w, int h, int c);
layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2);
layer make_batchnorm_layer(int batch, int w, int h, int c);
layer make_reorg_layer(
int batch, int w, int h, int c,
int stride, int reverse, int flatten, int extra);
layer make_region_layer(int batch, int w, int h, int n, int classes, int coords);
layer make_softmax_layer(int batch, int inputs, int groups);
layer make_rnn_layer(int batch, int inputs, int outputs,
int steps, ACTIVATION activation, int batch_normalize, int adam);
layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes);
layer make_crnn_layer(
int batch, int h, int w, int c,
int hidden_filters, int output_filters, int steps,
ACTIVATION activation, int batch_normalize);
layer make_lstm_layer(
int batch, int inputs, int outputs, int steps,
int batch_normalize, int adam);
layer make_gru_layer(int batch, int inputs,
int outputs, int steps, int batch_normalize, int adam);
layer make_upsample_layer(int batch, int w, int h, int c, int stride);
layer make_l2norm_layer(int batch, int inputs);
void free_network(network *net);
"""
)
| 12,040 | 21.256932 | 93 | py |
tvm | tvm-main/python/tvm/relay/testing/yolo_detection.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument, no-init,
"""
Yolo detection boxes helper functions
====================
DarkNet helper functions for yolo and image loading.
This functions will not be loaded by default.
These are utility functions used for testing and tutorial file.
"""
from __future__ import division
import math
from collections import namedtuple
from functools import cmp_to_key
import numpy as np
# Center-format bounding box: (x, y) is the box center, (w, h) its extent.
Box = namedtuple("Box", ["x", "y", "w", "h"])
def nms_comparator(a, b):
    """Ordering function for NMS sorting.

    If ``b`` carries a non-negative ``sort_class``, detections are compared by
    the probability of that class; otherwise by objectness. Returns a positive
    value when ``a`` ranks higher than ``b``.
    """
    if "sort_class" in b and b["sort_class"] >= 0:
        cls = b["sort_class"]
        return a["prob"][cls] - b["prob"][cls]
    return a["objectness"] - b["objectness"]
def _correct_boxes(dets, w, h, netw, neth, relative):
new_w, new_h = (netw, (h * netw) // w) if (netw / w < neth / h) else ((w * neth // h), neth)
for det in dets:
b = det["bbox"]
b = b._replace(x=(b.x - (netw - new_w) / 2 / netw) / (new_w / netw))
b = b._replace(y=(b.y - (neth - new_h) / 2 / neth) / (new_h / neth))
b = b._replace(w=b.w * netw / new_w)
b = b._replace(h=b.h * neth / new_h)
if not relative:
b = b._replace(x=b.x * w)
b = b._replace(w=b.w * w)
b = b._replace(y=b.y * h)
b = b._replace(h=b.h * h)
det["bbox"] = b
return dets
def _overlap(x1, w1, x2, w2):
l1 = x1 - w1 / 2
l2 = x2 - w2 / 2
left = l1 if l1 > l2 else l2
r1 = x1 + w1 / 2
r2 = x2 + w2 / 2
right = r1 if r1 < r2 else r2
return right - left
def _box_intersection(a, b):
    """Intersection area of two center-format boxes; 0 if they don't overlap."""
    ow = _overlap(a.x, a.w, b.x, b.w)
    oh = _overlap(a.y, a.h, b.y, b.h)
    if ow < 0 or oh < 0:
        return 0
    return ow * oh
def _box_union(a, b):
    """Union area of two center-format boxes (sum of areas minus overlap)."""
    inter = _box_intersection(a, b)
    return a.w * a.h + b.w * b.h - inter
def _box_iou(a, b):
    """Intersection-over-union of two center-format boxes."""
    union = _box_union(a, b)
    return _box_intersection(a, b) / union
def _get_box(data, biases, n, location, lw, lh, w, h):
    """Decode one anchor-box prediction into a Box.

    ``location`` is (anchor, row, col); channels 0-3 of ``data`` hold the raw
    x, y, w, h predictions. Width/height pass through exp() and are scaled by
    the anchor biases for anchor ``n``.
    """
    anchor, row, col = location
    pred = data[anchor]
    bx = (col + pred[0][row][col]) / lw
    by = (row + pred[1][row][col]) / lh
    bw = np.exp(pred[2][row][col]) * biases[2 * n] / w
    bh = np.exp(pred[3][row][col]) * biases[2 * n + 1] / h
    return Box(bx, by, bw, bh)
def _get_yolo_detections(l, im_shape, net_shape, thresh, relative, dets):
    """Extract detections from one Yolo output layer into ``dets``.

    ``l["output"]`` is indexed as [anchor, channel, row, col]; channel 4 is
    treated as objectness and channels from 5 on as class scores.
    """
    data = l["output"]
    # Indices of all cells whose objectness exceeds the threshold.
    active_data_loc = np.asarray(np.where(data[:, 4, :, :] > thresh))
    before_correct_dets = []
    for i in range(active_data_loc.shape[1]):
        # location = [anchor, row, col]
        location = [active_data_loc[0][i], active_data_loc[1][i], active_data_loc[2][i]]
        box_b = _get_box(
            data,
            l["biases"],
            np.asarray(l["mask"])[location[0]],
            location,
            data.shape[3],
            data.shape[2],
            net_shape[0],
            net_shape[1],
        )
        objectness = data[location[0]][4][location[1]][location[2]]
        classes = l["classes"]
        # NOTE(review): the slice end `5 + 1 + classes` looks one past the class
        # channels; numpy slicing clamps to the axis length, so presumably this
        # still yields exactly `classes` scores — confirm against layer shape.
        prob = objectness * data[location[0], 5 : 5 + 1 + classes, location[1], location[2]]
        prob[prob < thresh] = 0
        detection = {}
        detection["bbox"] = box_b
        detection["classes"] = classes
        detection["prob"] = prob
        detection["objectness"] = objectness
        before_correct_dets.append(detection)
    # Map from network (letterboxed) coordinates back to image coordinates.
    dets.extend(
        _correct_boxes(
            before_correct_dets, im_shape[0], im_shape[1], net_shape[0], net_shape[1], relative
        )
    )
def _get_region_detections(l, im_shape, net_shape, thresh, relative, dets):
    """Extract detections from one Region output layer into ``dets``.

    Unlike the Yolo path, every (anchor, row, col) cell is visited; the
    objectness channel sits at index ``l["coords"]`` and class scores follow it.
    """
    data = l["output"]
    before_correct_dets = []
    for row in range(data.shape[2]):
        for col in range(data.shape[3]):
            for n in range(data.shape[0]):
                prob = [0] * l["classes"]
                # With "background" layers the box is unconditionally scored 1.
                scale = data[n, l["coords"], row, col] if not l["background"] else 1
                location = [n, row, col]
                box_b = _get_box(
                    data,
                    l["biases"],
                    n,
                    location,
                    data.shape[3],
                    data.shape[2],
                    data.shape[3],
                    data.shape[2],
                )
                objectness = scale if scale > thresh else 0
                if objectness:
                    prob = (
                        scale * data[n, l["coords"] + 1 : l["coords"] + 1 + l["classes"], row, col]
                    )
                    prob[prob < thresh] = 0
                detection = {}
                detection["bbox"] = box_b
                detection["prob"] = prob
                detection["objectness"] = objectness
                before_correct_dets.append(detection)
    # _correct_boxes mutates the detections in place; the return value is the
    # same list, so extending afterwards is equivalent.
    _correct_boxes(
        before_correct_dets, im_shape[0], im_shape[1], net_shape[0], net_shape[1], relative
    )
    dets.extend(before_correct_dets)
def fill_network_boxes(net_shape, im_shape, thresh, relative, tvm_out):
    """Collect detections from every Yolo/Region output layer in ``tvm_out``."""
    dets = []
    handlers = {"Yolo": _get_yolo_detections, "Region": _get_region_detections}
    for layer in tvm_out:
        handler = handlers.get(layer["type"])
        if handler is not None:
            handler(layer, im_shape, net_shape, thresh, relative, dets)
    return dets
def do_nms_sort(dets, classes, thresh):
    """Class-wise non-maximum suppression, performed in place on ``dets``.

    First compacts the list so zero-objectness detections sit behind index
    ``k``; then for each class sorts the live prefix by that class's
    probability and zeroes the probability of any box whose IoU with a
    higher-ranked box exceeds ``thresh``.
    """
    # Partition step: move dead (objectness == 0) detections to the tail.
    k = len(dets) - 1
    cnt = 0
    # Fix: use <= so the element at index k is also examined. The previous
    # `cnt < k` skipped it, leaving one trailing dead detection inside the
    # live prefix (darknet's reference loop checks i <= k).
    while cnt <= k:
        if dets[cnt]["objectness"] == 0:
            dets[k], dets[cnt] = dets[cnt], dets[k]
            k = k - 1
        else:
            cnt = cnt + 1
    total = k + 1
    for k in range(classes):
        # Rank live detections by probability of class k.
        for i in range(total):
            dets[i]["sort_class"] = k
        dets[0:total] = sorted(dets[0:total], key=cmp_to_key(nms_comparator), reverse=True)
        # Greedy suppression of lower-ranked overlapping boxes.
        for i in range(total):
            if dets[i]["prob"][k] == 0:
                continue
            a = dets[i]["bbox"]
            for j in range(i + 1, total):
                b = dets[j]["bbox"]
                if _box_iou(a, b) > thresh:
                    dets[j]["prob"][k] = 0
def get_detections(im, det, thresh, names, classes):
    "Draw the markings around the detected region"
    labelstr = []
    category = -1
    # Collect every class whose probability clears the threshold; the first
    # such class determines the box colour.
    for j in range(classes):
        if det["prob"][j] > thresh:
            if category == -1:
                category = j
            labelstr.append(names[j] + " " + str(round(det["prob"][j], 4)))
    if category == -1:
        return False, None
    imc, imh, imw = im.shape
    width = int(imh * 0.006)
    offset = category * 123457 % classes
    rgb = [
        _get_color(2, offset, classes),
        _get_color(1, offset, classes),
        _get_color(0, offset, classes),
    ]
    b = det["bbox"]
    # Convert the relative center-format box to clamped pixel corners.
    left = max(int((b.x - b.w / 2.0) * imw), 0)
    right = min(int((b.x + b.w / 2.0) * imw), imw - 1)
    top = max(int((b.y - b.h / 2.0) * imh), 0)
    bot = min(int((b.y + b.h / 2.0) * imh), imh - 1)
    detection = {
        "category": category,
        "labelstr": labelstr,
        "left": left,
        "top": top,
        "right": right,
        "bot": bot,
        "width": width,
        "rgb": rgb,
    }
    return True, detection
def draw_detections(font_path, im, dets, thresh, names, classes):
    "Draw the markings around the detected region"
    for det in dets:
        valid, detection = get_detections(im, det, thresh, names, classes)
        if not valid:
            continue
        rgb = detection["rgb"]
        label = _get_label(font_path, "".join(detection["labelstr"]), rgb)
        _draw_box_width(
            im,
            detection["left"],
            detection["top"],
            detection["right"],
            detection["bot"],
            detection["width"],
            rgb[0],
            rgb[1],
            rgb[2],
        )
        _draw_label(im, detection["top"] + detection["width"], detection["left"], label, rgb)
def show_detections(im, dets, thresh, names, classes):
    "Print the markings and the detected region"
    for det in dets:
        valid, detection = get_detections(im, det, thresh, names, classes)
        if not valid:
            continue
        print(
            "class:{} left:{} top:{} right:{} bottom:{}".format(
                detection["labelstr"],
                detection["left"],
                detection["top"],
                detection["right"],
                detection["bot"],
            )
        )
def _get_pixel(im, x, y, c):
return im[c][y][x]
def _set_pixel(im, x, y, c, val):
if x < 0 or y < 0 or c < 0 or x >= im.shape[2] or y >= im.shape[1] or c >= im.shape[0]:
return
im[c][y][x] = val
def _draw_label(im, r, c, label, rgb):
    """Blit the CHW ``label`` image onto ``im`` with its bottom-left near (r, c).

    ``rgb`` is currently unused (the tinting multiply is commented out); pixels
    are copied as-is. Writes that fall outside ``im`` are dropped by
    ``_set_pixel``.
    """
    w = label.shape[2]
    h = label.shape[1]
    # Shift the label upward so it sits above row r when there is room.
    if (r - h) >= 0:
        r = r - h
    for j in range(h):
        if j < h and (j + r) < im.shape[1]:
            for i in range(w):
                if i < w and (i + c) < im.shape[2]:
                    for k in range(label.shape[0]):
                        val = _get_pixel(label, i, j, k)
                        _set_pixel(im, i + c, j + r, k, val)  # rgb[k] * val)
def _get_label(font_path, labelstr, rgb):
    """Render ``labelstr`` with PIL and return it as a CHW float array in [0, 1].

    The background colour is ``rgb`` (scaled to 0-255); the text is black.
    NOTE(review): ``ImageDraw.textsize`` was deprecated and removed in
    Pillow >= 10 — this requires an older Pillow; confirm before upgrading.
    """
    # pylint: disable=import-outside-toplevel
    from PIL import Image
    from PIL import ImageDraw
    from PIL import ImageFont
    text = labelstr
    colorText = "black"
    # Throwaway 1x1 canvas used only to measure the rendered text size.
    testDraw = ImageDraw.Draw(Image.new("RGB", (1, 1)))
    font = ImageFont.truetype(font_path, 25)
    width, height = testDraw.textsize(labelstr, font=font)
    img = Image.new(
        "RGB", (width, height), color=(int(rgb[0] * 255), int(rgb[1] * 255), int(rgb[2] * 255))
    )
    d = ImageDraw.Draw(img)
    d.text((0, 0), text, fill=colorText, font=font)
    # Normalize to [0, 1] and reorder HWC -> CHW to match the image layout.
    opencvImage = np.divide(np.asarray(img), 255)
    return opencvImage.transpose(2, 0, 1)
def _get_color(c, x, max_value):
c = int(c)
colors = [[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]]
ratio = (float(x) / float(max_value)) * 5
i = int(math.floor(ratio))
j = int(math.ceil(ratio))
ratio -= i
r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]
return r
def _draw_box(im, x1, y1, x2, y2, r, g, b):
y1 = int(y1)
y2 = int(y2)
x1 = int(x1)
x2 = int(x2)
ac, ah, aw = im.shape
if x1 < 0:
x1 = 0
if x1 >= aw:
y1 = 0
if y1 >= ah:
y1 = ah - 1
if y2 < 0:
y2 = 0
if y2 >= ah:
y2 = ah - 1
for i in range(x1, x2):
im[0][y1][i] = r
im[0][y2][i] = r
im[1][y1][i] = g
im[1][y2][i] = g
im[2][y1][i] = b
im[2][y2][i] = b
for i in range(y1, y2):
im[0][i][x1] = r
im[0][i][x2] = r
im[1][i][x1] = g
im[1][i][x2] = g
im[2][i][x1] = b
im[2][i][x2] = b
def _draw_box_width(im, x1, y1, x2, y2, w, r, g, b):
    """Draw a rectangle outline of thickness ``w`` as nested 1-pixel boxes."""
    for offset in range(int(w)):
        _draw_box(im, x1 + offset, y1 + offset, x2 - offset, y2 - offset, r, g, b)
| 12,033 | 30.751979 | 99 | py |
tvm | tvm-main/python/tvm/relay/dataflow_pattern/_ffi.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""DataFlow Pattern Language FFI bindings."""
import tvm._ffi
# Populate this module with the C++-registered relay.dataflow_pattern functions.
tvm._ffi._init_api("relay.dataflow_pattern", __name__)
| 903 | 42.047619 | 62 | py |
tvm | tvm-main/python/tvm/relay/dataflow_pattern/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The Relay Pattern Language and tooling."""
# pylint: disable=no-member
from typing import Callable, Dict, List, Optional
import tvm._ffi
from tvm.relay.expr import RelayExpr as Expr
from ... import _ffi as tvm_ffi
from ... import ir as _ir
from ...ir import make_node
from ...ir.base import Node
from ...runtime import Object
from ..base import astext, pretty_print
from ..op import get
from . import _ffi as ffi
def register_df_node(type_key=None):
    """Register a Relay dataflow-pattern node type.

    Parameters
    ----------
    type_key : str or cls
        The type key of the node. When used as a bare class decorator, the key
        is derived from the class name under "relay.dataflow_pattern.".
    """
    if isinstance(type_key, str):
        return tvm._ffi.register_object(type_key)
    return tvm._ffi.register_object("relay.dataflow_pattern." + type_key.__name__)(type_key)
class DFPattern(Node):
    """Base class of all Patterns."""
    def __str__(self):
        return pretty_print(self)
    def astext(self, show_meta_data=True, annotate=None):
        """Get the text format of the expression.
        Parameters
        ----------
        show_meta_data : bool
            Whether to include meta data section in the text
            if there is meta data.
        annotate: Optional[Object->str]
            Optionally annotate function to provide additional
            information in the comment block.
        Returns
        -------
        text : str
            The text format of the expression.
        Notes
        -----
        The meta data section is necessary to fully parse the text format.
        However, it can contain dumps that are big (e.g constant weights),
        so it can be helpful to skip printing the meta data section.
        """
        return astext(self, show_meta_data, annotate)
    # Calling a pattern builds a CallPattern with this pattern as the callee;
    # a single None argument means "match a call with any arguments".
    def __call__(self, *args):
        args = list(args)
        if len(args) == 1 and args[0] is None:
            args = None
        return CallPattern(self, args)
    # `p1 | p2` matches either alternative.
    def __or__(self, other):
        return AltPattern(self, other)
    # Arithmetic overloads build patterns over the corresponding Relay ops.
    def __add__(self, other):
        return is_op("add")(self, other)
    def __sub__(self, other):
        return is_op("subtract")(self, other)
    def __mul__(self, other):
        return is_op("multiply")(self, other)
    def __truediv__(self, other):
        return is_op("divide")(self, other)
    def has_attr(self, attrs: Dict[str, Object]):
        """
        Add an attribute constraint to this pattern
        Parameters
        ----------
        attrs: Dict[str, Object]
        Returns
        -------
        result: tvm.relay.dataflow_pattern.DFPattern
            The resulting AttrPattern
        """
        attrs = make_node("DictAttrs", **attrs)
        return AttrPattern(self, attrs)
    def has_type(self, ttype: tvm.ir.type.Type):
        """
        Add a type constraint to this pattern
        Parameters
        ----------
        ttype: tvm.ir.type.Type
            The type to match
        Returns
        -------
        result: tvm.relay.dataflow_pattern.DFPattern
            The resulting TypePattern
        """
        return has_type(ttype, self)
    def has_dtype(self, dtype: str):
        """
        Add a type constraint to this pattern
        Parameters
        ----------
        dtype: str
            The dtype to match
        Returns
        -------
        result: tvm.relay.dataflow_pattern.DFPattern
            The resulting DataTypePattern
        """
        return has_dtype(dtype, self)
    def has_shape(self, shape: List[tvm.ir.PrimExpr]):
        """
        Add a type constraint to this pattern
        Parameters
        ----------
        shape: List[tvm.ir.PrimExpr]
            The shape to match
        Returns
        -------
        result: tvm.relay.dataflow_pattern.DFPattern
            The resulting ShapePattern
        """
        return has_shape(shape, self)
    def match(self, expr: Expr) -> bool:
        """
        Match this pattern to an expression
        Parameters
        ----------
        expr : tvm.relay.Expr
            The expression to match.
        Returns
        -------
        result: bool
            Whether or not the expression matches the pattern
        """
        return match(self, expr)
    def partition(
        self,
        expr: Expr,
        attrs: Optional[Dict[str, Object]] = None,
        check: Callable[[Expr], bool] = lambda x: True,
    ) -> Expr:
        """
        Partition the expression into functions defined by this pattern
        Parameters
        ----------
        expr : tvm.relay.Expr
            The expression to match.
        attrs : Optional[Dict[str, Object]]
            A dictionary of Attribute name/values to add to the paritioned function
        check : Callable[[Expr], bool]
            A function to perform more complicated checks on the matched expression.
            Returns true if partitioning should proceed, false otherwise.
        Returns
        -------
        result : tvm.relay.Expr
            The Expression with matched subgraphs replaced by function calls to that subgraph
        """
        return partition(self, expr, attrs, check)
    def dominates(self, parent: "DFPattern", path: "DFPattern" = None):
        """
        Create a dominator for this pattern.
        Parameters
        ----------
        parent: tvm.relay.dataflow_pattern.DFPattern
            The parent pattern this pattern dominates.
        path: tvm.relay.dataflow_pattern.DFPattern
            The fuzzy path pattern.
        Returns
        -------
        result: tvm.relay.dataflow_pattern.DFPattern
            The resulting DominatorPattern.
        """
        # A missing path defaults to a wildcard: any chain of intermediate
        # expressions between parent and this pattern is accepted.
        if path is None:
            path = wildcard()
        return DominatorPattern(parent, path, self)
    def optional(self, option_constructor: Callable[["DFPattern"], "DFPattern"]):
        """
        Create a optional user of this pattern.
        Parameters
        ----------
        option_constructor: function
            A function that takes a single Pattern parameter and returns
            a constructed pattern matching the option
        Returns
        -------
        result: tvm.relay.dataflow_pattern.DFPattern
            The resulting Pattern
        """
        return self | option_constructor(self)
def is_var(name: str = "") -> "DFPattern":
    """Shorthand for an optionally named :class:`VarPattern`.

    Parameters
    ----------
    name: str
        Variable name to match; an empty string matches any variable.

    Returns
    -------
    result: tvm.relay.dataflow_pattern.DFPattern
        The constructed pattern.
    """
    return VarPattern(name)
def is_constant() -> "DFPattern":
    """Shorthand for a :class:`ConstantPattern`.

    Returns
    -------
    result: tvm.relay.dataflow_pattern.DFPattern
        The constructed pattern.
    """
    return ConstantPattern()
def is_expr(expr: Expr) -> "DFPattern":
    """Shorthand for an :class:`ExprPattern`.

    Parameters
    ----------
    expr: Expr
        The Relay expression to match exactly.

    Returns
    -------
    result: tvm.relay.dataflow_pattern.DFPattern
        The constructed pattern.
    """
    return ExprPattern(expr)
def is_op(op_name: str) -> "DFPattern":
    """Shorthand for an :class:`ExprPattern` matching a Relay operator.

    Parameters
    ----------
    op_name: String
        The registered name of the relay op.

    Returns
    -------
    result: tvm.relay.dataflow_pattern.DFPattern
        The constructed pattern.
    """
    return ExprPattern(get(op_name))
def is_tuple(fields: tvm.ir.container.Array) -> "DFPattern":
    """Shorthand for a :class:`TuplePattern`.

    Parameters
    ----------
    fields : Array[tvm.relay.dataflow_pattern.DFPattern]
        Patterns for the tuple fields.

    Returns
    -------
    result: tvm.relay.dataflow_pattern.DFPattern
        The constructed pattern.
    """
    return TuplePattern(fields)
def is_tuple_get_item(tuple_value: "DFPattern", index: Optional[int] = None) -> "DFPattern":
    """Shorthand for a :class:`TupleGetItemPattern`.

    Parameters
    ----------
    tuple_value: tvm.relay.dataflow_pattern.DFPattern
        Pattern for the tuple being indexed.
    index: Optional[int]
        The index to match; None matches a TupleGetItem with any index.

    Returns
    -------
    result: tvm.relay.dataflow_pattern.DFPattern
        The constructed pattern.
    """
    return TupleGetItemPattern(tuple_value, index)
def is_if(cond, true_branch, false_branch):
    """Shorthand for an :class:`IfPattern`.

    Parameters
    ----------
    cond: tvm.relay.dataflow_pattern.DFPattern
        Pattern for the condition.
    true_branch: tvm.relay.dataflow_pattern.DFPattern
        Pattern for the true branch.
    false_branch: tvm.relay.dataflow_pattern.DFPattern
        Pattern for the false branch.

    Returns
    -------
    result: tvm.relay.dataflow_pattern.DFPattern
        The constructed pattern.
    """
    return IfPattern(cond, true_branch, false_branch)
def is_let(var, value, body):
    """Shorthand for a :class:`LetPattern`.

    Parameters
    ----------
    var: tvm.relay.dataflow_pattern.DFPattern
        Pattern for the bound variable.
    value: tvm.relay.dataflow_pattern.DFPattern
        Pattern for the bound value.
    body: tvm.relay.dataflow_pattern.DFPattern
        Pattern for the body in which the binding is in effect.

    Returns
    -------
    result: tvm.relay.dataflow_pattern.DFPattern
        The constructed pattern.
    """
    return LetPattern(var, value, body)
def wildcard() -> "DFPattern":
    """Shorthand for a :class:`WildcardPattern`, which matches anything.

    Returns
    -------
    result: tvm.relay.dataflow_pattern.DFPattern
        The constructed pattern.
    """
    return WildcardPattern()
def has_type(ttype: tvm.ir.type.Type, pattern: "DFPattern" = None) -> "DFPattern":
    """Shorthand for a :class:`TypePattern`.

    Parameters
    ----------
    ttype: tvm.ir.type.Type
        The type to match.
    pattern: tvm.relay.dataflow_pattern.DFPattern
        The pattern to constrain; defaults to a wildcard.

    Returns
    -------
    result: tvm.relay.dataflow_pattern.DFPattern
        The constructed pattern.
    """
    target = wildcard() if pattern is None else pattern
    return TypePattern(target, ttype)
def has_dtype(dtype: str, pattern: "DFPattern" = None) -> "DFPattern":
    """Shorthand for a :class:`DataTypePattern`.

    Parameters
    ----------
    dtype: str
        The dtype to match.
    pattern: tvm.relay.dataflow_pattern.DFPattern
        The pattern to constrain; defaults to a wildcard.

    Returns
    -------
    result: tvm.relay.dataflow_pattern.DFPattern
        The constructed pattern.
    """
    target = wildcard() if pattern is None else pattern
    return DataTypePattern(target, dtype)
def has_shape(shape: List[tvm.ir.PrimExpr], pattern: "DFPattern" = None) -> "DFPattern":
    """Shorthand for a :class:`ShapePattern`.

    Parameters
    ----------
    shape: List[tvm.ir.PrimExpr]
        The shape to match.
    pattern: tvm.relay.dataflow_pattern.DFPattern
        The pattern to constrain; defaults to a wildcard.

    Returns
    -------
    result: tvm.relay.dataflow_pattern.DFPattern
        The constructed pattern.
    """
    target = wildcard() if pattern is None else pattern
    return ShapePattern(target, shape)
def has_attr(attrs, pattern=None) -> "DFPattern":
    """Shorthand for an :class:`AttrPattern`.

    Parameters
    ----------
    attrs: Dict[str, Object]
        The attributes to match.
    pattern: Optional[tvm.relay.dataflow_pattern.DFPattern]
        The pattern to constrain; defaults to a wildcard.

    Returns
    -------
    result: tvm.relay.dataflow_pattern.DFPattern
        The constructed pattern.
    """
    target = wildcard() if pattern is None else pattern
    return target.has_attr(attrs)
def dominates(parent: "DFPattern", path: "DFPattern", child: "DFPattern") -> "DFPattern":
    """Shorthand for a :class:`DominatorPattern`.

    Parameters
    ----------
    parent: tvm.relay.dataflow_pattern.DFPattern
        The parent pattern.
    path: tvm.relay.dataflow_pattern.DFPattern
        The fuzzy path pattern.
    child: tvm.relay.dataflow_pattern.DFPattern
        The child pattern.

    Returns
    -------
    result: tvm.relay.dataflow_pattern.DFPattern
        The constructed pattern.
    """
    return DominatorPattern(parent, path, child)
def match(pattern: "DFPattern", expr: Expr) -> bool:
    """Return whether ``pattern`` matches ``expr``.

    Parameters
    ----------
    pattern: tvm.relay.dataflow_pattern.DFPattern
        The pattern to match with.
    expr : tvm.relay.Expr
        The expression to match against.
    """
    return ffi.match(pattern, expr)
@register_df_node
class ExprPattern(DFPattern):
    """Pattern matching one specific Relay expression.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The expression to match.
    """
    def __init__(self, expr: Expr):
        self.__init_handle_by_constructor__(ffi.ExprPattern, expr)
@register_df_node
class VarPattern(DFPattern):
    """Pattern matching a Relay local variable.

    Parameters
    ----------
    name_hint: str
        The name of the variable to match. If empty, any VarNode matches.
    """
    def __init__(self, name_hint: str = ""):
        self.__init_handle_by_constructor__(ffi.VarPattern, name_hint)
@register_df_node
class ConstantPattern(DFPattern):
    """Pattern matching any Relay Constant node."""
    def __init__(self):
        self.__init_handle_by_constructor__(ffi.ConstantPattern)
@register_df_node
class CallPattern(DFPattern):
    """Pattern matching a Relay call node.

    Parameters
    ----------
    op: relay.dataflow_pattern.DFPattern
        Pattern for the callee.
    args: List[relay.dataflow_pattern.DFPattern]
        Patterns for the arguments, or None to accept any arguments.
    """
    def __init__(
        self,
        op: "DFPattern",
        args: List["DFPattern"],
    ):
        self.__init_handle_by_constructor__(ffi.CallPattern, op, args)
@register_df_node
class FunctionPattern(DFPattern):
    """Pattern matching a Relay function node.

    Parameters
    ----------
    params: List[relay.dataflow_pattern.DFPattern]
        Patterns for the parameters, or None to accept any parameters.
    body: relay.dataflow_pattern.DFPattern
        Pattern for the body of the function.
    """
    def __init__(
        self,
        params: List["DFPattern"],
        body: "DFPattern",
    ):
        self.__init_handle_by_constructor__(ffi.FunctionPattern, params, body)
@register_df_node
class IfPattern(DFPattern):
    """Pattern matching a Relay If node.

    Parameters
    ----------
    cond: tvm.relay.dataflow_pattern.DFPattern
        Pattern for the condition.
    true_branch: tvm.relay.dataflow_pattern.DFPattern
        Pattern for the true branch.
    false_branch: tvm.relay.dataflow_pattern.DFPattern
        Pattern for the false branch.
    """
    def __init__(self, cond: "DFPattern", true_branch: "DFPattern", false_branch: "DFPattern"):
        self.__init_handle_by_constructor__(ffi.IfPattern, cond, true_branch, false_branch)
@register_df_node
class LetPattern(DFPattern):
    """Pattern matching a Relay Let node.

    Parameters
    ----------
    var: tvm.relay.dataflow_pattern.DFPattern
        Pattern for the bound variable.
    value: tvm.relay.dataflow_pattern.DFPattern
        Pattern for the bound value.
    body: tvm.relay.dataflow_pattern.DFPattern
        Pattern for the body in which the binding is in effect.
    """
    def __init__(self, var: "DFPattern", value: "DFPattern", body: "DFPattern"):
        self.__init_handle_by_constructor__(ffi.LetPattern, var, value, body)
@register_df_node
class TuplePattern(DFPattern):
    """Pattern matching a Relay Tuple.

    Parameters
    ----------
    fields : Array[tvm.relay.dataflow_pattern.DFPattern]
        Patterns for the tuple fields.
    """
    def __init__(self, fields: tvm.ir.container.Array):
        self.__init_handle_by_constructor__(ffi.TuplePattern, fields)
    def __getitem__(self, index: int):
        # Bounds-check explicitly to raise the conventional IndexError.
        if index < len(self):
            return self.fields[index]
        raise IndexError("TuplePattern index out of range")
    def __len__(self):
        return len(self.fields)
    def astype(self, _):
        raise TypeError("astype cannot be used on TuplePattern")
@register_df_node
class TupleGetItemPattern(DFPattern):
    """Pattern matching a Relay TupleGetItem.

    Parameters
    ----------
    tuple_value: tvm.relay.dataflow_pattern.DFPattern
        Pattern for the tuple being indexed.
    index: Optional[int]
        The index to match; None matches a TupleGetItem with any index.
    """
    def __init__(self, tuple_value: "DFPattern", index: Optional[int] = None):
        # -1 is the C++ side's sentinel for "any index".
        match_index = -1 if index is None else index
        self.__init_handle_by_constructor__(ffi.TupleGetItemPattern, tuple_value, match_index)
@register_df_node
class AltPattern(DFPattern):
    """Pattern that matches when either of two alternatives matches.

    Parameters
    ----------
    left: tvm.relay.dataflow_pattern.DFPattern
        One possible matching pattern.
    right: tvm.relay.dataflow_pattern.DFPattern
        One possible matching pattern.
    """
    def __init__(self, left: "DFPattern", right: "DFPattern"):
        self.__init_handle_by_constructor__(ffi.AltPattern, left, right)
@register_df_node
class WildcardPattern(DFPattern):
    """Pattern matching any Relay expression."""
    def __init__(self):
        self.__init_handle_by_constructor__(ffi.WildcardPattern)
@register_df_node
class TypePattern(DFPattern):
    """Pattern constraining an inner pattern to a given type annotation.

    Parameters
    ----------
    pattern: tvm.relay.dataflow_pattern.DFPattern
        The inner pattern being constrained.
    ttype: tvm.ir.type.Type
        The type to match.
    """
    def __init__(self, pattern: "DFPattern", ttype: tvm.ir.type.Type):
        self.__init_handle_by_constructor__(ffi.TypePattern, pattern, ttype)
@register_df_node
class DataTypePattern(DFPattern):
    """Pattern constraining an inner pattern to a given data type.

    Parameters
    ----------
    pattern: tvm.relay.dataflow_pattern.DFPattern
        The inner pattern being constrained.
    dtype: str
        The dtype to match.
    """
    def __init__(self, pattern: "DFPattern", dtype: str):
        self.__init_handle_by_constructor__(ffi.DataTypePattern, pattern, dtype)
@register_df_node
class ShapePattern(DFPattern):
    """Pattern constraining an inner pattern to a given tensor shape.

    Parameters
    ----------
    pattern: tvm.relay.dataflow_pattern.DFPattern
        The inner pattern being constrained.
    shape: List[tvm.ir.PrimExpr]
        The shape to match.
    """
    def __init__(self, pattern: "DFPattern", shape: List[tvm.ir.PrimExpr]):
        self.__init_handle_by_constructor__(ffi.ShapePattern, pattern, shape)
@register_df_node
class AttrPattern(DFPattern):
    """Pattern constraining an expression to carry certain attributes.

    Currently only supports Op Attributes, not call Attributes.

    Parameters
    ----------
    pattern: tvm.relay.dataflow_pattern.DFPattern
        The inner pattern being constrained.
    attrs: tvm.ir.attrs.Attrs
        The attributes to match.
    """
    def __init__(self, pattern: "DFPattern", attrs: tvm.ir.attrs.Attrs):
        self.__init_handle_by_constructor__(ffi.AttrPattern, pattern, attrs)
@register_df_node
class DominatorPattern(DFPattern):
    """Pattern matching a domination graph.

    Parameters
    ----------
    parent: tvm.relay.dataflow_pattern.DFPattern
        The single producer node whose output is later aggregated by the child.
    path: tvm.relay.dataflow_pattern.DFPattern
        The fuzzy path pattern between parent and child, typically matching
        elementwise ops.
    child: tvm.relay.dataflow_pattern.DFPattern
        The final consumer of all nodes on the path and of the parent.
    """
    def __init__(self, parent: "DFPattern", path: "DFPattern", child: "DFPattern"):
        self.__init_handle_by_constructor__(ffi.DominatorPattern, parent, path, child)
class DFPatternCallback:
    """Base class for pattern-rewriting callbacks.

    Subclasses assign ``self.pattern`` and implement :py:meth:`callback`.
    When :py:meth:`rewrite` runs, the backend finds every match of the
    pattern and replaces it with whatever the callback returns.

    Parameters
    ----------
    require_type: bool
        Whether InferType is required to be run before the callback.
    rewrite_once: bool
        If True, run the callback only once.
    """
    def __init__(self, require_type=False, rewrite_once=False):
        # Subclasses are responsible for assigning a real pattern here.
        self.pattern = None
        self.require_type = require_type
        self.rewrite_once = rewrite_once
    def rewrite(self, expr: Expr) -> Expr:
        """Apply this callback to ``expr``.

        Parameters
        ----------
        expr : tvm.relay.Expr
            The expression to rewrite.

        Returns
        -------
        result : tvm.relay.Expr
            The expression with matched subgraphs rewritten by the callback.
        """
        return rewrite(self, expr)
    def callback(self, pre: Expr, post: Expr, node_map: tvm.ir.container.Map) -> Expr:
        """Hook invoked for every match; must return the replacement expression.

        Parameters
        ----------
        pre : tvm.relay.Expr
            The matching expression from the original graph.
        post : tvm.relay.Expr
            The matching expression with rewritten inputs.
        node_map : tvm.ir.container.Map[DFPattern, List[Expr]]
            The map between patterns and matched expressions.
        """
        raise NotImplementedError()
class _DFPatternCallback(Object):
    """C++ implementation wrapper for a single pattern-rewrite callback."""
    def __init__(self, pattern, callback, require_type, rewrite_once):
        self.__init_handle_by_constructor__(
            ffi.DFPatternCallback, pattern, callback, require_type, rewrite_once
        )
def rewrite(callbacks, expr: Expr, mod: Optional[_ir.IRModule] = None) -> Expr:
    """
    Rewrite expression with the given callbacks.

    Parameters
    ----------
    callbacks: tvm.relay.dataflow_pattern.DFPatternCallback
        The input callback or list of callbacks.
    expr : tvm.relay.Expr
        The expression to rewrite.
    mod : Optional[tvm.ir.IRModule]
        The module that associates with the expression.

    Returns
    -------
    result : tvm.relay.Expr
        The Expression with matched subgraphs rewritten by the callbacks.
    """
    # A lone callback is handled as a one-element list.
    if isinstance(callbacks, DFPatternCallback):
        callbacks = [callbacks]
    if mod is None:
        mod = _ir.IRModule()
    packed_callbacks = []
    for user_callback in callbacks:
        # Each callback must have populated self.pattern before rewriting.
        assert user_callback.pattern is not None
        packed_callbacks.append(
            _DFPatternCallback(
                user_callback.pattern,
                user_callback.callback,
                user_callback.require_type,
                user_callback.rewrite_once,
            )
        )
    return ffi.rewrite(packed_callbacks, expr, mod)
def partition(
    pattern: "DFPattern",
    expr: Expr,
    attrs: Optional[Dict[str, Object]] = None,
    check: Callable[[Expr], bool] = lambda x: True,
) -> Expr:
    """
    Partition the expression into a series of functions that match the pattern
    Parameters
    ----------
    pattern: tvm.relay.dataflow_pattern.DFPattern
        The pattern to match
    expr : tvm.relay.Expr
        The expression to split into functions
    attrs : Optional[Dict[str, Object]]
        A dict of attributes to apply to the partitioned function
    check : Callable[[Expr], bool]
        A function to perform more complicated checks on the matched expression.
        Returns true if partitioning should proceed, false otherwise.
    Returns
    -------
    result : tvm.relay.Expr
        The Expression with matched subgraphs replaced by function calls to that subgraph
    """
    return ffi.partition(pattern, expr, attrs, check)
| 25,675 | 26.199153 | 97 | py |
tvm | tvm-main/python/tvm/relay/backend/vm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, invalid-name, redefined-builtin
"""
The Relay Virtual Machine.
Implements a Python interface to compiling and executing on the Relay VM.
"""
import numpy as np
import tvm.runtime.ndarray as _nd
import tvm.runtime.vm as vm_rt
from tvm import autotvm
from tvm.relay import expr as _expr
from tvm.relay.backend.interpreter import Executor
from tvm.target import Target
from . import _vm
def compile(mod, target=None, target_host=None, params=None):
    """Compile a Relay module into a VM executable.

    A convenience wrapper around :py:class:`VMCompiler`.

    Parameters
    ----------
    mod : tvm.IRModule
        The Relay module to build.
    target : any multi-target like object, see Target.canon_multi_target
        For homogeneous compilation, the unique build target.
        For heterogeneous compilation, a dictionary or list of possible build targets.
    target_host : None, or any target-like object, see Target.canon_target
        Host compilation target, if target is device.
        When TVM compiles device specific program such as CUDA,
        we also need host(CPU) side code to interact with the driver
        to setup the dimensions and parameters correctly.
        target_host is used to specify the host side codegen target.
        By default, llvm is used if it is enabled,
        otherwise a stackvm intepreter is used.
    params : dict of str to NDArray
        Input parameters to the graph that do not change
        during inference time. Used for constant folding.

    Returns
    -------
    exec : tvm.runtime.vm.Executable
        The VM executable that contains both library code and bytecode.
    """
    vm_compiler = VMCompiler()
    if params:
        # Bind constant parameters so they can be folded during lowering.
        vm_compiler.set_params(params)
    vm_compiler.lower(mod, target, target_host)
    vm_compiler.codegen()
    executable = vm_compiler.get_exec()
    return executable
class VMCompiler(object):
    """Compiler that compiles Relay module to VM executable."""
    def __init__(self):
        # Cache the PackedFuncs exposed by the underlying C++ _VMCompiler module.
        self.mod = _vm._VMCompiler()
        self._lower = self.mod["lower"]
        self._codegen = self.mod["codegen"]
        self._get_exec = self.mod["get_executable"]
        self._set_params_func = self.mod["set_params"]
        self._get_params_func = self.mod["get_params"]
        self._optimize = self.mod["optimize"]
    def set_params(self, params):
        """Set constant parameters for the model.
        Parameters
        ----------
        params : dict of str to NDArray
            Input parameters to the graph that do not change
            during inference time. Used for constant folding.
        """
        # Wrap each value as a relay Constant; numpy arrays are converted to NDArray first.
        inputs = {}
        for name, param in params.items():
            if isinstance(param, np.ndarray):
                param = _nd.array(param)
            inputs[name] = _expr.const(param)
        self._set_params_func(inputs)
    def get_params(self):
        """Return the updated weights."""
        params = self._get_params_func()
        ret = {}
        for key, value in params.items():
            # value is a relay Constant; expose the raw NDArray to callers.
            ret[key] = value.data
        return ret
    def lower(self, mod, target=None, target_host=None):
        """Lower the module to VM bytecode.
        Parameters
        ----------
        mod : tvm.IRModule
            The Relay module to build.
        target : any multi-target like object, see Target.canon_multi_target
            For homogeneous compilation, the unique build target.
            For heterogeneous compilation, a dictionary or list of possible build targets.
        target_host : any target-like object, see Target.canon_target
            Host compilation target, if target is device.
        """
        raw_targets = Target.canon_multi_target_and_host(target, target_host)
        # Lower inside a TopHub context so pre-tuned schedules are picked up
        # when no explicit autotvm dispatch context is active.
        tophub_context = self._tophub_context(raw_targets)
        with tophub_context:
            self._lower(mod, raw_targets)
    def codegen(self):
        """Generate the kernel library."""
        self._codegen()
    def optimize(self, mod, target=None, target_host=None, params=None):
        """Helper method that optimizes a Relay module via VM.
        Parameters
        ----------
        mod : tvm.IRModule
        target : any multi-target like object, see Target.canon_multi_target
            For homogeneous compilation, the unique build target.
            For heterogeneous compilation, a dictionary or list of possible build targets.
        target_host : any target-like object, see Target.canon_target
            Host compilation target, if target is device.
        params : dict of str to NDArray
            Input parameters to the graph that do not change
            during inference time. Used for constant folding.
        Returns
        -------
        mod : tvm.IRModule
            The optimized relay module.
        params : dict
            The parameters of the final module.
        """
        raw_targets = Target.canon_multi_target_and_host(target, target_host)
        if params:
            self.set_params(params)
        return self._optimize(mod, raw_targets), self.get_params()
    def get_exec(self):
        """Get the VM executable.
        Returns
        -------
        exec : tvm.runtime.vm.Executable
            The VM executable that contains both library code and bytecode.
        """
        return vm_rt.Executable(self._get_exec())
    def _tophub_context(self, raw_targets):
        """Get the autotvm context."""
        # If current dispatch context is fallback context (the default root context),
        # then load pre-tuned parameters from TopHub
        if isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext):
            tophub_context = autotvm.tophub.context(raw_targets)
        else:
            tophub_context = autotvm.utils.EmptyContext()
        return tophub_context
class VMExecutor(Executor):
    """
    An implementation of the executor interface for
    the Relay VM.
    Useful interface for experimentation and debugging;
    the VM can also be used directly from the API
    supported by `tvm.runtime.vm`.
    Parameters
    ----------
    mod : :py:class:`~tvm.IRModule`
        The module to support the execution.
    device : :py:class:`~tvm.runtime.Device`
        The runtime device to run the code on.
    target : any multi-target like object, see Target.canon_multi_target
        For homogeneous compilation, the unique build target.
        For heterogeneous compilation, a dictionary or list of possible build targets.
    """
    def __init__(self, mod, device, target):
        if mod is None:
            raise RuntimeError("Must provide module to get VM executor.")
        self.mod = mod
        self.device = device
        self.target = target
        # Populated lazily by _make_executor.
        self.executable = None
        self.vm = None
    def _make_executor(self, expr=None):
        if expr:
            self.mod["main"] = expr
        # Compile the module and instantiate a VM bound to the requested device.
        self.executable = compile(self.mod, self.target)
        self.vm = vm_rt.VirtualMachine(self.executable, self.device)
        def _vm_wrapper(*args, **kwargs):
            # Normalize positional/keyword arguments against main's signature.
            args = self._convert_args(self.mod["main"], args, kwargs)
            return self.vm.run(*args)
        return _vm_wrapper
| 7,968 | 34.105727 | 108 | py |
tvm | tvm-main/python/tvm/relay/backend/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility backend functions."""
from enum import Enum
class CallType(Enum):
    # Calling-convention selector for lowered functions. The integer values
    # are passed across the FFI boundary as-is, so the numbering must stay stable.
    Packed = 0
    CPacked = 1
    Unpacked = 2
def _is_valid_modname(mod_name):
"""Determine if mod_name is a valid string to use inside function names"""
if mod_name:
try:
mod_name.encode("ascii")
return True
except UnicodeEncodeError:
return False
return True
def mangle_module_name(mod_name):
    """Return the TVM codegen prefix for *mod_name*.

    Raises ValueError when the name contains non-ASCII characters; an
    empty/None name yields the bare "tvmgen" prefix.
    """
    if not _is_valid_modname(mod_name):
        raise ValueError(mod_name + " contains invalid characters")
    return "tvmgen_" + mod_name if mod_name else "tvmgen"
| 1,411 | 30.377778 | 78 | py |
tvm | tvm-main/python/tvm/relay/backend/interpreter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, broad-exception-raised
"""The Python interface to the Relay reference interpreter."""
from __future__ import absolute_import
import numpy as np
import tvm._ffi
from tvm.runtime import container, Object
from . import _backend
from .. import _make, analysis
from ... import nd
from ..expr import Tuple, RefCreate, Call, Constant, GlobalVar, const
from ..function import Function
from ..scope_builder import ScopeBuilder
@tvm._ffi.register_object("relay.ConstructorValue")
class ConstructorValue(Object):
    # Runtime value of an ADT constructor application: the constructor tag,
    # its field values, and the constructor itself.
    def __init__(self, tag, fields, constructor):
        self.__init_handle_by_constructor__(_make.ConstructorValue, tag, fields, constructor)
@tvm._ffi.register_object("relay.RefValue")
class RefValue(Object):
    # Runtime value of a Relay mutable reference cell.
    def __init__(self, value):
        self.__init_handle_by_constructor__(_make.RefValue, value)
def _arg_to_ast(mod, arg):
    # Recursively reflect a Python/runtime value into a Relay AST expression:
    # NDArrays become Constants (copied to CPU), ADTs and tuples become Tuple
    # nodes, RefValues become RefCreate, ConstructorValues become calls to the
    # matching constructor looked up in `mod`, numpy arrays become Constants,
    # and anything else is wrapped with `const`.
    if isinstance(arg, nd.NDArray):
        return Constant(arg.copyto(nd.cpu(0)))
    elif isinstance(arg, container.ADT):
        return Tuple([_arg_to_ast(mod, field) for field in arg])
    elif isinstance(arg, tuple):
        return Tuple([_arg_to_ast(mod, field) for field in arg])
    elif isinstance(arg, RefValue):
        return RefCreate(_arg_to_ast(mod, arg.value))
    elif isinstance(arg, ConstructorValue):
        return Call(mod.get_constructor(arg.tag), [_arg_to_ast(mod, field) for field in arg.fields])
    elif isinstance(arg, np.ndarray):
        return Constant(nd.array(arg))
    elif isinstance(arg, Constant):
        return arg
    else:
        return const(arg)
class Executor(object):
    """An abstract interface for executing Relay programs."""

    def _convert_args(self, expr, args, kwargs):
        """
        Convert the combination of arguments and keyword arguments
        into a sequence of arguments that may be passed to
        a Relay evaluator.

        We first provide all positional arguments, and then attempt
        to fill in the remaining arguments using the keyword arguments. We
        map the keyword arguments to the corresponding parameters, if there
        is an ambiguity between positional and keyword arguments this
        procedure will raise an error.

        Parameters
        ----------
        expr: relay.Expr
            The expression to evaluate

        args: List[tvm.nd.NDArray]
            The arguments to pass to the evaluator.

        kwargs: Dict[str, tvm.NDArrray]
            The keyword arguments to pass to the evaluator.

        Returns:
            args: List[tvm.nd.NDArray]
                The new arguments with all keyword arguments placed in the correct slot.
        """
        assert expr is not None

        if not kwargs:
            return args

        if kwargs and not isinstance(expr, Function):
            raise Exception(
                f"can only supply keyword parameters for a relay.Function, found {expr}"
            )

        params = expr.params
        param_names = [p.name_hint for p in params]
        num_of_args = len(args)

        cargs = list(args)[:]
        for i, name in enumerate(param_names):
            if i < num_of_args:
                # Membership test (not truthiness) so that falsy values such
                # as 0 supplied both positionally and by keyword are still
                # reported as duplicates.
                if name in kwargs:
                    raise Exception(
                        f"duplicate argument supplied in "
                        f"both positional args (at position: {i}), "
                        f"and keyword argument (with name: {name})"
                    )
            else:
                cargs.append(kwargs[name])

        if len(cargs) != len(params):
            # The counts were previously swapped: `expected` is the number of
            # declared parameters, `provided` is what the caller supplied.
            raise Exception(
                f"insufficient arguments, expected "
                f"{len(params)}, provided {len(cargs)}"
            )

        return tuple(cargs)

    def _make_executor(self, expr=None):
        """
        Construct a Python function that implements the evaluation
        of expression.

        Parameters
        ----------
        expr: Optional[relay.Expr]
            The Relay expression to execute.

        Returns
        -------
        executor: function,
            A Python function which implements the behavior of `expr`.
        """
        raise NotImplementedError()

    def evaluate(self, expr=None, binds=None):
        """
        Evaluate a Relay expression on the executor.

        Parameters
        ----------
        expr: Optional[tvm.relay.Expr]
            The expression to evaluate.

        binds: Optional[Map[tvm.relay.Var, tvm.relay.Expr]]
            Additional binding of free variable.

        Returns
        -------
        val : Union[function, Object]
            The evaluation result.
        """
        if binds:
            # Wrap the expression in let-bindings for each free variable.
            scope_builder = ScopeBuilder()
            for key, value in binds.items():
                scope_builder.let(key, _arg_to_ast(self.mod, value))
            scope_builder.ret(expr)
            expr = scope_builder.get()

        if not expr:
            return self._make_executor()

        if isinstance(expr, Function):
            assert not analysis.free_vars(expr)

        if isinstance(expr, (Function, GlobalVar)):
            return self._make_executor(expr)

        # normal expression evaluated by running a function.
        # TODO(mbs): This should really be type rather than syntax driven.
        func = Function([], expr)
        return self._make_executor(func)()
class Interpreter(Executor):
    """
    Simple interpreter interface.
    Parameters
    ----------
    mod : tvm.IRModule
        The module to support the execution.
    device : Device
        The runtime device to run the code on.
    target : tvm.Target
        The target option to build the function. Only homogeneous execution is supported.
    CAUTION: Despite the API the module is prepared upon each call to evaluate
    rather than once in create_executor.
    That is:
    .. code-block:: python
        executor = relay.create_executor(kind="debug", mod=module)
        a = executor.evaluate(expr)(args1)
        b = executor.evaluate(expr)(args2)
    will prepare all the bindings in module twice. For efficiency, try to hoist
    calls to evaluate as high as possible, preferably immediately after create_executor:
    .. code-block:: python
        func = relay.create_executor(kind="debug", mod=module).evaluate(expr)
        a = func(args1)
        b = func(args2)
    """
    def __init__(self, mod, device, target):
        self.mod = mod
        self.device = device
        self.target = target
    def _make_executor(self, expr=None):
        if expr is None or isinstance(expr, GlobalVar):
            assert self.mod is not None
        if expr is None:
            # A missing expr denotes 'main' in the given module.
            expr = self.mod.get_global_var("main")
        # Evaluate expr to a packed function we can efficiently re-apply
        # to Relay arguments.
        func = _backend.EvalFunction(self.mod, expr, self.device, self.target)
        def _apply_args(*args, **kwargs):
            if isinstance(expr, GlobalVar):
                # When expanding args, look inside the actual global definition so kwargs
                # can be matched.
                args = self._convert_args(self.mod[expr.name_hint], args, kwargs)
            else:
                args = self._convert_args(expr, args, kwargs)
            # Reflect python arguments up into Relay.
            relay_args = []
            for arg in args:
                relay_args.append(_arg_to_ast(self.mod, arg))
            # Apply func to Relay args
            return func(relay_args)
        return _apply_args
| 8,343 | 32.51004 | 100 | py |
tvm | tvm-main/python/tvm/relay/backend/executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=len-as-condition,no-else-return,invalid-name
"""Executor configuration"""
import tvm
from tvm.runtime import Object
from . import _backend
@tvm._ffi.register_object
class Executor(Object):
    """Executor configuration"""
    # Name under which this configuration is exposed as a command-line flag.
    flag_registry_name = "executor"
    def __init__(self, name, options=None) -> None:
        if options is None:
            options = {}
        self.__init_handle_by_constructor__(_backend.CreateExecutor, name, options)
        self._init_wrapper()
    # Note: sometimes the _attrs field is not properly populated,
    # most likely since __new__ is called instead of __init__ in tvm/_ffi/_ctypes/object.py
    def _init_wrapper(self):
        # Fetch the attribute map from C++ and mark the wrapper as initialized.
        self._attrs = _backend.GetExecutorAttrs(self)
        self._init_wrapper_called = True
    def _check_init_wrapper(self):
        # Lazily (re-)initialize when the object was created via __new__.
        if not (hasattr(self, "_init_wrapper_called") and self._init_wrapper_called):
            self._init_wrapper()
    def __contains__(self, name):
        self._check_init_wrapper()
        return name in self._attrs
    def __getitem__(self, name):
        self._check_init_wrapper()
        return self._attrs[name]
    def __eq__(self, other):
        self._check_init_wrapper()
        return str(other) == str(self) and dict(other._attrs) == dict(self._attrs)
    @staticmethod
    def list_registered():
        """Returns a list of possible executors"""
        return list(_backend.ListExecutors())
    @staticmethod
    def list_registered_options(executor):
        """Returns the dict of available option names and types"""
        return dict(_backend.ListExecutorOptions(str(executor)))
| 2,416 | 34.028986 | 91 | py |
tvm | tvm-main/python/tvm/relay/backend/name_transforms.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Name transformation functions for use in code generation
"""
from typing import List, Union
from tvm import TVMError
from . import _backend
def to_c_function_style(original_name: str):
    """Transform a name to the C function style assuming it is
    appropriately constructed using the prefixing functions
    Parameters
    ----------
    original_name : str
        Original name to transform
    Returns
    -------
    str
        The transformed name
    """
    return _backend.ToCFunctionStyle(original_name)
def to_c_variable_style(original_name: str):
    """Transform a name to the C variable style assuming it is
    appropriately constructed using the prefixing functions
    Parameters
    ----------
    original_name : str
        Original name to transform
    Returns
    -------
    str
        The transformed name
    """
    return _backend.ToCVariableStyle(original_name)
def to_c_constant_style(original_name: str):
    """Transform a name to the C constant style assuming it is
    appropriately constructed using the prefixing functions
    Parameters
    ----------
    original_name : str
        Original name to transform
    Returns
    -------
    str
        The transformed name
    """
    return _backend.ToCConstantStyle(original_name)
def _preprocess_names(names: Union[List[str], str]):
"""Preprocesses name strings into format for C++ functions
Parameters
----------
names : Union[List[str], str]
List of names to combine to form a combined name or the name itself
"""
if isinstance(names, str):
if names == "":
raise TVMError("Name is empty")
return [names]
return names
def prefix_name(names: Union[List[str], str]):
    """Apply TVM-specific prefix to a function name
    Parameters
    ----------
    names : Union[List[str], str]
        List of names to combine to form a combined name or the name itself
    Returns
    -------
    str
        The prefixed name
    """
    return _backend.PrefixName(_preprocess_names(names))
def prefix_generated_name(names: Union[List[str], str]):
    """Apply generated TVM-specific prefix to a function name
    Parameters
    ----------
    names : Union[List[str], str]
        List of names to combine to form a combined name or the name itself
    Returns
    -------
    str
        The prefixed name
    """
    return _backend.PrefixGeneratedName(_preprocess_names(names))
| 2,942 | 28.43 | 75 | py |
tvm | tvm-main/python/tvm/relay/backend/aot.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""AOT passes"""
from typing import Dict
from tvm import IRModule
from tvm.relay.backend import Executor
from tvm.ir.transform import Pass
from .utils import CallType
from . import _aot
def AOTLowerMain(mod_name: str, config: object, call_type: CallType) -> Pass:
    """Lower a Relay main function into an AOT TIR main function.
    Parameters
    ----------
    mod_name: str
        The name of the module.
    config : CompilationConfig
        The compilation configuration.
    call_type : CallType
        The calling convention to use.
    Returns
    -------
    Pass
        The AOTLowerMain pass.
    """
    # The enum is passed across the FFI boundary as its integer value.
    return _aot.AOTLowerMain(mod_name, config, call_type.value)
def CreateFunctionMetadata(
    mod: IRModule, workspace_byte_alignment: int, constant_byte_alignment: int
) -> Dict[str, object]:
    """Create the function metadata (FunctionInfos) from an AOT module.
    Parameters
    ----------
    mod : IRModule
        The IRModule.
    workspace_byte_alignment : int
        The alignment of the workspace buffer in bytes.
    constant_byte_alignment : int
        The alignment of the constant buffer in bytes.
    Returns
    -------
    Dict[str, FunctionInfo]
        A map between function names and FunctionInfos.
    """
    # Thin wrapper; the metadata is computed on the C++ side via FFI.
    return _aot.CreateFunctionMetadata(mod, workspace_byte_alignment, constant_byte_alignment)
def CreateExecutorMetadata(
    mod: IRModule,
    mod_name: str,
    executor: Executor,
    workspace_byte_alignment: int,
    constant_byte_alignment: int,
) -> object:
    """Create the executor metadata from an AOT module.
    Parameters
    ----------
    mod : IRModule
        The IRModule.
    mod_name : str
        The name of the module.
    executor : Executor
        The executor configuration.
    workspace_byte_alignment : int
        The alignment of the workspace buffer in bytes.
    constant_byte_alignment : int
        The alignment of the constant buffer in bytes.
    Returns
    -------
    ExecutorCodegenMetadata
        The executor metadata.
    """
    # Thin wrapper; the metadata is computed on the C++ side via FFI.
    return _aot.CreateExecutorMetadata(
        mod, mod_name, executor, workspace_byte_alignment, constant_byte_alignment
    )
| 2,988 | 27.740385 | 94 | py |
tvm | tvm-main/python/tvm/relay/backend/runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=len-as-condition,no-else-return,invalid-name
"""Runtime configuration"""
import tvm
from tvm.runtime import Object
from . import _backend
@tvm._ffi.register_object
class Runtime(Object):
    """Runtime configuration"""
    # Name under which this configuration is exposed as a command-line flag.
    flag_registry_name = "runtime"
    def __init__(self, name, options=None) -> None:
        if options is None:
            options = {}
        self.__init_handle_by_constructor__(_backend.CreateRuntime, name, options)
        self._attrs = _backend.GetRuntimeAttrs(self)
    def __contains__(self, name):
        return name in self._attrs
    def __getitem__(self, name):
        # Re-fetch the attribute map from C++ before each lookup.
        self._attrs = _backend.GetRuntimeAttrs(self)
        return self._attrs[name]
    def __eq__(self, other):
        return str(other) == str(self) and dict(other._attrs) == dict(self._attrs)
    @staticmethod
    def list_registered():
        """Returns a list of possible runtimes"""
        return list(_backend.ListRuntimes())
    @staticmethod
    def list_registered_options(runtime):
        """Returns the dict of available option names and types"""
        return dict(_backend.ListRuntimeOptions(str(runtime)))
| 1,939 | 33.035088 | 82 | py |
tvm | tvm-main/python/tvm/relay/backend/te_compiler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=len-as-condition,no-else-return,invalid-name
"""TE compiler engine (replacing legacy compile_engine)."""
from __future__ import absolute_import
import logging
import numpy as np
import tvm
from tvm import autotvm, te
from tvm.auto_scheduler import is_auto_scheduler_enabled
from tvm.meta_schedule import is_meta_schedule_enabled
from tvm.runtime import Object
from tvm.support import libinfo
from tvm.target import Target
from .. import function as _function
from .. import ty as _ty
from ..backend.utils import mangle_module_name
from . import _backend
logger = logging.getLogger("te_compiler")
autotvm_logger = logging.getLogger("autotvm")
_first_warning = True
@tvm._ffi.register_object("relay.LoweredOutput")
class LoweredOutput(Object):
    """Lowered output"""
    def __init__(self, outputs, implement):
        # Wraps the C++ LoweredOutput node: output tensors plus the chosen op implementation.
        self.__init_handle_by_constructor__(_backend._make_LoweredOutput, outputs, implement)
@tvm._ffi.register_object("relay.CCacheKey")
class CCacheKey(Object):
    """Key in the TE Compiler.
    Parameters
    ----------
    source_func : tvm.relay.Function
        The source function.
    target : tvm.Target
        The target we want to run the function on.
    """
    def __init__(self, source_func, target):
        self.__init_handle_by_constructor__(_backend._make_CCacheKey, source_func, target)
@tvm._ffi.register_object("relay.CCacheValue")
class CCacheValue(Object):
    """Value in the TE Compiler, including usage statistics."""
def _get_cache_key(source_func, target):
    """Normalize a (function, target) pair into a CCacheKey."""
    # Already a cache key: pass it through unchanged.
    if isinstance(source_func, CCacheKey):
        return source_func
    if isinstance(source_func, _function.Function):
        # Accept a target given as a string and require one to be present.
        if isinstance(target, str):
            target = Target(target)
        if not target:
            raise ValueError("Need target when source_func is a Function")
        return CCacheKey(source_func, target)
    raise TypeError("Expect source_func to be CCacheKey")
def get_valid_implementations(op, attrs, inputs, out_type, target):
    """Get all valid implementations from the op strategy.
    Note that this function doesn't support op with symbolic input shapes.
    Parameters
    ----------
    op : tvm.ir.Op
        Relay operator.
    attrs : object
        The op attribute.
    inputs : List[tvm.te.Tensor]
        Input tensors to the op.
    out_type : relay.Type
        The output type.
    target : tvm.target.Target
        The target to compile the op.
    Returns
    -------
    ret : List[relay.op.OpImplementation]
        The list of all valid op implementations.
    """
    fstrategy = op.get_attr("FTVMStrategy")
    assert fstrategy is not None, (
        f"{op.name} doesn't have an FTVMStrategy registered. You can register "
        f"one in python with `tvm.relay.op.register_strategy`."
    )
    # The strategy must be evaluated with the target in scope.
    with target:
        strategy = fstrategy(attrs, inputs, out_type, target)
    analyzer = tvm.arith.Analyzer()
    ret = []
    for spec in strategy.specializations:
        if spec.condition:
            # check if all the clauses in the specialized condition are true
            flag = True
            for clause in spec.condition.clauses:
                clause = analyzer.canonical_simplify(clause)
                # A clause counts as satisfied only if it simplifies to a
                # non-zero integer constant; anything else rejects the spec.
                if isinstance(clause, tvm.tir.IntImm) and clause.value:
                    continue
                flag = False
                break
            if flag:
                for impl in spec.implementations:
                    ret.append(impl)
        else:
            # Unconditional specialization: all its implementations are valid.
            for impl in spec.implementations:
                ret.append(impl)
    return ret
def select_implementation(op, attrs, inputs, out_type, target, use_autotvm=True):
    """Select the best implementation from the op strategy.

    If use_autotvm is True, it'll first try to find the best implementation
    based on AutoTVM profile results. If no AutoTVM profile result is found,
    it'll choose the implementation with highest plevel.

    If use_autotvm is False, it'll directly choose the implementation with
    highest plevel.

    Note that this function doesn't support op with symbolic input shapes.

    Parameters
    ----------
    op : tvm.ir.Op
        Relay operator.
    attrs : object
        The op attribute.
    inputs : List[tvm.te.Tensor]
        Input tensors to the op.
    out_type : relay.Type
        The output type.
    target : tvm.target.Target
        The target to compile the op.
    use_autotvm : bool
        Whether query AutoTVM to pick the best.

    Returns
    -------
    ret : tuple(relay.op.OpImplementation, List[tvm.te.Tensor])
        The best op implementation and the corresponding output tensors.
    """
    all_impls = get_valid_implementations(op, attrs, inputs, out_type, target)
    if len(all_impls) == 0:
        raise RuntimeError(f"No valid {op} implementations for {target}")
    best_plevel_impl = max(all_impls, key=lambda x: x.plevel)
    # Disable autotvm if auto_scheduler is enabled.
    # (i.e., always return the implementation with the highest priority for auto-scheduler).
    if is_auto_scheduler_enabled() or is_meta_schedule_enabled():
        use_autotvm = False
    # If not use autotvm, always return the implementation with the highest priority
    if not use_autotvm:
        logger.info(
            "Using %s for %s based on highest priority (%d)",
            best_plevel_impl.name,
            op.name,
            best_plevel_impl.plevel,
        )
        outs = best_plevel_impl.compute(attrs, inputs, out_type)
        return best_plevel_impl, outs
    # Otherwise, try autotvm templates
    outputs = {}
    workloads = {}
    best_autotvm_impl = None
    best_cfg = None
    dispatch_ctx = autotvm.task.DispatchContext.current
    old_silent = autotvm.GLOBAL_SCOPE.silent
    autotvm.GLOBAL_SCOPE.silent = True
    # BUGFIX: the silent flag is global state; restore it in a finally block
    # so an exception raised by compute()/query() cannot leave AutoTVM muted
    # for unrelated callers.
    try:
        for impl in all_impls:
            outs = impl.compute(attrs, inputs, out_type)
            outputs[impl] = outs
            workload = autotvm.task.get_workload(outs)
            workloads[impl] = workload
            if workload is None:
                # Not an AutoTVM tunable implementation
                continue
            cfg = dispatch_ctx.query(target, workload)
            if cfg.is_fallback:
                # Skip fallback config
                continue
            logger.info("Implementation %s for %s has cost %.2e", impl.name, op.name, cfg.cost)
            if best_cfg is None or best_cfg.cost > cfg.cost:
                best_autotvm_impl = impl
                best_cfg = cfg
    finally:
        autotvm.GLOBAL_SCOPE.silent = old_silent
    if best_autotvm_impl:
        # The best autotvm implementation definitely doesn't use fallback config
        logger.info(
            "Using %s for %s based on lowest cost (%.2e)",
            best_autotvm_impl.name,
            op.name,
            best_cfg.cost,
        )
        return best_autotvm_impl, outputs[best_autotvm_impl]
    # Use the implementation with highest plevel
    if workloads[best_plevel_impl] is not None:
        msg = (
            "Cannot find tuning records for:\n target=%s\n key=%s\n"
            "TVM will apply a default schedule which may negatively impact performance."
            % (target, workloads[best_plevel_impl])
        )
        if (
            not autotvm.env.GLOBAL_SCOPE.silent
            and msg not in autotvm.task.DispatchContext.warning_messages
        ):
            autotvm.task.DispatchContext.warning_messages.add(msg)
            global _first_warning
            if _first_warning:
                _first_warning = False
                info_msg = (
                    "One or more operators have not been tuned. Please tune your model "
                    "for better performance. Use DEBUG logging level to see more details."
                )
                autotvm_logger.warning(info_msg)
            autotvm_logger.debug(msg)
    logger.info(
        "Using %s for %s based on highest priority (%s)",
        best_plevel_impl.name,
        op.name,
        best_plevel_impl.plevel,
    )
    return best_plevel_impl, outputs[best_plevel_impl]
def get_shape(shape):
    """Convert the shape to correct dtype and vars."""

    def _canonicalize(dim):
        # Static dimensions: keep 64-bit IntImms only when TVM was built
        # with 64-bit indexing, otherwise narrow to int32.
        if isinstance(dim, tvm.tir.IntImm):
            if libinfo()["INDEX_DEFAULT_I64"] == "ON":
                return dim
            value = int(dim)
            assert value <= np.iinfo(np.int32).max
            return tvm.tir.IntImm("int32", value)
        # Dynamic dimensions become fresh symbolic size vars.
        if isinstance(dim, tvm.tir.Any):
            return te.size_var("any_dim", "int32")
        return dim

    return [_canonicalize(dim) for dim in shape]
@tvm._ffi.register_func("relay.backend.lower_call")
def lower_call(call, inputs, target, otype=None):
    """Lower the call expression to op implementation and tensor outputs."""
    assert isinstance(call.op, tvm.ir.Op)
    op = call.op
    # Determine the return type: an explicit override wins, otherwise it is
    # derived from the call's checked type (with shapes canonicalized below).
    if otype is not None:
        ret_type = otype
    else:
        # Prepare the call_node->checked_type(). For the call node inputs, we ensure that
        # the shape is Int32. Following code ensures the same for the output as well.
        # TODO(@icemelon9): Support recursive tuple
        ret_type = call.checked_type
        if isinstance(ret_type, _ty.TensorType):
            ret_type = _ty.TensorType(get_shape(ret_type.shape), ret_type.dtype)
        elif isinstance(ret_type, _ty.TupleType):
            new_fields = []
            for field in ret_type.fields:
                if isinstance(field, _ty.TensorType):
                    new_fields.append(_ty.TensorType(get_shape(field.shape), field.dtype))
                else:
                    new_fields.append(field)
            ret_type = _ty.TupleType(new_fields)
    # The call is treated as dynamic if its result OR any argument has a
    # dynamic shape.
    is_dyn = _ty.is_dynamic(call.checked_type)
    for arg in call.args:
        is_dyn = is_dyn or _ty.is_dynamic(arg.checked_type)
    # check if in the AutoTVM tracing mode, and disable if op is not in wanted list
    env = autotvm.task.TaskExtractEnv.current
    reenable_tracing = False
    if env is not None and env.tracing:
        if env.wanted_relay_ops is not None and op not in env.wanted_relay_ops:
            env.tracing = False
            reenable_tracing = True
    if not is_dyn:
        best_impl, outputs = select_implementation(op, call.attrs, inputs, ret_type, target)
    else:
        # TODO(@icemelon9): Allow tvm to generate multiple kernels for dynamic shapes.
        # Dynamic shapes skip AutoTVM and use the highest-plevel implementation.
        best_impl, outputs = select_implementation(
            op, call.attrs, inputs, ret_type, target, use_autotvm=False
        )
    # re-enable AutoTVM tracing
    if reenable_tracing:
        env.tracing = True
    return LoweredOutput(outputs, best_impl)
@tvm._ffi.register_object("relay.TECompiler")
class TECompiler(Object):
    """TECompiler to get lowered code."""

    def __init__(self):
        raise RuntimeError("Cannot construct a TECompiler")

    def lower(self, source_func, target=None, mod_name="default"):
        """Lower a source_func to a CachedFunc.

        Parameters
        ----------
        source_func : Union[tvm.relay.Function, CCacheKey]
            The source relay function.

        target : tvm.Target
            The target platform.

        Returns
        -------
        cached_func: CachedFunc
            The result of lowering.
        """
        # pylint: disable=broad-except, import-outside-toplevel
        try:
            mangled_name = mangle_module_name(mod_name)
            cache_key = _get_cache_key(source_func, target)
            return _backend._TECompilerLower(self, cache_key, mangled_name)
        except Exception:
            import traceback

            # Attach the failing function's text to the traceback to ease
            # debugging of lowering failures.
            msg = "".join(
                [
                    traceback.format_exc(),
                    "Error during compile func\n",
                    "--------------------------\n",
                    source_func.astext(show_meta_data=False),
                    "--------------------------\n",
                ]
            )
            raise RuntimeError(msg)

    def jit(self, source_func, target=None):
        """JIT a source_func to a tvm.runtime.PackedFunc.

        Parameters
        ----------
        source_func : Union[tvm.relay.Function, CCacheKey]
            The source relay function.

        target : tvm.Target
            The target platform.

        Returns
        -------
        jited_func: tvm.runtime.PackedFunc
            The result of jited function.
        """
        cache_key = _get_cache_key(source_func, target)
        return _backend._TECompilerJIT(self, cache_key)

    def clear(self):
        """clear the existing cached functions"""
        _backend._TECompilerClear(self)

    def items(self):
        """List items in the cache.

        Returns
        -------
        item_list : List[Tuple[CCacheKey, CCacheValue]]
            The list of items.
        """
        # The FFI call returns a flat [key0, value0, key1, value1, ...] list.
        entries = _backend._TECompilerListItems(self)
        assert len(entries) % 2 == 0
        return [(entries[i], entries[i + 1]) for i in range(0, len(entries), 2)]
def get():
    """Get the global TE Compiler.

    Returns
    -------
    engine : tvm.relay.backend.TECompiler
        The TE Compiler.
    """
    # Delegates to the C++ side; the compiler instance is a process-wide
    # singleton managed by the FFI.
    return _backend._TECompilerGlobal()
def lower_to_primfunc(relay_func, target):
    """Lower Relay Function to TIR PrimFunc.

    Parameters
    ----------
    relay_func: relay.Function
        The source primitive function, created by FuseOps.

    target : Target
        The compilation target.

    Returns
    -------
    prim_func : tir.PrimFunc
        The created prim func.
    """
    # Look up the lowering routine registered on the C++ side.
    lower_fn = tvm._ffi.get_global_func("relay.backend.LowerToPrimFunc")
    assert lower_fn is not None, "relay.backend.LowerToPrimFunc does not exist. "
    # Lowering is performed inside the target context.
    with target:
        return lower_fn(relay_func, target)
| 14,368 | 31.805936 | 93 | py |
tvm | tvm-main/python/tvm/relay/backend/_aot.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The AOT FFI namespace.
"""
import tvm._ffi
tvm._ffi._init_api("relay.backend.aot", __name__)
| 882 | 39.136364 | 62 | py |
tvm | tvm-main/python/tvm/relay/backend/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Backend codegen modules for relay."""
from . import te_compiler
from .executor import Executor
from .runtime import Runtime
| 912 | 42.47619 | 62 | py |
tvm | tvm-main/python/tvm/relay/backend/graph_executor_codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
A compiler from a Relay expression to TVM's graph executor.
The compiler is built from a few pieces.
First we define a compiler from a single Relay expression to the
graph language. We require the expression to be a function.
The function's parameters correspond to the placeholder/inputs
and model parameters found in the computation graph representation.
The body of the function represents the computation graph.
The compiler's output is a program in the graph language, which is composed of
Node, NodeRef, InputNode, OpNode. This "little language" represents programs in
TVM's graph format.
To connect to the graph executor, we use a printer that converts our graph format
into TVM's JSON format. The resulting string can be loaded by
contrib.graph_executor or any other TVM runtime compatible systems.
"""
from tvm.runtime.ndarray import empty
from tvm.relay import _build_module
from tvm.target import Target
from .utils import mangle_module_name
class GraphExecutorCodegen(object):
    """The compiler from Relay to the TVM runtime system."""

    def __init__(self, mod, target):
        self._mod = _build_module._GraphExecutorCodegen()
        # Bind the packed functions exposed by the C++ codegen module as
        # private attributes (_init, _codegen, ...).
        for packed_name in (
            "init",
            "codegen",
            "get_graph_json",
            "list_params_name",
            "get_param_by_name",
            "get_irmodule",
        ):
            setattr(self, "_" + packed_name, self._mod[packed_name])
        self._setup(mod, target)

    def _setup(self, mod, target):
        # Canonicalize target(s) plus host before initializing the codegen.
        raw_targets = Target.canon_multi_target_and_host(target)
        self._init(mod, raw_targets)

    def codegen(self, ir_module, func):
        """Compile a single function into a graph.

        Parameters
        ----------
        ir_module: tvm.ir.Module
            The module to compile

        func: tvm.relay.Expr
            The function to compile.

        Returns
        -------
        graph_json : str
            The graph json that can be consumed by runtime.
        mod : IRModule or Dict[Target, IRModule]
            The lowered functions.
        params : Dict[str, tvm.nd.NDArray]
            Additional constant parameters.
        """
        self._codegen(ir_module, func, mangle_module_name("default"))
        graph_json = self._get_graph_json()
        lowered_func = self._get_irmodule()
        # Copy every constant parameter out of the codegen module into a
        # fresh NDArray owned by the caller.
        params = {}
        for param_name in self._list_params_name():
            src = self._get_param_by_name(param_name)
            dst = empty(src.shape, dtype=src.dtype, device=src.device)
            src.copyto(dst)
            params[param_name] = dst
        return graph_json, lowered_func, params
| 3,534 | 38.277778 | 81 | py |
tvm | tvm-main/python/tvm/relay/backend/_vm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The Relay virtual machine FFI namespace.
"""
import tvm._ffi
tvm._ffi._init_api("relay._vm", __name__)
| 892 | 39.590909 | 62 | py |
tvm | tvm-main/python/tvm/relay/backend/_backend.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The interface of expr function exposed from C++."""
import tvm._ffi
import tvm.driver
@tvm._ffi.register_func("relay.backend.build")
def build(mod, target, target_host=None):
    """Backend build function.

    Parameters
    ----------
    mod : tvm.IRModule or Dict[str, tvm.IRModule]
        Input module

    target : tvm.Target
        The target to run the code on.

    target_host : tvm.Target
        The host target.

    Returns
    -------
    module : tvm.Module
        The runtime module.
    """
    # The C++ side passes an empty string when no host target is given;
    # normalize that to None before forwarding to the driver.
    if target_host == "":
        target_host = None
    return tvm.driver.build(mod, target=target, target_host=target_host)
@tvm._ffi.register_func("relay._tensor_value_repr")
def _tensor_value_repr(tvalue):
    # Render a tensor value by converting its backing NDArray to numpy.
    return str(tvalue.data.numpy())
@tvm._ffi.register_func("relay._constant_repr")
def _tensor_constant_repr(tvalue):
    """Render a relay constant; custom datatypes cannot be converted to numpy."""
    dtype = tvm.runtime.DataType(tvalue.data.dtype)
    if not tvm.target.datatype.get_type_registered(dtype.type_code):
        return str(tvalue.data.numpy())
    return "custom tensor of type " + dtype.type_code
tvm._ffi._init_api("relay.backend", __name__)
| 1,902 | 30.716667 | 72 | py |
tvm | tvm-main/python/tvm/relay/backend/executor_factory.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Executor factory modules."""
from abc import abstractmethod
import warnings
from ..._ffi.base import string_types
from ..._ffi.registry import get_global_func
from ...runtime import ndarray
class ExecutorFactoryModule:
    """Common interface shared by executor factory modules.

    Concrete factories implement the abstract accessors below. The legacy
    iteration protocol (``__iter__``/``__next__``) yields the executor
    configuration, the library and the parameters, in that order.
    """

    @abstractmethod
    def get_executor_config(self):
        """Return the internal configuration the executor uses to execute the network"""
        raise NotImplementedError

    @abstractmethod
    def get_params(self):
        """Return the compiled parameters."""
        raise NotImplementedError

    @abstractmethod
    def get_lib(self):
        """Return the generated library"""
        raise NotImplementedError

    def __getitem__(self, item):
        # Delegate item access to the underlying runtime module.
        return self.module[item]

    def __iter__(self):
        warnings.warn(
            "legacy graph executor behavior of producing json / lib / params will be "
            "removed in the next release."
            " Please see documents of tvm.contrib.graph_executor.GraphModule for the "
            " new recommended usage.",
            DeprecationWarning,
            2,
        )
        return self

    def __next__(self):
        # The legacy protocol exposes exactly three items.
        if self.iter_cnt > 2:
            raise StopIteration
        legacy_items = (self.get_executor_config(), self.lib, self.params)
        current = legacy_items[self.iter_cnt]
        self.iter_cnt += 1
        return current
class AOTExecutorFactoryModule(ExecutorFactoryModule):
    """AOT executor factory module.

    Attributes
    ----------
    ir_mod : :py:class:`~tvm.IRModule`
        The IR module to build.
    lowered_ir_mods : dict[Target, IRModule]
        The IR modules lowered per Target.
    target : tvm.Target
        The Target used to build this module.
    executor : tvm.relay.backend.Executor
        Internal representation of the Executor
    runtime : tvm.relay.backend.Runtime
        Internal representation of the Runtime
    libmod : tvm.Module
        The module of the corresponding function
    libmod_name: str
        The name of module
    params : dict of str to NDArray
        The parameters of module
    function_metadata : Map of String to FunctionInfo
        This holds a map function names to their information
    devices : List[str]
        List of devices used in the module
    """

    def __init__(
        self,
        ir_mod,
        lowered_ir_mods,
        target,
        executor,
        runtime,
        libmod,
        libmod_name,
        params,
        function_metadata,
        executor_codegen_metadata,
        devices,
    ):
        fcreate = get_global_func("tvm.aot_executor_factory.create")
        # The FFI call expects parameter names interleaved with their values.
        packed_params = []
        for param_name, param_value in params.items():
            packed_params.extend((param_name, ndarray.array(param_value)))
        self.module = fcreate(libmod, libmod_name, *packed_params)
        self.ir_mod = ir_mod
        self.lowered_ir_mods = lowered_ir_mods
        self.target = target
        self.executor = executor
        self.runtime = runtime
        self.lib = libmod
        self.libmod_name = libmod_name
        self.params = params
        self.iter_cnt = 0
        self.function_metadata = function_metadata
        self.executor_codegen_metadata = executor_codegen_metadata
        self.devices = devices

    def get_devices(self):
        """Return the list of devices used in the module."""
        return self.devices

    def get_params(self):
        """Return the compiled parameters."""
        return self.params

    def get_executor_config(self):
        # The AOT factory exposes no separate executor configuration.
        return None

    def get_lib(self):
        """Return the generated library."""
        return self.lib

    def export_library(self, file_name, fcompile=None, addons=None, **kwargs):
        """Export the underlying runtime module as a library file."""
        return self.module.export_library(file_name, fcompile, addons, **kwargs)
class GraphExecutorFactoryModule(ExecutorFactoryModule):
    """Graph executor factory module.
    This is a module of graph executor factory

    Attributes
    ----------
    ir_mod : :py:class:`~tvm.IRModule`
        The IR module to build.
    target : tvm.Target
        The Target used to build this module.
    executor : tvm.relay.backend.Executor
        Internal representation of the Executor
    graph_json_str : the json graph to be deployed in json format output by graph compiler.
        The graph can contain operator(tvm_op) that points to the name of
        PackedFunc in the libmod.
    libmod : tvm.Module
        The module of the corresponding function
    libmod_name: str
        The name of module
    params : dict of str to NDArray
        The parameters of module
    function_metadata : Map of String to FunctionInfo
        This holds a map function names to their information
    """

    def __init__(
        self,
        ir_mod,
        target,
        executor,
        graph_json_str,
        libmod,
        libmod_name,
        params,
        function_metadata,
    ):
        assert isinstance(graph_json_str, string_types)
        fcreate = get_global_func("tvm.graph_executor_factory.create")
        # The FFI call expects parameter names interleaved with their values.
        packed_params = []
        for param_name, param_value in params.items():
            packed_params.extend((param_name, ndarray.array(param_value)))
        self.ir_mod = ir_mod
        self.target = target
        self.executor = executor
        self.module = fcreate(graph_json_str, libmod, libmod_name, *packed_params)
        self.graph_json = graph_json_str
        self.lib = libmod
        self.libmod_name = libmod_name
        self.params = params
        self.iter_cnt = 0
        self.function_metadata = function_metadata

    def export_library(self, file_name, fcompile=None, addons=None, **kwargs):
        """Export the underlying runtime module as a library file."""
        return self.module.export_library(file_name, fcompile, addons, **kwargs)

    def get_devices(self):
        """Graph executor factories track no device list."""
        return []

    def get_params(self):
        """Return the compiled parameters."""
        return self.params

    def get_graph_json(self):
        """Return the JSON graph produced by the graph compiler."""
        return self.graph_json

    def get_executor_config(self):
        # For the graph executor the configuration IS the JSON graph.
        return self.graph_json

    def get_lib(self):
        """Return the generated library."""
        return self.lib
| 6,683 | 29.520548 | 91 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External backend codegen modules for Relay."""
| 835 | 45.444444 | 62 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Codegen for Arm(R) Ethos(TM)-U NPU"""
from collections import defaultdict
from typing import List, Callable
from ethosu.vela import api as vapi
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu.tir.compiler import LowerToTIR
from tvm.relay.backend.contrib.ethosu.tir.scheduler import copy_constants
from tvm.contrib.ethosu.cascader import (
cascade,
EthosuDeviceConfig,
CascaderOptions,
MemoryRegion,
extract_memory_info,
)
from tvm.relay.backend.contrib.ethosu.legalize import LegalizeEthosU
from tvm.relay.backend.contrib.ethosu import tir_to_cs_translator, util, vela_api
from tvm.relay.expr_functor import ExprMutator, ExprVisitor, Call
from tvm.relay import expr as _expr
# pylint: disable=unused-import
from tvm.relay.backend.contrib.ethosu.op import op_attrs
from tvm.relay.backend.contrib.ethosu import op
from . import _ffi_api
class OptimizeLUTs(ExprMutator):
    """A pass to merge an identity operator with a LUT based activation function with
    a preceding operator provided that operator can do a table lookup for the activation
    in the hardware"""

    def __init__(self):
        super().__init__()
        # NPU operators that support a hardware table lookup, mapped to the
        # constructor used to rebuild them.
        self.lut_ops = {
            "contrib.ethosu.conv2d": op.ethosu_conv2d,
            "contrib.ethosu.depthwise_conv2d": op.ethosu_depthwise_conv2d,
            "contrib.ethosu.pooling": op.ethosu_pooling,
            "contrib.ethosu.binary_elementwise": op.ethosu_binary_elementwise,
        }

    def create_op_with_lut(self, call):
        """Extract the parameters and attributes from the NPU operator and create
        a new operator with LUT.

        Parameters
        ----------
        call : tvm.relay.expr.Call
            The current call node being visited.

        Returns
        -------
        tvm.relay.expr.Call
            The new operator with LUT.
        """
        producer = call.args[0]
        lut_table = call.args[1]
        merged_attrs = dict(producer.attrs)
        merged_attrs["activation"] = call.attrs.activation
        # Assume that LUT is always the last argument
        rebuilt_args = producer.args[:-1] + [lut_table]
        assert producer.op.name in self.lut_ops.keys()
        return self.lut_ops[producer.op.name](*rebuilt_args, **merged_attrs)

    def visit_call(self, call: tvm.relay.expr.Call) -> tvm.relay.expr.Call:
        """Recursively visit call nodes in the input graph and if an ethosu.identity
        operator with LUT is found and the preceding operator has a LUT attribute, create
        a new NPU operator.

        Parameters
        ----------
        call : tvm.relay.expr.Call
            The current call node being visited.

        Returns
        -------
        tvm.relay.expr.Call
            The input call node in the case the current call node does
            not refer to an Op. Else, a new call node with a new operator.
        """
        lut_activations = ["TANH", "LUT", "SIGMOID"]
        result = call
        if isinstance(call.op, tvm.ir.Op) and isinstance(call.args[0], tvm.relay.expr.Call):
            producer = call.args[0]
            # Merge only when the producer supports LUTs, the current call is
            # an identity with a LUT activation, and the producer does not
            # already carry a LUT activation of its own.
            if (
                producer.op.name in self.lut_ops.keys()
                and call.op.name == "contrib.ethosu.identity"
                and call.attrs.activation in lut_activations
            ):
                if producer.attrs.activation not in lut_activations:
                    result = self.create_op_with_lut(call)
        return super().visit_call(result)
@util.create_npu_function_pass(opt_level=1)
class LUTsOptimizer:
    """Register LUTsOptimizer as a relay pass."""

    def transform_npu_function(self, _, func: relay.Function) -> relay.Function:
        """Visit relay nodes in the given NPU function.

        Parameters
        ----------
        func : tvm.relay.function.Function
            The function to apply the optimization pass for multiple LUTs to.

        Returns
        -------
        mod : tvm.IRModule
            New module with optimized LUTs.
        """
        return OptimizeLUTs().visit(func)

    def __call__(self, *args, **kwargs):
        # NOTE(review): intentionally empty — the class appears to be driven
        # through the create_npu_function_pass decorator rather than invoked
        # directly; confirm against util.create_npu_function_pass.
        pass
class AnalyzeConsumers(ExprVisitor):
    """Traverses the graph to determine consumers that are NPU operations and
    which have restrictions to use NHCWB16 layout. The result is maintained in
    `npu_consumers` and `restrictions`.

    Attributes
    ----------
    npu_consumers : Dict[tvm.relay.expr.Call, List[bool]]
        Mapping from NPU operation to list of boolean values that represent
        whether or not each consumer is an NPU operation.
    restrictions : Dict[tvm.relay.expr.Call, List[bool]]
        Mapping from NPU operation to list of boolean values that represent
        whether or not operation has restrictions to use NHCWB16 layout.
    optimize_ops : Dict[str, Callable]
        A map from NPU operation name to function that creates NPU operation.
    """

    def __init__(self, optimize_ops):
        super().__init__()
        self.npu_consumers = defaultdict(list)
        self.restrictions = defaultdict(list)
        self.optimize_ops = optimize_ops

    def visit_call(self, call: relay.Call):
        consumer_is_npu = call.op.name in self.optimize_ops
        # Flatten tuple arguments so every producer call is inspected directly.
        flattened_args = []
        for argument in call.args:
            if isinstance(argument, relay.Tuple):
                flattened_args.extend(argument.fields)
            else:
                flattened_args.append(argument)
        for argument in flattened_args:
            if isinstance(argument, relay.Call) and argument.op.name in self.optimize_ops:
                self.npu_consumers[argument].append(consumer_is_npu)
                # ReduceSum requires NHWC input in case input tensor has type int32 or
                # accelerator is Ethos_U65_512
                # https://review.mlplatform.org/plugins/gitiles/ml/ethos-u/ethos-u-vela/+/refs/tags/3.7.0/ethosu/vela/graph_optimiser_util.py#126
                restricted = (
                    call.op.name == "contrib.ethosu.pooling"
                    and call.attrs["pooling_type"] == "SUM"
                    and (
                        argument.checked_type.dtype == "int32"
                        or vela_api.get_accelerator_config() == vapi.NpuAccelerator.Ethos_U65_512
                    )
                )
                self.restrictions[argument].append(restricted)
        super().visit_call(call)
class LayoutOptimization(ExprMutator):
"""A pass to optimize the layout of NPU operations by converting to brick format (NHCWB16).
This pass traverses the graph and attempts to alter the input/output layouts when an NPU
operation is visited. Whether or not the input/output layout can be altered for a given NPU
operation depends on the following:
Check alter input layout: For each argument, if the producer is also an NPU operation and
its output is altered to brick format and there are no restrictions, then the input layout
with respect to the current argument is altered to brick format.
Check alter output layout: If all consumers (child nodes) are an NPU operation and
there are no restrictions, then the output layout is altered to brick format.
Note
----
In order for this pass to be run, the consumers of each NPU operation must first be analyzed
by the `AnalyzeConsumers` pass, since Relay doesn't keep a reference to child nodes.
Attributes
----------
npu_consumers : Dict[tvm.relay.expr.Call, List[bool]]
A map from current call to a list boolean values that state whether or not each consumer
is an NPU operation.
restrictions : Dict[tvm.relay.expr.Call, List[bool]]
A map from current call to a list boolean values that state
whether or not operation has restrictions to use NHCWB16 layout.
optimize_ops : Dict[str, Callable]
A map from NPU operation name to function that creates NPU operation.
"""
def __init__(self, npu_consumers, restrictions, optimize_ops):
self.npu_consumers = npu_consumers
self.restrictions = restrictions
self.optimize_ops = optimize_ops
super().__init__()
def alter_ethosu_op_layout(self, call: tvm.relay.expr.Call) -> tvm.relay.expr.Call:
"""Alter the layouts of given NPU operation to brick format if possible.
Parameters
----------
call : tvm.relay.expr.Call
The call pointing to an NPU operation that will be checked if
the layout needs altering.
Returns
-------
new_call : tvm.relay.expr.Call
New call with altered layouts.
"""
def are_all_consumers_npu(call):
"""
Check whether or not each consumer is an NPU operation.
Parameters
----------
call : tvm.relay.expr.Call
The call pointing to an NPU operation.
Returns
-------
all_consumers_npu : bool
Whether each consumer is an NPU operation.
"""
consumers = self.npu_consumers[call]
return consumers and all(consumers)
def check_restrictions(call):
"""
Check if there are any restrictions for call to use NHCWB16 layout.
Parameters
----------
call : tvm.relay.expr.Call
The call pointing to an NPU operation.
Returns
-------
any_restrictions : bool
Whether there are restrictions.
"""
restrictions = self.restrictions[call]
return restrictions and any(restrictions)
assert isinstance(call.attrs, tvm.ir.Attrs), (
f"The attributes for operator '{call.op.name}' could not be "
"found. Did you register the relay.attrs.Ethosu<opname>Attrs "
"object in python api?"
)
new_attrs = dict(call.attrs)
# Check if we can rewrite the input layouts
input_count = 0
for arg in call.args:
input_count += 1
if arg not in self.npu_consumers:
continue
parent_has_brick_output = are_all_consumers_npu(arg)
parent_has_restrictions = check_restrictions(arg)
if parent_has_brick_output and not parent_has_restrictions:
layout_string = "ifm_layout" if input_count <= 1 else f"ifm{input_count}_layout"
new_attrs[layout_string] = "NHCWB16"
# Check if we can rewrite the output layouts
has_brick_output = are_all_consumers_npu(call)
has_restrictions = check_restrictions(call)
if has_brick_output and not has_restrictions:
new_attrs["ofm_layout"] = "NHCWB16"
name = call.op.name
return self.optimize_ops[name](*call.args, **new_attrs)
def visit_call(self, call: tvm.relay.expr.Call) -> tvm.relay.expr.Call:
    """Recursively visit call nodes in the input graph, altering the
    layout attributes of any op that supports it.

    Parameters
    ----------
    call : tvm.relay.expr.Call
        The current call node being visited.

    Returns
    -------
    tvm.relay.expr.Call
        The input call node when it does not refer to a supported Op,
        otherwise a new call node with altered Op attributes.
    """
    is_supported_npu_op = isinstance(call.op, tvm.ir.Op) and call.op.name in self.optimize_ops
    if is_supported_npu_op:
        call = self.alter_ethosu_op_layout(call)
    return super().visit_call(call)
@util.create_npu_function_pass(opt_level=1)
class LayoutOptimizer:
    """Register LayoutOptimizer as a Relay pass."""

    def transform_npu_function(self, _, func: relay.Function) -> relay.Function:
        """Optimize the layout of NPU operations: whenever both the
        producer and the consumer of a tensor are NPU operators, the
        tensor layout is converted from NHWC to NHCWB16, the layout the
        NPU uses internally."""
        optimize_ops = {
            "contrib.ethosu.conv2d": op.ethosu_conv2d,
            "contrib.ethosu.depthwise_conv2d": op.ethosu_depthwise_conv2d,
            "contrib.ethosu.pooling": op.ethosu_pooling,
            "contrib.ethosu.binary_elementwise": op.ethosu_binary_elementwise,
            "contrib.ethosu.unary_elementwise": op.ethosu_unary_elementwise,
        }
        analyzer = AnalyzeConsumers(optimize_ops)
        analyzer.visit(func)
        optimizer = LayoutOptimization(
            analyzer.npu_consumers, analyzer.restrictions, optimize_ops
        )
        return optimizer.visit(func)

    def __call__(self, *args, **kwargs):
        # Required for the pass-registration machinery; invocation is
        # handled by the create_npu_function_pass decorator.
        pass
class PadsWithMultipleConsumersReplicator(ExprMutator):
    """A pass to handle the situation when an nn.pad operator has
    more than one qnn.conv2d consumer.

               pad
              /   \\
         Conv2D   Conv2D

    In this case, because of the peculiarities of pattern parsing,
    conv2d does not get into the composite for the NPU.
    Therefore, pads are duplicated so that each has only one consumer.
    """

    def __init__(self):
        super().__init__()
        # Structural hashes of nn.pad calls that already feed exactly one
        # qnn.conv2d consumer seen so far during traversal.
        self.hashes = set()

    def visit_call(self, call: tvm.relay.expr.Call) -> tvm.relay.expr.Call:
        # Only a qnn.conv2d whose first argument is an nn.pad is of
        # interest; any other call is rebuilt unchanged at the bottom.
        if (
            isinstance(call.op, tvm.ir.Op)
            and isinstance(call.args[0], Call)
            and isinstance(call.args[0].op, tvm.ir.Op)
            and call.op == relay.op.get("qnn.conv2d")
            and call.args[0].op == relay.op.get("nn.pad")
        ):
            if tvm.ir.structural_hash(call.args[0]) not in self.hashes:
                # First conv2d consumer seen for this pad: just record it.
                self.hashes.add(tvm.ir.structural_hash(call.args[0]))
            else:
                # This pad already has a conv2d consumer, so duplicate the
                # pad and make the copy the input for the current conv2d.
                used_pad = self.visit(call.args[0])
                used_pad_args = [self.visit(arg) for arg in used_pad.args]
                # A fresh Call node, so it is a distinct producer even
                # though it is structurally identical to the original pad.
                new_pad = Call(
                    used_pad.op, used_pad_args, used_pad.attrs, used_pad.type_args, used_pad.span
                )
                new_conv2d_args = []
                for i, arg in enumerate(call.args):
                    if i == 0:
                        new_conv2d_args.append(self.visit(new_pad))
                    else:
                        new_conv2d_args.append(self.visit(arg))
                new_conv2d_op = self.visit(call.op)
                expr__ = _expr.CallWithFields(
                    call,
                    new_conv2d_op,
                    new_conv2d_args,
                    call.attrs,
                    call.type_args,
                    None,
                    call.span,
                )
                return expr__
        # Default path: rebuild the call with visited args/op.
        new_args = [self.visit(arg) for arg in call.args]
        new_op = self.visit(call.op)
        expr__ = _expr.CallWithFields(
            call, new_op, new_args, call.attrs, call.type_args, None, call.span
        )
        return expr__
def replicate_pads(mod):
    """Traverse the Relay graph and replicate nn.pad operators if they have
    multiple qnn.conv2d consumers.

    This removes the situation where e.g. pad+conv2d corresponds to
    qnn_conv2d_pattern but cannot be grouped because several conv2d ops use
    the same pad operation.

    Parameters
    ----------
    mod : tvm.ir.IRModule
        The IRModule that gets generated from a relay frontend.

    Returns
    -------
    tvm.ir.IRModule
        The IRModule without nn.pad operators with multiple consumers.
    """
    replicator = PadsWithMultipleConsumersReplicator()
    for global_var, func in mod.functions.items():
        func = replicator.visit(func)
        mod.update_func(global_var, func)
    return mod
def IdentityOptimizer():  # pylint: disable=invalid-name
    """Create the module pass that removes redundant identity operations.

    Return
    ------
    Pass
        The module pass.
    """
    # The pass itself is implemented in C++ and exposed through the FFI.
    return _ffi_api.IdentityOptimizer()
def OutlineCompilerFunctions(compiler_name):  # pylint: disable=invalid-name
    """Create the module pass that outlines functions carrying the given
    named Compiler attribute.

    Parameters
    ----------
    compiler_name
        The name of the compiler to look for and outline.

    Return
    ------
    Pass
        The module pass.
    """
    # The pass itself is implemented in C++ and exposed through the FFI.
    return _ffi_api.OutlineCompilerFunctions(compiler_name)
@tvm._ffi.register_func("relay.ext.ethos-u.constant_updater")
def constant_updater(expr, symbol):  # pylint: disable=unused-argument
    """
    The constant updater process happens after lowering in the core compiler.
    For the NPU, we don't want the build process to extract constants to be
    loaded in the runtime, as we are embedding them inside the C runtime.Module.

    Returns
    -------
    dict
        Always empty: no constants are handed to the runtime.
    """
    return {}
def _create_cascader(
    options: CascaderOptions,
    io_region: MemoryRegion,
    constant_region: MemoryRegion,
    working_regions: List[MemoryRegion],
    device_config: EthosuDeviceConfig,
) -> Callable:
    """Build a scheduler callback that runs the cascader with the given
    configuration captured in its closure."""

    def _cascader(te_graph, const_dict, sch):
        cascade(
            sch,
            te_graph,
            const_dict,
            options,
            io_region,
            constant_region,
            working_regions,
            device_config,
        )

    return _cascader
def _ethos_u55_cascader(sram, enable_striping) -> Callable:
    """Construct a cascader callback configured for the U55 accelerator."""
    device_config = EthosuDeviceConfig(util.get_accelerator_config())
    # TODO(ekalda): Extract the flash info from ConstantPools once it is implemented
    flash = MemoryRegion(name="FLASH", size=10**7, read_bandwidth=4, write_bandwidth=4)
    options = CascaderOptions(
        cascade_region=sram,
        max_proposals=64,
        stripe_factors=5,
        max_plan_size=10,
        always_copy_size=1024,
        max_open_plans=8,
        max_closed_plans=32,
        enable_striping=enable_striping,
    )
    return _create_cascader(
        options=options,
        io_region=sram,
        constant_region=flash,
        working_regions=[sram],
        device_config=device_config,
    )
def _calculate_memory_pressure(mod: tvm.ir.IRModule) -> int:
    """
    Calculates a worst-case estimate of the memory consumed at the callsite of
    each microNPU function. This value can be used as a hint to guide the cascader,
    indicating how aggressively it will need to optimize the input module to fit
    into the memory that remains in the memory workspace.

    Parameters
    ----------
    mod : tvm.ir.IRModule
        The input module

    Returns
    -------
    int
        Memory pressure value for the module.
    """
    memory_pressure = 0

    @util.create_npu_function_pass(opt_level=1)
    class CalculateMemoryPressure:
        """
        Traverse the module and get total memory used by external NPU functions.
        """

        def transform_npu_function(self, _, func: relay.Function) -> relay.Function:
            # Accumulate into the enclosing function's counter via nonlocal:
            # the pass infrastructure only hands us one NPU function at a time.
            nonlocal memory_pressure
            # Take the worst-case (maximum) of the "used_memory" annotations.
            max_val = max(func.attrs["used_memory"])
            memory_pressure += max_val
            return func

    CalculateMemoryPressure()(mod)  # pylint: disable=not-callable

    # When workspace I/O is not enabled, the I/O buffers recorded on main are
    # not part of the workspace, so subtract their contribution.
    io_used_memory = 0
    if not tvm.tir.usmp.utils.use_workspace_io_is_enabled():
        io_used_memory = int(mod["main"].attrs["io_used_memory"])
    return memory_pressure - io_used_memory
@tvm._ffi.register_func("relay.ext.ethos-u.relay_to_tir")
def relay_to_tir(mod: tvm.ir.IRModule) -> tvm.ir.IRModule:
    """
    This is the hook for python-based lowering of a Relay module which lowers NPU
    external functions to TIR.

    Parameters
    ----------
    mod : tvm.ir.IRModule
        This is the Relay module.

    Returns
    -------
    mod : tvm.ir.IRModule
        The Relay module with scheduled NPU external functions.
    """
    # Pass pipeline: outline the "ethos-u" functions, legalize them to NPU
    # ops, then run the NPU-specific optimizers. InferType is re-run after
    # passes that rewrite the graph.
    mod = OutlineCompilerFunctions("ethos-u")(mod)
    mod = LegalizeEthosU()(mod)
    mod = LUTsOptimizer()(mod)
    mod = relay.transform.InferType()(mod)
    mod = IdentityOptimizer()(mod)
    mod = LayoutOptimizer()(mod)
    mod = relay.transform.InferType()(mod)
    # Record the "ethos-u" compiler name against every NPU function.
    device_contexts = {
        gv: "ethos-u" for gv, _ in filter(lambda x: util.is_npu_func(x[1]), mod.functions.items())
    }
    mod = mod.with_attr("device_contexts", device_contexts)
    # Use the cascader if it is enabled for the U55 accelerator, otherwise use copy_constants
    # scheduler
    if util.is_cascader_enabled():
        if util.get_accelerator_config() == "ethos-u65-256":
            raise ValueError("Cascading is not supported for the U65 accelerator")
        workspace_memory_pools = mod.attrs["workspace_memory_pools"]
        if not workspace_memory_pools:
            raise ValueError("Workspace memory pool needs to be provided for the U55 cascader")
        if len(workspace_memory_pools.pools) != 1:
            raise ValueError("Exactly one workspace pool needs to be provided for the U55 cascader")
        # The cascader needs to know how much memory is already committed so
        # it can size its proposals to what is left in the workspace pool.
        memory_pressure = _calculate_memory_pressure(mod)
        sram = extract_memory_info(workspace_memory_pools.pools[0], memory_pressure)
        tir_mod = LowerToTIR(_ethos_u55_cascader(sram, util.is_striping_enabled()))(mod)
    else:
        scheduler = None if util.is_copying_constants_disabled() else copy_constants()
        tir_mod = LowerToTIR(scheduler)(mod)
    return tir_mod
@tvm._ffi.register_func("relay.ext.ethos-u.primfunc_to_artifact")
def primfunc_to_artifact(primfunc: tvm.tir.PrimFunc) -> util.CompilationArtifact:
    """Hook lowering a TIR PrimFunc that has undergone unified optimization
    into the compilation artifact destined for the microNPU.

    Parameters
    ----------
    primfunc : tir.PrimFunc
        TIR PrimFunc that has undergone unified optimizations

    Returns
    -------
    CompilationArtifact
        A structure that holds the binary artifacts for the microNPU
    """
    symbol = str(primfunc.attrs["global_symbol"])
    const_dict = primfunc.attrs["ethos-u.constants"]
    tir_mod = tvm.IRModule()
    tir_mod[symbol] = primfunc

    # Convert the attached constants to numpy before translation.
    const_dict_np = {
        buffer_var: const_dict[buffer_var].numpy() for buffer_var in const_dict.keys()
    }

    cmms, encoded_constants, base_addresses = tir_to_cs_translator.translate(tir_mod, const_dict_np)
    return util.CompilationArtifact(symbol, cmms, encoded_constants, base_addresses)
| 23,429 | 34.990783 | 145 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/vela_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is an adapter module for conversions between TVM and Vela.
The following conversion APIs are added :
*Obtaining the best block config
*Compressing weights
*Packing biases
"""
import logging
import math
from typing import List, Optional, Tuple
import numpy as np # type: ignore
from ethosu.vela import api as vapi # type: ignore
from ethosu.vela.architecture_allocator import find_block_config
from ethosu.vela.architecture_features import Accelerator, create_default_arch
from ethosu.vela.operation import NpuBlockType
from ethosu.vela.register_command_stream_generator import resampling_mode_map
from ethosu.vela.register_command_stream_util import to_kernel
from ethosu.vela.shape4d import Shape4D
import tvm
from tvm.relay.backend.contrib.ethosu import tir_to_cs_translator as tirtocs
from tvm.relay.backend.contrib.ethosu import util # type: ignore
# pylint: disable=invalid-name
logger = logging.getLogger("Ethos-U")

# Mapping from Vela's NpuDataType enum members to the equivalent numpy dtypes.
VELA_TO_NP_DTYPES = {
    vapi.NpuDataType.UINT8: np.uint8,
    vapi.NpuDataType.UINT16: np.uint16,
    vapi.NpuDataType.INT8: np.int8,
    vapi.NpuDataType.INT16: np.int16,
    vapi.NpuDataType.INT32: np.int32,
}

# Width in bytes of each packed scale-and-bias entry (see pack_biases).
SCALE_BIAS_LENGTH = 10
def get_optimal_block_config(
    npu_op: vapi.NpuOperation, accel_config: vapi.NpuAccelerator
) -> vapi.NpuShape3D:
    """Pick an optimal block configuration for the given NPU operation.

    The NPU's unit of work is known as a block. It will fetch block(s) from the
    Input Feature Map (IFM) and compute a block of the Output Feature Map (OFM).
    Therefore, we need to pick an optimal block configuration considering the
    bandwidth to bring in IFM blocks and the number of OFM block computes that
    need to happen to cover the OFM as indicated by the npu op.

    When the cascader is enabled, TVM's logic for choosing the optimal block
    configuration is used; otherwise Vela's logic is used. In both cases the
    dev_force_block_config option, when specified, takes precedence.

    Parameters
    ----------
    npu_op : ethosu.vela.api.NpuOperation
        The NPU operation and its params
    accel_config : ethosu.vela.api.NpuAccelerator
        The NPU accelerator config

    Returns
    -------
    ethosu.vela.api.NpuShape3D :
        The optimal block config for the operator
    """
    options = tvm.transform.PassContext.current().config.get("relay.ext.ethos-u.options", None)
    if options and options.dev_force_block_config:
        # Developer override, e.g. "16x8x32" -> height x width x depth.
        height, width, depth = (int(v) for v in options.dev_force_block_config.split("x"))
        return vapi.NpuShape3D(height=height, width=width, depth=depth)
    if options and options.enable_cascader:
        all_valid_block_configs = vapi.npu_find_block_configs(npu_op, accel_config)
        return _get_optimal_block_config(all_valid_block_configs)
    return _find_block_config_with_vela(npu_op, accel_config)
def _find_block_config_with_vela(
    npu_op: vapi.NpuOperation, accelerator: vapi.NpuAccelerator
) -> vapi.NpuShape3D:
    """An internal function to get block config using Vela's logic.

    Parameters
    ----------
    npu_op : ethosu.vela.api.NpuOperation
        The NPU operation
    accelerator : ethosu.vela.api.NpuAccelerator
        The NPU accelerator

    Returns
    -------
    ethosu.vela.api.NpuShape3D :
        The optimal block config for the operator
    """
    # Map the NPU operation class onto the block-type enum that drives
    # Vela's find_block_config search.
    if isinstance(npu_op, vapi.NpuConv2DOperation):
        block_type = NpuBlockType.ConvolutionMxN
    elif isinstance(npu_op, vapi.NpuConvDepthWiseOperation):
        block_type = NpuBlockType.ConvolutionDepthWise
    elif isinstance(npu_op, vapi.NpuPoolingOperation):
        block_type = (
            NpuBlockType.ReduceSum
            if npu_op.sub_op_type == vapi.NpuPoolingOp.REDUCE_SUM
            else NpuBlockType.Pooling
        )
    elif isinstance(npu_op, vapi.NpuElementWiseOperation):
        block_type = NpuBlockType.ElementWise
    else:
        assert 0, "Unsupported operation"
    # Vela's Shape4D is batch-first; the NPU op shapes carry no batch, so a
    # batch of 1 is prepended.
    ifm_shape = Shape4D(1, npu_op.ifm.shape.height, npu_op.ifm.shape.width, npu_op.ifm.shape.depth)
    ifm2_shape = None
    if npu_op.ifm2:
        ifm2_shape = Shape4D(
            1, npu_op.ifm2.shape.height, npu_op.ifm2.shape.width, npu_op.ifm2.shape.depth
        )
    ofm_shape = Shape4D(1, npu_op.ofm.shape.height, npu_op.ofm.shape.width, npu_op.ofm.shape.depth)
    ifm_resampling_mode = resampling_mode_map[npu_op.ifm_upscale]
    ifm_bits = npu_op.ifm.data_type.size_in_bits()
    # A table-lookup activation reserves LUT banks; other activations do not.
    lut_banks = 0
    if npu_op.activation:
        lut_banks = 2 if npu_op.activation.op_type == vapi.NpuActivationOp.TABLE_LOOKUP else 0
    # Scaling is only considered available when every tensor that is present
    # carries quantization parameters.
    has_scaling = True
    for tensor in [npu_op.ifm, npu_op.ifm2, npu_op.ofm]:
        if tensor and tensor.quantization is None:
            has_scaling = False
            break
    arch = create_default_arch(Accelerator.from_npu_accelerator(accelerator))
    cfg = find_block_config(
        arch,
        block_type,
        ofm_shape,
        ifm_shape,
        ifm2_shape,
        npu_op.ifm2_scalar is not None,
        ifm_bits,
        to_kernel(npu_op.kernel),
        lut_banks,
        has_scaling,
        ifm_resampling_mode,
    )
    assert cfg is not None, f"There is no configuration suitable for {accelerator}"
    return vapi.NpuShape3D(cfg.ofm_block.height, cfg.ofm_block.width, cfg.ofm_block.depth)
def _get_optimal_block_config(all_valid_block_configs: List[vapi.NpuShape3D]) -> vapi.NpuShape3D:
    """An internal function to get the block config with the largest depth
    and then the highest volume/area.

    Unlike the previous implementation this does not sort the caller's list
    in place, so the argument is left unmodified.

    Parameters
    ----------
    all_valid_block_configs : List[vapi.NpuShape3D]
        Non-empty list of candidate block configurations.

    Returns
    -------
    ethosu.vela.api.NpuShape3D
        The chosen block configuration.
    """
    assert isinstance(all_valid_block_configs, list)
    for block_cfg in all_valid_block_configs:
        assert isinstance(block_cfg, vapi.NpuShape3D)

    def _volume(cfg):
        return cfg.depth * cfg.height * cfg.width

    # The largest-volume block is only used below as a reference point when
    # logging the quality of the chosen configuration.
    largest_volume = max(_volume(cfg) for cfg in all_valid_block_configs)

    # Selection order: largest depth first ...
    max_d = max(cfg.depth for cfg in all_valid_block_configs)
    max_depth_block_configs = [cfg for cfg in all_valid_block_configs if cfg.depth == max_d]
    # ... then largest height * width area ...
    max_area = max(cfg.height * cfg.width for cfg in max_depth_block_configs)
    max_area_depth_block_configs = [
        cfg for cfg in max_depth_block_configs if cfg.height * cfg.width == max_area
    ]
    # ... and finally largest height, to get a deterministic answer every time.
    max_area_depth_block_configs.sort(key=lambda cfg: cfg.height, reverse=True)
    assert len(max_area_depth_block_configs) > 0
    chosen = max_area_depth_block_configs[0]
    logger.info("Using block config=%s", chosen)
    logger.info(
        "Quality of the block config w.r.t. max volume block config=%s",
        100.0 * (_volume(chosen) / largest_volume),
    )
    return chosen
def encode_weights(
    tir_extern_call: tvm.tir.Call, values: np.ndarray, accel_config: vapi.NpuAccelerator
):
    """This is an API function to compress weights by passing
    a tir_extern_call to NPU Convolution operation and values.

    Parameters
    ----------
    tir_extern_call : tvm.tir.Call
        tir_extern_call to NPU Convolution operation
    values : numpy.ndarray
        The constant flattened weight data in OHWI layout
    accel_config : ethosu.vela.api.NpuAccelerator
        The NPU accelerator config

    Returns
    -------
    bytearray
        Compressed weights
    """
    # Translators that recover the NpuOperation and the weights' zero point
    # from the TIR extern call, per supported convolution flavour.
    supported_ops = {
        "ethosu_conv2d": tirtocs.translate_ethosu_conv2d,
        "ethosu_depthwise_conv2d": tirtocs.translate_ethosu_depthwise_conv2d,
    }
    # The first extern-call argument names the operation.
    op = str(tir_extern_call.args[0].value)
    assert op in supported_ops.keys()
    npu_op, weights_zero_point = supported_ops[op](tir_extern_call)
    is_depthwise = op == "ethosu_depthwise_conv2d"
    # Recover the original shape if we are dealing with a flattened tensor
    if len(values.shape) == 1:
        shape_ohwi = (
            npu_op.ofm.shape.depth,
            npu_op.kernel.height,
            npu_op.kernel.width,
            # Depthwise weights have a single input channel per filter.
            1 if is_depthwise else npu_op.ifm.shape.depth,
        )
        assert values.size == np.prod(shape_ohwi)
        values = np.reshape(values, shape_ohwi)
    return compress_weights(
        weights=values,
        weights_zp=weights_zero_point,
        # The weight layout is assumed to be OHWI, always.
        weights_layout="OHWI",
        ifm_bitdepth=npu_op.ifm.data_type.size_in_bits(),
        block_depth=npu_op.block_config.depth,
        dilation=(npu_op.kernel.dilation_x, npu_op.kernel.dilation_y),
        accel_config=accel_config,
        is_depthwise=is_depthwise,
    )
def compress_weights(
    weights: np.ndarray,
    weights_zp: int,
    weights_layout: str,
    ifm_bitdepth: int,
    block_depth: int,
    dilation: Tuple[int, int],
    accel_config: vapi.NpuAccelerator,
    is_depthwise: Optional[bool] = False,
) -> bytearray:
    """The NPU requires the weights to be compressed before execution, so
    this function calls into the Vela APIs to compress them.

    Parameters
    ----------
    weights : numpy.ndarray
        The raw weights
    weights_zp : int
        The zero point of the weights
    weights_layout : str
        A string literal indicating the layout
        Supported values : HWIO, HWOI, OHWI
    ifm_bitdepth : int
        The bit depth of the ifm the weights are used with
    block_depth : int
        The depth of the optimal block config for the operator
    dilation : tuple
        A tuple of 2 elements indicating dilation in h and w
    accel_config : ethosu.vela.api.NpuAccelerator
        The NPU accelerator config
    is_depthwise : bool, Optional
        This indicates whether the weights are compressed for depthwise convolution

    Returns
    -------
    compressed_weights : bytearray
        Compressed weights
    """
    layout_transform_indices = {"HWIO": (3, 0, 1, 2), "HWOI": (2, 0, 1, 3), "OHWI": (0, 1, 2, 3)}
    assert weights_layout in layout_transform_indices.keys()
    assert isinstance(weights_zp, np.int64)

    # Subtract the zero point in a wider integer type.
    weights = weights.astype(np.int16) - weights_zp

    # Vela needs the weights in OHWI layout
    permutation = layout_transform_indices[weights_layout]
    weights_ohwi = np.transpose(weights, permutation)
    shape_ohwi = [weights.shape[axis] for axis in permutation]

    block_traversal = calculate_block_traversal_mode(is_depthwise, shape_ohwi, ifm_bitdepth)
    return vapi.npu_encode_weights(
        accelerator=accel_config,
        weights_volume=weights_ohwi,
        dilation_xy=dilation,
        ifm_bitdepth=ifm_bitdepth,
        ofm_block_depth=block_depth,
        is_depthwise=is_depthwise,
        block_traversal=block_traversal,
    )
def calculate_block_traversal_mode(
    is_depthwise: bool, weights_shape_ohwi: List[int], ifm_bitdepth: int
) -> vapi.NpuBlockTraversal:
    """Calculate a block traversal mode given whether the op is a depthwise
    convolution, the shape of the weights and the bit-depth of the ifm.
    """
    if is_depthwise:
        return vapi.NpuBlockTraversal.DEPTH_FIRST

    out_depth = weights_shape_ohwi[3]
    kernel_size = weights_shape_ohwi[1] * weights_shape_ohwi[2]

    # Determine which block traversal strategy has better DPU utilization
    depth_utilization = out_depth / util.round_up(out_depth, 32 if ifm_bitdepth == 8 else 16)
    part_kernel_utilization = (out_depth / util.round_up(out_depth, 8)) * (
        kernel_size / util.round_up(kernel_size, 4 if ifm_bitdepth == 8 else 2)
    )

    if part_kernel_utilization >= depth_utilization or out_depth <= 8:
        # Part-kernel first is always better for ifm depths <= 8
        return vapi.NpuBlockTraversal.PART_KERNEL_FIRST
    return vapi.NpuBlockTraversal.DEPTH_FIRST
def pack_biases(
    biases: np.ndarray,
    ifm_scale: float,
    ifm_dtype: np.dtype,
    weight_scales: np.ndarray,
    ofm_scale: float,
    is_activation_tanh_or_sigmoid: bool = False,
) -> np.ndarray:
    """
    The NPU requires each bias value to be packed with
    output scale parameters in a 80-bit format (that is returned
    via npu_encode_bias API). This function will pack such values
    to a binary artifact that the NPU will use in the execution.

    Parameters
    ----------
    biases : numpy.ndarray
        The values of biases
    ifm_scale : float
        The quantization scale parameter of input feature map
    ifm_dtype : numpy.dtype
        The data type of input feature map data.
    weight_scales : numpy.ndarray
        The quantization scale parameter of weight feature map
        This could be a tuple if per-channel quantization is present.
    ofm_scale : float
        The quantization scale parameter of output feature map.
    is_activation_tanh_or_sigmoid : bool
        Indicates whether the fused activation function is tanh or sigmoid.

    Returns
    -------
    scale_bias : numpy.ndarray
        Packed scales/biases as the hardware requires them.
    """
    # The BYOC infra should not partition anything else.
    supported_ifm_dtypes = (np.uint8, np.int8, np.int16)
    assert ifm_dtype in supported_ifm_dtypes

    # Broadcast a single per-tensor weight scale across all bias channels.
    if weight_scales.size == 1:
        weight_scales = [weight_scales] * biases.size

    hw_bias_scales = _calculate_hw_bias_scales(
        ifm_scale, weight_scales, ofm_scale, ifm_dtype, is_activation_tanh_or_sigmoid
    )
    assert len(hw_bias_scales) == biases.size
    biases = biases.astype("int64")

    packed_biases = bytearray()
    for idx, scale in enumerate(hw_bias_scales):
        packed_biases.extend(vapi.npu_encode_bias(biases[idx], *scale))
    scale_bias = np.frombuffer(packed_biases, dtype=np.uint8)
    # Use the module constant instead of a magic number for the per-entry
    # width so this stays consistent with SCALE_BIAS_LENGTH.
    scale_bias = np.reshape(scale_bias, (-1, SCALE_BIAS_LENGTH))
    return scale_bias
def _quantize_scale(scale: float) -> Tuple[int, int]:
    """Quantize a floating point scale into a 32-bit integer scale with a
    6-bit shift. This is to be used with 8-bit data.
    """
    mantissa, exponent = math.frexp(scale)
    scaled_mantissa = int(util.round_away_zero(mantissa * (1 << 31)))
    shift = 31 - exponent
    if not 0 <= shift < (1 << 6):
        # Shift outside of valid range, set scale to 0
        return 0, 16
    return scaled_mantissa, shift
def _reduced_quantize_scale(scale: float) -> Tuple[int, int]:
    """A reduction of precision is required for 16 bit data."""
    mantissa_scaled, required_shift = _quantize_scale(scale)
    # The largest value a signed 16-bit number can represent.
    max_reduced = (1 << 15) - 1
    # Saturate when the pre-scaled value would exceed the reduced range;
    # otherwise round to nearest while dropping the low 16 bits.
    if mantissa_scaled >= max_reduced << 16:
        reduced_mantissa = max_reduced
    else:
        reduced_mantissa = (mantissa_scaled + (1 << 15)) >> 16
    if not 0 <= required_shift < (1 << 6):
        # Shift outside of valid range, set scale to 0
        return 0, 16
    return reduced_mantissa, required_shift - 16
def _calculate_hw_bias_scales(
    ifm_scale: float,
    weight_scales: List[float],
    ofm_scale: float,
    ifm_dtype: np.dtype,
    is_faf_tanh_sigmoid: bool = False,
) -> List[Tuple[int, int]]:
    """Produce the hardware bias scales from the ifm, weight and ofm scales.
    Note that when per-channel / per-value quantization is present it ends
    up in these hardware bias scales as well."""
    if is_faf_tanh_sigmoid:
        ifm_scale = ifm_scale * 0x3000

    if ifm_dtype == np.uint8:
        bias_scales = [np.double(ifm_scale * ws) / np.double(ofm_scale) for ws in weight_scales]
    else:
        assert ifm_dtype in (np.int8, np.int16)
        ifm_scale_dbl = np.double(ifm_scale)
        ofm_scale_dbl = np.double(ofm_scale)
        bias_scales = [ifm_scale_dbl * np.double(ws) / ofm_scale_dbl for ws in weight_scales]

    # 16-bit ifm data needs the reduced-precision quantization.
    if ifm_dtype == np.int16:
        return [_reduced_quantize_scale(bs) for bs in bias_scales]
    assert ifm_dtype in (np.uint8, np.int8)
    return [_quantize_scale(bs) for bs in bias_scales]
def get_accelerator_config() -> vapi.NpuAccelerator:
    """Get the configuration of the NPU accelerator.

    The configuration string provided as a compiler option is converted into
    an NpuAccelerator object. Valid configuration strings:

     - 'ethos-u55-256'
     - 'ethos-u55-128'
     - 'ethos-u55-64'
     - 'ethos-u55-32'
     - 'ethos-u65-256'
     - 'ethos-u65-512'
    """
    npu_accel_str_map = {
        "ethos-u55-256": vapi.NpuAccelerator.Ethos_U55_256,
        "ethos-u55-128": vapi.NpuAccelerator.Ethos_U55_128,
        "ethos-u55-64": vapi.NpuAccelerator.Ethos_U55_64,
        "ethos-u55-32": vapi.NpuAccelerator.Ethos_U55_32,
        "ethos-u65-256": vapi.NpuAccelerator.Ethos_U65_256,
        "ethos-u65-512": vapi.NpuAccelerator.Ethos_U65_512,
    }
    compiler_attrs = tvm.get_global_func("relay.ext.ethos-u.get_compiler_attrs")()
    accel_config_str = compiler_attrs.accelerator_config
    assert accel_config_str in npu_accel_str_map.keys(), f"{accel_config_str} is not supported"
    return npu_accel_str_map[accel_config_str]
def get_max_copy_movements() -> int:
    """Get the maximum number of copy movements for the CopyComputeReordering
    pass.

    max_outstanding_dma from the architecture features indicates how many
    DMA operations can be in progress.
    """
    accelerator = Accelerator.from_npu_accelerator(get_accelerator_config())
    return create_default_arch(accelerator).max_outstanding_dma
| 19,025 | 37.358871 | 100 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for relay transformation passes."""
import tvm._ffi # type: ignore
tvm._ffi._init_api("relay.ext.ethos-u", __name__)
tvm._ffi._init_api("tir.contrib.ethos-u", __name__)
| 968 | 43.045455 | 62 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/legalize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
# pylint: disable=no-value-for-parameter, use-list-literal
"""A set of passes to legalize some of operations for the NPU"""
from typing import List, Type, Callable
import math
import numpy as np # type: ignore
from ethosu.vela import scaling, fp_math
import tvm # type: ignore
from tvm import relay
from tvm.relay.dataflow_pattern import DFPatternCallback # type: ignore
from tvm.relay.dataflow_pattern import wildcard
from tvm.relay.dataflow_pattern import is_op
from tvm.relay.dataflow_pattern import rewrite
from tvm.relay.dataflow_pattern import CallPattern
from tvm.relay.backend.contrib.ethosu import op as ethosu_ops # type: ignore
from tvm.relay.backend.contrib.ethosu import vela_api
from tvm.relay.backend.contrib.ethosu import util
from tvm.relay.backend.contrib.ethosu.softmax_rewriter import SoftmaxRewriter
from tvm.relay.op.contrib import ethosu as ethosu_patterns # type: ignore
class SplitRewriter(DFPatternCallback):
    """This rewriting converts split operations into a sequence of
    strided_slice operations, because codegen is going to be based
    on strided_slices that will define the slice of the tensor that
    will be fed to the consumer.
    """

    def __init__(self):
        super().__init__(require_type=True)
        self.split_in = wildcard()
        self.pattern = is_op("split")(self.split_in)

    @staticmethod
    def get_section_begin_coords(split: tvm.relay.Expr) -> List[int]:
        """Currently, the split operator takes an array of indices or an
        integer indicating the number of splits. However, an array of indices
        can represent both cases, therefore this function normalizes the
        attribute into an array of indices where each index represents the
        coordinate of the beginning of each section -- defined as section
        begins.

        Parameters
        ----------
        split : tvm.relay.Expr
            The Relay Call expression for a split operator

        Returns
        -------
        section_begins : List[int]
            A list containing integers corresponding to section
            begins
        """
        indices_or_sections = split.attrs.indices_or_sections
        input_shape = split.args[0].checked_type.shape
        split_axis = split.attrs.axis

        if isinstance(indices_or_sections, tvm.ir.container.Array):
            # 0 is the beginning of the first section.
            return [0] + list(indices_or_sections)
        # An integer means equally-sized sections along the split axis.
        split_axis_len = input_shape[split_axis].value
        section_length = split_axis_len // indices_or_sections.value
        return list(range(0, split_axis_len, section_length))

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        split_input = post.args[0]
        split_begins = []
        split_ends = []
        section_begins_in_split_axis = self.get_section_begin_coords(post)
        for split_coord in section_begins_in_split_axis:
            # Each begin is all zeros except for the split-axis coordinate.
            begin_shape = [0] * len(split_input.checked_type.shape)
            begin_shape[post.attrs.axis] = split_coord
            split_begins.append(begin_shape)

            end_shape = list(split_input.checked_type.shape)
            # Only the split axis coordinate changes
            end_shape[post.attrs.axis] = split_coord
            split_ends.append(end_shape)

        # Coordinates need to be shifted left because the beginning
        # of the next section is the end of the previous one.
        split_ends = split_ends[1:]
        # The last section's end is the shape of the tensor itself.
        split_ends.append(list(split_input.checked_type.shape))

        strided_slices = [
            relay.strided_slice(split_input, sb, se) for sb, se in zip(split_begins, split_ends)
        ]
        return relay.Tuple(strided_slices)
class PartitionedSplitRewriter(DFPatternCallback):
    """This pass brings the split out of the partitioned function"""

    def __init__(self):
        super().__init__(require_type=True, rewrite_once=True)
        self.pattern = (
            wildcard().has_attr({"Composite": ethosu_patterns.SplitParams.composite_name})
        )(wildcard())

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        # Recover the split parameters from the composite function body and
        # re-emit the split directly on the partitioned function's input.
        split_params = ethosu_patterns.SplitParams(post.op.body)
        return relay.op.split(
            post.args[0], split_params.indices_or_sections, axis=split_params.axis
        ).astuple()
def get_lut_from_func(
    ifm_scale: float,
    ifm_zp: int,
    ofm_scale: float,
    ofm_zp: int,
    func: Callable[[float], float],
) -> List[int]:
    """Calculates the values of the lookup table based on the calculation function.

    Each quantized int8 input value is dequantized, passed through ``func``,
    requantized with the output parameters and clamped to the int8 range.
    """
    # Only int8 is currently supported
    info = np.iinfo(np.int8)
    lut_values = []
    for quantized in range(info.min, info.max + 1):
        real_input = ifm_scale * (quantized - ifm_zp)
        real_output = func(real_input)
        requantized = int(util.round_away_zero(ofm_zp + real_output / ofm_scale))
        lut_values.append(min(info.max, max(info.min, requantized)))
    return lut_values
class LutActivationRewriter(DFPatternCallback):
    """A class to create an identity operator with the LUT"""

    def __init__(
        self,
        params_class: Type,
        activation_type: str,
        calc_func: Callable[[float], float],
    ):
        super().__init__(require_type=True, rewrite_once=True)
        self.params_class = params_class
        self.pattern = (wildcard().has_attr({"Composite": params_class.composite_name}))(wildcard())
        self.activation_type = activation_type
        self.calc_func = calc_func

    def callback(self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map):
        """Build the lookup table for the activation and return an identity
        operator that carries it."""
        params = self.params_class(post.op.body)
        params.ifm.tensor = post.args[0]

        in_scale = float(params.ifm.q_params.scale_f32)
        in_zp = int(params.ifm.q_params.zero_point)
        out_scale = float(params.ofm.q_params.scale_f32)
        out_zp = int(params.ofm.q_params.zero_point)

        lut_values = get_lut_from_func(in_scale, in_zp, out_scale, out_zp, self.calc_func)
        lut = relay.const(lut_values, dtype=params.ifm.dtype)

        # The requantization is baked into the LUT, so the identity operator
        # keeps the input quantization parameters on both sides.
        return ethosu_ops.ethosu_identity(
            ifm=params.ifm.tensor,
            lut=lut,
            ifm_scale=in_scale,
            ifm_zero_point=in_zp,
            ofm_scale=in_scale,
            ofm_zero_point=in_zp,
            activation=self.activation_type,
        )
class TanhRewriter(LutActivationRewriter):
    """This pass adds tanh as a LUT to the identity operator"""

    def __init__(self):
        super().__init__(
            params_class=ethosu_patterns.TanhParams,
            activation_type="TANH",
            calc_func=math.tanh,
        )
def sigmoid_calc_func(x: float) -> float:
    """Function to calculate the values for sigmoid"""
    # Saturation limits inherited from TFLite
    if x <= -8.0:
        return 0.0
    if x >= 8.0:
        return 1.0
    return 1 / (1 + math.exp(-x))
class SigmoidRewriter(LutActivationRewriter):
    """This pass adds sigmoid as a LUT for identity op"""

    def __init__(self):
        super().__init__(
            params_class=ethosu_patterns.SigmoidParams,
            activation_type="SIGMOID",
            calc_func=sigmoid_calc_func,
        )
def leaky_relu_calc_func(x: float, alpha: float) -> float:
    """Function to calculate the values for leaky relu."""
    # Positive inputs pass through unchanged; negative ones are scaled by alpha.
    if x >= 0:
        return x
    return x * alpha
class LeakyReLURewriter(DFPatternCallback):
    """This pass adds leaky relu as a LUT for identity op."""

    def __init__(self):
        super().__init__(require_type=True, rewrite_once=True)
        self.params_class = ethosu_patterns.LeakyReLUParams
        self.pattern = wildcard().has_attr({"Composite": self.params_class.composite_name})(
            wildcard()
        )

    def callback(self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map):
        """Replace the matched composite with an ethosu_identity op whose LUT
        encodes the leaky-relu together with its requantization."""
        params = self.params_class(post.op.body)
        params.ifm.tensor = post.args[0]
        input_scale = np.double(float(params.ifm.q_params.scale_f32))
        input_zp = int(params.ifm.q_params.zero_point)
        output_scale = np.double(float(params.ofm.q_params.scale_f32))
        output_zp = int(params.ofm.q_params.zero_point)
        alpha = params.alpha
        # The calculation of the LUT values is similar to that in Vela
        # convert_lrelu_to_lut(op, arch)
        # (https://review.mlplatform.org/plugins/gitiles/ml/ethos-u/ethos-u-vela/+/refs/tags/3.2.0/ethosu/vela/tflite_graph_optimiser.py#864) # pylint: disable=line-too-long
        alpha_scalar = 1
        # Quantized multiplier/shift pairs for the negative (alpha) branch and
        # the positive (identity) branch of leaky relu.
        alpha_scale, alpha_shift = scaling.elementwise_mul_scale(input_scale, alpha, output_scale)
        identity_scale, identity_shift = scaling.elementwise_mul_scale(input_scale, 1, output_scale)
        dtype = params.ifm.dtype
        qmin, qmax = np.iinfo(dtype).min, np.iinfo(dtype).max

        def calculate_lut_value(i):
            # Values below the input zero point are negative in real terms,
            # so they take the alpha scaling; the rest use identity scaling.
            zp_shift = (
                fp_math.multiply_by_quantized_multiplier(
                    alpha_scalar * (i - input_zp), alpha_scale, alpha_shift
                )
                if i < input_zp
                else fp_math.multiply_by_quantized_multiplier(
                    i - input_zp, identity_scale, identity_shift
                )
            )
            # Clamp the requantized result to the quantized dtype's range.
            return min(qmax, max(qmin, output_zp + zp_shift))

        values = list(map(calculate_lut_value, range(qmin, qmax + 1)))
        lut = relay.const(values, dtype=dtype)
        # We baked the requantization into the LUT, so we don't requantize the identity operator
        identity = ethosu_ops.ethosu_identity(
            ifm=params.ifm.tensor,
            lut=lut,
            ifm_scale=input_scale,
            ifm_zero_point=input_zp,
            ofm_scale=input_scale,
            ofm_zero_point=input_zp,
            activation="LUT",
        )
        return identity
class HardSwishRewriter(DFPatternCallback):
    """Convert ethosu.hard_swish composite function to add operation with LUT."""

    def __init__(self):
        super().__init__(require_type=True, rewrite_once=True)
        self.params_class = ethosu_patterns.HardSwishParams
        self.pattern = wildcard().has_attr({"Composite": self.params_class.composite_name})(
            wildcard()
        )

    def callback(self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map):
        """Build a 256-entry hard-swish LUT using fixed-point arithmetic and
        return an identity op that applies it."""
        params = self.params_class(post.op.body)
        params.ifm.tensor = post.args[0]
        # The calculation of the LUT values is similar to that in Vela
        # convert_hardswish_to_lut(op, arch, nng)
        # (https://review.mlplatform.org/plugins/gitiles/ml/ethos-u/ethos-u-vela/+/refs/tags/3.2.0/ethosu/vela/tflite_graph_optimiser.py#719) # pylint: disable=line-too-long
        input_scale = np.double(params.ifm.q_params.scale_f32)
        input_zp = int(params.ifm.q_params.zero_point)
        # Work at a higher resolution (x128) to keep precision in the
        # intermediate fixed-point computations.
        hires_input_scale = (1 / 128) * input_scale
        output_scale = np.double(params.ofm.q_params.scale_f32)
        output_zp = int(params.ofm.q_params.zero_point)
        output_scale, output_shift = scaling.quantise_scale(hires_input_scale / output_scale)
        output_scale_16 = fp_math.downscale_multiplier_int32_to_int16(output_scale)
        output_shift = 31 - output_shift
        output_shift = -output_shift if output_shift < 0 else 0
        dtype = params.ifm.dtype
        qmin, qmax = np.iinfo(dtype).min, np.iinfo(dtype).max

        def calculate_relu_multiplier(inp, input_scale):
            # Fixed-point evaluation of the relu6-style multiplier used by
            # hard swish; the branch depends on where the quantized shift
            # lands relative to the 31-bit fixed-point position.
            rmultiplier = np.double(3 / 32768)
            rscale, rshift = scaling.quantise_scale(input_scale / rmultiplier)
            rscale_16 = fp_math.downscale_multiplier_int32_to_int16(rscale)
            rvalue = np.int16(inp)
            if rshift < 31:
                rvalue = fp_math.shift_left16(rvalue, 30 - rshift)
                rvalue = fp_math.saturating_rounding_mul16(rvalue, rscale_16)
                rvalue = fp_math.shift_left16(rvalue, 1)
            elif rshift > 31:
                rvalue = fp_math.saturating_rounding_mul16(rvalue, rscale_16)
                rvalue = fp_math.rounding_divide_by_pot(rvalue, rshift - 31)
            else:
                rvalue = fp_math.saturating_rounding_mul16(rvalue, rscale_16)
            # Map into [0, 1] fixed-point range with rounding.
            rvalue = (rvalue + (1 << 15)) >> 1
            return rvalue

        def calculate_lut_values(i):
            hires_input_value = (i - input_zp) * 128
            preshift_input_value = fp_math.saturating_rounding_mul16(
                hires_input_value, output_scale_16
            )
            relu_value = calculate_relu_multiplier(hires_input_value, hires_input_scale)
            lut_result = fp_math.saturating_mul16(relu_value, preshift_input_value)
            lut_result = fp_math.rounding_divide_by_pot(lut_result, output_shift) + output_zp
            # Clamp the result to the quantized dtype's range.
            return min(qmax, max(qmin, lut_result))

        values = list(map(calculate_lut_values, range(-128, 128)))
        lut = relay.const(values, dtype=dtype)
        # We baked the requantization into the LUT, so we don't requantize the identity operator
        identity = ethosu_ops.ethosu_identity(
            ifm=params.ifm.tensor,
            lut=lut,
            ifm_scale=input_scale,
            ifm_zero_point=input_zp,
            ofm_scale=input_scale,
            ofm_zero_point=input_zp,
            activation="LUT",
        )
        return identity
class Conv2DRewriter(DFPatternCallback):
    """Convert conv2d related composite functions into ethosu_conv2d operators"""

    def __init__(self):
        super().__init__(require_type=True)
        self.pattern = (wildcard().has_attr({"Composite": "ethos-u.qnn_conv2d"}))(wildcard())

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        """Extract the QNN conv2d parameters from the matched composite and
        emit an ethosu_conv2d with OHWI weights and packed scale+bias data."""
        params = ethosu_patterns.QnnConv2DParams(post.op.body)
        params.ifm.tensor = post.args[0]
        # Index of the channels dimension for each supported data layout
        channels_map = {
            "NHWC": 3,
        }
        # (H, W) slice of the weight shape for each supported weight layout
        kernel_size_map = {
            "HWIO": params.weights.shape[0:2],
            "OHWI": params.weights.shape[1:3],
            "HWOI": params.weights.shape[0:2],
        }
        activation_map = {"clip": "CLIP"}
        # Weights are rearranged into OHWI order before being constant-folded in
        weight_to_ohwi_transform_map = {"HWIO": [3, 0, 1, 2]}
        weights_values = params.weights.values
        weights_values_ohwi = np.transpose(
            weights_values, weight_to_ohwi_transform_map[str(params.weights.layout)]
        )
        if params.activation:
            activation = activation_map[params.activation.op.name]
            clip_min = int(params.activation.attrs.a_min)
            clip_max = int(params.activation.attrs.a_max)
        else:
            activation = "NONE"
            clip_min = 0
            clip_max = 0
        # Biases and rescale parameters are packed together into the layout
        # the hardware expects
        scale_bias = vela_api.pack_biases(
            biases=params.biases.tensor.data.asnumpy(),
            ifm_scale=params.ifm.q_params.scale_f32,
            ifm_dtype=np.dtype(params.ifm.dtype),
            weight_scales=params.weights.q_params.scale_f32,
            ofm_scale=params.ofm.q_params.scale_f32,
            is_activation_tanh_or_sigmoid=activation in ["TANH", "SIGMOID"],
        )
        ethosu_conv2d = ethosu_ops.ethosu_conv2d(
            ifm=post.args[0],
            weight=relay.const(weights_values_ohwi, params.weights.values.dtype),
            scale_bias=relay.const(scale_bias, "uint8"),
            lut=relay.const([], dtype="int8"),
            ifm_scale=float(params.ifm.q_params.scale_f32),
            ifm_zero_point=int(params.ifm.q_params.zero_point),
            weight_zero_point=int(params.weights.q_params.zero_point),
            ofm_scale=float(params.ofm.q_params.scale_f32),
            ofm_zero_point=int(params.ofm.q_params.zero_point),
            kernel_shape=kernel_size_map[str(params.weights.layout)],
            ofm_channels=params.ofm.shape[channels_map[str(params.ofm.layout)]],
            strides=params.strides,
            padding=params.padding,
            dilation=params.dilation,
            activation=activation,
            clip_min=clip_min,
            clip_max=clip_max,
            upscale="NONE",
            ifm_layout=str(params.ifm.layout),
            ofm_layout=str(params.ofm.layout),
        )
        return ethosu_conv2d
class Conv2DTransposeRewriter(DFPatternCallback):
    """Convert conv2d_transpose related composite functions into
    ethosu_conv2d_transpose operators."""

    def __init__(self):
        super().__init__(require_type=True)
        self.pattern = (wildcard().has_attr({"Composite": "ethos-u.qnn_conv2d_transpose"}))(
            wildcard()
        )

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        """Legalize a transpose convolution as an ethosu_conv2d with zero
        upscaling, then crop the extra padding with a strided slice."""
        params = ethosu_patterns.QnnConv2DTransposeParams(post.op.body)
        params.ifm.tensor = post.args[0]

        ofm_shape = params.ofm.shape
        legalize_padding = params.legalize_padding

        # IOHW weights are transposed to OHWI and flipped spatially, which
        # turns the transpose convolution into a regular convolution.
        weight_to_ohwi_transform_map = {"IOHW": [1, 2, 3, 0]}
        weights_values = params.weights.values
        weights_values_ohwi = np.transpose(
            weights_values, weight_to_ohwi_transform_map[str(params.weights.layout)]
        )
        weights_values_ohwi = np.flip(weights_values_ohwi, (1, 2))
        weights = relay.const(weights_values_ohwi, dtype=params.weights.values.dtype)

        # A zero bias vector is substituted when the composite has no bias.
        bias_values = (
            params.biases.tensor.data.asnumpy()
            if params.biases
            else np.zeros((params.ifm.shape[-1]))
        )
        scale_bias = vela_api.pack_biases(
            biases=bias_values,
            ifm_scale=params.ifm.q_params.scale_f32,
            ifm_dtype=np.dtype(params.ifm.dtype),
            weight_scales=params.weights.q_params.scale_f32,
            ofm_scale=params.ofm.q_params.scale_f32,
            is_activation_tanh_or_sigmoid=False,
        )

        reduced_op = ethosu_ops.ethosu_conv2d(
            ifm=post.args[0],
            weight=weights,
            scale_bias=relay.const(scale_bias, "uint8"),
            lut=relay.const([], dtype="int8"),
            ifm_scale=float(params.ifm.q_params.scale_f32),
            ifm_zero_point=int(params.ifm.q_params.zero_point),
            weight_zero_point=int(params.weights.q_params.zero_point),
            ofm_scale=float(params.ofm.q_params.scale_f32),
            ofm_zero_point=int(params.ofm.q_params.zero_point),
            kernel_shape=params.kernel_shape,
            ofm_channels=int(ofm_shape[-1]),
            strides=(1, 1),
            padding=legalize_padding,
            dilation=params.dilation,
            ifm_layout=str(params.ifm.layout),
            ofm_layout=str(params.ofm.layout),
            upscale="ZEROS",
        )

        # Remove additional padding by 'cropping' back to expected size
        return relay.strided_slice(reduced_op, (0, 0, 0, 0), ofm_shape)
class DepthwiseConv2DRewriter(DFPatternCallback):
    """Convert ethosu.qnn_depthwise_conv2d composite functions to ethosu_depthwise_conv2d
    operators"""

    def __init__(self):
        super().__init__(require_type=True)
        self.pattern = (
            wildcard().has_attr(
                {"Composite": ethosu_patterns.QnnDepthwiseConv2DParams.composite_name}
            )
        )(wildcard())

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        """Extract the depthwise conv2d parameters from the matched composite
        and emit an ethosu_depthwise_conv2d with packed scale+bias data."""
        params = ethosu_patterns.QnnDepthwiseConv2DParams(post.op.body)
        params.ifm.tensor = post.args[0]
        # Index of the channels dimension for each supported data layout
        channels_map = {
            "NHWC": 3,
        }
        # (H, W) slice of the weight shape for each supported weight layout
        kernel_shape_map = {
            "HWOI": params.weights.shape[0:2],
        }
        weights_values = params.weights.values
        # HWOI -> OHWI: move O to the front, keeping H, W, I order
        weights_values_ohwi = np.moveaxis(weights_values, [0, 1, 2, 3], [1, 2, 0, 3])

        activation = "NONE"
        # Activations requiring LUT is currently not supported, so setting it to an empty list
        lut = relay.const([], "int8")
        clip_min = 0
        clip_max = 0
        if params.activation:
            activation = ethosu_patterns.QnnDepthwiseConv2DParams.activation_map[
                params.activation.op.name
            ]
            if activation == "CLIP":
                clip_min = int(params.activation.attrs.a_min)
                clip_max = int(params.activation.attrs.a_max)
        # Biases and rescale parameters are packed together into the layout
        # the hardware expects
        scale_bias = vela_api.pack_biases(
            biases=params.biases.tensor.data.asnumpy(),
            ifm_scale=params.ifm.q_params.scale_f32,
            ifm_dtype=np.dtype(params.ifm.dtype),
            weight_scales=params.weights.q_params.scale_f32,
            ofm_scale=params.ofm.q_params.scale_f32,
            is_activation_tanh_or_sigmoid=activation in ["TANH", "SIGMOID"],
        )

        ethosu_depthwise_conv2d = ethosu_ops.ethosu_depthwise_conv2d(
            post.args[0],  # IFM
            relay.const(weights_values_ohwi, params.weights.values.dtype),
            relay.const(scale_bias, "uint8"),
            lut,
            float(params.ifm.q_params.scale_f32),
            int(params.ifm.q_params.zero_point),
            int(params.weights.q_params.zero_point),
            float(params.ofm.q_params.scale_f32),
            int(params.ofm.q_params.zero_point),
            kernel_shape_map[str(params.weights.layout)],
            params.ofm.shape[channels_map[str(params.ofm.layout)]],
            strides=params.strides,
            padding=params.padding,
            dilation=params.dilation,
            activation=activation,
            clip_min=clip_min,
            clip_max=clip_max,
            upscale="NONE",
            ifm_layout=str(params.ifm.layout),
            ofm_layout=str(params.ofm.layout),
            ofm_dtype=str(params.ofm.dtype),
        )
        return ethosu_depthwise_conv2d
class PoolingRewriter(DFPatternCallback):
    """Convert ethosu.avgpool2d and ethosu.maxpool2d composite functions to
    ethosu_pooling operators"""

    def __init__(
        self,
        params_class: Type,
        pattern: CallPattern,
    ):
        super().__init__(require_type=True)
        # Subclasses supply the concrete params class and match pattern
        self.params_class = params_class
        self.pattern = pattern

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        """Extract the pooling parameters from the matched composite and emit
        an ethosu_pooling operator."""
        params = self.params_class(post.op.body)
        params.ifm.tensor = post.args[0]
        # Index of the channels dimension for each supported data layout
        channels_map = {
            "NHWC": 3,
        }

        activation_map = {"clip": "CLIP"}
        if params.activation:
            activation = activation_map[params.activation.op.name]
            clip_min = int(params.activation.attrs.a_min)
            clip_max = int(params.activation.attrs.a_max)
        else:
            activation = "NONE"
            clip_min = 0
            clip_max = 0

        # Activations requiring LUT is currently not supported, so setting it to an empty list
        lut = relay.const([], dtype="int8")

        # If ethosu.avgpool2d has strides which are not supported by the NPU, convert
        # ethosu.avgpool2d composite functions to ethosu_pooling operator with stride=[1, 1].
        # Since the spatial dimensions of ifm and the pooling kernel coincide and the padding
        # is [0, 0, 0, 0], the application of the pooling kernel will be done only once,
        # which will give us the desired output
        strides = params.strides
        if params.strides[0] > 3 or params.strides[1] > 3:
            strides = [1, 1]

        return ethosu_ops.ethosu_pooling(
            ifm=post.args[0],
            lut=lut,
            pooling_type=params.pooling_type,
            ifm_scale=params.ifm.q_params.scale_f32,
            ifm_zero_point=params.ifm.q_params.zero_point,
            ofm_scale=params.ofm.q_params.scale_f32,
            ofm_zero_point=params.ofm.q_params.zero_point,
            pool_shape=params.pool_shape,
            ofm_channels=params.ofm.shape[channels_map[str(params.ofm.layout)]],
            ofm_dtype=params.ofm.dtype,
            strides=strides,
            padding=params.padding,
            activation=activation,
            clip_min=clip_min,
            clip_max=clip_max,
            upscale="NONE",
            ifm_layout=str(params.ifm.layout),
            ofm_layout=str(params.ofm.layout),
        )
class MaxPoolingRewriter(PoolingRewriter):
    """Legalizes the ethos-u.maxpool2d composite function."""

    def __init__(self):
        pool_pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.MaxPool2DParams.composite_name}
        )(wildcard())
        super().__init__(params_class=ethosu_patterns.MaxPool2DParams, pattern=pool_pattern)
class AvgPoolingRewriter(PoolingRewriter):
    """Legalizes the ethos-u.avgpool2d composite function."""

    def __init__(self):
        pool_pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.AvgPool2DParams.composite_name}
        )(wildcard())
        super().__init__(params_class=ethosu_patterns.AvgPool2DParams, pattern=pool_pattern)
class BinaryElementwiseRewriter(DFPatternCallback):
    """Convert ethosu binary elementwise composite functions to
    ethosu_binary_elementwise operators"""

    def __init__(
        self,
        params_class: Type,
        pattern: CallPattern,
    ):
        super().__init__(require_type=True)
        # Subclasses supply the concrete params class and match pattern
        self.params_class = params_class
        self.pattern = pattern

    @staticmethod
    def reshape_input(
        inputs: List["TensorParams"],
    ) -> List[tvm.relay.Expr]:
        """Reshape the inputs so that the following binary elementwise
        operator receives 4-dimensional inputs.

        Parameters
        ----------
        inputs: List[TensorParams]
            The inputs to reshape.

        Returns
        -------
        reshaped_inputs: List[tvm.relay.Expr]
            The new reshaped inputs.
        """
        reshaped_inputs = []
        for i in inputs:
            in_shape = i.shape
            if len(in_shape) < 4:
                # Pad the shape with leading 1s up to rank 4
                pad_size = 4 - len(in_shape)
                new_shape = ([1] * pad_size) + in_shape
                new_call = relay.reshape(i.tensor, new_shape)
                reshaped_inputs.append(new_call)
            else:
                reshaped_inputs.append(i.tensor)
        return reshaped_inputs

    @staticmethod
    def reshape_output(output: tvm.relay.Expr, ifm_input_shape: List[int]) -> tvm.relay.Expr:
        """Reshape the output back to the original dimensionality.
        Since the NPU must have the brodcastable tensor as the
        second operand, the original shape of the first ifm must
        be the output shape.

        Parameters
        ----------
        output: tvm.relay.Expr
            The output to reshape.
        ifm_input_shape: List[int]
            The shape of the non-reshaped ifm tensor.

        Returns
        -------
        reshaped_output: tvm.relay.Expr
            The reshaped output expression.
        """
        if len(ifm_input_shape) == 4:
            return output
        reshaped_output = relay.reshape(output, ifm_input_shape)
        return reshaped_output

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        """Emit an ethosu_binary_elementwise for the matched composite,
        padding both operands to rank 4 and restoring the original
        dimensionality afterwards."""
        params = self.params_class(post.op.body)
        # The NPU needs the broadcastable operand second, so the arguments are
        # swapped when the pattern reported reversed operands.
        params.ifm.tensor = post.args[1] if params.reversed_operands else post.args[0]
        params.ifm2.tensor = post.args[0] if params.reversed_operands else post.args[1]

        activation_map = {"clip": "CLIP"}
        if params.activation:
            activation = activation_map[params.activation.op.name]
            clip_min = int(params.activation.attrs.a_min)
            clip_max = int(params.activation.attrs.a_max)
        else:
            activation = "NONE"
            clip_min = 0
            clip_max = 0

        # We don't yet support activation functions that need to get legalized to LUTs.
        lut = relay.const([], dtype="int8")

        inputs = [params.ifm, params.ifm2]
        inputs = self.reshape_input(inputs)

        ethosu_binary_elementwise = ethosu_ops.ethosu_binary_elementwise(
            ifm=inputs[0],
            ifm2=inputs[1],
            lut=lut,
            operator_type=params.operator_type,
            ifm_scale=float(params.ifm.q_params.scale_f32),
            ifm_zero_point=int(params.ifm.q_params.zero_point),
            ifm2_scale=float(params.ifm2.q_params.scale_f32),
            ifm2_zero_point=int(params.ifm2.q_params.zero_point),
            ofm_scale=float(params.ofm.q_params.scale_f32),
            ofm_zero_point=int(params.ofm.q_params.zero_point),
            ifm_channels=params.ifm.shape[-1] if params.ifm.shape else 1,
            ifm2_channels=params.ifm2.shape[-1] if params.ifm2.shape else 1,
            reversed_operands=params.reversed_operands,
            ofm_dtype=params.ofm.dtype,
            activation=activation,
            clip_min=clip_min,
            clip_max=clip_max,
            ifm_layout=str(params.ifm.layout),
            ifm2_layout=str(params.ifm2.layout),
            ofm_layout=str(params.ofm.layout),
        )
        output = self.reshape_output(ethosu_binary_elementwise, params.ifm.shape)
        return output
class AddRewriter(BinaryElementwiseRewriter):
    """Legalizes the ethos-u.add composite function."""

    def __init__(self):
        add_pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.AddParams.composite_name}
        )(wildcard(), wildcard())
        super().__init__(params_class=ethosu_patterns.AddParams, pattern=add_pattern)
class SubRewriter(BinaryElementwiseRewriter):
    """Legalizes the ethos-u.sub composite function."""

    def __init__(self):
        sub_pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.SubParams.composite_name}
        )(wildcard(), wildcard())
        super().__init__(params_class=ethosu_patterns.SubParams, pattern=sub_pattern)
class MulRewriter(BinaryElementwiseRewriter):
    """Legalizes the ethos-u.mul composite function."""

    def __init__(self):
        mul_pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.MulParams.composite_name}
        )(wildcard(), wildcard())
        super().__init__(params_class=ethosu_patterns.MulParams, pattern=mul_pattern)
class MinRewriter(BinaryElementwiseRewriter):
    """Legalizes the ethos-u.min composite function."""

    def __init__(self):
        min_pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.MinParams.composite_name}
        )(wildcard(), wildcard())
        super().__init__(params_class=ethosu_patterns.MinParams, pattern=min_pattern)
class MaxRewriter(BinaryElementwiseRewriter):
    """Legalizes the ethos-u.max composite function."""

    def __init__(self):
        max_pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.MaxParams.composite_name}
        )(wildcard(), wildcard())
        super().__init__(params_class=ethosu_patterns.MaxParams, pattern=max_pattern)
class ShlRewriter(BinaryElementwiseRewriter):
    """Legalizes the ethos-u.shl composite function."""

    def __init__(self):
        shl_pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.ShlParams.composite_name}
        )(wildcard(), wildcard())
        super().__init__(params_class=ethosu_patterns.ShlParams, pattern=shl_pattern)
class StridedSliceRewriter(DFPatternCallback):
    """This pass brings the strided slice out of the partitioned function"""

    def __init__(self):
        super().__init__(require_type=True, rewrite_once=True)
        self.pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.StridedSliceParams.composite_name}
        )(wildcard())

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        """Re-emit a plain strided_slice on the composite function's input."""
        params = ethosu_patterns.StridedSliceParams(post.op.body)
        # TODO(lhutton1) For an unknown reason compilation will fail for strides of 4
        # dimensions, so we cannot use params.strides as this will sometimes give
        # strides as [1, 1, 1, 1]. Since we only support strides of 1, hardcoding this
        # value for now.
        return relay.op.strided_slice(
            post.args[0],
            params.begin,
            params.end,
            strides=[1],
            axes=params.axes,
            slice_mode=params.slice_mode,
        )
class ReshapeRewriter(DFPatternCallback):
    """This pass brings the reshape out of the partitioned function"""

    def __init__(self):
        super().__init__(require_type=True, rewrite_once=True)
        self.pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.ReshapeParams.composite_name}
        )(wildcard())

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        """Re-emit a plain reshape on the composite function's input."""
        params = ethosu_patterns.ReshapeParams(post.op.body)
        return relay.op.reshape(post.args[0], newshape=params.new_shape)
class NoOpRewriter(DFPatternCallback):
    """This pass adds an identity operator to reshape and strided slice to
    avoid a no op without a consumer"""

    def __init__(self):
        super().__init__(require_type=True, rewrite_once=True)
        self.reshape = is_op("reshape")(wildcard())
        self.strided_slice = is_op("strided_slice")(wildcard())
        self.pattern = self.reshape | self.strided_slice

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        # int32 tensors are left untouched; everything else gets an identity
        # op appended so the no-op has a consumer.
        if pre.checked_type.dtype == "int32":
            return post
        return ethosu_ops.ethosu_identity(ifm=post, lut=relay.const([], dtype="int8"))
class UnaryElementwiseRewriter(DFPatternCallback):
    """
    Convert ethosu unary elementwise composite function to
    ethosu_unary_elementwise operators
    """

    def __init__(self, params_class: Type, pattern: CallPattern):
        super().__init__(require_type=True)
        # Subclasses supply the concrete params class and match pattern
        self.params_class = params_class
        self.pattern = pattern

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        """Emit an ethosu_unary_elementwise for the matched composite, padding
        the input to rank 4 and restoring the original rank afterwards."""
        params = self.params_class(post.op.body)
        params.ifm.tensor = post.args[0]

        activation_map = {"clip": "CLIP"}
        if params.activation:
            activation = activation_map[params.activation.op.name]
            clip_min = int(params.activation.attrs.a_min)
            clip_max = int(params.activation.attrs.a_max)
        else:
            activation = "NONE"
            clip_min = 0
            clip_max = 0

        # We don't yet support activation functions that use LUT.
        lut = relay.const([], dtype="int8")

        unary_input_shape = params.ifm.shape
        # If the input tensor is not 4D, enter reshapes before and after the unary operator
        if len(params.ifm.shape) == 4:
            unary_input = params.ifm.tensor
        else:
            # Pad the shape with leading 1s up to rank 4
            pad_size = 4 - len(unary_input_shape)
            unary_input_shape = ([1] * pad_size) + unary_input_shape
            unary_input = relay.op.reshape(params.ifm.tensor, newshape=unary_input_shape)

        ethosu_unary_elementwise = ethosu_ops.ethosu_unary_elementwise(
            ifm=unary_input,
            lut=lut,
            operator_type=params.operator_type,
            ifm_scale=float(params.ifm.q_params.scale_f32),
            ifm_zero_point=int(params.ifm.q_params.zero_point),
            ofm_scale=float(params.ofm.q_params.scale_f32),
            ofm_zero_point=int(params.ofm.q_params.zero_point),
            ofm_channels=unary_input_shape[3],
            activation=activation,
            clip_min=clip_min,
            clip_max=clip_max,
            ifm_layout=str(params.ifm.layout),
            ofm_layout=str(params.ofm.layout),
        )
        # Restore the original (sub-4D) shape if the input was reshaped above
        if len(params.ifm.shape) == 4:
            op = ethosu_unary_elementwise
        else:
            op = relay.op.reshape(ethosu_unary_elementwise, newshape=params.ifm.shape)
        return op
class AbsRewriter(UnaryElementwiseRewriter):
    """Legalizes the ethos-u.abs composite function."""

    def __init__(self):
        abs_pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.AbsParams.composite_name}
        )(wildcard())
        super().__init__(params_class=ethosu_patterns.AbsParams, pattern=abs_pattern)
class MeanRewriter(DFPatternCallback):
    """Convert ethosu.mean composite functions to an equivalent legalization:
    - Case 1 (ifm qparams == ofm qparams): ethosu_pooling
    - Case 2 (else): ethosu_depthwise_conv2d
    """

    def __init__(self):
        super().__init__(require_type=True)
        self.pattern = (
            wildcard().has_attr({"Composite": ethosu_patterns.MeanParams.composite_name})
        )(wildcard())

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        """Legalize a mean over spatial axes to either an average pool (when
        the input and output quantization parameters match) or a depthwise
        convolution whose weights implement 1/(kernel area) averaging."""
        params = ethosu_patterns.MeanParams(post.op.body)
        params.ifm.tensor = post.args[0]

        ifm_shape = params.ifm.shape
        ofm_shape = params.ofm.shape
        lut = relay.const([], "int8")
        axis = params.axis
        reduced_op = params.ifm.tensor

        # Enforce 4d input
        if len(ifm_shape) < 4:
            # Prepending a batch dimension shifts the reduction axes by one
            axis = [x + 1 for x in axis]
            if len(ifm_shape) == 3:
                ifm_shape = [1, params.height, params.width, ifm_shape[2]]
            else:
                ifm_shape = [1, params.height, params.width, 1]
            reduced_op = relay.reshape(reduced_op, ifm_shape)

        # The pooling kernel covers the full extent of each reduced axis
        filter_height = ifm_shape[1] if 1 in axis else 1
        filter_width = ifm_shape[2] if 2 in axis else 1
        in_channels = out_channels = ifm_shape[-1]

        # If the height is greater than max kernel height, reshape the input
        # from [filter_height, filter_width] to [1, (filter_height*filter_width)]
        # only in the case the axis is [1, 2].
        if axis == [1, 2] and filter_height > 64:
            ifm_shape = (ifm_shape[0], 1, filter_height * filter_width, in_channels)
            filter_width = filter_height * filter_width
            filter_height = 1
            reduced_op = relay.reshape(reduced_op, ifm_shape)

        if (
            params.ifm.q_params.scale_f32 == params.ofm.q_params.scale_f32
            and params.ifm.q_params.zero_point == params.ofm.q_params.zero_point
        ):
            # Case 1: no requantization needed, so an average pool computes
            # the mean directly.
            reduced_op = ethosu_ops.ethosu_pooling(
                ifm=reduced_op,
                lut=lut,
                pooling_type="AVG",
                ifm_scale=float(params.ifm.q_params.scale_f32),
                ifm_zero_point=0,
                ofm_scale=float(params.ofm.q_params.scale_f32),
                ofm_zero_point=0,
                pool_shape=(filter_height, filter_width),
                ofm_channels=out_channels,
                ofm_dtype=params.ofm.dtype,
                rounding_mode="TRUNCATE",
            )
        else:
            # Case 2: fold the requantization into a depthwise convolution
            # with all-ones weights scaled by 1/(kernel area), and a bias that
            # subtracts the input zero point summed over the kernel.
            weight_scale = 1 / (filter_height * filter_width)
            weight_values = np.ones([out_channels, filter_height, filter_width, 1])
            bias = -1 * int(params.ifm.q_params.zero_point) * filter_height * filter_width

            scale_bias = vela_api.pack_biases(
                biases=np.ones([ifm_shape[-1]]) * bias,
                ifm_scale=params.ifm.q_params.scale_f32,
                ifm_dtype=np.dtype(params.ifm.dtype),
                # np.float64 (the builtin float) replaces the np.float alias,
                # which was deprecated in NumPy 1.20 and removed in 1.24;
                # behavior is unchanged since np.float aliased float64.
                weight_scales=np.array([weight_scale], dtype=np.float64),
                ofm_scale=params.ofm.q_params.scale_f32,
                is_activation_tanh_or_sigmoid=False,
            )
            reduced_op = ethosu_ops.ethosu_depthwise_conv2d(
                ifm=reduced_op,
                weight=relay.const(weight_values, params.ifm.dtype),
                scale_bias=relay.const(scale_bias, "uint8"),
                lut=lut,
                ifm_scale=float(params.ifm.q_params.scale_f32),
                ifm_zero_point=0,
                weight_zero_point=0,
                ofm_scale=float(params.ofm.q_params.scale_f32),
                ofm_zero_point=int(params.ofm.q_params.zero_point),
                kernel_shape=(filter_height, filter_width),
                ofm_channels=out_channels,
                rounding_mode="NATURAL",
                ofm_dtype=params.ofm.dtype,
            )

        # Reshape to original ofm shape
        if len(ofm_shape) < 4:
            reduced_op = relay.reshape(reduced_op, ofm_shape)
        return reduced_op
class SumRewriter(DFPatternCallback):
    """
    Convert ethosu.sum composite functions to pooling operations
    """

    def __init__(self):
        super().__init__(require_type=True)
        self.pattern = (
            wildcard().has_attr({"Composite": ethosu_patterns.SumParams.composite_name})
        )(wildcard())

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        """Legalize a sum reduction as a SUM pooling (int32 result) followed
        by a multiply-by-one binary elementwise that casts back to int8."""
        params = ethosu_patterns.SumParams(post.op.body)

        ifm_shape = params.ifm.shape
        ofm_shape = params.ofm.shape
        lut = relay.const([], "int8")
        reduced_op = post.args[0]

        # Enforce 4d input
        if len(ifm_shape) == 3:
            ifm_shape = [1, params.height, params.width, ifm_shape[2]]
            reduced_op = relay.reshape(reduced_op, ifm_shape)

        activation_map = {"clip": "CLIP"}
        if params.activation:
            activation = activation_map[params.activation.op.name]
            clip_min = int(params.activation.attrs.a_min)
            clip_max = int(params.activation.attrs.a_max)
        else:
            activation = "NONE"
            clip_min = 0
            clip_max = 0

        reduced_op = ethosu_ops.ethosu_pooling(
            ifm=reduced_op,
            lut=lut,
            pooling_type="SUM",
            ifm_scale=float(params.ifm.q_params.scale_f32),
            ifm_zero_point=int(params.ifm.q_params.zero_point),
            ofm_scale=float(params.ofm.q_params.scale_f32),
            ofm_zero_point=0,
            pool_shape=(1, 1),
            ofm_channels=1,
            ofm_dtype="int32",
            activation=activation,
            clip_min=clip_min,
            clip_max=clip_max,
            ifm_layout=params.ifm.layout,
            ofm_layout=params.ofm.layout,
            rounding_mode="NATURAL",
        )

        # Convert tensor dtype from int32 to int8
        scalar_tensor = relay.const(np.ones([1, 1, 1, 1], dtype="int32"), dtype="int32")
        reduced_op = ethosu_ops.ethosu_binary_elementwise(
            ifm=reduced_op,
            ifm2=scalar_tensor,
            lut=lut,
            operator_type="MUL",
            ifm_scale=0.0,
            ifm_zero_point=0,
            ifm2_scale=0.0,
            ifm2_zero_point=0,
            ofm_scale=0.0,
            ofm_zero_point=int(params.ofm.q_params.zero_point),
            ifm_channels=1,
            ifm2_channels=1,
            reversed_operands=False,
            ofm_dtype="int8",
        )

        # Reshape to original ofm shape
        if len(ofm_shape) < 4:
            reduced_op = relay.reshape(reduced_op, ofm_shape)

        return reduced_op
class ConcatRewriter(DFPatternCallback):
    """The newer versions of TFLite converters return a concatenate operator that concatenates
    tensors with same QNN params (if the QNN params of tensors were initially different,
    the converter adds a requantize node), so this rewriter replaces the QNN concatenate with
    "normal" concatenate"""

    def __init__(self):
        super().__init__(require_type=True, rewrite_once=True)
        self.pattern = (
            wildcard().has_attr({"Composite": ethosu_patterns.ConcatParams.composite_name})
        )(None)

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        # Keep only the Call arguments — these are the tensors being
        # concatenated; other arguments (scales, zero points) are dropped.
        tensors = [arg for arg in post.args if isinstance(arg, tvm.relay.expr.Call)]
        axis = post.op.body.attrs.axis
        return relay.op.concatenate(relay.Tuple(tensors), axis=axis)
class RequantizeRewriter(DFPatternCallback):
    """Lower the ethos-u.requantize composite function to an NPU identity
    operation carrying the input/output quantization parameters."""

    def __init__(self):
        super().__init__(require_type=True)
        self.pattern = (
            wildcard().has_attr({"Composite": ethosu_patterns.RequantizeParams.composite_name})
        )(wildcard())

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        params = ethosu_patterns.RequantizeParams(post.op.body)
        params.ifm.tensor = post.args[0]
        return ethosu_ops.ethosu_identity(
            ifm=params.ifm.tensor,
            lut=relay.const([], "int8"),  # no lookup-table activation
            ifm_scale=float(params.ifm.q_params.scale_f32),
            ifm_zero_point=int(params.ifm.q_params.zero_point),
            ofm_scale=float(params.ofm.q_params.scale_f32),
            ofm_zero_point=int(params.ofm.q_params.zero_point),
        )
class Resize2dRewriter(DFPatternCallback):
    """
    Convert ethos-u.resize2d composite function to an equivalent operation that
    performs the relevant upsampling operation.

    Case 1: No upsampling (upscale factor of 1):
        Identity.
    Case 2: Nearest neighbor upsampling:
        1x1 pooling with 2x2 nearest neighbor upsampling.
    Case 3: Bilinear upsampling:
        2x2 average pool with 2x2 nearest neighbor upsampling.
    """

    def __init__(self):
        super().__init__(require_type=True)
        self.pattern = (
            wildcard().has_attr({"Composite": ethosu_patterns.Resize2dParams.composite_name})
        )(wildcard())

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        params = ethosu_patterns.Resize2dParams(post.op.body)
        params.ifm.tensor = post.args[0]
        lut = relay.const([], "int8")
        ifm_shape = params.ifm.shape
        in_channels = ifm_shape[-1]
        reduced_op = params.ifm.tensor
        current_size = np.array(ifm_shape[1:3])
        output_size = np.array(params.size)
        # Case 1: spatial size unchanged -- nothing to resize, emit an identity.
        if (current_size == output_size).all():
            return ethosu_ops.ethosu_identity(
                reduced_op,
                lut,
                ifm_scale=float(params.ifm.q_params.scale_f32),
                ifm_zero_point=int(params.ifm.q_params.zero_point),
                ofm_scale=float(params.ofm.q_params.scale_f32),
                ofm_zero_point=int(params.ofm.q_params.zero_point),
            )
        # Case 2 defaults: nearest neighbor -> 1x1 pool, no padding, TFL rounding.
        padding = [0, 0, 0, 0]
        rounding_mode = "TFL"
        pool_shape = [1, 1]
        if params.method == "linear":
            # Case 3: bilinear resize maps onto a 2x2 average pool.
            pool_shape = [2, 2]
            rounding_mode = "NATURAL"
            if params.coordinate_transformation_mode == "asymmetric":
                # Use SAME padding.
                ypad = Resize2dRewriter.get_required_padding(ifm_shape[1])
                xpad = Resize2dRewriter.get_required_padding(ifm_shape[2])
                # [top, left, bottom, right]; the extra pixel (if odd) goes to
                # the bottom/right edge.
                padding = [ypad // 2, xpad // 2, (ypad + 1) // 2, (xpad + 1) // 2]

        return ethosu_ops.ethosu_pooling(
            ifm=reduced_op,
            lut=lut,
            pooling_type="AVG",
            ifm_scale=float(params.ifm.q_params.scale_f32),
            ifm_zero_point=int(params.ifm.q_params.zero_point),
            ofm_scale=float(params.ofm.q_params.scale_f32),
            ofm_zero_point=int(params.ofm.q_params.zero_point),
            pool_shape=pool_shape,
            ofm_channels=in_channels,
            ofm_dtype=params.ofm.dtype,
            strides=[1, 1],
            padding=padding,
            upscale="NEAREST",
            rounding_mode=rounding_mode,
        )

    @staticmethod
    def get_required_padding(input_size: int, pool_size: int = 2) -> int:
        """Gets the amount of padding required needed to achieve
        'SAME' padding for a given axis."""
        # With stride 1 this reduces to pool_size - 1 total padding.
        needed_input = (input_size - 1) + pool_size
        total_padding = max(0, needed_input - input_size)
        return total_padding
class ExpandDimsRewriter(DFPatternCallback):
    """Legalize expand_dims by rewriting it as a reshape to the output shape."""

    def __init__(self):
        super().__init__(require_type=True, rewrite_once=True)
        self.pattern = (
            wildcard().has_attr({"Composite": ethosu_patterns.ExpandDimsParams.composite_name})
        )(None)

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        target_shape = ethosu_patterns.ExpandDimsParams(post.op.body).output.shape
        return relay.op.reshape(post.args[0], newshape=target_shape)
class SqueezeRewriter(DFPatternCallback):
    """Legalize squeeze by rewriting it as a reshape to the output shape."""

    def __init__(self):
        super().__init__(require_type=True, rewrite_once=True)
        self.pattern = (
            wildcard().has_attr({"Composite": ethosu_patterns.SqueezeParams.composite_name})
        )(None)

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        target_shape = ethosu_patterns.SqueezeParams(post.op.body).output.shape
        return relay.op.reshape(post.args[0], newshape=target_shape)
class FullyConnectedRewriter(DFPatternCallback):
    """Legalize Fully Connected (with bias and clip) to an NPU operator"""

    def __init__(self):
        super().__init__(require_type=True)
        self.pattern = (
            wildcard().has_attr({"Composite": ethosu_patterns.FullyConnectedParams.composite_name})
        )(wildcard())

    def callback(self, pre, post, node_map):
        params = ethosu_patterns.FullyConnectedParams(post.op.body)
        params.ifm.tensor = post.args[0]

        # The NPU conv2d expects a (1, 1, 1, C) NHWC feature map; reshape the
        # IFM when it is not already in that form.
        ifm = post.args[0]
        if len(params.ifm.shape) != 4 or not params.ifm.shape[1] == params.ifm.shape[2] == 1:
            ifm = relay.reshape(ifm, (1, 1, 1, params.ifm.shape[-1]))

        # Give the fully-connected weights H and W axes of size one so they
        # are in the OHWI layout the conv2d kernel expects.
        weights_ohwi = np.expand_dims(params.weights.values, axis=(1, 2))

        # Map an optional clip activation onto the hardware CLIP activation.
        if params.activation:
            activation = "CLIP"
            clip_min, clip_max = (
                int(params.activation.attrs.a_min),
                int(params.activation.attrs.a_max),
            )
        else:
            activation, clip_min, clip_max = "NONE", 0, 0

        if params.biases:
            bias_values = params.biases.tensor.data.asnumpy()
        else:
            bias_values = np.zeros((params.ofm.shape[-1]))
        scale_bias = vela_api.pack_biases(
            biases=bias_values,
            ifm_scale=params.ifm.q_params.scale_f32,
            ifm_dtype=np.dtype(params.ifm.dtype),
            weight_scales=params.weights.q_params.scale_f32,
            ofm_scale=params.ofm.q_params.scale_f32,
            is_activation_tanh_or_sigmoid=False,
        )

        ethosu_fc = ethosu_ops.ethosu_conv2d(
            ifm=ifm,
            weight=relay.const(weights_ohwi, params.weights.values.dtype),
            scale_bias=relay.const(scale_bias, "uint8"),
            lut=relay.const([], dtype="int8"),
            ifm_scale=float(params.ifm.q_params.scale_f32),
            ifm_zero_point=int(params.ifm.q_params.zero_point),
            weight_zero_point=int(params.weights.q_params.zero_point),
            ofm_scale=float(params.ofm.q_params.scale_f32),
            ofm_zero_point=int(params.ofm.q_params.zero_point),
            kernel_shape=[1, 1],
            ofm_channels=params.weights.shape[0],
            strides=(1, 1),
            padding=(0, 0, 0, 0),
            dilation=(1, 1),
            activation=activation,
            clip_min=clip_min,
            clip_max=clip_max,
            upscale="NONE",
            ifm_layout="NHWC",
            ofm_layout="NHWC",
        )

        # Restore the original OFM rank when it was not (1, 1, 1, C).
        if len(params.ofm.shape) != 4 or not params.ofm.shape[1] == params.ofm.shape[2] == 1:
            ethosu_fc = relay.reshape(ethosu_fc, params.ofm.shape)
        return ethosu_fc
class PadRewriter(DFPatternCallback):
    """Convert the ethos-u.pad2d composite function into an
    ethosu_depthwise_conv2d operation."""

    def __init__(self):
        super().__init__(require_type=True)
        self.pattern = (
            wildcard().has_attr({"Composite": ethosu_patterns.PadParams.composite_name})
        )(wildcard())

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        params = ethosu_patterns.PadParams(post.op.body)
        params.ifm.tensor = post.args[0]
        channels_map = {
            "NHWC": 3,
        }
        kernel_h, kernel_w = 1, 1
        # 1x1 depthwise kernel of ones, in OHWI layout with one input channel.
        ohwi_shape = (params.ifm.shape[-1], kernel_h, kernel_w, 1)
        weights = relay.const(np.full(ohwi_shape, 1), params.ifm.dtype)
        scale_bias = vela_api.pack_biases(
            biases=np.zeros(params.ifm.shape[-1]),
            ifm_scale=params.ifm.q_params.scale_f32,
            ifm_dtype=np.dtype(params.ifm.dtype),
            weight_scales=np.array(1.0, dtype=np.float32),
            ofm_scale=params.ofm.q_params.scale_f32,
            is_activation_tanh_or_sigmoid=False,
        )
        return ethosu_ops.ethosu_depthwise_conv2d(
            ifm=post.args[0],
            weight=weights,
            scale_bias=relay.const(scale_bias, "uint8"),
            lut=relay.const([], "int8"),
            ifm_scale=float(params.ifm.q_params.scale_f32),
            ifm_zero_point=int(params.ifm.q_params.zero_point.item()),
            weight_zero_point=0,
            ofm_scale=float(params.ofm.q_params.scale_f32),
            ofm_zero_point=int(params.ofm.q_params.zero_point.item()),
            kernel_shape=(kernel_h, kernel_w),
            ofm_channels=params.ofm.shape[channels_map[str(params.ofm.layout)]],
            strides=(1, 1),
            padding=params.padding,
            dilation=(1, 1),
            activation="NONE",
            clip_min=0,
            clip_max=0,
            upscale="NONE",
            ifm_layout=str(params.ifm.layout),
            ofm_layout=str(params.ofm.layout),
            ofm_dtype=str(params.ofm.dtype),
        )
class ChannelPadRewriter(DFPatternCallback):
    """Convert ethos-u.channel-pad composite function to the Relay concatenate operation.

    The padded channels before/after the IFM are materialized as constant
    tensors filled with the IFM zero-point, each wrapped in an NPU identity,
    and the pieces are concatenated along the channel axis.
    """

    def __init__(self):
        super().__init__(require_type=True)
        self.pattern = (
            wildcard().has_attr({"Composite": ethosu_patterns.ChannelPadParams.composite_name})
        )(wildcard())

    @staticmethod
    def _zero_point_constant(params, num_channels):
        """Build a constant with `num_channels` channels filled with the IFM
        zero-point (the quantized representation of zero)."""
        pad_shape = list(params.ifm.shape)
        pad_shape[3] = num_channels
        return relay.Constant(
            tvm.nd.array(
                np.full(
                    shape=pad_shape,
                    fill_value=int(params.ifm.q_params.zero_point),
                    dtype=params.ifm.dtype,
                )
            )
        )

    @staticmethod
    def _identity(params, ifm, lut):
        """Wrap `ifm` in an ethosu_identity carrying the composite's QNN params."""
        return ethosu_ops.ethosu_identity(
            ifm=ifm,
            lut=lut,
            ifm_scale=float(params.ifm.q_params.scale_f32),
            ifm_zero_point=int(params.ifm.q_params.zero_point),
            ofm_scale=float(params.ofm.q_params.scale_f32),
            ofm_zero_point=int(params.ofm.q_params.zero_point),
        )

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        params = ethosu_patterns.ChannelPadParams(post.op.body)
        params.ifm.tensor = post.args[0]

        lut = relay.const([], dtype="int8")
        concat_args = list()
        # pad channels before
        if params.ch_padding[0] > 0:
            pad_before = self._zero_point_constant(params, params.ch_padding[0].value)
            concat_args.append(self._identity(params, pad_before, lut))
        # the original feature map
        concat_args.append(self._identity(params, params.ifm.tensor, lut))
        # pad channels after
        if params.ch_padding[1] > 0:
            pad_after = self._zero_point_constant(params, params.ch_padding[1].value)
            concat_args.append(self._identity(params, pad_after, lut))
        return relay.op.concatenate(relay.Tuple(concat_args), axis=3)
@util.create_npu_function_pass(opt_level=1)
class LegalizeEthosU:
    """This is the pass to call graph-rewrites to perform graph transformation
    in a way such that the operations are replaced with hardware/codegen supported
    operations.
    """

    def transform_npu_function(self, _, func: relay.Function) -> relay.Function:
        """This is the method that replaces the operations with hardware/codegen supported
        operations.

        Parameters
        ----------
        _ : tvm.ir.GlobalVar
            The global variable of the function (unused).
        func : relay.Function
            The NPU external function to legalize.

        Returns
        -------
        relay.Function
            The legalized function.
        """
        # The rewriters are applied sequentially in this list order.
        rewriters = [
            PartitionedSplitRewriter(),
            SplitRewriter(),
            ChannelPadRewriter(),
            Conv2DRewriter(),
            Conv2DTransposeRewriter(),
            DepthwiseConv2DRewriter(),
            FullyConnectedRewriter(),
            MaxPoolingRewriter(),
            AvgPoolingRewriter(),
            PadRewriter(),
            AddRewriter(),
            SubRewriter(),
            MulRewriter(),
            MinRewriter(),
            MaxRewriter(),
            ShlRewriter(),
            AbsRewriter(),
            TanhRewriter(),
            HardSwishRewriter(),
            LeakyReLURewriter(),
            MeanRewriter(),
            SumRewriter(),
            SoftmaxRewriter(),
            ConcatRewriter(),
            SigmoidRewriter(),
            RequantizeRewriter(),
            Resize2dRewriter(),
            ExpandDimsRewriter(),
            SqueezeRewriter(),
            ReshapeRewriter(),
            StridedSliceRewriter(),
            NoOpRewriter(),
        ]
        for rewriter in rewriters:
            func = rewrite(rewriter, func)
        return func

    def __call__(self, *args, **kwargs):
        # pylint is unable figure out the decorated
        # class is callable, thus adding this to
        # suppress the warning.
        pass
| 60,439 | 36.988686 | 174 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/util.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""
Helper utility Enums and Functions used through out code generation.
The rest of the utility functions are misc.
Refer to the description inside such functions
"""
from inspect import signature
from enum import Enum
from typing import Union, Tuple, List
import numpy as np # type: ignore
import tvm # type: ignore
from tvm import relay
from tvm._ffi import register_object
from tvm.runtime import Object
from . import _ffi_api
class QConv2DArgs(Enum):
    """
    This is a helper enum to obtain the correct index
    of qnn.conv2d arguments.
    """

    IFM = 0  # input feature map tensor
    WEIGHTS = 1  # convolution weights tensor
    IFM_ZERO_POINT = 2  # quantization zero point of the IFM
    WEIGHTS_ZERO_POINT = 3  # quantization zero point of the weights
    IFM_SCALE = 4  # quantization scale of the IFM
    WEIGHTS_SCALE = 5  # quantization scale of the weights
class QConv2DTransposeArgs(Enum):
    """
    This is a helper enum to obtain the correct index
    of qnn.conv2d_transpose arguments.
    """

    IFM = 0  # input feature map tensor
    WEIGHTS = 1  # transposed-convolution weights tensor
    IFM_ZERO_POINT = 2  # quantization zero point of the IFM
    WEIGHTS_ZERO_POINT = 3  # quantization zero point of the weights
    IFM_SCALE = 4  # quantization scale of the IFM
    WEIGHTS_SCALE = 5  # quantization scale of the weights
class RequantArgs(Enum):
    """
    This is a helper enum to obtain the correct index
    of qnn.requantize arguments.
    """

    IFM_SCALE = 1  # input quantization scale
    IFM_ZERO_POINT = 2  # input quantization zero point
    OFM_SCALE = 3  # output quantization scale
    OFM_ZERO_POINT = 4  # output quantization zero point
class BiasAddArgs(Enum):
    """
    This is a helper enum to obtain the correct index
    of qnn.bias_add arguments.
    """

    BIASES = 1  # bias tensor argument
class ClipArgs(Enum):
    """
    This is a helper enum to obtain the correct index
    of clip arguments.
    """

    A_MIN = 1  # lower clipping bound
    A_MAX = 2  # upper clipping bound
class BinaryElementwiseArgs(Enum):
    """This is a helper enum to access the correct index
    of binary elementwise arguments
    """

    IFM = 0  # first input tensor
    IFM2 = 1  # second input tensor
    IFM_SCALE = 2  # quantization scale of the first input
    IFM_ZERO_POINT = 3  # quantization zero point of the first input
    IFM2_SCALE = 4  # quantization scale of the second input
    IFM2_ZERO_POINT = 5  # quantization zero point of the second input
    OFM_SCALE = 6  # quantization scale of the output
    OFM_ZERO_POINT = 7  # quantization zero point of the output
class QuantizeArgs(Enum):
    """
    This is a helper enum to access the correct index of
    quantize arguments
    """

    IFM = 0  # input tensor
    OFM_SCALE = 1  # quantization scale of the output
    OFM_ZERO_POINT = 2  # quantization zero point of the output
class DequantizeArgs(Enum):
    """
    This is a helper enum to access the correct index of
    dequantize arguments
    """

    IFM = 0  # input tensor
    IFM_SCALE = 1  # quantization scale of the input
    IFM_ZERO_POINT = 2  # quantization zero point of the input
class QDenseArgs(Enum):
    """
    This is a helper enum to access the correct index of
    qnn.dense arguments
    """

    IFM = 0  # input feature map tensor
    WEIGHTS = 1  # dense weights tensor
    IFM_ZERO_POINT = 2  # quantization zero point of the IFM
    WEIGHTS_ZERO_POINT = 3  # quantization zero point of the weights
    IFM_SCALE = 4  # quantization scale of the IFM
    WEIGHTS_SCALE = 5  # quantization scale of the weights
class QPadArgs(Enum):
    """
    This is a helper enum to obtain the correct index
    of nn.pad arguments.
    """

    IFM = 0  # input tensor
    IFM_ZERO_POINT = 1  # pad value, i.e. the IFM quantization zero point
def is_npu_func(func: relay.Function) -> bool:
    """Check if the given function is an NPU function.

    A function is considered an NPU function when it carries a
    "Compiler" attribute equal to "ethos-u".

    Parameters
    ----------
    func : relay.Function
        The function to check.

    Returns
    -------
    bool
        True when the function is offloaded to the NPU, False otherwise.
    """
    # bool() ensures the declared return type holds: the bare `and` chain
    # would otherwise leak a falsy `func.attrs` value (e.g. None) to callers.
    return bool(func.attrs and "Compiler" in func.attrs and func.attrs["Compiler"] == "ethos-u")
def is_composite_func(func: relay.Function, name: str) -> bool:
    """
    This method checks whether the call is to
    a composite function of a given name.

    Parameters
    ----------
    func : relay.Function
        The function to check.
    name : str
        The candidate composite name to be checked

    Returns
    --------
    a boolean
    """
    # Guard against both non-function expressions (no .attrs at all) and
    # functions without attributes: relay.Function.attrs can be None, in
    # which case calling .keys() on it would raise AttributeError.
    if not hasattr(func, "attrs") or func.attrs is None:
        return False

    if "Composite" not in func.attrs.keys():
        return False
    composite_name = func.attrs["Composite"]

    return composite_name == name
def is_named_ethosu_op(expr: tvm.relay.Expr, name: str) -> bool:
    """Checks whether a relay expression matches that of the
    named operator.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The expression to check.
    name : str
        The name of the expected operator
        (without NPU prefix "contrib.ethosu").

    Returns
    -------
    bool
        True if expression matches name, false if not.
    """
    # Guard clauses: must be a call node whose callee is a registered Op.
    if not isinstance(expr, tvm.relay.expr.Call):
        return False
    if not isinstance(expr.op, tvm.ir.op.Op):
        return False
    return expr.op.name == "contrib.ethosu." + name
def get_range_for_dtype_str(dtype: str) -> Tuple[int, int]:
    """
    Produce the min,max for a give data type.

    Parameters
    ----------
    dtype : str
        a type string (e.g., int8)

    Returns
    -------
    Tuple[int, int]
        The minimum and the maximum representable value of the dtype.
    """
    try:
        info = np.iinfo(dtype)
    except ValueError:
        # Not an integer dtype; fall back to floating-point limits.
        info = np.finfo(dtype)
    return info.min, info.max
def round_away_zero(f: Union[float, np.double, np.single, np.float32, np.float64]) -> np.float64:
    """Round the number away from zero towards +inf / -inf"""
    # Shift by half a unit in the direction of f's sign, then truncate:
    # halves therefore round away from zero.
    return np.trunc(f + np.copysign(0.5, f))
def round_up(a: int, b: int) -> int:
    """Round up to a multiple of b"""
    # Ceiling division via negated floor division, then scale back up.
    return -(-a // b) * b
def get_accelerator_config():
    """Return the accelerator variant being compiled for."""
    compiler_attrs_getter = tvm.get_global_func("relay.ext.ethos-u.get_compiler_attrs")
    return compiler_attrs_getter().accelerator_config
def is_cascader_enabled() -> bool:
    """Determine whether the cascader is enabled"""
    compiler_attrs_getter = tvm.get_global_func("relay.ext.ethos-u.get_compiler_attrs")
    return bool(compiler_attrs_getter().enable_cascader)
def is_copying_constants_disabled() -> bool:
    """Determine whether copying constants is disabled for case without cascader"""
    compiler_attrs_getter = tvm.get_global_func("relay.ext.ethos-u.get_compiler_attrs")
    return bool(compiler_attrs_getter().disable_copying_constants)
def is_striping_enabled() -> bool:
    """Determine whether striping is enabled.

    Reads the ``enable_striping`` field of the Ethos-U compiler attributes.
    (The previous docstring said "cascader" -- a copy-paste error.)
    """
    compiler_attrs = tvm.get_global_func("relay.ext.ethos-u.get_compiler_attrs")()
    return bool(compiler_attrs.enable_striping)
def get_arg_count(func):
    """Return the number of parameters declared by a Python callable."""
    return len(signature(func).parameters)
def get_dim_value(layout: str, dim: str):
    """Return the position of a dimension character within a layout string.

    Parameters
    ----------
    layout : str
        A layout string such as "NHWC".
    dim : str
        A single dimension character that must appear in ``layout``.
        (The previous ``int`` annotation was incorrect: the value is
        compared against the characters of ``layout``.)

    Returns
    -------
    int
        The index of ``dim`` inside ``layout``.
    """
    assert isinstance(layout, str)
    # list() keeps the membership test per-character, rejecting multi-char
    # substrings that `dim in layout` would accept.
    assert dim in list(layout)
    return layout.index(dim)
def calculate_size_bytes(expr):
    """Return the number of bytes required to hold the tensor of ``expr``.

    Computed from the expression's checked type as
    element-size-in-bytes * number-of-elements.
    """
    dtype = expr.checked_type.dtype
    try:
        bits = np.iinfo(dtype).bits
    except ValueError:
        # Not an integer dtype; query the floating-point info instead.
        bits = np.finfo(dtype).bits
    num_elements = np.prod(list(expr.checked_type.shape))
    return (bits // 8) * num_elements
@register_object("relay.ext.ethos-u.BaseAddress")
class BaseAddress(Object):
    """
    This is a structure to hold base addresses for pointers
    provided for the driver.
    """

    def __init__(
        self,
        name: str,
        primfunc_param_idx: int,
        region: int,
        size: int,
        is_runtime_allocation: bool = False,
    ):
        """Construct the FFI-backed BaseAddress object.

        Parameters
        ----------
        name : str
            Name identifying the buffer.
        primfunc_param_idx : int
            Index of the corresponding PrimFunc parameter
            (None for runtime allocations).
        region : int
            NPU memory region number the buffer belongs to.
        size : int
            Size of the buffer in bytes.
        is_runtime_allocation : bool
            True when the buffer is allocated at runtime rather than
            being backed by a PrimFunc parameter.
        """
        # Argument order must match the C++ relay.ext.ethos-u.BaseAddress
        # constructor exposed via FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.BaseAddress,  # type: ignore # pylint: disable=no-member
            name,
            primfunc_param_idx,
            region,
            size,
            is_runtime_allocation,
        )
@register_object("relay.ext.ethos-u.CompilationArtifact")
class CompilationArtifact(Object):
    """
    This is a structure to hold binary artifacts
    for the microNPU.
    """

    def __init__(
        self,
        function_name: str,
        command_stream: str,
        encoded_constants: str,
        base_addresses: List[BaseAddress],
    ):
        """Construct the FFI-backed CompilationArtifact object.

        Parameters
        ----------
        function_name : str
            Name of the compiled function.
        command_stream : str
            Hex string of the command stream bytes.
        encoded_constants : str
            Hex string of the concatenated encoded constant data.
        base_addresses : List[BaseAddress]
            Base addresses to be provided to the driver.
        """
        # Argument order must match the C++ relay.ext.ethos-u.CompilationArtifact
        # constructor exposed via FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.CompilationArtifact,  # type: ignore # pylint: disable=no-member
            function_name,
            command_stream,
            encoded_constants,
            base_addresses,
        )
def create_npu_function_pass(opt_level: int, name: str = ""):
    """
    A utility decorator that wraps a given class as an NPU function pass. That is,
    a pass that behaves like a function pass and only traverses NPU external
    functions. How each NPU function is mutated is defined by the
    `transform_npu_function(global_variable, relay_function)` function which should
    be created in the class that is to be decorated. See the example below.

    Example
    -------
    This small example demonstrates a pass over NPU functions that performs no
    mutation.

    @create_npu_function_pass(opt_level=1)
    class MyPass:
        def transform_npu_function(self, global_var, func):
            return func

    mod = tvm.IRModule()
    mod = MyPass()(mod)

    Parameters
    ----------
    opt_level: int
        Optimization level for the module pass.
    name: str, optional
        Name for the module pass.

    Returns
    -------
    decorator
        The npu_pass decorator.
    """

    def decorator(npu_pass_class):
        @tvm.ir.transform.module_pass(name=name, opt_level=opt_level)
        class ModulePassWrapper:
            """The wrapper for the NPU pass."""

            def __init__(self, *args, **kwargs):
                # Constructor arguments are stored and forwarded to the
                # wrapped pass class, instantiated once per NPU function.
                self.args = args
                self.kwargs = kwargs

            def transform_module(self, mod: tvm.ir.IRModule, _) -> tvm.ir.IRModule:
                # Only functions marked with Compiler="ethos-u" are visited.
                npu_functions = filter(lambda x: is_npu_func(x[1]), mod.functions.items())
                for global_var, func in npu_functions:
                    npu_pass = npu_pass_class(*self.args, **self.kwargs)
                    func = npu_pass.transform_npu_function(global_var, func)
                    mod.update_func(global_var, func)
                return mod

        return ModulePassWrapper

    return decorator
| 10,458 | 24.761084 | 97 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/tir_to_cs_translator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=use-list-literal, invalid-name
"""This source will contain code to convert TIR, as produced by
the Relay to TIR compilation process, to Vela API calls to
generate command stream.
"""
from typing import Dict, NamedTuple, Tuple, Union, List
from enum import auto
from enum import Enum
import numpy as np # type: ignore
import ethosu.vela.api as vapi # type: ignore
import tvm
from tvm.tir import stmt_functor
from tvm.relay.backend.contrib.ethosu import util
from tvm.relay.backend.contrib.ethosu import vela_api
from tvm.relay.backend.contrib.ethosu.tir import spec
from tvm.relay.backend.contrib.ethosu.tir import utils as tir_utils
class BufferType(Enum):
    """The type of information that a buffer contains."""

    constant = auto()  # buffer holds compile-time constant values
    input_or_output = auto()  # PrimFunc parameter not yet classified as input/output
    scratch = auto()  # intermediary scratch memory (memory-pool parameters)
    input = auto()  # network input
    output = auto()  # network output
    shram = auto()  # "local"-scope allocation (see extract_buffer_info)
class BufferInfo(NamedTuple):
    """A data structure to hold metadata of the buffer."""

    # If the buffer holds constants, the values will contain that otherwise None
    values: np.ndarray
    # Tensor shape; None when unknown (e.g. input/output parameters)
    shape: tvm.ir.container.Array
    # Element dtype; None when unknown
    dtype: np.dtype
    # Classification of the buffer (see BufferType)
    btype: BufferType
class AcceleratorArchConfig:
    """SHRAM/LUT sizing parameters of an Ethos-U accelerator variant."""

    def __init__(self, total_shram_banks):
        # Fixed per-bank size and LUT size; only the bank count varies
        # between accelerator variants.
        self.shram_bank_size = 1024
        self.total_shram_banks = total_shram_banks
        self.lut_size_bytes = 2048
        self.shram_size_bytes = self.total_shram_banks * self.shram_bank_size
        # The LUT occupies the top of SHRAM.
        self.lut_start_address = self.shram_size_bytes - self.lut_size_bytes
def get_accelerator_arch_config(accel_type):
    """Return the AcceleratorArchConfig for the given accelerator string."""
    # Variants differ only in the number of SHRAM banks, so build just the
    # requested configuration from a bank-count table.
    shram_banks_by_accel = {
        "ethos-u55-32": 16,
        "ethos-u55-64": 16,
        "ethos-u55-128": 24,
        "ethos-u55-256": 48,
        "ethos-u65-256": 48,
    }
    return AcceleratorArchConfig(shram_banks_by_accel[accel_type])
class RegionOffset(NamedTuple):
    """A data structure to hold region and address offset corresponding to a tensor"""

    # NPU memory region number the tensor lives in
    region: int
    # Offset of the tensor within that region; None when not yet assigned
    offset: int
def analyze_scratch_memory_acesses(mod: tvm.IRModule, candidate_regions_for_scratch: List[int]):
    """
    This function analyzes the IRModule for intermediary tensors that can be resulting
    from a offset of pool variables (via Let nodes) and/or allocate nodes. The allocate
    nodes will be folded into a single TVMBackendallocWorkspace call with offsets. Ultimately
    this will produce a mapping from each such node to a RegionOffset named tuple that
    has the region and the obtained offset, as mentioned above.

    NOTE(review): "acesses" is a typo in this public name; renaming it would
    break callers (e.g. translate()), so it is kept as-is.

    Parameters
    ----------
    mod: tvm.IRModule
        The TIR module containing ethosu extern calls
    candidate_regions_for_scratch: List[int]
        A list of region integers that could be used for scratch regions

    Returns
    -------
    scratch_region_map : Dict[tvm.tir.Var, RegionOffset]
        A map between buffer vars to scratch regions they are assigned
    tvm_backend_alloc_workspace_size : int
        The size of tvm_backend_alloc_workspace call required to service
        remaining allocate nodes if any
    tvm_backend_alloc_workspace_region : int
        The region associated with the tvm_backend_alloc_workspace
    """
    scratch_region_map = dict()
    pool_var_region_map = dict()
    # There should only be a single function
    assert len(mod.functions.items()) == 1
    primfunc = mod.functions.items()[0][1]
    if "pool_args" in primfunc.attrs.keys():
        pool_args = primfunc.attrs["pool_args"]
        for pool_arg in pool_args:
            pool_param = primfunc.params[int(pool_arg.pool_var_idx)]
            # Each memory-pool parameter claims its own scratch region; the
            # per-tensor offsets are filled in by analyze_pool_access below.
            pool_var_region_map[pool_param] = candidate_regions_for_scratch.pop()
            scratch_region_map[pool_param] = RegionOffset(
                region=pool_var_region_map[pool_param], offset=None
            )

    def analyze_pool_access(stmt):
        # Record "let var = <call>(pool_buffer[index])" bindings as the
        # pool's region together with the accessed index as the offset.
        if isinstance(stmt, tvm.tir.stmt.LetStmt):
            call_address_of = stmt.value
            load = call_address_of.args[0]
            pool_var = load.buffer.data
            scratch_region_map[stmt.var] = RegionOffset(
                region=pool_var_region_map[pool_var], offset=int(load.indices[0])
            )

    tvm.tir.stmt_functor.post_order_visit(primfunc.body, analyze_pool_access)

    # Any region left over services the remaining tir.Allocate nodes.
    dynamic_allocation_region = None
    if len(candidate_regions_for_scratch) > 0:
        dynamic_allocation_region = candidate_regions_for_scratch.pop()
    dynamic_allocation_size = 0

    # If there are tir.Allocate remaining by now, they need to be serviced via
    # dynamic_allocation calls.
    def analyze_remaining_allocates(stmt):
        nonlocal dynamic_allocation_size
        if isinstance(stmt, tvm.tir.stmt.Allocate):
            allocate = stmt
            pointer_type = allocate.buffer_var.type_annotation
            storage_scope = pointer_type.storage_scope
            if storage_scope == "global":
                dtype_bytes = np.iinfo(np.dtype(allocate.dtype)).bits // 8
                size_in_bytes = int(dtype_bytes * np.prod(list(allocate.extents)))
                # Every memory address the NPU access have to be 16 byte aligned
                size_in_bytes = util.round_up(size_in_bytes, 16)
                # Assign consecutive offsets within the single
                # dynamic-allocation region.
                address = dynamic_allocation_size
                dynamic_allocation_size += size_in_bytes
                scratch_region_map[allocate.buffer_var] = RegionOffset(
                    region=dynamic_allocation_region, offset=address
                )

    tvm.tir.stmt_functor.post_order_visit(primfunc.body, analyze_remaining_allocates)

    return (
        scratch_region_map,
        dynamic_allocation_size,
        dynamic_allocation_region,
    )
def _get_region(buffer_type, var=None, scratch_region_map=None):
    """A helper to obtain regions for buffer_types and buffer vars"""
    # Buffer types with a fixed region assignment.
    fixed_regions = {
        BufferType.constant: 0,
        BufferType.input: 3,
        BufferType.output: 4,
        BufferType.shram: int((1 << 8) | (3 << 0)),
    }
    region = fixed_regions.get(buffer_type)
    if region is not None:
        return region
    # Scratch buffers get their region from the analysis map.
    assert buffer_type == BufferType.scratch
    assert var in scratch_region_map.keys(), f"{var} is not analyzed for scratch regions"
    return scratch_region_map[var].region
def translate(tir_module, params):
    """This will take an tir module for the NPU
    and compile to command stream

    Parameters
    ----------
    tir_module : tvm.IRModule
        The TIR module containing ethosu extern calls
    params : dict
        A dictionary containing TIR primfunc argument ordering
        idx to constant NDArray map

    Returns
    -------
    cs : str
        An hex string of the bytes of command stream
    encoded_constants : str
        An hex string of the bytes that includes concat'd
        encoded weights, encoded biases and scales.
    base_addresses : List[util.BaseAddress]
        base addresses to be used by the driver
    """
    # The NPU has 6 usable regions ranging from 0-6
    # Regions 0, 3 and 4 are already used for constants, inputs and
    # outputs respectively (see _get_region()).
    # Thus, for scratch we are left with 5, 2 and 1.
    candidate_regions_for_scratch = [5, 2, 1]
    (
        scratch_region_map,
        dynamic_allocation_size,
        dynamic_allocation_region,
    ) = analyze_scratch_memory_acesses(tir_module, candidate_regions_for_scratch)
    buffer_info = extract_buffer_info(tir_module, params)
    call_extern_list = extract_call_extern_list(tir_module)
    _npu_ops = list()
    for call_extern in call_extern_list:
        _npu_ops.append(translate_ethosu_tir_call_extern(call_extern))
    # Resolve the tir.BufferLoad placeholders inside the NPU ops into
    # concrete region/offset addresses and pool the constant data.
    _npu_ops, constant_data = assign_addresses(buffer_info, _npu_ops, scratch_region_map)
    base_addresses = extract_param_base_addresses(tir_module, buffer_info, scratch_region_map)
    if dynamic_allocation_size:
        # Remaining allocations are serviced by a single runtime allocation.
        base_addresses.append(
            util.BaseAddress(
                name="dynamic_allocation",
                primfunc_param_idx=None,
                region=dynamic_allocation_region,
                size=dynamic_allocation_size,
                is_runtime_allocation=True,
            )
        )
    target_accel_config = vela_api.get_accelerator_config()
    # Lower the NPU ops to a register command stream, then wrap it in a
    # driver payload.
    cmds = vapi.npu_generate_register_command_stream(_npu_ops, target_accel_config)
    payload = vapi.npu_create_driver_payload(cmds, target_accel_config)
    return payload.hex(), constant_data, base_addresses
def extract_param_base_addresses(mod, buffer_info, scratch_region_map) -> List[util.BaseAddress]:
    """This function extracts base addresses to be used by the driver

    Parameters
    ----------
    mod : tvm.IRModule
        The TIR Module for NPU
    buffer_info : Dict[tvm.tir.Var, BufferInfo]
        Information regarding buffer vars used in the PrimFunc
    scratch_region_map : Dict[tvm.tir.Var, RegionOffset]
        A map between buffer vars and the scratch regions they are
        assigned (previously undocumented).

    Returns
    -------
    List[util.BaseAddress]
        base addresses to be used by the driver
    """
    # There should only be a single function
    assert len(mod.functions.items()) == 1
    primfunc = mod.functions.items()[0][1]

    buffer_map = tir_utils.collect_buffer_map(primfunc.body)

    base_addresses = list()
    idx = 0
    for param in primfunc.params:
        # constants are pooled together and handled specially
        # this will change after tir.allocate_const.
        # For now, we are skipping generating buffer addresses here
        if buffer_info[param].btype == BufferType.constant:
            continue

        # The two previous branches only differed in the size computation;
        # parameters without a buffer entry report a size of zero.
        size_bytes = 0
        if param in buffer_map:
            buffer = buffer_map[param]
            element_size_bytes = np.iinfo(buffer.dtype).bits // 8
            size_bytes = element_size_bytes * np.prod(list(buffer.shape))

        base_addresses.append(
            util.BaseAddress(
                param.name.replace("-", "_"),
                idx,
                _get_region(buffer_info[param].btype, param, scratch_region_map),
                size_bytes,
            )
        )
        idx += 1

    return base_addresses
def extract_call_extern_list(mod):
    """Collect every tir.call_extern intrinsic call in the module.

    Parameters
    ----------
    mod : tvm.IRModule
        The TIR Module for NPU

    Returns
    -------
    list
        of tvm.tir.Call objects
        that are tir extern calls
    """
    # There should only be a single function
    assert len(mod.functions.items()) == 1
    primfunc = mod.functions.items()[0][1]

    extern_calls = list()

    def visit(stmt):
        if isinstance(stmt, tvm.tir.Call) and stmt.op.name == "tir.call_extern":
            extern_calls.append(stmt)

    stmt_functor.post_order_visit(primfunc.body, visit)
    return extern_calls
def extract_buffer_info(
    mod: tvm.IRModule, param_dict: Dict[int, np.ndarray]
) -> Dict[str, BufferInfo]:
    """This function is to read the tvm.IRModule that
    contains Relay to TIR compiled IRModule. Thereafter,
    this will extract the buffer information as the shape
    and constant data (if any).

    Parameters
    ----------
    mod : tvm.IRModule
        The NPU TIR IRModule.
    param_dict : Dict[tvm.tir.Var, np.ndarray]
        A dictionary containing param idx --> const numpy.NDArray

    Returns
    -------
    dict : Dict[str, BufferInfo]
        A dictionary of buffer names --> BufferInfo
    """
    buffer_info = dict()
    # There should only be a single function
    assert len(mod.functions.items()) == 1
    primfunc = mod.functions.items()[0][1]
    # Constants: record values/shape/dtype keyed by the underlying buffer var.
    for param, const_data in param_dict.items():
        if isinstance(param, tvm.tir.Buffer):
            param = param.data
        buffer_info[param] = BufferInfo(
            const_data, const_data.shape, const_data.dtype, BufferType.constant
        )
    pool_param_indices = list()
    if "pool_args" in primfunc.attrs.keys():
        pool_args = primfunc.attrs["pool_args"]
        pool_param_indices = [allocated_pool_info.pool_var_idx for allocated_pool_info in pool_args]
    # Remaining parameters: memory-pool params become scratch; everything
    # else is an input or output whose shape/dtype is unknown here.
    for idx, param in enumerate(primfunc.params):
        if param not in buffer_info.keys():
            if idx in pool_param_indices:
                btype = BufferType.scratch
            else:
                btype = BufferType.input_or_output
            buffer_info[param] = BufferInfo(
                None,
                None,
                None,
                btype,
            )

    # "local"-scope allocations are recorded as BufferType.shram.
    def populate_allocate_buffer_info(stmt):
        if isinstance(stmt, tvm.tir.stmt.Allocate):
            allocate = stmt
            pointer_type = allocate.buffer_var.type_annotation
            storage_scope = pointer_type.storage_scope
            if storage_scope == "local":
                buffer_info[allocate.buffer_var] = BufferInfo(
                    None,
                    allocate.extents,
                    allocate.dtype,
                    BufferType.shram,
                )

    tvm.tir.stmt_functor.post_order_visit(primfunc.body, populate_allocate_buffer_info)

    return buffer_info
def assign_addresses(buffer_info, npu_ops, scratch_region_map):
    """This function will assign addresses to tensors
    within two buffers : scratch and constants.
    The scratch is the buffer created to hold all intermediary data
    The constants is the buffer created via unifying all the constant data
    (post-encoding).
    Parameters
    ----------
    buffer_info : dict
        This is the dictionary obtained via calling extract_buffer_info.
        The key is the buffer name to BufferInfo
    npu_ops : list
        A list of Vela NpuOps with tir.BufferLoads for addresses
    scratch_region_map : Dict[tvm.tir.Var, RegionOffset]
        A buffer_var to region and offset map.
    Returns
    -------
    npu_ops : list
        A list of Vela NpuOps with addesses within scratch and constant buffers
    constant_tensor : NDArray
        A unified constant data array of uint8 as the constant buffer
    """
    def replace_npu_fm_with_address(npu_fm):
        # Replace the BufferLoad placeholders in a feature map's tile box
        # with concrete byte addresses and a memory region.
        assert isinstance(npu_fm.tiles.addresses[0], tvm.tir.BufferLoad)
        buffer = npu_fm.tiles.addresses[0].buffer.data
        if buffer in scratch_region_map.keys():
            # Scratch buffers were already placed by the memory planner.
            address = scratch_region_map[buffer].offset
            region = scratch_region_map[buffer].region
        else:
            assert buffer in buffer_addresses.keys()
            address, buffer_type = buffer_addresses[buffer]
            region = _get_region(buffer_type)
        assert (
            len(npu_fm.tiles.addresses[0].indices) == 1
        ), "Ethos-U translation expects flattened buffers"
        # Scale the element index into a byte offset.
        # NOTE(review): np.dtype() is applied to the BufferLoad object itself
        # rather than an explicit dtype string -- presumably it resolves via
        # the load's dtype; confirm against tvm/numpy interop.
        index = npu_fm.tiles.addresses[0].indices[0] * (
            np.iinfo(np.dtype(npu_fm.tiles.addresses[0])).bits // 8
        )
        npu_fm.tiles.addresses[0] = address + int(index)
        npu_fm.tiles.addresses[1] = (
            address if isinstance(npu_fm.tiles.addresses[1], tvm.tir.BufferLoad) else 0
        )
        npu_fm.tiles.addresses[2] = (
            address if isinstance(npu_fm.tiles.addresses[2], tvm.tir.BufferLoad) else 0
        )
        npu_fm.tiles.addresses[3] = 0
        npu_fm.region = region
        return npu_fm
    def replace_npu_address_range_with_address(npu_addr_range):
        # Same replacement as above, but for NpuAddressRange objects
        # (weights / biases / DMA source and destination).
        assert isinstance(npu_addr_range.address, tvm.tir.BufferLoad)
        buffer = npu_addr_range.address.buffer.data
        index = int(
            npu_addr_range.address.indices[0]
            * (np.iinfo(np.dtype(npu_addr_range.address)).bits // 8)
        )
        if buffer in scratch_region_map.keys():
            return vapi.NpuAddressRange(
                scratch_region_map[buffer].region,
                scratch_region_map[buffer].offset + index,
                npu_addr_range.length,
            )
        assert buffer in buffer_addresses.keys(), f"searching for buffer : {buffer}, but not found"
        address, buffer_type = buffer_addresses[buffer]
        # NOTE(review): the non-scratch path offsets by the raw element index
        # (indices[0].value), not the byte-scaled `index` used above for the
        # scratch path -- confirm whether this asymmetry is intentional.
        address = address + int(npu_addr_range.address.indices[0].value)
        return vapi.NpuAddressRange(_get_region(buffer_type), address, npu_addr_range.length)
    def replace_tir_loads(npu_object):
        # Dispatch on the Vela object type; anything else passes through unchanged.
        if isinstance(npu_object, vapi.NpuFeatureMap):
            return replace_npu_fm_with_address(npu_object)
        if isinstance(npu_object, vapi.NpuAddressRange):
            return replace_npu_address_range_with_address(npu_object)
        return npu_object
    def classify_io(buffer):
        # Decide whether an input_or_output buffer is read (input) or written
        # (output) by scanning every block operation's ifm/ifm2/ofm.
        for _npu_op in npu_ops:
            if issubclass(type(_npu_op), vapi.NpuBlockOperation):
                if _npu_op.ifm and _npu_op.ifm.tiles.addresses[0].buffer.data == buffer:
                    return BufferType.input
                if _npu_op.ifm2 and _npu_op.ifm2.tiles.addresses[0].buffer.data == buffer:
                    return BufferType.input
                if _npu_op.ofm and _npu_op.ofm.tiles.addresses[0].buffer.data == buffer:
                    return BufferType.output
        raise ValueError(f"Unused IO : {buffer} in tir module.")
    constant_hex_data = []
    total_constant_len = 0
    buffer_addresses = dict()
    for _buffer, info in buffer_info.items():
        if info.values is not None:
            # Constant buffer: append its (16-byte aligned) data to the
            # unified constant blob and record its byte offset.
            assert info.btype == BufferType.constant
            assert len(info.shape) == 1
            buffer_addresses[_buffer] = (
                (total_constant_len, info.btype) if constant_hex_data else (0, info.btype)
            )
            dtype_bytes = np.iinfo(np.dtype(info.dtype)).bits // 8
            size_in_bytes = dtype_bytes * np.prod(list(info.shape))
            # Every memory address the NPU access have to be 16 byte aligned
            size_in_bytes = util.round_up(size_in_bytes, 16)
            constant_tensor = np.resize(info.values, size_in_bytes // dtype_bytes)
            constant_tensor = constant_tensor.tobytes().hex()
            constant_hex_data.append(constant_tensor)
            # Two hex characters encode one byte.
            total_constant_len += len(constant_tensor) // 2
        else:
            if info.btype == BufferType.input_or_output or info.btype == BufferType.input:
                buffer_type = info.btype
                if info.btype == BufferType.input_or_output:
                    buffer_type = classify_io(_buffer)
                assert buffer_type in (BufferType.input, BufferType.output)
                address = 0
                buffer_addresses[_buffer] = (address, buffer_type)
                # NOTE(review): shape is set to info.dtype here -- looks like
                # it should be info.shape; confirm whether shape is ever read
                # for IO buffers downstream.
                buffer_info[_buffer] = BufferInfo(
                    values=None, shape=info.dtype, dtype=info.dtype, btype=buffer_type
                )
            elif info.btype == BufferType.shram:
                # SHRAM buffers live at the architecture's fixed LUT address.
                accl_config = util.get_accelerator_config()
                arch_config = get_accelerator_arch_config(accl_config)
                address = arch_config.lut_start_address
                buffer_addresses[_buffer] = (address, info.btype)
            else:
                # These buffer_vars are already updated in scratch_region_map
                assert info.btype == BufferType.scratch
    # Walk every attribute of every op and swap BufferLoads for addresses.
    for npu_op in npu_ops:
        for attr_name, attr in npu_op.__dict__.items():
            if isinstance(attr, list):
                new_attr = list()
                for attr_ in attr:
                    new_attr.append(replace_tir_loads(attr_))
                setattr(npu_op, attr_name, new_attr)
            else:
                setattr(npu_op, attr_name, replace_tir_loads(attr))
    constant_data = "".join(constant_hex_data)
    return (npu_ops, constant_data)
def translate_ethosu_tir_call_extern(tir_call_extern):
    """Dispatch a TIR extern call to the matching translation routine.

    The first argument of the extern call names the NPU operation; the
    call is forwarded to the operation-specific translator.
    """
    dispatch_table = {
        "ethosu_conv2d": translate_ethosu_conv2d,
        "ethosu_copy": translate_ethosu_copy,
        "ethosu_depthwise_conv2d": translate_ethosu_depthwise_conv2d,
        "ethosu_pooling": translate_ethosu_pooling,
        "ethosu_binary_elementwise": translate_ethosu_binary_elementwise,
        "ethosu_identity": translate_ethosu_pooling,
        "ethosu_unary_elementwise": translate_ethosu_unary_elementwise,
    }
    call_name = tir_call_extern.args[0].value
    assert call_name in dispatch_table.keys(), f"{call_name} is not yet supported"
    translated = dispatch_table[call_name](tir_call_extern)
    # Some translators return (npu_op, extras); only the op is returned
    # here -- callers needing the extras should use the translator directly.
    return translated[0] if isinstance(translated, tuple) else translated
def translate_ethosu_copy(tir_call_extern: tvm.tir.Call) -> vapi.NpuDmaOperation:
    """Translate an ethosu_copy TIR call_extern into a Vela DMA operation.

    Parameters
    ----------
    tir_call_extern : tvm.tir.Call

    Returns
    -------
    ethosu.vela.api.NpuDmaOperation
        The vela object containing the params of ethosu_copy
    """
    # args[0] holds the extern function name; deserialize the remainder.
    serial_copy = spec.create_serial_object(spec.SerialCopy, tir_call_extern.args[1:])
    return _create_npu_dma_op(serial_copy)
def _convert_clip_bounds(npu_op: vapi.NpuBlockOperation):
    """Rewrite the clip activation's min/max from quantized integers to the
    dequantized floats the Vela API expects (modifies npu_op in place).

    Parameters
    ----------
    npu_op : vapi.NpuBlockOperation
    """
    quantization = npu_op.ofm.quantization
    qmin = npu_op.activation.min
    qmax = npu_op.activation.max
    if quantization.scale_f32:
        # Dequantize: (q - zero_point) * scale
        npu_op.activation.min = (qmin - quantization.zero_point) * quantization.scale_f32
        npu_op.activation.max = (qmax - quantization.zero_point) * quantization.scale_f32
    else:
        # No scale available: keep the quantized bounds unchanged.
        npu_op.activation.min = qmin
        npu_op.activation.max = qmax
def translate_ethosu_conv2d(tir_call_extern: tvm.tir.Call) -> Tuple[vapi.NpuConv2DOperation, int]:
    """Translate an ethosu_conv2d TIR call_extern into a Vela operation.

    Parameters
    ----------
    tir_call_extern : tvm.tir.Call
        A TIR call_extern following the Serial2DConvolution argument
        ordering (see tvm/relay/backend/contrib/ethosu/tir/spec.py).

    Returns
    -------
    ethosu.vela.api.NpuConv2DOperation
        The vela object containing the params of ethosu_conv2d
    weights_zero_point : int
        The zero point of the weights
    """
    # args[0] holds the extern function name; deserialize the remainder.
    serial_conv2d = spec.create_serial_object(spec.Serial2DConvolution, tir_call_extern.args[1:])
    return _create_npu_op_conv2d(serial_conv2d)
def _create_npu_op_conv2d(
    serial_2d_convolution: spec.Serial2DConvolution,
) -> Tuple[vapi.NpuConv2DOperation, int]:
    """This is a helper function to capture a list
    of arguments to create Vela NpuConv2DOperation object.
    """
    # An address of -1 marks the second weight/bias slot as unused.
    has_two_weights = serial_2d_convolution.weight2.address != -1
    has_two_biases = serial_2d_convolution.scale_bias2.address != -1
    npu_conv2d_op = vapi.NpuConv2DOperation()
    npu_conv2d_op.ifm = _create_npu_feature_map(serial_2d_convolution.ifm)
    npu_conv2d_op.ofm = _create_npu_feature_map(serial_2d_convolution.ofm)
    npu_conv2d_op.kernel = _create_npu_kernel(serial_2d_convolution.kernel)
    npu_conv2d_op.weights = (
        [
            _create_npu_address_range(serial_2d_convolution.weight),
            _create_npu_address_range(serial_2d_convolution.weight2),
        ]
        if has_two_weights
        else [_create_npu_address_range(serial_2d_convolution.weight)]
    )
    # NOTE(review): annotated as int in the signature but this is np.int64;
    # confirm callers do not rely on the exact type.
    weights_zero_point = np.int64(serial_2d_convolution.weight_zero_point.value)
    npu_conv2d_op.biases = (
        [
            _create_npu_address_range(serial_2d_convolution.scale_bias),
            _create_npu_address_range(serial_2d_convolution.scale_bias2),
        ]
        if has_two_biases
        else [_create_npu_address_range(serial_2d_convolution.scale_bias)]
    )
    npu_conv2d_op.padding = _create_npu_padding(serial_2d_convolution.padding)
    npu_conv2d_op.activation = _create_npu_activation(serial_2d_convolution.activation)
    # Clip bounds arrive quantized; the Vela API expects dequantized floats.
    if (
        npu_conv2d_op.activation
        and npu_conv2d_op.activation.op_type == vapi.NpuActivationOp.NONE_OR_RELU
    ):
        _convert_clip_bounds(npu_conv2d_op)
    npu_conv2d_op.rounding_mode = _create_npu_rounding_mode(serial_2d_convolution.rounding_mode)
    npu_conv2d_op.ifm_upscale = _create_npu_resampling_mode(serial_2d_convolution.upscale)
    # The OHWI weights shape is needed to pick the block traversal mode.
    weights_shape_ohwi = [
        npu_conv2d_op.ofm.shape.depth,
        npu_conv2d_op.kernel.height,
        npu_conv2d_op.kernel.width,
        npu_conv2d_op.ifm.shape.depth,
    ]
    npu_conv2d_op.block_traversal = vela_api.calculate_block_traversal_mode(
        is_depthwise=False,
        weights_shape_ohwi=weights_shape_ohwi,
        ifm_bitdepth=npu_conv2d_op.ifm.data_type.size_in_bits(),
    )
    npu_conv2d_op.block_config = _create_npu_block_config(serial_2d_convolution.block_config)
    # Fall back to Vela's optimal block config when none was serialized.
    if not npu_conv2d_op.block_config:
        target_accel_config = vela_api.get_accelerator_config()
        block_config = vela_api.get_optimal_block_config(npu_conv2d_op, target_accel_config)
        npu_conv2d_op.block_config = block_config
    return npu_conv2d_op, weights_zero_point
def translate_ethosu_depthwise_conv2d(
    tir_call_extern: tvm.tir.Call,
) -> Tuple[vapi.NpuConvDepthWiseOperation, int]:
    """Translate an ethosu_depthwise_conv2d TIR call_extern into a Vela operation.

    Parameters
    ----------
    tir_call_extern : tvm.tir.Call
        A TIR call_extern following the Serial2DDepthwise argument
        ordering (see tvm/relay/backend/contrib/ethosu/tir/spec.py).

    Returns
    -------
    ethosu.vela.api.NpuConvDepthWiseOperation
        The vela object containing the params of ethosu_depthwise_conv2d
    weights_zero_point : int
        The zero point of the weights
    """
    # args[0] holds the extern function name; deserialize the remainder.
    serial_depthwise = spec.create_serial_object(spec.Serial2DDepthwise, tir_call_extern.args[1:])
    return _create_npu_op_depthwise_conv2d(serial_depthwise)
def _create_npu_op_depthwise_conv2d(serial_2d_depthwise):
    """This is a helper function to capture a list of arguments
    to create a Vela NpuConvDepthWiseOperation object.

    Returns the operation together with the weights zero point.
    """
    npu_depthwise_conv2d_op = vapi.NpuConvDepthWiseOperation()
    npu_depthwise_conv2d_op.ifm = _create_npu_feature_map(serial_2d_depthwise.ifm)
    npu_depthwise_conv2d_op.ofm = _create_npu_feature_map(serial_2d_depthwise.ofm)
    npu_depthwise_conv2d_op.kernel = _create_npu_kernel(serial_2d_depthwise.kernel)
    npu_depthwise_conv2d_op.weights = [_create_npu_address_range(serial_2d_depthwise.weight)]
    weights_zero_point = np.int64(serial_2d_depthwise.weight_zero_point.value)
    npu_depthwise_conv2d_op.biases = [_create_npu_address_range(serial_2d_depthwise.scale_bias)]
    npu_depthwise_conv2d_op.padding = _create_npu_padding(serial_2d_depthwise.padding)
    npu_depthwise_conv2d_op.activation = _create_npu_activation(serial_2d_depthwise.activation)
    # Clip bounds arrive quantized; the Vela API expects dequantized floats.
    if (
        npu_depthwise_conv2d_op.activation
        and npu_depthwise_conv2d_op.activation.op_type == vapi.NpuActivationOp.NONE_OR_RELU
    ):
        _convert_clip_bounds(npu_depthwise_conv2d_op)
    npu_depthwise_conv2d_op.rounding_mode = _create_npu_rounding_mode(
        serial_2d_depthwise.rounding_mode
    )
    npu_depthwise_conv2d_op.ifm_upscale = _create_npu_resampling_mode(serial_2d_depthwise.upscale)
    npu_depthwise_conv2d_op.block_config = _create_npu_block_config(
        serial_2d_depthwise.block_config
    )
    # Fall back to Vela's optimal block config when none was serialized.
    if not npu_depthwise_conv2d_op.block_config:
        target_accel_config = vela_api.get_accelerator_config()
        block_config = vela_api.get_optimal_block_config(
            npu_depthwise_conv2d_op, target_accel_config
        )
        npu_depthwise_conv2d_op.block_config = block_config
    return npu_depthwise_conv2d_op, weights_zero_point
def _create_npu_feature_map(serial_feature_map: spec.SerialFeatureMap) -> vapi.NpuFeatureMap:
    """Build a Vela NpuFeatureMap from a SerialFeatureMap."""
    layouts = {"NHWC": vapi.NpuLayout.NHWC, "NHCWB16": vapi.NpuLayout.NHCWB16}
    dtypes = {
        "uint8": vapi.NpuDataType.UINT8,
        "int8": vapi.NpuDataType.INT8,
        "uint16": vapi.NpuDataType.UINT16,
        "int16": vapi.NpuDataType.INT16,
        "int32": vapi.NpuDataType.INT32,
    }
    layout_str = str(serial_feature_map.layout.value)
    dtype_str = str(serial_feature_map.data_type.value)
    # Strides are serialized in elements; Vela expects them in bytes.
    elem_bytes = np.iinfo(np.dtype(dtype_str)).bits // 8
    assert layout_str in layouts.keys()
    assert dtype_str in dtypes.keys()
    feature_map = vapi.NpuFeatureMap()
    feature_map.data_type = dtypes[dtype_str]
    feature_map.shape = vapi.NpuShape3D(
        int(serial_feature_map.height),
        int(serial_feature_map.width),
        int(serial_feature_map.channels),
    )
    feature_map.tiles = vapi.NpuTileBox(
        int(serial_feature_map.tile_height_0),
        int(serial_feature_map.tile_height_1),
        int(serial_feature_map.tile_width_0),
        [
            serial_feature_map.tile_address_0,
            serial_feature_map.tile_address_1,
            serial_feature_map.tile_address_2,
            serial_feature_map.tile_address_3,
        ],
    )
    feature_map.quantization = _create_npu_quantization(
        serial_feature_map.scale, serial_feature_map.zero_point
    )
    feature_map.layout = layouts[layout_str]
    feature_map.strides = vapi.NpuShape3D(
        int(serial_feature_map.stride_h.value) * elem_bytes,
        int(serial_feature_map.stride_w.value) * elem_bytes,
        int(serial_feature_map.stride_c.value) * elem_bytes,
    )
    return feature_map
def _create_npu_kernel(serial_kernel: spec.SerialKernel) -> vapi.NpuKernel:
    """Build a Vela NpuKernel from a SerialKernel."""
    return vapi.NpuKernel(
        w=int(serial_kernel.width),
        h=int(serial_kernel.height),
        stride_x=int(serial_kernel.stride_w),
        stride_y=int(serial_kernel.stride_h),
        dilation_x=int(serial_kernel.dilation_w),
        dilation_y=int(serial_kernel.dilation_h),
    )
def _create_npu_address_range(
    serial_address_range: spec.SerialAddressRange,
) -> vapi.NpuAddressRange:
    """Build a Vela NpuAddressRange from a SerialAddressRange.

    The region is left as 0 here; it is filled in later once buffer
    placement is known (see assign_addresses).
    """
    return vapi.NpuAddressRange(
        region=0,
        address=serial_address_range.address,
        length=int(serial_address_range.length),
    )
def _create_npu_quantization(
    scale: Union[tvm.tir.FloatImm, float],
    zero_point: Union[tvm.tir.IntImm, int],
) -> vapi.NpuQuantization:
    """Build a Vela NpuQuantization; a zero scale is mapped to None."""
    scale_f32 = float(scale)
    return vapi.NpuQuantization(
        scale_f32=None if scale_f32 == 0.0 else scale_f32,
        zero_point=int(zero_point),
    )
def _create_npu_weights_zero_point(
    zero_point: Union[int, tvm.tir.IntImm],
) -> int:
    """Convert the serialized weights zero point into a plain Python int."""
    return int(zero_point)
def _create_npu_padding(serial_padding: spec.SerialPadding) -> vapi.NpuPadding:
    """Build a Vela NpuPadding from a SerialPadding."""
    return vapi.NpuPadding(
        top=int(serial_padding.top),
        left=int(serial_padding.left),
        bottom=int(serial_padding.bottom),
        right=int(serial_padding.right),
    )
def _create_npu_block_config(serial_block_config: spec.SerialBlockConfig) -> vapi.NpuShape3D:
    """Convert a SerialBlockConfig into an NpuShape3D, or None when unset.

    A block config containing any zero dimension is treated as "not
    specified", letting the caller fall back to Vela's optimal choice.
    """
    height = int(serial_block_config.height)
    width = int(serial_block_config.width)
    depth = int(serial_block_config.depth)
    if height * width * depth == 0:
        return None
    return vapi.NpuShape3D(height=height, width=width, depth=depth)
def _create_npu_activation(serial_activation: spec.SerialActivation) -> vapi.NpuActivation:
    """This is a helper function to capture a list
    of arguments to create Vela NpuActivation object."""
    # NOTE(review): serial_activation.op is compared against plain strings
    # here but read via .value below -- presumably the spec wrapper's
    # equality delegates to the underlying string; confirm in spec.py.
    if serial_activation.op == "NONE":
        return None
    # A CLIP with both bounds at zero is treated as "no activation".
    if (
        serial_activation.op == "CLIP"
        and serial_activation.clip_min == 0
        and serial_activation.clip_max == 0
    ):
        return None
    op_map = {
        "CLIP": vapi.NpuActivationOp.NONE_OR_RELU,
        "TANH": vapi.NpuActivationOp.TABLE_LOOKUP,
        "SIGMOID": vapi.NpuActivationOp.TABLE_LOOKUP,
        "LUT": vapi.NpuActivationOp.TABLE_LOOKUP,
    }
    op = str(serial_activation.op.value)
    assert op in op_map.keys()
    act_op = vapi.NpuActivation(op_map[op])
    if serial_activation.op == "CLIP":
        # Bounds are still quantized here; _convert_clip_bounds dequantizes
        # them later when the op type is NONE_OR_RELU.
        act_op.min = int(serial_activation.clip_min.value)
        act_op.max = int(serial_activation.clip_max.value)
    if op_map[op] == vapi.NpuActivationOp.TABLE_LOOKUP:
        # Table-lookup activations use LUT index 0.
        act_op.lookup_table_index = 0
    return act_op
def _create_npu_resampling_mode(
    mode: str,
) -> vapi.NpuResamplingMode:
    """Map a serialized resampling-mode string onto the Vela enum."""
    lookup = {
        "NONE": vapi.NpuResamplingMode.NONE,
        "NEAREST": vapi.NpuResamplingMode.NEAREST,
        "ZEROS": vapi.NpuResamplingMode.TRANSPOSE,
    }
    mode_name = str(mode.value)
    assert mode_name in lookup.keys()
    return lookup[mode_name]
def _create_npu_rounding_mode(
    mode: str,
) -> vapi.NpuRoundingMode:
    """Map a serialized rounding-mode string onto the Vela enum."""
    lookup = {
        "TFL": vapi.NpuRoundingMode.TFL,
        "TRUNCATE": vapi.NpuRoundingMode.TRUNCATE,
        "NATURAL": vapi.NpuRoundingMode.NATURAL,
    }
    mode_name = str(mode.value)
    assert mode_name in lookup.keys()
    return lookup[mode_name]
def _create_npu_dma_op(serial_copy):
    """Build a Vela NpuDmaOperation from a SerialCopy."""
    elem_bytes = np.iinfo(np.dtype(serial_copy.read_address.dtype)).bits // 8
    # The buffer size in bytes must be at least 16 bytes.
    length = max(int(serial_copy.length.value) * elem_bytes, 16)

    def _addr_range(address):
        # region is filled in later by assign_addresses
        return vapi.NpuAddressRange(region=0, address=address, length=length)

    return vapi.NpuDmaOperation(
        _addr_range(serial_copy.read_address),
        _addr_range(serial_copy.write_address),
    )
def translate_ethosu_pooling(tir_call_extern: tvm.tir.Call) -> vapi.NpuPoolingOperation:
    """Translate an ethosu_pooling TIR call_extern into a Vela operation.

    Parameters
    ----------
    tir_call_extern : tvm.tir.Call
        A TIR call_extern following the SerialPooling argument ordering
        (see tvm/relay/backend/contrib/ethosu/tir/spec.py).

    Returns
    -------
    ethosu.vela.api.NpuPoolingOperation
        The vela object containing the params of ethosu_pooling
    """
    # args[0] holds the extern function name; deserialize the remainder.
    serial_pooling = spec.create_serial_object(spec.SerialPooling, tir_call_extern.args[1:])
    return _create_npu_op_pooling(serial_pooling)
def _create_npu_op_pooling(serial_pooling: spec.SerialPooling):
    """Build a Vela NpuPoolingOperation from a SerialPooling.

    Parameters
    ----------
    serial_pooling : spec.SerialPooling

    Returns
    -------
    vapi.NpuPoolingOperation

    Raises
    ------
    ValueError
        If the pooling type is not one of "AVG", "MAX" or "SUM".
    """
    pooling_type = serial_pooling.pooling_type
    if pooling_type == "AVG":
        pooling_op_type = vapi.NpuPoolingOp.AVERAGE
    elif pooling_type == "MAX":
        pooling_op_type = vapi.NpuPoolingOp.MAX
    elif pooling_type == "SUM":
        pooling_op_type = vapi.NpuPoolingOp.REDUCE_SUM
    else:
        # Previously an unsupported type fell through to an
        # UnboundLocalError; fail with an explicit message instead.
        raise ValueError(f"Unsupported pooling type: {pooling_type}")
    npu_pooling_op = vapi.NpuPoolingOperation(pooling_op_type)
    npu_pooling_op.ifm = _create_npu_feature_map(serial_pooling.ifm)
    npu_pooling_op.ofm = _create_npu_feature_map(serial_pooling.ofm)
    npu_pooling_op.kernel = _create_npu_kernel(serial_pooling.pool_shape)
    npu_pooling_op.padding = _create_npu_padding(serial_pooling.padding)
    npu_pooling_op.activation = _create_npu_activation(serial_pooling.activation)
    # Clip bounds arrive quantized; the Vela API expects dequantized floats.
    if (
        npu_pooling_op.activation
        and npu_pooling_op.activation.op_type == vapi.NpuActivationOp.NONE_OR_RELU
    ):
        _convert_clip_bounds(npu_pooling_op)
    npu_pooling_op.rounding_mode = _create_npu_rounding_mode(serial_pooling.rounding_mode)
    npu_pooling_op.ifm_upscale = _create_npu_resampling_mode(serial_pooling.upscale)
    npu_pooling_op.block_config = _create_npu_block_config(serial_pooling.block_config)
    # Fall back to Vela's optimal block config when none was serialized.
    if not npu_pooling_op.block_config:
        target_accel_config = vela_api.get_accelerator_config()
        npu_pooling_op.block_config = vela_api.get_optimal_block_config(
            npu_pooling_op, target_accel_config
        )
    return npu_pooling_op
def translate_ethosu_binary_elementwise(
    tir_call_extern: tvm.tir.Call,
) -> vapi.NpuElementWiseOperation:
    """Translate an ethosu_binary_elementwise TIR call_extern into a Vela operation.

    Parameters
    ----------
    tir_call_extern : tvm.tir.Call
        A TIR call_extern following the SerialBinaryElementwise argument
        ordering (see tvm/relay/backend/contrib/ethosu/tir/spec.py).

    Returns
    -------
    ethosu.vela.api.NpuElementWiseOperation
        The vela object containing the params of ethosu_binary_elementwise
    """
    # args[0] holds the extern function name; deserialize the remainder.
    serial_binary_elementwise = spec.create_serial_object(
        spec.SerialBinaryElementwise, tir_call_extern.args[1:]
    )
    return _create_npu_op_binary_elementwise(serial_binary_elementwise)
def _create_npu_op_binary_elementwise(serial_binary_elementwise: spec.SerialBinaryElementwise):
    """Build a Vela NpuElementWiseOperation from a SerialBinaryElementwise.

    Parameters
    ----------
    serial_binary_elementwise : spec.SerialBinaryElementwise

    Returns
    -------
    vapi.NpuElementWiseOperation

    Raises
    ------
    ValueError
        If the operator type is not one of ADD/SUB/MUL/MIN/MAX/SHR/SHL.
    """
    operator_type = serial_binary_elementwise.operator_type
    if operator_type == "ADD":
        op = vapi.NpuElementWiseOp.ADD
    elif operator_type == "SUB":
        op = vapi.NpuElementWiseOp.SUB
    elif operator_type == "MUL":
        op = vapi.NpuElementWiseOp.MUL
    elif operator_type == "MIN":
        op = vapi.NpuElementWiseOp.MIN
    elif operator_type == "MAX":
        op = vapi.NpuElementWiseOp.MAX
    elif operator_type == "SHR":
        op = vapi.NpuElementWiseOp.SHR
    elif operator_type == "SHL":
        op = vapi.NpuElementWiseOp.SHL
    else:
        # Previously an unknown operator fell through to an
        # UnboundLocalError; fail with an explicit message instead.
        raise ValueError(f"Unsupported binary elementwise operator: {operator_type}")
    npu_binary_elementwise_op = vapi.NpuElementWiseOperation(op)
    npu_binary_elementwise_op.ifm = _create_npu_feature_map(serial_binary_elementwise.ifm)
    npu_binary_elementwise_op.ifm2 = _create_npu_feature_map(serial_binary_elementwise.ifm2)
    npu_binary_elementwise_op.ofm = _create_npu_feature_map(serial_binary_elementwise.ofm)
    npu_binary_elementwise_op.reversed_operands = serial_binary_elementwise.reversed_operands
    # An optional explicit rescale overrides Vela's derived scaling.
    if serial_binary_elementwise.rescale_config.use_rescale:
        npu_binary_elementwise_op.rescale = (
            serial_binary_elementwise.rescale_config.scale.value,
            serial_binary_elementwise.rescale_config.shift.value,
        )
    npu_binary_elementwise_op.activation = _create_npu_activation(
        serial_binary_elementwise.activation
    )
    # Clip bounds arrive quantized; the Vela API expects dequantized floats.
    if (
        npu_binary_elementwise_op.activation
        and npu_binary_elementwise_op.activation.op_type == vapi.NpuActivationOp.NONE_OR_RELU
    ):
        _convert_clip_bounds(npu_binary_elementwise_op)
    npu_binary_elementwise_op.rounding_mode = _create_npu_rounding_mode(
        serial_binary_elementwise.rounding_mode
    )
    npu_binary_elementwise_op.block_config = _create_npu_block_config(
        serial_binary_elementwise.block_config
    )
    # Fall back to Vela's optimal block config when none was serialized.
    if not npu_binary_elementwise_op.block_config:
        target_accel_config = vela_api.get_accelerator_config()
        block_config = vela_api.get_optimal_block_config(
            npu_binary_elementwise_op, target_accel_config
        )
        npu_binary_elementwise_op.block_config = block_config
    return npu_binary_elementwise_op
def translate_ethosu_unary_elementwise(
    tir_extern_call: tvm.tir.Call,
) -> vapi.NpuElementWiseOperation:
    """Translate an ethosu_unary_elementwise TIR call_extern into a Vela operation.

    Parameters
    ----------
    tir_extern_call : tvm.tir.Call
        A TIR call_extern following the SerialUnaryElementwise argument
        ordering (see tvm/relay/backend/contrib/ethosu/tir/spec.py).

    Returns
    -------
    ethosu.vela.api.NpuElementWiseOperation
        The vela object containing the params of ethosu_unary_elementwise
    """
    # args[0] holds the extern function name; deserialize the remainder.
    serial_unary = spec.create_serial_object(spec.SerialUnaryElementwise, tir_extern_call.args[1:])
    return _create_npu_op_unary_elementwise(serial_unary)
def _create_npu_op_unary_elementwise(serial_unary_elementwise):
    """Build a Vela NpuElementWiseOperation from a SerialUnaryElementwise.

    Parameters
    ----------
    serial_unary_elementwise : spec.SerialUnaryElementwise

    Returns
    -------
    vapi.NpuElementWiseOperation

    Raises
    ------
    ValueError
        If the operator type is not "ABS" or "CLZ".
    """
    operator_type = serial_unary_elementwise.operator_type
    if operator_type == "ABS":
        op = vapi.NpuElementWiseOp.ABS
    elif operator_type == "CLZ":
        op = vapi.NpuElementWiseOp.CLZ
    else:
        # Previously two independent `if`s let an unknown operator fall
        # through to an UnboundLocalError; fail explicitly instead.
        raise ValueError(f"Unsupported unary elementwise operator: {operator_type}")
    npu_unary_elementwise_op = vapi.NpuElementWiseOperation(op)
    npu_unary_elementwise_op.ifm = _create_npu_feature_map(serial_unary_elementwise.ifm)
    npu_unary_elementwise_op.ofm = _create_npu_feature_map(serial_unary_elementwise.ofm)
    npu_unary_elementwise_op.activation = _create_npu_activation(
        serial_unary_elementwise.activation
    )
    # Clip bounds arrive quantized; the Vela API expects dequantized floats.
    if (
        npu_unary_elementwise_op.activation
        and npu_unary_elementwise_op.activation.op_type == vapi.NpuActivationOp.NONE_OR_RELU
    ):
        _convert_clip_bounds(npu_unary_elementwise_op)
    npu_unary_elementwise_op.rounding_mode = _create_npu_rounding_mode(
        serial_unary_elementwise.rounding_mode
    )
    npu_unary_elementwise_op.block_config = _create_npu_block_config(
        serial_unary_elementwise.block_config
    )
    # Fall back to Vela's optimal block config when none was serialized.
    # (Renamed target_accel_type -> target_accel_config for consistency
    # with the other _create_npu_op_* helpers.)
    if not npu_unary_elementwise_op.block_config:
        target_accel_config = vela_api.get_accelerator_config()
        block_config = vela_api.get_optimal_block_config(
            npu_unary_elementwise_op, target_accel_config
        )
        npu_unary_elementwise_op.block_config = block_config
    return npu_unary_elementwise_op
| 43,746 | 37.543612 | 100 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-U NPU codegen modules for Relay."""
from . import util
from . import legalize
from . import preprocess
from . import codegen
from . import vela_api
from . import tir_to_cs_translator
from . import softmax_rewriter
| 1,019 | 39.8 | 62 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/softmax_rewriter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""SoftmaxRewriter for legalization Softmax operation."""
import math
import numpy as np
from ethosu.vela import fp_math, scaling
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu import op as ethosu_ops
from tvm.relay.dataflow_pattern import DFPatternCallback, wildcard
from tvm.relay.op.contrib import ethosu as ethosu_patterns
class SoftmaxRewriter(DFPatternCallback):
"""This rewriting converts Softmax operation into a sequence of operations as in Vela."""
    def __init__(self):
        """Set up the rewriter to match composite functions tagged as softmax."""
        super().__init__(require_type=True, rewrite_once=True)
        self.params_class = ethosu_patterns.SoftMaxParams
        # Match a call to any expression carrying the softmax "Composite"
        # attribute; the trailing (None) presumably matches calls with any
        # arguments -- see the relay dataflow_pattern docs to confirm.
        self.pattern = (
            wildcard().has_attr({"Composite": ethosu_patterns.SoftMaxParams.composite_name})
        )(None)
    def generate_exp_table(self, input_scale):
        """Generate a LUT table for exponential function.
        Parameters
        ----------
        input_scale : float
            The scale for input.
        Returns
        -------
        lut : tvm.relay.expr.Constant
            LUT table for exponential function.
        """
        # Fixed-point parameters mirroring Vela's softmax implementation.
        beta = 1.0
        integer_bits = 5
        total_signed_bits = 31
        # Calculate scaling
        real_beta = min(
            np.double(beta) * np.double(input_scale) * (1 << (31 - integer_bits)),
            np.double((1 << 31) - 1.0),
        )
        scale, shift = scaling.quantise_scale(real_beta)
        shift = 31 - shift
        # Inputs whose difference from the row max is below diff_min
        # contribute a LUT value of 0.
        diff_min = -1.0 * math.floor(
            1.0
            * ((1 << integer_bits) - 1)
            * (1 << (total_signed_bits - integer_bits))
            / (1 << shift)
        )
        # Generate the exp LUT
        lut = []
        for x in range(256):
            # The table is indexed by (input - max), which is <= 0.
            input_diff = x - 255
            if input_diff >= diff_min:
                rescale = fp_math.saturating_rounding_mul32(input_diff * (1 << shift), scale)
                lut.append(fp_math.exp_on_negative_values(rescale))
            else:
                lut.append(0)
        res = np.array(lut, dtype="int32")
        return relay.const(res)
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
params = self.params_class(post.op.body)
quant_min = -128
quant_max = 127
ifm = post.args[0]
ifm_dtype = ifm.checked_type.dtype
bhw = np.prod(params.ifm.shape[:-1])
depth = params.ifm.shape[-1]
# The calculation of Softmax is similar to that in Vela
# https://review.mlplatform.org/plugins/gitiles/ml/ethos-u/ethos-u-vela/+/refs/tags/3.7.0/ethosu/vela/softmax.py#230
# PASS 0 - Depthwise Maxpool
# reshape for depthwise maxpool
ifm = relay.reshape(ifm, (1, bhw, depth, 1))
lut = relay.const([], dtype="int32")
depthwise_maxpool = ethosu_ops.ethosu_pooling(
ifm=ifm,
lut=lut,
pooling_type="MAX",
ifm_scale=float(params.ifm.q_params.scale_f32),
ifm_zero_point=int(params.ifm.q_params.zero_point),
ofm_scale=0.0,
ofm_zero_point=int(params.ifm.q_params.zero_point),
pool_shape=(1, depth),
ofm_channels=1,
ofm_dtype=ifm_dtype,
)
# PASS 1 - Sub+LUT(exp)
# move all data along the height axis, except channels
ifm = relay.reshape(ifm, (1, bhw, 1, depth))
exp_lut = self.generate_exp_table(float(params.ifm.q_params.scale_f32))
ifm_exp = ethosu_ops.ethosu_binary_elementwise(
ifm=ifm,
ifm2=depthwise_maxpool,
lut=exp_lut,
operator_type="SUB",
ifm_scale=float(params.ifm.q_params.scale_f32),
ifm_zero_point=int(params.ifm.q_params.zero_point),
ifm2_scale=0.0,
ifm2_zero_point=int(params.ifm.q_params.zero_point),
ofm_scale=1.0,
ofm_zero_point=quant_max,
ifm_channels=depth,
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="LUT",
clip_min=-255,
clip_max=0,
)
# PASS 2 - SHR
shr_const = relay.const(np.full([1, 1, 1, 1], 12, dtype="int32"))
shr = ethosu_ops.ethosu_binary_elementwise(
ifm=ifm_exp,
ifm2=shr_const,
lut=lut,
operator_type="SHR",
ifm_scale=1.0,
ifm_zero_point=0,
ifm2_scale=0.0,
ifm2_zero_point=0,
ofm_scale=0.0,
ofm_zero_point=int(params.ifm.q_params.zero_point),
ifm_channels=params.ifm.shape[-1],
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
rounding_mode="NATURAL",
)
# PASS 3 - Reduce sum
sum_of_exp = ethosu_ops.ethosu_pooling(
ifm=shr,
lut=lut,
pooling_type="SUM",
ifm_scale=0.0,
ifm_zero_point=0,
ofm_scale=0.0,
ofm_zero_point=int(params.ifm.q_params.zero_point),
pool_shape=(1, 1),
ofm_channels=1,
upscale="NONE",
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
)
# PASS 4 - CLZ
headroom_plus_one = ethosu_ops.ethosu_unary_elementwise(
ifm=sum_of_exp,
lut=lut,
operator_type="CLZ",
ifm_scale=0.0,
ifm_zero_point=0,
ofm_scale=0.0,
ofm_zero_point=int(params.ifm.q_params.zero_point),
ofm_channels=1,
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
)
# PASS 5 - Sub
headroom_offset_const = relay.const(np.full([1, bhw, 1, 1], 35, dtype="int32"))
right_shift = ethosu_ops.ethosu_binary_elementwise(
ifm=headroom_offset_const,
ifm2=headroom_plus_one,
lut=lut,
operator_type="SUB",
ifm_scale=0.0,
ifm_zero_point=0,
ifm2_scale=0.0,
ifm2_zero_point=0,
ofm_scale=0.0,
ofm_zero_point=int(params.ifm.q_params.zero_point),
ifm_channels=1,
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
)
# PASS 6 - Sub
one_const = relay.const(np.full([1, 1, 1, 1], 1, dtype="int32"))
headroom = ethosu_ops.ethosu_binary_elementwise(
ifm=headroom_plus_one,
ifm2=one_const,
lut=lut,
operator_type="SUB",
ifm_scale=0.0,
ifm_zero_point=0,
ifm2_scale=0.0,
ifm2_zero_point=0,
ofm_scale=0.0,
ofm_zero_point=int(params.ifm.q_params.zero_point),
ifm_channels=1,
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
)
# PASS 7 - SHL
shifted_sum = ethosu_ops.ethosu_binary_elementwise(
ifm=sum_of_exp,
ifm2=headroom,
lut=lut,
operator_type="SHL",
ifm_scale=0.0,
ifm_zero_point=0,
ifm2_scale=0.0,
ifm2_zero_point=0,
ofm_scale=0.0,
ofm_zero_point=int(params.ifm.q_params.zero_point),
ifm_channels=1,
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
)
# PASS 8 - Sub
shifted_one_const = relay.const(np.full([1, 1, 1, 1], 1 << 30, dtype="int32"))
shifted_sum_minus_one = ethosu_ops.ethosu_binary_elementwise(
ifm=shifted_sum,
ifm2=shifted_one_const,
lut=lut,
operator_type="SUB",
ifm_scale=0.0,
ifm_zero_point=0,
ifm2_scale=0.0,
ifm2_zero_point=0,
ofm_scale=0.0,
ofm_zero_point=int(params.ifm.q_params.zero_point),
ifm_channels=1,
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
)
# PASS 9 - SHL
shifted_sum_minus_one = ethosu_ops.ethosu_binary_elementwise(
ifm=shifted_sum_minus_one,
ifm2=one_const,
lut=lut,
operator_type="SHL",
ifm_scale=0.0,
ifm_zero_point=0,
ifm2_scale=0.0,
ifm2_zero_point=0,
ofm_scale=0.0,
ofm_zero_point=int(params.ifm.q_params.zero_point),
ifm_channels=1,
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
)
# PASS 10 - Add
f0_one_const = relay.const(np.full([1, 1, 1, 1], (1 << 31) - 1, dtype="int32"))
half_denominator = ethosu_ops.ethosu_binary_elementwise(
ifm=shifted_sum_minus_one,
ifm2=f0_one_const,
lut=lut,
operator_type="ADD",
ifm_scale=0.0,
ifm_zero_point=0,
ifm2_scale=0.0,
ifm2_zero_point=0,
ofm_scale=1.0,
ofm_zero_point=0,
ifm_channels=1,
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
use_rescale=True,
rescale_scale=1,
rescale_shift=1,
)
# PASS 11 - Mul
neg_32_over_17_const = relay.const(np.full([1, 1, 1, 1], -1010580540, dtype="int32"))
rescaled = ethosu_ops.ethosu_binary_elementwise(
ifm=half_denominator,
ifm2=neg_32_over_17_const,
lut=lut,
operator_type="MUL",
ifm_scale=1.0,
ifm_zero_point=0,
ifm2_scale=1.0,
ifm2_zero_point=0,
ofm_scale=2.0,
ofm_zero_point=0,
ifm_channels=1,
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
)
# PASS 12 - Add
const_48_over_17_const = relay.const(np.full([1, 1, 1, 1], 1515870810, dtype="int32"))
rescale_w_offset = ethosu_ops.ethosu_binary_elementwise(
ifm=rescaled,
ifm2=const_48_over_17_const,
lut=lut,
operator_type="ADD",
ifm_scale=2.0,
ifm_zero_point=0,
ifm2_scale=0.0,
ifm2_zero_point=0,
ofm_scale=1.0,
ofm_zero_point=0,
ifm_channels=1,
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
)
nr_x = rescale_w_offset
f2_one_const = relay.const(np.full([1, bhw, 1, 1], 1 << 29, dtype="int32"))
four_const = relay.const(np.full([1, 1, 1, 1], 4, dtype="int32"))
for _ in range(3):
# PASS 13, 18, 23 - Mul
half_denominator_times_x = ethosu_ops.ethosu_binary_elementwise(
ifm=nr_x,
ifm2=half_denominator,
lut=lut,
operator_type="MUL",
ifm_scale=1.0,
ifm_zero_point=0,
ifm2_scale=1.0,
ifm2_zero_point=0,
ofm_scale=2.0,
ofm_zero_point=0,
ifm_channels=1,
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
)
# PASS 14, 19, 24 - Sub
one_minus_half_denomin_times_x = ethosu_ops.ethosu_binary_elementwise(
ifm=f2_one_const,
ifm2=half_denominator_times_x,
lut=lut,
operator_type="SUB",
ifm_scale=2.0,
ifm_zero_point=0,
ifm2_scale=0.0,
ifm2_zero_point=0,
ofm_scale=1.0,
ofm_zero_point=0,
ifm_channels=1,
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
)
# PASS 15, 20, 25 - Mul
to_rescale = ethosu_ops.ethosu_binary_elementwise(
ifm=nr_x,
ifm2=one_minus_half_denomin_times_x,
lut=lut,
operator_type="MUL",
ifm_scale=1.0,
ifm_zero_point=0,
ifm2_scale=1.0,
ifm2_zero_point=0,
ofm_scale=2.0,
ofm_zero_point=0,
ifm_channels=1,
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
)
# PASS 16, 21, 26 - Mul
to_add = ethosu_ops.ethosu_binary_elementwise(
ifm=to_rescale,
ifm2=four_const,
lut=lut,
operator_type="MUL",
ifm_scale=2.0,
ifm_zero_point=0,
ifm2_scale=0.0,
ifm2_zero_point=0,
ofm_scale=0.0,
ofm_zero_point=int(params.ifm.q_params.zero_point),
ifm_channels=1,
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
)
# PASS 17, 22, 27 - Add
nr_x = ethosu_ops.ethosu_binary_elementwise(
ifm=nr_x,
ifm2=to_add,
lut=lut,
operator_type="ADD",
ifm_scale=1.0,
ifm_zero_point=0,
ifm2_scale=0.0,
ifm2_zero_point=0,
ofm_scale=1.0,
ofm_zero_point=0,
ifm_channels=1,
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
)
# PASS 28 - Mul
two_const = relay.const(np.full([1, 1, 1, 1], 2, dtype="int32"))
scale_factor = ethosu_ops.ethosu_binary_elementwise(
ifm=nr_x,
ifm2=two_const,
lut=lut,
operator_type="MUL",
ifm_scale=1.0,
ifm_zero_point=0,
ifm2_scale=0.0,
ifm2_zero_point=0,
ofm_scale=1.0,
ofm_zero_point=0,
ifm_channels=1,
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
)
# PASS 29 - Mul
scaled_exp = ethosu_ops.ethosu_binary_elementwise(
ifm=ifm_exp,
ifm2=scale_factor,
lut=lut,
operator_type="MUL",
ifm_scale=1.0,
ifm_zero_point=0,
ifm2_scale=1.0,
ifm2_zero_point=0,
ofm_scale=2.0,
ofm_zero_point=0,
ifm_channels=depth,
ifm2_channels=1,
reversed_operands=False,
ofm_dtype="int32",
activation="CLIP",
clip_min=quant_min,
clip_max=quant_max,
)
# PASS 30 - SHR
shr30_op = ethosu_ops.ethosu_binary_elementwise(
ifm=scaled_exp,
ifm2=right_shift,
lut=lut,
operator_type="SHR",
ifm_scale=2.0,
ifm_zero_point=0,
ifm2_scale=0.0,
ifm2_zero_point=0,
ofm_scale=float(params.ofm.q_params.scale_f32),
ofm_zero_point=int(params.ofm.q_params.zero_point),
ifm_channels=depth,
ifm2_channels=1,
reversed_operands=False,
rounding_mode="NATURAL",
ofm_dtype=ifm_dtype,
)
reshape = relay.reshape(shr30_op, params.ofm.shape)
return reshape
| 18,046 | 32.297048 | 124 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/preprocess.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
"""Set of passes to pre-process the IRModule to support Arm(R)-Ethos(TM)-U
NPU code generation. These set of passes will mutate both the main and the
external functions.
"""
import tvm # type: ignore
from . import _ffi_api # type: ignore
def preprocess_ext_io() -> tvm.transform.Pass:
    """Create a pass that reduces external function I/O to one tensor each.
    The inputs of an external function are concatenated into a single tensor
    and its outputs are produced as a single tensor that is split back apart
    around the call site, so every external function ends up with exactly one
    input and one output.
    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass to mutate the IO of the external functions and their calls.
    """
    ext_io_pass = _ffi_api.PreprocessExternalFuncIO()  # type: ignore # pylint: disable=no-member
    return ext_io_pass
| 1,622 | 42.864865 | 90 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/te/pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Tensor Expressions for poolings"""
from typing import Tuple
import numpy as np
from tvm import te
from tvm.contrib.ethosu.cascader import TESubgraph, EthosuPart, Propagator, register_matcher
from .dma import dma_ofm_compute, dma_ifm_compute
from .common import get_layout_transform_matrices
def pooling_compute(
    ifm: te.Tensor,
    lut: te.Tensor,
    pooling_type: str,
    ifm_scale: float,
    ifm_zero_point: int,
    ofm_scale: float,
    ofm_zero_point: int,
    pool_shape: Tuple[int, int],
    ofm_channels: int,
    ofm_dtype: str,
    strides: Tuple[int, int],
    padding: Tuple[int, int, int, int],
    activation: str,
    clip_min: int,
    clip_max: int,
    rounding_mode: str,
    upscale: str,
    ifm_layout: str,
    ofm_layout: str,
) -> te.Tensor:
    """A compute operator representing the capabilities of pooling for the NPU.
    Parameters
    ----------
    ifm : te.Tensor
        The Input Feature Map tensor (IFM).
    lut : te.Tensor
        The look-up table of values to use if activation = "LUT".
    pooling_type: str
        The type of the pooling. "AVG" - average pool, "MAX" - max pool.
    ifm_scale : float
        The quantization scale for the Input Feature Map tensor.
    ifm_zero_point : int
        The quantization zero point for the Input Feature Map tensor.
    ofm_scale : float
        The quantization scale for the Output Feature Map tensor.
    ofm_zero_point : int
        The quantization zero point for the Output Feature Map tensor.
    pool_shape : Tuple[int, int]
        The 2 dimensional pool shape as (pool_shape_height, pool_shape_width).
    ofm_channels : int
        The number of the Output Feature Map channels
    ofm_dtype : str
        The Output Feature Map tensor data type.
        "AVG" or "MAX" pooling - can be "int8", "uint8", or "int16".
        "SUM" pooling - can be "int32".
    strides : Tuple[int, int]
        The 2 dimensional strides as (stride_height, stride_width).
    padding : Tuple[int, int, int, int]
        The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
    activation : str
        The activation function to use.
        "NONE" - no activation function.
        "CLIP" - clip the output between clip_min and clip_max.
        "TANH" - tanh activation function.
        "SIGMOID" - sigmoid activation function.
        "LUT" - use a look-up table to perform the activation function.
    clip_min : int
        The minimum clipping value if activation = "CLIP".
    clip_max : int
        The maximum clipping value if activation = "CLIP".
    rounding_mode : str
        The rounding mode to apply to the Output Feature Map tensor.
        "TFL" - Tensorflow Lite rounding scheme.
        "TRUNCATE" - Truncate towards zero.
        "NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
    upscale : str
        The 2x2 upscaling mode to apply to the Input Feature Map tensor.
        "NONE" - no upscaling.
        "NEAREST" - upscale using nearest neighbour.
        "ZEROS" - upscale using zeros.
    ifm_layout : str
        The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
    ofm_layout : str
        The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
    Returns
    -------
    te.Tensor
        The OFM tensor.
    """
    assert ifm.shape[0] == 1
    assert ifm_layout in {"NHWC", "NHCWB16"}
    assert ofm_layout in {"NHWC", "NHCWB16"}
    padding = [int(v) for v in padding]
    stride_h, stride_w = [int(v) for v in strides]
    pool_shape_h, pool_shape_w = [int(v) for v in pool_shape]
    # SUM pooling reduces over every input channel; the other types are per-channel
    ifm_channels = ofm_channels if pooling_type != "SUM" else ifm.shape[-1]
    # Any 2x2 upscaling mode doubles the spatial size of the IFM read by the DMA
    upscale_factor = 2 if upscale != "NONE" else 1
    # Compute operation for the IFM DMA pipeline
    dmaed_ifm = dma_ifm_compute(
        ifm, ifm_layout, ifm_zero_point, ifm_scale, ifm_channels, padding, upscale_factor
    )
    # Pooling compute operation
    ofm_height = (dmaed_ifm.shape[1] - pool_shape_h) // stride_h + 1
    ofm_width = (dmaed_ifm.shape[2] - pool_shape_w) // stride_w + 1
    rh = te.reduce_axis((0, pool_shape_h), name="ry")
    rw = te.reduce_axis((0, pool_shape_w), name="rx")
    # Channel reduction axis is degenerate (extent 1) unless this is SUM pooling
    rc = te.reduce_axis((0, 1 if pooling_type != "SUM" else ifm_channels), name="rc")
    pooling_attrs = {
        "op": "ethosu_pooling",
        "pooling_type": pooling_type,
        "pool_shape_h": pool_shape_h,
        "pool_shape_w": pool_shape_w,
        "stride_h": stride_h,
        "stride_w": stride_w,
        "activation": activation,
        "clip_min": clip_min,
        "clip_max": clip_max,
        "rounding_mode": rounding_mode,
        "upscale": upscale,
    }
    has_lut = activation in ("TANH", "LUT", "SIGMOID")
    # This is a trick to insert the LUT tensor into the TE graph if LUT is present
    lut_expr = (lut[0] + lut[255]).astype(ifm.dtype) if has_lut else 0
    # Add the LUT tensor to the attributes to be able to later tell which tensor is the LUT
    if has_lut:
        pooling_attrs["lut"] = lut
    # NOTE(review): te.max is used regardless of pooling_type — this TE compute
    # presumably only models the data dependencies for the NPU, with the actual
    # pooling semantics taken from the attrs at codegen; confirm before relying
    # on its numeric result.
    pooling = te.compute(
        (1, ofm_height, ofm_width, ofm_channels),
        lambda nn, hh, ww, cc: te.max(
            (dmaed_ifm(nn, hh * stride_h + rh, ww * stride_w + rw, cc + rc) + lut_expr).astype(
                ofm_dtype
            ),
            axis=[rh, rw, rc],
        ),
        name="ethosu_pooling",
        attrs=pooling_attrs,
    )
    nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(int(ofm_channels))
    # Affine transform mapping an OFM region to the IFM region it depends on;
    # consumed by the cascader via the Propagator below.
    ifm_matrix = [
        [1, 0, 0, 0, 0],
        [0, stride_h, 0, 0, (pool_shape_h - stride_h)],
        [0, 0, stride_w, 0, (pool_shape_w - stride_w)],
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, 1],
    ]
    if ofm_layout == "NHCWB16":
        ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
    if ifm_layout == "NHCWB16":
        ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
    ifm_propagator = Propagator(
        ifm_matrix,
        [0, -padding[0], -padding[1], 0]
        if ifm_layout == "NHWC"
        else [0, -padding[0], 0, -padding[1], 0],
    )
    propagator_attrs = {
        "ifm_propagator": ifm_propagator,
    }
    # Compute operation for the OFM DMA pipeline
    return dma_ofm_compute(
        pooling, ofm_layout, ofm_zero_point, ofm_scale, ofm_channels, attrs=propagator_attrs
    )
@register_matcher
def match_ethosu_pooling(output_tensor, device_config):
    """Match a Tensor Expression corresponding to an NPU Pooling.
    If the Tensor Expression matches, an EthosuPart will be created that models the
    matched Tensor Expression. Otherwise, None will be returned.
    Parameters
    ----------
    output_tensor : tvm.te.Tensor
        The tensor to attempt to match with.
    device_config : EthosuDeviceConfig
        Target device configuration
    Returns
    -------
    Union[None, EthosuPart]
        The created EthosuPart if there was a match, otherwise None.
    """
    # Walk the expected DMA/compute chain from the output towards the input,
    # bailing out as soon as a stage does not carry the expected op name.
    expected_chain = (
        "ethosu_write",
        "ethosu_convert_to_nhcwb16",
        "ethosu_pooling",
        "ethosu_pad",
        "ethosu_upscale",
        "ethosu_convert_to_nhwc",
        "ethosu_read",
    )
    stages = []
    current = output_tensor
    for op_name in expected_chain:
        if current.op.name != op_name:
            return None
        stages.append(current)
        current = current.op.input_tensors[0]
    write, convert_to_nhcwb16, pool2d, _, _, convert_to_nhwc, _ = stages
    # After the walk, `current` is the tensor feeding the read stage
    input_tensors = [current]
    subgraph = TESubgraph(input_tensors, output_tensor)
    propagators = [write.op.attrs["ifm_propagator"]]
    ifm_dtype = input_tensors[0].dtype
    ofm_dtype = output_tensor.dtype
    # The pooling stage of the TE graph is always NHWC, so channels sit at axis 3
    channels = int(pool2d.shape[3])
    pool_shape_h = int(pool2d.op.attrs["pool_shape_h"])
    pool_shape_w = int(pool2d.op.attrs["pool_shape_w"])
    kernel_steps = device_config.get_kernel_steps(
        pool2d.op.name, pool_shape_h, pool_shape_w, ifm_dtype
    )
    subkernels = len(kernel_steps)
    output_layout = convert_to_nhcwb16.op.attrs["layout"]
    input_layout = convert_to_nhwc.op.attrs["layout"]
    output_quantum = device_config.get_output_quantum(output_layout)
    valid_block_configs = device_config.get_valid_block_configs(
        propagators[0],
        pool2d.op.attrs,
        output_tensor.shape,
        channels,
        channels,
        output_layout,
        input_layout,
        ifm_dtype,
        ofm_dtype,
        pool_shape_h,
        pool_shape_w,
    )
    return EthosuPart(
        subgraph,
        propagators,
        output_quantum,
        subkernels,
        valid_block_configs,
    )
| 9,830 | 33.738516 | 95 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/te/identity.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Tensor Expression for identity"""
import numpy as np
from tvm import te
from tvm.contrib.ethosu.cascader import TESubgraph, EthosuPart, Propagator, register_matcher
from .dma import read_compute, write_compute
def identity_compute(
    ifm: te.Tensor,
    lut: te.Tensor,
    ifm_scale: float,
    ifm_zero_point: int,
    ofm_scale: float,
    ofm_zero_point: int,
    activation: str,
) -> te.Tensor:
    """A compute operator for the NPU identity operator.
    Parameters
    ----------
    ifm : te.Tensor
        The Input Feature Map tensor (IFM).
    lut : te.Tensor
        The look-up table values to use if activation is "LUT", "TANH" or "SIGMOID".
    ifm_scale : float
        The quantization scale for the Input Feature Map tensor.
    ifm_zero_point : int
        The quantization zero point for the Input Feature Map tensor.
    ofm_scale : float
        The quantization scale for the Output Feature Map tensor.
    ofm_zero_point : int
        The quantization zero point for the Output Feature Map tensor.
    activation : str
        The activation function to use.
        "NONE" - no activation function.
        "TANH" - tanh activation function.
        "SIGMOID" - sigmoid activation function.
        "LUT" - use a look-up table to perform the activation function.
    Returns
    -------
    te.Tensor
        The Output Feature Map tensor.
    """
    ifm_read = read_compute(ifm, ifm_zero_point, ifm_scale)
    id_attrs = {"op": "ethosu_identity", "activation": activation}
    uses_lut = activation in ("TANH", "LUT", "SIGMOID")
    if uses_lut:
        # Record the LUT in the attributes so it can be identified later on
        id_attrs["lut"] = lut
    # Referencing the LUT inside the compute expression is a trick that pulls
    # the LUT tensor into the TE graph when it is present
    lut_expr = (lut[0] + lut[255]).astype(ifm.dtype) if uses_lut else 0
    identity = te.compute(
        ifm.shape,
        lambda *indices: (ifm_read(*indices) + lut_expr).astype(ifm.dtype),
        name="ethosu_identity",
        attrs=id_attrs,
    )
    # Identity reads exactly what it writes, so the propagator is the identity
    # transform with a zero offset
    rank = len(ifm.shape)
    ifm_propagator = Propagator(
        np.identity(rank + 1),
        np.zeros(rank, dtype="int64").tolist(),
    )
    propagator_attrs = {
        "ifm_propagator": ifm_propagator,
    }
    return write_compute(identity, ofm_zero_point, ofm_scale, attrs=propagator_attrs)
@register_matcher
def match_ethosu_identity(output_tensor, device_config):
    """Match a Tensor Expression corresponding to an NPU identity.
    If the Tensor Expression matches, an EthosuPart will be created that models the
    matched Tensor Expression. Otherwise, None will be returned.
    Parameters
    ----------
    output_tensor : tvm.te.Tensor
        The tensor to attempt to match with.
    device_config : EthosuDeviceConfig
        Target device configuration
    Returns
    -------
    Union[None, EthosuPart]
        The created EthosuPart if there was a match, otherwise None.
    """
    # Walk the expected op chain from the output towards the input, giving up
    # as soon as a stage does not carry the expected op name.
    stages = []
    current = output_tensor
    for expected_name in ("ethosu_write", "ethosu_identity", "ethosu_read"):
        if current.op.name != expected_name:
            return None
        stages.append(current)
        current = current.op.input_tensors[0]
    write, identity, _ = stages
    # After the walk, `current` is the tensor feeding the read stage
    input_tensors = [current]
    subgraph = TESubgraph(input_tensors, output_tensor)
    propagators = [write.op.attrs["ifm_propagator"]]
    ifm_dtype = input_tensors[0].dtype
    ofm_dtype = output_tensor.dtype
    ifm_shape = input_tensors[0].shape
    rank = len(ifm_shape)
    assert rank <= 4, "Input tensor shape must be <= 4 for the identity operator"
    # Tensors of rank < 3 are treated as having a single channel
    channels = int(ifm_shape[rank - 1]) if rank >= 3 else 1
    subkernels = len(device_config.get_kernel_steps(identity.op.name, 1, 1, ifm_dtype))
    input_layout = output_layout = "NHWC"
    output_quantum = device_config.get_output_quantum(output_layout)
    valid_block_configs = device_config.get_valid_block_configs(
        propagators[0],
        identity.op.attrs,
        output_tensor.shape,
        channels,
        channels,
        output_layout,
        input_layout,
        ifm_dtype,
        ofm_dtype,
        1,
        1,
    )
    return EthosuPart(
        subgraph,
        propagators,
        output_quantum,
        subkernels,
        valid_block_configs,
    )
| 5,285 | 31.231707 | 92 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/te/binary_elementwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Tensor Expressions for binary_elementwise"""
import operator
import numpy as np
from tvm import te
from tvm.contrib.ethosu.cascader import TESubgraph, EthosuPart, Propagator, register_matcher
from .dma import dma_ofm_compute, dma_ifm_compute
from .common import get_layout_transform_matrices
def binary_elementwise_compute(
    ifm: te.Tensor,
    ifm2: te.Tensor,
    lut: te.Tensor,
    operator_type: str,
    ifm_scale: float,
    ifm_zero_point: int,
    ifm2_scale: float,
    ifm2_zero_point: int,
    ofm_scale: float,
    ofm_zero_point: int,
    ifm_channels: int,
    ifm2_channels: int,
    reversed_operands: bool,
    activation: str,
    clip_min: int,
    clip_max: int,
    rounding_mode: str,
    ifm_layout: str,
    ifm2_layout: str,
    ofm_layout: str,
    ofm_dtype: str,
    use_rescale: bool,
    rescale_scale: int,
    rescale_shift: int,
) -> te.Tensor:
    """A compute operator representing the capabilities of binary_elementwise for the NPU.
    Parameters
    ----------
    ifm : te.Tensor
        The Input Feature Map tensor (IFM).
    ifm2 : te.Tensor
        The Input Feature Map tensor 2 (IFM2).
    lut : te.Tensor
        The look-up table values to use if activation = "LUT".
    operator_type: str
        The type of the binary elementwise operator.
            "ADD"
            "SUB"
            "MUL"
            "MIN"
            "MAX"
            "SHR"
            "SHL"
    ifm_scale : float
        The quantization scale for the Input Feature Map tensor.
    ifm_zero_point : int
        The quantization zero point for the Input Feature Map tensor.
    ifm2_scale : float
        The quantization scale for the Input Feature Map tensor 2.
    ifm2_zero_point : int
        The quantization zero point for the Input Feature Map tensor 1.
    ofm_scale : float
        The quantization scale for the Output Feature Map tensor.
    ofm_zero_point : int
        The quantization zero point for the Output Feature Map tensor.
    ifm_channels : int
        The number of the Input Feature Map channels.
    ifm2_channels : int
        The number of the Input Feature Map 2 channels.
    reversed_operands : bool
        True if IFM2 is the first operand and IFM is the second operand.
    activation : str
        The activation function to use.
            "NONE" - no activation function.
            "CLIP" - clip the output between clip_min and clip_max.
            "TANH" - tanh activation function.
            "SIGMOID" - sigmoid activation function.
            "LUT" - use a look-up table to perform the activation function.
        Available activations for activation type:
            {int8, uint8}: "NONE", "CLIP", "TANH", "SIGMOID", "LUT"
            {int32}: "NONE"
    clip_min : int
        The minimum clipping value if activation = "CLIP".
    clip_max : int
        The maximum clipping value if activation = "CLIP".
    rounding_mode : str
        The rounding mode to apply to the Output Feature Map tensor.
            "TFL" - Tensorflow Lite rounding scheme.
            "TRUNCATE" - Truncate towards zero.
            "NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
    ifm_layout : str, optional
        The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
    ifm2_layout : str, optional
        The layout of the Input Feature Map tensor 2. Can be "NHWC" or "NHCWB16".
    ofm_layout : str, optional
        The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
    ofm_dtype: str
        The Output Feature Map tensor type.
            MUL, ADD, SUB {IFM}->{OFM}:
              {uint8, int8 int32} -> {uint8, int8, int32}, any pairing
            MAX, MIN:
              IFM and OFM must be of the same type, one of:
              {int8, uint8}
            SHR {IFM}->{OFM}:
              {int32}->{int8, uint8, int32}, any pairing"
            SHL:
              {int32}->{int32} only
    use_rescale : bool
        Use explicit scaling if True.
    rescale_scale : int
        Scale value for rescale. For 32-bit operations scale is not applied but shift is.
    rescale_shift : int
        Shift value for rescale.
    Returns
    -------
    te.Tensor
        The Output Feature Map tensor.
    """
    assert ifm.shape[0] == 1
    assert ifm2.shape[0] == 1
    assert ifm_layout in {"NHWC", "NHCWB16"}
    assert ifm2_layout in {"NHWC", "NHCWB16"}
    assert ofm_layout in {"NHWC", "NHCWB16"}
    # Compute operation for the IFM DMA pipeline
    dmaed_ifm = dma_ifm_compute(
        ifm, ifm_layout, ifm_zero_point, ifm_scale, ifm_channels, (0, 0, 0, 0)
    )
    dmaed_ifm2 = dma_ifm_compute(
        ifm2, ifm2_layout, ifm2_zero_point, ifm2_scale, ifm2_channels, (0, 0, 0, 0)
    )
    # Binary elementwise compute operation
    ofm_height = dmaed_ifm.shape[1]
    ofm_width = dmaed_ifm.shape[2]
    binary_elementwise_attrs = {
        "op": "ethosu_binary_elementwise",
        "operator_type": operator_type,
        "reversed_operands": reversed_operands,
        "activation": activation,
        "clip_min": clip_min,
        "clip_max": clip_max,
        "rounding_mode": rounding_mode,
        "use_rescale": use_rescale,
        "rescale_scale": rescale_scale,
        "rescale_shift": rescale_shift,
    }
    # NOTE(review): "SHR" and "SHL" are mapped to add here — this TE compute
    # presumably only models data dependencies, with the real shift semantics
    # generated from operator_type at codegen; confirm before relying on its
    # numeric result.
    operators = {
        "ADD": operator.add,
        "SUB": operator.sub,
        "MUL": operator.mul,
        "MIN": te.min,
        "MAX": te.max,
        "SHR": operator.add,
        "SHL": operator.add,
    }
    # An IFM2 dimension of extent 1 is broadcast against the corresponding IFM dimension
    broadcast = [value == 1 for value in dmaed_ifm2.shape]
    has_lut = activation in ("TANH", "LUT", "SIGMOID")
    # This is a trick to insert the LUT tensor into the TE graph if LUT is present
    lut_expr = (lut[0] + lut[255]).astype(ifm.dtype) if has_lut else 0
    # Add the LUT tensor to the attributes to be able to later tell which tensor is the LUT
    if has_lut:
        binary_elementwise_attrs["lut"] = lut
    if reversed_operands:
        binary_elementwise = te.compute(
            (1, ofm_height, ofm_width, ifm_channels),
            lambda nn, hh, ww, cc: operators[operator_type](
                dmaed_ifm2(
                    0 if broadcast[0] else nn,
                    0 if broadcast[1] else hh,
                    0 if broadcast[2] else ww,
                    0 if broadcast[3] else cc,
                ).astype(ifm.dtype),
                dmaed_ifm(nn, hh, ww, cc).astype(ifm.dtype) + lut_expr,
            ).astype(ofm_dtype),
            name="ethosu_binary_elementwise",
            attrs=binary_elementwise_attrs,
        )
    else:
        binary_elementwise = te.compute(
            (1, ofm_height, ofm_width, ifm_channels),
            lambda nn, hh, ww, cc: operators[operator_type](
                dmaed_ifm(nn, hh, ww, cc).astype(ifm.dtype),
                dmaed_ifm2(
                    0 if broadcast[0] else nn,
                    0 if broadcast[1] else hh,
                    0 if broadcast[2] else ww,
                    0 if broadcast[3] else cc,
                ).astype(ifm.dtype)
                + lut_expr,
            ).astype(ofm_dtype),
            name="ethosu_binary_elementwise",
            attrs=binary_elementwise_attrs,
        )
    nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(int(ifm_channels))
    # Affine transforms mapping an OFM region to the IFM/IFM2 regions it depends
    # on; consumed by the cascader via the Propagators below. The IFM2 matrix
    # collapses broadcast dimensions to a constant index of 0.
    ifm_matrix = [
        [1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0],
        [0, 0, 1, 0, 0],
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, 1],
    ]
    ifm2_matrix = [
        [1, 0, 0, 0, 0],
        [0, (1 - int(broadcast[1])), 0, 0, int(broadcast[1])],
        [0, 0, (1 - int(broadcast[2])), 0, int(broadcast[2])],
        [0, 0, 0, (1 - int(broadcast[3])), int(broadcast[3])],
        [0, 0, 0, 0, 1],
    ]
    if ofm_layout == "NHCWB16":
        ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
        ifm2_matrix = np.matmul(ifm2_matrix, nhcwb16_to_nhwc).tolist()
    if ifm_layout == "NHCWB16":
        ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
    if ifm2_layout == "NHCWB16":
        ifm2_matrix = np.matmul(nhwc_to_nhcwb16, ifm2_matrix).tolist()
    ifm_propagator = Propagator(
        ifm_matrix,
        [0, 0, 0, 0] if ifm_layout == "NHWC" else [0, 0, 0, 0, 0],
    )
    ifm2_propagator = Propagator(
        ifm2_matrix,
        [0, 0, 0, 0] if ifm2_layout == "NHWC" else [0, 0, 0, 0, 0],
    )
    propagator_attrs = {
        "ifm_propagator": ifm_propagator,
        "ifm2_propagator": ifm2_propagator,
    }
    # Compute operation for the OFM DMA pipeline
    return dma_ofm_compute(
        binary_elementwise,
        ofm_layout,
        ofm_zero_point,
        ofm_scale,
        ifm_channels,
        attrs=propagator_attrs,
    )
@register_matcher
def match_ethosu_binary_elementwise(output_tensor, device_config):
    """Match a Tensor Expression corresponding to an NPU Binary Elementwise.
    If the Tensor Expression matches, an EthosuPart will be created that models the
    matched Tensor Expression. Otherwise, None will be returned.
    Parameters
    ----------
    output_tensor : tvm.te.Tensor
        The tensor to attempt to match with.
    device_config : EthosuDeviceConfig
        Target device configuration
    Returns
    -------
    Union[None, EthosuPart]
        The created EthosuPart if there was a match, otherwise None.
    """

    def _match_ifm_dma_chain(tensor):
        # Each IFM feeds the elementwise op through pad -> upscale ->
        # convert_to_nhwc -> read. Return (convert_to_nhwc, read) when the
        # chain matches, otherwise None.
        for expected in ("ethosu_pad", "ethosu_upscale"):
            if tensor.op.name != expected:
                return None
            tensor = tensor.op.input_tensors[0]
        if tensor.op.name != "ethosu_convert_to_nhwc":
            return None
        nhwc = tensor
        source = nhwc.op.input_tensors[0]
        if source.op.name != "ethosu_read":
            return None
        return nhwc, source

    # Match the OFM DMA chain and the elementwise compute op itself.
    write = output_tensor
    if write.op.name != "ethosu_write":
        return None
    convert_to_nhcwb16 = write.op.input_tensors[0]
    if convert_to_nhcwb16.op.name != "ethosu_convert_to_nhcwb16":
        return None
    binary_elementwise = convert_to_nhcwb16.op.input_tensors[0]
    if binary_elementwise.op.name != "ethosu_binary_elementwise":
        return None
    # Match both input DMA chains.
    ifm_chain = _match_ifm_dma_chain(binary_elementwise.op.input_tensors[0])
    if ifm_chain is None:
        return None
    ifm2_chain = _match_ifm_dma_chain(binary_elementwise.op.input_tensors[1])
    if ifm2_chain is None:
        return None
    convert_to_nhwc, read = ifm_chain
    convert_to_nhwc2, read2 = ifm2_chain
    input_tensors = [
        read.op.input_tensors[0],
        read2.op.input_tensors[0],
    ]
    subgraph = TESubgraph(input_tensors, output_tensor)
    propagators = [
        write.op.attrs["ifm_propagator"],
        write.op.attrs["ifm2_propagator"],
    ]
    ifm_dtype = input_tensors[0].dtype
    ofm_dtype = output_tensor.dtype
    output_layout = convert_to_nhcwb16.op.attrs["layout"]
    input_layout = convert_to_nhwc.op.attrs["layout"]
    input2_layout = convert_to_nhwc2.op.attrs["layout"]
    output_quantum = device_config.get_output_quantum(output_layout)
    block_config = device_config.get_elementwise_block_config(
        propagators[0],
        propagators[1],
        binary_elementwise.op.attrs,
        output_tensor.shape,
        output_layout,
        input_layout,
        input2_layout,
        ifm_dtype,
        ofm_dtype,
    )
    return EthosuPart(subgraph, propagators, output_quantum, 1, block_config)
| 12,369 | 33.553073 | 92 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/te/dma.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unnecessary-lambda
"""Tensor Expressions for operations supported by the NPU DMA engine"""
from typing import Callable, Tuple, Optional, List
import tvm # type: ignore
from tvm import te
from tvm.topi.utils import equal_const_int # type: ignore
def _pad_tensor(
    tensor: te.Tensor, pad_before: List[int], pad_after: Optional[List[int]] = None
) -> Callable:
    """Build an indexing function that reads *tensor* as if it were padded.
    Accesses that fall inside the padded border evaluate to zero; axes with
    no padding pass their index straight through.
    Parameters
    ----------
    tensor : te.Tensor
        The tensor to pad.
    pad_before : tuple of int
        The 'before' padding on each axis.
    pad_after : tuple of int
        The 'after' padding on each axis.
    Returns
    -------
    _pad : callable
        The padded tensor.
    """
    pad_after = pad_after or pad_before
    dims = len(tensor.shape)
    assert len(pad_before) == dims
    assert len(pad_after) == dims

    def _pad(*indices):
        in_bounds = []  # Bound checks, only for axes that are actually padded
        shifted = []  # Indices translated back into the unpadded tensor
        for axis in range(dims):
            before = pad_before[axis]
            if equal_const_int(before, 0) and equal_const_int(pad_after[axis], 0):
                # No padding on this axis: the index passes through untouched.
                shifted.append(indices[axis])
            else:
                shifted.append(indices[axis] - before)
                in_bounds.append(indices[axis] >= before)
                in_bounds.append(indices[axis] < tensor.shape[axis] + before)
        if not in_bounds:
            # Nothing is padded at all: behave exactly like the original tensor.
            return tensor(*shifted)
        return tvm.tir.if_then_else(
            tvm.tir.all(*in_bounds), tensor(*shifted), tvm.tir.const(0, tensor.dtype)
        )

    return _pad
def read_compute(
    tensor: te.Tensor, zero_point: int, scale: float, layout: Optional[str] = None
) -> te.Tensor:
    """A tensor expression which represents a read.
    Produces an identity copy of *tensor* carrying the quantization (and,
    optionally, layout) information as compute attributes.
    Parameters
    ----------
    tensor : te.Tensor
        The tensor to read.
    zero_point : int
        The zero point of the tensor.
    scale : float
        The scale of the tensor.
    layout : Optional[str]
        The layout of the tensor, either NHWC or NHCWB16.
    Returns
    -------
    te.Tensor
        The tensor having been read.
    """
    attrs = {"op": "ethosu_read", "zero_point": zero_point, "scale": scale}
    if layout:
        assert layout in ("NHWC", "NHCWB16")
        attrs["layout"] = layout
    return te.compute(tensor.shape, lambda *i: tensor(*i), name="ethosu_read", attrs=attrs)
def write_compute(
    tensor: te.Tensor,
    zero_point: int,
    scale: float,
    layout: Optional[str] = None,
    attrs: dict = None,
) -> te.Tensor:
    """A tensor expression which represents a write.
    Produces an identity copy of *tensor* carrying the quantization (and,
    optionally, layout) information plus any extra attributes supplied by
    the caller.
    Parameters
    ----------
    tensor : te.Tensor
        The tensor to write.
    zero_point : int
        The zero point of the tensor.
    scale : float
        The scale of the tensor.
    layout : Optional[str]
        The layout of the tensor, either NHWC or NHCWB16.
    attrs : dict, optional
        Additional attributes to add to the compute op.
    Returns
    -------
    te.Tensor
        The tensor having been written.
    """
    write_attrs = {"op": "ethosu_write", "zero_point": zero_point, "scale": scale}
    if layout:
        assert layout in ("NHWC", "NHCWB16")
        write_attrs["layout"] = layout
    # Caller-supplied attributes are merged last so they take precedence.
    write_attrs.update(attrs or {})
    return te.compute(
        tensor.shape,
        lambda *i: tensor(*i),
        name="ethosu_write",
        attrs=write_attrs,
    )
def convert_to_nhwc_compute(tensor: te.Tensor, layout: str, channels: int) -> te.Tensor:
    """Converts a tensor into NHWC layout if it's in NHWCB16 layout.
    When the current layout is NHCWB16, a reduce sum operation is inserted
    to ensure that the whole of the input tensor has a data dependency on
    the copy operation. Without this, TVM removes compute that is deemed to
    be unnecessary, which causes strides for the NPU to be calculated
    incorrectly.
    Parameters
    ----------
    tensor : te.Tensor
        The tensor to convert.
    layout : str
        The layout of the tensor, either NHWC or NHCWB16.
    channels : int
        The number of valid channels for the tensor.
    Returns
    -------
    te.Tensor
        The converted tensor in NHWC layout.
    """
    assert layout in {"NHWC", "NHCWB16"}
    convert_to_nhwc_attrs = {
        "op": "ethosu_convert_to_nhwc",
        "layout": layout,
    }
    if layout == "NHCWB16":
        # Input is (N, H, C//16, W, 16); the output flattens the channel
        # bricks back into a dense NHWC channel axis of size `channels`.
        rc = te.reduce_axis((0, 16), name="rc")
        return te.compute(
            (tensor.shape[0], tensor.shape[1], tensor.shape[3], channels),
            # The sum over rc touches every element of the 16-wide channel
            # brick, which establishes the data dependency described in the
            # docstring. NOTE(review): the summed value is presumably never
            # used numerically — this op looks like it is replaced by a DMA
            # copy during lowering; confirm against the TIR lowering pass.
            lambda nn, hh, ww, cc: te.sum(
                tensor(nn, hh, te.indexdiv(cc, 16), ww, te.indexmod(rc, 16)), axis=rc
            ),
            name="ethosu_convert_to_nhwc",
            attrs=convert_to_nhwc_attrs,
        )
    # Already NHWC: an identity copy that just carries the layout attribute.
    return te.compute(
        tensor.shape,
        lambda *i: tensor(*i),
        name="ethosu_convert_to_nhwc",
        attrs=convert_to_nhwc_attrs,
    )
def convert_to_nhcwb16_compute(tensor: te.Tensor, layout: str, channels: int) -> te.Tensor:
    """Converts a tensor into NHCWB16 layout if it's in NHWC layout.
    Parameters
    ----------
    tensor : te.Tensor
        The tensor to convert.
    layout : str
        The layout of the tensor, either NHWC or NHCWB16.
    channels : int
        The number of valid channels for the tensor.
    Returns
    -------
    te.Tensor
        The converted tensor in NHCWB16 layout.
    """
    assert layout in {"NHWC", "NHCWB16"}
    convert_to_nhcwb16_attrs = {
        "op": "ethosu_convert_to_nhcwb16",
        "layout": layout,
    }
    if layout == "NHCWB16":
        # Number of 16-channel bricks needed to hold all valid channels,
        # i.e. ceil(channels / 16).
        out_channel_bricks = te.indexdiv(channels - 1, 16) + 1
        output_shape = (1, tensor.shape[1], out_channel_bricks, tensor.shape[2], 16)
        return te.compute(
            output_shape,
            # Valid channels are re-bricked from the NHWC input; positions in
            # the final brick beyond `channels` are filled with zeros.
            lambda nn, hh, cc, ww, cb: tvm.tir.if_then_else(
                cc * 16 + cb < channels,
                tensor(nn, hh, ww, cc * 16 + cb),
                tvm.tir.IntImm(tensor.dtype, 0),
            ),
            name="ethosu_convert_to_nhcwb16",
            attrs=convert_to_nhcwb16_attrs,
        )
    # Target layout is NHWC: an identity copy carrying the layout attribute.
    return te.compute(
        tensor.shape,
        lambda *i: tensor(*i),
        name="ethosu_convert_to_nhcwb16",
        attrs=convert_to_nhcwb16_attrs,
    )
def pad_compute(tensor: te.Tensor, padding: tuple) -> te.Tensor:
    """Pad an NHWC tensor in the height and width axes.
    Parameters
    ----------
    tensor : te.Tensor
        The tensor to pad.
    padding : tuple
        The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
    Returns
    -------
    te.Tensor
        The padded tensor.
    """
    top, left, bottom, right = padding
    # Batch and channel axes are never padded.
    before = [0, int(top), int(left), 0]
    after = [0, int(bottom), int(right), 0]
    n, height, width, depth = tensor.shape
    padded = _pad_tensor(tensor, before, after)
    return te.compute(
        (n, height + top + bottom, width + left + right, depth),
        lambda nn, hh, ww, cc: padded(nn, hh, ww, cc),
        name="ethosu_pad",
        attrs={"op": "ethosu_pad"},
    )
def upscale_compute(tensor: te.Tensor, upscale_factor: int) -> te.Tensor:
    """Apply upscaling to an NHWC tensor.
    Parameters
    ----------
    tensor : te.Tensor
        The tensor to pad.
    upscale_factor : int
        The factor by which to apply upscaling.
    Returns
    -------
    te.Tensor
        The upscaled tensor.
    """
    n, height, width, depth = tensor.shape
    reason = f"The compiler only supports 2x2 upscaling, but factor was {upscale_factor}."
    assert upscale_factor in (1, 2), reason
    # Each output pixel maps back to its source pixel by integer division,
    # replicating every input pixel `upscale_factor` times per spatial axis.
    return te.compute(
        (n, height * upscale_factor, width * upscale_factor, depth),
        lambda nn, hh, ww, cc: tensor(nn, hh // upscale_factor, ww // upscale_factor, cc),
        name="ethosu_upscale",
        attrs={"op": "ethosu_upscale"},
    )
def dma_ifm_compute(
    ifm: te.Tensor,
    layout: str,
    zero_point: int,
    scale: float,
    channels: int,
    padding: Tuple[int, int, int, int],
    upscale_factor: Optional[int] = 1,
) -> te.Tensor:
    """A sequence of compute operators representing the DMA capabilities for an IFM.
    The pipeline is: read -> convert to NHWC -> upscale -> pad.
    Parameters
    ----------
    ifm : te.Tensor
        The Input Feature Map (IFM) tensor.
    layout : str
        The layout of the data, either NHWC or NHCWB16.
    zero_point : int
        The zero point of the data.
    scale : float
        The scale of the data.
    channels : int
        The number of valid channels for the data.
    padding : tuple
        The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
    upscale_factor : Optional[int]
        The factor by which to apply upscaling. By default there will be no upscaling.
    Returns
    -------
    te.Tensor
        The dma-ed IFM tensor.
    """
    ifm_read = read_compute(ifm, zero_point, scale, layout=layout)
    ifm_nhwc = convert_to_nhwc_compute(ifm_read, layout, channels)
    ifm_upscaled = upscale_compute(ifm_nhwc, upscale_factor)
    return pad_compute(ifm_upscaled, padding)
def dma_ofm_compute(
    ofm: te.Tensor, layout: str, zero_point: int, scale: float, channels: int, attrs: dict = None
) -> te.Tensor:
    """A sequence of compute operators representing the DMA capabilities for an OFM.
    The pipeline is: convert to NHCWB16 -> write.
    Parameters
    ----------
    ofm : te.Tensor
        The Output Feature Map (OFM) tensor.
    layout : str
        The layout of the data, either NHWC or NHCWB16.
    zero_point : int
        The zero point of the data.
    scale : float
        The scale of the data.
    channels : int
        The number of valid channels for the data.
    attrs : dict, optional
        Additional attributes to add to the write compute op.
    Returns
    -------
    te.Tensor
        The dma-ed OFM tensor.
    """
    ofm_nhcwb16 = convert_to_nhcwb16_compute(ofm, layout, channels)
    return write_compute(ofm_nhcwb16, zero_point, scale, layout=layout, attrs=attrs or {})
| 11,174 | 28.177546 | 97 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/te/unary_elementwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Tensor Expressions for unary_elementwise for the NPU"""
import numpy as np
from tvm import te
from tvm.contrib.ethosu.cascader import TESubgraph, EthosuPart, Propagator, register_matcher
from .dma import dma_ofm_compute, dma_ifm_compute
from .common import get_layout_transform_matrices
def unary_elementwise_compute(
    ifm: te.Tensor,
    lut: te.Tensor,
    operator_type: str,
    ifm_scale: float,
    ifm_zero_point: int,
    ofm_scale: float,
    ofm_zero_point: int,
    ofm_channels: int,
    activation: str,
    clip_min: int,
    clip_max: int,
    rounding_mode: str,
    ifm_layout: str,
    ofm_layout: str,
) -> te.Tensor:
    """A compute operator representing the capabilities of unary_elementwise for the NPU.
    Parameters
    ----------
    ifm : te.Tensor
        The Input Feature Map tensor (IFM).
    lut : te.Tensor
        The look-up table values to use if activation = "LUT".
    operator_type: str
        The type of the unary elementwise operator.
            "ABS"
            "CLZ"
    ifm_scale : float
        The quantization scale for the Input Feature Map tensor.
    ifm_zero_point : int
        The quantization zero point for the Input Feature Map tensor.
    ofm_scale : float
        The quantization scale for the Output Feature Map tensor.
    ofm_zero_point : int
        The quantization zero point for the Output Feature Map tensor.
    ofm_channels : int
        The number of OFM channels.
    activation : str
        The activation function to use.
            "NONE" - no activation function.
            "CLIP" - clip the output between clip_min and clip_max.
            "TANH" - tanh activation function.
            "SIGMOID" - sigmoid activation function.
            "LUT" - use a look-up table to perform the activation function.
    clip_min : int
        The minimum clipping value if activation = "CLIP".
    clip_max : int
        The maximum clipping value if activation = "CLIP".
    rounding_mode : str
        The rounding mode to apply to the Output Feature Map tensor.
            "TFL" - Tensorflow Lite rounding scheme.
            "TRUNCATE" - Truncate towards zero.
            "NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
    ifm_layout : str, optional
        The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
    ofm_layout : str, optional
        The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
    Returns
    -------
    te.Tensor
        The OFM tensor.
    """
    assert ifm.shape[0] == 1
    assert ifm_layout in {"NHWC", "NHCWB16"}
    assert ofm_layout in {"NHWC", "NHCWB16"}
    # Changing the ifm and ofm scale to conform with that expected by Vela API:
    # the input rescale is folded into the output scale so the IFM scale is 1.
    if ofm_scale != 0:
        ofm_scale = ifm_scale / ofm_scale
        ifm_scale = 1.0
    # Compute operation for the IFM DMA pipeline (no padding for elementwise).
    dmaed_ifm = dma_ifm_compute(
        ifm, ifm_layout, ifm_zero_point, ifm_scale, ofm_channels, (0, 0, 0, 0)
    )
    # Unary elementwise compute operation; the OFM spatial shape matches the IFM's.
    ofm_height = dmaed_ifm.shape[1]
    ofm_width = dmaed_ifm.shape[2]
    unary_elementwise_attrs = {
        "op": "ethosu_unary_elementwise",
        "operator_type": operator_type,
        "activation": activation,
        "clip_min": clip_min,
        "clip_max": clip_max,
        "rounding_mode": rounding_mode,
    }
    def clz_imp(inp):
        # Assuming that it's a 32 bit int.
        # NOTE(review): 32 - log2(x) equals count-leading-zeros only for exact
        # powers of two; presumably this expression is only a placeholder that
        # is replaced by the hardware CLZ when lowered — confirm.
        return 32 - te.log2(inp)
    operators = {"ABS": te.abs, "CLZ": clz_imp}
    unary_elementwise = te.compute(
        (1, ofm_height, ofm_width, ofm_channels),
        lambda nn, hh, ww, cc: operators[operator_type](
            dmaed_ifm(nn, hh, ww, cc).astype(ifm.dtype)
        ),
        name="ethosu_unary_elementwise",
        attrs=unary_elementwise_attrs,
    )
    nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(int(ofm_channels))
    # Identity dependency transform: each OFM element depends on the IFM
    # element at the same (n, h, w, c) coordinates.
    ifm_matrix = [
        [1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0],
        [0, 0, 1, 0, 0],
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, 1],
    ]
    # Re-express the transform in NHCWB16 coordinates on whichever side(s)
    # use the brick-formatted layout.
    if ofm_layout == "NHCWB16":
        ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
    if ifm_layout == "NHCWB16":
        ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
    ifm_propagator = Propagator(
        ifm_matrix,
        [0, 0, 0, 0] if ifm_layout == "NHWC" else [0, 0, 0, 0, 0],
    )
    propagator_attrs = {"ifm_propagator": ifm_propagator}
    # Compute operation for the OFM DMA pipeline
    return dma_ofm_compute(
        unary_elementwise,
        ofm_layout,
        ofm_zero_point,
        ofm_scale,
        ofm_channels,
        attrs=propagator_attrs,
    )
@register_matcher
def match_ethosu_unary_elementwise(output_tensor, device_config):
    """Match a Tensor Expression corresponding to an NPU Unary Elementwise.
    If the Tensor Expression matches, an EthosuPart will be created that models the
    matched Tensor Expression. Otherwise, None will be returned.
    Parameters
    ----------
    output_tensor : tvm.te.Tensor
        The tensor to attempt to match with.
    device_config : EthosuDeviceConfig
        Target device configuration
    Returns
    -------
    Union[None, EthosuPart]
        The created EthosuPart if there was a match, otherwise None.
    """
    # Walk the expected producer chain from the output back towards the
    # input, bailing out as soon as any stage has an unexpected op name.
    expected_ops = (
        "ethosu_write",
        "ethosu_convert_to_nhcwb16",
        "ethosu_unary_elementwise",
        "ethosu_pad",
        "ethosu_upscale",
        "ethosu_convert_to_nhwc",
        "ethosu_read",
    )
    stages = []
    current = output_tensor
    for expected in expected_ops:
        if current.op.name != expected:
            return None
        stages.append(current)
        current = current.op.input_tensors[0]
    write, convert_to_nhcwb16, unary_elementwise, _, _, convert_to_nhwc, read = stages
    input_tensors = [
        read.op.input_tensors[0],
    ]
    subgraph = TESubgraph(input_tensors, output_tensor)
    propagators = [
        write.op.attrs["ifm_propagator"],
    ]
    ifm_dtype = input_tensors[0].dtype
    ofm_dtype = output_tensor.dtype
    output_layout = convert_to_nhcwb16.op.attrs["layout"]
    input_layout = convert_to_nhwc.op.attrs["layout"]
    output_quantum = device_config.get_output_quantum(output_layout)
    # Unary ops have no second input, hence the None placeholders.
    block_config = device_config.get_elementwise_block_config(
        propagators[0],
        None,
        unary_elementwise.op.attrs,
        output_tensor.shape,
        output_layout,
        input_layout,
        None,
        ifm_dtype,
        ofm_dtype,
    )
    return EthosuPart(subgraph, propagators, output_quantum, 1, block_config)
| 7,763 | 31.35 | 92 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/te/depthwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Tensor Expressions for depthwise convolutions"""
from typing import Tuple, Union, List
import numpy as np
from tvm import te
from tvm.contrib.ethosu.cascader import TESubgraph, EthosuPart, Propagator, register_matcher
from .dma import dma_ofm_compute, dma_ifm_compute
from .common import get_layout_transform_matrices
def depthwise_conv2d_compute(
    ifm: te.Tensor,
    weight: te.Tensor,
    scale_bias: te.Tensor,
    lut: te.Tensor,
    ifm_scale: float,
    ifm_zero_point: int,
    weight_zero_point: int,
    ofm_scale: float,
    ofm_zero_point: int,
    strides: Tuple[int, int],
    padding: Tuple[int, int, int, int],
    dilation: Union[Tuple[int, int], List[int]],
    activation: str,
    clip_min: int,
    clip_max: int,
    rounding_mode: str,
    upscale: str,
    ifm_layout: str,
    ofm_layout: str,
    ofm_dtype: str,
) -> te.Tensor:
    """A compute operator representing the capabilities of 2D convolution for the NPU.
    Parameters
    ----------
    ifm : te.Tensor
        The Input Feature Map tensor (IFM).
    weight : te.Tensor
        The weight tensor.
    scale_bias : te.Tensor
        The packed per-channel weight scale and bias tensor.
    lut : te.Tensor
        The look-up table of values to use if activation = "LUT".
    ifm_scale : float
        The quantization scale for the Input Feature Map tensor.
    ifm_zero_point : int
        The quantization zero point for the Input Feature Map tensor.
    weight_zero_point : int
        The quantization zero point for the weight tensor.
    ofm_scale : float
        The quantization scale for the Output Feature Map tensor.
    ofm_zero_point : int
        The quantization zero point for the Output Feature Map tensor.
    strides : tuple
        The 2 dimensional strides as (stride_height, stride_width).
    padding : tuple
        The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
    dilation : Union[int, tuple, list]
        The 2 dimensional dilation as (dilation_height, dilation_width).
    activation : str
        The activation function to use.
            "NONE" - no activation function.
            "CLIP" - clip the output between clip_min and clip_max.
            "TANH" - tanh activation function.
            "SIGMOID" - sigmoid activation function.
            "LUT" - use a look-up table to perform the activation function.
    clip_min : int
        The minimum clipping value if activation = "CLIP".
    clip_max : int
        The maximum clipping value if activation = "CLIP".
    rounding_mode : str
        The rounding mode to apply to the Output Feature Map tensor.
            "TFL" - Tensorflow Lite rounding scheme.
            "TRUNCATE" - Truncate towards zero.
            "NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
    upscale : str
        The 2x2 upscaling mode to apply to the Input Feature Map tensor.
            "NONE" - no upscaling.
            "NEAREST" - upscale using nearest neighbour.
            "ZEROS" - upscale using zeros.
    ifm_layout : str
        The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
    ofm_layout : str
        The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
    ofm_dtype : str, optional
        The Output Feature Map tensor data type. Can be 'int8', 'uint8' or 'int16'.
    Returns
    -------
    te.Tensor
        The OFM tensor.
    """
    assert ifm.shape[0] == 1, "Only batch size 1 is supported"
    assert ifm_layout in {"NHWC", "NHCWB16"}
    assert ofm_layout in {"NHWC", "NHCWB16"}
    # Normalise all shape-related parameters to plain Python ints.
    padding = [int(v) for v in padding]
    stride_h, stride_w = [int(v) for v in strides]
    dilation_h, dilation_w = [int(v) for v in dilation]
    # Depthwise weights are laid out as (channels, kernel_h, kernel_w, 1).
    channels, kernel_h, kernel_w, _ = [int(v) for v in weight.shape]
    # Compute operation for the IFM DMA pipeline
    dmaed_ifm = dma_ifm_compute(ifm, ifm_layout, ifm_zero_point, ifm_scale, channels, padding)
    # 2D Depthwise Convolution compute operation.
    # Effective kernel size once dilation is applied, and the resulting OFM
    # spatial dims (padding was already applied by the IFM DMA pipeline).
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    ofm_height = (dmaed_ifm.shape[1] - dilated_kernel_h) // stride_h + 1
    ofm_width = (dmaed_ifm.shape[2] - dilated_kernel_w) // stride_w + 1
    rh = te.reduce_axis((0, kernel_h), name="ry")
    rw = te.reduce_axis((0, kernel_w), name="rx")
    depthwise_conv2d_attrs = {
        "op": "ethosu_depthwise_conv2d",
        "weight_zero_point": weight_zero_point,
        "activation": activation,
        "clip_min": clip_min,
        "clip_max": clip_max,
        "rounding_mode": rounding_mode,
        "upscale": upscale,
        "stride_h": stride_h,
        "stride_w": stride_w,
        "dilation_h": dilation_h,
        "dilation_w": dilation_w,
    }
    has_lut = activation in ("TANH", "LUT", "SIGMOID")
    # This is a trick to insert the LUT tensor into the TE graph if LUT is present
    lut_expr = (lut[0] + lut[255]).astype(ifm.dtype) if has_lut else 0
    # Add the LUT tensor to the attributes to be able to later tell which tensor is the LUT
    if has_lut:
        depthwise_conv2d_attrs["lut"] = lut
    depthwise = te.compute(
        (1, ofm_height, ofm_width, channels),
        lambda nn, hh, ww, cc: te.sum(
            (
                dmaed_ifm(
                    nn, hh * stride_h + rh * dilation_h, ww * stride_w + rw * dilation_w, cc
                ).astype(ifm.dtype)
                * weight[cc, rh, rw, 0].astype(ifm.dtype)
                # This is a trick to load 10 elements of the scale_bias at once, not accurate maths
                + (scale_bias[cc, 0] * scale_bias[cc, 9] + lut_expr).astype(ifm.dtype)
            ).astype(ofm_dtype),
            axis=[rh, rw],
        ),
        name="ethosu_depthwise_conv2d",
        attrs=depthwise_conv2d_attrs,
    )
    nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(channels)
    # Affine dependency transforms consumed by the cascader's Propagators.
    # NOTE(review): each appears to map an OFM region to the IFM/weights/bias
    # region it requires — e.g. the IFM rows scale by stride and add the
    # (dilated_kernel - stride) overlap; confirm against the Propagator docs.
    ifm_matrix = [
        [1, 0, 0, 0, 0],
        [0, stride_h, 0, 0, (dilated_kernel_h - stride_h)],
        [0, 0, stride_w, 0, (dilated_kernel_w - stride_w)],
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, 1],
    ]
    weights_matrix = [
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, kernel_h],
        [0, 0, 0, 0, kernel_w],
        [0, 0, 0, 0, 1],
        [0, 0, 0, 0, 1],
    ]
    # The 10-wide second axis matches the packed per-channel scale/bias
    # elements read via scale_bias[cc, 0] / scale_bias[cc, 9] above.
    bias_matrix = [
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, 10],
        [0, 0, 0, 0, 1],
    ]
    # Re-express the transforms in NHCWB16 coordinates on whichever side(s)
    # use the brick-formatted layout.
    if ofm_layout == "NHCWB16":
        ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
        weights_matrix = np.matmul(weights_matrix, nhcwb16_to_nhwc).tolist()
        bias_matrix = np.matmul(bias_matrix, nhcwb16_to_nhwc).tolist()
    if ifm_layout == "NHCWB16":
        ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
    # The offset vector shifts the mapped region back by the top/left padding.
    ifm_propagator = Propagator(
        ifm_matrix,
        [0, -padding[0], -padding[1], 0]
        if ifm_layout == "NHWC"
        else [0, -padding[0], 0, -padding[1], 0],
    )
    weights_propagator = Propagator(
        weights_matrix,
        [0, 0, 0, 0],
    )
    bias_propagator = Propagator(
        bias_matrix,
        [0, 0],
    )
    propagator_attrs = {
        "ifm_propagator": ifm_propagator,
        "weights_propagator": weights_propagator,
        "bias_propagator": bias_propagator,
    }
    # Compute operation for the OFM DMA pipeline
    return dma_ofm_compute(
        depthwise, ofm_layout, ofm_zero_point, ofm_scale, channels, attrs=propagator_attrs
    )
@register_matcher
def match_ethosu_depthwise_conv2d(output_tensor, device_config):
    """Match a Tensor Expression corresponding to an NPU Depthwise Conv2D.
    If the Tensor Expression matches, an EthosuPart will be created that models the
    matched Tensor Expression. Otherwise, None will be returned.
    Parameters
    ----------
    output_tensor : tvm.te.Tensor
        The tensor to attempt to match with.
    device_config : EthosuDeviceConfig
        Target device configuration.
    Returns
    -------
    Union[None, EthosuPart]
        The created EthosuPart if there was a match, otherwise None.
    """
    # Walk the expected producer chain (OFM DMA ops, the compute op, then
    # the IFM DMA ops) along the first input of every stage, bailing out on
    # the first op-name mismatch.
    expected_ops = (
        "ethosu_write",
        "ethosu_convert_to_nhcwb16",
        "ethosu_depthwise_conv2d",
        "ethosu_pad",
        "ethosu_upscale",
        "ethosu_convert_to_nhwc",
        "ethosu_read",
    )
    stages = []
    current = output_tensor
    for expected in expected_ops:
        if current.op.name != expected:
            return None
        stages.append(current)
        current = current.op.input_tensors[0]
    write, convert_to_nhcwb16, depthwise2d, _, _, convert_to_nhwc, read = stages
    input_tensors = [
        read.op.input_tensors[0],
        depthwise2d.op.input_tensors[1],
        depthwise2d.op.input_tensors[2],
    ]
    subgraph = TESubgraph(input_tensors, output_tensor)
    propagators = [
        write.op.attrs["ifm_propagator"],
        write.op.attrs["weights_propagator"],
        write.op.attrs["bias_propagator"],
    ]
    ifm_dtype = input_tensors[0].dtype
    ofm_dtype = output_tensor.dtype
    # The weight tensor is (channels, kernel_h, kernel_w, 1), so the first
    # three axes give channels and the kernel shape.
    channels, kernel_height, kernel_width = (int(axis) for axis in input_tensors[1].shape[0:3])
    subkernels = len(
        device_config.get_kernel_steps(depthwise2d.op.name, kernel_height, kernel_width, ifm_dtype)
    )
    output_layout = convert_to_nhcwb16.op.attrs["layout"]
    input_layout = convert_to_nhwc.op.attrs["layout"]
    output_quantum = device_config.get_output_quantum(output_layout)
    valid_block_configs = device_config.get_valid_block_configs(
        propagators[0],
        depthwise2d.op.attrs,
        output_tensor.shape,
        channels,
        channels,
        output_layout,
        input_layout,
        ifm_dtype,
        ofm_dtype,
        kernel_height,
        kernel_width,
    )
    return EthosuPart(
        subgraph,
        propagators,
        output_quantum,
        subkernels,
        valid_block_configs,
        1,
    )
)
| 10,987 | 33.993631 | 99 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/te/convolution.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Tensor Expressions for convolutions for the NPU"""
from typing import Tuple, Union, List
import numpy as np # type: ignore
from tvm import te # type: ignore
from tvm.contrib.ethosu.cascader import TESubgraph, EthosuPart, Propagator, register_matcher
from .dma import dma_ofm_compute, dma_ifm_compute
from .common import get_layout_transform_matrices
def conv2d_compute(
    ifm: te.Tensor,
    weight: te.Tensor,
    scale_bias: te.Tensor,
    lut: te.Tensor,
    ifm_scale: float,
    ifm_zero_point: int,
    weight_zero_point: int,
    ofm_scale: float,
    ofm_zero_point: int,
    strides: Tuple[int, int],
    padding: Tuple[int, int, int, int],
    dilation: Union[Tuple[int, int], List[int]],
    activation: str,
    clip_min: int,
    clip_max: int,
    rounding_mode: str,
    upscale: str,
    ifm_layout: str,
    ofm_layout: str,
) -> te.Tensor:
    """A compute operator representing the capabilities of a 2D convolution for the NPU.

    The operator is expressed as a pipeline: an IFM DMA stage (layout
    conversion, padding, optional 2x2 upscaling), the convolution itself,
    and an OFM DMA stage that carries the cascader Propagators as attrs.

    Parameters
    ----------
    ifm : te.Tensor
        The Input Feature Map tensor (IFM).
    weight : te.Tensor
        The weight tensor.
    scale_bias : te.Tensor
        The packed per-channel weight scale and bias tensor.
    lut : te.Tensor
        The look-up table of values to use if activation = "LUT".
    ifm_scale : float
        The quantization scale for the Input Feature Map tensor.
    ifm_zero_point : int
        The quantization zero point for the Input Feature Map tensor.
    weight_zero_point : int
        The quantization zero point for the weight tensor.
    ofm_scale : float
        The quantization scale for the Output Feature Map tensor.
    ofm_zero_point : int
        The quantization zero point for the Output Feature Map tensor.
    strides : tuple
        The 2 dimensional strides as (stride_height, stride_width).
    padding : tuple
        The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
    dilation : Union[Tuple[int, int], List[int]]
        The 2 dimensional dilation as (dilation_height, dilation_width).
    activation : str
        The activation function to use.
            "NONE" - no activation function.
            "CLIP" - clip the output between clip_min and clip_max.
            "TANH" - tanh activation function.
            "SIGMOID" - sigmoid activation function.
            "LUT" - use a look-up table to perform the activation function.
    clip_min : int
        The minimum clipping value if activation = "CLIP".
    clip_max : int
        The maximum clipping value if activation = "CLIP".
    rounding_mode : str
        The rounding mode to apply to the Output Feature Map tensor.
            "TFL" - Tensorflow Lite rounding scheme.
            "TRUNCATE" - Truncate towards zero.
            "NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
    upscale : str
        The 2x2 upscaling mode to apply to the Input Feature Map tensor.
            "NONE" - no upscaling.
            "NEAREST" - upscale using nearest neighbour.
            "ZEROS" - upscale using zeros.
            "NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
    ifm_layout : str
        The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
    ofm_layout : str
        The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".

    Returns
    -------
    te.Tensor
        The OFM tensor.
    """
    # Only a batch size of 1 is supported.
    assert ifm.shape[0] == 1
    assert ifm_layout in {"NHWC", "NHCWB16"}
    assert ofm_layout in {"NHWC", "NHCWB16"}
    padding = [int(v) for v in padding]
    stride_h, stride_w = [int(v) for v in strides]
    dilation_h, dilation_w = [int(v) for v in dilation]
    # Weights are laid out as (ofm_channels, kernel_h, kernel_w, ifm_channels).
    ofm_channels, kernel_h, kernel_w, ifm_channels = [int(v) for v in weight.shape]
    # Any upscaling mode doubles the IFM spatial dimensions before the conv.
    upscale_factor = 2 if upscale != "NONE" else 1
    # Compute operation for the IFM DMA pipeline
    dmaed_ifm = dma_ifm_compute(
        ifm,
        ifm_layout,
        ifm_zero_point,
        ifm_scale,
        weight.shape[3],
        padding,
        upscale_factor,
    )
    # 2D Convolution compute operation
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    # The DMA stage has already applied the padding, hence the plain
    # "valid convolution" output size formula here.
    ofm_height = (dmaed_ifm.shape[1] - dilated_kernel_h) // stride_h + 1
    ofm_width = (dmaed_ifm.shape[2] - dilated_kernel_w) // stride_w + 1
    # Reduction axes over the input channels and the kernel window.
    rc = te.reduce_axis((0, ifm_channels), name="rc")
    rh = te.reduce_axis((0, kernel_h), name="ry")
    rw = te.reduce_axis((0, kernel_w), name="rx")
    conv2d_attrs = {
        "op": "ethosu_conv2d",
        "weight_zero_point": weight_zero_point,
        "activation": activation,
        "upscale": upscale,
        "clip_min": clip_min,
        "clip_max": clip_max,
        "rounding_mode": rounding_mode,
        "stride_h": stride_h,
        "stride_w": stride_w,
        "dilation_h": dilation_h,
        "dilation_w": dilation_w,
    }
    has_lut = activation in ("TANH", "LUT", "SIGMOID")
    # This is a trick to insert the LUT tensor into the TE graph if LUT is present
    lut_expr = (lut[0] + lut[255]).astype(ifm.dtype) if has_lut else 0
    # Add the LUT tensor to the attributes to be able to later tell which tensor is the LUT
    if has_lut:
        conv2d_attrs["lut"] = lut
    conv = te.compute(
        (1, ofm_height, ofm_width, ofm_channels),
        lambda nn, hh, ww, cc: te.sum(
            dmaed_ifm(
                nn, hh * stride_h + rh * dilation_h, ww * stride_w + rw * dilation_w, rc
            ).astype(ifm.dtype)
            * weight[cc, rh, rw, rc].astype(ifm.dtype)
            # This is a trick to load 10 elements of the scale_bias at once, not accurate maths
            + (scale_bias[cc, 0] * scale_bias[cc, 9] + lut_expr).astype(ifm.dtype),
            axis=[rh, rw, rc],
        ),
        name="ethosu_conv2d",
        attrs=conv2d_attrs,
    )
    nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(ofm_channels)
    # Transform matrices (homogeneous coordinates) used by the cascader's
    # Propagators to map an OFM region back onto the IFM/weights/bias regions
    # needed to produce it.
    ifm_matrix = [
        [1, 0, 0, 0, 0],
        [0, stride_h, 0, 0, (dilated_kernel_h - stride_h)],
        [0, 0, stride_w, 0, (dilated_kernel_w - stride_w)],
        [0, 0, 0, 0, ifm_channels],
        [0, 0, 0, 0, 1],
    ]
    weights_matrix = [
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, kernel_h],
        [0, 0, 0, 0, kernel_w],
        [0, 0, 0, 0, ifm_channels],
        [0, 0, 0, 0, 1],
    ]
    # The bias row of 10 corresponds to the 10-byte packed scale_bias entries.
    bias_matrix = [
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, 10],
        [0, 0, 0, 0, 1],
    ]
    # Compose with the layout conversion matrices when either feature map
    # uses the NHCWB16 layout.
    if ofm_layout == "NHCWB16":
        ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
        weights_matrix = np.matmul(weights_matrix, nhcwb16_to_nhwc).tolist()
        bias_matrix = np.matmul(bias_matrix, nhcwb16_to_nhwc).tolist()
    if ifm_layout == "NHCWB16":
        ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
    # The IFM propagator offset accounts for the top/left padding applied by
    # the DMA stage.
    ifm_propagator = Propagator(
        ifm_matrix,
        [0, -padding[0], -padding[1], 0]
        if ifm_layout == "NHWC"
        else [0, -padding[0], 0, -padding[1], 0],
    )
    weights_propagator = Propagator(
        weights_matrix,
        [0, 0, 0, 0],
    )
    bias_propagator = Propagator(
        bias_matrix,
        [0, 0],
    )
    propagator_attrs = {
        "ifm_propagator": ifm_propagator,
        "weights_propagator": weights_propagator,
        "bias_propagator": bias_propagator,
    }
    # Compute operation for the OFM DMA pipeline
    dma_ofm = dma_ofm_compute(
        conv, ofm_layout, ofm_zero_point, ofm_scale, ofm_channels, attrs=propagator_attrs
    )
    return dma_ofm
@register_matcher
def match_ethosu_conv2d(output_tensor, device_config):
    """Match a Tensor Expression corresponding to an NPU Conv2D.

    If the Tensor Expression matches, an EthosuPart will be created that models the
    matched Tensor Expression. Otherwise, None will be returned.

    Parameters
    ----------
    output_tensor : tvm.te.Tensor
        The tensor to attempt to match with.
    device_config : EthosuDeviceConfig
        Target device configuration

    Returns
    -------
    Union[None, EthosuPart]
        The created EthosuPart if there was a match, otherwise None.
    """
    # The NPU conv2d is lowered as a fixed pipeline of TE stages; walk it
    # backwards from the output and bail out on the first mismatch.
    pipeline = (
        "ethosu_write",
        "ethosu_convert_to_nhcwb16",
        "ethosu_conv2d",
        "ethosu_pad",
        "ethosu_upscale",
        "ethosu_convert_to_nhwc",
        "ethosu_read",
    )
    stages = []
    current = output_tensor
    for expected_name in pipeline:
        if current.op.name != expected_name:
            return None
        stages.append(current)
        current = current.op.input_tensors[0]
    write, convert_to_nhcwb16, conv2d, _, _, convert_to_nhwc, _ = stages
    # After the walk `current` is the tensor feeding ethosu_read, i.e. the IFM.
    input_tensors = [
        current,
        conv2d.op.input_tensors[1],
        conv2d.op.input_tensors[2],
    ]
    subgraph = TESubgraph(input_tensors, output_tensor)
    propagators = [
        write.op.attrs["ifm_propagator"],
        write.op.attrs["weights_propagator"],
        write.op.attrs["bias_propagator"],
    ]
    ifm_dtype = input_tensors[0].dtype
    ofm_dtype = output_tensor.dtype
    # Channel counts come from the weights tensor, whose shape is unaffected
    # by feature map layout conversion.
    ifm_channels = int(input_tensors[1].shape[3])
    ofm_channels, kernel_height, kernel_width = (int(ax) for ax in input_tensors[1].shape[0:3])
    is_part_kernel = device_config.is_partkernel(
        conv2d.op.name, ifm_channels, ifm_dtype, kernel_height * kernel_width
    )
    subkernels = len(
        device_config.get_kernel_steps(
            conv2d.op.name, kernel_height, kernel_width, ifm_dtype, is_part_kernel
        )
    )
    output_layout = convert_to_nhcwb16.op.attrs["layout"]
    input_layout = convert_to_nhwc.op.attrs["layout"]
    output_quantum = device_config.get_output_quantum(output_layout)
    valid_block_configs = device_config.get_valid_block_configs(
        propagators[0],
        conv2d.op.attrs,
        output_tensor.shape,
        ofm_channels,
        ifm_channels,
        output_layout,
        input_layout,
        ifm_dtype,
        ofm_dtype,
        kernel_height,
        kernel_width,
    )
    return EthosuPart(
        subgraph,
        propagators,
        output_quantum,
        subkernels,
        valid_block_configs,
        1,
    )
| 11,386 | 33.401813 | 99 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/te/inline.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Tensor Expressions for operations that will be inlined"""
import numpy as np # type: ignore
from tvm.contrib.ethosu.cascader import TESubgraph, InlinePart, Propagator, register_matcher
# TE compute ops that perform no NPU computation and are therefore inlined
# into their consumers by the matcher below.
INLINE_OPS = {"T_reshape", "T_strided_slice"}
@register_matcher
def match_ethosu_inline(output_tensor, device_config):
    """Match a Tensor Expression corresponding to an operator that will be inlined.

    If the Tensor Expression matches, an InlinePart will be created that models the
    matched Tensor Expression. Otherwise, None will be returned. This matcher is
    naive and assumes nothing about the compute of the Tensor Expression. Therefore,
    the resulting InlinePart will have full-tensor dependencies (i.e. each output
    element depends on every input element).

    Parameters
    ----------
    output_tensor : tvm.te.Tensor
        The tensor to attempt to match with.
    device_config : EthosuDeviceConfig
        Target device configuration

    Returns
    -------
    Union[None, InlinePart]
        The created InlinePart if there was a match, otherwise None.
    """
    if output_tensor.op.name not in INLINE_OPS:
        return None

    input_tensors = output_tensor.op.input_tensors
    out_rank = len(output_tensor.shape)
    propagators = []
    for in_tensor in input_tensors:
        in_rank = len(in_tensor.shape)
        # Full-tensor dependency: any requested output region maps to the
        # complete input shape, encoded in the last (constant) column of the
        # homogeneous transform matrix.
        matrix = np.zeros((in_rank + 1, out_rank + 1))
        for row, axis in enumerate(in_tensor.shape):
            matrix[row, out_rank] = int(axis)
        matrix[in_rank, out_rank] = 1
        propagators.append(Propagator(matrix.tolist(), [0] * in_rank))

    return InlinePart(
        TESubgraph(input_tensors, output_tensor),
        propagators,
    )
| 2,758 | 35.786667 | 92 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/te/common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common methods for the NPU tensor expressions"""
from typing import Tuple, List
def get_layout_transform_matrices(ofm_channels: int) -> Tuple[List[List[float]], List[List[float]]]:
    """Build the NHWC->NHCWB16 and NHCWB16->NHWC layout transform matrices.

    For information about the supported layouts see https://developer.arm.com/documentation/102420/
    0200/Functional-description/Control-and-data-flow/Supported-memory-formats-for-feature-maps

    Parameters
    ----------
    ofm_channels : int
        The number of output channels in a NHWC layout

    Returns
    -------
    nhwc_to_nhcwb16, nhcwb16_to_nhwc : Tuple[List[List[float]], List[List[float]]]
        The layout transformation matrices
    """
    brick = 16  # the B16 axis of NHCWB16 always holds 16 channels

    nhwc_to_nhcwb16 = [
        [1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0],
        [0, 0, 0, 1 / brick, 0],
        [0, 0, 1, 0, 0],
        [0, 0, 0, 0, brick],
        [0, 0, 0, 0, 1],
    ]

    # Converting NHWC -> NHCWB16 rounds the channel count up to a multiple of
    # 16, which is lossy. To recover the true NHWC channel count on the way
    # back, the matrix embeds an offset derived from the remainder. The offset
    # only applies when ofm_channels is not divisible by 16, and the raw
    # channel count cannot be used directly because these matrices also
    # propagate block configs.
    leftover = ofm_channels % brick
    channel_offset = leftover - brick if leftover else 0

    nhcwb16_to_nhwc = [
        [1, 0, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],
        [0, 0, 0, 1, 0, 0],
        [0, 0, brick, 0, 0, channel_offset],
        [0, 0, 0, 0, 0, 1],
    ]
    return nhwc_to_nhcwb16, nhcwb16_to_nhwc
| 2,564 | 39.078125 | 100 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/te/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tensor Expressions for the NPU"""
from .convolution import *
from .depthwise import *
from .pooling import *
from .binary_elementwise import *
from .identity import *
from .unary_elementwise import *
from .inline import *
| 1,011 | 37.923077 | 62 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/tir/pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract information from the pooling operators in TIR."""
from typing import Tuple
import tvm
from .utils import get_outer_loops, get_op_attrs, get_loads, get_stores
from .dma import get_ifm_params, get_ofm_params
from .spec import SerialKernel, SerialActivation, SerialPooling
from .producers_consumers import ProducersConsumers
def get_pooling_params(
    stmt: tvm.tir.AttrStmt, producers_consumers: ProducersConsumers
) -> Tuple[SerialPooling, tvm.tir.Var, tvm.tir.Var]:
    """Extract the parameters needed to emit a call_extern for a pooling op.

    Parameters
    ----------
    stmt : tvm.tir.AttrStmt
        The outermost attribute statement of a convolution loop nest.
    producers_consumers: ProducersConsumers
        It associates pointers with the loop nest that produces
        their values and with the loop nest that consumes their values.

    Returns
    -------
    SerialPooling
        The parameters needed to construct a 2D convolution.
    output_pointer : tvm.tir.Var
        The output pointer of the convolution operation.
    replace_pointer : tvm.tir.Var
        The output pointer of the DMA write operation, which is to replace
        the convolution output pointer.
    is_allocator : bool
        Whether this operator allocates its output.
    """
    attrs, body = get_op_attrs(stmt)
    _, _, _, _, _, inner = get_outer_loops(body, "NHWC")
    # The two innermost loops are the reduction over the pool window.
    reduce_h = inner
    reduce_w = reduce_h.body
    stores = get_stores(reduce_w.body)  # stores = [output]
    loads = get_loads(reduce_w.body)  # loads = [output, input, LUT, LUT]
    output_pointer = stores[0].buffer.data
    input_pointer = loads[1].buffer.data

    # Feature map information
    serial_ifm, serial_padding = get_ifm_params(input_pointer, producers_consumers, stmt)
    serial_ofm, serial_block_config, replace_pointer, is_allocator = get_ofm_params(
        output_pointer, producers_consumers, stmt
    )

    # Pool window information (extents of the reduction loops)
    pool_shape = SerialKernel(
        width=int(reduce_w.extent),
        height=int(reduce_h.extent),
        stride_w=int(attrs["stride_w"]),
        stride_h=int(attrs["stride_h"]),
        dilation_w=1,
        dilation_h=1,
    )

    # Activation information
    serial_activation = SerialActivation(
        op=attrs["activation"], clip_min=attrs["clip_min"], clip_max=attrs["clip_max"]
    )
    serial_pooling = SerialPooling(
        ifm=serial_ifm,
        ofm=serial_ofm,
        pooling_type=attrs["pooling_type"],
        pool_shape=pool_shape,
        padding=serial_padding,
        activation=serial_activation,
        rounding_mode=attrs["rounding_mode"],
        upscale=attrs["upscale"],
        block_config=serial_block_config,
    )
    return serial_pooling, output_pointer, replace_pointer, is_allocator
| 3,612 | 36.247423 | 89 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/tir/identity.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract information from the identity operator in TIR."""
from typing import Tuple
import tvm
from .spec import (
SerialBlockConfig,
SerialKernel,
SerialActivation,
SerialPooling,
SerialPadding,
SerialFeatureMap,
)
from .utils import get_op_attrs, get_base_address, get_strides, get_loads
from .producers_consumers import ProducersConsumers
def _get_feature_map(stmt: tvm.tir.AttrStmt, fm_type: str) -> Tuple[SerialFeatureMap, tvm.tir.Var]:
    """Get the feature map parameters from a loop nest of any shape (as long there are at
    most 4 nested loops).

    Parameters
    ----------
    stmt : tvm.tir.AttrStmt
        The outermost attribute statement of a loop nest.
    fm_type: str
        Either "ifm" or "ofm", depending on whether it is an input or output feature map

    Returns
    -------
    SerialFeatureMap
        The serializable feature map.
    output_pointer : tvm.tir.Var
        The pointer produced by the operation.
    """
    assert fm_type in ("ifm", "ofm")
    attrs, body = get_op_attrs(stmt)
    loops = []
    inner = body
    # Extract the loops and the innermost statement
    while hasattr(inner, "body"):
        loops.append(inner)
        inner = inner.body
    # If the batch size loop is present, we need to remove it
    # (the removed loop must be degenerate, i.e. extent 1)
    if len(loops) > 3:
        assert loops[0].extent == 1
        loops = loops[1:]
    # For the IFM the buffer access is the load on the value side of the
    # innermost store; for the OFM it is the store itself.
    fm_inner = inner.value if fm_type == "ifm" else inner
    # Needed for stride calculation, can replace with
    # inner.value.buffer.strides in future.
    assert len(fm_inner.indices) == 1, "Ethos-U passes expect flattened buffers"
    stride_vars = [l.loop_var for l in loops]
    strides = get_strides(fm_inner.indices[0], stride_vars)
    base_address = [get_base_address(index) for index in fm_inner.indices]
    data_type = inner.buffer.data.type_annotation.element_type.dtype
    # Missing trailing loops correspond to degenerate (size 1) width/channel
    # dimensions; only tile 0 is used here (no rolling buffers).
    serial_feature_map = SerialFeatureMap(
        data_type=data_type,
        height=loops[0].extent,
        width=loops[1].extent if len(loops) > 1 else 1,
        channels=loops[2].extent if len(loops) > 2 else 1,
        tile_height_0=loops[0].extent,
        tile_height_1=0,
        tile_width_0=loops[1].extent if len(loops) > 1 else 1,
        tile_address_0=tvm.tir.BufferLoad(fm_inner.buffer, base_address),
        tile_address_1=0,
        tile_address_2=0,
        tile_address_3=0,
        scale=attrs["scale"],
        zero_point=attrs["zero_point"],
        layout="NHWC",
        stride_h=strides[0] if len(strides) > 0 else 1,
        stride_w=strides[1] if len(strides) > 1 else 1,
        stride_c=strides[2] if len(strides) > 2 else 1,
    )
    output_pointer = inner.buffer.data
    return serial_feature_map, output_pointer
def get_identity_params(
    stmt: tvm.tir.AttrStmt, producers_consumers: ProducersConsumers
) -> Tuple[SerialPooling, tvm.tir.Var, tvm.tir.Var]:
    """Extract the parameters needed to emit a call_extern for an identity op.

    The identity operation is implemented as a 1x1 average pooling with
    neutral quantization parameters.

    Parameters
    ----------
    stmt : tvm.tir.AttrStmt
        The outermost attribute statement of an identity pooling loop nest.
    producers_consumers: ProducersConsumers
        It associates pointers with the loop nest that produces
        their values and with the loop nest that consumes their values.

    Returns
    -------
    SerialPooling
        The parameters needed to construct a 2D pooling.
    output_pointer : tvm.tir.Var
        The output pointer of the pooling operation.
    replace_pointer : tvm.tir.Var
        The output pointer of the DMA write operation, which is to replace
        the pooling output pointer.
    is_allocator : bool
        Whether this operator allocates its output.
    """
    attrs, _ = get_op_attrs(stmt)

    # Descend to the innermost statement, which is the store.
    store = stmt
    while hasattr(store, "body"):
        store = store.body

    loads = get_loads(store)  # loads = [input, LUT, LUT]
    input_pointer = loads[0].buffer.data
    output_pointer = store.buffer.data

    read = producers_consumers.get_producer(input_pointer, stmt)
    write = producers_consumers.get_consumer(output_pointer, stmt)
    serial_ifm, _ = _get_feature_map(read, "ifm")
    serial_ofm, write_output_pointer = _get_feature_map(write, "ofm")

    # This operator allocates its output only if the write loop nest is the
    # producer of its own output pointer.
    producer = producers_consumers.get_producer(write_output_pointer, write)
    is_allocator = not (producer is None or producer != write)

    # TODO: We might want to support stand alone ReLU in the future by adding clip_min and
    # clip max attributes to the identity operator
    serial_activation = SerialActivation(op=attrs["activation"], clip_min=0, clip_max=0)

    # Create a serialized identity pooling to be run on the NPU
    identity_pooling = SerialPooling(
        ifm=serial_ifm,
        ofm=serial_ofm,
        pooling_type="AVG",
        pool_shape=SerialKernel(1, 1, 1, 1, 1, 1),
        padding=SerialPadding(0, 0, 0, 0),
        activation=serial_activation,
        upscale="NONE",
        rounding_mode="TFL",
        block_config=SerialBlockConfig(0, 0, 0),
    )
    return identity_pooling, output_pointer, write_output_pointer, is_allocator
| 6,044 | 33.346591 | 99 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/tir/binary_elementwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract information from the binary_elementwise operators in TIR."""
from typing import Tuple
import tvm
from .utils import get_outer_loops, get_op_attrs, get_loads
from .dma import get_ifm_params, get_ofm_params
from .spec import SerialActivation, SerialBinaryElementwise, SerialRescaleConfig
from .producers_consumers import ProducersConsumers
def get_binary_elementwise_params(
    stmt: tvm.tir.AttrStmt, producers_consumers: ProducersConsumers
) -> Tuple[SerialBinaryElementwise, tvm.tir.Var, tvm.tir.Var]:
    """Extract the parameters needed to emit a call_extern for a binary
    elementwise op.

    Parameters
    ----------
    stmt : tvm.tir.AttrStmt
        The outermost attribute statement of a binary elementwise loop nest.
    producers_consumers: ProducersConsumers
        It associates pointers with the loop nest that produces
        their values and with the loop nest that consumes their values.

    Returns
    -------
    SerialBinaryElementwise
        The parameters needed to construct a binary elementwise operator.
    output_pointer : tvm.tir.Var
        The output pointer of the binary elementwise operation.
    replace_pointer : tvm.tir.Var
        The output pointer of the DMA write operation, which is to replace
        the binary elementwise output pointer.
    is_allocator : bool
        Whether this operator allocates its output.
    """
    attrs, body = get_op_attrs(stmt)
    reversed_operands = attrs["reversed_operands"]
    _, _, _, _, _, inner = get_outer_loops(body, "NHWC")

    loads = get_loads(inner)  # loads = [input, input, LUT, LUT]
    ifm_pointers = [loads[0].buffer.data, loads[1].buffer.data]
    if reversed_operands:
        ifm_pointers.reverse()
    input_pointer, input_pointer1 = ifm_pointers
    output_pointer = inner.buffer.data

    # Feature map information
    serial_ifm, _ = get_ifm_params(input_pointer, producers_consumers, stmt)
    serial_ifm2, _ = get_ifm_params(input_pointer1, producers_consumers, stmt)
    serial_ofm, serial_block_config, replace_pointer, is_allocator = get_ofm_params(
        output_pointer, producers_consumers, stmt
    )

    # Activation and rescale information
    serial_activation = SerialActivation(
        op=attrs["activation"], clip_min=attrs["clip_min"], clip_max=attrs["clip_max"]
    )
    rescale_config = SerialRescaleConfig(
        use_rescale=attrs["use_rescale"], scale=attrs["rescale_scale"], shift=attrs["rescale_shift"]
    )

    serial_op = SerialBinaryElementwise(
        ifm=serial_ifm,
        ifm2=serial_ifm2,
        ofm=serial_ofm,
        operator_type=attrs["operator_type"],
        reversed_operands=reversed_operands,
        activation=serial_activation,
        rounding_mode=attrs["rounding_mode"],
        block_config=serial_block_config,
        rescale_config=rescale_config,
    )
    return serial_op, output_pointer, replace_pointer, is_allocator
| 3,787 | 39.297872 | 100 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/tir/dma.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract parameters from the DMA operators in TIR."""
from typing import NamedTuple, Union
import tvm
from .utils import get_outer_loops, get_base_address, get_strides, get_op_attrs
from .spec import SerialBlockConfig, SerialFeatureMap, SerialPadding
def get_pad_params(stmt):
    """Get the padding parameters from a pad loop nest.

    Parameters
    ----------
    stmt : tvm.tir.AttrStmt
        The outermost attribute statement of a pad loop nest.

    Returns
    -------
    pad : SerialPadding
        The serializable padding.
    input_pointer : tvm.tir.Var
        The pointer consumed by the operation.
    output_pointer : tvm.tir.Var
        The pointer produced by the operation.
    """
    _, body = get_op_attrs(stmt)
    n, h, w, c, _, inner = get_outer_loops(body, "NHWC")
    output_pointer = inner.buffer.data
    pad = SerialPadding(top=0, left=0, bottom=0, right=0)
    if isinstance(inner.value, tvm.tir.Call):
        input_pointer = inner.value.args[1].buffer.data
    else:
        input_pointer = inner.value.buffer.data
        # BUG FIX: this early return must only be taken in the no-padding
        # branch (no if_then_else guard). Previously it was unconditional,
        # which made the padding extraction below unreachable and reported
        # zero padding for every pad operation.
        return pad, input_pointer, output_pointer

    padded_shape = [n.extent, h.extent, w.extent, c.extent]

    # The padding amounts are encoded in the bounds checks of the
    # if_then_else condition guarding the padded read: `var < val` bounds the
    # bottom/right padding, `val <= var` bounds the top/left padding.
    def _visit(expr):
        if isinstance(expr, tvm.tir.expr.LT):
            var = expr.a
            val = expr.b
            if var == h.loop_var:
                pad.bottom = padded_shape[1] - val
            else:
                pad.right = padded_shape[2] - val
        elif isinstance(expr, tvm.tir.expr.LE):
            var = expr.b
            val = expr.a
            if var == h.loop_var:
                pad.top = val
            else:
                pad.left = val

    cond = inner.value.args[0]
    tvm.tir.stmt_functor.post_order_visit(cond, _visit)
    return (
        pad,
        input_pointer,
        output_pointer,
    )
def get_upscale_params(stmt):
    """Get the input/output pointers of an upscale loop nest.

    Parameters
    ----------
    stmt : tvm.tir.AttrStmt
        The outermost attribute statement of an upscale loop nest.

    Returns
    -------
    input_pointer : tvm.tir.Var
        The pointer consumed by the operation.
    output_pointer : tvm.tir.Var
        The pointer produced by the operation.
    """
    _, body = get_op_attrs(stmt)
    _, _, _, _, _, inner = get_outer_loops(body, "NHWC")
    value = inner.value
    if isinstance(value, tvm.tir.Call):
        # The upscaled read is wrapped in a call (e.g. if_then_else); the
        # source buffer is its second argument.
        source_buffer = value.args[1].buffer
    else:
        source_buffer = value.buffer
    return (source_buffer.data, inner.buffer.data)
def get_convert_to_nhwc_params(stmt):
    """Get the true number of channels from a convert_to_nhwc loop nest.

    Parameters
    ----------
    stmt : tvm.tir.AttrStmt
        The outermost attribute statement of a convert_to_nhwc loop nest.

    Returns
    -------
    int
        The true number of channels.
    input_pointer : tvm.tir.Var
        The pointer consumed by the operation.
    output_pointer : tvm.tir.Var
        The pointer produced by the operation.
    """
    attrs, body = get_op_attrs(stmt)
    _, _, _, c, _, inner = get_outer_loops(body, "NHWC")
    if attrs["layout"] == "NHCWB16":
        # Skip past the reduce-sum op that was inserted to stop TVM removing
        # compute it would otherwise deem unnecessary.
        inner = inner.body
        input_pointer = inner.value.b.buffer.data
    else:
        input_pointer = inner.value.buffer.data
    output_pointer = inner.buffer.data
    return c.extent, input_pointer, output_pointer
def get_convert_to_nhcwb16_params(stmt):
    """Get the true number of channels from a convert_to_nhcwb16 loop nest.

    Parameters
    ----------
    stmt : tvm.tir.AttrStmt
        The outermost attribute statement of a convert_to_nhcwb16 loop nest.

    Returns
    -------
    out_channels : int
        The true number of channels.
    input_pointer : tvm.tir.Var
        The pointer consumed by the operation.
    output_pointer : tvm.tir.Var
        The pointer produced by the operation.
    """
    attrs, body = get_op_attrs(stmt)
    _, _, _, c, b, inner = get_outer_loops(body, attrs["layout"])
    output_pointer = inner.buffer.data
    if isinstance(inner.value, tvm.tir.Call):
        # The store is guarded by a call (if_then_else) whose condition
        # compares the channel index against the true (unpadded) channel
        # count, so the count can be read from the condition directly.
        cond = inner.value.args[0]
        out_channels = cond.b.value
        input_pointer = inner.value.args[1].buffer.data
    else:
        input_pointer = inner.value.buffer.data
        # BUG FIX: this fallback must only apply when there is no guard;
        # previously it was unconditional and clobbered the guarded
        # out_channels extracted above with the padded channel count.
        out_channels = c.extent * b.extent if attrs["layout"] == "NHCWB16" else c.extent
    return out_channels, input_pointer, output_pointer
class Tiles(NamedTuple):
    """Addresses and dimensions of the (up to three) tiles used to express a
    rolling-buffer read without modulo arithmetic (see create_tiles below)."""

    # Heights of tile0 and tile1 and width of tile0; the remaining tile
    # dimensions can be inferred from these.
    height_0: tvm.tir.expr.IntImm
    height_1: tvm.tir.expr.IntImm
    width_0: tvm.tir.expr.IntImm
    # Start addresses of tile0/tile1/tile2; a plain int (0) when unused.
    address_0: Union[tvm.tir.expr.BufferLoad, int]
    address_1: Union[tvm.tir.expr.BufferLoad, int]
    address_2: Union[tvm.tir.expr.BufferLoad, int]
def create_tiles(stmt: tvm.tir.stmt.AttrStmt) -> Tiles:
    """Given an AttrStmt this function returns a Tiles instance
    containing the tiles' addresses and dimensions.

    When rolling buffers are not used only tile0 is used.
    Otherwise, when rolling buffers are used, the statement contains
    modulo arithmetic operations, which are unsupported by the NPU.
    To support this scenario more than one tile is used.
    In particular, when the rolling variable is the height one
    tile0 and tile2 are used, otherwise, when the rolling variable
    is the width one, tile0 and tile1 are used.

    As an example consider this statement:

    // attr [iter_var(i0, )] pragma_op = "ethosu_read"
    // attr [iter_var(i0, )] pragma_zero_point = 0
    // attr [iter_var(i0, )] pragma_layout = "NHCWB16"
    // attr [iter_var(i0, )] pragma_scale = 1f
    for (i0, 0, 1) {
      for (i1, 0, 6) {
        for (i2, 0, 1) {
          for (i3, 0, 1) {
            for (i4, 0, 16) {
              ethosu_read[((i1*16) + i4)] = ethosu_write[((floormod((i1 + 4), 6)*16) + i4)]
            }
          }
        }
      }
    }

    You can see from the floormod expression floormod((i1 + 4), 6)
    that the rolling variable is i1, that is, the height one.
    In this case tile0 and tile2 are used.
    The height of tile0 will be 6 - 4 = 2, and height of tile2 will be 4.
    Both the width of tile0 and tile2 will be equal to the extent of the width variable.
    Also, the addresses are set accordingly.
    When the rolling variable is the width one a symmetric approach will be used.

    It is worth mentioning that only the height of tile0, the height of tile1,
    and the width of tile0 must be computed, the other ones can be inferred.
    """
    attrs, body = get_op_attrs(stmt)
    _, h, w, _, _, inner = get_outer_loops(body, attrs["layout"])
    base_address = [get_base_address(index) for index in inner.value.indices]
    read_stmt = inner.value
    floor_mod_mul = None

    def _compute_stride(for_stmt):
        # The stride of a loop variable is the product of the extents of
        # all loops nested inside it.
        stride = 1
        while isinstance(for_stmt.body, tvm.tir.For):
            for_stmt = for_stmt.body
            stride *= for_stmt.extent
        return stride

    def _get_floor_mod_mul(stmt):
        # Match (floormod(var + imm, imm) * imm) -- the form a rolling
        # buffer access takes in the read expression.
        nonlocal floor_mod_mul
        if (
            isinstance(stmt, tvm.tir.expr.Mul)
            and isinstance(stmt.b, tvm.tir.expr.IntImm)
            and isinstance(stmt.a, tvm.tir.FloorMod)
            and isinstance(stmt.a.b, tvm.tir.expr.IntImm)
            and isinstance(stmt.a.a, tvm.tir.expr.Add)
            and isinstance(stmt.a.a.a, tvm.tir.expr.Var)
            and isinstance(stmt.a.a.b, tvm.tir.expr.IntImm)
        ):
            floor_mod_mul = stmt

    tvm.tir.stmt_functor.post_order_visit(read_stmt, _get_floor_mod_mul)
    if floor_mod_mul is not None:
        # The rolling variable is the Var inside the floormod.
        rolling_var = floor_mod_mul.a.a.a
        count = 0

        def _count_var(var):
            # Count occurrences of the rolling variable in the loop nest.
            nonlocal count
            if var == rolling_var:
                count += 1

        tvm.tir.stmt_functor.ir_transform(inner, _count_var, None, ["tir.Var"])
        if count == 2:
            stride = floor_mod_mul.b
            # tile0's extent along the rolling axis: modulus minus offset.
            tile_length = floor_mod_mul.a.b - floor_mod_mul.a.a.b
            if rolling_var == h.loop_var and _compute_stride(h) == stride:
                # Rolling along the height: use tile0 and tile2.
                return Tiles(
                    height_0=tile_length,
                    height_1=0,
                    width_0=w.extent,
                    address_0=tvm.tir.BufferLoad(inner.value.buffer, base_address),
                    address_1=0,
                    address_2=tvm.tir.BufferLoad(inner.value.buffer, [0]),
                )
            if rolling_var == w.loop_var and _compute_stride(w) == stride:
                # Rolling along the width: use tile0 and tile1.
                return Tiles(
                    height_0=h.extent,
                    height_1=h.extent,
                    width_0=tile_length,
                    address_0=tvm.tir.BufferLoad(inner.value.buffer, base_address),
                    address_1=tvm.tir.BufferLoad(inner.value.buffer, [0]),
                    address_2=0,
                )
    # No rolling buffer detected: everything lives in tile0.
    return Tiles(
        height_0=h.extent,
        height_1=0,
        width_0=w.extent,
        address_0=tvm.tir.BufferLoad(inner.value.buffer, base_address),
        address_1=0,
        address_2=0,
    )
def get_read_params(stmt):
    """Get the feature map parameters from a read loop nest.

    Parameters
    ----------
    stmt : tvm.tir.AttrStmt
        The outermost attribute statement of a read loop nest.

    Returns
    -------
    SerialFeatureMap
        The serializable feature map.
    input_pointer : tvm.tir.Var
        The pointer consumed by the operation.
    output_pointer : tvm.tir.Var
        The pointer produced by the operation.
    """
    attrs, body = get_op_attrs(stmt)
    _, h, w, c, _, inner = get_outer_loops(body, attrs["layout"])
    input_pointer = inner.value.buffer.data
    output_pointer = inner.buffer.data
    # Needed for stride calculation, can replace with
    # inner.value.buffer.strides in future.
    assert len(inner.value.indices) == 1, "Ethos-U DMA expects flattened buffers"
    stride_vars = [h.loop_var, w.loop_var, c.loop_var]
    strides = get_strides(inner.value.indices[0], stride_vars)
    data_type = inner.buffer.data.type_annotation.element_type.dtype
    # Tile layout (dimensions and addresses) of the feature map being read.
    tiles = create_tiles(stmt)
    return (
        SerialFeatureMap(
            data_type=data_type,
            height=h.extent,
            width=w.extent,
            channels=c.extent,
            tile_height_0=tiles.height_0,
            tile_height_1=tiles.height_1,
            tile_width_0=tiles.width_0,
            tile_address_0=tiles.address_0,
            tile_address_1=tiles.address_1,
            tile_address_2=tiles.address_2,
            tile_address_3=0,
            scale=attrs["scale"],
            zero_point=attrs["zero_point"],
            layout=attrs["layout"],
            stride_h=strides[0],
            stride_w=strides[1],
            stride_c=strides[2],
        ),
        input_pointer,
        output_pointer,
    )
def get_write_params(stmt):
    """Get the feature map parameters from a write loop nest.

    Parameters
    ----------
    stmt : tvm.tir.AttrStmt
        The outermost attribute statement of a write loop nest.

    Returns
    -------
    SerialFeatureMap
        The serializable feature map.
    SerialBlockConfig
        The serializable block config.
    input_pointer : tvm.tir.Var
        The pointer consumed by the operation.
    output_pointer : tvm.tir.Var
        The pointer produced by the operation.
    """
    attrs, body = get_op_attrs(stmt)
    _, h, w, c, _, inner = get_outer_loops(body, attrs["layout"])
    input_pointer = inner.value.buffer.data
    output_pointer = inner.buffer.data
    # Needed for stride calculation, can replace with
    # inner.value.buffer.strides in future.
    assert len(inner.indices) == 1, "Ethos-U DMA expects flattened buffers"
    stride_vars = [h.loop_var, w.loop_var, c.loop_var]
    strides = get_strides(inner.indices[0], stride_vars)
    base_address = [get_base_address(index) for index in inner.indices]
    data_type = inner.buffer.data.type_annotation.element_type.dtype
    # The block config attributes are optional; fall back to an all-zero
    # config when the scheduler did not attach them.
    if "block_config_height" in attrs:
        block_config = SerialBlockConfig(
            height=int(attrs["block_config_height"]),
            width=int(attrs["block_config_width"]),
            depth=int(attrs["block_config_depth"]),
        )
    else:
        block_config = SerialBlockConfig(0, 0, 0)
    return (
        SerialFeatureMap(
            data_type=data_type,
            height=h.extent,
            width=w.extent,
            channels=c.extent,
            tile_height_0=h.extent,
            tile_height_1=0,
            tile_width_0=w.extent,
            tile_address_0=tvm.tir.BufferLoad(inner.buffer, base_address),
            tile_address_1=0,
            tile_address_2=0,
            tile_address_3=0,
            scale=attrs["scale"],
            zero_point=attrs["zero_point"],
            layout=attrs["layout"],
            stride_h=strides[0],
            stride_w=strides[1],
            stride_c=strides[2],
        ),
        block_config,
        input_pointer,
        output_pointer,
    )
def get_ifm_params(pointer, producers_consumers, stmt):
    """Get the parameters associated with the DMA capabilities for an IFM.

    Parameters
    ----------
    pointer : tvm.tir.Var
        The pointer that the IFM DMA pipeline produces.
    producers_consumers : ProducersConsumers
        It associates pointers with the loop nest that produces
        their values and with the loop nest that consumes their values.
    stmt : tvm.tir.AttrStmt
        The loop nest that consumes the IFM.

    Returns
    -------
    serial_ifm : SerialFeatureMap
        The serializable IFM.
    serial_padding : SerialPadding
        The serializable padding.
    """
    # Walk the DMA pipeline backwards: pad -> upscale -> convert_to_nhwc -> read.
    pad = producers_consumers.get_producer(pointer, stmt)
    serial_padding, input_pointer, _ = get_pad_params(pad)
    upscale = producers_consumers.get_producer(input_pointer, pad)
    input_pointer, _ = get_upscale_params(upscale)
    convert_to_nhwc = producers_consumers.get_producer(input_pointer, upscale)
    in_channels, input_pointer, _ = get_convert_to_nhwc_params(convert_to_nhwc)
    read = producers_consumers.get_producer(input_pointer, convert_to_nhwc)
    serial_ifm, _, _ = get_read_params(read)
    serial_ifm.channels = in_channels
    floor_mod_stmt = None
    for_stmt = None

    def _get_buffer_var(stmt):
        # Record the last For and the last FloorMod seen in post-order.
        nonlocal for_stmt
        nonlocal floor_mod_stmt
        if isinstance(stmt, tvm.tir.For):
            for_stmt = stmt
        if isinstance(stmt, tvm.tir.FloorMod):
            floor_mod_stmt = stmt

    tvm.tir.stmt_functor.post_order_visit(stmt, _get_buffer_var)
    # A FloorMod in the consuming loop nest indicates a rolling buffer:
    # expand the IFM along the rolling axis and rebase its tile address.
    if floor_mod_stmt is not None:
        layout = get_op_attrs(read)[0]["layout"]
        channels = serial_ifm.channels
        if for_stmt.body.loop_var == floor_mod_stmt.a.a.a:
            # Rolling along the height axis.
            height_a = floor_mod_stmt.b - floor_mod_stmt.a.b
            height_b = serial_ifm.height
            serial_ifm.height = height_a + height_b
            serial_ifm.tile_height_0 = serial_ifm.height
            address = serial_ifm.tile_address_0
            # Address offset of height_a rows in the given layout
            # (NHCWB16 packs channels in bricks of 16).
            offset = (
                height_a * (channels // 16 + 1) * serial_ifm.width * 16
                if layout == "NHCWB16"
                else height_a * serial_ifm.width * channels
            )
            serial_ifm.tile_address_0 = tvm.tir.BufferLoad(
                address.buffer, [address.indices[0] - offset]
            )
        else:
            # Rolling along the width axis.
            width_a = floor_mod_stmt.b - floor_mod_stmt.a.b
            width_b = serial_ifm.width
            serial_ifm.width = width_a + width_b
            serial_ifm.tile_width_0 = serial_ifm.width
            address = serial_ifm.tile_address_0
            offset = width_a * 16 if layout == "NHCWB16" else width_a * channels
            serial_ifm.tile_address_0 = tvm.tir.BufferLoad(
                address.buffer, [address.indices[0] - offset]
            )
    return serial_ifm, serial_padding
def get_ofm_params(pointer, producers_consumers, stmt):
    """Get the parameters associated with the DMA capabilities for an OFM.

    Parameters
    ----------
    pointer : tvm.tir.Var
        The pointer that the OFM DMA pipeline consumes.
    producers_consumers : ProducersConsumers
        It associates pointers with the loop nest that produces
        their values and with the loop nest that consumes their values.
    stmt : tvm.tir.AttrStmt
        The loop nest that produces the OFM.

    Returns
    -------
    serial_ofm : SerialFeatureMap
        The serializable OFM.
    serial_block_config : SerialBlockConfig
        The serializable block config.
    output_pointer : tvm.tir.Var
        The pointer that the OFM DMA pipeline produces.
    is_allocator : bool
        Whether this operator allocates its output.
    """
    # Walk the DMA pipeline forwards: convert_to_nhcwb16 -> write.
    convert_to_nhcwb16 = producers_consumers.get_consumer(pointer, stmt)
    out_channels, _, output_pointer = get_convert_to_nhcwb16_params(convert_to_nhcwb16)
    write = producers_consumers.get_consumer(output_pointer, convert_to_nhcwb16)
    serial_ofm, serial_block_config, _, output_pointer = get_write_params(write)
    # The operator allocates its output only when the write loop nest is
    # also the producer of the pointer it writes to.
    producer = producers_consumers.get_producer(output_pointer, write)
    is_allocator = producer is not None and producer == write
    serial_ofm.channels = out_channels
    return serial_ofm, serial_block_config, output_pointer, is_allocator
| 17,994 | 33.874031 | 91 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/tir/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract parameters from the transform operators in TIR."""
import tvm
from .spec import SerialCopy
from .utils import get_base_address, get_op_attrs
def get_copy_params(stmt, producers_consumers):
    """Get the parameters necessary to construct a call_extern for a copy.

    Parameters
    ----------
    stmt : tvm.tir.AttrStmt
        The outermost attribute statement of a copy loop nest.
    producers_consumers : ProducersConsumers
        It associates pointers with the loop nest that produces
        their values and with the loop nest that consumes their values.

    Returns
    -------
    SerialCopy
        The parameters needed to construct a copy.
    tvm.tir.Var
        The output pointer of the copy operation.
    replace_pointer : tvm.tir.Var
        Always None: a copy has no pointer to replace.
    is_allocator : bool
        Always True: a copy allocates its output.
    """
    _, body = get_op_attrs(stmt)
    store = body.body
    load = store.value
    # Resolve the first address touched on each side of the copy.
    store_base = [get_base_address(idx) for idx in store.indices]
    load_base = [get_base_address(idx) for idx in load.indices]
    serial_copy = SerialCopy(
        read_address=tvm.tir.expr.BufferLoad(load.buffer, load_base),
        length=body.extent,
        write_address=tvm.tir.expr.BufferLoad(store.buffer, store_base),
    )
    return serial_copy, store.buffer.data, None, True
| 2,369 | 36.619048 | 82 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/tir/unary_elementwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract information from the unary_elementwise operators in TIR."""
from tvm import tir
from .utils import get_outer_loops, get_op_attrs
from .dma import get_ifm_params, get_ofm_params
from .spec import SerialActivation, SerialUnaryElementwise
def get_unary_elementwise_params(stmt, producers_consumers):
    """Get the parameters necessary to construct a call_extern for a unary_elementwise.

    Parameters
    ----------
    stmt : tvm.tir.AttrStmt
        The outermost attribute statement of a unary elementwise loop nest.
    producers_consumers : ProducersConsumers
        It associates pointers with the loop nest that produces
        their values and with the loop nest that consumes their values.

    Returns
    -------
    SerialUnaryElementwise
        The parameters needed to construct a unary elementwise operator.
    output_pointer : tvm.tir.Var
        The output pointer of the unary elementwise operation.
    replace_pointer : tvm.tir.Var
        The output pointer of the DMA write operation, which is to replace
        the unary elementwise output pointer.
    is_allocator : bool
        Whether this operator allocates its output.
    """
    attrs, body = get_op_attrs(stmt)
    _, _, _, _, _, inner = get_outer_loops(body, "NHWC")
    store_value = inner.value
    input_pointer = None
    if isinstance(store_value, tir.expr.Select):
        # ABS is lowered to a Select expression.
        input_pointer = store_value.condition.b.buffer.data
    elif isinstance(store_value, tir.expr.Sub):
        # CLZ is lowered to a Sub expression.
        input_pointer = store_value.b.args[0].buffer.data
    output_pointer = inner.buffer.data
    # Feature map info for both sides of the operation.
    serial_ifm, _ = get_ifm_params(input_pointer, producers_consumers, stmt)
    serial_ofm, serial_block_config, replace_pointer, is_allocator = get_ofm_params(
        output_pointer, producers_consumers, stmt
    )
    # Activation (clipping) info.
    serial_activation = SerialActivation(
        op=attrs["activation"], clip_min=attrs["clip_min"], clip_max=attrs["clip_max"]
    )
    serial_op = SerialUnaryElementwise(
        ifm=serial_ifm,
        ofm=serial_ofm,
        operator_type=attrs["operator_type"],
        activation=serial_activation,
        rounding_mode=attrs["rounding_mode"],
        block_config=serial_block_config,
    )
    return serial_op, output_pointer, replace_pointer, is_allocator
| 3,189 | 38.382716 | 87 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/tir/depthwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract information from the depthwise convolution operators in TIR."""
from typing import Tuple
import tvm
from ..vela_api import SCALE_BIAS_LENGTH
from .utils import get_outer_loops, get_op_attrs, get_base_address, get_loads, get_stores
from .dma import get_ifm_params, get_ofm_params
from .spec import (
SerialKernel,
SerialAddressRange,
SerialActivation,
Serial2DDepthwise,
)
from .producers_consumers import ProducersConsumers
def get_depthwise_conv2d_params(
    stmt: tvm.tir.AttrStmt, producers_consumers: ProducersConsumers
) -> Tuple[Serial2DDepthwise, tvm.tir.Var, tvm.tir.Var]:
    """Get the parameters necessary to construct a call_extern for a depthwise_conv2d.

    Parameters
    ----------
    stmt : tvm.tir.AttrStmt
        The outermost attribute statement of a depthwise loop nest.
    producers_consumers : ProducersConsumers
        It associates pointers with the loop nest that produces
        their values and with the loop nest that consumes their values.

    Returns
    -------
    Serial2DDepthwise
        The parameters needed to construct a 2D depthwise.
    output_pointer : tvm.tir.Var
        The output pointer of the convolution operation.
    replace_pointer : tvm.tir.Var
        The output pointer of the DMA write operation, which is to replace
        the convolution output pointer.
    is_allocator : bool
        Whether this operator allocates its output.
    """
    attrs, body = get_op_attrs(stmt)
    _, _, _, _, _, inner = get_outer_loops(body, "NHWC")
    # rh/rw are the kernel height/width reduction loops.
    rh = inner
    rw = rh.body
    # loads = [output, input, weights, scale_bias, scale_bias]
    loads = get_loads(rw.body)
    # stores = [output]
    stores = get_stores(rw.body)
    input_pointer = loads[1].buffer.data
    output_pointer = stores[0].buffer.data
    # Get feature map info
    serial_ifm, serial_padding = get_ifm_params(input_pointer, producers_consumers, stmt)
    serial_ofm, serial_block_config, replace_pointer, is_allocator = get_ofm_params(
        output_pointer, producers_consumers, stmt
    )
    # Get kernel info
    serial_kernel = SerialKernel(
        width=int(rw.extent),
        height=int(rh.extent),
        stride_w=int(attrs["stride_w"]),
        stride_h=int(attrs["stride_h"]),
        dilation_w=int(attrs["dilation_w"]),
        dilation_h=int(attrs["dilation_h"]),
    )
    # Get scale_bias info
    scale_bias_load = loads[3]
    scale_bias_base = [get_base_address(index) for index in scale_bias_load.indices]
    serial_scale_bias = SerialAddressRange(
        address=tvm.tir.BufferLoad(scale_bias_load.buffer, scale_bias_base),
        # SCALE_BIAS_LENGTH bytes per output channel (serial_ofm[3]).
        length=SCALE_BIAS_LENGTH * serial_ofm[3],
    )
    # Get weight info
    weight_load = loads[2]
    weight_base = [get_base_address(index) for index in weight_load.indices]
    serial_weight = SerialAddressRange(
        address=tvm.tir.BufferLoad(weight_load.buffer, weight_base),
        # One kernel (width x height) per output channel.
        length=serial_ofm[3] * serial_kernel[0] * serial_kernel[1],
    )
    # Get activation info
    serial_activation = SerialActivation(
        op=attrs["activation"], clip_min=attrs["clip_min"], clip_max=attrs["clip_max"]
    )
    return (
        Serial2DDepthwise(
            ifm=serial_ifm,
            ofm=serial_ofm,
            kernel=serial_kernel,
            weight=serial_weight,
            weight_zero_point=attrs["weight_zero_point"],
            scale_bias=serial_scale_bias,
            padding=serial_padding,
            activation=serial_activation,
            rounding_mode=attrs["rounding_mode"],
            upscale="NONE",
            block_config=serial_block_config,
        ),
        output_pointer,
        replace_pointer,
        is_allocator,
    )
| 4,526 | 36.725 | 89 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/tir/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Helper utility functions used by the NPU TIR compiler"""
import tvm
from tvm import arith
def get_op_attrs(stmt):
    """Iterate through nested attribute statements accumulating their values
    in an attribute dictionary.

    The "pragma_" prefix is removed as a convenience.

    Parameters
    ----------
    stmt : tvm.tir.AttrStmt
        The outermost attribute statement to begin from.

    Returns
    -------
    attrs : dict of str to object
        The attribute dictionary.
    stmt : tvm.tir.Stmt
        The body after having collected the final attribute statement.
    """
    attrs = {}
    current = stmt
    while isinstance(current, tvm.tir.AttrStmt):
        # The pragma scheduler prefixes attribute names with "pragma_";
        # strip it so callers can use the bare attribute name.
        attrs[current.attr_key.replace("pragma_", "")] = current.value
        current = current.body
    return attrs, current
def get_strides(index, stride_vars):
    """Get the striding of given vars in an indexing expression.

    Parameters
    ----------
    index : tvm.tir.PrimExpr
        The index expression where the stride vars are present.
    stride_vars : list of tvm.tir.Var
        The vars to determine the striding of.

    Returns
    -------
    strides : list of int
        The striding of each stride var in the index expression
        in the same order as the stride vars were given.
    """
    strides = [1] * len(stride_vars)
    dmap = {}

    def _visit(stmt):
        # Pin every variable in the expression to the interval [0, 0].
        if isinstance(stmt, tvm.tir.Var):
            dmap[stmt] = arith.IntervalSet(0, 0)

    tvm.tir.stmt_functor.post_order_visit(index, _visit)
    min_value = int(arith.Analyzer().int_set(index, dmap).min_value)
    for var in dmap:
        if var in stride_vars:
            # The stride of var is how much the index changes when var
            # increases by one: set only var's interval to [1, 1] and
            # compare against the all-zero baseline, then restore it.
            # NOTE: Doing this using a [0, 1] interval doesn't work reliably
            # Seems to be a bug
            dmap[var] = arith.IntervalSet(1, 1)
            max_value = int(arith.Analyzer().int_set(index, dmap).max_value)
            stride = int(max_value - min_value)
            i = stride_vars.index(var)
            strides[i] = stride
            dmap[var] = arith.IntervalSet(0, 0)
    return strides
def get_base_address(index):
    """Determine the first (base) address accessed by an index expression.

    Parameters
    ----------
    index : tvm.tir.PrimExpr
        The index expression to determine the base address of.

    Returns
    -------
    base_address : int
        The first address accessed by the index expression.
    """
    var_intervals = {}

    def _collect_vars(node):
        if isinstance(node, tvm.tir.Var):
            var_intervals[node] = arith.IntervalSet(0, 0)

    tvm.tir.stmt_functor.post_order_visit(index, _collect_vars)
    # With every loop variable pinned to zero, the minimum of the index
    # expression is the first address it can touch.
    return int(arith.Analyzer().int_set(index, var_intervals).min_value)
def get_outer_loops(stmt, layout):
    """Get the outer loops of an operator.

    Parameters
    ----------
    stmt : tvm.tir.For
        The outermost loop.
    layout : str
        The output tensor layout (NHWC or NHCWB16).

    Returns
    -------
    n : tvm.tir.For
        The batch loop.
    h : tvm.tir.For
        The height loop.
    w : tvm.tir.For
        The width loop.
    c : tvm.tir.For
        The channels loop.
    b : tvm.tir.For
        The brick loop. None for NHWC
    body : tvm.tir.Stmt
        The inner body of the loops.
    """
    if layout == "NHWC":
        loops = [stmt]
        for _ in range(3):
            loops.append(loops[-1].body)
        n, h, w, c = loops
        # NHWC has no brick dimension, so return a zero-extent dummy loop
        # to keep the tuple shape uniform across layouts.
        dummy_b = tvm.tir.For(tvm.tir.Var("b", "int32"), 0, 0, 0, tvm.tir.Evaluate(0))
        return n, h, w, c, dummy_b, c.body
    if layout == "NHCWB16":
        loops = [stmt]
        for _ in range(4):
            loops.append(loops[-1].body)
        n, h, cb, w, b = loops
        return n, h, w, cb, b, b.body
    return None
def collect_buffer_map(stmt):
    """Collect a map of Var -> Buffer

    Generate a map from a buffer's backing `tir.Var` to the
    `tir.Buffer` object that uses it. If multiple such buffers exist,
    return the first occurrence.

    Parameters
    ----------
    stmt : tvm.tir.Stmt
        The statement to collect the buffers from.

    Returns
    -------
    buffer_map : Dict[Var, Buffer]
        The map from buffer var to the buffers that use it.
    """
    buffer_map = {}

    def _record(node):
        if isinstance(node, (tvm.tir.BufferLoad, tvm.tir.BufferStore)):
            # setdefault keeps the first buffer seen for each backing var.
            buffer_map.setdefault(node.buffer.data, node.buffer)

    tvm.tir.stmt_functor.post_order_visit(stmt, _record)
    return buffer_map
def get_loads(stmt):
    """Get the BufferLoad statements.

    Parameters
    ----------
    stmt : tvm.tir.Stmt
        The statement to get the BufferLoads from.

    Returns
    -------
    loads : list of tvm.tir.BufferLoad
        The BufferLoads found.
    """
    found = []

    def _collect(node):
        # Gather loads in post-order traversal order.
        if isinstance(node, tvm.tir.BufferLoad):
            found.append(node)

    tvm.tir.stmt_functor.post_order_visit(stmt, _collect)
    return found
def get_stores(stmt):
    """Get the BufferStore statements.

    Parameters
    ----------
    stmt : tvm.tir.Stmt
        The statement to get the BufferStores from.

    Returns
    -------
    stores : list of tvm.tir.BufferStore
        The BufferStores found.
    """
    found = []
    # Gather stores in post-order traversal order.
    tvm.tir.stmt_functor.post_order_visit(
        stmt, lambda node: found.append(node) if isinstance(node, tvm.tir.BufferStore) else None
    )
    return found
| 6,268 | 25.451477 | 80 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/tir/producers_consumers.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""The ProducersConsumers class"""
from typing import Optional
from collections.abc import KeysView
import tvm
class ProducersConsumers:
    """It associates pointers with the loop nest that produces
    their values and with the loop nest that consumes their values."""

    def __init__(self) -> None:
        # attr -> position of its producer entry; also indexes self.consumers.
        self.indices: dict[tvm.tir.AttrStmt, int] = {}
        # Ordered (attr, produced pointer) pairs, in insertion order.
        self.producers: list[(tvm.tir.AttrStmt, tvm.tir.expr.Var)] = []
        # Ordered (attr, [consumed pointers]) pairs, parallel to producers.
        self.consumers: list[(tvm.tir.AttrStmt, list[tvm.tir.expr.Var])] = []
        # Variables backed by an allocation; set via add_allocate_variables.
        self.allocate_variables: Optional[KeysView] = None

    def add_producer(self, var: tvm.tir.expr.Var, attr: tvm.tir.AttrStmt) -> None:
        """Add the attribute statement attr as producer of the variable var."""
        self.indices[attr] = len(self.producers)
        self.producers.append((attr, var))

    def get_producer(
        self, var: tvm.tir.expr.Var, attr: tvm.tir.AttrStmt
    ) -> Optional[tvm.tir.AttrStmt]:
        """Get the last attribute statement which produces the variable var when
        the current attribute statement is attr."""
        if var not in self.allocate_variables:
            return None
        index = self.indices[attr]
        # Walk backwards from attr (inclusive) to the most recent producer.
        for i in reversed(range(index + 1)):
            if self.producers[i][1] == var:
                return self.producers[i][0]
        return None

    def get_last_producer(self, var: tvm.tir.expr.Var) -> Optional[tvm.tir.AttrStmt]:
        """Get the last attribute statement which produces the variable var."""
        return self.get_producer(var, self.producers[-1][0])

    def add_allocate_variables(self, allocate_variables: KeysView) -> None:
        """Add the allocated variables."""
        self.allocate_variables = allocate_variables

    def add_consumer(self, var: tvm.tir.expr.Var, attr: tvm.tir.AttrStmt) -> None:
        """Add the attribute statement attr as consumer of the variable var."""
        index = self.indices[attr]
        if index < len(self.consumers):
            # attr already has a consumer entry; extend its pointer list.
            self.consumers[index][1].append(var)
        else:
            self.consumers.append((attr, [var]))

    def get_consumer(
        self, var: tvm.tir.expr.Var, attr: tvm.tir.AttrStmt
    ) -> Optional[tvm.tir.AttrStmt]:
        """Get the first attribute statement which consumes the variable var when
        the current attribute statement is attr."""
        index = self.indices[attr]
        # Scan forwards from attr (inclusive) to the first consumer.
        for i in range(index, len(self.consumers)):
            if var in self.consumers[i][1]:
                return self.consumers[i][0]
        return None
| 3,379 | 41.78481 | 85 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/tir/convolution.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract parameters from the convolution operators in TIR."""
import math
from ethosu.vela import api as vapi
import tvm
from ..vela_api import SCALE_BIAS_LENGTH, get_accelerator_config
from .utils import get_outer_loops, get_op_attrs, get_base_address, get_loads, get_stores
from .dma import get_ifm_params, get_ofm_params
from .spec import SerialKernel, SerialAddressRange, SerialActivation, Serial2DConvolution
def get_conv2d_params(stmt, producers_consumers):
    """Get the parameters necessary to construct a call_extern for a 2D convolution.

    Parameters
    ----------
    stmt : tvm.tir.AttrStmt
        The outermost attribute statement of a convolution loop nest.
    producers_consumers : ProducersConsumers
        It associates pointers with the loop nest that produces
        their values and with the loop nest that consumes their values.

    Returns
    -------
    Serial2DConvolution
        The parameters needed to construct a 2D convolution.
    output_pointer : tvm.tir.Var
        The output pointer of the convolution operation.
    replace_pointer : tvm.tir.Var
        The output pointer of the DMA write operation, which is to replace
        the convolution output pointer.
    is_allocator : bool
        Whether this operator allocates its output.
    """
    accel_config = get_accelerator_config()
    attrs, body = get_op_attrs(stmt)
    _, _, _, _, _, inner = get_outer_loops(body, "NHWC")
    # rh/rw/rc are the kernel height/width/channel reduction loops.
    rh = inner
    rw = rh.body
    rc = rw.body
    # loads = [output, input, weights, scale_bias, scale_bias, LUT, LUT]
    loads = get_loads(rc.body)
    # stores = [output]
    stores = get_stores(rc.body)
    input_pointer = loads[1].buffer.data
    output_pointer = stores[0].buffer.data
    # Get feature map info
    serial_ifm, serial_padding = get_ifm_params(input_pointer, producers_consumers, stmt)
    serial_ofm, serial_block_config, replace_pointer, is_allocator = get_ofm_params(
        output_pointer, producers_consumers, stmt
    )
    # Get kernel info
    serial_kernel = SerialKernel(
        width=int(rw.extent),
        height=int(rh.extent),
        stride_w=int(attrs["stride_w"]),
        stride_h=int(attrs["stride_h"]),
        dilation_w=int(attrs["dilation_w"]),
        dilation_h=int(attrs["dilation_h"]),
    )
    # Get scale_bias info
    scale_bias_load = loads[3]
    scale_bias_base = [get_base_address(index) for index in scale_bias_load.indices]
    # Get weight info
    weight_load = loads[2]
    weight_base = [get_base_address(index) for index in weight_load.indices]
    channels = serial_ofm[3] if isinstance(serial_ofm[3], int) else serial_ofm[3].value
    if accel_config == vapi.NpuAccelerator.Ethos_U65_512:
        # NOTE(review): on Ethos-U65-512 the weight and scale/bias streams
        # are split into two halves (ceil/floor over 2) -- presumably one
        # per weight decoder; confirm against the Vela API documentation.
        scale_bias_length = SCALE_BIAS_LENGTH * math.ceil(channels / 2)
        scale_bias2_length = SCALE_BIAS_LENGTH * math.floor(channels / 2)
        serial_scale_bias = SerialAddressRange(
            address=tvm.tir.BufferLoad(scale_bias_load.buffer, scale_bias_base),
            length=scale_bias_length,
        )
        serial_scale_bias2 = SerialAddressRange(
            address=tvm.tir.BufferLoad(
                scale_bias_load.buffer, [scale_bias_base[0] + scale_bias_length]
            ),
            length=scale_bias2_length,
        )
        weight_length = (
            channels * serial_kernel[0] * serial_kernel[1] * math.ceil(rc.extent.value / 2)
        )
        weight2_length = (
            channels * serial_kernel[0] * serial_kernel[1] * math.floor(rc.extent.value / 2)
        )
        serial_weight = SerialAddressRange(
            address=tvm.tir.BufferLoad(weight_load.buffer, weight_base),
            length=weight_length,
        )
        serial_weight2 = SerialAddressRange(
            address=tvm.tir.BufferLoad(weight_load.buffer, [weight_base[0] + weight_length]),
            length=weight2_length,
        )
    else:
        scale_bias_length = SCALE_BIAS_LENGTH * channels
        serial_scale_bias = SerialAddressRange(
            address=tvm.tir.BufferLoad(scale_bias_load.buffer, scale_bias_base),
            length=scale_bias_length,
        )
        # Insert -1s into the spec to denote the absence of the other pointer
        serial_scale_bias2 = SerialAddressRange(
            address=tvm.tir.IntImm("int8", -1),
            length=tvm.tir.IntImm("int8", -1),
        )
        weight_length = channels * serial_kernel[0] * serial_kernel[1] * rc.extent.value
        serial_weight = SerialAddressRange(
            address=tvm.tir.BufferLoad(weight_load.buffer, weight_base),
            length=weight_length,
        )
        serial_weight2 = SerialAddressRange(
            address=tvm.tir.IntImm("int8", -1),
            length=tvm.tir.IntImm("int8", -1),
        )
    # Get activation info
    serial_activation = SerialActivation(
        op=attrs["activation"], clip_min=attrs["clip_min"], clip_max=attrs["clip_max"]
    )
    return (
        Serial2DConvolution(
            ifm=serial_ifm,
            ofm=serial_ofm,
            kernel=serial_kernel,
            weight=serial_weight,
            weight2=serial_weight2,
            weight_zero_point=attrs["weight_zero_point"],
            scale_bias=serial_scale_bias,
            scale_bias2=serial_scale_bias2,
            padding=serial_padding,
            activation=serial_activation,
            rounding_mode=attrs["rounding_mode"],
            upscale=attrs["upscale"],
            block_config=serial_block_config,
        ),
        output_pointer,
        replace_pointer,
        is_allocator,
    )
| 6,379 | 37.902439 | 93 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/tir/scheduler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Scheduling for Arm(R) Ethos(TM)-U NPU."""
import tvm
from tvm.contrib.ethosu.cascader import Propagator
def schedule(cached_func, const_dict, cascader=None):
    """Schedule a CachedFunc for NPU compilation.

    Runs the fixed scheduling pipeline: optional cascading, no-op
    inlining, LUT copying, pragma annotation and cache-read tagging.

    Parameters
    ----------
    cached_func : CachedFunc
        The CachedFunc to schedule.
    const_dict : dict of int to numpy.ndarray
        The constant dictionary.
    cascader : callable, optional
        A cascading function to apply optimizing scheduling
        to the graph.

    Returns
    -------
    s : tvm.te.Schedule
        The completed schedule for the graph.
    """
    s = tvm.te.create_schedule([t.op for t in cached_func.outputs])
    # The cascader (if provided) runs first so the later annotation steps
    # operate on the loop structure it creates.
    if cascader:
        cascader(cached_func, const_dict, s)
    inline_no_ops(cached_func, s)
    copy_luts()(cached_func, const_dict, s)
    # NOTE(review): no-op inlining is deliberately repeated after copy_luts;
    # presumably the LUT copy step can interact with stages affected by
    # inlining -- confirm before reordering this pipeline.
    inline_no_ops(cached_func, s)
    schedule_pragmas(s)
    schedule_cache_reads(s)
    return s
def tile_nd(s, tensor, tile):
    """Scheduling utility to perform N-dimensional tiling.

    Parameters
    ----------
    s : tvm.te.Schedule
        The schedule to apply the tiling to.
    tensor : tvm.te.Tensor
        The tensor to apply the tiling to.
    tile : tuple
        The N-dimensional tile size.

    Returns
    -------
    outer_indices : list of tvm.tir.IterVar
        The outer iteration variables.
    inner_indices : list of tvm.tir.IterVar
        The inner iteration variables.
    """
    outer_indices = []
    inner_indices = []
    # Split each axis by its corresponding tile factor; `tile` is expected
    # to supply one factor per tensor axis.
    for axis, factor in zip(tensor.op.axis, tile):
        outer_iv, inner_iv = s[tensor].split(axis, factor)
        outer_indices.append(outer_iv)
        inner_indices.append(inner_iv)
    # Hoist every outer loop above every inner loop.
    s[tensor].reorder(*outer_indices, *inner_indices)
    return outer_indices, inner_indices
def total_cascader(stripe_size):
    """A demo/test cascader which tries to cascade every op in the graph together.

    The desired output stripe size should be specified. Note this only works
    for single output graphs.

    Parameters
    ----------
    stripe_size : tuple
        The output stripe size.

    Returns
    -------
    func : callable
        The cascading function.
    """
    def _cascader(cached_func, const_dict, sch):
        # Tensors that already have a compute_at location; guards against
        # revisiting producers reachable through multiple paths.
        scheduled = set()
        def _visit(tensor, stage, ax):
            # Depth-first walk of the producer graph, anchoring every compute
            # stage at axis `ax` of the output stage.
            if tensor not in scheduled and isinstance(tensor.op, tvm.te.ComputeOp):
                sch[tensor].compute_at(stage, ax)
                scheduled.add(tensor)
                for input_tensor in tensor.op.input_tensors:
                    _visit(input_tensor, stage, ax)
        # Only single-output graphs are supported by this cascader.
        assert len(cached_func.outputs) == 1
        out = cached_func.outputs[0]
        # Tile the output by stripe_size and unroll the outer loops, leaving
        # only the inner (stripe) loops; producers are then computed at the
        # innermost outer axis.
        oi, _ = tile_nd(sch, out, stripe_size)
        for ax in oi:
            sch[out].unroll(ax)
        for input_tensor in out.op.input_tensors:
            _visit(input_tensor, sch[out], oi[-1])
    return _cascader
def copy_constants():
    """A simple planner which copies all constant data from FLASH -> SRAM.

    Returns
    -------
    planner : callable
        The planning function.
    """
    def _planner(cached_func, const_dict, sch):
        # Tensors already visited during the traversal.
        planned = set()  # type: ignore
        def _is_matmul(tensor):
            # Heuristic for a "fully connected"-like conv2d: both of the
            # first two inputs have a 1x1 spatial extent. Presumably these
            # are the IFM and the weights -- TODO confirm.
            if tensor.name not in ["ethosu_conv2d"]:
                return False
            a, b = tensor.op.input_tensors[0:2]
            return a.shape[1:3] == [1, 1] and b.shape[1:3] == [1, 1]
        def _visit(tensor, reader, lut):
            # DFS over producers; `reader` is the consuming compute op and
            # `lut` is the current LUT tensor (excluded from copying here,
            # as LUTs are handled by copy_luts).
            if tensor not in planned:
                planned.add(tensor)
                if isinstance(tensor.op, tvm.te.PlaceholderOp) and tensor != lut:
                    # Find index of input using 'same_as' check to prevent equality
                    # ambiguity when encountering a scalar.
                    is_same = [var.same_as(tensor) for var in cached_func.inputs]
                    index = is_same.index(True)
                    # Along with constants, also skip for FullyConnected to correspond
                    # with Vela behavior
                    if index in const_dict and not _is_matmul(reader):
                        sch.cache_read(tensor, "global", [reader])
                elif isinstance(tensor.op, tvm.te.ComputeOp):
                    if "lut" in tensor.op.attrs.keys():
                        lut = tensor.op.attrs["lut"]
                    for input_tensor in tensor.op.input_tensors:
                        _visit(input_tensor, tensor, lut)
        for output_tensor in cached_func.outputs:
            _visit(output_tensor, None, None)
    return _planner
def copy_luts():
    """A scheduler that copies LUTs to SHRAM.

    Returns
    -------
    planner : callable
        The planning function.
    """
    def _planner(te_graph, const_dict, sch):
        # Tensors already visited during the traversal.
        planned = set()  # type: ignore
        def _visit(tensor, reader, lut):
            # DFS over producers. Unlike copy_constants, only the tensor
            # identified as the current LUT is cached, and into "local"
            # (SHRAM) rather than "global" storage.
            if tensor not in planned:
                planned.add(tensor)
                if isinstance(tensor.op, tvm.te.PlaceholderOp) and tensor == lut:
                    index = list(te_graph.inputs).index(tensor)
                    if index in const_dict:
                        sch.cache_read(tensor, "local", [reader])
                elif isinstance(tensor.op, tvm.te.ComputeOp):
                    # A compute op may designate its LUT via the "lut" attr;
                    # propagate it down to the placeholder check above.
                    if "lut" in tensor.op.attrs.keys():
                        lut = tensor.op.attrs["lut"]
                    for input_tensor in tensor.op.input_tensors:
                        _visit(input_tensor, tensor, lut)
        for output_tensor in te_graph.outputs:
            _visit(output_tensor, None, None)
    return _planner
def schedule_pragmas(sch):
    """Add pragmas to the operators that require them.

    This adds the pragmas used for codegen to the NPU ops.
    They are taken directly from the TE compute op's attributes.
    Modifies the schedule in-place.

    Parameters
    ----------
    sch : tvm.te.Schedule
        The schedule.
    """
    def _add_pragmas(stage, ax):
        # Concatenates have no "op" attr of their own; tag them explicitly.
        if stage.op.name == "T_concat":
            stage.pragma(ax, "op", "ethosu_concatenate")
        # Test membership on the attrs map directly instead of materializing
        # a throwaway list of attribute names (consistent with the
        # `.keys()` usage elsewhere in this module).
        if "op" in stage.op.attrs.keys():
            stage.pragma(ax, "op", stage.op.attrs["op"])
            # Forward every remaining attribute as a pragma, except the
            # already-handled "op", the "lut" marker and cascader Propagators.
            for attr, val in stage.op.attrs.items():
                if attr not in ("op", "lut") and not isinstance(val, Propagator):
                    stage.pragma(ax, str(attr), val)
        # Block config pragmas may have been attached to the first op axis
        # (e.g. by a cascader); re-emit them on the chosen pragma axis.
        if stage.op.axis[0] in stage.iter_var_attrs:
            attrs = stage.iter_var_attrs[stage.op.axis[0]]
            if "block_config_height" in attrs.pragma_keys:
                pragmas = dict(zip([k.value for k in attrs.pragma_keys], attrs.pragma_values))
                stage.pragma(ax, "block_config_height", pragmas["block_config_height"])
                stage.pragma(ax, "block_config_width", pragmas["block_config_width"])
                stage.pragma(ax, "block_config_depth", pragmas["block_config_depth"])
    for stage in sch.stages:
        if (
            isinstance(stage.op, tvm.te.ComputeOp)
            and len(stage.op.axis) + len(stage.op.reduce_axis) > 0
        ):
            # The logic ensures the pragmas are assigned to the inner tiling loops
            # rather than the outer ones (which end up getting unrolled).
            num_inner_loops = len(stage.op.axis) + len(stage.op.reduce_axis)
            ax = stage.leaf_iter_vars[-num_inner_loops]
            _add_pragmas(stage, ax)
def schedule_cache_reads(sch):
    """Schedule cache reads that have been introduced.

    There are two things we need to happen to cache_read stages. They should be tagged
    with the 'ethosu_copy' pragma and have all their axes fused to make them 1D.

    Parameters
    ----------
    sch : tvm.te.Schedule
        The schedule.
    """
    def _detect_cache_read(stage):
        # Try and detect cache_reads by checking if the compute op is identity
        if isinstance(stage.op, tvm.te.ComputeOp):
            op = stage.op
            # NPU ops are never cache reads, even if they look like identities.
            if "ethosu" in op.name:
                return False
            axes = op.axis
            if len(op.input_tensors) == 1:
                tensor = op.input_tensors[0]
                # Indexing the input with the op's own axes raises ValueError
                # when the arities don't match, which also rules out a copy.
                try:
                    identity_op = tensor(*axes)
                except ValueError:
                    return False
                # The stage is a copy iff its body is exactly input[axes].
                if tvm.tir.analysis.expr_deep_equal(identity_op, op.body[0]):
                    return True
        return False
    for stage in sch.stages:
        if stage.attach_type != 2: # Not inlined
            if _detect_cache_read(stage):
                # Flatten the copy to a single 1D loop for codegen.
                fax = stage.fuse(*stage.op.axis)
                # propagate pragmas placed on the outer loop
                if len(stage.op.axis) > 0 and stage.op.axis[0] in stage.iter_var_attrs:
                    attrs = stage.iter_var_attrs[stage.op.axis[0]]
                    for k, v in zip(attrs.pragma_keys, attrs.pragma_values):
                        stage.pragma(fax, k.value, v)
                stage.pragma(fax, "op", "ethosu_copy")
def inline_no_ops(cached_func, sch):
    """Inline 'no-ops' - operations that in principle do nothing.

    Modifies the schedule in-place. For now we inline reshape and
    strided slice - more could be added.

    Parameters
    ----------
    cached_func : CachedFunc
        The cached func.
    sch : tvm.te.Schedule
        The schedule.
    """
    no_ops = {"T_reshape", "T_strided_slice"}
    visited = set()
    # Iterative DFS over the producer graph, starting from every output.
    stack = list(cached_func.outputs)
    while stack:
        tensor = stack.pop()
        if tensor in visited or not isinstance(tensor.op, tvm.te.ComputeOp):
            continue
        visited.add(tensor)
        if tensor.op.name in no_ops:
            sch[tensor].compute_inline()
        stack.extend(tensor.op.input_tensors)
class OperatorCompute:
    """A helper class to manipulate the series of compute ops that make up an operator.

    The chain is fixed:
    read -> convert_to_nhwc -> pad -> upscale -> op -> convert_to_nhcwb16 -> write.
    """

    def __init__(self, read, convert_to_nhwc, pad, upscale, op, convert_to_nhcwb16, write):
        self.read = read
        self.convert_to_nhwc = convert_to_nhwc
        self.pad = pad
        self.upscale = upscale
        self.op = op
        self.convert_to_nhcwb16 = convert_to_nhcwb16
        self.write = write

    @classmethod
    def from_output(cls, out):
        """Recover the full stage chain by walking back from the output tensor."""
        # Each stage in the chain has exactly one input tensor.
        chain = [out]
        for _ in range(6):
            chain.append(chain[-1].op.input_tensors[0])
        write, convert_to_nhcwb16, op, pad, upscale, convert_to_nhwc, read = chain
        return cls(read, convert_to_nhwc, pad, upscale, op, convert_to_nhcwb16, write)

    def split(self, sch, axis, val):
        """Split the write stage along `axis` by `val`, unroll the outer loop
        and compute the rest of the chain at it."""
        write_stage = sch[self.write]
        split_axis = self.write.op.axis[axis]
        outer, inner = write_stage.split(split_axis, val)
        # Keep the original axis ordering, with `inner` in the split slot.
        remaining = [ax for ax in self.write.op.axis if ax != split_axis]
        remaining.insert(axis, inner)
        write_stage.reorder(outer, *remaining)
        write_stage.unroll(outer)
        group = sch.create_group(
            outputs=self.convert_to_nhcwb16, inputs=self.read, include_inputs=True
        )
        group.compute_at(write_stage, outer)
        return outer

    def rolling_buffer(self, sch):
        """Mark every stage of the chain as a rolling buffer."""
        for tensor in (
            self.read,
            self.convert_to_nhwc,
            self.pad,
            self.upscale,
            self.op,
            self.convert_to_nhcwb16,
            self.write,
        ):
            sch[tensor].rolling_buffer()

    def compute_at(self, sch, stage, axis):
        """Compute every stage of the chain at `axis` of `stage`."""
        for tensor in (
            self.read,
            self.convert_to_nhwc,
            self.pad,
            self.upscale,
            self.op,
            self.convert_to_nhcwb16,
            self.write,
        ):
            sch[tensor].compute_at(stage, axis)
| 12,641 | 33.826446 | 100 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/tir/compiler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""The integration of the Arm(R) Ethos(TM)-U NPU TIR compiler."""
import tvm
from tvm import relay
from tvm.driver.build_module import schedule_to_module
from tvm.relay.backend.contrib.ethosu import vela_api as vapi
from tvm.relay.expr_functor import ExprMutator
from .. import util
from . import passes as ethosu_passes
from .scheduler import schedule
def lower_ethosu(sch, args, const_dict, name="main"):
    """Lower a schedule to TIR for the Arm(R) Ethos(TM)-U NPU target.

    The resulting TIR module will contain a single function
    that consists of a sequence of tir.call_extern to NPU
    operations.

    Parameters
    ----------
    sch : tvm.te.Schedule
        The schedule to be lowered.
    args : Union[list of tvm.te.Tensor, TEGraph]
        The input/output tensors.
    const_dict : dict of int to numpy.ndarray
        The constant dictionary.
    name : str, optional
        The name of the lowered primitive function.

    Returns
    -------
    mod : tvm.IRModule
        The lowered TIR module.
    const_dict : dict of int to numpy.ndarray
        The modified constant dictionary.
    """
    # Accept either a plain tensor list or a TE graph with inputs/outputs.
    if not isinstance(args, list):
        args = list(args.inputs) + list(args.outputs)
    # config setup
    curr_pass_ctx = tvm.ir.transform.PassContext.current()
    curr_cfg = dict()
    for key, value in curr_pass_ctx.config.items():
        curr_cfg[key] = value
    tir_compiler_cfg = {
        "tir.LoopPartition": {
            "partition_const_loop": True,
            "no_unroll_loop_with_extent_one": True,
        },
        "tir.UnrollLoop": {"auto_max_depth": -1},
        "tir.noalias": True,
        "tir.debug_keep_trivial_loop": True,
    }
    # Merge two configs; the NPU compiler settings override the current ones.
    curr_cfg = {**curr_cfg, **tir_compiler_cfg}
    sch = sch.normalize()
    # NOTE: the pass order below is behavior-critical (e.g. ReplaceOperators
    # must run after loop transforms, constant passes after operator
    # replacement) -- do not reorder casually.
    with tvm.transform.PassContext(config=curr_cfg):
        mod = schedule_to_module(sch, args, name)
        mod = tvm.tir.transform.Simplify()(mod)
        mod = ethosu_passes.RemoveConcatenates()(mod)
        mod = tvm.tir.transform.InjectRollingBuffer()(mod)
        mod = tvm.tir.transform.StorageFlatten(64)(mod)
        mod = tvm.tir.transform.UnrollLoop()(mod)
        mod = tvm.tir.transform.Simplify()(mod)
        mod = tvm.tir.transform.LoopPartition()(mod)
        mod = ethosu_passes.RemoveZeroStores()(mod)
        mod = tvm.tir.transform.Simplify()(mod)
        mod = tvm.tir.transform.RemoveNoOp()(mod)
        mod = ethosu_passes.ReplaceOperators()(mod)
        mod = tvm.tir.transform.RemoveNoOp()(mod)
        mod, const_dict = ethosu_passes.EncodeConstants(const_dict)(mod)
        mod = ethosu_passes.HoistAllocates()(mod)
        mod = tvm.tir.transform.RemoveNoOp()(mod)
        mod, const_dict = ethosu_passes.MergeConstants(const_dict)(mod)
        mod = ethosu_passes.CopyComputeReordering(vapi.get_max_copy_movements())(mod)
        # StorageRewrite can be disabled through the pass context config.
        disable_storage_rewrite = curr_cfg.get("tir.disable_storage_rewrite", False)
        if not disable_storage_rewrite:
            mod = tvm.tir.transform.StorageRewrite()(mod)
        mod = tvm.tir.transform.RemoveNoOp()(mod)
        mod = ethosu_passes.AnnotateAllocates()(mod)
        mod, const_dict = ethosu_passes.CreatePrimFuncWithoutConstants(const_dict)(mod)
    return mod, const_dict
def lower_to_te(prim_func):
    """Lower a Relay primitive function to a Tensor Expression in an unscheduled CachedFunc.

    Parameters
    ----------
    prim_func : tvm.relay.Function
        The Relay function to lower.

    Returns
    -------
    out : CachedFunc
        The lowered Tensor Expression as part of a CachedFunc.
    """
    # Dispatch to the C++ lowering routine registered under this global name.
    lower_func = tvm._ffi.get_global_func("relay.backend.LowerToTE")
    return lower_func(prim_func)
class ExtractConstants(ExprMutator):
    """Mutator that replaces each tensor-typed constant in a function with a
    fresh Var so the function can be lowered to a TE graph, recording the
    extracted constant values as it goes."""

    def __init__(self):
        super().__init__()
        self.constants = []
        self.const_vars = []

    def visit_constant(self, const):
        # Only tensor constants are extracted; scalars etc. pass through.
        if not isinstance(const.checked_type, relay.ty.TensorType):
            return const
        self.constants.append(const.data.asnumpy())
        # Name after the running count, so the first constant becomes "p1".
        var = relay.var(type_annotation=const.checked_type, name_hint=f"p{len(self.constants)}")
        self.const_vars.append(var)
        return var

    def visit_function(self, fn):
        # Visit the body first so const_vars is fully populated before the
        # new parameter list is assembled.
        new_body = self.visit(fn.body)
        return relay.Function(list(fn.params) + self.const_vars, new_body)

    def extract_constants(self, func):
        """Return (function with constants replaced by vars, list of constants)."""
        return self.visit(func), self.constants
def extract_constants(func):
    """Extract the constants from a function and replace them with
    Vars so the function can be lowered to a TE graph. Additionally
    returns all the values of the constants extracted.

    Parameters
    ----------
    func : tvm.relay.Function
        The Relay function from which to extract constants.

    Returns
    -------
    new_func : tvm.relay.Function
        The Relay function with constants replaced by vars.
    const_dict : dict of int to numpy.ndarray
        A dict of the extracted constants keyed by their param index.
    """
    # Extracted constants become new trailing parameters, so their indices
    # start after the original parameter count.
    num_params = len(func.params)
    new_func, consts = ExtractConstants().extract_constants(func)
    const_dict = {num_params + i: value for i, value in enumerate(consts)}
    new_func = tvm.relay.transform.InferType()(tvm.IRModule.from_expr(new_func))["main"]
    return new_func, const_dict
@util.create_npu_function_pass(opt_level=1)
class LowerToTIR:
    """A pass that lowers NPU Relay functions to TIR. This pass wraps
    the _lower_to_tir pass that operates function->function, while this
    is IRModule->IRModule.

    Attributes
    ----------
    scheduler : callable
        A function to schedule NPU operations. For example,
        scheduler.py/copy_constants.
    """
    def __init__(self, scheduler):
        # Stored and forwarded to _lower_to_tir as the cascader.
        self.scheduler = scheduler
    def transform_npu_function(self, _, func: relay.Function) -> relay.Function:
        """Lower NPU functions to TIR."""
        tir_mod, const_dict = _lower_to_tir(func, self.scheduler)
        # Convert the extracted numpy constants into TVM NDArrays so they can
        # be attached as a PrimFunc attribute.
        for param in const_dict.keys():
            const_dict[param] = tvm.nd.array(const_dict[param])
        compiler_name = "ethos-u"
        primfunc = tir_mod["main"]
        primfunc = primfunc.with_attr("global_symbol", func.attrs["global_symbol"])
        primfunc = primfunc.with_attr("ethos-u.constants", const_dict)
        primfunc = primfunc.with_attr("target", tvm.target.Target(compiler_name))
        return primfunc
    def __call__(self, *args, **kwargs):
        # NOTE(review): intentionally a no-op placeholder -- presumably the
        # create_npu_function_pass decorator supplies the real pass
        # invocation; confirm against util.create_npu_function_pass.
        pass
def _lower_to_tir(func, cascader=None):
    """Lower a Relay function to TIR for the Arm(R) Ethos(TM)-U NPU target.

    The Relay function should only contain operations supported
    by the NPU.

    Parameters
    ----------
    func : tvm.relay.Function
        The Relay function to lower.
    cascader : Callable
        An optional cascading function,

    Returns
    -------
    mod : tvm.IRModule
        The lowered TIR module.
    consts : dict of int to numpy.ndarray
        A dict of the extracted constants keyed by their param index.
    """
    # Pull constants out as extra parameters, then re-infer types so the
    # extended signature is well-typed before TE lowering.
    func, consts = extract_constants(func)
    mod = tvm.IRModule.from_expr(func)
    func = relay.transform.InferType()(mod)["main"]
    # Relay -> TE -> scheduled TE -> TIR.
    cached_func = lower_to_te(func)
    s = schedule(cached_func, consts, cascader)
    mod, consts = lower_ethosu(s, cached_func, consts)
    return mod, consts
| 8,450 | 33.214575 | 93 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/tir/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-U NPU TIR codegen modules."""
| 835 | 45.444444 | 62 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/tir/spec.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The TIR serialization specification for Arm(R) Ethos(TM)-U NPU."""
from typing import Union
from typing import get_type_hints
from inspect import isclass
import tvm
from tvm.relay.backend.contrib.ethosu import util
def create_serial_object(serialized_type, deserialized_elements):
    """
    This function will create serialized type that is one of the subclasses
    of tvm.relay.backend.contrib.ethosu.tir.spec.SerializableFormat

    Parameters
    ----------
    serialized_type : a subclass type of SerializableFormat
    deserialized_elements : list
        The list of arguments that needs to packed to create SerializableFormat objects

    Returns
    -------
    The constructed object of type serialized_type
    """
    def _create_serial_object(internal_serialized_type, read_element_idx=0):
        """The internal function that increments the read_element_idx
        when creating nested serial objects"""
        # Number of __init__ parameters excluding `self`.
        arg_len = util.get_arg_count(internal_serialized_type.__init__) - 1
        # Type hints drive construction: every __init__ parameter must be
        # annotated, and nested SerializableFormat annotations recurse.
        serial_init_types = get_type_hints(internal_serialized_type.__init__)
        serial_init_arg_names = list(serial_init_types.keys())
        serial_init_args = []
        assert arg_len == len(serial_init_arg_names)
        for si_arg_name in serial_init_arg_names:
            si_arg_type = serial_init_types[si_arg_name]
            if isclass(si_arg_type) and issubclass(si_arg_type, SerializableFormat):
                # Nested serial object: recurse, threading the read index
                # through so the flat element list stays in sync.
                sia, read_element_idx = _create_serial_object(si_arg_type, read_element_idx)
                serial_init_args.append(sia)
            else:
                # Leaf argument: consume the next flat element.
                serial_init_args.append(deserialized_elements[read_element_idx])
                read_element_idx += 1
        return internal_serialized_type(*serial_init_args), read_element_idx
    # Just return the primary serial object
    return _create_serial_object(serialized_type)[0]
class SerializableFormat:
    """Base class that exposes its attributes on a predefined ordering.

    Iteration yields attribute values in assignment (insertion) order,
    flattening any nested SerializableFormat values. Indexing returns the
    value of the n-th assigned attribute without flattening.
    """

    def __iter__(self):
        # Instance __dict__ preserves insertion order (guaranteed since
        # Python 3.7), which defines the serialization ordering.
        for value in self.__dict__.values():
            if isinstance(value, SerializableFormat):
                yield from value
            else:
                yield value

    def __getitem__(self, index):
        return list(self.__dict__.values())[index]
class SerialFeatureMap(SerializableFormat):
    """Specialization class to retrieve arguments of a Feature Map
    (similar to NpuFeatureMap of Vela) on a predefined ordering.

    NOTE: the attribute assignment order in __init__ defines the
    serialization order used by SerializableFormat; do not reorder it.
    """
    def __init__(
        self,
        data_type: str,
        height: int,
        width: int,
        channels: int,
        tile_height_0: int,
        tile_height_1: int,
        tile_width_0: int,
        tile_address_0: tvm.tir.expr.BufferLoad,
        tile_address_1: Union[tvm.tir.expr.BufferLoad, int],
        tile_address_2: Union[tvm.tir.expr.BufferLoad, int],
        tile_address_3: Union[tvm.tir.expr.BufferLoad, int],
        scale: float,
        zero_point: int,
        layout: str,
        stride_h: int,
        stride_w: int,
        stride_c: int,
    ):
        self.data_type = data_type
        self.height = height
        self.width = width
        self.channels = channels
        self.tile_height_0 = tile_height_0
        self.tile_height_1 = tile_height_1
        self.tile_width_0 = tile_width_0
        self.tile_address_0 = tile_address_0
        self.tile_address_1 = tile_address_1
        self.tile_address_2 = tile_address_2
        self.tile_address_3 = tile_address_3
        self.scale = scale
        self.zero_point = zero_point
        self.layout = layout
        self.stride_h = stride_h
        self.stride_w = stride_w
        self.stride_c = stride_c
class SerialKernel(SerializableFormat):
    """Argument container for a kernel (similar to Vela's NpuKernel).

    The insertion order below fixes the serialization order used by
    SerializableFormat.
    """

    def __init__(
        self,
        width: int,
        height: int,
        stride_w: int,
        stride_h: int,
        dilation_w: int,
        dilation_h: int,
    ):
        # Assign attributes in the fixed serialization order.
        for field, value in (
            ("width", width),
            ("height", height),
            ("stride_w", stride_w),
            ("stride_h", stride_h),
            ("dilation_w", dilation_w),
            ("dilation_h", dilation_h),
        ):
            setattr(self, field, value)
class SerialAddressRange(SerializableFormat):
    """Argument container for an address range (similar to Vela's
    NpuAddressRange).

    The insertion order below fixes the serialization order; `address` may
    also be an IntImm sentinel (-1) when the range is absent.
    """

    def __init__(self, address: tvm.tir.expr.BufferLoad, length: int):
        # Assign attributes in the fixed serialization order.
        for field, value in (("address", address), ("length", length)):
            setattr(self, field, value)
class SerialPadding(SerializableFormat):
    """Argument container for padding (similar to Vela's NpuPadding).

    The insertion order below fixes the serialization order.
    """

    def __init__(self, top: int, left: int, bottom: int, right: int):
        # Assign attributes in the fixed serialization order.
        for field, value in (
            ("top", top),
            ("left", left),
            ("bottom", bottom),
            ("right", right),
        ):
            setattr(self, field, value)
class SerialActivation(SerializableFormat):
    """Argument container for an activation (similar to Vela's NpuActivation).

    The insertion order below fixes the serialization order.
    """

    def __init__(self, op: str, clip_min: int, clip_max: int):
        # Assign attributes in the fixed serialization order.
        for field, value in (("op", op), ("clip_min", clip_min), ("clip_max", clip_max)):
            setattr(self, field, value)
class SerialBlockConfig(SerializableFormat):
    """Argument container for a block config (similar to Vela's
    NpuBlockConfig).

    The insertion order below fixes the serialization order.
    """

    def __init__(self, height: int, width: int, depth: int):
        # Assign attributes in the fixed serialization order.
        for field, value in (("height", height), ("width", width), ("depth", depth)):
            setattr(self, field, value)
class SerialRescaleConfig(SerializableFormat):
    """Argument container for rescale parameters, used to populate the
    rescale field of Vela's NpuElementWiseOperation.

    The insertion order below fixes the serialization order.
    """

    def __init__(self, use_rescale: bool, scale: int, shift: int):
        # Assign attributes in the fixed serialization order.
        for field, value in (
            ("use_rescale", use_rescale),
            ("scale", scale),
            ("shift", shift),
        ):
            setattr(self, field, value)
class Serial2DConvolution(SerializableFormat):
    """Specialization class to retrieve arguments of
    a ethosu.conv2d tir extern call on a predefined ordering.

    NOTE: the attribute assignment order in __init__ defines the
    serialization order used by SerializableFormat; do not reorder it.
    """
    def __init__(
        self,
        ifm: SerialFeatureMap,
        ofm: SerialFeatureMap,
        kernel: SerialKernel,
        weight: SerialAddressRange,
        weight2: SerialAddressRange,
        weight_zero_point: int,
        scale_bias: SerialAddressRange,
        scale_bias2: SerialAddressRange,
        padding: SerialPadding,
        activation: SerialActivation,
        rounding_mode: str,
        upscale: str,
        block_config: SerialBlockConfig,
    ):
        self.ifm = ifm
        self.ofm = ofm
        self.kernel = kernel
        self.weight = weight
        self.weight2 = weight2
        self.weight_zero_point = weight_zero_point
        self.scale_bias = scale_bias
        self.scale_bias2 = scale_bias2
        self.padding = padding
        self.activation = activation
        self.rounding_mode = rounding_mode
        self.upscale = upscale
        self.block_config = block_config
class Serial2DDepthwise(SerializableFormat):
    """Specialization class to retrieve arguments of
    a ethosu.depthwise_conv2d TIR extern call on a predefined ordering.

    NOTE: the attribute assignment order in __init__ defines the
    serialization order used by SerializableFormat; do not reorder it.
    """
    def __init__(
        self,
        ifm: SerialFeatureMap,
        ofm: SerialFeatureMap,
        kernel: SerialKernel,
        weight: SerialAddressRange,
        weight_zero_point: int,
        scale_bias: SerialAddressRange,
        padding: SerialPadding,
        activation: SerialActivation,
        rounding_mode: str,
        upscale: str,
        block_config: SerialBlockConfig,
    ):
        self.ifm = ifm
        self.ofm = ofm
        self.kernel = kernel
        self.weight = weight
        self.weight_zero_point = weight_zero_point
        self.scale_bias = scale_bias
        self.padding = padding
        self.activation = activation
        self.rounding_mode = rounding_mode
        self.upscale = upscale
        self.block_config = block_config
class SerialCopy(SerializableFormat):
    """Argument container for a ethosu.copy tir extern call.

    The insertion order below fixes the serialization order.
    """

    def __init__(
        self,
        read_address: tvm.tir.expr.BufferLoad,
        length: int,
        write_address: tvm.tir.expr.BufferLoad,
    ):
        # Assign attributes in the fixed serialization order.
        for field, value in (
            ("read_address", read_address),
            ("length", length),
            ("write_address", write_address),
        ):
            setattr(self, field, value)
class SerialPooling(SerializableFormat):
    """Specialization class to retrieve arguments of
    a ethosu.pooling tir extern call on a predefined ordering.

    NOTE: the attribute assignment order in __init__ defines the
    serialization order used by SerializableFormat; do not reorder it.
    """
    def __init__(
        self,
        ifm: SerialFeatureMap,
        ofm: SerialFeatureMap,
        pooling_type: str,
        pool_shape: SerialKernel,
        padding: SerialPadding,
        activation: SerialActivation,
        rounding_mode: str,
        upscale: str,
        block_config: SerialBlockConfig,
    ):
        self.ifm = ifm
        self.ofm = ofm
        self.pooling_type = pooling_type
        self.pool_shape = pool_shape
        self.padding = padding
        self.activation = activation
        self.rounding_mode = rounding_mode
        self.upscale = upscale
        self.block_config = block_config
class SerialBinaryElementwise(SerializableFormat):
    """Specialization class to retrieve arguments of
    a ethosu.binary_elementwise tir extern call on a predefined ordering.

    NOTE: the attribute assignment order in __init__ defines the
    serialization order used by SerializableFormat; do not reorder it.
    """
    def __init__(
        self,
        ifm: SerialFeatureMap,
        ifm2: SerialFeatureMap,
        ofm: SerialFeatureMap,
        operator_type: str,
        reversed_operands: bool,
        activation: SerialActivation,
        rounding_mode: str,
        block_config: SerialBlockConfig,
        rescale_config: SerialRescaleConfig,
    ):
        self.ifm = ifm
        self.ifm2 = ifm2
        self.ofm = ofm
        self.operator_type = operator_type
        self.reversed_operands = reversed_operands
        self.activation = activation
        self.rounding_mode = rounding_mode
        self.block_config = block_config
        self.rescale_config = rescale_config
class SerialUnaryElementwise(SerializableFormat):
    """Argument container for a ethosu.unary_elementwise tir extern call.

    The insertion order below fixes the serialization order.
    """

    def __init__(
        self,
        ifm: SerialFeatureMap,
        ofm: SerialFeatureMap,
        operator_type: str,
        activation: SerialActivation,
        rounding_mode: str,
        block_config: SerialBlockConfig,
    ):
        # Assign attributes in the fixed serialization order.
        for field, value in (
            ("ifm", ifm),
            ("ofm", ofm),
            ("operator_type", operator_type),
            ("activation", activation),
            ("rounding_mode", rounding_mode),
            ("block_config", block_config),
        ):
            setattr(self, field, value)
| 11,643 | 32.173789 | 92 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/tir/passes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, no-else-return
# pylint: disable=use-list-literal, inconsistent-return-statements, too-many-nested-blocks
"""The TIR passes to be run on Arm(R) Ethos(TM)-U NPU TIR Compiler."""
from collections import namedtuple
from typing import Optional
import numpy as np # type: ignore
from ethosu.vela import api as vapi # type: ignore
import tvm
from tvm.relay.backend.contrib.ethosu import vela_api
from tvm.relay.backend.contrib.ethosu import tir_to_cs_translator as tirtocs
from .convolution import get_conv2d_params
from .depthwise import get_depthwise_conv2d_params
from .pooling import get_pooling_params
from .binary_elementwise import get_binary_elementwise_params
from .identity import get_identity_params
from .unary_elementwise import get_unary_elementwise_params
from .transform import get_copy_params
from .producers_consumers import ProducersConsumers
from .. import _ffi_api
def RemoveZeroStores():
    """Create a pass that removes stores which just store zero to initialise buffers.

    We don't codegen these stores and they otherwise considerably reduce
    the simplicity of the static traversal of convolution.
    """

    def _replace_store(store):
        # A store of the literal 0 is only a buffer initialisation; turn it
        # into a no-op evaluate so codegen can ignore it.
        value = store.value
        if not (isinstance(value, tvm.tir.IntImm) and int(value) == 0):
            return store
        return tvm.tir.Evaluate(tvm.tir.IntImm("uint8", 0))

    def _transform(func, _mod, _ctx):
        new_body = tvm.tir.stmt_functor.ir_transform(
            func.body, _replace_store, None, ["tir.BufferStore"]
        )
        return func.with_body(new_body)

    return tvm.tir.transform.prim_func_pass(
        _transform, opt_level=0, name="tir.contrib.ethos-u.remove_zero_stores"
    )
def ReplaceOperators():
    """Replace operators represented as explicit loop nests with call_externs
    to NPU operators.

    Returns a prim_func_pass that rewrites each loop nest tagged with a
    "pragma_op" attribute into a single tir.call_extern, removing the no-compile
    nests (e.g. DMA copies) that get folded into their parent operator.
    """
    # Dispatch table: "pragma_op" attribute value -> function that derives the
    # extern-call parameters for that NPU operator.
    op_map = {
        "ethosu_conv2d": get_conv2d_params,
        "ethosu_copy": get_copy_params,
        "ethosu_depthwise_conv2d": get_depthwise_conv2d_params,
        "ethosu_pooling": get_pooling_params,
        "ethosu_binary_elementwise": get_binary_elementwise_params,
        "ethosu_identity": get_identity_params,
        "ethosu_unary_elementwise": get_unary_elementwise_params,
    }
    # Records which pragma_op produces and consumes each pointer (filled in by
    # _resolve_pointers below).
    producers_consumers = ProducersConsumers()
    # Maps an op's output pointer to the pointer that must replace it, plus a
    # flag saying whether the replacement still needs its own Allocate.
    replace_output_pointer = {}
    # Extents of every Allocate in the function, keyed by its buffer_var.
    pointer_to_extents = {}
    # Pointers that have already been given an Allocate by a replacement.
    replaced_pointers = []
    ReplaceInfo = namedtuple("ReplaceInfo", ["pointer", "reallocate"])
    def _find_pointer_to_extent(stmt):
        # Record allocation extents so replacement pointers can be re-allocated
        # with the correct size in _replace_pointers.
        if isinstance(stmt, tvm.tir.Allocate):
            pointer_to_extents[stmt.buffer_var] = stmt.extents
    def _resolve_pointers(stmt):
        """This pass determines information about the pointers present in the IR.
        In particular, it associates pointers with both the operations that
        produce them and the operations that consume them through the
        pointer_to_producer and pointer_to_consumer dicts.
        Additionally, it determines the extent (size/shape) of each pointer which
        is required for the _replace_pointers pass which runs later."""
        loads = []
        def _get_loads(stmt):
            # Collect every buffer read within the loop nest.
            if isinstance(stmt, tvm.tir.BufferLoad):
                loads.append(stmt.buffer.data)
        buffer_var = None
        def _get_buffer_var(stmt):
            # The (last-visited) BufferStore identifies the nest's output pointer.
            if isinstance(stmt, tvm.tir.BufferStore):
                nonlocal buffer_var
                buffer_var = stmt.buffer.data
        if isinstance(stmt, tvm.tir.AttrStmt):
            if stmt.attr_key == "pragma_op":
                # The stored-to pointer is produced by this op; every other
                # loaded pointer is consumed by it.
                tvm.tir.stmt_functor.post_order_visit(stmt, _get_buffer_var)
                producers_consumers.add_producer(buffer_var, stmt)
                tvm.tir.stmt_functor.post_order_visit(stmt, _get_loads)
                for load_pointer in loads:
                    if load_pointer != buffer_var:
                        producers_consumers.add_consumer(load_pointer, stmt)
    def _replace_operator(stmt):
        """Replace operators with call_externs, having derived the parameters
        from the relevant TIR expressions/statements.
        Note the complexity of this pass is mostly from the concept of 'replace
        pointers'. A call_extern may in principle require information from several
        loop nests in TIR (each corresponding to a different TE compute op). For
        example, a convolution operator will have other TE compute ops before and
        after corresponding to the input/output DMA functionality. Therefore, when
        the 'central' convolution op is replaced with a call_extern, the memory
        from the final DMA output op must be hoisted to the location/scope of
        the call_extern.
        The is done by replacing the pointer corresponding to the current operation
        with the 'true' output operator through the replace_output_pointer dict.
        Because of this, the param_func must provide a replace_pointer if the op
        isn't the true output but instead a no_compile op is."""
        if isinstance(stmt, tvm.tir.AttrStmt):
            op_name = stmt.value.value
            if stmt.attr_key == "pragma_op" and op_name in op_map:
                # Get the parameters for the extern call
                param_func = op_map[op_name]
                info, output_pointer, replace_pointer, is_allocator = param_func(
                    stmt, producers_consumers
                )
                if replace_pointer is not None:
                    # Allocate pointer only once
                    if replace_pointer in replaced_pointers:
                        is_allocator = False
                    replace_output_pointer[output_pointer] = ReplaceInfo(
                        replace_pointer, is_allocator
                    )
                    replaced_pointers.append(replace_pointer)
                # Make the extern call
                irb = tvm.tir.ir_builder.create()
                irb.emit(tvm.tir.call_extern("handle", op_name, *info))
                return irb.get()
        return None
    def _remove_no_compile(stmt):
        """Certain operators are marked as 'no compile' operators. This means they
        should be removed from the IR as they are compiled as part of other operators.
        The IFM DMA operations are an example of this, as they don't get compiled
        independently but instead get compiled into the operator they're associated with,
        e.g. a conv2d.
        There are potentially 2 parts to remove for an operator:
        the allocate for its output and the compute nest itself. For the
        allocate, we can check if the pointer they reference is produced by a 'no compile'
        operator. For the compute nest, we can just check the op pragma."""
        if isinstance(stmt, tvm.tir.AttrStmt):
            # Remove compute nests
            if stmt.attr_key == "pragma_op" and stmt.value.value not in op_map:
                return tvm.tir.Evaluate(0)
        if isinstance(stmt, tvm.tir.Allocate):
            # Remove allocates
            producer = producers_consumers.get_last_producer(stmt.buffer_var)
            if producer:
                if producer.attr_key == "pragma_op" and producer.value.value not in op_map:
                    return stmt.body
        return None
    def _replace_pointers(stmt):
        # Rewrite Allocate nodes whose pointer was superseded by a replacement
        # pointer during _replace_operator.
        if isinstance(stmt, tvm.tir.Allocate):
            # If the allocate allocates a pointer that needs replacing
            if stmt.buffer_var in replace_output_pointer:
                replace_pointer, reallocate = replace_output_pointer[stmt.buffer_var]
                if not reallocate:
                    return stmt.body
                # Otherwise, rewrite the allocation statement with the new pointer
                # and the new extent
                replace_type = replace_pointer.type_annotation.element_type.dtype
                replace_extents = pointer_to_extents[replace_pointer]
                return tvm.tir.Allocate(
                    replace_pointer, replace_type, replace_extents, stmt.condition, stmt.body
                )
        return None
    def _remove_buffer_decl(stmt):
        # Drop DeclBuffer nodes for replaced pointers; returns None (keep) for
        # everything else.
        if isinstance(stmt, tvm.tir.DeclBuffer):
            if stmt.buffer.data in replace_output_pointer:
                return stmt.body
    def _post_transform(stmt):
        # The sub-passes are mutually exclusive per node, so chain them with
        # `or`: the first one that rewrites the node wins.
        # Replace operators with call_externs
        result = _replace_operator(stmt)
        # Remove operators that don't need compiling
        result = result or _remove_no_compile(stmt)
        # Replace necessary pointers that were removed in the previous step
        result = result or _replace_pointers(stmt)
        # Replace BufferDecl, since only the tir.Var data pointer is
        # still used, and not the tir.Buffer
        result = result or _remove_buffer_decl(stmt)
        return result
    def _ftransform(f, mod, ctx):
        # Analysis phase first (extents + producer/consumer), then the rewrite.
        tvm.tir.stmt_functor.post_order_visit(f.body, _find_pointer_to_extent)
        tvm.tir.stmt_functor.post_order_visit(f.body, _resolve_pointers)
        producers_consumers.add_allocate_variables(pointer_to_extents.keys())
        return f.with_body(
            tvm.tir.stmt_functor.ir_transform(
                f.body, None, _post_transform, ["tir.AttrStmt", "tir.Allocate"]
            )
        )
    return tvm.tir.transform.prim_func_pass(
        _ftransform, opt_level=0, name="tir.contrib.ethos-u.replace_operators"
    )
def DivideConstants(const_dict):
    """This pass rewrites the IR and constant dict such that all constant
    accesses are at 0 offset and full length (i.e. they read the whole buffer).
    Where necessary, new constants are created in order to ensure the rewrite
    can take place. As an example, if a convolution is tiled along the channels
    axis, the accesses to the weights will need to be offset. This pass will
    create new constants consisting of 'slices' of the weights so each tile
    of the compute can access one of these 'slices'.
    The purpose of this pass is to transform the IR into a form we can apply
    constant encoding to (which will compress weights and encode biases).

    Parameters
    ----------
    const_dict : dict
        Maps the index of a PrimFunc parameter to its constant data.
    """
    # Constant data keyed by both the param Var and its buffer data Var.
    buffer_to_const = {}  # type: ignore
    # Buffers/constants created for partial (sliced) reads.
    new_buffers = []
    new_consts = []
    # Buffers that were read in full at least once and so must be kept.
    keep_buffers = set()
    new_const_dict = {}
    def _visit(stmt):
        new_args = []
        # We don't want to divide the constant that will be executed on two cores in parallel
        is_u65_conv2d = (
            vela_api.get_accelerator_config() == vapi.NpuAccelerator.Ethos_U65_512
            and stmt.args[0] == "ethosu_conv2d"
        )
        for i, arg in enumerate(stmt.args):
            if isinstance(arg, tvm.tir.expr.BufferLoad):
                # If we're trying to load a buffer that maps to a constant
                if arg.buffer.data in buffer_to_const:
                    const = buffer_to_const[arg.buffer.data]
                    flattened_const_shape = np.prod(const.shape)
                    offset = int(arg.indices[0])
                    # Note by convention the arg after a constant read is the length of the read
                    length = int(stmt.args[i + 1])
                    # If it's anything other than a full read, create a new buffer
                    # NOTE(review): `or` binds looser than `and`, so this guard reads as
                    # offset != 0 or (flattened_const_shape != length and length > 0)
                    # -- confirm that precedence is intended.
                    if (
                        offset != 0 or flattened_const_shape != length and length > 0
                    ) and not is_u65_conv2d:
                        out_channels = const.shape[0]
                        # Translate the flat offset/length into channel counts so the
                        # slice can be taken along the channels axis.
                        offset_channels = int((offset * out_channels) / flattened_const_shape)
                        length_channels = int((length * out_channels) / flattened_const_shape)
                        # split the constant up across channels
                        split_const = np.split(const, out_channels, axis=0)
                        # create a new const out of the channels we want to keep
                        new_const = np.concatenate(
                            split_const[offset_channels : offset_channels + length_channels], axis=0
                        )
                        new_consts.append(new_const)
                        new_buffer = tvm.tir.decl_buffer(
                            (length,), arg.dtype, scope=arg.buffer.scope()
                        )
                        new_buffers.append(new_buffer)
                        # The sliced constant is always read from offset 0.
                        new_args.append(tvm.tir.expr.BufferLoad(new_buffer, [0]))
                        continue
                    keep_buffers.add(arg.buffer.data)
            new_args.append(arg)
        return tvm.tir.Call(stmt.dtype, stmt.op, new_args, stmt.span)
    def _ftransform(f, mod, ctx):
        for i, param in enumerate(f.params):
            if i in const_dict:
                buffer_to_const[param] = const_dict[i]
                buffer_to_const[f.buffer_map[param].data] = const_dict[i]
        new_body = tvm.tir.stmt_functor.ir_transform(f.body, _visit, None, ["tir.Call"])
        # Both the params and buffer map need updating for the newly introduced buffers
        new_params = []  # type: ignore
        new_buffer_map = {}
        for i, param in enumerate(f.params):
            buffer = f.buffer_map[param]
            pointer = buffer.data
            if pointer in buffer_to_const:
                # Drop constant params that are no longer read in full anywhere.
                if pointer not in keep_buffers:
                    continue
                new_const_dict[len(new_params)] = const_dict[i]
            new_params.append(param)
            new_buffer_map[param] = buffer
        # Append a fresh handle param for every sliced constant created above.
        for i, new_buffer in enumerate(new_buffers):
            handle = tvm.tir.Var("placeholder", "handle")
            new_params.append(handle)
            new_buffer_map[handle] = new_buffer
            new_const_dict[len(new_params) - 1] = new_consts[i]
        new_f = tvm.tir.PrimFunc(
            new_params,
            new_body,
            f.ret_type,
            new_buffer_map,
            f.attrs,
            f.span,
        )
        return new_f
    def _divide_constants(mod):
        transform_func = tvm.tir.transform.prim_func_pass(
            _ftransform, opt_level=0, name="tir.contrib.ethos-u.divide_constants"
        )
        new_func = transform_func(mod)
        # Return the rewritten module together with the re-indexed constants.
        return new_func, new_const_dict
    return _divide_constants
def EncodeConstants(const_dict):
    """the NPU requires that weights are compressed and bias/scales are 'encoded', both
    of which are performed by this pass.
    This pass modifies both the constant dict to contain the post-encoding values of the
    constants and the IR to adjust buffer types/sizes/accesses so they align with the
    encoded constants. Calls to the Vela API are made to perform the actual compression/
    encoding.

    Parameters
    ----------
    const_dict : dict
        Maps the index of a PrimFunc parameter to its constant data.
    """
    new_const_dict = {}
    def collect_encoding_definitions(stmt, old_buffer_var_to_const):
        # Analysis phase: walk the body once and record which buffers must be
        # replaced by encoded versions, without mutating anything yet.
        # Map from copy destination to copy source.
        copy_map = {}
        # List of buffer copies that occurred
        copied_buffers = []
        # List of encoded buffer information
        constant_buffer_replacements = []
        def _align_scale_bias(tir_extern_call, bias):
            """Align the scale_bias to 16 bytes."""
            value_bytes = bytearray()
            value_bytes.extend(bias.tobytes())
            # Align to 16
            remainder = (len(value_bytes)) % 16
            if remainder > 0:
                value_bytes.extend(bytearray(16 - remainder))
            value = np.frombuffer(value_bytes, dtype="uint8")
            return value
        accel_config = vela_api.get_accelerator_config()
        def _encode_weights(tir_extern_call, weights):
            """Encode the weights for a TIR extern call."""
            value_bytes = vela_api.encode_weights(tir_extern_call, weights, accel_config)
            value = np.frombuffer(value_bytes, dtype="uint8")
            return value
        def _declare_constant_buffer(old_buffer, encoded_constants, split_idx):
            """Create a new buffer and add the old buffer and its pointer to the
            rewriting maps."""
            new_buffer = tvm.tir.decl_buffer(
                shape=[len(encoded_constants)],
                dtype=str(encoded_constants.dtype),
                name=old_buffer.name + "_encoded",
                scope=old_buffer.scope(),
            )
            constant_buffer_replacements.append(
                {
                    "old_buffer": old_buffer,
                    "new_buffer": new_buffer,
                    "encoded_constants": encoded_constants,
                    "split_idx": split_idx,
                }
            )
        def _encode_weights_or_bias(buffer1, buffer2, stmt, encode_func):
            """Encode the weights or align the bias either for one or two cores,
            depending on the variant.

            Returns the encoded constant and, for the two-core case, the byte
            index at which the second core's half starts (None otherwise)."""
            constant = old_buffer_var_to_const[buffer1.data]
            # If we have just one core, encode the whole constant
            if buffer2 is None:
                new_const = encode_func(stmt, constant)
                return new_const, None
            # Assume that the constant tensor has not been flattened yet
            assert len(constant.shape) != 1
            channels = constant.shape[0]
            split_const = np.split(constant, channels, axis=0)
            # Even channels go to core 0, odd channels to core 1.
            const_list = [split_const[i] for i in range(channels) if i % 2 == 0]
            const_to_encode = np.concatenate(const_list, axis=0)
            new_const = encode_func(stmt, const_to_encode)
            split_idx = len(new_const)
            # Encode half of the constant separately for the other core if it exists
            assert buffer1.same_as(buffer2)
            const2_list = [split_const[i] for i in range(channels) if i % 2 == 1]
            const2_to_encode = np.concatenate(const2_list, axis=0)
            new_const2 = encode_func(stmt, const2_to_encode)
            new_const = np.append(new_const, new_const2).astype("uint8")
            return new_const, split_idx
        def _visit(stmt):
            if isinstance(stmt, tvm.tir.Call):
                op = str(stmt.args[0].value)
                # Handle copies as a special-case by propagating the buffer information
                # from the read to the write pointer.
                if op == "ethosu_copy":
                    read_buffer = stmt.args[1].buffer
                    write_buffer = stmt.args[3].buffer
                    # Assert writing to the base of the write_var (pre-StorageRewrite)
                    assert list(stmt.args[3].indices) == [0]
                    assert list(stmt.args[1].indices) == [0]
                    copied_buffers.append({"source": read_buffer, "dest": write_buffer})
                    copy_map[write_buffer] = read_buffer
                # Only conv2d and depthwise_conv2d carry weights/biases to encode.
                ops_with_weights = {
                    "ethosu_conv2d": tirtocs.translate_ethosu_conv2d,
                    "ethosu_depthwise_conv2d": tirtocs.translate_ethosu_depthwise_conv2d,
                }
                if op in ops_with_weights:
                    npu_op, _ = ops_with_weights[op](stmt)
                    # Encode the weights
                    weights_buffer = npu_op.weights[0].address.buffer
                    # Follow copies back to the original constant buffer.
                    if weights_buffer in copy_map:
                        weights_buffer = copy_map[weights_buffer]
                    # In case of U65 512 mac variant the weights are split across two cores
                    # and need to be encoded separately
                    weights2_buffer = (
                        npu_op.weights[1].address.buffer
                        if accel_config == vapi.NpuAccelerator.Ethos_U65_512
                        else None
                    )
                    if weights2_buffer in copy_map:
                        weights2_buffer = copy_map[weights2_buffer]
                    new_weights, split_idx = _encode_weights_or_bias(
                        weights_buffer, weights2_buffer, stmt, _encode_weights
                    )
                    _declare_constant_buffer(weights_buffer, new_weights, split_idx)
                    # Align the scale_bias to 16 bytes
                    scale_bias_buffer = npu_op.biases[0].address.buffer
                    if scale_bias_buffer in copy_map:
                        scale_bias_buffer = copy_map[scale_bias_buffer]
                    scale_bias2_buffer = (
                        npu_op.biases[1].address.buffer
                        if accel_config == vapi.NpuAccelerator.Ethos_U65_512
                        else None
                    )
                    if scale_bias2_buffer in copy_map:
                        scale_bias2_buffer = copy_map[scale_bias2_buffer]
                    new_scale_bias, split_idx = _encode_weights_or_bias(
                        scale_bias_buffer, scale_bias2_buffer, stmt, _align_scale_bias
                    )
                    _declare_constant_buffer(scale_bias_buffer, new_scale_bias, split_idx)
        tvm.tir.stmt_functor.post_order_visit(stmt, _visit)
        return {
            "copied_buffers": copied_buffers,
            "constant_buffer_replacements": constant_buffer_replacements,
        }
    def transform_stmt(
        stmt,
        buf_remap,
        var_remap,
        pointer_to_buffer,
        new_buffer_var_to_const,
        new_buffer_to_split_idx,
    ):
        # Rewrite phase: apply the remaps collected above to calls, allocates,
        # buffer loads and attribute statements.
        def _visit_rewrite(stmt):
            if isinstance(stmt, tvm.tir.Call):
                # For extern calls, we need to rewrite pairs of arguments corresponding to
                # base address load and the length of the load.
                old_args = list(stmt.args)
                new_args = [stmt.args[0]]
                for prev_arg, arg in zip(old_args[:-1], old_args[1:]):
                    # If the previous argument was a load from an
                    # encoded buffer, the current should be a length.
                    if (
                        isinstance(prev_arg, tvm.tir.BufferLoad)
                        and prev_arg.buffer.data in new_buffer_var_to_const
                    ):
                        buffer_size = np.prod(list(prev_arg.buffer.shape))
                        arg = buffer_size
                        # We have to check for split weights/bias for conv2d and depthwise_conv2d
                        # NOTE(review): "depthwise_conv2d" here does not match the
                        # "ethosu_depthwise_conv2d" op name used elsewhere in this file --
                        # confirm this branch is intended to fire for depthwise ops.
                        if old_args[0] in ("ethosu_conv2d", "depthwise_conv2d"):
                            # We have split weights/bias
                            if prev_arg.buffer in new_buffer_to_split_idx:
                                split_idx = new_buffer_to_split_idx[prev_arg.buffer]
                                # The first half of the split buffer
                                if prev_arg.indices[0] == 0:
                                    arg = split_idx
                                # the second half of the split buffer
                                else:
                                    arg = buffer_size - split_idx
                    new_args.append(arg)
                return tvm.tir.Call(stmt.dtype, stmt.op, new_args, stmt.span)
            if isinstance(stmt, tvm.tir.Allocate):
                # Where a pointer needs rewriting, the allocate for it must be rewritten
                allocate_pointer = stmt.buffer_var
                if allocate_pointer in var_remap:
                    new_allocate_pointer = var_remap[allocate_pointer]
                    new_buffer = pointer_to_buffer[new_allocate_pointer]
                    return tvm.tir.Allocate(
                        new_buffer.data,
                        new_buffer.dtype,
                        new_buffer.shape,
                        stmt.condition,
                        stmt.body,
                        stmt.span,
                    )
            # The following rewrites would be better expressed by just
            # rewriting the Buffers. However ir_transform doesn't
            # visit Buffers, so instead we do the next best thing and
            # rewrite the nodes which contain the Buffers.
            if isinstance(stmt, tvm.tir.BufferLoad):
                if stmt.buffer in buf_remap:
                    new_buffer = buf_remap[stmt.buffer]
                    new_indices = stmt.indices
                    offset = new_indices[0]
                    # A non-zero offset into a split buffer means "second core's
                    # half" -- rebase it onto the recorded split index.
                    if offset != 0 and new_buffer in new_buffer_to_split_idx:
                        offset = new_buffer_to_split_idx[new_buffer]
                    return tvm.tir.BufferLoad(buf_remap[stmt.buffer], [offset], stmt.span)
            if isinstance(stmt, tvm.tir.AttrStmt):
                node_pointer = stmt.node
                if node_pointer in var_remap:
                    return tvm.tir.AttrStmt(
                        var_remap[node_pointer],
                        stmt.attr_key,
                        stmt.value,
                        stmt.body,
                        stmt.span,
                    )
            return None
        return tvm.tir.stmt_functor.ir_transform(
            stmt,
            None,
            _visit_rewrite,
            ["tir.Call", "tir.Allocate", "tir.BufferLoad", "tir.AttrStmt"],
        )
    def _collect_parameter_buffer_aliases(prim_func):
        # Collect every tir.Buffer object that aliases a parameter's data Var,
        # so a remap can be applied to all aliases at once.
        buffer_vars = {}
        for param in prim_func.params:
            if param in prim_func.buffer_map:
                buf = prim_func.buffer_map[param]
                buffer_vars[buf.data] = {buf}
        def visit(node):
            if isinstance(node, (tvm.tir.BufferStore, tvm.tir.BufferLoad, tvm.tir.DeclBuffer)):
                buf = node.buffer
                if buf.data in buffer_vars:
                    buffer_vars[buf.data].add(buf)
        tvm.tir.stmt_functor.post_order_visit(prim_func.body, visit)
        return buffer_vars
    def _ftransform(f, mod, ctx):
        param_buffer_var_usage = _collect_parameter_buffer_aliases(f)
        # Step 0: Unpack the constant dictionary in terms of the
        # functions buffers.
        old_buffer_var_to_const = {}
        for i, param in enumerate(f.params):
            if i in const_dict:
                old_buffer_var_to_const[f.buffer_map[param].data] = const_dict[i]
        # Step 1: Collect information on the buffers that will be
        # replaced by encodings.
        buffer_information = collect_encoding_definitions(f.body, old_buffer_var_to_const)
        # Step 2: Generate variable/buffer remaps, based on the
        # collected information.
        buf_remap = {}
        new_buffer_var_to_const = {}
        new_buffer_to_split_idx = {}
        def define_remap(old_buf, new_buf):
            # Remap every alias of old_buf (falling back to just old_buf for
            # non-parameter buffers).
            try:
                old_buffers = param_buffer_var_usage[old_buf.data]
            except KeyError:
                old_buffers = [old_buf]
            for old_buffer in old_buffers:
                buf_remap[old_buffer] = new_buf
        # Any encoded buffers must be replaced
        for info in buffer_information["constant_buffer_replacements"]:
            define_remap(info["old_buffer"], info["new_buffer"])
            new_buffer_var_to_const[info["new_buffer"].data] = info["encoded_constants"]
            if info["split_idx"]:
                new_buffer_to_split_idx[info["new_buffer"]] = info["split_idx"]
        # Any buffers that are copied into from an encoded buffer must
        # be replaced.
        for info in buffer_information["copied_buffers"]:
            copy_source = info["source"]
            # Chase the remap chain to the final replacement buffer.
            while copy_source in buf_remap:
                copy_source = buf_remap[copy_source]
            copy_dest = info["dest"]
            if copy_source.shape != copy_dest.shape or copy_source.dtype != copy_dest.dtype:
                new_dest = tvm.tir.decl_buffer(
                    shape=copy_source.shape,
                    dtype=copy_source.dtype,
                    name=copy_dest.name,
                    scope=copy_dest.scope(),
                )
                define_remap(copy_dest, new_dest)
                if copy_source.data in new_buffer_var_to_const:
                    new_buffer_var_to_const[new_dest.data] = new_buffer_var_to_const[
                        copy_source.data
                    ]
                if copy_source in new_buffer_to_split_idx:
                    new_buffer_to_split_idx[new_dest] = new_buffer_to_split_idx[copy_source]
        # Define additional dependent lookup tables.
        var_remap = {old.data: new.data for (old, new) in buf_remap.items()}
        pointer_to_buffer = {
            buf.data: buf for (old, new) in buf_remap.items() for buf in [old, new]
        }
        # Step 3: Then perform the rewrites
        new_body = transform_stmt(
            f.body,
            buf_remap,
            var_remap,
            pointer_to_buffer,
            new_buffer_var_to_const,
            new_buffer_to_split_idx,
        )
        # Step 4: Rewrite the buffer map and const dict to instead use the encoded versions
        new_buffer_map = {}
        for i, param in enumerate(f.params):
            buffer = f.buffer_map[param]
            if buffer in buf_remap:
                buffer = buf_remap[buffer]
            if buffer.data in new_buffer_var_to_const:
                new_const_dict[i] = new_buffer_var_to_const[buffer.data].flatten()
            elif buffer.data in old_buffer_var_to_const:
                new_const_dict[i] = old_buffer_var_to_const[buffer.data].flatten()
            new_buffer_map[param] = buffer
        new_f = tvm.tir.PrimFunc(
            f.params,
            new_body,
            f.ret_type,
            new_buffer_map,
            f.attrs,
            f.span,
        )
        return new_f
    def _encode_constants(mod):
        # DivideConstants must run first so every constant access is a full read.
        mod, divided_const_dict = DivideConstants(const_dict)(mod)
        # Update const_dict in place so the closures above see the new values.
        const_dict.clear()
        for key, value in divided_const_dict.items():
            const_dict[key] = value
        transform_func = tvm.tir.transform.prim_func_pass(
            _ftransform, opt_level=0, name="tir.contrib.ethos-u.encode_constants"
        )
        new_func = transform_func(mod)
        return new_func, new_const_dict
    return _encode_constants
# This needs to be kept in sync with kDisableLowerTVMBuiltin in include/tvm/tir/transform.h
# Annotation key placed on Allocate nodes to stop the LowerTVMBuiltin pass from
# lowering them to built-in calls.
DISABLE_LOWER_BUILTIN = "disable_lower_builtin"
def AnnotateAllocates():
    """
    Annotate every Allocate node in the microNPU PrimFuncs so that the
    LowerTVMBuiltin pass leaves them untouched (i.e. does not lower them
    to built-ins).
    """

    def _rewrite_allocate(alloc):
        # Rebuild the node unchanged except for the extra annotation.
        return tvm.tir.Allocate(
            alloc.buffer_var,
            alloc.dtype,
            alloc.extents,
            alloc.condition,
            alloc.body,
            annotations={DISABLE_LOWER_BUILTIN: True},
        )

    def _ftransform(f, mod, ctx):
        annotated_body = tvm.tir.stmt_functor.ir_transform(
            f.body, None, _rewrite_allocate, ["tir.Allocate"]
        )
        return f.with_body(annotated_body)

    return tvm.tir.transform.prim_func_pass(
        _ftransform, opt_level=0, name="tir.contrib.ethos-u.annotate_allocates"
    )
def RemoveConcatenates():
    """Remove concatenate operators by modifying the input buffers to write directly into
    the concatenated buffer with the appropriate offset.
    This pass works in two stages. The first finds every concatenate operation (marked by
    pragma_op = ethosu_concatenate) and it performs the following analysis. For each buffer
    that is concatenated, the buffer is marked that it is to be replaced with the concat
    buffer and the axis along which it is concatenated as well as the offset along that
    axis is recorded in 'ReplaceInfo'. Once this analysis is completed, the concatenate
    loop nest along with its buffer realization statements are removed.
    In the second stage, the input buffers to the concatenate operators are rewritten
    to use the concat buffer directly. This means applying the correct offset to the
    concatenation axis where ever the buffer is loaded or stored. Additionally, as the
    realization statements for the concat buffers were removed in the first stage, they
    are rewritten in place of the input buffer realization with the earliest liveness."""
    in_concat = [False]  # Whether the visitor is currently inside a concatenate operator
    concat_buffers = []  # The buffers produced by concatenate operators
    buffer_replace_map = {}  # A map of buffers to be replaced with the concat buffer
    attrs_by_buffer = {}  # AttrStmts by the buffer they reference
    realizes_by_buffer = {}  # BufferRealize statements by the buffer they reference
    first_replacements = {}  # The first buffers to be replaced by a given concat buffer
    ReplaceInfo = namedtuple("ReplaceInfo", ["buffer", "axis", "offset"])
    def _get_replace_info(buffer_load, concat_buffer):
        # Derive the concatenation axis and offset for one input buffer: the
        # axis is the index containing a Sub expression, and the offset is the
        # maximum value that index takes when all loop vars are fixed at 0.
        axis = 0
        offset = 0
        dmap = dict()
        for i, index in enumerate(buffer_load.indices):
            if isinstance(index, tvm.tir.Sub):
                axis = i
                dmap = {}
                def _visit(stmt):
                    if isinstance(stmt, tvm.tir.Var):
                        dmap[stmt] = tvm.arith.IntervalSet(0, 0)
                tvm.tir.stmt_functor.post_order_visit(index, _visit)
                offset = abs(int(tvm.arith.Analyzer().int_set(index, dmap).max_value))
        return ReplaceInfo(concat_buffer, axis, offset)
    def _pre_remove(stmt):
        # Stage 1 (pre-order): record realizes/attrs for later hoisting and
        # mark entry into a concatenate loop nest.
        if isinstance(stmt, tvm.tir.BufferRealize):
            # Record the realize statements by buffer as we need to hoist some of these
            realizes_by_buffer[stmt.buffer] = stmt
        if isinstance(stmt, tvm.tir.AttrStmt):
            if stmt.attr_key == "realize_scope" and isinstance(stmt.node, tvm.tir.Buffer):
                # Record the realize_scope attrs by buffer as we need to hoist some of these
                attrs_by_buffer[stmt.node] = stmt
            if stmt.attr_key == "pragma_op" and stmt.value.value == "ethosu_concatenate":
                # Record that we're entering a concatenate loop nest
                in_concat[0] = True
        if isinstance(stmt, tvm.tir.BufferLoad) and in_concat[0]:
            # Any buffer loaded inside a concat is a buffer we intend to replace with this pass.
            # The buffer_replace_map keeps track of which buffers need replacing with the
            # concat buffer.
            replace_info = _get_replace_info(stmt, concat_buffers[-1])
            buffer_replace_map[stmt.buffer] = replace_info
        if isinstance(stmt, tvm.tir.BufferStore) and in_concat[0]:
            # If we're inside a concat, the BufferStore indicates what the concat buffer is
            concat_buffers.append(stmt.buffer)
    def _post_remove(stmt):
        # Stage 1 (post-order): delete the concatenate nest and its realize/attr
        # statements; they get re-created in stage 2.
        if isinstance(stmt, tvm.tir.AttrStmt):
            if isinstance(stmt.node, tvm.tir.Buffer) and stmt.node in concat_buffers:
                return stmt.body
            if stmt.attr_key == "pragma_op" and stmt.value.value == "ethosu_concatenate":
                # When we leave a concatenate operator, record it and then remove the loop nest
                in_concat[0] = False
                return tvm.tir.Evaluate(0)
        if isinstance(stmt, tvm.tir.BufferRealize):
            if stmt.buffer in concat_buffers:
                return stmt.body
        return None
    def _pre_replace(stmt):
        if isinstance(stmt, (tvm.tir.BufferLoad, tvm.tir.BufferStore)):
            # The first buffer referenced that needs replacing with a concat buffer shall
            # be the one that the concat buffer realize is hoisted to.
            if stmt.buffer in buffer_replace_map:
                concat_buffer = buffer_replace_map[stmt.buffer].buffer
                if concat_buffer not in first_replacements:
                    first_replacements[concat_buffer] = stmt.buffer
    def _post_replace(stmt):
        # Stage 2 (post-order): rewrite loads/stores of replaced buffers into
        # the concat buffer (with offset applied) and hoist the concat buffer's
        # realize/attr onto the earliest replaced buffer.
        if isinstance(stmt, tvm.tir.BufferStore):
            if stmt.buffer in buffer_replace_map:
                # Replace the original buffer store with a new one into the concat buffer
                # and adjust the indices accordingly to account for the offset
                replace_info = buffer_replace_map[stmt.buffer]
                concat_buffer = replace_info.buffer
                new_indices = list(stmt.indices)
                new_indices[replace_info.axis] += replace_info.offset
                # The new buffer store node that stores the tensor directly into the concat buffer
                new_store = tvm.tir.BufferStore(concat_buffer, stmt.value, new_indices, stmt.span)
                return new_store
        if isinstance(stmt, tvm.tir.BufferLoad):
            if stmt.buffer in buffer_replace_map:
                # Replace the original buffer load with a new one into the concat buffer
                # and adjust the indices accordingly to account for the offset
                replace_info = buffer_replace_map[stmt.buffer]
                concat_buffer = replace_info.buffer
                new_indices = list(stmt.indices)
                new_indices[replace_info.axis] += replace_info.offset
                new_load = tvm.tir.BufferLoad(concat_buffer, new_indices, stmt.span)
                return new_load
        if isinstance(stmt, tvm.tir.BufferRealize):
            if stmt.buffer in buffer_replace_map:
                concat_buffer = buffer_replace_map[stmt.buffer].buffer
                # If this isn't the first buffer replaced, don't hoist the realize
                if first_replacements[concat_buffer] != stmt.buffer:
                    return stmt.body
                # Otherwise, do hoist it
                else:
                    concat_realize = realizes_by_buffer[concat_buffer]
                    new_realize = tvm.tir.BufferRealize(
                        concat_realize.buffer,
                        concat_realize.bounds,
                        concat_realize.condition,
                        stmt.body,
                        stmt.span,
                    )
                    return new_realize
        if isinstance(stmt, tvm.tir.AttrStmt):
            if isinstance(stmt.node, tvm.tir.Buffer) and stmt.node in buffer_replace_map:
                concat_buffer = buffer_replace_map[stmt.node].buffer
                # If this isn't the first buffer replaced, don't hoist the attrstmt
                if first_replacements[concat_buffer] != stmt.node:
                    return stmt.body
                # Otherwise, do hoist it
                else:
                    concat_attr = attrs_by_buffer[concat_buffer]
                    new_attr = tvm.tir.AttrStmt(
                        concat_attr.node,
                        concat_attr.attr_key,
                        concat_attr.value,
                        stmt.body,
                        stmt.span,
                    )
                    return new_attr
    def _ftransform(f, mod, ctx):
        # Stage 1: analyse and strip the concatenate nests.
        f = f.with_body(
            tvm.tir.stmt_functor.ir_transform(
                f.body,
                _pre_remove,
                _post_remove,
                ["tir.AttrStmt", "tir.BufferLoad", "tir.BufferStore", "tir.BufferRealize"],
            )
        )
        # Stage 2: rewrite the inputs to target the concat buffers directly.
        return f.with_body(
            tvm.tir.stmt_functor.ir_transform(
                f.body,
                _pre_replace,
                _post_replace,
                ["tir.AttrStmt", "tir.BufferLoad", "tir.BufferStore", "tir.BufferRealize"],
            )
        )
    return tvm.tir.transform.prim_func_pass(
        _ftransform, opt_level=0, name="tir.contrib.ethos-u.remove_concatenates"
    )
def CreatePrimFuncWithoutConstants(const_dict):
    """
    Strip constant arguments from the PrimFunc's parameter list. These should
    be replaced properly with tir.allocate_const when it becomes available.

    The constant dictionary is re-keyed from positional parameter indices to
    the actual tir.Var buffer pointers, since positional indices become
    meaningless once the constant parameters have been removed.
    """
    new_const_dict = dict()

    def _ftransform(f, mod, ctx):
        # Re-key the constants by the buffer data Var before dropping params.
        for param_idx, const in const_dict.items():
            buffer_var = f.buffer_map[f.params[param_idx]].data
            new_const_dict[buffer_var] = const
        # Keep only the non-constant parameters (and their buffer map entries).
        kept_params = list()
        kept_buffer_map = dict()
        for idx, param in enumerate(f.params):
            if idx in const_dict:
                continue
            kept_params.append(param)
            kept_buffer_map[param] = f.buffer_map[param]
        return tvm.tir.PrimFunc(
            kept_params,
            f.body,
            f.ret_type,
            kept_buffer_map,
            f.attrs,
            f.span,
        )

    def _create_primfunc_without_constants(mod):
        strip_pass = tvm.tir.transform.prim_func_pass(
            _ftransform, opt_level=0, name="tir.contrib.ethos-u.CreatePrimFuncWithoutConstants"
        )
        return strip_pass(mod), new_const_dict

    return _create_primfunc_without_constants
def HoistAllocates() -> tvm.IRModule:
    """
    Lift every allocate node up to the start of the main function's body.

    Returns
    -------
    tvm.IRModule
        The transformed module in which all allocates sit at the top.
    """
    return _ffi_api.HoistAllocates()
def CopyComputeReordering(
    max_copy_movements: Optional[int] = None, reorder_by_cycles: Optional[bool] = None
) -> tvm.IRModule:
    """
    Interleave copy and compute nodes so that independent DMA copies and
    computes can run in parallel.

    Copies into buffers with local scope are never moved: those bring the LUT
    into the SHRAM, which already happens in parallel with copying weights into
    the weights encoder.

    When reorder_by_cycles is enabled, the compute_cycles_hint drives the
    reordering via a two-step algorithm:
    (1) Every global copy (a copy of a constant into SRAM for a conv2d or
    depthwise_conv2d) is moved above the compute op preceding it. If computes
    generally take longer than copies, this alone hides the copy latencies.
    (2) A global copy that still outlasts the computes may be hidden further by
    pushing it higher up the graph, since graphs usually contain more compute
    ops than copy ops (only conv2d and depthwise_conv2d carry constants). The
    algorithm checks whether a copy is hidden and, if it is not and the
    preceding compute op has no preceding copy of its own, moves the copy one
    step further up — repeating until the copy can move no further or its
    latency is hidden.

    When reorder_by_cycles is not set, copies are simply moved up by a fixed
    number of positions: max_copy_movements if given, otherwise 1.

    Parameters
    ----------
    max_copy_movements: Optional[int]
        The maximum number of movements allowed for a copy.
        If None, the pass context option
        tir.contrib.ethos-u.copy_compute_reordering_max_copy_movements
        is used if provided, otherwise the default value will be 1.
    reorder_by_cycles: Optional[bool]
        Whether to reorder the computes and copies based on the cycle hint.
        If None, the pass context option
        tir.contrib.ethos-u.copy_compute_reordering_reorder_by_cycles
        is used if provided, otherwise the default value will be False.

    Returns
    -------
    tvm.IRModule
        The new module with copy and compute nodes reordered.
    """
    return _ffi_api.CopyComputeReordering(max_copy_movements, reorder_by_cycles)
def MergeConstants(const_dict):
    """
    This pass looks for the constants used by each compute operator
    and merges them into a single buffer.
    Constants written to a buffer with local scope are not merged.

    Parameters
    ----------
    const_dict : dict
        Maps the index of a PrimFunc parameter to its constant data.

    Returns
    -------
    Callable
        A function taking an IRModule and returning a tuple of the
        transformed module and the rewritten constant dictionary.
    """

    def _merge_constants(mod):
        nonlocal const_dict
        # The C++ MergeConstants pass requires a single "main" PrimFunc.
        # Fix: the original used a bare `except:`, which also swallows
        # KeyboardInterrupt/SystemExit and hides the original cause; catch
        # narrowly and chain the exception instead.
        try:
            mod["main"]
        except Exception as error:
            raise tvm.TVMError(
                "Expected a single primitive function called 'main'. "
                "Please run the MergeConstants pass in conjunction with the LowerToTIR() pass."
            ) from error
        # Attach the constants as a function attribute so the C++ pass can
        # access them; keys must be IntImm, values NDArray.
        new_const_dict = {}
        for param in const_dict.keys():
            new_const_dict[tvm.tir.IntImm("int64", param)] = tvm.nd.array(const_dict[param])
        mod["main"] = mod["main"].with_attr("ethos-u.const_dict", new_const_dict)
        mod = _ffi_api.MergeConstants()(mod)
        # Read back the merged constants and strip the helper attribute.
        const_dict = mod["main"].attrs["ethos-u.const_dict"]
        mod = _ffi_api.RemoveConstDictAttribute()(mod)
        new_const_dict = {}
        for param in const_dict.keys():
            new_const_dict[int(param)] = const_dict[param].numpy()
        return mod, new_const_dict

    return _merge_constants
| 45,493 | 42.368923 | 100 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/op/pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Relay operators for pooling for Arm(R) Ethos(TM)-U NPU"""
from typing import Tuple
import tvm
from tvm.relay.op import _make
from tvm.topi.generic import schedule_injective
from tvm.relay.op.op import OpStrategy
from tvm.relay.op import strategy as _strategy
from ..te import pooling_compute
def _extract_ethosu_pooling_params(attrs, args):
"""Get the parameters necessary to construct a ethosu_pooling compute TE
from a ethosu_pooling Relay call."""
ifm = args[0]
lut = args[1]
pooling_type = attrs.pooling_type
ifm_scale = attrs.ifm_scale
ifm_zero_point = attrs.ifm_zero_point
ofm_scale = attrs.ofm_scale
ofm_zero_point = attrs.ofm_zero_point
pool_shape = attrs.pool_shape
ofm_channels = attrs.ofm_channels
ofm_dtype = attrs.ofm_dtype
strides = attrs.strides
padding = attrs.padding
activation = attrs.activation
clip_min = attrs.clip_min
clip_max = attrs.clip_max
rounding_mode = attrs.rounding_mode
upscale = attrs.upscale
ifm_layout = attrs.ifm_layout
ofm_layout = attrs.ofm_layout
return (
ifm,
lut,
pooling_type,
ifm_scale,
ifm_zero_point,
ofm_scale,
ofm_zero_point,
pool_shape,
ofm_channels,
ofm_dtype,
strides,
padding,
activation,
clip_min,
clip_max,
rounding_mode,
upscale,
ifm_layout,
ofm_layout,
)
@tvm.ir.register_op_attr("contrib.ethosu.pooling", "FTVMCompute")
def create_ethosu_pooling_compute(attrs, args, out_type):
    """Lower an ethosu_pooling Relay call to its TE compute op."""
    return [pooling_compute(*_extract_ethosu_pooling_params(attrs, args))]
@tvm.ir.register_op_attr("contrib.ethosu.pooling", "FTVMStrategy")
def pooling_strategy_ethosu(attrs, inputs, out_type, target):
    """Return the single injective-scheduled strategy for ethosu_pooling."""
    op_strategy = OpStrategy()
    op_strategy.add_implementation(
        create_ethosu_pooling_compute,
        _strategy.wrap_topi_schedule(schedule_injective),
        name="ethosu_pooling",
    )
    return op_strategy
def ethosu_pooling(
    ifm: tvm.relay.Expr,
    lut: tvm.relay.Expr,
    pooling_type: str,
    ifm_scale: float,
    ifm_zero_point: int,
    ofm_scale: float,
    ofm_zero_point: int,
    pool_shape: Tuple[int, int],
    ofm_channels: int,
    ofm_dtype: str,
    strides: Tuple[int, int] = (1, 1),
    padding: Tuple[int, int, int, int] = (0, 0, 0, 0),
    activation: str = "NONE",
    clip_min: int = 0,
    clip_max: int = 0,
    rounding_mode: str = "TFL",
    upscale: str = "NONE",
    ifm_layout: str = "NHWC",
    ofm_layout: str = "NHWC",
) -> tvm.relay.Call:
    """Create a quantized 2D pooling call for the NPU.

    The input data may be laid out as either NHWC or NHCWB16.

    Parameters
    ----------
    ifm : tvm.relay.Expr
        The Input Feature Map tensor (IFM).
    lut : tvm.relay.Expr
        The look-up table of values to use if activation = "LUT".
    pooling_type : str
        "AVG" - average pool, "MAX" - max pool, "SUM" - reduce sum pool.
    ifm_scale : float
        Quantization scale of the IFM.
    ifm_zero_point : int
        Quantization zero point of the IFM.
    ofm_scale : float
        Quantization scale of the OFM.
    ofm_zero_point : int
        Quantization zero point of the OFM.
    pool_shape : tuple of int
        (pool_shape_height, pool_shape_width).
    ofm_channels : int
        Number of Output Feature Map channels.
    ofm_dtype : str
        OFM data type: "int8"/"uint8"/"int16" for "AVG" or "MAX" pooling,
        "int32" for "SUM" pooling.
    strides : tuple of int, optional
        (stride_height, stride_width).
    padding : tuple of int, optional
        (pad_top, pad_left, pad_bottom, pad_right).
    activation : str, optional
        "NONE", "CLIP", "TANH", "SIGMOID" or "LUT".
    clip_min : int, optional
        Minimum clipping value if activation = "CLIP".
    clip_max : int, optional
        Maximum clipping value if activation = "CLIP".
    rounding_mode : str, optional
        "TFL" (Tensorflow Lite), "TRUNCATE" (towards zero) or "NATURAL"
        (nearest, x.5 rounds towards +infinity).
    upscale : str, optional
        2x2 IFM upscaling mode: "NONE", "NEAREST" or "ZEROS".
    ifm_layout : str, optional
        "NHWC" or "NHCWB16".
    ofm_layout : str, optional
        "NHWC" or "NHCWB16".

    Returns
    -------
    out : tvm.relay.Call
        A call to the ethosu_pooling op.
    """
    call_args = (
        ifm, lut, pooling_type, ifm_scale, ifm_zero_point, ofm_scale,
        ofm_zero_point, pool_shape, ofm_channels, ofm_dtype, strides, padding,
        activation, clip_min, clip_max, rounding_mode, upscale, ifm_layout,
        ofm_layout,
    )
    return _make.ethosu_pooling(*call_args)
| 6,685 | 32.43 | 97 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/op/identity.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Relay identity operator for Arm(R) Ethos(TM)-U NPU"""
import tvm
from tvm.relay.op import _make
from tvm.topi.generic import schedule_injective
from tvm.relay.op.op import OpStrategy
from tvm.relay.op import strategy as _strategy
from ..te import identity_compute
@tvm.ir.register_op_attr("contrib.ethosu.identity", "FTVMCompute")
def create_ethosu_identity_compute(attrs, args, out_type):
    """Lower an ethosu_identity Relay call to its TE compute op."""
    ifm, lut = args[0], args[1]
    return [
        identity_compute(
            ifm,
            lut,
            attrs.ifm_scale,
            attrs.ifm_zero_point,
            attrs.ofm_scale,
            attrs.ofm_zero_point,
            attrs.activation,
        )
    ]
@tvm.ir.register_op_attr("contrib.ethosu.identity", "FTVMStrategy")
def identity_strategy_ethosu(attrs, inputs, out_type, target):
    """Return the single injective-scheduled strategy for ethosu_identity."""
    op_strategy = OpStrategy()
    op_strategy.add_implementation(
        create_ethosu_identity_compute,
        _strategy.wrap_topi_schedule(schedule_injective),
        name="ethosu_identity",
    )
    return op_strategy
def ethosu_identity(
    ifm: tvm.relay.Expr,
    lut: tvm.relay.Expr,
    ifm_scale: float = 1,
    ifm_zero_point: int = 0,
    ofm_scale: float = 1,
    ofm_zero_point: int = 0,
    activation: str = "NONE",
) -> tvm.relay.Call:
    """Create an NPU identity call.

    Takes a tensor of any shape and returns the same tensor, with the data
    optionally requantized.

    Parameters
    ----------
    ifm : tvm.relay.Expr
        The Input Feature Map tensor (IFM).
    lut : tvm.relay.Expr
        The look-up table values to use if activation = "LUT", "TANH" or
        "SIGMOID".
    ifm_scale : float
        Quantization scale of the IFM.
    ifm_zero_point : int
        Quantization zero point of the IFM.
    ofm_scale : float
        Quantization scale of the OFM.
    ofm_zero_point : int
        Quantization zero point of the OFM.
    activation : str, optional
        "NONE", "TANH", "SIGMOID" or "LUT".

    Returns
    -------
    out : tvm.relay.Call
        A call to the ethosu_identity op.
    """
    call_args = (ifm, lut, ifm_scale, ifm_zero_point, ofm_scale, ofm_zero_point, activation)
    return _make.ethosu_identity(*call_args)
| 3,459 | 33.949495 | 83 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/op/binary_elementwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Relay operators for binary elementwise operators for Arm(R) Ethos(TM)-U NPU"""
from typing import Optional
import tvm
from tvm.relay.op import _make
from tvm.topi.generic import schedule_injective
from tvm.relay.op.op import OpStrategy
from tvm.relay.op import strategy as _strategy
from ..te import binary_elementwise_compute
def _extract_ethosu_binary_elementwise_params(attrs, args):
"""Get the parameters necessary to construct a ethosu_binary_elementwise compute TE
from a ethosu_binary_elementwise Relay call."""
ifm = args[0]
ifm2 = args[1]
lut = args[2]
operator_type = attrs.operator_type
ifm_scale = attrs.ifm_scale
ifm_zero_point = attrs.ifm_zero_point
ifm2_scale = attrs.ifm2_scale
ifm2_zero_point = attrs.ifm2_zero_point
ofm_scale = attrs.ofm_scale
ofm_zero_point = attrs.ofm_zero_point
ifm_channels = attrs.ifm_channels
ifm2_channels = attrs.ifm2_channels
reversed_operands = attrs.reversed_operands
activation = attrs.activation
clip_min = attrs.clip_min
clip_max = attrs.clip_max
rounding_mode = attrs.rounding_mode
ifm_layout = attrs.ifm_layout
ifm2_layout = attrs.ifm2_layout
ofm_layout = attrs.ofm_layout
ofm_dtype = attrs.ofm_dtype
use_rescale = attrs.use_rescale
rescale_scale = attrs.rescale_scale
rescale_shift = attrs.rescale_shift
return (
ifm,
ifm2,
lut,
operator_type,
ifm_scale,
ifm_zero_point,
ifm2_scale,
ifm2_zero_point,
ofm_scale,
ofm_zero_point,
ifm_channels,
ifm2_channels,
reversed_operands,
activation,
clip_min,
clip_max,
rounding_mode,
ifm_layout,
ifm2_layout,
ofm_layout,
ofm_dtype,
use_rescale,
rescale_scale,
rescale_shift,
)
@tvm.ir.register_op_attr("contrib.ethosu.binary_elementwise", "FTVMCompute")
def create_ethosu_binary_elementwise_compute(attrs, args, out_type):
    """Lower an ethosu_binary_elementwise Relay call to its TE compute op."""
    return [binary_elementwise_compute(*_extract_ethosu_binary_elementwise_params(attrs, args))]
@tvm.ir.register_op_attr("contrib.ethosu.binary_elementwise", "FTVMStrategy")
def binary_elementwise_strategy_ethosu(attrs, inputs, out_type, target):
    """Return the single injective-scheduled strategy for
    ethosu_binary_elementwise."""
    op_strategy = OpStrategy()
    op_strategy.add_implementation(
        create_ethosu_binary_elementwise_compute,
        _strategy.wrap_topi_schedule(schedule_injective),
        name="ethosu_binary_elementwise",
    )
    return op_strategy
def ethosu_binary_elementwise(
    ifm: tvm.relay.Expr,
    ifm2: tvm.relay.Expr,
    lut: tvm.relay.Expr,
    operator_type: str,
    ifm_scale: float,
    ifm_zero_point: int,
    ifm2_scale: float,
    ifm2_zero_point: int,
    ofm_scale: float,
    ofm_zero_point: int,
    ifm_channels: int,
    ifm2_channels: int,
    reversed_operands: bool,
    ofm_dtype: str,
    activation: Optional[str] = "NONE",
    clip_min: Optional[int] = 0,
    clip_max: Optional[int] = 0,
    rounding_mode: Optional[str] = "TFL",
    ifm_layout: Optional[str] = "NHWC",
    ifm2_layout: Optional[str] = "NHWC",
    ofm_layout: Optional[str] = "NHWC",
    use_rescale: Optional[bool] = False,
    rescale_scale: Optional[int] = 0,
    rescale_shift: Optional[int] = 0,
) -> tvm.relay.Call:
    """Create a quantized binary elementwise call for the NPU.

    Input data may be laid out as either NHWC or NHCWB16.

    Parameters
    ----------
    ifm : tvm.relay.Expr
        The Input Feature Map tensor (IFM).
    ifm2 : tvm.relay.Expr
        The Input Feature Map tensor 2 (IFM2).
    lut : tvm.relay.Expr
        The look-up table of values to use if activation = "LUT".
    operator_type : str
        One of "ADD", "SUB", "MUL", "MIN", "MAX", "SHR", "SHL".
    ifm_scale, ifm_zero_point : float, int
        Quantization parameters of the IFM.
    ifm2_scale, ifm2_zero_point : float, int
        Quantization parameters of the IFM2.
    ofm_scale, ofm_zero_point : float, int
        Quantization parameters of the OFM.
    ifm_channels : int
        Number of IFM channels.
    ifm2_channels : int
        Number of IFM2 channels.
    reversed_operands : bool
        True if IFM2 is the first operand and IFM is the second operand.
    ofm_dtype : str
        The OFM tensor type:
        MUL, ADD, SUB: {uint8, int8, int32} -> {uint8, int8, int32}, any pairing;
        MAX, MIN: IFM and OFM must be the same type, one of {int8, uint8};
        SHR: {int32} -> {int8, uint8, int32}, any pairing;
        SHL: {int32} -> {int32} only.
    activation : str, optional
        "NONE", "CLIP", "TANH", "SIGMOID" or "LUT".
        {int8, uint8} allow all of these; {int32} allows "NONE" only.
    clip_min : int, optional
        Minimum clipping value if activation = "CLIP".
    clip_max : int, optional
        Maximum clipping value if activation = "CLIP".
    rounding_mode : str, optional
        "TFL" (Tensorflow Lite), "TRUNCATE" (towards zero) or "NATURAL"
        (nearest, x.5 rounds towards +infinity).
    ifm_layout : str, optional
        "NHWC" or "NHCWB16".
    ifm2_layout : str, optional
        "NHWC" or "NHCWB16".
    ofm_layout : str, optional
        "NHWC" or "NHCWB16".
    use_rescale : bool, optional
        Use explicit scaling if True.
    rescale_scale : int, optional
        Scale value for rescale. For 32-bit operations scale is not applied
        but shift is.
    rescale_shift : int, optional
        Shift value for rescale.

    Returns
    -------
    out : tvm.relay.Call
        A call to the ethosu_binary_elementwise op.
    """
    call_args = (
        ifm, ifm2, lut, operator_type, ifm_scale, ifm_zero_point, ifm2_scale,
        ifm2_zero_point, ofm_scale, ofm_zero_point, ifm_channels,
        ifm2_channels, reversed_operands, activation, clip_min, clip_max,
        rounding_mode, ifm_layout, ifm2_layout, ofm_layout, ofm_dtype,
        use_rescale, rescale_scale, rescale_shift,
    )
    return _make.ethosu_binary_elementwise(*call_args)
| 8,243 | 32.925926 | 89 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/op/op_attrs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The attributes node used for Arm(R) Ethos(TM)-U NPU Relay operators."""
from tvm.ir import Attrs
import tvm._ffi
@tvm._ffi.register_object("relay.attrs.EthosuConv2DAttrs")
class EthosuConv2DAttrs(Attrs):
    """Attributes for contrib.ethosu.conv2d.

    Python mirror of the C++ attrs node registered as
    "relay.attrs.EthosuConv2DAttrs"; fields are read as attributes
    (e.g. ``attrs.ifm_scale``).
    """
@tvm._ffi.register_object("relay.attrs.EthosuIdentityAttrs")
class EthosuIdentityAttrs(Attrs):
    """Attributes for contrib.ethosu.identity.

    Python mirror of the C++ attrs node registered as
    "relay.attrs.EthosuIdentityAttrs".
    """
@tvm._ffi.register_object("relay.attrs.EthosuDepthwiseConv2DAttrs")
class EthosuDepthwiseConv2DAttrs(Attrs):
    """Attributes for contrib.ethosu.depthwise_conv2d.

    Python mirror of the C++ attrs node registered as
    "relay.attrs.EthosuDepthwiseConv2DAttrs".
    """
@tvm._ffi.register_object("relay.attrs.EthosuPoolingAttrs")
class EthosuPooling2DAttrs(Attrs):
    """Attributes for contrib.ethosu.pooling.

    Python mirror of the C++ attrs node registered as
    "relay.attrs.EthosuPoolingAttrs".
    """

    # NOTE(review): the class name says "Pooling2D" while the registered key
    # is plain "EthosuPoolingAttrs"; name kept as-is for compatibility.
@tvm._ffi.register_object("relay.attrs.EthosuBinaryElementwiseAttrs")
class EthosuBinaryElementwiseAttrs(Attrs):
    """Attributes for contrib.ethosu.binary_elementwise.

    Python mirror of the C++ attrs node registered as
    "relay.attrs.EthosuBinaryElementwiseAttrs".
    """
@tvm._ffi.register_object("relay.attrs.EthosuUnaryElementwiseAttrs")
class EthosuUnaryElementwiseAttrs(Attrs):
    """Attributes for contrib.ethosu.unary_elementwise.

    Python mirror of the C++ attrs node registered as
    "relay.attrs.EthosuUnaryElementwiseAttrs".
    """
| 1,849 | 36 | 74 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/op/unary_elementwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Relay operator for unary elementwise operations for Arm(R) Ethos(TM)-U NPU"""
from typing import Optional
import tvm
from tvm.relay.op import _make
from tvm.topi.generic import schedule_injective
from tvm.relay.op.op import OpStrategy
from tvm.relay.op import strategy as _strategy
from ..te import unary_elementwise_compute
def _extract_ethosu_unary_elementwise_params(attrs, args):
"""Get the parameters necessary to construct a ethosu_unary_elementwise compute TE
from a ethosu_unary_elementwise Relay call."""
ifm = args[0]
lut = args[1]
operator_type = attrs.operator_type
ifm_scale = attrs.ifm_scale
ifm_zero_point = attrs.ifm_zero_point
ofm_scale = attrs.ofm_scale
ofm_zero_point = attrs.ofm_zero_point
ofm_channels = attrs.ofm_channels
activation = attrs.activation
clip_min = attrs.clip_min
clip_max = attrs.clip_max
rounding_mode = attrs.rounding_mode
ifm_layout = attrs.ifm_layout
ofm_layout = attrs.ofm_layout
return (
ifm,
lut,
operator_type,
ifm_scale,
ifm_zero_point,
ofm_scale,
ofm_zero_point,
ofm_channels,
activation,
clip_min,
clip_max,
rounding_mode,
ifm_layout,
ofm_layout,
)
@tvm.ir.register_op_attr("contrib.ethosu.unary_elementwise", "FTVMCompute")
def create_ethosu_unary_elementwise_compute(attrs, args, out_type):
    """Lower an ethosu_unary_elementwise Relay call to its TE compute op."""
    return [unary_elementwise_compute(*_extract_ethosu_unary_elementwise_params(attrs, args))]
@tvm.ir.register_op_attr("contrib.ethosu.unary_elementwise", "FTVMStrategy")
def unary_elementwise_strategy_ethosu(attrs, inputs, out_type, target):
    """Return the single injective-scheduled strategy for
    ethosu_unary_elementwise."""
    op_strategy = OpStrategy()
    op_strategy.add_implementation(
        create_ethosu_unary_elementwise_compute,
        _strategy.wrap_topi_schedule(schedule_injective),
        name="ethosu_unary_elementwise",
    )
    return op_strategy
def ethosu_unary_elementwise(
    ifm: tvm.relay.Expr,
    lut: tvm.relay.Expr,
    operator_type: str,
    ifm_scale: float,
    ifm_zero_point: int,
    ofm_scale: float,
    ofm_zero_point: int,
    ofm_channels: int,
    activation: Optional[str] = "NONE",
    clip_min: Optional[int] = 0,
    clip_max: Optional[int] = 0,
    rounding_mode: Optional[str] = "TFL",
    ifm_layout: Optional[str] = "NHWC",
    ofm_layout: Optional[str] = "NHWC",
) -> tvm.relay.Call:
    """Create a quantized unary elementwise call for the NPU.

    Input data may be laid out as either NHWC or NHCWB16.

    Parameters
    ----------
    ifm : tvm.relay.Expr
        The Input Feature Map tensor (IFM).
    lut : tvm.relay.Expr
        The look-up table values to use if activation = "LUT".
    operator_type : str
        One of "ABS", "CLZ".
    ifm_scale : float
        Quantization scale of the IFM.
    ifm_zero_point : int
        Quantization zero point of the IFM.
    ofm_scale : float
        Quantization scale of the OFM.
    ofm_zero_point : int
        Quantization zero point of the OFM.
    ofm_channels : int
        Number of OFM channels.
    activation : str, optional
        "NONE", "CLIP", "TANH", "SIGMOID" or "LUT".
    clip_min : int, optional
        Minimum clipping value if activation = "CLIP".
    clip_max : int, optional
        Maximum clipping value if activation = "CLIP".
    rounding_mode : str, optional
        "TFL" (Tensorflow Lite), "TRUNCATE" (towards zero) or "NATURAL"
        (nearest, x.5 rounds towards +infinity).
    ifm_layout : str, optional
        "NHWC" or "NHCWB16".
    ofm_layout : str, optional
        "NHWC" or "NHCWB16".

    Returns
    -------
    out : tvm.relay.Call
        A call to the ethosu_unary_elementwise op.
    """
    call_args = (
        ifm, lut, operator_type, ifm_scale, ifm_zero_point, ofm_scale,
        ofm_zero_point, ofm_channels, activation, clip_min, clip_max,
        rounding_mode, ifm_layout, ofm_layout,
    )
    return _make.ethosu_unary_elementwise(*call_args)
| 5,647 | 33.230303 | 86 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/op/depthwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Relay operator for depthwise convolution for Arm(R) Ethos(TM)-U NPU"""
from typing import Tuple
import tvm
from tvm.relay.op import _make
from tvm.topi.generic import schedule_injective
from tvm.relay.op.op import OpStrategy
from tvm.relay.op import strategy as _strategy
from ..te import depthwise_conv2d_compute
def _extract_ethosu_depthwise_conv2d_params(attrs, args):
"""Get the parameters necessary to construct a ethosu_depthwise_conv2d compute TE
from a ethosu_depthwise_conv2d Relay call."""
ifm = args[0]
weight = args[1]
scale_bias = args[2]
lut = args[3]
ifm_scale = attrs.ifm_scale
ifm_zero_point = attrs.ifm_zero_point
weight_zero_point = attrs.weight_zero_point
ofm_scale = attrs.ofm_scale
ofm_zero_point = attrs.ofm_zero_point
strides = attrs.strides
padding = attrs.padding
dilation = attrs.dilation
activation = attrs.activation
clip_min = attrs.clip_min
clip_max = attrs.clip_max
rounding_mode = attrs.rounding_mode
upscale = attrs.upscale
ifm_layout = attrs.ifm_layout
ofm_layout = attrs.ofm_layout
ofm_dtype = attrs.ofm_dtype
return (
ifm,
weight,
scale_bias,
lut,
ifm_scale,
ifm_zero_point,
weight_zero_point,
ofm_scale,
ofm_zero_point,
strides,
padding,
dilation,
activation,
clip_min,
clip_max,
rounding_mode,
upscale,
ifm_layout,
ofm_layout,
ofm_dtype,
)
@tvm.ir.register_op_attr("contrib.ethosu.depthwise_conv2d", "FTVMCompute")
def create_ethosu_depthwise_conv2d_compute(attrs, args, out_type):
    """Lower an ethosu_depthwise_conv2d Relay call to its TE compute op."""
    return [depthwise_conv2d_compute(*_extract_ethosu_depthwise_conv2d_params(attrs, args))]
@tvm.ir.register_op_attr("contrib.ethosu.depthwise_conv2d", "FTVMStrategy")
def depthwise_conv2d_strategy_ethosu(attrs, inputs, out_type, target):
    """Return the single injective-scheduled strategy for
    ethosu_depthwise_conv2d."""
    op_strategy = OpStrategy()
    op_strategy.add_implementation(
        create_ethosu_depthwise_conv2d_compute,
        _strategy.wrap_topi_schedule(schedule_injective),
        name="ethosu_depthwise_conv2d",
    )
    return op_strategy
def ethosu_depthwise_conv2d(
    ifm: tvm.relay.Expr,
    weight: tvm.relay.Expr,
    scale_bias: tvm.relay.Expr,
    lut: tvm.relay.Expr,
    ifm_scale: float,
    ifm_zero_point: int,
    weight_zero_point: int,
    ofm_scale: float,
    ofm_zero_point: int,
    kernel_shape: Tuple[int, int],
    ofm_channels: int,
    strides: Tuple[int, int] = (1, 1),
    padding: Tuple[int, int, int, int] = (0, 0, 0, 0),
    dilation: Tuple[int, int] = (1, 1),
    activation: str = "NONE",
    clip_min: int = 0,
    clip_max: int = 0,
    rounding_mode: str = "TFL",
    upscale: str = "NONE",
    ifm_layout: str = "NHWC",
    ofm_layout: str = "NHWC",
    ofm_dtype: str = "int8",
) -> tvm.relay.Call:
    """Create a quantized 2D depthwise convolution call for the NPU.

    The input data may be laid out as NHWC or NHCWB16; kernel weights use
    OHWI. Reference: https://developer.arm.com/documentation/102420/0200/

    Note that the per-channel weight scale and bias must be packed together
    into a combined tensor of uint80s, represented in TVM as a
    (channels, 10) tensor of type uint8 — see the Technical Reference
    Manual linked above for details.

    Parameters
    ----------
    ifm : tvm.relay.Expr
        The Input Feature Map tensor (IFM).
    weight : tvm.relay.Expr
        The weight tensor.
    scale_bias : tvm.relay.Expr
        The packed per-channel weight scale and bias tensor.
    lut : tvm.relay.Expr
        The look-up table of values to use if activation = "LUT".
    ifm_scale : float
        Quantization scale of the IFM.
    ifm_zero_point : int
        Quantization zero point of the IFM.
    weight_zero_point : int
        Quantization zero point of the weight tensor.
    ofm_scale : float
        Quantization scale of the OFM.
    ofm_zero_point : int
        Quantization zero point of the OFM.
    kernel_shape : tuple of int
        (kernel_height, kernel_width).
    ofm_channels : int
        Number of OFM channels.
    strides : tuple of int, optional
        (stride_height, stride_width).
    padding : tuple of int, optional
        (pad_top, pad_left, pad_bottom, pad_right).
    dilation : tuple of int, optional
        (dilation_height, dilation_width).
    activation : str, optional
        "NONE", "CLIP", "TANH", "SIGMOID" or "LUT".
    clip_min : int, optional
        Minimum clipping value if activation = "CLIP".
    clip_max : int, optional
        Maximum clipping value if activation = "CLIP".
    rounding_mode : str, optional
        "TFL" (Tensorflow Lite), "TRUNCATE" (towards zero) or "NATURAL"
        (nearest, x.5 rounds towards +infinity).
    upscale : str, optional
        2x2 IFM upscaling mode: "NONE", "NEAREST" or "ZEROS".
    ifm_layout : str, optional
        "NHWC" or "NHCWB16".
    ofm_layout : str, optional
        "NHWC" or "NHCWB16".
    ofm_dtype : str, optional
        OFM data type: 'int8', 'uint8' or 'int16'.

    Returns
    -------
    out : tvm.relay.Call
        A call to the ethosu_depthwise_conv2d op.
    """
    call_args = (
        ifm, weight, scale_bias, lut, ifm_scale, ifm_zero_point,
        weight_zero_point, ofm_scale, ofm_zero_point, kernel_shape,
        ofm_channels, strides, padding, dilation, activation, clip_min,
        clip_max, rounding_mode, upscale, ifm_layout, ofm_layout, ofm_dtype,
    )
    return _make.ethosu_depthwise_conv2d(*call_args)
| 7,603 | 33.252252 | 86 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/op/convolution.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Relay operators for convolutions for Arm(R) Ethos(TM)-U NPU"""
from typing import Tuple
import tvm # type: ignore
from tvm.relay.op import _make # type: ignore
from tvm.topi.generic import schedule_injective # type: ignore
from tvm.relay.op.op import OpStrategy # type: ignore
from tvm.relay.op import strategy as _strategy
from ..te import conv2d_compute
def _extract_ethosu_conv2d_params(attrs, args):
"""Get the parameters necessary to construct a compute TE
from a ethosu_conv2d Relay call."""
ifm = args[0]
weight = args[1]
scale_bias = args[2]
lut = args[3]
ifm_scale = attrs.ifm_scale
ifm_zero_point = attrs.ifm_zero_point
weight_zero_point = attrs.weight_zero_point
ofm_scale = attrs.ofm_scale
ofm_zero_point = attrs.ofm_zero_point
strides = attrs.strides
padding = attrs.padding
dilation = attrs.dilation
activation = attrs.activation
clip_min = attrs.clip_min
clip_max = attrs.clip_max
rounding_mode = attrs.rounding_mode
upscale = attrs.upscale
ifm_layout = attrs.ifm_layout
ofm_layout = attrs.ofm_layout
return (
ifm,
weight,
scale_bias,
lut,
ifm_scale,
ifm_zero_point,
weight_zero_point,
ofm_scale,
ofm_zero_point,
strides,
padding,
dilation,
activation,
clip_min,
clip_max,
rounding_mode,
upscale,
ifm_layout,
ofm_layout,
)
@tvm.ir.register_op_attr("contrib.ethosu.conv2d", "FTVMCompute")
def create_ethosu_conv2d_compute(attrs, args, out_type):
    """Lower an ethosu_conv2d Relay call to its TE compute op."""
    return [conv2d_compute(*_extract_ethosu_conv2d_params(attrs, args))]
@tvm.ir.register_op_attr("contrib.ethosu.conv2d", "FTVMStrategy")
def conv2d_strategy_ethosu(attrs, inputs, out_type, target):
    """Return the single injective-scheduled strategy for ethosu_conv2d."""
    op_strategy = OpStrategy()
    op_strategy.add_implementation(
        create_ethosu_conv2d_compute,
        _strategy.wrap_topi_schedule(schedule_injective),
        name="ethosu_conv2d",
    )
    return op_strategy
def ethosu_conv2d(
    ifm: tvm.relay.Expr,
    weight: tvm.relay.Expr,
    scale_bias: tvm.relay.Expr,
    lut: tvm.relay.Expr,
    ifm_scale: float,
    ifm_zero_point: int,
    weight_zero_point: int,
    ofm_scale: float,
    ofm_zero_point: int,
    kernel_shape: Tuple[int, int],
    ofm_channels: int,
    strides: Tuple[int, int] = (1, 1),
    padding: Tuple[int, int, int, int] = (0, 0, 0, 0),
    dilation: Tuple[int, int] = (1, 1),
    activation: str = "NONE",
    clip_min: int = 0,
    clip_max: int = 0,
    rounding_mode: str = "TFL",
    upscale: str = "NONE",
    ifm_layout: str = "NHWC",
    ofm_layout: str = "NHWC",
) -> tvm.relay.Call:
    """This is a quantized 2D convolution operation as supported by
    the NPU. It accepts either NHWC or NHCWB16 format
    for the input data and OHWI format for the kernel weights.
    Reference: https://developer.arm.com/documentation/102420/0200/
    Note that the per-channel weight scale and bias tensor must be
    packed together into a combined tensor of uint80s. This is represented
    in TVM by a (channels, 10) tensor of type uint8. For more detail,
    refer to the Technical Reference Manual linked above.
    Parameters
    ----------
    ifm : tvm.relay.Expr
        The Input Feature Map tensor (IFM).
    weight : tvm.relay.Expr
        The weight tensor.
    scale_bias : tvm.relay.Expr
        The packed per-channel weight scale and bias tensor.
    lut : tvm.relay.Expr
        The look-up table of values to use if activation = "LUT".
    ifm_scale : float
        The quantization scale for the Input Feature Map tensor.
    ifm_zero_point : int
        The quantization zero point for the Input Feature Map tensor.
    weight_zero_point : int
        The quantization zero point for the weight tensor.
    ofm_scale : float
        The quantization scale for the Output Feature Map tensor.
    ofm_zero_point : int
        The quantization zero point for the Output Feature Map tensor.
    kernel_shape : tuple of int
        The 2 dimensional kernel shape as (kernel_height, kernel_width).
    ofm_channels : int
        The number of the Output Feature Map channels.
    strides : tuple of int, optional
        The 2 dimensional strides as (stride_height, stride_width).
    padding : tuple of int, optional
        The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
    dilation : tuple of int, optional
        The 2 dimensional dilation as (dilation_height, dilation_width).
    activation : str, optional
        The activation function to use.
            "NONE" - no activation function.
            "CLIP" - clip the output between clip_min and clip_max.
            "TANH" - tanh activation function.
            "SIGMOID" - sigmoid activation function.
            "LUT" - use a look-up table to perform the activation function.
    clip_min : int, optional
        The minimum clipping value if activation = "CLIP"
    clip_max : int, optional,
        The maximum clipping value if activation = "CLIP"
    rounding_mode : str, optional
        The rounding mode to apply to the Output Feature Map tensor.
            "TFL" - Tensorflow Lite rounding scheme.
            "TRUNCATE" - Truncate towards zero.
            "NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
    upscale : str, optional
        The 2x2 upscaling mode to apply to the Input Feature Map tensor.
            "NONE" - no upscaling.
            "NEAREST" - upscale using nearest neighbour.
            "ZEROS" - upscale using zeros.
    ifm_layout : str, optional
        The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
    ofm_layout : str, optional
        The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
    Returns
    -------
    tvm.relay.Call
        A call to the ethosu_conv2d op.
    """
    # Thin wrapper: all validation/lowering happens in the C++ op registration
    # reached through the generated _make module.
    return _make.ethosu_conv2d(
        ifm,
        weight,
        scale_bias,
        lut,
        ifm_scale,
        ifm_zero_point,
        weight_zero_point,
        ofm_scale,
        ofm_zero_point,
        kernel_shape,
        ofm_channels,
        strides,
        padding,
        dilation,
        activation,
        clip_min,
        clip_max,
        rounding_mode,
        upscale,
        ifm_layout,
        ofm_layout,
    )
| 7,246 | 32.864486 | 86 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/ethosu/op/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"Relay operators for the Arm(R) Ethos(TM)-U NPU"
from .convolution import ethosu_conv2d
from .depthwise import ethosu_depthwise_conv2d
from .pooling import ethosu_pooling
from .binary_elementwise import ethosu_binary_elementwise
from .identity import ethosu_identity
from .unary_elementwise import ethosu_unary_elementwise
| 1,109 | 43.4 | 62 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/uma/backend.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unnecessary-ellipsis
"""Backend base class of the Universal Modular Accelerator Interface (UMA)"""
from abc import ABC, abstractmethod
from typing import Union, Dict, Callable, Optional, Any
import tvm
from tvm.relay.backend.contrib.uma.api.codegen import UMACodegen
from tvm.relay.backend.contrib.uma.api.lower import UMALower
from tvm.relay.backend.contrib.uma.api.partitioner import UMAPartitioner
from tvm.relay.backend.contrib.uma.api.utils import PassPhase
class UMABackend(ABC):
    """Backend base class of the Universal Modular Accelerator Interface (UMA)"""
    def __init__(self, merge_compiler_regions: bool = True) -> None:
        # Attribute name -> default value for the custom target kind; filled
        # by _register_target_attr and handed to the C++ target registration.
        self._target_attrs: Dict = {}
        self._target_preprocessor: Callable[[str], Dict[str, Any]] = None
        # The three lowering stages owned by a UMA backend.
        self._relay_to_relay = UMAPartitioner(self.target_name, merge_compiler_regions)
        self._relay_to_tir = UMALower(self.target_name)
        self._tir_to_runtime = UMACodegen(self.target_name)
    @property
    @abstractmethod
    def target_name(self) -> str:
        """Name of the hardware target.
        Returns
        -------
        out : str
            The hardware target name.
        """
        ...
    # Target configuration
    def _register_target_attr(
        self,
        name: str,
        default: Optional[Union[str, int, bool]] = "",
    ) -> None:
        """Register a target attribute name that can be used during target instantiation.
        Parameters
        ----------
        name: str
            The name of the target attribute.
        default: Optional[Union[str, int, bool]]
            A default value for the attribute.
            If none is provided, the attribute will be treated as a string.
        Example
        -------
        Here is an example of how two attribute options are registered.
        .. code-block:: python
            self._register_target_attr("attrA", default=0)
            self._register_target_attr("attrB", default=False)
        """
        self._target_attrs[name] = default
    # Relay to Relay function registration
    def _register_relay_pass(self, phase: PassPhase, relay_pass: tvm.transform.Pass) -> None:
        """Registers a relay pass at the given phase in the lowering process.
        Parameters
        ----------
        phase: PassPhase
            The phase at which the pass is registered.
        relay_pass: tvm.transform.Pass
            The relay pass to be registered.
        Example
        -------
        Here is an example of how two relay passes are registered.
        Passes of the same phase are executed in the order they are registered.
        .. code-block:: python
            self._register_relay_pass(PassPhase.PRE_PARTITIONING, MyPassA)
            self._register_relay_pass(PassPhase.POST_PARTITIONING, MyPassB)
        Where a relay pass can look like this:
        .. code-block:: python
            @tvm.ir.transform.module_pass(opt_level=0)
            class MyPassA:
                def transform_module(self, mod, ctx):
                    # My pass functionality...
                    return mod
        """
        self._relay_to_relay._relay_passes.append((phase, relay_pass))
    def _register_pattern(
        self,
        name: str,
        pattern: tvm.relay.dataflow_pattern.DFPattern,
        predicate: Optional[Callable] = None,
    ) -> None:
        """Registers a dataflow pattern that is used to partition the relay graph.
        Parameters
        ----------
        name: str
            The name of the pattern
        pattern: tvm.relay.dataflow_pattern.DFPattern
            Relay DFPattern
        predicate: Optional[Callable]
            Optional predicate for Relay DFPattern
        Example
        -------
        Here is an example of how two dataflow patterns are registered.
        During partioning, patterns are searched in order of registration.
        .. code-block:: python
            self._register_pattern("conv1d", conv1d_pattern)
            self._register_pattern("conv2d", conv2d_pattern)
        Where a dataflow pattern can look like this:
        .. code-block:: python
            conv1d_pattern = is_op("nn.conv1d")(wildcard(), wildcard())
            optional_bias = lambda x: is_op("nn.bias_add")(x, wildcard())
            optional_relu = lambda x: is_op("nn.relu")(x)
            conv1d_pattern = conv1d_pattern.optional(optional_bias).optional(optional_relu)
        """
        self._relay_to_relay.add_pattern(name, pattern, predicate)
    # Relay to TIR function registration
    def _register_operator_strategy(
        self,
        op: str,
        strategy: Callable[
            [tvm.ir.Attrs, tvm.ir.Array, tvm.ir.TensorType, tvm.target.Target],
            tvm.relay.op.op.OpStrategy,
        ],
        plevel: Optional[int] = 11,
    ) -> None:
        """Registers an operator strategy that is used to partition the relay graph.
        Parameters
        ----------
        op: str
            The name of the operator for which this strategy will be registered.
        strategy: Callable[[tvm.ir.Attrs, tvm.ir.Array, tvm.ir.TensorType, tvm.target.Target],
                            tvm.relay.op.op.OpStrategy]
            The strategy function.
        plevel: Optional[int] = 11
            The priority level of the strategy. Higher plevel equals higher priorization.
            The TVM default for topi strategies is 10 so by default new UMA strategies are
            always used.
        Example
        -------
        Here is an example of how two operator strategies are registered.
        .. code-block:: python
            self._register_operator_strategy("nn.conv1d", custom_conv1d_strategy)
            self._register_operator_strategy("nn.conv2d", custom_conv2d_strategy)
        Where a strategy function can look like this:
        .. code-block:: python
            @relay.op.strategy.override_native_generic_func("custom_conv1d_strategy")
            def custom_conv1d_strategy(attrs, inputs, out_type, target):
                strategy = _op.OpStrategy()
                strategy.add_implementation(
                    wrap_compute_conv1d(custom_conv1d_compute),
                    wrap_topi_schedule(custom_conv1d_schedule),
                    name="custom_conv1d.generic",
                return strategy
        """
        self._relay_to_tir._operator_strategies.append((op, strategy, plevel))
    def _register_tir_pass(
        self, phase: PassPhase, tir_pass: tvm.tir.transform.PrimFuncPass
    ) -> None:
        """Registers a TIR pass at the given phase in the lowering process.
        Parameters
        ----------
        phase: PassPhase
            The phase at which the pass is registered.
        tir_pass: tvm.tir.transform.PrimFuncPass
            The TIR pass to be registered.
        Example
        -------
        Here is an example of how two TIR passes are registered.
        Passes of the same phase are executed in the order they are registered.
        .. code-block:: python
            self._register_tir_pass(PassPhase.TIR_PHASE_0, MyPassA)
            self._register_tir_pass(PassPhase.TIR_PHASE_1, MyPassB)
        Where a TIR pass can look like this:
        .. code-block:: python
            @tvm.tir.transform.prim_func_pass(opt_level=0)
            class MyPassA:
                def transform_function(self, func, mod, ctx):
                    # My pass functionality...
                    return func
        """
        self._relay_to_tir._tir_passes.append((phase, tir_pass))
    # TIR to runtime function registration
    def _register_codegen(self, fmt: str = "c", **kwargs) -> None:
        """Registers a codegen which is used in place of the default C-codegen.
        Parameters
        ----------
        fmt: str
            The codegen format. For now, only C-codegen is supported by UMA.
        **kwargs
            Keyword arguments for the chosen codegen.
        Example
        -------
        Here is an example of how the custom C-codegen is registered and configured.
        Passes of the same phase are executed in the order they are registered.
        .. code-block:: python
            self._register_codegen(
                fmt="c", includes=gen_includes
            )
        The C-codegen currently provides one hook which allows the user to insert code through
        the python API.
        - `includes` hooks into the include stream and allows insertion of custom includes.
        The code generation functions can look like this:
        .. code-block:: python
            def gen_includes() -> str:
                includes = "#include <my_custom_header.h>\n"
                return includes
        """
        self._tir_to_runtime._register_codegen(fmt, **kwargs)
    # Backend functions
    def register(self) -> None:
        """
        Registering UMABackend:
        registering target attributes, relay_to_relay, relay_to_tir and tir_to_runtime
        """
        registration_func = tvm.get_global_func("relay.backend.contrib.uma.RegisterTarget")
        # Validate the attribute table up front and name the offending entry,
        # rather than failing with a generic message (or later in C++).
        for name, attr in self._target_attrs.items():
            if attr is None:
                raise ValueError(f"Target attribute '{name}' is None; None is not supported.")
        # skip if target is already registered
        if self.target_name not in tvm.target.Target.list_kinds():
            registration_func(self.target_name, self._target_attrs)
        self._relay_to_relay.register()
        self._relay_to_tir.register()
        self._tir_to_runtime.register()
    def partition(
        self, mod: tvm.IRModule, params: Optional[Dict[str, tvm.runtime.NDArray]] = None
    ) -> tvm.IRModule:
        # Delegate graph partitioning to the relay-to-relay stage.
        return self._relay_to_relay.partition(mod, params)
| 10,541 | 34.614865 | 95 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/uma/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""UMA modules for Relay."""
from .backend import UMABackend
from .api.utils import uma_available
__all__ = ["UMABackend", "uma_available"]
| 928 | 37.708333 | 62 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/uma/api/partitioner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Partitioner base class of the Universal Modular Accelerator Interface (UMA)"""
from typing import Callable, Dict, List, Tuple, Optional
import tvm
from tvm import relay
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.contrib.register import register_pattern_table
from .utils import PassPhase
# Type alias for the pattern-table entries (name, pattern[, predicate]) that
# register_pattern_table consumes; see UMAPartitioner._pattern_table below.
PatternTable = List[Tuple[str, tvm.relay.dataflow_pattern.DFPattern, Callable]]
class UMAPartitioner:
    """Partitioner base class of the Universal Modular Accelerator Interface (UMA)."""
    def __init__(self, target_name: str, merge_compiler_regions: bool = True) -> None:
        # Accelerator target name: used to namespace pattern names and as the
        # compiler annotation during partitioning.
        self.target_name = target_name
        self.merge_compiler_regions = merge_compiler_regions
        # Relay passes grouped by PassPhase, run around the partitioning steps
        # in partition() below.
        self._relay_passes: List[Tuple[PassPhase, tvm.transform.Pass]] = []
        self._patterns: PatternTable = []
    def add_pattern(
        self,
        name: str,
        pattern: tvm.relay.dataflow_pattern.DFPattern,
        predicate: Optional[Callable] = None,
    ) -> None:
        """Add pattern to UMA partitioner
        Parameters
        ----------
        name : str
            relay name of pattern
        pattern: tvm.relay.dataflow_pattern.DFPattern
            pattern description as DFPattern
        predicate: Optional[Callable]
            Optional predicate
        """
        # Namespace the pattern by target, e.g. "my_target.conv2d".
        name = self.target_name + "." + name
        # Entries are (name, pattern) or (name, pattern, predicate); the
        # predicate is stored only when one was supplied.
        if predicate:
            self._patterns.append((name, pattern, predicate))
        else:
            self._patterns.append((name, pattern))
    def _pattern_table(self) -> PatternTable:
        return self._patterns
    def register(self) -> None:
        """Register all relevant relay-to-relay functions."""
        register_pattern_table(self.target_name, self._pattern_table)
    def partition(
        self, mod: tvm.IRModule, params: Optional[Dict[str, tvm.runtime.NDArray]] = None
    ) -> tvm.IRModule:
        """Partition the relay graph in parts supported and unsupported by the
        target hardware accelerator.
        Parameters
        ----------
        mod : tvm.IRModule
            The relay module to be partitioned.
        params: Optional[Dict[str, tvm.runtime.NDArray]]
        Returns
        -------
        out : tvm.IRModule
            The partitioned relay module.
        """
        if params:
            mod["main"] = bind_params_by_name(mod["main"], params)
        # Pass ordering matters: user PRE_PARTITIONING passes, then
        # MergeComposite -> AnnotateTarget -> (optional) MergeCompilerRegions
        # -> PartitionGraph, then user POST_PARTITIONING_0 passes.
        pass_sequence = []
        pass_sequence.extend(
            [p[1] for p in self._relay_passes if p[0] == PassPhase.PRE_PARTITIONING]
        )
        pass_sequence.append(relay.transform.MergeComposite(self._pattern_table()))
        pass_sequence.append(relay.transform.AnnotateTarget(self.target_name))
        if self.merge_compiler_regions:
            pass_sequence.append(relay.transform.MergeCompilerRegions())
        pass_sequence.append(relay.transform.PartitionGraph())
        pass_sequence.extend(
            [p[1] for p in self._relay_passes if p[0] == PassPhase.POST_PARTITIONING_0]
        )
        sequential_passes = tvm.transform.Sequential(pass_sequence)
        mod = sequential_passes(mod)
        # Defunctionalize the partitioned functions to allow lowering
        for gvar, func in mod.functions.items():
            mod.update_func(gvar, relay.transform.Defunctionalization(func, mod))
        # POST_PARTITIONING_1 passes run only after defunctionalization.
        post_partition_passes_1 = tvm.transform.Sequential(
            [p[1] for p in self._relay_passes if p[0] == PassPhase.POST_PARTITIONING_1]
        )
        mod = post_partition_passes_1(mod)
        return mod
| 4,304 | 34 | 88 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/uma/api/codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Codegen base class of the Universal Modular Accelerator Interface (UMA)"""
from typing import Callable, Optional
import tvm
class UMACodegen(object):
    """Codegen base class of the Universal Modular Accelerator Interface (UMA)."""

    def __init__(self, target_name: str) -> None:
        self.target_name = target_name

    def _register_codegen(
        self, fmt: str = "c", includes: Optional[Callable[[], str]] = None, **kwargs
    ) -> None:
        """Register a codegen with UMA.

        Parameters
        ----------
        fmt: str
            Format of the codegen. Currently only "c" is supported.
        includes : OptionalCallable[[], str]]
            User-defined function that adds C-#include statements to UMA C code.

        Raises
        ------
        RuntimeError
            If *fmt* is anything other than "c".
        """
        # Guard clause: only the C codegen exists today.
        if fmt != "c":
            raise RuntimeError(f'Unsupported codegen format "{fmt}"')
        self._register_c_codegen(includes, **kwargs)

    def _register_c_codegen(self, includes: Optional[Callable[[], str]] = None) -> None:
        """Register the UMA C-codegen hooks (currently just ``includes``).

        Parameters
        ----------
        includes : OptionalCallable[[], str]]
            User-defined function that adds C-#include statements to UMA C code.
        """
        # Nothing to register when no include hook was supplied.
        if includes is None:
            return
        tvm._ffi.register_func(
            f"relay.ext.uma.codegen_c_includes_{self.target_name}",
            includes,
            override=True,
        )

    def register(self) -> None:
        """No eager registration needed; hooks are installed on demand."""
| 2,326 | 34.8 | 88 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/uma/api/lower.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Lowering base class of the Universal Modular Accelerator Interface (UMA)"""
from typing import List, Tuple, Callable, Optional
import tvm
from tvm import relay, te
from tvm.relay.op.op import register_strategy
from . import _ffi_api
from .utils import PassPhase
# Type alias: (operator name, strategy function, priority level) entries as
# appended by UMABackend._register_operator_strategy and consumed in
# UMALower.register below.
OperatorStrategies = List[
    Tuple[
        str,
        Callable[
            [tvm.ir.Attrs, tvm.ir.Array, tvm.ir.TensorType, tvm.target.Target],
            tvm.relay.op.op.OpStrategy,
        ],
        Optional[int],
    ]
]
class UMALower:
    """Lowering base class of the Universal Modular Accelerator Interface (UMA)."""
    def __init__(self, target_name: str) -> None:
        self.target_name = target_name
        # (op name, strategy, priority) entries registered by the backend.
        self._operator_strategies: OperatorStrategies = []
        # TIR passes grouped by PassPhase; injected via "tir.add_lower_pass"
        # in _lower_stir_to_nstir.
        self._tir_passes: List[Tuple[PassPhase, tvm.tir.transform.PrimFuncPass]] = []
    def _lower_relay_to_tir(self, relay_prim_func: relay.Function) -> tvm.tir.PrimFunc:
        """Lower a Relay primitive function to a S-TIR primitive function.
        Parameters
        ----------
        prim_func : tvm.relay.Function
            The Relay function to lower.
        Returns
        -------
        out : tvm.tir.PrimFunc
            The lowered schedulable TensorIR primitive function.
        """
        def _get_tensors(te_cached_func):
            # The prim func is built over all input and output TE tensors.
            return list(te_cached_func.inputs) + list(te_cached_func.outputs)
        lower_to_te = tvm._ffi.get_global_func("relay.backend.LowerToTE")
        te_cached_func = lower_to_te(relay_prim_func)
        x = _get_tensors(te_cached_func)
        tir_prim_func = te.create_prim_func(x)
        # Preserve the external symbol name of the partitioned function.
        tir_prim_func = tir_prim_func.with_attr(
            "global_symbol", relay_prim_func.attrs["global_symbol"]
        )
        compiler_attr = relay_prim_func.attrs["Compiler"]
        target = tvm.target.Target.current()
        # Use the current target unless it disagrees with the function's
        # "Compiler" annotation, in which case build a target of that name.
        if target.kind.name != compiler_attr:
            target = tvm.target.Target(compiler_attr)
        tir_prim_func = tir_prim_func.with_attr("target", target)
        # Keep the original Relay attributes around for later stages.
        tir_prim_func = tir_prim_func.with_attr("relay_attrs", relay_prim_func.attrs)
        return tir_prim_func
    def _lower_stir_to_nstir(self, prim_func: tvm.tir.PrimFunc) -> tvm.tir.PrimFunc:
        """Lower a S-TIR primitive function to a NS-TIR primitive function.
        Parameters
        ----------
        prim_func : tvm.tir.PrimFunc
            The primitive function to lower.
        Returns
        -------
        out : tvm.tir.PrimFunc
            The lowered non-schedulable TensorIR primitive function.
        """
        curr_ctxt = tvm.transform.PassContext().current()
        # The UMA TIR passes are injected through "tir.add_lower_pass"; a
        # pre-existing user setting of that key would be clobbered below.
        assert "tir.add_lower_pass" not in curr_ctxt.config
        # Map UMA pass phases onto the numeric TIR lowering phases of tvm.lower.
        pass_map = {
            PassPhase.TIR_PHASE_0: 0,
            PassPhase.TIR_PHASE_1: 1,
            PassPhase.TIR_PHASE_2: 2,
            PassPhase.TIR_PHASE_3: 3,
        }
        lower_passes = [(pass_map[k], v) for k, v in self._tir_passes]
        # Re-create the current pass context with the UMA passes added to it.
        with tvm.transform.PassContext(
            opt_level=curr_ctxt.opt_level,
            required_pass=curr_ctxt.required_pass,
            disabled_pass=curr_ctxt.disabled_pass,
            instruments=curr_ctxt.instruments,
            config={**dict(curr_ctxt.config), "tir.add_lower_pass": lower_passes},
        ):
            mod = tvm.lower(tvm.ir.IRModule.from_expr(prim_func))
        # Fetch the lowered function back out of the module by symbol name.
        prim_func = mod[prim_func.attrs["global_symbol"]]
        return prim_func
    def relay_to_tir(self, mod: tvm.ir.IRModule) -> tvm.ir.IRModule:
        """
        This is the hook for python-based lowering of a Relay module which lowers NPU
        external functions to TIR.
        Parameters
        ----------
        mod : tvm.ir.IRModule
            This is the Relay module.
        Returns
        -------
        mod : tvm.ir.IRModule
            The Relay module with scheduled NPU external functions.
        """
        # Outline the partitioned functions so each can be lowered separately.
        mod = _ffi_api.OutlineCompilerFunctions(self.target_name)(mod)
        for gvar, func in mod.functions.items():
            # Only lower functions annotated for this UMA target.
            if "Compiler" in func.attrs and func.attrs["Compiler"] == self.target_name:
                func = self._lower_relay_to_tir(func)
                func = self._lower_stir_to_nstir(func)
                mod.update_func(gvar, func)
        return mod
    def register(self) -> None:
        """Register all relevant relay-to-tir functions."""
        tvm._ffi.register_func(f"relay.ext.uma.{self.target_name}.relay_to_tir", self.relay_to_tir)
        for op, strategy, plevel in self._operator_strategies:
            register_strategy(op, strategy, plevel)
| 5,316 | 35.417808 | 99 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/uma/api/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Utility methods for the Universal Modular Accelerator Interface (UMA)"""
from enum import Enum, auto
import uuid
import tvm
import tvm.tir
from tvm.contrib import utils, clang
def uma_available() -> bool:
    """Return True when this TVM build provides the UMA target-registration hook."""
    return (
        tvm.get_global_func("relay.backend.contrib.uma.RegisterTarget", allow_missing=True)
        is not None
    )
class PassPhase(Enum):
    """
    UMA pass phases:
    PRE_PARTITIONING: prior to UMA partitioning
    POST_PARTITIONING_0: after UMA partitioning, before Defunctionalization
    POST_PARTITIONING_1: after UMA partitioning and after Defunctionalization
    TIR_PHASE_0: Generates the raw IR and loop levels.
    TIR_PHASE_1: Flattens the array storage.
    TIR_PHASE_2: Transforms loops, like unroll, vectorization and thread-binding.
    TIR_PHASE_3: Does some cleanup work.
    Reference to TIR phases: src/driver/driver_api.c
    """
    # Values are auto-assigned; members are compared by equality/identity when
    # filtering registered passes (see UMAPartitioner/UMALower).
    PRE_PARTITIONING = auto()
    POST_PARTITIONING_0 = auto()
    POST_PARTITIONING_1 = auto()
    TIR_PHASE_0 = auto()
    TIR_PHASE_1 = auto()
    TIR_PHASE_2 = auto()
    TIR_PHASE_3 = auto()
def _c_to_llvm(c_code: str) -> str:
    """Compile a C source string to LLVM IR text using clang.
    A UUID-based file name inside a fresh temporary directory is used for the
    output so concurrent calls cannot collide.
    """
    unique_filename = str(uuid.uuid4())
    temp = utils.tempdir()
    ll_path = temp.relpath(f"{unique_filename}.ll")
    ll_code = clang.create_llvm([c_code], output=ll_path)
    return ll_code
def add_llvm_to_block(
    sch: tvm.tir.Schedule, block_name: str, c_code_str: str = ""
) -> tvm.tir.Schedule:
    """Attach LLVM IR compiled from *c_code_str* to the named block.
    The IR is injected through a "pragma_import_llvm" annotation on the
    block's outermost loop; the mutated schedule is returned for chaining.
    """
    block = sch.get_block(block_name)
    loops = sch.get_loops(block)
    # The pragma hangs off a loop, so the block must have at least one.
    assert len(loops) > 0
    sch.annotate(loops[0], "pragma_import_llvm", _c_to_llvm(c_code_str))
    return sch
| 2,462 | 31.84 | 81 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/uma/api/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for relay transformation passes."""
import tvm._ffi # type: ignore
# Bind the C++-registered "relay.ext.uma.*" global functions as attributes of
# this module (e.g. OutlineCompilerFunctions used by UMALower).
tvm._ffi._init_api("relay.ext.uma", __name__)
| 912 | 42.47619 | 62 | py |
tvm | tvm-main/python/tvm/relay/backend/contrib/uma/api/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""UMA: Universal Modular Accelerator Interface API"""
from .codegen import UMACodegen
from .lower import UMALower
from .partitioner import UMAPartitioner
__all__ = ["UMACodegen", "UMALower", "UMAPartitioner"]
| 999 | 37.461538 | 62 | py |
tvm | tvm-main/python/tvm/relay/qnn/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,arguments-differ,no-else-return,unused-argument,missing-docstring
"""
QNN pass transformation infrastructure.
"""
from tvm import relay
def CanonicalizeOps():
    """Converts/Lowers an expression containing QNN ops to an expression containing only core
    (non-Dialect) Relay ops. Each QNN op is lowered to a sequence of existing Relay ops. This is a
    target-independent pass. One can register the lowering/transformation function for this op using
    FTVMQnnCanonicalize attr_name for FTVMLegalize op attribute. An example of this transformation
    is below
    Examples
    ________
    .. code-block:: python
        # Original expression
        qnn_expr = relay.qnn.op.requantize(y,
                                           input_scale=1,
                                           input_zero_point=0,
                                           output_scale=1,
                                           output_zero_point=0,
                                           out_dtype='int8')
        # We want to utilize all the existing Relay infrastructure. So, instead of supporting this
        # QNN requantize op, we convert it into a sequence of existing Relay operators.
        mod = tvm.IRModule.from_expr(qnn_expr)
        mod = relay.qnn.transform.CanonicalizeOps()(mod)
        relay_expr = mod['main']
        print(relay_expr)
        def @main(%quantized_data: Tensor[(200), int32]) -> Tensor[(200), int8] {
          %0 = cast(%quantized_data, dtype="int64") /* ty=Tensor[(200), int64] */;
          %1 = multiply(%0, 2 /* ty=int64 */) /* ty=Tensor[(200), int64] */;
          %2 = multiply(%1, 1073741824 /* ty=int64 */) /* ty=Tensor[(200), int64] */;
          %3 = add(%2, 1073741824 /* ty=int64 */) /* ty=Tensor[(200), int64] */;
          %4 = right_shift(%3, 31 /* ty=int64 */) /* ty=Tensor[(200), int64] */;
          %5 = add(0 /* ty=int64 */, %4) /* ty=Tensor[(200), int64] */;
          %6 = clip(%5, a_min=-128f, a_max=127f) /* ty=Tensor[(200), int64] */;
          cast(%6, dtype="int8") /* ty=Tensor[(200), int8] */
        }
    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass that canonicalizes QNN ops to Relay ops.
    """
    # Canonicalization is implemented as a Legalize pass keyed on the
    # per-op FTVMQnnCanonicalize attribute.
    return relay.transform.Legalize("FTVMQnnCanonicalize")
def Legalize():
    """Legalize QNN ops only, as opposed to the Relay-wide Legalize pass.

    A transformation/legalization function for an op is registered under the
    FTVMQnnLegalize attr_name. Isolating QNN legalization from Relay
    legalization gives a clean separation of concerns, and the rewrite can be
    configured per target — e.g. inserting a requantize of u8 weights to i8 so
    that ``qnn.conv2d`` can use Intel VNNI's u8 x i8 instructions. Only QNN
    ops are rewritten (e.g. ``qnn.conv2d`` but not ``nn.relu``); this pass can
    be followed by CanonicalizeOps to lower the remaining QNN ops to core
    Relay ops.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass that legalizes QNN ops.
    """
    legalize_attr = "FTVMQnnLegalize"
    return relay.transform.Legalize(legalize_attr)
| 4,792 | 39.965812 | 100 | py |
tvm | tvm-main/python/tvm/relay/qnn/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""QNN dialect operators and IR passes."""
from __future__ import absolute_import as _abs
from . import op
from . import transform
| 950 | 42.227273 | 62 | py |
tvm | tvm-main/python/tvm/relay/qnn/strategy/hexagon.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of Hexagon operator strategy."""
# pylint: disable=unused-argument,wildcard-import,unused-wildcard-import
import re
from tvm import topi
from .generic import *
from ... import op as _op
from ...op.strategy.generic import is_depthwise_conv2d
# Matches blocked data layouts such as "NCHW32c" (NCHW with a split channel axis).
NCHWC_MATCHER = re.compile("^NCHW[0-9]+c$")
# Matches blocked kernel layouts such as "OIHW4i32o4i" (OIHW with split I/O axes).
OIHWIOI_MATCHER = re.compile("^OIHW[0-9]+i[0-9]+o[0-9]+i$")
@qnn_quantize_strategy.register("hexagon")
def qnn_quantize_strategy_hexagon(attrs, inputs, out_type, target):
    """Build the Hexagon op strategy for qnn.quantize."""
    compute = wrap_compute_quantize(topi.hexagon.qnn_quantize)
    schedule = wrap_topi_schedule(topi.hexagon.schedule_qnn_quantize)
    hexagon_strategy = _op.OpStrategy()
    hexagon_strategy.add_implementation(compute, schedule, name="qnn_quantize.hexagon")
    return hexagon_strategy
@qnn_dequantize_strategy.register("hexagon")
def qnn_dequantize_strategy_hexagon(attrs, inputs, out_type, target):
    """Build the Hexagon op strategy for qnn.dequantize."""
    compute = wrap_compute_dequantize(topi.hexagon.qnn_dequantize)
    schedule = wrap_topi_schedule(topi.hexagon.schedule_qnn_dequantize)
    hexagon_strategy = _op.OpStrategy()
    hexagon_strategy.add_implementation(compute, schedule, name="qnn_dequantize.hexagon")
    return hexagon_strategy
@qnn_requantize_strategy.register("hexagon")
def qnn_requantize_strategy_hexagon(attrs, inputs, out_type, target):
    """Build the Hexagon op strategy for qnn.requantize."""
    # wrap_compute_quantize is reused: requantize attrs also carry axis/out_dtype.
    compute = wrap_compute_quantize(topi.hexagon.qnn_requantize)
    schedule = wrap_topi_schedule(topi.hexagon.schedule_qnn_requantize)
    hexagon_strategy = _op.OpStrategy()
    hexagon_strategy.add_implementation(compute, schedule, name="qnn_requantize.hexagon")
    return hexagon_strategy
@qnn_add_strategy.register("hexagon")
def qnn_add_strategy_hexagon(attrs, inputs, out_type, target):
    """Build the Hexagon op strategy for qnn.add."""
    compute = wrap_topi_compute(topi.hexagon.qnn_add)
    schedule = wrap_topi_schedule(topi.hexagon.schedule_qnn_add)
    hexagon_strategy = _op.OpStrategy()
    hexagon_strategy.add_implementation(compute, schedule, name="qnn_add.hexagon")
    return hexagon_strategy
@qnn_subtract_strategy.register("hexagon")
def qnn_subtract_strategy_hexagon(attrs, inputs, out_type, target):
    """Build the Hexagon op strategy for qnn.subtract."""
    compute = wrap_topi_compute(topi.hexagon.qnn_subtract)
    schedule = wrap_topi_schedule(topi.hexagon.schedule_qnn_subtract)
    hexagon_strategy = _op.OpStrategy()
    hexagon_strategy.add_implementation(compute, schedule, name="qnn_subtract.hexagon")
    return hexagon_strategy
@qnn_mul_strategy.register("hexagon")
def qnn_mul_strategy_hexagon(attrs, inputs, out_type, target):
    """Build the Hexagon op strategy for qnn.mul."""
    compute = wrap_topi_compute(topi.hexagon.qnn_mul)
    schedule = wrap_topi_schedule(topi.hexagon.schedule_qnn_mul)
    hexagon_strategy = _op.OpStrategy()
    hexagon_strategy.add_implementation(compute, schedule, name="qnn_mul.hexagon")
    return hexagon_strategy
@qnn_tanh_strategy.register("hexagon")
def qnn_tanh_strategy_hexagon(attrs, inputs, out_type, target):
    """Build the Hexagon op strategy for qnn.tanh."""
    compute = wrap_topi_compute(topi.hexagon.qnn_tanh)
    schedule = wrap_topi_schedule(topi.hexagon.schedule_qnn_tanh)
    hexagon_strategy = _op.OpStrategy()
    hexagon_strategy.add_implementation(compute, schedule, name="qnn_tanh.hexagon")
    return hexagon_strategy
@qnn_concatenate_strategy.register("hexagon")
def qnn_concatenate_strategy_hexagon(attrs, inputs, out_type, target):
    """Build the Hexagon op strategy for qnn.concatenate."""
    compute = wrap_topi_concatenate(topi.hexagon.qnn_concatenate)
    schedule = wrap_topi_schedule(topi.hexagon.schedule_qnn_concatenate)
    hexagon_strategy = _op.OpStrategy()
    hexagon_strategy.add_implementation(compute, schedule, name="qnn_concatenate.hexagon")
    return hexagon_strategy
@qnn_conv2d_strategy.register("hexagon")
def qnn_conv2d_strategy_hexagon(attrs, inputs, out_type, target):
    """qnn.conv2d strategy for Hexagon.

    Dispatches on group count, data/kernel layouts and input dtypes:
    regular conv2d for NCHW/OIHW, a vectorized int8 variant for blocked
    NCHW*c/OIHW*i*o*i layouts with u8 data and i8 weights, and a depthwise
    variant for grouped NCHW/OIHW depthwise convolutions.
    """
    data = inputs[0]
    kernel = inputs[1]
    data_layout = attrs.data_layout
    kernel_layout = attrs.kernel_layout
    groups = attrs.groups
    strategy = _op.OpStrategy()
    if groups == 1:
        if data_layout == "NCHW" and kernel_layout == "OIHW":
            strategy.add_implementation(
                wrap_topi_qnn_conv2d(topi.hexagon.qnn_conv2d),
                wrap_topi_schedule(topi.hexagon.schedule_qnn_conv2d),
                name="qnn_conv2d.hexagon",
            )
        elif NCHWC_MATCHER.match(data_layout) and OIHWIOI_MATCHER.match(kernel_layout):
            # Blocked layouts (e.g. NCHW32c / OIHW4i32o4i); only the
            # uint8-data x int8-weight combination has an implementation.
            if data.dtype == "uint8" and kernel.dtype == "int8":
                strategy.add_implementation(
                    wrap_topi_qnn_conv2d(topi.hexagon.qnn_conv2d_NCHWc_int8),
                    wrap_topi_schedule(topi.hexagon.schedule_qnn_conv2d_NCHWc_int8),
                    name="qnn_conv2d_NCHWc_int8.hexagon",
                )
    elif is_depthwise_conv2d(data.shape, data_layout, kernel.shape, kernel_layout, groups):
        if data_layout == "NCHW" and kernel_layout == "OIHW":
            strategy.add_implementation(
                wrap_topi_qnn_conv2d(topi.hexagon.qnn_depthwise_conv2d),
                wrap_topi_schedule(topi.hexagon.schedule_qnn_depthwise_conv2d),
                name="qnn_depthwise_conv2d.hexagon",
            )
    else:
        # Grouped but non-depthwise convolutions have no Hexagon schedule.
        raise RuntimeError("Unsupported strategy for group qnn.conv2d")
    # NOTE(review): when groups == 1 (or depthwise) but no layout/dtype case
    # above matched, an *empty* strategy is returned rather than raising —
    # confirm downstream handles an implementation-less strategy.
    return strategy
@qnn_dense_strategy.register("hexagon")
def qnn_dense_strategy_hexagon(attrs, inputs, out_type, target):
    """Build the Hexagon op strategy for qnn.dense."""
    compute = wrap_topi_qnn_dense(topi.hexagon.qnn_dense)
    schedule = wrap_topi_schedule(topi.hexagon.schedule_qnn_dense)
    hexagon_strategy = _op.OpStrategy()
    hexagon_strategy.add_implementation(compute, schedule, name="qnn_dense.hexagon")
    return hexagon_strategy
@qnn_dense_pack_strategy.register("hexagon")
def qnn_dense_pack_strategy_hexagon(attrs, inputs, out_type, target):
    """Build the Hexagon op strategy for qnn.contrib_dense_pack."""
    hexagon_strategy = _op.OpStrategy()
    data_dtype = inputs[0].dtype
    weight_dtype = inputs[1].dtype
    vrmpy_layout = attrs["weight_layout"] == "NC32n4c"
    # uint8 data with uint8|int8 weights ("int8" is also a substring of "uint8")
    if "uint8" in data_dtype and "int8" in weight_dtype and vrmpy_layout:
        hexagon_strategy.add_implementation(
            wrap_topi_qnn_dense(topi.hexagon.qnn_dense_pack_vrmpy),
            wrap_topi_schedule(topi.hexagon.schedule_qnn_dense_pack_vrmpy),
            name="qnn_dense_pack_vrmpy.hexagon",
        )
    return hexagon_strategy
@qnn_batch_matmul_strategy.register("hexagon")
def qnn_batch_matmul_strategy_hexagon(attrs, inputs, out_type, target):
    """Build the Hexagon op strategy for qnn.batch_matmul."""
    compute = wrap_topi_qnn_batch_matmul(topi.hexagon.qnn_batch_matmul)
    schedule = wrap_topi_schedule(topi.hexagon.schedule_qnn_batch_matmul)
    hexagon_strategy = _op.OpStrategy()
    hexagon_strategy.add_implementation(compute, schedule, name="qnn_batch_matmul.hexagon")
    return hexagon_strategy
@qnn_avg_pool2d_strategy.register(["hexagon"])
def qnn_avg_pool2d_strategy_hexagon(attrs, inputs, out_type, target):
    """qnn.avg_pool2d strategy for Hexagon.

    Supports NHWC and NCHW data layouts; both share one schedule and differ
    only in the compute used, so the two branches are folded into a lookup
    table. Any other layout raises RuntimeError.
    """
    # Layout -> compute function; the schedule is layout-independent.
    compute_by_layout = {
        "NHWC": topi.hexagon.qnn.qnn_avg_pool2d_wrapper_compute_NHWC,
        "NCHW": topi.hexagon.qnn.qnn_avg_pool2d_wrapper_compute_NCHW,
    }
    data_layout = attrs.layout
    if data_layout not in compute_by_layout:
        raise RuntimeError("Unsupported strategy for qnn.avg_pool2d")
    strategy = _op.OpStrategy()
    strategy.add_implementation(
        wrap_compute_qnn_avg_pool2d(compute_by_layout[data_layout]),
        wrap_topi_schedule(topi.hexagon.qnn.schedule_qnn_avg_pool2d),
        name="qnn_avg_pool2d.hexagon",
    )
    return strategy
| 8,378 | 35.75 | 94 | py |
tvm | tvm-main/python/tvm/relay/qnn/strategy/arm_cpu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Quantized operator strategy for Arm CPU.
As quantized op schedules, these are only used if the qnn.Legalize pass is disabled. The current
schedules only work for fused operators with bias, as this is the most common use case. Only
regular/depthwise conv2d is supported, but qnn_dense will be added eventually."""
from tvm import topi, TVMError
from tvm.topi.utils import get_const_tuple
from ... import op as _op
from ...op.strategy.generic import is_depthwise_conv2d
from .generic import (
qnn_conv2d_strategy,
qnn_dense_strategy,
qnn_dequantize_strategy,
qnn_quantize_strategy,
wrap_compute_dequantize,
wrap_compute_quantize,
wrap_topi_qnn_dense,
wrap_topi_schedule,
)
@qnn_quantize_strategy.register("arm_cpu")
def qnn_quantize_strategy_arm_cpu(_attrs, _inputs, _out_type, _target):
    """Build the arm_cpu op strategy for qnn.quantize."""
    compute = wrap_compute_quantize(topi.hexagon.qnn_quantize)
    schedule = wrap_topi_schedule(topi.hexagon.schedule_qnn_quantize)
    arm_strategy = _op.OpStrategy()
    arm_strategy.add_implementation(compute, schedule, name="qnn_quantize.arm_cpu")
    return arm_strategy
@qnn_dequantize_strategy.register("arm_cpu")
def qnn_dequantize_strategy_arm_cpu(_attrs, _inputs, _out_type, _target):
    """Build the arm_cpu op strategy for qnn.dequantize."""
    compute = wrap_compute_dequantize(topi.hexagon.qnn_dequantize)
    schedule = wrap_topi_schedule(topi.hexagon.schedule_qnn_dequantize)
    arm_strategy = _op.OpStrategy()
    arm_strategy.add_implementation(compute, schedule, name="qnn_dequantize.arm_cpu")
    return arm_strategy
@qnn_dense_strategy.register("arm_cpu")
def qnn_dense_strategy_arm_cpu(_attrs, _inputs, _out_type, _target):
    """Build the arm_cpu op strategy for qnn.dense."""
    compute = wrap_topi_qnn_dense(topi.hexagon.qnn_dense)
    schedule = wrap_topi_schedule(topi.hexagon.schedule_qnn_dense)
    arm_strategy = _op.OpStrategy()
    arm_strategy.add_implementation(compute, schedule, name="qnn_dense.arm_cpu")
    return arm_strategy
@qnn_conv2d_strategy.register("arm_cpu")
def qnn_conv2d_strategy_arm_cpu(attrs, inputs, _out_type, target):
    """qnn.conv2d strategy for Arm Cortex-M CPUs with DSP.

    When computing convolutions, we want data that will be used to compute the
    same output values to be adjacent in memory, as this lets us reuse memory
    loads and use more SIMD instructions.

    For depthwise convolutions, channels do not interact with each other, so
    the NCHW and IOHW layouts do the best job of keeping "related" data close.
    In contrast, computing one output of a regular convolution requires reading
    all input channels, so NHWC and OHWI are best. Hence, these are the layouts
    we support.

    Raises TVMError on any target/layout/parameter combination without a
    schedule.
    """
    # These schedules are written specifically for Cortex-M with DSP; anything
    # else should have been rewritten away by qnn.Legalize first.
    if not (target.features.has_dsp and "cortex-m" in target.mcpu):
        raise TVMError(
            "Quantized Arm schedules only exist for Cortex-M with DSP! "
            "The qnn.Legalize pass should be run for other Arm processors."
        )
    data = inputs[0]
    kernel = inputs[1]
    data_layout = attrs.data_layout
    kernel_layout = attrs.kernel_layout
    groups = attrs.groups
    strategy = _op.OpStrategy()
    if groups == 1:
        # Regular convolution: only NHWC data with OHWI kernels is supported.
        if data_layout == "NHWC" and kernel_layout == "OHWI":
            strategy.add_implementation(
                topi.arm_cpu.qnn_conv2d,
                topi.arm_cpu.schedule_qnn_conv2d,
                name="qnn_conv2d.arm_cpu",
            )
        else:
            raise TVMError("QNN regular Conv2D for Arm Cortex-M DSP got incorrect input layout!")
    elif is_depthwise_conv2d(data.shape, data_layout, kernel.shape, kernel_layout, groups):
        # Depthwise convolution: only NCHW data with IOHW kernels is supported.
        if data_layout == "NCHW" and kernel_layout == "IOHW":
            # NCHW: dims 2 and 3 are the spatial height and width.
            height, width = data.shape[2:]
            y_stride, x_stride = get_const_tuple(attrs.strides)
            # NOTE(review): the default schedule requires height*width*y_stride
            # to be even — presumably an alignment requirement of the DSP SIMD
            # schedule; confirm against topi.arm_cpu.qnn_depthwise_conv2d.
            if height * width * y_stride % 2 == 0:
                strategy.add_implementation(
                    topi.arm_cpu.qnn_depthwise_conv2d,
                    topi.arm_cpu.schedule_qnn_depthwise_conv2d,
                    name="qnn_depthwise_conv2d.arm_cpu",
                )
            elif y_stride == x_stride == 1:
                # Fallback for odd sizes, but only with unit strides.
                strategy.add_implementation(
                    topi.arm_cpu.qnn_unrolled_depthwise_conv2d,
                    topi.arm_cpu.schedule_qnn_unrolled_depthwise_conv2d,
                    name="qnn_unrolled_depthwise_conv2d.arm_cpu",
                )
            else:
                raise TVMError("No QNN depthwise Conv2D Cortex-M schedule supports these params!")
        else:
            raise TVMError("QNN depthwise Conv2D for Arm Cortex-M DSP got incorrect input layout!")
    else:
        # Grouped but non-depthwise convolutions are unsupported.
        raise TVMError("No Arm Cortex-M DSP strategy exists for generic group qnn.conv2d")
    return strategy
| 5,451 | 39.686567 | 100 | py |
tvm | tvm-main/python/tvm/relay/qnn/strategy/generic.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of generic operator strategy."""
from tvm.target import override_native_generic_func
def wrap_topi_schedule(topi_schedule):
    """Adapt a TOPI schedule function that ignores op attributes.

    The returned callable takes ``(attrs, outs, target)``, enters the target
    context and delegates to ``topi_schedule(outs)``.
    """

    def schedule_without_attrs(_attrs, outs, target):
        with target:
            result = topi_schedule(outs)
        return result

    return schedule_without_attrs
def wrap_topi_compute(topi_compute):
    """Adapt a TOPI compute function that ignores op attributes.

    The returned callable takes ``(attrs, inputs, out_type)`` and forwards
    only the unpacked inputs, wrapping the result in a single-element list.
    """

    def compute_without_attrs(_attrs, inputs, _out_type):
        return [topi_compute(*inputs)]

    return compute_without_attrs
def wrap_compute_quantize(topi_compute):
    """Adapt a quantize-style TOPI compute taking axis and out_dtype from attrs."""

    def quantize_wrapper(attrs, inputs, _out_type):
        return [topi_compute(*inputs, attrs.axis, attrs.out_dtype)]

    return quantize_wrapper
def wrap_compute_dequantize(topi_compute):
    """Adapt a dequantize-style TOPI compute taking the axis from attrs."""

    def dequantize_wrapper(attrs, inputs, _out_type):
        return [topi_compute(*inputs, attrs.axis)]

    return dequantize_wrapper
def wrap_topi_qnn_conv2d(topi_compute):
    """Adapt a QNN conv2d TOPI compute to the strategy calling convention.

    Accepts 6 (no bias, no requantize params), 10 (no bias) or 11 tensor
    inputs and normalizes them to the full 11-slot argument list expected by
    ``topi_compute``, followed by the conv2d attributes and output
    shape/dtype.
    """

    def conv2d_wrapper(attrs, inputs, out_type):
        conv_attrs = [attrs.strides, attrs.padding, attrs.dilation, out_type.shape, out_type.dtype]
        tensors = [*inputs]
        count = len(tensors)
        if count == 11:
            normalized = tensors
        elif count == 10:
            # Bias is absent: splice a placeholder between the conv2d
            # quantization params and the requantization params.
            normalized = tensors[:6] + [None] + tensors[6:]
        else:
            assert count == 6
            # Neither bias nor requantization params are present.
            normalized = tensors + [None] * 5
        return [topi_compute(*normalized, *conv_attrs)]

    return conv2d_wrapper
def wrap_topi_qnn_dense(topi_compute):
    """Adapt a QNN dense TOPI compute to the strategy calling convention.

    Accepts 6 (no bias, no requantize params), 10 (no bias) or 11 tensor
    inputs and normalizes them to the full 11-slot argument list expected by
    ``topi_compute``, appending the requested output dtype.
    """

    def dense_wrapper(_attrs, inputs, out_type):
        tensors = [*inputs]
        count = len(tensors)
        if count == 11:
            normalized = tensors
        elif count == 10:
            # Bias is absent: splice a placeholder between the dense
            # quantization params and the requantization params.
            normalized = tensors[:6] + [None] + tensors[6:]
        else:
            assert count == 6
            # Neither bias nor requantization params are present.
            normalized = tensors + [None] * 5
        return [topi_compute(*normalized, out_type.dtype)]

    return dense_wrapper
def wrap_compute_qnn_avg_pool2d(topi_compute):
    """Adapt a qnn.avg_pool2d TOPI compute.

    Forwards the data tensor, the pooling attributes, the output shape/dtype
    and the four quantization parameters (input scale/zero-point, output
    scale/zero-point) in the order ``topi_compute`` expects.
    """

    def avg_pool2d_wrapper(attrs, inputs, out_type):
        quant_params = inputs[1:5]
        return [
            topi_compute(
                inputs[0],
                attrs.pool_size,
                attrs.strides,
                attrs.padding,
                attrs.dilation,
                attrs.count_include_pad,
                out_type.shape,
                out_type.dtype,
                *quant_params,
            )
        ]

    return avg_pool2d_wrapper
def wrap_topi_concatenate(topi_compute):
    """Adapt a qnn.concatenate TOPI compute.

    Passes the whole input list (not unpacked), plus the concat axis from
    attrs and the output dtype.
    """

    def concatenate_wrapper(attrs, inputs, out_type):
        return [topi_compute(inputs, attrs.axis, out_type.dtype)]

    return concatenate_wrapper
def wrap_topi_qnn_batch_matmul(topi_compute):
    """Adapt a qnn.batch_matmul TOPI compute.

    Expects exactly six tensor inputs and appends the transpose flags and the
    output dtype from the op attributes.
    """

    def batch_matmul_wrapper(attrs, inputs, _out_type):
        assert len([*inputs]) == 6
        return [topi_compute(*inputs, attrs.transpose_a, attrs.transpose_b, attrs.out_dtype)]

    return batch_matmul_wrapper
@override_native_generic_func("qnn_quantize_strategy")
def qnn_quantize_strategy(attrs, inputs, out_type, target):
    """Generic fallback for qnn.quantize: no target-independent implementation."""
    message = (
        "qnn.quantize is currently only supported with Hexagon. "
        "Please run QNN Canonicalize pass to decompose this op into supported ops."
    )
    raise RuntimeError(message)
@override_native_generic_func("qnn_dequantize_strategy")
def qnn_dequantize_strategy(attrs, inputs, out_type, target):
    """Generic fallback for qnn.dequantize: no target-independent implementation."""
    message = (
        "qnn.dequantize is currently only supported with Hexagon. "
        "Please run QNN Canonicalize pass to decompose this op into supported ops."
    )
    raise RuntimeError(message)
@override_native_generic_func("qnn_requantize_strategy")
def qnn_requantize_strategy(attrs, inputs, out_type, target):
    """Generic fallback for qnn.requantize: no target-independent implementation."""
    message = (
        "qnn.requantize is currently only supported with Hexagon. "
        "Please run QNN Canonicalize pass to decompose this op into supported ops."
    )
    raise RuntimeError(message)
@override_native_generic_func("qnn_add_strategy")
def qnn_add_strategy(attrs, inputs, out_type, target):
    """Generic fallback for qnn.add: no target-independent implementation."""
    message = (
        "qnn.add is currently only supported with Hexagon. "
        "Please run QNN Canonicalize pass to decompose this op into supported ops."
    )
    raise RuntimeError(message)
@override_native_generic_func("qnn_subtract_strategy")
def qnn_subtract_strategy(attrs, inputs, out_type, target):
    """Generic fallback for qnn.subtract: no target-independent implementation."""
    message = (
        "qnn.subtract is currently only supported with Hexagon. "
        "Please run QNN Canonicalize pass to decompose this op into supported ops."
    )
    raise RuntimeError(message)
@override_native_generic_func("qnn_mul_strategy")
def qnn_mul_strategy(attrs, inputs, out_type, target):
    """Generic fallback for qnn.mul: no target-independent implementation."""
    message = (
        "qnn.mul is currently only supported with Hexagon. "
        "Please run QNN Canonicalize pass to decompose this op into supported ops."
    )
    raise RuntimeError(message)
@override_native_generic_func("qnn_tanh_strategy")
def qnn_tanh_strategy(attrs, inputs, out_type, target):
    """Generic fallback for qnn.tanh: no target-independent implementation."""
    message = (
        "qnn.tanh is currently only supported with Hexagon. "
        "Please run QNN Canonicalize pass to decompose this op into supported ops."
    )
    raise RuntimeError(message)
@override_native_generic_func("qnn_concatenate_strategy")
def qnn_concatenate_strategy(attrs, inputs, out_type, target):
    """Generic fallback for qnn.concatenate: no target-independent implementation."""
    message = (
        "qnn.concatenate is currently only supported with Hexagon. "
        "Please run QNN Canonicalize pass to decompose this op into supported ops."
    )
    raise RuntimeError(message)
@override_native_generic_func("qnn_conv2d_strategy")
def qnn_conv2d_strategy(attrs, inputs, out_type, target):
    """Generic fallback for qnn.conv2d: no target-independent implementation."""
    message = (
        "qnn.conv2d is currently only supported with Hexagon. "
        "Please run QNN Canonicalize pass to decompose this op into supported ops."
    )
    raise RuntimeError(message)
@override_native_generic_func("qnn_dense_strategy")
def qnn_dense_strategy(attrs, inputs, out_type, target):
    """Generic fallback for qnn.dense: no target-independent implementation."""
    message = (
        "qnn.dense is currently only supported with Hexagon. "
        "Please run QNN Canonicalize pass to decompose this op into supported ops."
    )
    raise RuntimeError(message)
@override_native_generic_func("qnn_dense_pack_strategy")
def qnn_dense_pack_strategy(attrs, inputs, out_type, target):
    """Generic fallback for qnn.contrib_dense_pack: no target-independent implementation."""
    message = "qnn.contrib_dense_pack is currently only supported with Hexagon. "
    raise RuntimeError(message)
@override_native_generic_func("qnn_batch_matmul_strategy")
def qnn_batch_matmul_strategy(attrs, inputs, out_type, target):
    """Generic fallback for qnn.batch_matmul: no target-independent implementation."""
    message = (
        "qnn.batch_matmul is currently only supported with Hexagon. "
        "Please run QNN Canonicalize pass to decompose this op into supported ops."
    )
    raise RuntimeError(message)
@override_native_generic_func("qnn_avg_pool2d_strategy")
def qnn_avg_pool2d_strategy(attrs, inputs, out_type, target):
    """Generic fallback for qnn.avg_pool2d: no target-independent implementation."""
    message = (
        "qnn.avg_pool2d is currently only supported with Hexagon. "
        "Please run QNN Canonicalize pass to decompose this op into supported ops."
    )
    raise RuntimeError(message)
| 10,109 | 30.397516 | 91 | py |
tvm | tvm-main/python/tvm/relay/qnn/strategy/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""QNN op strategies."""
from __future__ import absolute_import as _abs
from .generic import *
from . import arm_cpu
from . import hexagon
| 960 | 37.44 | 62 | py |
tvm | tvm-main/python/tvm/relay/qnn/op/canonicalizations.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Consist of utilities and methods for lowering QNN into mainline relay."""
from typing import Callable
import numpy as np
import tvm
from tvm import relay
def run_const_expr(expr: "relay.Expr") -> np.ndarray:
    """Evaluate a constant Relay expression, returning the result as a numpy array.

    If a number of passes are disabled in the current Pass Context, there is no
    need to disable them for const-expression evaluation as well; we therefore
    re-enter the context with ``disabled_pass=[]`` while inheriting every other
    setting from the current context.

    Parameters
    ----------
    expr : relay.Expr
        A constant (argument-free) expression to evaluate.

    Returns
    -------
    np.ndarray
        The evaluated value.
    """
    curr_pass_ctx = tvm.ir.transform.PassContext.current()
    with tvm.ir.transform.PassContext(
        opt_level=curr_pass_ctx.opt_level,
        required_pass=curr_pass_ctx.required_pass,
        disabled_pass=[],
        instruments=curr_pass_ctx.instruments,
        config=curr_pass_ctx.config,
    ):
        mod = tvm.IRModule.from_expr(expr)
        vm_exe = relay.create_executor("vm", mod=mod)
        # NDArray.numpy() is the supported accessor; asnumpy() is deprecated.
        output = vm_exe.evaluate()().numpy()
    return output
def create_integer_lookup_table(
    floating_point_func: Callable[[np.ndarray], np.ndarray],
    input_scale: "relay.Expr",
    input_zero_point: "relay.Expr",
    output_scale: "relay.Expr",
    output_zero_point: "relay.Expr",
    in_axis: int = -1,
    out_axis: int = -1,
    in_dtype: str = "uint8",
    out_dtype: str = "uint8",
) -> np.ndarray:
    """Return a table where each input indexes to the output quantizing the given function.

    Note this also supports mapping unsigned and signed integers to each other.

    Parameters
    ----------
    floating_point_func : Callable[[np.ndarray], np.ndarray]
        The numpy function which this table is to approximate.
    input_scale : relay.Expr
        The scale of the quantized input tensor.
    input_zero_point : relay.Expr
        The zero point of the quantized input tensor.
    output_scale : relay.Expr
        The scale of the quantized output tensor.
    output_zero_point : relay.Expr
        The zero point of the quantized output tensor.
    in_axis : int
        The axis for multi-channel quantization of the input if applicable.
    out_axis : int
        The axis for multi-channel quantization of the output if applicable.
    in_dtype : str
        The dtype of the input tensor.
    out_dtype : str
        The wanted dtype of the output tensor.

    Returns
    -------
    np.ndarray
        A table where values in quantized space index to the output in
        quantized space approximating the given function.

    Raises
    ------
    ValueError
        If either dtype is not an integer dtype.
    """
    if not np.issubdtype(np.dtype(in_dtype), np.integer) or not np.issubdtype(
        np.dtype(out_dtype), np.integer
    ):
        raise ValueError(
            f"Only integer dtypes allowed got {in_dtype} and {out_dtype} for in and out dtypes."
        )
    dtype_info = np.iinfo(in_dtype)
    num_bits = dtype_info.bits
    # Use TVMs quantization methods via relay to be consistent
    # inputs_quantized = np.array(range(dtype_info.min, dtype_info.max + 1)).astype(in_dtype)
    # First generate a list of all num_bit integer patterns
    inputs_quantized = np.array(range(0, 2**num_bits), dtype=f"uint{num_bits}")
    # Reinterpret bits as the real datatype
    # Note what we are doing here is a bit tricky, the canonical view of our lookup table
    # is using the uintX version. When we run the lookup in the relay graph, we cast the
    # bit pattern back into this form.
    inputs_quantized = inputs_quantized.view(in_dtype)
    inputs_quantized = relay.const(inputs_quantized, dtype=in_dtype)
    # Dequantize every representable input value to floating point ...
    inputs_dequantized = run_const_expr(
        relay.qnn.op.dequantize(
            inputs_quantized,
            input_scale=input_scale,
            input_zero_point=input_zero_point,
            axis=in_axis,
        )
    )
    # ... apply the reference function ...
    output_dequantized = relay.const(floating_point_func(inputs_dequantized))
    # ... and requantize the results into the output quantization space.
    output_quantized = run_const_expr(
        relay.qnn.op.quantize(
            output_dequantized, output_scale, output_zero_point, out_axis, out_dtype
        )
    )
    return output_quantized
def create_integer_lookup_op(
    input_arg: "relay.Expr",
    floating_point_func: Callable[[np.array], np.array],
    in_scale: "relay.Expr",
    in_zero_point: "relay.Expr",
    out_scale: "relay.Expr",
    out_zero_point: "relay.Expr",
    in_axis: int = -1,
    out_axis: int = -1,
    in_dtype: str = "uint8",
    out_dtype: str = "uint8",
) -> "relay.Expr":
    """Create a quantized version of the given floating point unary operation using table lookup.

    Parameters
    ----------
    input_arg : relay.Expr
        The quantized input to the final function.
    floating_point_func : Callable[[np.array], np.array]
        The numpy function which this table is to approximate.
    in_scale : relay.Expr
        The scale of the quantized input tensor (must be a constant).
    in_zero_point : relay.Expr
        The zero point of the quantized input tensor (must be a constant).
    out_scale : relay.Expr
        The scale of the quantized output tensor (must be a constant).
    out_zero_point : relay.Expr
        The zero point of the quantized output tensor (must be a constant).
    in_axis : int
        The axis for multi-channel quantization of the input if applicable.
    out_axis : int
        The axis for multi-channel quantization of the output if applicable.
    in_dtype : str
        The dtype of the input tensor.
    out_dtype : str
        The wanted dtype of the output tensor.

    Returns
    -------
    relay.Expr
        A Relay expression representing a quantized version of the given function.
    """
    # TODO: handle multi-channel q, below will fail with multi-channel q
    # .item() requires scalar (per-tensor) quantization params.
    in_scale = in_scale.data.numpy().item()
    in_zero_point = in_zero_point.data.numpy().item()
    out_scale = out_scale.data.numpy().item()
    out_zero_point = out_zero_point.data.numpy().item()
    # Precompute the full input->output mapping as a constant table.
    lookup_table = create_integer_lookup_table(
        floating_point_func,
        relay.const(in_scale),
        relay.const(in_zero_point, dtype="int32"),
        relay.const(out_scale),
        relay.const(out_zero_point, dtype="int32"),
        in_axis=in_axis,
        in_dtype=in_dtype,
        out_axis=out_axis,
        out_dtype=out_dtype,
    )
    in_dtype_info = np.iinfo(in_dtype)
    in_dtype_num_bits = in_dtype_info.bits
    lookup_table = relay.const(lookup_table)
    # The table is canonically indexed by the unsigned bit pattern of the
    # input, so reinterpret (not cast) the input bits before the lookup.
    index_tensor = relay.reinterpret(input_arg, f"uint{in_dtype_num_bits}")
    result = relay.take(lookup_table, index_tensor, axis=0, mode="fast")
    return result
| 6,840 | 37.869318 | 100 | py |
tvm | tvm-main/python/tvm/relay/qnn/op/op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""The register functions for the QNN dialect."""
import tvm.ir
def register_qnn_legalize(op_name, legal_op=None, level=10):
    """Register a legal transformation function for a QNN op.

    This helps QNN match hardware intrinsics better and is run before
    canonicalization.

    Parameters
    ----------
    op_name : str
        The name of the operator.
    legal_op : function (attrs: Attrs, inputs: List[Expr]) -> new_expr: Expr
        The function for transforming an expr to another expr.
    level : int
        The priority level.
    """
    attr_key = "FTVMQnnLegalize"
    return tvm.ir.register_op_attr(op_name, attr_key, legal_op, level)
def register_qnn_canonicalize(op_name, legal_op=None, level=10):
    """Register a canonicalization transform for a QNN operator.

    Canonicalization lowers QNN ops into compositions of mainline Relay
    operators.

    Parameters
    ----------
    op_name : str
        Name of the operator the transform is registered for.
    legal_op : function (Attrs, List[Expr], List[relay.Type]) -> Expr
        Callback that rewrites one expression into another.
    level : int
        Registration priority level.
    """
    return tvm.ir.register_op_attr(op_name, "FTVMQnnCanonicalize", legal_op, level)
| 1,987 | 32.133333 | 83 | py |
tvm | tvm-main/python/tvm/relay/qnn/op/layout_conversions.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Convert layout related registration"""
from __future__ import absolute_import
from tvm.relay.op import op as reg
from ...op.strategy.generic import is_depthwise_conv2d
@reg.register_convert_op_layout("qnn.conv2d")
def convert_qnn_conv2d(attrs, inputs, tinfos, desired_layouts):
    """ConvertLayout callback for qnn.conv2d.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of the current convolution.
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized.
    tinfos : list of types
        List of input and output types.
    desired_layouts : list of layout strings
        Desired layouts for the data and kernel inputs respectively;
        "default" for the kernel means "pick the conventional one".

    Returns
    -------
    result : tvm.relay.Expr
        A qnn.conv2d rebuilt with the requested layouts.
    """
    # pylint: disable=import-outside-toplevel
    from tvm import relay
    assert len(desired_layouts) == 2, "A desired layout is expected for both of qnn.conv2d's inputs"
    data_layout, kernel_layout = (str(layout) for layout in desired_layouts)
    assert data_layout != "default", "Data layout cannot be default"
    updated_attrs = dict(attrs)
    updated_attrs["data_layout"] = data_layout
    if kernel_layout != "default":
        # Caller named a kernel layout explicitly; honor it as-is.
        updated_attrs["kernel_layout"] = kernel_layout
    elif data_layout == "NCHW":
        updated_attrs["kernel_layout"] = "OIHW"
    elif data_layout == "NHWC":
        # NHWC depthwise convolutions conventionally use HWOI, dense ones HWIO.
        data_info, weight_info = tinfos[0], tinfos[1]
        depthwise = is_depthwise_conv2d(
            data_info.shape,
            attrs["data_layout"],
            weight_info.shape,
            attrs["kernel_layout"],
            attrs["groups"],
        )
        updated_attrs["kernel_layout"] = "HWOI" if depthwise else "HWIO"
    else:
        raise ValueError(f"Layout {data_layout} is not yet supported")
    return relay.qnn.op.conv2d(*inputs, **updated_attrs)
@reg.register_convert_op_layout("qnn.conv2d_transpose")
def convert_qnn_conv2d_transpose(attrs, inputs, tinfos, desired_layouts):
    """ConvertLayout callback for qnn.conv2d_transpose.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of the current convolution.
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized.
    tinfos : list of types
        List of input and output types.
    desired_layouts : list of layout strings
        Desired layouts for the data and kernel inputs respectively;
        "default" for the kernel means "pick the conventional one".

    Returns
    -------
    result : tvm.relay.Expr
        A qnn.conv2d_transpose rebuilt with the requested layouts.
    """
    # pylint: disable=import-outside-toplevel
    from tvm import relay
    assert (
        len(desired_layouts) == 2
    ), "A desired layout is expected for both of qnn.conv2d_transpose's inputs"
    data_layout, kernel_layout = (str(layout) for layout in desired_layouts)
    assert data_layout != "default", "Data layout cannot be default"
    updated_attrs = dict(attrs)
    updated_attrs["data_layout"] = data_layout
    if kernel_layout != "default":
        updated_attrs["kernel_layout"] = kernel_layout
    else:
        # Conventional kernel layout for each supported data layout.
        default_kernel_layouts = {"NCHW": "IOHW", "NHWC": "HWIO"}
        if data_layout not in default_kernel_layouts:
            raise ValueError(f"Layout {data_layout} is not yet supported")
        updated_attrs["kernel_layout"] = default_kernel_layouts[data_layout]
    return relay.qnn.op.conv2d_transpose(*inputs, **updated_attrs)
@reg.register_convert_op_layout("qnn.avg_pool2d")
def convert_qnn_avg_pool2d(attrs, inputs, tinfos, desired_layouts):
    """Convert Layout pass registration for QNN avg_pool2d op.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current avg_pool2d.
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized.
    tinfos : list of types
        List of input and output types.
    desired_layouts : list of layout strings
        List of layouts defining our desired layout for the data input.

    Returns
    -------
    result : tvm.relay.Expr
        The transformed expr.

    Raises
    ------
    ValueError
        If the desired layout is neither NCHW nor NHWC.
    """
    # pylint: disable=import-outside-toplevel
    from tvm import relay

    assert len(desired_layouts) == 1, "A desired layout is expected for qnn.avg_pool2d's input"
    # Coerce to str once up front (layouts may arrive as Layout objects),
    # matching the other qnn converters in this file.
    desired_data_layout = str(desired_layouts[0])
    # Idiomatic membership test instead of a chained equality comparison.
    if desired_data_layout in ("NCHW", "NHWC"):
        new_attrs = dict(attrs)
        new_attrs["layout"] = desired_data_layout
        new_attrs["out_layout"] = desired_data_layout
        return relay.qnn.op.avg_pool2d(*inputs, **new_attrs)
    raise ValueError(f"Layout {desired_data_layout} is not yet supported")
| 5,883 | 34.878049 | 100 | py |
tvm | tvm-main/python/tvm/relay/qnn/op/_qnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, len-as-condition
"""QNN operator feature registration"""
import numpy as np
from tvm import topi
from .. import strategy
from ...op.op import register_compute
from ...op.op import register_injective_schedule
from ...op.op import (
OpPattern,
register_alter_op_layout,
register_legalize,
register_pattern,
register_strategy,
)
@register_compute("qnn.simulated_quantize")
def simulated_quantize_compute(attrs, inputs, output_type):
    """Compute registration for qnn.simulated_quantize: forward the four
    operator inputs to the TOPI implementation along with the channel axis."""
    assert len(inputs) == 4
    quantize_axis = attrs.get_int("axis")
    out = topi.nn.simulated_quantize(*inputs, axis=quantize_axis)
    return [out]
# simulated_quantize lowers to an elementwise TOPI compute, so the generic
# injective schedule applies and it may be fused as an elementwise op.
register_injective_schedule("qnn.simulated_quantize")
register_pattern("qnn.simulated_quantize", OpPattern.ELEMWISE)
@register_compute("qnn.simulated_dequantize")
def simulated_dequantize_compute(attrs, inputs, output_type):
    """Compute registration for qnn.simulated_dequantize: forward the four
    operator inputs to the TOPI implementation along with the channel axis."""
    assert len(inputs) == 4
    dequantize_axis = attrs.get_int("axis")
    out = topi.nn.simulated_dequantize(*inputs, axis=dequantize_axis)
    return [out]
register_injective_schedule("qnn.simulated_dequantize")
register_pattern("qnn.simulated_dequantize", OpPattern.ELEMWISE)
# Register compute strategies (and fusion patterns, where applicable) for the
# remaining QNN ops; the strategies live in relay.qnn.strategy.
# qnn.quantize
register_strategy("qnn.quantize", strategy.qnn_quantize_strategy)
register_pattern("qnn.quantize", OpPattern.ELEMWISE)
# qnn.dequantize
register_strategy("qnn.dequantize", strategy.qnn_dequantize_strategy)
register_pattern("qnn.dequantize", OpPattern.ELEMWISE)
# qnn.requantize
register_strategy("qnn.requantize", strategy.qnn_requantize_strategy)
register_pattern("qnn.requantize", OpPattern.ELEMWISE)
# qnn.add
register_strategy("qnn.add", strategy.qnn_add_strategy)
# qnn.subtract
register_strategy("qnn.subtract", strategy.qnn_subtract_strategy)
# qnn.mul
register_strategy("qnn.mul", strategy.qnn_mul_strategy)
# qnn.tanh
register_strategy("qnn.tanh", strategy.qnn_tanh_strategy)
register_pattern("qnn.tanh", OpPattern.ELEMWISE)
# qnn.concatenate
register_strategy("qnn.concatenate", strategy.qnn_concatenate_strategy)
register_pattern("qnn.concatenate", OpPattern.INJECTIVE)
# qnn.conv2d
register_strategy("qnn.conv2d", strategy.qnn_conv2d_strategy)
@register_legalize("clip")
def legalize_clip(attrs, inputs, tinfos):
    """Drop clip operators whose bounds match the full range of their dtype.

    TVM's simplification passes already do this after alter_op, but certain
    QNN operator implementations (like Cortex-M) need it done earlier, during
    legalization. Returns the producing qnn.requantize expr when the clip is
    a no-op, otherwise None (keep the op unchanged).
    """
    producer = inputs[0]
    producer_op = getattr(producer, "op", None)
    if getattr(producer_op, "name", None) == "qnn.requantize":
        bounds = np.iinfo(tinfos[0].dtype)
        if bounds.min == attrs.a_min and bounds.max == attrs.a_max:
            return producer
    return None
@register_legalize("nn.bias_add")
def legalize_bias_add(attrs, inputs, tinfos):
    """Legalize nn.bias_add by delegating to the TOPI helper.

    The helper may "fold in" unused channels left over from quantized
    convolution operators; doing this before layout rewrites minimizes the
    number of extra "cast" and "layout_transform" operators introduced.
    """
    return topi.nn.bias_add_legalize(attrs, inputs, tinfos)
@register_alter_op_layout("qnn.conv2d")
def alter_op_layout_qnn_conv2d(attrs, inputs, tinfos, out_type):
    """AlterOpLayout hook for qnn.conv2d, delegating to TOPI.

    Besides rewriting the qnn.conv2d itself, the TOPI hook may adjust
    previous ops — e.g. Arm Cortex-M sets the out_layout of producers to the
    input layout this op prefers.
    """
    return topi.nn.qnn_conv2d_alter_layout(attrs, inputs, tinfos, out_type)
@register_alter_op_layout("add")
def alter_op_layout_add(attrs, inputs, tinfos, out_type):
    """AlterOpLayout hook for add, delegating to TOPI.

    Useful for fusing a bias constant with the input zero-point constant of a
    preceding quantized op — which is why the implementation lives in
    topi.nn.qnn. Only takes effect when the previous op is a quantized op.
    """
    return topi.nn.add_alter_layout(attrs, inputs, tinfos, out_type)
@register_alter_op_layout("qnn.requantize")
def alter_op_layout_qnn_requantize(attrs, inputs, tinfos, out_type):
    """AlterOpLayout hook for qnn.requantize, delegating to TOPI."""
    return topi.nn.qnn_requantize_alter_layout(attrs, inputs, tinfos, out_type)
# qnn.dense — compute strategy lives in relay.qnn.strategy.
register_strategy("qnn.dense", strategy.qnn_dense_strategy)
@register_alter_op_layout("qnn.dense")
def alter_op_layout_qnn_dense(attrs, inputs, tinfos, out_type):
    """AlterOpLayout hook for qnn.dense, delegating to TOPI."""
    return topi.nn.qnn_dense_alter_layout(attrs, inputs, tinfos, out_type)
# Strategies/patterns for the remaining QNN ops.
# qnn.contrib_dense_pack
register_strategy("qnn.contrib_dense_pack", strategy.qnn_dense_pack_strategy)
# qnn.batch_matmul
register_strategy("qnn.batch_matmul", strategy.qnn_batch_matmul_strategy)
register_pattern("qnn.batch_matmul", OpPattern.OUT_ELEMWISE_FUSABLE)
# qnn.avg_pool2d
register_strategy("qnn.avg_pool2d", strategy.qnn_avg_pool2d_strategy)
| 5,833 | 32.722543 | 97 | py |
tvm | tvm-main/python/tvm/relay/qnn/op/qnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument, not-context-manager
"""QNN dialect operators."""
from __future__ import absolute_import as _abs
import tvm
import tvm.ir
from tvm import relay
from tvm.relay.expr import Tuple, TupleWrapper
from tvm.relay.op.nn.utils import get_pad_tuple2d
from tvm.runtime import Object
from tvm.target import Target
from tvm.topi.nn.qnn import SQNN_DTYPE_TO_CODE
from tvm.target.x86 import target_has_sse41
from . import _make, _requantize
@tvm._ffi.register_object("relay.qnn.op.RequantizeConfig")
class RequantizeConfig(Object):
    """Configure the requantization behavior by setting config variables.

    Note
    ----
    This object is backed by node system in C++, with arguments that can be
    exchanged between python and C++.
    Do not construct directly, use requantize_config instead.
    The fields that are backed by the C++ node are immutable once an instance
    is constructed. Use _node_defaults getters to get results for the fields.
    """
    @staticmethod
    def _get_node_default_rounding():
        # Default rounding direction for fixed-point multiplications.
        return "UPWARD"
    @staticmethod
    def _get_node_default_compute_dtype():
        # On x86 LLVM targets with SSE4.1 the float32 path is used; all other
        # targets fall back to 64-bit integer arithmetic.
        target = Target.current(True)
        if target and str(target.kind) == "llvm" and target_has_sse41(target.mcpu):
            return "float32"
        return "int64"
    # Map of config-field name -> zero-arg factory producing its default.
    # ``.__func__`` unwraps the staticmethod descriptor so the factories can
    # be called lazily (e.g. the compute dtype depends on the current target).
    _node_defaults = {
        "rounding": _get_node_default_rounding.__func__,
        "compute_dtype": _get_node_default_compute_dtype.__func__,
    }
    # pylint: disable=no-member
    def __init__(self, handle):
        """Initialize the function with handle
        Parameters
        ----------
        handle : SymbolHandle
            the handle to the underlying C++ Symbol
        """
        super(RequantizeConfig, self).__init__(handle)
        self.handle = handle
    def __enter__(self):
        # Push this config onto the C++-side scope stack so requantize
        # lowering inside the ``with`` block picks it up.
        # pylint: disable=protected-access
        _requantize._EnterRequantizeConfigScope(self)
        return self
    def __exit__(self, ptype, value, trace):
        # Pop the config from the C++-side scope stack.
        _requantize._ExitRequantizeConfigScope()
    def __setattr__(self, name, value):
        # Guard: fields backed by the C++ node are immutable after
        # construction; reject assignments to them from Python.
        if name in RequantizeConfig._node_defaults:
            raise AttributeError(f"'{type(self)}' object cannot set attribute '{name}'")
        return super(RequantizeConfig, self).__setattr__(name, value)
def current_requantize_config():
    """Return the requantization configuration currently in scope."""
    # pylint: disable=protected-access
    return _requantize._GetCurrentRequantizeConfig()
def requantize_config(**kwargs):
    """Build a requantization configuration node.

    Parameters
    ----------
    rounding: "UPWARD" or "TONEAREST"
        Rounding direction for fixed point multiplications.
    compute_dtype:
        Specifies the data type used during requantize.
        Supported options: \"int64\", \"float32\", \"float64\"

    Returns
    -------
    config: RequantizeConfig
        The requantization configuration
    """
    node_args = {}
    for key, default_factory in RequantizeConfig._node_defaults.items():
        # Only invoke the default factory when the caller did not override it.
        node_args[key] = kwargs[key] if key in kwargs else default_factory()
    return tvm.ir.make_node("relay.qnn.op.RequantizeConfig", **node_args)
def requantize(
    data,
    input_scale,
    input_zero_point,
    output_scale,
    output_zero_point,
    axis=-1,
    rounding="None",
    compute_dtype="None",
    out_dtype="int8",
):
    r"""Convert one quantized tensor representation into another.

    Given input and output scale/zero-point pairs, computes

        Q_output = zp_output + (scale_input)/(scale_output) * (Q_input - zp_input)

    Parameters
    ----------
    data : tvm.relay.Expr
        The quantized input tensor.
    input_scale : tvm.relay.Expr
        Quantization scale of the input tensor.
    input_zero_point : tvm.relay.Expr
        Zero point of the input tensor.
    output_scale : tvm.relay.Expr
        Quantization scale of the output tensor.
    output_zero_point : tvm.relay.Expr
        Zero point of the output tensor.
    axis : int
        Channel axis for quantization; -1 (default) means the last axis.
    rounding : string, optional
        Rounding direction when a value is midway between two representable
        values ("None" defers to the current RequantizeConfig).
    compute_dtype :
        Data type used during requantize: "int64", "float32" or "float64"
        ("None" defers to the current RequantizeConfig).
    out_dtype : str, optional
        Output data type.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    return _make.requantize(
        data,
        input_scale,
        input_zero_point,
        output_scale,
        output_zero_point,
        axis,
        rounding,
        compute_dtype,
        out_dtype,
    )
def quantize(data, output_scale, output_zero_point, axis=-1, out_dtype="int8"):
    r"""Quantize a float32 tensor.

    The output has the same shape as the input and is computed as

        Q_output = clamp(round(input_tensor/output_scale) + output_zero_point,
                         out_dtype::min, out_dtype::max)

    Parameters
    ----------
    data : tvm.relay.Expr
        The float32 tensor to quantize.
    output_scale : tvm.relay.Expr
        The output scale.
    output_zero_point : tvm.relay.Expr
        The output zero point.
    axis : int
        Channel axis for quantization; -1 (default) means the last axis.
    out_dtype : str, optional
        Output data type: one of [int8, uint8, int16, uint16, int32].

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    return _make.quantize(data, output_scale, output_zero_point, axis, out_dtype)
def simulated_quantize(data, output_scale, output_zero_point, axis=-1, out_dtype="int8"):
    r"""Simulated quantize.

    Mimics qnn.quantize but accepts a wider range of inputs and always
    returns the input's own dtype, which makes it useful for calibrating or
    training a quantized network.

    Parameters
    ----------
    data : tvm.relay.Expr
        The tensor to (simulated-)quantize. Can be of type float32.
    output_scale : tvm.relay.Expr
        The output scale.
    output_zero_point : tvm.relay.Expr
        The output zero point.
    axis : int
        Channel axis for quantization; -1 (default) means the last axis.
    out_dtype : string or tvm.relay.Expr
        A string or tensor indicating which datatype to quantize to.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    if isinstance(out_dtype, str):
        # Encode the dtype name as the integer code TOPI expects.
        out_dtype = relay.const(SQNN_DTYPE_TO_CODE[out_dtype], dtype="int32")
    # Flatten the qnn parameter tensors so scalar and per-channel arguments
    # share a guaranteed-compatible shape.
    flat_scale = relay.op.reshape(output_scale, [-1])
    flat_zero_point = relay.op.reshape(output_zero_point, [-1])
    return _make.simulated_quantize(data, out_dtype, flat_scale, flat_zero_point, axis)
def dequantize(data, input_scale, input_zero_point, axis=-1, out_dtype="float32"):
    r"""Dequantize a quantized tensor to floating point.

    The output has the same shape as the input.

    Parameters
    ----------
    data : tvm.relay.Expr
        The tensor to dequantize; one of [int8, uint8, int16, uint16, int32].
    input_scale : tvm.relay.Expr
        The input scale.
    input_zero_point : tvm.relay.Expr
        The input zero point.
    axis : int
        Channel axis for quantization; -1 (default) means the last axis.
    out_dtype : str, optional
        Output data type: float16 or float32.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    return _make.dequantize(data, input_scale, input_zero_point, axis, out_dtype)
def simulated_dequantize(data, input_scale, input_zero_point, axis=-1, in_dtype="int8"):
    r"""Simulated dequantize.

    Mimics qnn.dequantize but accepts a wider range of inputs and always
    returns the input's own dtype, which makes it useful for calibrating or
    training a quantized network.

    Parameters
    ----------
    data : tvm.relay.Expr
        The tensor to (simulated-)dequantize.
    input_scale : tvm.relay.Expr
        The input scale.
    input_zero_point : tvm.relay.Expr
        The input zero point.
    axis : int
        Channel axis for quantization; -1 (default) means the last axis.
    in_dtype : string or tvm.relay.Expr
        A string or tensor indicating which datatype to dequantize from.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    if isinstance(in_dtype, str):
        # Encode the dtype name as the integer code TOPI expects.
        in_dtype = relay.const(SQNN_DTYPE_TO_CODE[in_dtype], dtype="int32")
    # Flatten the qnn parameter tensors so scalar and per-channel arguments
    # share a guaranteed-compatible shape.
    flat_scale = relay.op.reshape(input_scale, [-1])
    flat_zero_point = relay.op.reshape(input_zero_point, [-1])
    return _make.simulated_dequantize(data, in_dtype, flat_scale, flat_zero_point, axis)
def concatenate(data, input_scales, input_zero_points, output_scale, output_zero_point, axis):
    """Concatenate quantized tensors along a given axis.

    Parameters
    ----------
    data : Union(List[relay.Expr], Tuple[relay.Expr], TupleWrapper[relay.Expr])
        The quantized tensors to concatenate.
    input_scales : List[relay.Expr]
        Scales of the input quantized tensors.
    input_zero_points : List[relay.Expr]
        Zero points of the input quantized tensors.
    output_scale : relay.Expr
        Scale of the output quantized tensor.
    output_zero_point : relay.Expr
        Zero point of the output quantized tensor.
    axis : int
        Axis along which the tensors are concatenated.

    Returns
    -------
    result: relay.Expr
        The concatenated quantized tensor.
    """
    if isinstance(data, (list, tuple)):
        tuple_data = Tuple(data)
    elif isinstance(data, TupleWrapper):
        tuple_data = data.tuple_value
    else:
        tuple_data = data
    if not isinstance(axis, int):
        raise ValueError("For now, we only support integer axis")
    return _make.concatenate(
        tuple_data,
        Tuple(list(input_scales)),
        Tuple(list(input_zero_points)),
        output_scale,
        output_zero_point,
        axis,
    )
def conv2d(
    data,
    kernel,
    input_zero_point,
    kernel_zero_point,
    input_scale,
    kernel_scale,
    kernel_size,
    channels,
    strides=(1, 1),
    padding=(0, 0),
    dilation=(1, 1),
    groups=1,
    data_layout="NCHW",
    kernel_layout="OIHW",
    out_layout="",
    out_dtype="int32",
):
    r"""Quantized 2D convolution.

    Convolves quantized data with a quantized kernel. For per-channel
    quantization, kernel_scale (and optionally kernel_zero_point) are 1-D
    vectors rather than scalars. The output's scale is the product of
    input_scale and kernel_scale, its zero point is 0, and its dtype defaults
    to int32 — see qnn.requantize for scaling back to (u)int8.

    Parameters
    ----------
    data : tvm.relay.Expr
        The input data to the operator.
    kernel : tvm.relay.Expr
        The kernel expressions.
    input_zero_point : tvm.relay.Expr
        Zero point of the data distribution.
    kernel_zero_point : tvm.relay.Expr
        Zero point of the quantized kernel distribution.
    input_scale : tvm.relay.Expr
        Scale of the input tensor; carried here purely for convenience.
    kernel_scale : tvm.relay.Expr
        Scale of the weight tensor; kept for Relay-level access. Not needed
        once qnn.conv2d is lowered to the nn.conv2d sequence of steps.
    kernel_size : tuple of int
        Spatial width and height of the convolution kernel.
    channels : int
        Number of output channels.
    strides : tuple of int, optional
        The strides of convolution.
    padding : tuple of int, optional
        Padding applied on both sides of the input before convolution.
    dilation : tuple of int, optional
        Dilation rate for dilated convolution.
    groups : int, optional
        Number of groups for grouped convolution.
    data_layout : str, optional
        Layout of the input.
    kernel_layout : str, optional
        Layout of the kernel.
    out_layout : str, optional
        Layout of the output; defaults to data_layout.
    out_dtype : str, optional
        Output data type for mixed-precision conv2d.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    # TODO enforce 4-way padding in topi/nn/conv2d after #4644 merged.
    # Normalize 2-way padding into the 4-way form the backend expects.
    four_way_padding = get_pad_tuple2d(padding)
    return _make.conv2d(
        data,
        kernel,
        input_zero_point,
        kernel_zero_point,
        input_scale,
        kernel_scale,
        strides,
        four_way_padding,
        dilation,
        groups,
        channels,
        kernel_size,
        data_layout,
        kernel_layout,
        out_layout,
        out_dtype,
    )
def conv2d_transpose(
    data,
    weight,
    input_zero_point,
    kernel_zero_point,
    input_scale,
    kernel_scale,
    strides=(1, 1),
    padding=(0, 0),
    dilation=(1, 1),
    groups=1,
    channels=None,
    kernel_size=None,
    data_layout="NCHW",
    kernel_layout="IOHW",
    out_layout="",
    output_padding=(0, 0),
    out_dtype="int32",
):
    """Quantized transposed 2D convolution.

    Deconvolves quantized data with a quantized kernel. The output's scale is
    the product of input_scale and kernel_scale, its zero point is 0, and its
    dtype defaults to int32 — see qnn.requantize for scaling back to (u)int8.

    Parameters
    ----------
    data : tvm.relay.Expr
        The input data to the operator.
    weight : tvm.relay.Expr
        The weight expressions.
    input_zero_point : tvm.relay.Expr
        Zero point of the data distribution.
    kernel_zero_point : tvm.relay.Expr
        Zero point of the quantized kernel distribution.
    input_scale : tvm.relay.Expr
        Scale of the input tensor; carried here purely for convenience.
    kernel_scale : tvm.relay.Expr
        Scale of the weight tensor; kept for Relay-level access. Not needed
        once qnn.conv2d_transpose is lowered to the nn.conv2d_transpose
        sequence of steps.
    strides : Tuple[int], optional
        The strides of convolution.
    padding : Tuple[int], optional
        The padding of convolution.
    dilation : Tuple[int], optional
        Dilation rate for dilated convolution.
    groups : int, optional
        Number of groups for grouped convolution.
    channels : int, optional
        Number of output channels.
    kernel_size : tuple of int, optional
        Spatial dimensions of the convolution kernel.
    data_layout : str, optional
        Layout of the input.
    kernel_layout : str, optional
        Layout of the weight.
    out_layout : Optional[str]
        Layout of the output; defaults to data_layout.
    output_padding : Tuple[int], optional
        Extra padding identifying the output shape (only used in training,
        where transpose_conv represents the gradient of a convolution).
    out_dtype : str, optional
        Output data type for mixed-precision conv2d.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    # Normalize 2-way padding into the 4-way form the backend expects.
    four_way_padding = get_pad_tuple2d(padding)
    return _make.conv2d_transpose(
        data,
        weight,
        input_zero_point,
        kernel_zero_point,
        input_scale,
        kernel_scale,
        strides,
        four_way_padding,
        dilation,
        groups,
        channels,
        kernel_size,
        data_layout,
        kernel_layout,
        out_layout,
        output_padding,
        out_dtype,
    )
def add(
    lhs,
    rhs,
    lhs_scale,
    lhs_zero_point,
    rhs_scale,
    rhs_zero_point,
    output_scale,
    output_zero_point,
    lhs_axis=-1,
    rhs_axis=-1,
):
    """Quantized addition with numpy-style broadcasting.

    Parameters
    ----------
    lhs : relay.Expr
        Left-hand-side quantized input.
    rhs : relay.Expr
        Right-hand-side quantized input.
    lhs_scale : relay.Expr
        Scale of the lhs quantized expr.
    lhs_zero_point : relay.Expr
        Zero point of the lhs quantized expr.
    rhs_scale : relay.Expr
        Scale of the rhs quantized expr.
    rhs_zero_point : relay.Expr
        Zero point of the rhs quantized expr.
    output_scale : relay.Expr
        Scale of the output quantized expr.
    output_zero_point : relay.Expr
        Zero point of the output quantized expr.
    lhs_axis : int
        Channel axis for lhs quantization; -1 (default) means the last axis.
    rhs_axis : int
        Channel axis for rhs quantization; -1 (default) means the last axis.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.add(
        lhs,
        rhs,
        lhs_scale,
        lhs_zero_point,
        rhs_scale,
        rhs_zero_point,
        output_scale,
        output_zero_point,
        lhs_axis,
        rhs_axis,
    )
def dense(
    data,
    weight,
    input_zero_point,
    kernel_zero_point,
    input_scale,
    kernel_scale,
    units,
    out_dtype="int32",
):
    """Quantized dense (linear) transformation, Y = X * W.

    For per-channel quantization, kernel_scale (and optionally
    kernel_zero_point) are 1-D vectors rather than scalars.

    Parameters
    ----------
    data : tvm.relay.Expr
        The quantized input data.
    weight : tvm.relay.Expr
        The quantized weight expressions.
    input_zero_point : tvm.relay.Expr
        The input zero point.
    kernel_zero_point : tvm.relay.Expr
        The kernel zero point.
    input_scale : tvm.relay.Expr
        Scale of the input tensor.
    kernel_scale : tvm.relay.Expr
        Scale of the weight tensor; kept for Relay-level access. Not needed
        once the op is lowered to the nn.conv2d sequence of steps.
    units : int
        Number of hidden units of the dense transformation.
    out_dtype : str, optional
        Output data type for mixed-precision dense: int32 or int16.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    return _make.dense(
        data,
        weight,
        input_zero_point,
        kernel_zero_point,
        input_scale,
        kernel_scale,
        units,
        out_dtype,
    )
def contrib_dense_pack(
    data,
    weight,
    input_zero_point,
    kernel_zero_point,
    input_scale,
    kernel_scale,
    kernel_layout="NC",
    units=None,
    out_dtype="int32",
):
    """Quantized packed dense transformation, Y = X * W.

    For per-channel quantization, kernel_scale (and optionally
    kernel_zero_point) are 1-D vectors rather than scalars.

    Parameters
    ----------
    data : tvm.relay.Expr
        The quantized input data.
    weight : tvm.relay.Expr
        The quantized weight expressions.
    input_zero_point : tvm.relay.Expr
        The input zero point.
    kernel_zero_point : tvm.relay.Expr
        The kernel zero point.
    input_scale : tvm.relay.Expr
        Scale of the input tensor.
    kernel_scale : tvm.relay.Expr
        Scale of the weight tensor; kept for Relay-level access. Not needed
        once the op is lowered to the nn.conv2d sequence of steps.
    kernel_layout : str
        Layout of the weight, such as "NC" or "NC32n4c".
    units : int, optional
        Number of hidden units of the dense transformation.
    out_dtype : str, optional
        Output data type for mixed-precision dense: int32 or int16.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    return _make.contrib_dense_pack(
        data,
        weight,
        input_zero_point,
        kernel_zero_point,
        input_scale,
        kernel_scale,
        kernel_layout,
        units,
        out_dtype,
    )
def mul(
    lhs,
    rhs,
    lhs_scale,
    lhs_zero_point,
    rhs_scale,
    rhs_zero_point,
    output_scale,
    output_zero_point,
    lhs_axis=-1,
    rhs_axis=-1,
):
    """Quantized multiplication with numpy-style broadcasting.

    Parameters
    ----------
    lhs : relay.Expr
        Left-hand-side quantized input.
    rhs : relay.Expr
        Right-hand-side quantized input.
    lhs_scale : relay.Expr
        Scale of the lhs quantized expr.
    lhs_zero_point : relay.Expr
        Zero point of the lhs quantized expr.
    rhs_scale : relay.Expr
        Scale of the rhs quantized expr.
    rhs_zero_point : relay.Expr
        Zero point of the rhs quantized expr.
    output_scale : relay.Expr
        Scale of the output quantized expr.
    output_zero_point : relay.Expr
        Zero point of the output quantized expr.
    lhs_axis : int
        Channel axis for lhs quantization; -1 (default) means the last axis.
    rhs_axis : int
        Channel axis for rhs quantization; -1 (default) means the last axis.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.mul(
        lhs,
        rhs,
        lhs_scale,
        lhs_zero_point,
        rhs_scale,
        rhs_zero_point,
        output_scale,
        output_zero_point,
        lhs_axis,
        rhs_axis,
    )
def tanh(x, scale, zero_point, output_scale, output_zero_point):
    """Quantized hyperbolic tangent.

    Parameters
    ----------
    x : relay.Expr
        Quantized input tensor.
    scale, zero_point : relay.Expr
        Quantization parameters of ``x``.
    output_scale, output_zero_point : relay.Expr
        Quantization parameters of the result.

    Returns
    -------
    relay.Expr
        The quantized tanh of ``x``.
    """
    quant_args = (scale, zero_point, output_scale, output_zero_point)
    return _make.tanh(x, *quant_args)
def exp(x, scale, zero_point, output_scale, output_zero_point):
    """Quantized exponential function.

    Parameters
    ----------
    x : relay.Expr
        Quantized input tensor.
    scale, zero_point : relay.Expr
        Quantization parameters of ``x``.
    output_scale, output_zero_point : relay.Expr
        Quantization parameters of the result.

    Returns
    -------
    relay.Expr
        The quantized exp of ``x``.
    """
    quant_args = (scale, zero_point, output_scale, output_zero_point)
    return _make.exp(x, *quant_args)
def sqrt(x, scale, zero_point, output_scale, output_zero_point):
    """Quantized square root.

    Parameters
    ----------
    x : relay.Expr
        Quantized input tensor.
    scale, zero_point : relay.Expr
        Quantization parameters of ``x``.
    output_scale, output_zero_point : relay.Expr
        Quantization parameters of the result.

    Returns
    -------
    relay.Expr
        The quantized sqrt of ``x``.
    """
    quant_args = (scale, zero_point, output_scale, output_zero_point)
    return _make.sqrt(x, *quant_args)
def rsqrt(x, scale, zero_point, output_scale, output_zero_point):
    """Quantized reciprocal square root.

    Parameters
    ----------
    x : relay.Expr
        Quantized input tensor.
    scale, zero_point : relay.Expr
        Quantization parameters of ``x``.
    output_scale, output_zero_point : relay.Expr
        Quantization parameters of the result.

    Returns
    -------
    relay.Expr
        The quantized 1/sqrt of ``x``.
    """
    quant_args = (scale, zero_point, output_scale, output_zero_point)
    return _make.rsqrt(x, *quant_args)
def erf(x, scale, zero_point, output_scale, output_zero_point):
    """Quantized error function.

    Parameters
    ----------
    x : relay.Expr
        Quantized input tensor.
    scale, zero_point : relay.Expr
        Quantization parameters of ``x``.
    output_scale, output_zero_point : relay.Expr
        Quantization parameters of the result.

    Returns
    -------
    relay.Expr
        The quantized erf of ``x``.
    """
    quant_args = (scale, zero_point, output_scale, output_zero_point)
    return _make.erf(x, *quant_args)
# pylint: disable=redefined-builtin
def abs(x, scale, zero_point, output_scale, output_zero_point):
    """Quantized absolute value.

    Parameters
    ----------
    x : relay.Expr
        Quantized input tensor.
    scale, zero_point : relay.Expr
        Quantization parameters of ``x``.
    output_scale, output_zero_point : relay.Expr
        Quantization parameters of the result.

    Returns
    -------
    relay.Expr
        The quantized absolute value of ``x``.
    """
    quant_args = (scale, zero_point, output_scale, output_zero_point)
    return _make.abs(x, *quant_args)
def sigmoid(x, scale, zero_point, output_scale, output_zero_point):
    """Quantized logistic sigmoid.

    Parameters
    ----------
    x : relay.Expr
        Quantized input tensor.
    scale, zero_point : relay.Expr
        Quantization parameters of ``x``.
    output_scale, output_zero_point : relay.Expr
        Quantization parameters of the result.

    Returns
    -------
    relay.Expr
        The quantized sigmoid of ``x``.
    """
    quant_args = (scale, zero_point, output_scale, output_zero_point)
    return _make.sigmoid(x, *quant_args)
def hardswish(x, scale, zero_point, output_scale, output_zero_point):
    """Quantized hard-swish.

    Parameters
    ----------
    x : relay.Expr
        Quantized input tensor.
    scale, zero_point : relay.Expr
        Quantization parameters of ``x``.
    output_scale, output_zero_point : relay.Expr
        Quantization parameters of the result.

    Returns
    -------
    relay.Expr
        The quantized hard-swish of ``x``.
    """
    quant_args = (scale, zero_point, output_scale, output_zero_point)
    return _make.hardswish(x, *quant_args)
def log(x, scale, zero_point, output_scale, output_zero_point):
    """Quantized natural logarithm.

    Parameters
    ----------
    x : relay.Expr
        Quantized input tensor.
    scale, zero_point : relay.Expr
        Quantization parameters of ``x``.
    output_scale, output_zero_point : relay.Expr
        Quantization parameters of the result.

    Returns
    -------
    relay.Expr
        The quantized log of ``x``.
    """
    quant_args = (scale, zero_point, output_scale, output_zero_point)
    return _make.log(x, *quant_args)
def subtract(
    lhs,
    rhs,
    lhs_scale,
    lhs_zero_point,
    rhs_scale,
    rhs_zero_point,
    output_scale,
    output_zero_point,
    lhs_axis=-1,
    rhs_axis=-1,
):
    """Quantized elementwise subtraction with numpy-style broadcasting.

    Parameters
    ----------
    lhs, rhs : relay.Expr
        Quantized left/right operands.
    lhs_scale, lhs_zero_point : relay.Expr
        Quantization parameters of ``lhs``.
    rhs_scale, rhs_zero_point : relay.Expr
        Quantization parameters of ``rhs``.
    output_scale, output_zero_point : relay.Expr
        Quantization parameters of the result.
    lhs_axis, rhs_axis : int
        Channel axis for per-channel quantization of the corresponding
        operand; -1 (default) selects the last axis.

    Returns
    -------
    relay.Expr
        The quantized difference.
    """
    call_args = (
        lhs,
        rhs,
        lhs_scale,
        lhs_zero_point,
        rhs_scale,
        rhs_zero_point,
        output_scale,
        output_zero_point,
        lhs_axis,
        rhs_axis,
    )
    return _make.subtract(*call_args)
def batch_matmul(x, y, x_zero_point, y_zero_point, x_scale, y_scale, out_dtype="int32"):
    r"""Quantized batch matrix multiplication of ``x`` and ``y``.

    .. math::

        \mbox{batch_matmul}(x, y)[i, :, :] = \mbox{matmul}(x[i, :, :], y[i, :, :]^T)

    Parameters
    ----------
    x : tvm.relay.Expr
        The first quantized input. A quantized tensor is represented as
        ``A = scale_a x (QA - zp_A)`` where ``QA`` is the stored integer
        tensor and ``scale_a`` / ``zp_A`` are its quantization parameters.
    y : tvm.relay.Expr
        The second quantized input.
    x_zero_point, y_zero_point : tvm.relay.Expr
        Zero points of the first and second input.
    x_scale, y_scale : tvm.relay.Expr
        Scales of the first and second input.
    out_dtype : str, optional
        Output data type for mixed precision; can be int32 or int16.

    Returns
    -------
    tvm.relay.Expr
        The quantized batch matmul result.
    """
    operands = (x, y, x_zero_point, y_zero_point, x_scale, y_scale)
    return _make.batch_matmul(*operands, out_dtype)
def leaky_relu(x, alpha, input_scale, input_zero_point, output_scale, output_zero_point):
    """Quantized leaky ReLU.

    Parameters
    ----------
    x : relay.Expr
        Quantized input tensor.
    alpha : double
        Slope applied to negative inputs.
    input_scale, input_zero_point : relay.Expr
        Quantization parameters of ``x``.
    output_scale, output_zero_point : relay.Expr
        Quantization parameters of the result.

    Returns
    -------
    relay.Expr
        The quantized leaky ReLU of ``x``.
    """
    quant_args = (input_scale, input_zero_point, output_scale, output_zero_point)
    return _make.leaky_relu(x, alpha, *quant_args)
def softmax(x, scale, zero_point, output_scale, output_zero_point, axis=-1):
    """Quantized softmax.

    Parameters
    ----------
    x : relay.Expr
        The quantized input tensor.
    scale : relay.Expr
        The scale of the input quantized expr.
    zero_point : relay.Expr
        The zero point of the input quantized expr.
    output_scale : relay.Expr
        The scale of the output quantized expr.
    output_zero_point : relay.Expr
        The zero point of the output quantized expr.
    axis : int
        The axis along which softmax is computed; -1 (default) is the last axis.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    # NOTE: the FFI maker takes ``axis`` as its second argument, unlike the
    # Python signature where it comes last.
    return _make.softmax(x, axis, scale, zero_point, output_scale, output_zero_point)
def avg_pool2d(
    data,
    input_scale,
    input_zero_point,
    output_scale,
    output_zero_point,
    pool_size,
    strides,
    padding,
    dilation,
    ceil_mode=False,
    count_include_pad=True,
    layout="NHWC",
    out_layout="",
):
    """Quantized avg_pool2d

    Parameters
    ----------
    data : relay.Expr
        The quantized input tensor.
    input_scale: float
        The scale of the input quantized expr.
    input_zero_point: int
        The zero point of input quantized expr.
    output_scale: float
        The scale of the output quantized expr.
    output_zero_point: int
        The zero point of output quantized expr.
    pool_size : relay.Expr
        The pool_size
    strides : relay.Expr
        The strides
    padding : relay.Expr
        The padding size
    dilation : relay.Expr
        The dilation size
    ceil_mode : bool, optional
        Whether to use ceil or floor for calculating the output shape
    count_include_pad : bool, optional
        Determines if padding should be taken into account in the computation
    layout: string, optional
        Layout of the input data (defaults to "NHWC")
    out_layout: string, optional
        Layout of the output; empty string means same as ``layout``

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.avg_pool2d(
        data,
        input_scale,
        input_zero_point,
        output_scale,
        output_zero_point,
        pool_size,
        strides,
        padding,
        dilation,
        ceil_mode,
        count_include_pad,
        layout,
        out_layout,
    )
| 35,468 | 25.850114 | 100 | py |
tvm | tvm-main/python/tvm/relay/qnn/op/_requantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Internal module for qnn requantization."""
import tvm._ffi
tvm._ffi._init_api("relay._requantize", __name__)
| 932 | 41.409091 | 62 | py |
tvm | tvm-main/python/tvm/relay/qnn/op/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, redefined-builtin
"""QNN dialect related operators."""
from __future__ import absolute_import as _abs
from .qnn import *
from .op import register_qnn_legalize, register_qnn_canonicalize
from . import _qnn, legalizations, layout_conversions, canonicalizations
| 1,080 | 44.041667 | 72 | py |
tvm | tvm-main/python/tvm/relay/qnn/op/legalizations.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Backend QNN related feature registration"""
import numpy as np
from scipy import special
import tvm
from tvm import relay
from tvm._ffi.base import TVMError
from tvm.relay.qnn.op.canonicalizations import create_integer_lookup_op
from ....target.x86 import target_has_sse42
from ....topi.utils import is_target
from .. import op as reg
#################################################
# Register the functions for different operators.
#################################################
# Registering QNN Conv2D legalization function.
@reg.register_qnn_legalize("qnn.conv2d")
def legalize_qnn_conv2d(attrs, inputs, types):
    """Dispatch qnn.conv2d legalization to the target-specific handler
    registered on ``qnn_conv2d_legalize`` (a ``tvm.target.generic_func``)."""
    return qnn_conv2d_legalize(attrs, inputs, types)
# Registering QNN Conv2DTranspose legalization function.
@reg.register_qnn_legalize("qnn.conv2d_transpose")
def legalize_qnn_conv2d_transpose(attrs, inputs, types):
    """Dispatch qnn.conv2d_transpose legalization to the target-specific
    handler registered on ``qnn_conv2d_transpose_legalize``."""
    return qnn_conv2d_transpose_legalize(attrs, inputs, types)
# Registering QNN dense legalization function.
@reg.register_qnn_legalize("qnn.dense")
def legalize_qnn_dense(attrs, inputs, types):
    """Dispatch qnn.dense legalization to the target-specific handler
    registered on ``qnn_dense_legalize`` (a ``tvm.target.generic_func``)."""
    return qnn_dense_legalize(attrs, inputs, types)
def register_qnn_unary_op_legalize(op_name, floating_point_func):
    """Register unary qnn op for legalization via table lookup op.

    The op is replaced by an integer lookup table built by sampling
    ``floating_point_func`` over the input dtype's value range.
    """
    def legalize_qnn_unary_op(attrs, inputs, types):
        # Unary qnn ops take (x, in_scale, in_zero_point, out_scale,
        # out_zero_point) — see the corresponding wrappers in qnn.py.
        return create_integer_lookup_op(
            input_arg=inputs[0],
            floating_point_func=floating_point_func,
            in_scale=inputs[1],
            in_zero_point=inputs[2],
            out_scale=inputs[3],
            out_zero_point=inputs[4],
            in_dtype=types[0].dtype,
            out_dtype=types[0].dtype,
        )
    return reg.register_qnn_legalize(op_name, legalize_qnn_unary_op)
def hardswish_func(x):
    """Floating point reference for hard-swish: ``x * relu6(x + 3) / 6``."""
    clipped = np.clip(x + 3.0, 0.0, 6.0)
    return x * clipped / 6.0
# Table-lookup legalizations for elementwise qnn ops: each op is lowered to a
# lookup table computed from the given floating point reference function.
register_qnn_unary_op_legalize("qnn.sqrt", np.sqrt)
register_qnn_unary_op_legalize("qnn.rsqrt", lambda arr: 1 / np.sqrt(arr))
register_qnn_unary_op_legalize("qnn.exp", np.exp)
register_qnn_unary_op_legalize("qnn.erf", special.erf)
register_qnn_unary_op_legalize("qnn.sigmoid", lambda arr: 1 / (1 + np.exp(-arr)))
register_qnn_unary_op_legalize("qnn.hardswish", hardswish_func)
register_qnn_unary_op_legalize("qnn.tanh", np.tanh)
register_qnn_unary_op_legalize("qnn.log", np.log)
register_qnn_unary_op_legalize("qnn.abs", np.abs)
# Default to None. If overridden by target, this will not be run.
# Generic QNN Conv2D legalization function.
@tvm.target.generic_func
def qnn_conv2d_legalize(attrs, inputs, types):
    """Default legalization is None (op left unchanged).

    Targets override this via ``qnn_conv2d_legalize.register(...)`` below.
    """
    return None
# Generic QNN Conv2DTranspose legalization function.
@tvm.target.generic_func
def qnn_conv2d_transpose_legalize(attrs, inputs, types):
    """Convert kernel and data to int16, subtract offsets upfront
    and call into relay.nn.conv2d_transpose.

    The subtraction is done in int16 so that (u)int8 values minus a zero
    point cannot overflow.
    """
    # Collect the input exprs.
    data, kernel, input_zero_point, kernel_zero_point, _, _ = inputs
    # If input zero point is a scalar, we can directly subtract it.
    if len(types[2].shape) == 0:
        shift_data = relay.subtract(
            relay.cast(data, dtype="int16"), relay.cast(input_zero_point, "int16")
        )
    # Otherwise it needs to be broadcast; bias_add(-zp) broadcasts the
    # negated per-channel zero point over the channel axis.
    else:
        shift_data = relay.nn.bias_add(
            relay.cast(data, dtype="int16"),
            -relay.cast(input_zero_point, dtype="int16"),
        )
    # If kernel zero point is a scalar, we can directly subtract it.
    if len(types[3].shape) == 0:
        shift_kernel = relay.subtract(
            relay.cast(kernel, dtype="int16"), relay.cast(kernel_zero_point, "int16")
        )
    # Otherwise it needs to be broadcast.
    else:
        shift_kernel = relay.nn.bias_add(
            relay.cast(kernel, dtype="int16"),
            -relay.cast(kernel_zero_point, dtype="int16"),
        )
    return relay.nn.conv2d_transpose(shift_data, shift_kernel, **attrs)
# Generic QNN Conv2D legalization function.
@tvm.target.generic_func
def qnn_dense_legalize(attrs, inputs, types):
    """Default legalization is None (op left unchanged).

    Targets override this via ``qnn_dense_legalize.register(...)`` below.
    """
    return None
###################
# Helper functions.
###################
def get_scalar_from_constant(expr):
    """Extract the Python scalar held by a rank-0 Relay constant.

    Parameters
    ----------
    expr : relay.Constant
        A scalar constant of dtype int32 or float32.

    Returns
    -------
    int or float
        The wrapped scalar value.
    """
    assert (
        isinstance(expr, relay.Constant) and not expr.data.shape
    ), "Expr is not a constant scalar."
    scalar = expr.data.numpy()
    allowed_dtypes = (np.dtype(np.int32), np.dtype(np.float32))
    assert scalar.dtype in allowed_dtypes, "value must be float32/int32"
    return scalar.item(0)
def _shift(data, zero_point, out_dtype):
    """Re-center a quantized tensor by +/-128 to flip between uint8 and int8.

    Returns the shifted tensor together with the correspondingly shifted
    zero point, both as Relay exprs.
    """
    if out_dtype == "uint8":
        offset = 128
    elif out_dtype == "int8":
        offset = -128
    else:
        raise ValueError("Unsupported out dtype.")
    # Perform the addition in int32 so the intermediate cannot wrap around.
    widened = relay.cast(data, "int32")
    shifted = relay.cast(relay.add(widened, relay.const(offset, "int32")), out_dtype)
    if isinstance(zero_point, relay.Constant):
        zp_value = get_scalar_from_constant(zero_point)
        shifted_zero_point = relay.const(zp_value + offset, "int32")
    else:
        shifted_zero_point = zero_point + relay.const(offset, "int32")
    return (shifted, shifted_zero_point)
# Helper function for lowering in the abscence of fast Int8 arithmetic units.
def helper_no_fast_int8_hw_legalization(attrs, inputs, types, relay_op):
    """Converts QNN operators into a sequence of Relay operators that are friendly to HW that do
    not have fast Int8 arithmetic. For example, for ARM, LLVM utilizes the assembly instructions
    much more efficiently if the convolution or dense operator input datatypes are int16 instead of
    int8. More details are present at https://github.com/apache/tvm/pull/4277.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    # Collect the input exprs.
    data, kernel, input_zero_point, kernel_zero_point, _, _ = inputs
    # Subtract zero points in int16 so (u)int8 minus a zero point cannot overflow.
    shift_data = relay.subtract(
        relay.cast(data, dtype="int16"), relay.cast(input_zero_point, dtype="int16")
    )
    # If kernel zero point is a scalar we can directly subtract it.
    if len(types[3].shape) == 0:
        shift_kernel = relay.subtract(
            relay.cast(kernel, dtype="int16"), relay.cast(kernel_zero_point, dtype="int16")
        )
    # Otherwise it needs to be broadcast over the kernel's output-channel axis.
    else:
        # Determine output axis of kernel for spatial operations.
        if hasattr(attrs, "kernel_layout"):
            output_axis = tvm.tir.layout(attrs["kernel_layout"]).index_of("O")
        # For dense operations, broadcast to [N, K] layout.
        elif isinstance(attrs, relay.op.op_attrs.DenseAttrs):
            output_axis = 0
        # For matrix multiplication instead expand to [K, N] layout.
        elif isinstance(attrs, relay.op.op_attrs.MatmulAttrs):
            output_axis = 1
        else:
            raise TVMError(
                "Legalization of %s is not yet supported with per channel parameters"
                % str(type(attrs))
            )
        shift_kernel = relay.nn.bias_add(
            relay.cast(kernel, dtype="int16"),
            -relay.cast(kernel_zero_point, dtype="int16"),
            output_axis,
        )
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    # Emit the plain (non-qnn) relay op on the zero-centered int16 operands.
    return relay_op(shift_data, shift_kernel, **new_attrs)
# Helper function to change dtypes to uint8 x int8. Intel VNNI instructions prefer this setting.
def helper_change_dtypes_to_uint8_int8(attrs, inputs, types, relay_op):
    """Legalizes QNN conv2d/dense op for Intel HW. VNNI supports u8 x i8 fast conv/MM. If the
    dtypes are already good, we don't transform. Else, we shift the tensor values and zero points
    to change the dtype.

    Converting from int8 to uint8 can be done in following manner.

    Original equation
    scale * (QA - zp_a)
    scale * (QA + 128 - 128 - zp_a)
    scale * ( (QA + 128) - (zp_a + 128))

    Replacing QA + 128 with QA' and (zp_a + 128) with zp_a'
    We get our new quantized uint8 tensor - scale * (QA' - zp_a')

    Similarly we can convert from uint8 to int8.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    # Collect the dtypes.
    data_dtype = types[0].dtype
    kernel_dtype = types[1].dtype
    # Collect the input exprs.
    data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale = inputs
    # VNNI supports u8 x i8 fast conv/MM. Don't do anything if it is already satisfied.
    if data_dtype == "uint8" and kernel_dtype == "int8":
        return None
    # Shift input if necessary. Only the offending operand is re-centered;
    # the scales are unchanged by a +/-128 shift.
    if data_dtype == "int8":
        # Compute (QA + 128) and (zp_a + 128)
        data, input_zero_point = _shift(data, input_zero_point, "uint8")
    # Shift kernel if necessary.
    if kernel_dtype == "uint8":
        # Compute (QA - 128) and (zp_a - 128)
        kernel, kernel_zero_point = _shift(kernel, kernel_zero_point, "int8")
    # Call qnn.conv2d with modified inputs and zero points.
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    return relay_op(
        data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale, **new_attrs
    )
# Helper function to change dtypes to int8 x int8. Cuda dp4a instructions prefer this setting.
def helper_change_dtypes_to_int8(attrs, inputs, types, relay_op):
    """Legalizes QNN conv2d/dense op for Nvidia HW. dp4a supports i8 x i8 fast conv/MM. If the
    dtypes are already good, we dont transform. Else, we shift the tensor values and zero points
    to change the dtype.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    # Collect the dtypes.
    data_dtype = types[0].dtype
    kernel_dtype = types[1].dtype
    # Collect the input exprs.
    data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale = inputs
    # dp4a supports i8 x i8 fast conv/MM. Don't do anything if it is already satisfied.
    if data_dtype == "int8" and kernel_dtype == "int8":
        return None
    # Shift input if necessary.
    if data_dtype == "uint8":
        # Compute (QA - 128) and (zp_a - 128): uint8 -> int8 subtracts 128.
        data, input_zero_point = _shift(data, input_zero_point, "int8")
    # Shift kernel if necessary.
    if kernel_dtype == "uint8":
        # Compute (QA - 128) and (zp_a - 128): uint8 -> int8 subtracts 128.
        kernel, kernel_zero_point = _shift(kernel, kernel_zero_point, "int8")
    # Call qnn.conv2d/qnn.dense with modified inputs and zero points.
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    return relay_op(
        data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale, **new_attrs
    )
def helper_change_dtypes_to_uint8(attrs, inputs, types, relay_op):
    """Helper function to change dtypes to uint8 x uint8.
    Legalizes QNN dense op for Hexagon DSP. It supports fast u8 x u8 vrmpy instruction.

    Converting from int8 to uint8 can be done in following manner:

    Original equation
    scale * (QA - zp_a)
    scale * (QA + 128 - 128 - zp_a)
    scale * ( (QA + 128) - (zp_a + 128))

    Replacing QA + 128 with QA' and (zp_a + 128) with zp_a'
    We get our new quantized uint8 tensor - scale * (QA' - zp_a')

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    # Collect the dtypes.
    data_dtype = types[0].dtype
    kernel_dtype = types[1].dtype
    # Do nothing since it is already uint8.
    if data_dtype == "uint8" and kernel_dtype == "uint8":
        return None
    # Collect the input exprs.
    data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale = inputs
    # Shift input if necessary.
    if data_dtype == "int8":
        # Compute (QA + 128) and (zp_a + 128)
        data, input_zero_point = _shift(data, input_zero_point, "uint8")
    # Shift kernel if necessary.
    if kernel_dtype == "int8":
        # Compute (QA + 128) and (zp_a + 128)
        kernel, kernel_zero_point = _shift(kernel, kernel_zero_point, "uint8")
    # Call qnn.conv2d/qnn.dense with modified inputs and zero points.
    new_attrs = dict(attrs)
    return relay_op(
        data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale, **new_attrs
    )
# Helper function to change dtypes to be same. ARM dotprod instructions prefer this setting.
def helper_change_dtypes_to_be_same(attrs, inputs, types, relay_op):
    """Sometimes MxNet + MLDNN can lead to uint8 x int8 datatypes for the conv inputs. However,
    many devices like ARM prefer the datatypes to be same for the HW units. This helper transforms
    conv2d/dense such that both the dtypes are same.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    def _shift(data, zero_point, out_dtype):
        """Shifts (adds/subtracts) the qnn tensor by 128)

        Unlike the module-level ``_shift``, this local variant requires the
        zero point to be a constant scalar.
        """
        if out_dtype == "uint8":
            shift = 128
        elif out_dtype == "int8":
            shift = -128
        else:
            raise ValueError("Unsupported out dtype.")
        # Widen to int32 so the shift cannot wrap around.
        data_modified = relay.cast(data, "int32")
        data_modified = relay.add(data_modified, relay.const(shift, "int32"))
        data_modified = relay.cast(data_modified, out_dtype)
        zero_point_val = get_scalar_from_constant(zero_point)
        zero_point_modified = relay.const(zero_point_val + shift, "int32")
        return (data_modified, zero_point_modified)
    # Collect the dtypes.
    data_dtype = types[0].dtype
    kernel_dtype = types[1].dtype
    if data_dtype == kernel_dtype:
        return None
    # Collect the input exprs.
    data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale = inputs
    # NOTE: "int8" is a substring of "uint8", so this accepts both dtypes.
    assert (
        "int8" in data_dtype and "int8" in kernel_dtype
    ), "Qnn Conv2D/Dense only accepts uint8 or int8 inputs"
    # Shift input if necessary. The data is re-centered to match the kernel dtype.
    data, input_zero_point = _shift(data, input_zero_point, kernel_dtype)
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    return relay_op(
        data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale, **new_attrs
    )
def is_fast_int8_on_intel():
    """Checks whether the hardware has support for fast Int8 arithmetic operations."""
    target = tvm.target.Target.current(allow_none=False)
    # SSE4.2 availability of the current target's mcpu is used as the gate
    # for the fast int8 path here.
    return target_has_sse42(target.mcpu)
# Helper function to align up given value.
def helper_align_up(value, aligner):
return ((value + aligner) // aligner) * aligner
########################
# ARM CPU legalizations.
########################
@qnn_conv2d_legalize.register("arm_cpu")
def _qnn_conv2d_legalize_arm_cpu(attrs, inputs, types):
    """Legalize qnn.conv2d for arm_cpu.

    Falls back to the int16 (non-fast-int8) lowering on plain ASIMD targets,
    except for non-depthwise NHWC convolutions or targets with dot-product
    support, where the qnn op is kept with matching operand dtypes.
    """
    target = tvm.target.Target.current(allow_none=False)
    is_depthwise = relay.op.strategy.is_depthwise_conv2d(
        types[0].shape,
        attrs["data_layout"],
        types[1].shape,
        attrs["kernel_layout"],
        attrs["groups"],
    )
    use_int8_on_arm = (not is_depthwise) and attrs["data_layout"] == "NHWC"
    other_options = use_int8_on_arm or target.features.has_dotprod
    if target.features.has_asimd and not other_options:
        return helper_no_fast_int8_hw_legalization(attrs, inputs, types, relay.nn.conv2d)
    # ARM prefers the dtypes to be same.
    return helper_change_dtypes_to_be_same(attrs, inputs, types, relay.qnn.op.conv2d)
@qnn_dense_legalize.register("arm_cpu")
def _qnn_dense_legalize_arm_cpu(attrs, inputs, types):
    """Legalize qnn.dense for arm_cpu.

    Uses the int16 (non-fast-int8) lowering on ASIMD targets without
    dot-product support; otherwise keeps qnn.dense with matching dtypes.
    """
    target = tvm.target.Target.current(allow_none=False)
    if target.features.has_asimd and not target.features.has_dotprod:
        return helper_no_fast_int8_hw_legalization(attrs, inputs, types, relay.nn.dense)
    # ARM prefers the dtypes to be same.
    return helper_change_dtypes_to_be_same(attrs, inputs, types, relay.qnn.op.dense)
##########################
# Intel CPU legalizations.
##########################
@qnn_conv2d_legalize.register("cpu")
def _qnn_conv2d_legalize_intel_cpu(attrs, inputs, types):
    """Legalize qnn.conv2d for x86: uint8 x int8 when fast int8 is available,
    else the int16 fallback lowering."""
    # TODO(vvchernov): not only VNNI
    # The VNNI transformations prefer uint8 x int8 datatypes.
    if is_fast_int8_on_intel():
        return helper_change_dtypes_to_uint8_int8(attrs, inputs, types, relay.qnn.op.conv2d)
    return helper_no_fast_int8_hw_legalization(attrs, inputs, types, relay.nn.conv2d)
@qnn_dense_legalize.register("cpu")
def _qnn_dense_legalize_intel_cpu(attrs, inputs, types):
    """Legalize qnn.dense for x86: uint8 x int8 when fast int8 is available,
    else the int16 fallback lowering."""
    # TODO(vvchernov): not only VNNI
    # The VNNI transformations prefer uint8 x int8 datatypes.
    if is_fast_int8_on_intel():
        return helper_change_dtypes_to_uint8_int8(attrs, inputs, types, relay.qnn.op.dense)
    return helper_no_fast_int8_hw_legalization(attrs, inputs, types, relay.nn.dense)
#####################
# CUDA and vulkan legalizations.
#####################
@qnn_conv2d_legalize.register(["cuda", "gpu"])
def _qnn_conv2d_legalize_cuda(attrs, inputs, types):
    """Legalize qnn.conv2d for GPU targets (vulkan/cuda/rocm)."""
    if is_target("vulkan"):
        # prefers the dtypes to be same. Mixed type is not yet supported.
        return helper_change_dtypes_to_be_same(attrs, inputs, types, relay.qnn.op.conv2d)
    if is_target(["cuda", "rocm"]):
        # CUDA prefers both datatypes to be int8.
        return helper_change_dtypes_to_int8(attrs, inputs, types, relay.qnn.op.conv2d)
    return None
@qnn_dense_legalize.register(["cuda", "gpu"])
def _qnn_dense_legalize_cuda(attrs, inputs, types):
    """Legalize qnn.dense for GPU targets (vulkan/cuda/rocm)."""
    if is_target("vulkan"):
        # prefers the dtypes to be same. Mixed type is not yet supported.
        return helper_change_dtypes_to_be_same(attrs, inputs, types, relay.qnn.op.dense)
    if is_target(["cuda", "rocm"]):
        # CUDA prefers both datatypes to be the int8.
        return helper_change_dtypes_to_int8(attrs, inputs, types, relay.qnn.op.dense)
    return None
########################
# Hexagon legalizations.
########################
# Channel multiples required by the Hexagon vrmpy legalizations below:
# input channels are padded to a multiple of 4 and output channels to a
# multiple of 32 (presumably matching vrmpy lane widths — see the hexagon
# legalize functions that consume these).
IN_CHANNEL_VECTOR_LENGTH = 4
OUT_CHANNEL_VECTOR_LENGTH = 32
@qnn_conv2d_legalize.register("hexagon")
def _qnn_conv2d_legalize_hexagon(attrs, inputs, types):
    """Legalize qnn.conv2d op for vrmpy tensorization.

    If the inputs are signed or unsigned int8 and data/kernel layouts are NCHW/OIHW, then the input
    and output channels are padded to be a multiple of 4 and 32 respectively.
    """
    data_layout = attrs["data_layout"]
    kernel_layout = attrs["kernel_layout"]
    if data_layout != "NCHW" or kernel_layout != "OIHW":
        return None
    data_tensor, kernel_tensor = types[0], types[1]
    if "int8" in data_tensor.dtype and "int8" in kernel_tensor.dtype:
        in_channel = data_tensor.shape[1].value
        out_channel = kernel_tensor.shape[0].value
        ic_modified = False
        oc_modified = False
        data, kernel, data_zp, kernel_zp, data_scale, kernel_scale = inputs
        if in_channel % IN_CHANNEL_VECTOR_LENGTH != 0:
            new_in_channel = helper_align_up(in_channel, IN_CHANNEL_VECTOR_LENGTH)
            diff = new_in_channel - in_channel
            # Axis 1 is C for NCHW data and I for OIHW kernels, so the same
            # pad_width zero-pads the input channels of both tensors.
            pad_width = ((0, 0), (0, diff), (0, 0), (0, 0))
            data = relay.nn.pad(data, pad_width=pad_width)
            kernel = relay.nn.pad(kernel, pad_width=pad_width)
            ic_modified = True
        new_out_channel = out_channel
        if out_channel % OUT_CHANNEL_VECTOR_LENGTH != 0:
            new_out_channel = helper_align_up(out_channel, OUT_CHANNEL_VECTOR_LENGTH)
            diff = new_out_channel - out_channel
            kernel = relay.nn.pad(kernel, pad_width=((0, diff), (0, 0), (0, 0), (0, 0)))
            oc_modified = True
            # Pad kernel zero point by 'diff' elements of 0 if it is not scalar
            kernel_zp_tensor = types[3]
            if len(kernel_zp_tensor.shape) != 0:
                assert isinstance(kernel_zp, relay.Constant)
                padded_kernel_zp_np = np.append(kernel_zp.data.numpy(), [0] * diff)
                kernel_zp = relay.const(padded_kernel_zp_np)
            # Pad kernel scale by 'diff' elements of 1.0 if it is not scalar
            kernel_scale_tensor = types[5]
            if len(kernel_scale_tensor.shape) != 0:
                assert isinstance(kernel_scale, relay.Constant)
                padded_kernel_scale_np = np.append(kernel_scale.data.numpy(), [1.0] * diff)
                kernel_scale = relay.const(padded_kernel_scale_np)
        if ic_modified is True or oc_modified is True:
            new_attrs = dict(attrs)
            if oc_modified:
                new_attrs["channels"] = new_out_channel
                out = relay.qnn.op.conv2d(
                    data, kernel, data_zp, kernel_zp, data_scale, kernel_scale, **new_attrs
                )
                # Slice the padded output channels back to the original shape.
                output_tensor = types[6]
                original_out_shape = list(output_tensor.shape)
                out = relay.strided_slice(out, begin=[0, 0, 0, 0], end=original_out_shape)
            else:
                out = relay.qnn.op.conv2d(
                    data, kernel, data_zp, kernel_zp, data_scale, kernel_scale, **new_attrs
                )
            return out
    return None
@qnn_dense_legalize.register("hexagon")
def _qnn_dense_legalize_hexagon(attrs, inputs, types):
    """Legalize qnn.dense op for vrmpy tensorization.

    N dimension of weights should be aligned on vector length. If not, then N dimension is padded
    to be a multiple of 32.
    """
    assert len(types) == 7
    assert len(inputs) == 6
    data_tensor, kernel_tensor = types[0], types[1]
    if "int8" not in data_tensor.dtype or "int8" not in kernel_tensor.dtype:
        return None
    N, _ = kernel_tensor.shape
    if N % OUT_CHANNEL_VECTOR_LENGTH != 0:
        N_padded = helper_align_up(N, OUT_CHANNEL_VECTOR_LENGTH)
        diff = N_padded - N
        data, kernel, data_zp, kernel_zp, data_scale, kernel_scale = inputs
        # Pad weights by 'diff'
        padded_kernel = relay.nn.pad(kernel, pad_width=((0, diff), (0, 0)))
        kernel_zp_tensor, kernel_scale_tensor = types[3], types[5]
        # Pad kernel zero point by 'diff' elements of 0 if it is not scalar
        if len(kernel_zp_tensor.shape) != 0:
            assert isinstance(kernel_zp, relay.Constant)
            assert isinstance(diff, tvm.tir.IntImm)
            padded_kernel_zp_np = np.append(kernel_zp.data.numpy(), [0] * diff.value)
            kernel_zp = relay.const(padded_kernel_zp_np)
        # Pad kernel scale by 'diff' elements of 1.0 if it is not scalar
        if len(kernel_scale_tensor.shape) != 0:
            assert isinstance(kernel_scale, relay.Constant)
            assert isinstance(diff, tvm.tir.IntImm)
            padded_kernel_scale_np = np.append(kernel_scale.data.numpy(), [1.0] * diff.value)
            kernel_scale = relay.const(padded_kernel_scale_np)
        # If units is explicitly specified, it is used to compute the output shape.
        # We need to update units after padding to prevent a type error.
        new_attrs = dict(attrs)
        if attrs["units"] is not None:
            new_attrs["units"] = N + diff
        new_inputs = (data, padded_kernel, data_zp, kernel_zp, data_scale, kernel_scale)
        out = relay.qnn.op.dense(*new_inputs, **new_attrs)
        # Slice the padded N dimension back to the original output shape.
        output_tensor = types[6]
        out = relay.strided_slice(out, begin=[0, 0], end=list(output_tensor.shape))
        return out
    return None
| 25,261 | 35.825073 | 100 | py |
tvm | tvm-main/python/tvm/relay/qnn/op/_make.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Constructor APIs"""
import tvm._ffi
tvm._ffi._init_api("relay.qnn.op._make", __name__)
| 876 | 40.761905 | 62 | py |
tvm | tvm-main/python/tvm/relay/collage/collage.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Mostly helper methods which interface the main C++ Collage implementation with Python.
See relay.transform.CollagePartition for the main Collage entrypoint."""
import logging
import os
import math
import tempfile
import numpy as np
import tvm
from tvm._ffi.registry import register_func, register_object
from tvm.runtime import Object
from . import _ffi_api
# Parameters to use when estimating latency (of both partitions and overall models).
# Runs per measurement repeat passed as `number=` to VirtualMachine.benchmark.
MEASURE_NUMBER = 20
# Number of measurement repeats passed as `repeat=` to VirtualMachine.benchmark.
MEASURE_REPEAT = 5
# Minimum duration (ms) of the single warmup run done before real measurement.
WARMUP_MIN_REPEAT_MS = 250
@register_object("relay.collage.CostEstimator")
class CostEstimator(Object):
"""CostEstimator class"""
def __init__(self):
self.__init_handle_by_constructor__(_ffi_api.CostEstimator)
@register_object("relay.collage.MockCostEstimator")
class MockCostEstimator(Object):
"""MockEstimator class"""
def __init__(self, target_costs, max_estimates=0):
self.__init_handle_by_constructor__(_ffi_api.MockCostEstimator, target_costs, max_estimates)
@register_object("relay.collage.CustomCostEstimator")
class CustomCostEstimator(Object):
"""CustomEstimator class"""
def __init__(self, py_fn_estimator="tvm.relay.collage.estimate_seconds_custom"):
self.__init_handle_by_constructor__(_ffi_api.CustomCostEstimator, py_fn_estimator)
def arg_for(arg_type, device):
    """Returns a test argument of Relay arg_type on device"""
    assert isinstance(arg_type, tvm.ir.TensorType)
    # Fill a host-side array with random values in [-1, 1), cast to the
    # requested dtype, then place it on the target device.
    host_values = np.random.uniform(-1.0, 1.0, size=arg_type.concrete_shape)
    return tvm.nd.array(host_values.astype(arg_type.dtype), device=device)
def vm_estimate_seconds(device, the_vm, func_name, args):
    """Returns the estimated latency, in seconds, of running func_name with args on the_vm."""
    # A single warmup invocation so one-time setup costs are excluded from the
    # measurement that follows.
    warmup_kwargs = dict(repeat=1, number=1, min_repeat_ms=WARMUP_MIN_REPEAT_MS)
    the_vm.benchmark(device, func_name=func_name, **warmup_kwargs, **args)
    # The real measurement: MEASURE_REPEAT rounds of MEASURE_NUMBER runs each.
    measure_kwargs = dict(repeat=MEASURE_REPEAT, number=MEASURE_NUMBER, min_repeat_ms=0)
    return the_vm.benchmark(device, func_name=func_name, **measure_kwargs, **args)
@register_func("tvm.relay.collage.estimate_seconds")
def estimate_seconds(mod, target):
"""Returns the mean execution time of "main" in mod on target with params. The module
may contain "Primitive" functions, possibly with "Compiler" attributes."""
device = tvm.device(target.get_target_device_type())
try:
# Build the module.
logging.info("Compiling module to estimate")
exe = tvm.relay.vm.compile(mod, target)
except RuntimeError as err:
# A build failure indicates the partition is not supported.
# eg trying to build an nn.batch_norm on GPU, which has no schedule since we assume it
# is only ever used with a tuple projection which is rewritten away.
logging.info("Assigning module infinite cost since unable to build: %s", err)
return math.inf
# Finalize compilation
tmp_dir = tempfile.mkdtemp()
code, lib = exe.save()
lib_path = os.path.join(tmp_dir, "library.so")
# TODO(mbs): Avoid nvcc dependency?
lib.export_library(lib_path, workspace_dir=tmp_dir, cc="nvcc")
lib = tvm.runtime.load_module(lib_path)
exe = tvm.runtime.vm.Executable.load_exec(code, lib)
# Benchmark the module.
the_vm = tvm.runtime.vm.VirtualMachine(exe, device)
func_name = "main"
main_args = {v.name_hint: arg_for(v.checked_type, device) for v in mod[func_name].params}
logging.info("Benchmarking module to estimate")
profile = vm_estimate_seconds(device, the_vm, func_name, main_args)
logging.info("profile: %s", profile)
return profile.median # seconds
def make_labelled_dfpattern_partition_rule_wrapper(compiler, pattern_tuple):
    """Returns a DFPatternPartitionRule representing one (label, pattern, predicate) entry from
    the pattern table for external codegen compiler"""
    # Pattern-table entries come in two shapes: (name, pattern) or
    # (name, pattern, predicate).
    if len(pattern_tuple) == 2:
        label, pattern = pattern_tuple
        return _ffi_api.MakeLabelledDFPatternPartitionRule(compiler, label, pattern)
    label, pattern, predicate = pattern_tuple
    return _ffi_api.MakeLabelledDFPatternPartitionRuleWithPredicate(
        compiler, label, pattern, predicate
    )
@register_func("tvm.relay.collage.make_byoc_partition_rule")
def make_byoc_partition_rule(compiler):
"""Returns the PartitionRule for external codegen compiler"""
pattern_table = tvm.relay.op.contrib.get_pattern_table(compiler)
assert (
pattern_table is not None
), f"No pattern table entry was found for BYOC compiler {compiler}"
logging.info(
"Converting %s rules for %s for use in pattern style BYOC lowering/codegen",
len(pattern_table),
compiler,
)
sub_rules = [
make_labelled_dfpattern_partition_rule_wrapper(compiler, pattern_tuple)
for pattern_tuple in pattern_table
]
return _ffi_api.MakePatternBYOCPartitionRule(compiler, sub_rules)
| 5,831 | 36.625806 | 100 | py |
tvm | tvm-main/python/tvm/relay/collage/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for the Collage partitioner."""
import tvm._ffi
tvm._ffi._init_api("relay.collage", __name__)
| 893 | 39.636364 | 62 | py |
tvm | tvm-main/python/tvm/relay/collage/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""relay.collage exports"""
from .collage import (
MEASURE_NUMBER,
MEASURE_REPEAT,
WARMUP_MIN_REPEAT_MS,
CostEstimator,
MockCostEstimator,
CustomCostEstimator,
)
| 971 | 36.384615 | 62 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.