repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/global_avg_pool2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Assumptions:
1) The input is in NCHW layout. Squeezenet is the only model that calls
nn.global_avg_pool2d and the only layout it uses is 'NCHW'.
2) The op takes input data as an argument.
3) Both input and output dtype is float32 and
4) Input is assumed to always be multiple of fixed chunk 32c8h4w.
"""
from tvm import te
from tvm import tir
from tvm import topi
from ..utils import get_layout_transform_fn
def global_avg_pool2d(
    data: te.Tensor,
):
    """Global average pooling over an NCHW input, delegated to topi.

    Parameters
    ----------
    data : te.Tensor
        Input tensor in NCHW layout (see module-level assumptions).

    Returns
    -------
    te.Tensor
        N x C x 1 x 1 tensor holding the spatial mean of each channel.
    """
    pooled = topi.nn.global_pool(data, "avg", "NCHW")
    return pooled
def stir_global_avg_pool2d_schedule(outs: te.Tensor, ins: te.Tensor, input_layout: str):
    """Build the STIR schedule for global_avg_pool2d.

    The input is a multiple of the fixed 32c8h4w chunk while the output is
    NxCx1x1, so only the input buffer receives a layout transform.
    """
    prim_func = te.create_prim_func([ins, outs])
    sched = tir.Schedule(prim_func)
    pool_sum = sched.get_block("adaptive_pool_sum")
    layout_fn = get_layout_transform_fn(input_layout)
    sched.transform_layout(pool_sum, buffer=("read", 0), index_map=layout_fn)
    return sched
| 1,851 | 33.943396 | 89 | py |
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/dwconv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long
"""Hexagon slice dwconv2d compute and schedule"""
import typing
import tvm
from tvm import te
from ..utils import get_layout_transform_fn
def dwconv2d_compute(
    activations: te.Tensor,
    weights: te.Tensor,
    out_shape: typing.Tuple,
    stride: typing.Tuple,
    dilation: typing.Tuple,
    dtype: str,
) -> te.Tensor:
    """Compute for slice dwconv2d op for hexagon.

    This op makes the following assumptions:
    1. It is written for a sliced depthwise convolution with 2d physical buffers.
    2. Activations are in NHWC layout and the filter is in HWIO layout.

    Parameters
    ----------
    activations : te.Tensor
        Input activations padded for inner dimension size
    weights : te.Tensor
        Weights without dilation
    out_shape : typing.Tuple
        The logical output shape without considering input padding
    stride : typing.Tuple
        (stride_height, stride_width)
    dilation : typing.Tuple
        (dilation_height, dilation_width)
    dtype : str
        Accumulation dtype of the output

    Returns
    -------
    output : te.Tensor
        Result of applying the 2D depthwise convolution of weights on input
    """
    kernel_shape = weights.shape
    # Reduction axes over the filter window. The names are kept stable
    # because generated IR identifiers derive from them.
    red_h = tvm.te.reduce_axis((0, kernel_shape[0]), name="reduce_height")
    red_w = tvm.te.reduce_axis((0, kernel_shape[1]), name="reduce_width")
    s_h, s_w = stride
    d_h, d_w = dilation

    def _dw_conv(n, h, w, c):
        # Per-channel (depthwise) multiply-accumulate over the filter window.
        prod = (
            activations[n, h * s_h + red_h * d_h, w * s_w + red_w * d_w, c]
            * weights[red_h, red_w, 0, c]
        ).astype(dtype)
        return tvm.te.sum(prod, axis=[red_h, red_w])

    return tvm.te.compute(out_shape, _dw_conv, name="Output")
def dwconv2d_schedule(
    outs: te.Tensor,
    ins: typing.List[te.Tensor],
    transform_activation_layout: str,
    transform_weights: str,
) -> tvm.tir.Schedule:
    """STIR schedule definition for the compute defined above by dwconv2d_compute.

    - Auto-generated prim_func before applying schedule primitives for reference
    - The below TVMScript code is for dwconv2d with padded input dimensions and a stride of 1x1

    # from tvm.script import tir as T
    @tvm.script.ir_module
    class Module:
        @T.prim_func
        def main(InputTensor: T.Buffer((1, 16, 8, 32), "float16"), Weights: T.Buffer((3, 3, 1, 32), "float16"), Output: T.Buffer((1, 8, 4, 32), "float16")) -> None:
            # function attr dict
            T.func_attr({"global_symbol": "main", "tir.noalias": True})
            # body
            # with T.block("root")
            for i0, i1, i2, i3, i4, i5 in T.grid(1, 8, 4, 32, 3, 3):
                with T.block("Output"):
                    n, h, w, c, reduce_height, reduce_width = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
                    T.reads(InputTensor[n, h + reduce_height, w + reduce_width, c], Weights[reduce_height, reduce_width, 0, c])
                    T.writes(Output[n, h, w, c])
                    with T.init():
                        Output[n, h, w, c] = T.float16(0)
                    Output[n, h, w, c] = Output[n, h, w, c] + InputTensor[n, h + reduce_height, w + reduce_width, c] * Weights[reduce_height, reduce_width, 0, c]

    Parameters
    ----------
    outs : te.Tensor
        The output Tensor as returned by a call to dwconv2d_compute
    ins : typing.List[te.Tensor]
        This is a list of 2 tensors - Input activations and Weights
    transform_activation_layout : str
        The transformation string representing the expected activations layout
    transform_weights : str
        The transformation string representing the expected weights layout

    Returns
    -------
    sch : tvm.tir.Schedule
        The STIR schedule for slice dwconv2d compute
    """
    assert len(ins) == 2, "This schedule expects only 2 inputs - Activations and Weights"
    source_expr = ins + [outs]
    prim_func = tvm.te.create_prim_func(source_expr)
    sch = tvm.tir.Schedule(prim_func)
    # "Output" is the compute block name set by dwconv2d_compute.
    compute = sch.get_block("Output")
    transform_layout_fn = get_layout_transform_fn(transform_activation_layout)
    transform_layout_weights = get_layout_transform_fn(transform_weights)
    # Apply layout_transform for activation
    sch.transform_layout(compute, ins[0].name, transform_layout_fn)
    # Apply layout_transform for weights
    sch.transform_layout(compute, ins[1].name, transform_layout_weights)
    # Apply layout_transform for output; output reuses the activation layout fn.
    sch.transform_layout(compute, outs.name, transform_layout_fn)
    batch, height, width, channel, reduce_height, reduce_width = sch.get_loops(
        compute
    )  # This still returns the original 6d loop
    # Tile the spatial/channel loops. The inner extents (8h, 2x2w, 32c)
    # presumably match the fixed chunk of the sliced layout -- TODO confirm
    # against the layout string passed in transform_activation_layout.
    h_outer, h_inner = sch.split(height, [None, 8])
    w_outer, w_inner = sch.split(width, [None, 4])
    w_inner_outer, w_inner_inner = sch.split(w_inner, [2, 2])
    c_outer, c_inner = sch.split(channel, [None, 32])
    # Outer loops walk chunks; reductions sit between the chunk loops and the
    # innermost (c_inner, w_inner_inner) axes.
    sch.reorder(
        batch,
        h_outer,
        w_outer,
        c_outer,
        h_inner,
        w_inner_outer,
        reduce_height,
        reduce_width,
        c_inner,
        w_inner_inner,
    )
    # Separate the zero-init of the accumulator from the reduction update loop.
    sch.decompose_reduction(compute, reduce_height)
    # NOTE(review): vectorization is intentionally left disabled here.
    # ci_wii = sch.fuse(c_inner, w_inner_inner)
    # sch.vectorize(ci_wii)
    return sch
| 6,336 | 37.640244 | 164 | py |
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Computes and Schedules for Hexagon slice ops. """
from .avg_pool2d import avg_pool2d_NHWC, avg_pool2d_NCHW, avg_pool2d_schedule
from .max_pool2d import max_pool2d_compute, max_pool2d_STIR_schedule
from .add_subtract_multiply import *
from .argmax import argmax_compute, argmax_schedule
from .batch_flatten import batch_flatten_compute, batch_flatten_stir_schedule
from .softmax_slice import *
from .clip import *
from .cast import (
cast_f16_f32_compute,
cast_f16_f32_schedule,
cast_f32_f16_compute,
cast_f32_f16_schedule,
)
from .conv2d import *
from .reshape import reshape_compute, reshape_stir_schedule
from .relu import relu_compute, relu_stir_schedule
from .tanh import tanh_te_compute, tanhf16_schedule
from .dwconv2d import *
from .depth_to_space import d2s_compute, d2s_schedule
from .global_avg_pool2d import *
from .dense import *
| 1,649 | 39.243902 | 77 | py |
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/tanh.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
""" Hexagon tanh slice op compute and schedule """
import tvm
from tvm import te, tir
from ..utils import get_layout_transform_fn
def tanh_te_compute(in_tensor):
    """Elementwise tanh over a 4-D (n, h, w, c) tensor.

    The compute block is named "tanhf16" so schedules can look it up.
    """
    return te.compute(
        in_tensor.shape,
        lambda n, h, w, c: tvm.tir.tanh(in_tensor[n, h, w, c]),
        name="tanhf16",
    )
def tanhf16_stir_sched_nhwc(func, in_layout, out_layout, h_split_factor=8):
    """Schedule for nhwc fp16 to nchw fp16 layout.

    Splits the (n, h, w, c) loops into a chunked order and vectorizes the
    fused innermost (c_inner, w_inner_inner) axis.
    """
    sched = tir.Schedule(func, debug_mask="all")
    blk = "tanhf16"
    n_ax, h_ax, w_ax, c_ax = sched.get_loops(sched.get_block(blk))
    h_out, h_in = sched.split(h_ax, [None, h_split_factor])
    w_out, w_in = sched.split(w_ax, [None, 4])
    c_out, c_in = sched.split(c_ax, [None, 32])
    w_in_out, w_in_in = sched.split(w_in, [None, 2])
    sched.reorder(n_ax, h_out, w_out, c_out, h_in, w_in_out, c_in, w_in_in)
    # Buffer "A" is the input; the output buffer shares the block's name.
    sched.transform_layout(blk, "A", in_layout)
    sched.transform_layout(blk, blk, out_layout)
    sched.vectorize(sched.fuse(c_in, w_in_in))
    return sched
def tanhf16_schedule(tanh_func, in_layout_str, out_layout_str):
    """Resolve the layout strings and build the fp16 tanh STIR schedule."""
    return tanhf16_stir_sched_nhwc(
        tanh_func,
        get_layout_transform_fn(in_layout_str),
        get_layout_transform_fn(out_layout_str),
    )
| 2,228 | 38.105263 | 95 | py |
tvm | tvm-main/python/tvm/topi/hexagon/slice_ops/relu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Hexagon slice relu op"""
from tvm import te, tir, topi
from ..utils import get_layout_transform_fn
def relu_compute(Input):
    """Elementwise ReLU, delegated to the generic topi implementation."""
    return topi.nn.relu(Input)
def relu_te_sched(Output, Input, layout):
    """
    TE schedule for relu.

    Assumes the layout function to be bijective; vectorizes the fused
    two innermost transformed axes.
    """
    sched = te.create_schedule(Output.op)
    sched[Input].transform_layout(layout)
    transformed_axes = sched[Output].transform_layout(layout)
    innermost = sched[Output].fuse(transformed_axes[6], transformed_axes[7])
    sched[Output].vectorize(innermost)
    return sched
def relu_stir_schedule(Input, Output, input_layout, output_layout):
    """
    STIR schedule for relu.

    Assumes the layout function to be bijective; both layouts must be
    "nhwc-8h2w32c2w-2d".
    """
    if input_layout != output_layout or output_layout != "nhwc-8h2w32c2w-2d":
        raise RuntimeError(
            f"Unexpected input_layout, output_layout '{input_layout, output_layout}'"
        )
    sch = tir.Schedule(te.create_prim_func([Input, Output]), debug_mask="all")
    blk = sch.get_block("compute")
    sch.transform_layout(blk, Input.name, get_layout_transform_fn(input_layout))
    sch.transform_layout(blk, Output.name, get_layout_transform_fn(output_layout))
    n_ax, h_ax, w_ax, c_ax = sch.get_loops(blk)
    h_out, h_in = sch.split(h_ax, [None, 8])
    w_out, w_in = sch.split(w_ax, [None, 4])
    c_out, c_in = sch.split(c_ax, [None, 32])
    w_in_out, w_in_in = sch.split(w_in, [None, 2])
    sch.reorder(n_ax, h_out, w_out, c_out, h_in, w_in_out, c_in, w_in_in)
    sch.vectorize(sch.fuse(c_in, w_in_in))
    return sch
| 2,329 | 34.30303 | 85 | py |
tvm | tvm-main/python/tvm/topi/cpp/rocm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for Rocm TOPI ops and schedules"""
import tvm._ffi
# Register the C++-implemented "topi.rocm" functions into this module's namespace.
tvm._ffi._init_api("topi.rocm", "tvm.topi.cpp.rocm")
| 897 | 41.761905 | 62 | py |
tvm | tvm-main/python/tvm/topi/cpp/x86.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for x86 TOPI ops and schedules"""
import tvm._ffi
# Register the C++-implemented "topi.x86" functions into this module's namespace.
tvm._ffi._init_api("topi.x86", "tvm.topi.cpp.x86")
| 894 | 41.619048 | 62 | py |
tvm | tvm-main/python/tvm/topi/cpp/cuda.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for CUDA TOPI ops and schedules"""
import tvm._ffi
# Register the C++-implemented "topi.cuda" functions into this module's namespace.
tvm._ffi._init_api("topi.cuda", "tvm.topi.cpp.cuda")
| 897 | 41.761905 | 62 | py |
tvm | tvm-main/python/tvm/topi/cpp/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for TOPI utility functions"""
import tvm._ffi
# Register the C++-implemented "topi.utils" functions into this module's namespace.
tvm._ffi._init_api("topi.utils", "tvm.topi.cpp.utils")
| 894 | 41.619048 | 62 | py |
tvm | tvm-main/python/tvm/topi/cpp/generic.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for generic TOPI ops and schedules"""
import tvm._ffi
# Register the C++-implemented "topi.generic" functions into this module's namespace.
tvm._ffi._init_api("topi.generic", "tvm.topi.cpp.generic")
| 906 | 42.190476 | 62 | py |
tvm | tvm-main/python/tvm/topi/cpp/nn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for NN TOPI ops and schedules"""
import tvm._ffi
# Register the C++-implemented "topi.nn" functions into this module's namespace.
tvm._ffi._init_api("topi.nn", "tvm.topi.cpp.nn")
| 891 | 41.47619 | 62 | py |
tvm | tvm-main/python/tvm/topi/cpp/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for C++ TOPI ops and schedules"""
from .impl import * # pylint: disable=wildcard-import
from . import cuda
from . import nn
from . import vision
from . import x86
from . import generic
from . import rocm
from . import utils
| 1,018 | 36.740741 | 62 | py |
tvm | tvm-main/python/tvm/topi/cpp/impl.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Load Lib for C++ TOPI ops and schedules"""
import tvm._ffi
# Register the top-level C++ "topi" functions into this module's namespace.
tvm._ffi._init_api("topi", "tvm.topi.cpp")
| 891 | 41.47619 | 62 | py |
tvm | tvm-main/python/tvm/topi/cpp/vision/yolo.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for Yolo TOPI ops and schedules"""
import tvm._ffi
# Register the C++-implemented "topi.vision.yolo" functions into this module's namespace.
tvm._ffi._init_api("topi.vision.yolo", "tvm.topi.cpp.vision.yolo")
| 911 | 42.428571 | 66 | py |
tvm | tvm-main/python/tvm/topi/cpp/vision/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for vision TOPI ops and schedules"""
import tvm._ffi
from . import yolo
# Register the C++-implemented "topi.vision" functions into this module's namespace.
tvm._ffi._init_api("topi.vision", "tvm.topi.cpp.vision")
| 924 | 37.541667 | 62 | py |
tvm | tvm-main/python/tvm/topi/vision/nms.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-error, invalid-name, no-member, too-many-locals, too-many-arguments, undefined-variable, too-many-nested-blocks, too-many-branches, too-many-statements, too-many-function-args
"""Non-maximum suppression operator"""
import tvm
from tvm import te
from tvm.te import hybrid
from tvm.tir import if_then_else
from ..sort import argsort
from ..math import cast
from ..transform import reshape, gather
from .. import reduction
from ..scan import cumsum
from .nms_util import (
binary_search,
collect_selected_indices,
collect_selected_indices_and_scores,
run_all_class_nms,
)
@hybrid.script
def hybrid_rearrange_box_out(data, one, batch_size, num_anchors):
    """Hybrid routine to rearrange nms output to
    move all valid entries to top.

    Parameters
    ----------
    data : tvm.te.Tensor or numpy NDArray
        NMS output. 3-D tensor with shape
        [batch_size, num_anchors, 6].

    one: tvm.tir.const
        Constant one with the same dtype as data.

    batch_size: tvm.tir.IntImm or tvm.tir.Var
        Batch size. We need to pass it in since hybrid script doesn't support
        binding variable to symbolic dim.

    num_anchors: tvm.tir.IntImm or tvm.tir.Var
        Number of anchors.

    Returns
    -------
    output : tvm.te.Tensor or numpy NDArray
        Transformed NMS output. 3-D tensor with shape
        [batch_size, num_anchors, 6].
    """
    elem_length = data.shape[2]
    output = output_tensor((batch_size, num_anchors, elem_length), data.dtype)
    valid_indices = allocate((batch_size,), "int32")

    for i in parallel(batch_size):
        # valid_indices[i] is the next free slot at the front of the output row
        valid_indices[i] = 0
        for j in range(num_anchors):
            # an entry is valid iff its first element is non-negative;
            # copy valid entries forward to compact them at the top
            if data[i, j, 0] >= 0:
                for k in range(elem_length):
                    output[i, valid_indices[i], k] = data[i, j, k]
                valid_indices[i] += 1
            # any slot at or past the compaction point is padded with -1
            if j >= valid_indices[i]:
                for k in range(elem_length):
                    output[i, j, k] = -one
    return output
@hybrid.script
def hybrid_rearrange_indices_out(data, one, batch_size, num_anchors):
    """Hybrid routine to rearrange nms output to
    move all valid entries to top.

    Parameters
    ----------
    data : tvm.te.Tensor or numpy NDArray
        NMS output indices. 2-D tensor with shape
        [batch_size, num_anchors].

    one: tvm.tir.const
        Constant one with the same dtype as data.

    batch_size: tvm.tir.IntImm or tvm.tir.Var
        Batch size. We need to pass it in since hybrid script doesn't support
        binding variable to symbolic dim.

    num_anchors: tvm.tir.IntImm or tvm.tir.Var
        Number of anchors.

    Returns
    -------
    output : tvm.te.Tensor or numpy NDArray
        2-D tensor with shape [batch_size, num_anchors].

    valid_box_count : tvm.te.Tensor or numpy NDArray
        Tensor with shape [batch_size, 1], indicates
        the valid number of boxes.
    """
    valid_box_count = output_tensor((batch_size, 1), "int32")
    output = output_tensor((batch_size, num_anchors), data.dtype)
    valid_indices = allocate((batch_size,), "int32")

    for i in parallel(batch_size):
        # valid_indices[i] is the next free slot at the front of the output row
        valid_indices[i] = 0
        for j in range(num_anchors):
            # non-negative values are valid indices; compact them to the front
            if data[i, j] >= 0:
                output[i, valid_indices[i]] = data[i, j]
                valid_indices[i] += 1
            # out-of-range values are replaced with index 0
            # NOTE(review): a value > num_anchors is also >= 0, so it gets
            # written twice and counted twice -- confirm this is intended
            if data[i, j] > num_anchors or data[i, j] < -num_anchors:
                output[i, valid_indices[i]] = 0
                valid_indices[i] += 1
            # slots at or past the compaction point are padded with -1
            if j >= valid_indices[i]:
                output[i, j] = -one
        valid_box_count[i, 0] = valid_indices[i]
    return output, valid_box_count
@hybrid.script
def hybrid_get_valid_counts(
    data, score_threshold, id_index, score_index, one, batch_size, num_anchors
):
    """Hybrid routine to get valid count of bounding boxes
    given a score threshold. Also moves valid boxes to the
    top of input data.

    Parameters
    ----------
    data : tvm.te.Tensor or numpy NDArray
        Input data. 3-D tensor with shape [batch_size, num_anchors, 6]
        or [batch_size, num_anchors, 5].

    score_threshold : tvm.te.Tensor
        Lower limit of score for valid bounding boxes.

    id_index : tvm.tir.const
        index of the class categories, -1 to disable.

    score_index: tvm.tir.const
        Index of the scores/confidence of boxes.

    one: tvm.tir.const
        Constant one with the same dtype as data.

    batch_size: tvm.tir.IntImm or tvm.tir.Var
        Batch size. We need to pass it in since hybrid script doesn't support
        binding variable to symbolic dim.

    num_anchors: tvm.tir.IntImm or tvm.tir.Var
        Number of anchors.

    Returns
    -------
    valid_count : tvm.te.Tensor or numpy NDArray
        1-D tensor for valid number of boxes.

    out_tensor : tvm.te.Tensor or numpy NDArray
        Rearranged data tensor.

    out_indices: tvm.te.Tensor or numpy NDArray
        Related index in input data.
    """
    box_data_length = data.shape[2]
    valid_count = output_tensor((batch_size,), "int32")
    out_tensor = output_tensor((batch_size, num_anchors, box_data_length), data.dtype)
    out_indices = output_tensor((batch_size, num_anchors), "int32")

    for i in parallel(batch_size):
        valid_count[i] = 0
        for j in range(num_anchors):
            score = data[i, j, score_index]
            # a box is kept when its score exceeds the threshold and, if class
            # filtering is enabled (id_index >= 0), its class id is non-negative
            if score > score_threshold and (id_index < 0 or data[i, j, id_index] >= 0):
                for k in range(box_data_length):
                    out_tensor[i, valid_count[i], k] = data[i, j, k]
                # remember where this kept box came from in the input
                out_indices[i, valid_count[i]] = j
                valid_count[i] += 1
            # slots at or past the compaction point are padded with -1
            if j >= valid_count[i]:
                for k in range(box_data_length):
                    out_tensor[i, j, k] = -one
                out_indices[i, j] = -1
    return valid_count, out_tensor, out_indices
def get_valid_counts(data, score_threshold=0, id_index=0, score_index=1):
    """Get valid count of bounding boxes given a score threshold.
    Also moves valid boxes to the top of input data.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input data. 3-D tensor with shape [batch_size, num_anchors, 6]
        or [batch_size, num_anchors, 5].

    score_threshold : optional, float
        Lower limit of score for valid bounding boxes.

    id_index : optional, int
        index of the class categories, -1 to disable.

    score_index: optional, int
        Index of the scores/confidence of boxes.

    Returns
    -------
    valid_count : tvm.te.Tensor
        1-D tensor for valid number of boxes.

    out_tensor : tvm.te.Tensor
        Rearranged data tensor.

    out_indices: tvm.te.Tensor or numpy NDArray
        Related index in input data.
    """
    # Python scalars must become tir constants before entering hybrid script.
    if isinstance(score_threshold, (float, int)):
        score_threshold = tvm.tir.const(score_threshold, dtype=data.dtype)
    return hybrid_get_valid_counts(
        data,
        score_threshold,
        tvm.tir.const(id_index, "int32"),
        tvm.tir.const(score_index, "int32"),
        tvm.tir.const(1, data.dtype),
        data.shape[0],
        data.shape[1],
    )
@hybrid.script
def hybrid_nms(
    data,
    sorted_index,
    valid_count,
    indices,
    batch_size,
    num_anchors,
    max_output_size,
    iou_threshold,
    force_suppress,
    top_k,
    coord_start,
    score_index,
    id_index,
    return_indices,
    zero,
    one,
):
    """Hybrid routing for non-maximum suppression.

    Parameters
    ----------
    data: tvm.te.Tensor or numpy NDArray
        Bounding boxes with class and score. 3-D tensor with shape
        [batch_size, num_anchors, 6]. It could be the second output
        out_tensor of get_valid_counts.

    sorted_index : tvm.te.Tensor or numpy NDArray
        Bounding box indexes sorted by score, with shape
        [batch_size, num_anchors].

    valid_count : tvm.te.Tensor or numpy NDArray
        1-D tensor for valid number of boxes. It could be the output
        valid_count of get_valid_counts.

    indices : tvm.te.Tensor or numpy.NDArray
        indices in original tensor, with shape [batch_size, num_anchors],
        represents the index of box in original data. It could be the third
        output out_indices of get_valid_counts. The values in the second
        dimension are like the output of arange(num_anchors) if get_valid_counts
        is not used before non_max_suppression.

    batch_size: tvm.tir.IntImm or tvm.tir.Var
        Batch size. We need to pass it in since hybrid script doesn't support
        binding variable to symbolic dim.

    num_anchors: tvm.tir.IntImm or tvm.tir.Var
        The number of anchors.

    max_output_size : tvm.te.Tensor
        Max number of output valid boxes for each instance.
        Return all valid boxes if max_output_size < 0.

    iou_threshold : tvm.te.Tensor
        Overlapping(IoU) threshold to suppress object with smaller score.

    force_suppress : tvm.tir.const
        Whether to suppress all detections regardless of class_id.

    top_k : tvm.tir.const
        Keep maximum top k detections before nms, -1 for no limit.

    coord_start : tvm.tir.const
        Start index of the consecutive 4 coordinates.

    score_index: tvm.tir.const
        Index of the scores/confidence of boxes.

    id_index : tvm.tir.const
        index of the class categories, -1 to disable.

    return_indices : tvm.tir.const
        Whether to return box indices in input data.

    zero: tvm.tir.const
        Constant zero with the same dtype as data.

    one: tvm.tir.const
        Constant one with the same dtype as data.

    Returns
    -------
    output : tvm.te.Tensor
        3-D tensor with shape [batch_size, num_anchors, 6]
        or [batch_size, num_anchors, 5].

    box_indices: tvm.te.Tensor
        2-D tensor with shape [batch_size, num_anchors].
    """
    box_data_length = data.shape[2]

    # box_indices is the expected indices of boxes
    box_indices = output_tensor((batch_size, num_anchors), sorted_index.dtype)
    output = output_tensor(
        (
            batch_size,
            num_anchors,
            box_data_length,
        ),
        data.dtype,
    )

    for i in range(batch_size):
        if iou_threshold > 0:
            if valid_count[i] > 0:
                # Reorder output: copy boxes in descending score order,
                # keeping at most top_k of them when top_k > 0.
                nkeep = valid_count[i]
                if 0 < top_k < nkeep:
                    nkeep = top_k
                for j in parallel(nkeep):
                    for k in range(box_data_length):
                        output[i, j, k] = data[i, sorted_index[i, j], k]
                    box_indices[i, j] = sorted_index[i, j]
                if 0 < top_k < valid_count[i]:
                    # Entries dropped by top_k are marked invalid.
                    for j in parallel(valid_count[i] - nkeep):
                        for k in range(box_data_length):
                            output[i, j + nkeep, k] = -one
                        box_indices[i, j + nkeep] = -1

            # Apply nms
            box_start_idx = coord_start
            batch_idx = i
            num_valid_boxes = 0

            for j in range(valid_count[i]):
                if num_valid_boxes == max_output_size:
                    # Already emitted max_output_size boxes: invalidate the rest.
                    for k in range(box_data_length):
                        output[i, j, k] = -one
                    box_indices[i, j] = -1

                elif output[i, j, score_index] > 0:
                    box_a_idx = j
                    is_valid_box = 1

                    # a_l: left, a_t: top, a_r: right, a_b: bottom
                    # min/max normalise the corners, which are not guaranteed
                    # to be stored in (left, top, right, bottom) order.
                    a_l = min(
                        output[batch_idx, box_a_idx, box_start_idx],
                        output[batch_idx, box_a_idx, box_start_idx + 2],
                    )
                    a_t = min(
                        output[batch_idx, box_a_idx, box_start_idx + 1],
                        output[batch_idx, box_a_idx, box_start_idx + 3],
                    )
                    a_r = max(
                        output[batch_idx, box_a_idx, box_start_idx],
                        output[batch_idx, box_a_idx, box_start_idx + 2],
                    )
                    a_b = max(
                        output[batch_idx, box_a_idx, box_start_idx + 1],
                        output[batch_idx, box_a_idx, box_start_idx + 3],
                    )

                    # check if current box j is valid by calculating iou with
                    # all existing valid boxes
                    for k in range(j):
                        check_iou = 0
                        if (
                            is_valid_box == 1
                            and k < j
                            and output[i, k, score_index] > 0
                            and (id_index < 0 or output[i, k, id_index] >= 0)
                        ):
                            if force_suppress:
                                check_iou = 1
                            elif id_index < 0 or output[i, j, id_index] == output[i, k, id_index]:
                                # Without force_suppress, only boxes of the
                                # same class suppress each other.
                                check_iou = 1

                        if check_iou > 0:
                            box_b_idx = k

                            # b_l: left, b_t: top, b_r: right, b_b: bottom
                            b_l = min(
                                output[batch_idx, box_b_idx, box_start_idx],
                                output[batch_idx, box_b_idx, box_start_idx + 2],
                            )
                            b_t = min(
                                output[batch_idx, box_b_idx, box_start_idx + 1],
                                output[batch_idx, box_b_idx, box_start_idx + 3],
                            )
                            b_r = max(
                                output[batch_idx, box_b_idx, box_start_idx],
                                output[batch_idx, box_b_idx, box_start_idx + 2],
                            )
                            b_b = max(
                                output[batch_idx, box_b_idx, box_start_idx + 1],
                                output[batch_idx, box_b_idx, box_start_idx + 3],
                            )

                            # Overlapping width and height
                            w = max(zero, min(a_r, b_r) - max(a_l, b_l))
                            h = max(zero, min(a_b, b_b) - max(a_t, b_t))

                            # Overlapping area
                            area = h * w

                            # total area of the figure formed by box a and box b
                            # except for overlapping area
                            u = (a_r - a_l) * (a_b - a_t) + (b_r - b_l) * (b_b - b_t) - area

                            # get the iou
                            iou = zero if u <= zero else area / u

                            if iou >= iou_threshold:
                                is_valid_box = 0

                    if is_valid_box == 0:
                        for k in range(box_data_length):
                            output[i, j, k] = -one
                        box_indices[i, j] = -1
                    else:
                        num_valid_boxes += 1

        else:
            # iou_threshold <= 0: no suppression, pass boxes through as-is.
            for j in parallel(valid_count[i]):
                for k in range(box_data_length):
                    output[i, j, k] = data[i, j, k]
                box_indices[i, j] = j

        # Set invalid entry to be -1
        for j in parallel(num_anchors - valid_count[i]):
            for k in range(box_data_length):
                output[i, j + valid_count[i], k] = -one
            box_indices[i, j + valid_count[i]] = -1

        if return_indices:
            # Map surviving positions back to the original box indices.
            for j in range(valid_count[i]):
                idx = box_indices[i, j]
                if box_indices[i, j] >= 0:
                    box_indices[i, j] = indices[i, idx]

    return output, box_indices
@tvm.target.generic_func
def non_max_suppression(
    data,
    valid_count,
    indices,
    max_output_size=-1,
    iou_threshold=0.5,
    force_suppress=False,
    top_k=-1,
    coord_start=2,
    score_index=1,
    id_index=0,
    return_indices=True,
    invalid_to_bottom=False,
):
    """Non-maximum suppression operator for object detection.

    Parameters
    ----------
    data : tvm.te.Tensor
        3-D tensor with shape [batch_size, num_anchors, 6] or
        [batch_size, num_anchors, 5].

    valid_count : tvm.te.Tensor
        1-D tensor for valid number of boxes.

    indices : tvm.te.Tensor
        2-D tensor with shape [batch_size, num_anchors], mapping each row
        of `data` back to its original box index.

    max_output_size : optional, int or tvm.te.Tensor
        Max number of output valid boxes for each instance.
        Return all valid boxes if the value is less than 0.

    iou_threshold : optional, float or tvm.te.Tensor
        Non-maximum suppression threshold.

    force_suppress : optional, boolean
        Whether to suppress all detections regardless of class_id.

    top_k : optional, int
        Keep maximum top k detections before nms, -1 for no limit.

    coord_start : required, int
        Start index of the consecutive 4 coordinates.

    score_index : optional, int
        Index of the scores/confidence of boxes.

    id_index : optional, int
        Index of the class categories, -1 to disable.

    return_indices : optional, boolean
        Whether to return box indices in input data.

    invalid_to_bottom : optional, boolean
        Whether to move all valid bounding boxes to the top.

    Returns
    -------
    out : tvm.te.Tensor or tuple of tvm.te.Tensor
        If return_indices is False: a 3-D tensor with shape
        [batch_size, num_anchors, 6] or [batch_size, num_anchors, 5].
        If return_indices is True: a tuple of two 2-D tensors, with shapes
        [batch_size, num_anchors] and [batch_size, num_valid_anchors].
    """
    batch_size = data.shape[0]
    num_anchors = data.shape[1]

    # Promote plain Python scalars to TIR constants.
    if isinstance(max_output_size, int):
        max_output_size = tvm.tir.const(max_output_size, dtype="int32")
    if isinstance(iou_threshold, float):
        iou_threshold = tvm.tir.const(iou_threshold, dtype=data.dtype)

    # Rank all boxes by confidence, highest score first.
    scores = te.compute((batch_size, num_anchors), lambda b, a: data[b, a, score_index])
    sorted_idx = argsort(scores, valid_count=valid_count, axis=1, is_ascend=False)

    out, box_indices = hybrid_nms(
        data,
        sorted_idx,
        valid_count,
        indices,
        batch_size,
        num_anchors,
        max_output_size,
        iou_threshold,
        tvm.tir.const(force_suppress, dtype="bool"),
        tvm.tir.const(top_k, dtype="int32"),
        tvm.tir.const(coord_start, dtype="int32"),
        tvm.tir.const(score_index, dtype="int32"),
        tvm.tir.const(id_index, dtype="int32"),
        tvm.tir.const(return_indices, dtype="bool"),
        zero=tvm.tir.const(0, dtype=data.dtype),
        one=tvm.tir.const(1, dtype=data.dtype),
    )

    if return_indices:
        return hybrid_rearrange_indices_out(
            box_indices,
            one=tvm.tir.const(1, dtype="int32"),
            batch_size=batch_size,
            num_anchors=num_anchors,
        )

    if invalid_to_bottom:
        out = hybrid_rearrange_box_out(
            out,
            one=tvm.tir.const(1, dtype=data.dtype),
            batch_size=batch_size,
            num_anchors=num_anchors,
        )
    return out
def _nms_loop(
    ib,
    batch_size,
    top_k,
    iou_threshold,
    max_output_size,
    valid_count,
    on_new_valid_box_func,
    on_new_invalidated_box_func,
    needs_bbox_check_func,
    calc_overlap_func,
    out_scores,
    num_valid_boxes,
):
    """Emit the sequential-outer / parallel-inner NMS loop via ir_builder.

    For each batch row, boxes are scanned in score order; each accepted box
    invalidates (via ``out_scores``) every later box whose overlap exceeds
    ``iou_threshold``. The behaviour at accept/invalidate/check points is
    delegated to the caller through the *_func callbacks.
    """

    def nms_inner_loop(ib, i, j, nkeep, num_valid_boxes_local):
        # The box j is valid, invalidate other boxes that overlap with j above iou_threshold
        on_new_valid_box_func(ib, 0, num_valid_boxes_local[0], i, j)
        num_valid_boxes_local[0] += 1

        # Only boxes after j can still be suppressed by j.
        num_boxes_to_check = nkeep - (j + 1)

        with ib.for_range(0, num_boxes_to_check, name="_k", kind="parallel") as _k:
            k = j + 1 + _k

            with ib.if_scope(
                tvm.tir.all(
                    k < nkeep,
                    out_scores[i, k] > 0,  # is the box k still valid?
                    needs_bbox_check_func(i, j, k),
                )
            ):
                iou = calc_overlap_func(i, j, k)

                with ib.if_scope(iou >= iou_threshold):
                    # invalidate the box k
                    out_scores[i, k] = -1.0
                    on_new_invalidated_box_func(i, k)

    with ib.for_range(0, batch_size, name="i") as i:
        # Honour top_k when positive, otherwise consider all valid boxes.
        nkeep = if_then_else(tvm.tir.all(top_k > 0, top_k < valid_count[i]), top_k, valid_count[i])
        max_output_size = if_then_else(max_output_size > 0, max_output_size, nkeep)

        with ib.if_scope(tvm.tir.all(iou_threshold > 0, valid_count[i] > 0)):
            num_valid_boxes_local = ib.allocate(
                "int32", (1,), name="num_valid_boxes_local", scope="local"
            )
            box_idx = ib.allocate("int32", (1,), name="box_idx", scope="local")
            num_valid_boxes_local[0] = 0
            box_idx[0] = 0

            # Apply nms
            # No need to do more iteration if we have already reached max_output_size boxes
            with ib.while_loop(
                tvm.tir.all(box_idx[0] < nkeep, num_valid_boxes_local[0] < max_output_size)
            ):
                # Proceed to the inner loop if the box with id box_idx is still valid
                with ib.if_scope(out_scores[i, box_idx[0]] > -1.0):
                    nms_inner_loop(ib, i, box_idx[0], nkeep, num_valid_boxes_local)
                box_idx[0] += 1

            num_valid_boxes[i] = num_valid_boxes_local[0]

        with ib.else_scope():
            # No suppression requested or no valid boxes in this row.
            num_valid_boxes[i] = 0

    return ib.get()
def _get_valid_box_count(scores, score_threshold):
    """Count, per (batch * class) row, how many sorted scores beat the threshold.

    ``scores`` must be sorted in descending order along axis 1; the count is
    found with one binary search per row.
    """
    batch_classes, num_boxes = scores.shape

    def _gen_ir(scores_in, counts_out):
        # One parallel binary search per (batch * class) row.
        builder = tvm.tir.ir_builder.create()
        scores_ptr = builder.buffer_ptr(scores_in)
        counts_ptr = builder.buffer_ptr(counts_out)
        with builder.for_range(0, batch_classes, name="i", kind="parallel") as row:
            binary_search(builder, row, num_boxes, scores_ptr, score_threshold, counts_ptr)
        return builder.get()

    scores_buf = tvm.tir.decl_buffer(scores.shape, scores.dtype, "scores_buf", data_alignment=8)

    return te.extern(
        [(batch_classes,)],
        [scores],
        lambda ins, outs: _gen_ir(ins[0], outs[0]),
        dtype=["int32"],
        in_buffers=[scores_buf],
        name="searchsorted",
        tag="searchsorted",
    )
def _collect_selected_indices_ir(num_class, selected_indices, num_detections, row_offsets, out):
    """Generate IR that flattens per-(batch, class) selections into ``out``.

    Each written output row holds (batch_id, class_id, box_index) as int64,
    starting at the row offset computed for that (batch, class) pair.
    """
    batch_classes, _ = selected_indices.shape

    builder = tvm.tir.ir_builder.create()

    sel = builder.buffer_ptr(selected_indices)
    counts = builder.buffer_ptr(num_detections)
    offsets = builder.buffer_ptr(row_offsets)
    result = builder.buffer_ptr(out)

    with builder.for_range(0, batch_classes, name="i", kind="parallel") as row:
        row = cast(row, "int64")
        # A flattened row index encodes both the batch and the class.
        batch_id = row // num_class
        class_id = row % num_class

        with builder.for_range(0, counts[row], name="j") as box:
            dst = offsets[row] + box
            result[dst, 0] = batch_id
            result[dst, 1] = class_id
            result[dst, 2] = cast(sel[row, box], "int64")

    return builder.get()
def _collect_selected_indices_and_scores_ir(
    selected_indices,
    selected_scores,
    num_detections,
    row_offsets,
    num_total_detections,
    collected_indices,
    collected_scores,
):
    """Generate IR gathering per-class NMS selections into per-batch outputs.

    Valid (class_id, box_index) pairs and their scores are written
    contiguously at the row offsets given by the exclusive scan in
    ``row_offsets``; every remaining slot of the batch is filled with
    zeros / 0.0 after all valid detections.
    """
    batch_size, num_class = row_offsets.shape
    num_boxes = selected_indices.shape[1]

    ib = tvm.tir.ir_builder.create()

    selected_indices = ib.buffer_ptr(selected_indices)
    selected_scores = ib.buffer_ptr(selected_scores)
    num_detections = ib.buffer_ptr(num_detections)
    row_offsets = ib.buffer_ptr(row_offsets)
    num_total_detections = ib.buffer_ptr(num_total_detections)
    collected_indices = ib.buffer_ptr(collected_indices)
    collected_scores = ib.buffer_ptr(collected_scores)
    zero = cast(0, "int64")

    with ib.for_range(0, batch_size * num_class, name="i", kind="parallel") as i:
        i = cast(i, "int64")
        # The flattened loop index encodes both batch and class.
        batch_id = i // num_class
        class_id = i % num_class

        with ib.for_range(0, num_boxes, name="j") as j:
            with ib.if_scope(j < num_detections[batch_id, class_id]):
                # Valid detection: pack it right after this class's offset.
                offset = row_offsets[batch_id, class_id] + j
                collected_indices[batch_id, offset, 0] = class_id
                collected_indices[batch_id, offset, 1] = cast(selected_indices[i, j], "int64")
                collected_scores[batch_id, offset] = selected_scores[i, j]
            with ib.else_scope():
                # Padding slot: placed after all valid detections of the
                # batch, in an order stable per class.
                offset = (
                    num_total_detections[batch_id]
                    + class_id * num_boxes
                    - row_offsets[batch_id, class_id]
                    + j
                    - num_detections[batch_id, class_id]
                )
                collected_indices[batch_id, offset, 0] = zero
                collected_indices[batch_id, offset, 1] = zero
                collected_scores[batch_id, offset] = 0.0

    return ib.get()
def all_class_non_max_suppression(
    boxes,
    scores,
    max_output_boxes_per_class,
    iou_threshold,
    score_threshold,
    output_format="onnx",
):
    """Non-maximum suppression operator for object detection, corresponding to ONNX
    NonMaxSuppression and TensorFlow combined_non_max_suppression.
    NMS is performed for each class separately.

    Parameters
    ----------
    boxes : tvm.te.Tensor
        3-D tensor with shape (batch_size, num_boxes, 4)

    scores: tvm.te.Tensor
        3-D tensor with shape (batch_size, num_classes, num_boxes)

    max_output_boxes_per_class : int or tvm.te.Tensor, optional
        The maximum number of output selected boxes per class

    iou_threshold : float or tvm.te.Tensor, optional
        IoU test threshold

    score_threshold : float or tvm.te.Tensor, optional
        Score threshold to filter out low score boxes early

    output_format : str, optional
        "onnx" or "tensorflow", see below.

    Returns
    -------
    out : list of tvm.te.Tensor
        If `output_format` is "onnx", the output is two tensors. The first is `indices` of size
        `(batch_size * num_class* num_boxes , 3)` and the second is a scalar tensor
        `num_total_detection` of shape `(1,)` representing the total number of selected
        boxes. The three values in `indices` encode batch, class, and box indices.
        Rows of `indices` are ordered such that selected boxes from batch 0, class 0 come
        first, in descending of scores, followed by boxes from batch 0, class 1 etc. Out of
        `batch_size * num_class* num_boxes` rows of indices, only the first `num_total_detection`
        rows are valid.

        If `output_format` is "tensorflow", the output is three tensors, the first
        is `indices` of size `(batch_size, num_class * num_boxes , 2)`, the second is `scores` of
        size `(batch_size, num_class * num_boxes)`, and the third is `num_total_detection` of size
        `(batch_size,)` representing the total number of selected boxes per batch. The two values
        in `indices` encode class and box indices. Of num_class * num_boxes boxes in `indices` at
        batch b, only the first `num_total_detection[b]` entries are valid. The second axis of
        `indices` and `scores` are sorted within each class by box scores, but not across classes.
        So the box indices and scores for the class 0 come first in a sorted order, followed by
        the class 1 etc.
    """
    batch, num_class, num_boxes = scores.shape
    # Fold batch and class into one axis so each row is an independent NMS job.
    scores = reshape(scores, (batch * num_class, num_boxes))
    sorted_indices = argsort(scores, axis=1, is_ascend=False, dtype="int32")
    sorted_scores = gather(scores, 1, sorted_indices)
    # Per row, count how many sorted scores beat score_threshold.
    valid_count = _get_valid_box_count(sorted_scores, score_threshold)

    selected_indices, selected_scores, num_detections = run_all_class_nms(
        boxes,
        sorted_scores,
        sorted_indices,
        valid_count,
        max_output_boxes_per_class,
        iou_threshold,
        _nms_loop,
        return_scores=(output_format == "tensorflow"),
    )

    if output_format == "onnx":
        row_offsets = cumsum(num_detections, exclusive=True, dtype="int64")
        num_total_detections = reduction.sum(cast(num_detections, "int64"), axis=1)
        selected_indices = collect_selected_indices(
            num_class, selected_indices, num_detections, row_offsets, _collect_selected_indices_ir
        )
        return [selected_indices, num_total_detections]

    # TensorFlow format: counts and offsets are organised per batch.
    num_detections_per_batch = reshape(num_detections, (batch, num_class))
    row_offsets = cumsum(num_detections_per_batch, exclusive=True, dtype="int64", axis=1)
    num_total_detections = reduction.sum(cast(num_detections_per_batch, "int64"), axis=1)

    selected_indices, selected_scores = collect_selected_indices_and_scores(
        selected_indices,
        selected_scores,
        num_detections_per_batch,
        row_offsets,
        num_total_detections,
        _collect_selected_indices_and_scores_ir,
    )

    return [selected_indices, selected_scores, num_total_detections]
| 31,285 | 34.714612 | 200 | py |
tvm | tvm-main/python/tvm/topi/vision/nms_util.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Common utilities used in Non-maximum suppression operators"""
import tvm
from tvm import te
def _get_boundaries(output, box_idx):
    """Return (left, top, right, bottom) of the box starting at ``box_idx``.

    The stored corner pairs are not guaranteed to be ordered, so min/max
    normalises them.
    """
    x0 = output[box_idx]
    y0 = output[box_idx + 1]
    x1 = output[box_idx + 2]
    y1 = output[box_idx + 3]
    return (
        tvm.te.min(x0, x1),
        tvm.te.min(y0, y1),
        tvm.te.max(x0, x1),
        tvm.te.max(y0, y1),
    )
def calculate_overlap(out_tensor, box_a_idx, box_b_idx):
    """Compute the IoU (intersection over union) of two boxes."""
    a_l, a_t, a_r, a_b = _get_boundaries(out_tensor, box_a_idx)
    b_l, b_t, b_r, b_b = _get_boundaries(out_tensor, box_b_idx)

    # Width/height of the intersection rectangle, clamped at zero.
    inter_w = tvm.te.max(0.0, tvm.te.min(a_r, b_r) - tvm.te.max(a_l, b_l))
    inter_h = tvm.te.max(0.0, tvm.te.min(a_b, b_b) - tvm.te.max(a_t, b_t))
    inter = inter_h * inter_w
    # Union = sum of the two areas minus the double-counted intersection.
    union = (a_r - a_l) * (a_b - a_t) + (b_r - b_l) * (b_b - b_t) - inter
    return tvm.tir.Select(union <= 0.0, 0.0, inter / union)
def binary_search(ib, y, num_boxes, scores, score_threshold, out):
    """Binary search for score_threshold on scores sorted in descending order.

    Writes into ``out[y]`` the index of the first element of row ``y`` that
    is <= ``score_threshold`` — i.e. the number of entries strictly above
    the threshold.
    """
    lo = ib.allocate("int32", (1,), name="lo", scope="local")
    hi = ib.allocate("int32", (1,), name="hi", scope="local")

    lo[0] = 0
    hi[0] = num_boxes

    with ib.while_loop(lo[0] < hi[0]):
        mid = (hi[0] + lo[0]) >> 1
        with ib.if_scope(scores[y, mid] > score_threshold):
            # mid is still above the threshold: the boundary lies to its right.
            lo[0] = mid + 1
        with ib.else_scope():
            hi[0] = mid

    out[y] = lo[0]
def collect_selected_indices(num_class, selected_indices, num_detections, row_offsets, ir):
    """Flatten per-(batch, class) NMS selections into one (N, 3) index tensor.

    Parameters
    ----------
    num_class : int
        Number of classes per batch.

    selected_indices : tvm.te.Tensor
        (batch_size * num_classes, num_boxes) box indices chosen by the
        core NMS loop.

    num_detections : tvm.te.Tensor
        (batch_size * num_classes,) number of boxes selected per row.

    row_offsets : tvm.te.Tensor
        (batch_size * num_classes,) exclusive scan of ``num_detections``.

    ir : function
        IR generator for CPU or GPU, see its usage in vision/nms.py and
        cuda/nms.py.

    Returns
    -------
    out : tvm.te.Tensor
        Indices of shape (batch_size * num_class * num_boxes, 3), ordered so
        that selections from batch 0 / class 0 come first (in descending
        score order), then batch 0 / class 1, and so on.
    """
    batch_class, num_boxes = selected_indices.shape

    def _invoke_ir(ins, outs):
        # ins = [selected_indices, num_detections, row_offsets]
        return ir(num_class, ins[0], ins[1], ins[2], outs[0])

    return te.extern(
        [(batch_class * num_boxes, 3)],
        [selected_indices, num_detections, row_offsets],
        _invoke_ir,
        dtype=["int64"],
        name="collect_indices",
        tag="collect_indices",
    )
def collect_selected_indices_and_scores(
    selected_indices, selected_scores, num_detections, row_offsets, num_total_detections, ir
):
    """Collect selected indices and scores from the core NMS loop into one linear output

    Parameters
    ----------
    selected_indices: tvm.te.Tensor
        2-D tensor with shape (batch_size * num_classes, num_boxes), representing the indices
        of selected boxes by the core NMS loop.

    selected_scores: tvm.te.Tensor
        2-D tensor with shape (batch_size * num_classes, num_boxes), representing the scores
        of selected boxes by the core NMS loop.

    num_detections tvm.te.Tensor
        2-D tensor with shape (batch_size, num_classes), representing
        the number of boxes selected by the core NMS loop, per batch and class

    row_offsets tvm.te.Tensor
        2-D tensor with shape (batch_size, num_classes), this should be the exclusive scan
        of num_detections along axis 1

    num_total_detections : tvm.te.Tensor
        1-D tensor with shape (batch_size,), the total number of selected boxes per batch.

    ir : function
        A function to generate IR for CPU or GPU, see its usage in vision/nms.py and cuda/nms.py

    Returns
    -------
    out : [tvm.te.Tensor, tvm.te.Tensor]
        The output is two tensors. The first is indices of size
        (batch_size, num_class* num_boxes, 2), and the second is scores of size
        (batch_size, num_class* num_boxes).
    """
    batch_size, num_class = row_offsets.shape
    num_boxes = selected_indices.shape[1]
    return te.extern(
        [(batch_size, num_class * num_boxes, 2), (batch_size, num_class * num_boxes)],
        [selected_indices, selected_scores, num_detections, row_offsets, num_total_detections],
        lambda ins, outs: ir(ins[0], ins[1], ins[2], ins[3], ins[4], outs[0], outs[1]),
        dtype=["int64", "float32"],
        name="collect_indices_and_scores",
        tag="collect_indices_and_scores",
    )
def _all_class_nms_ir(
    boxes,
    sorted_scores,
    sorted_indices,
    valid_count,
    batch_class,
    num_class,
    num_anchors,
    iou_threshold,
    max_output_size_per_class,
    box_indices,
    selected_scores,
    num_valid_boxes,
    nms_loop,
):
    """Generate IR for the core all-class NMS kernel.

    Wires the box/score buffers and per-(batch, class) callbacks into the
    supplied ``nms_loop`` generator. ``selected_scores`` may be None when
    scores do not need to be collected.
    """
    ib = tvm.tir.ir_builder.create()
    boxes = ib.buffer_ptr(boxes)
    sorted_scores = ib.buffer_ptr(sorted_scores)
    sorted_indices = ib.buffer_ptr(sorted_indices)
    valid_count = ib.buffer_ptr(valid_count)
    box_indices = ib.buffer_ptr(box_indices)
    num_valid_boxes = ib.buffer_ptr(num_valid_boxes)

    if selected_scores is not None:
        selected_scores = ib.buffer_ptr(selected_scores)

    # Promote plain Python scalars to TIR constants.
    if isinstance(iou_threshold, float):
        iou_threshold = tvm.tir.FloatImm("float32", iou_threshold)

    if isinstance(max_output_size_per_class, int):
        max_output_size_per_class = tvm.tir.const(max_output_size_per_class)

    def calc_overlap(i, j, k):
        # i indexes a (batch, class) pair; boxes are shared across the
        # classes of one batch, hence the flattened base offset below.
        offset_j = sorted_indices[i, j] * 4
        offset_k = sorted_indices[i, k] * 4
        batch_id = i // num_class
        base_bbox_idx = batch_id * num_anchors * 4
        return calculate_overlap(
            boxes,
            base_bbox_idx + offset_j,
            base_bbox_idx + offset_k,
        )

    def on_new_valid_box(ib, tid, num_current_valid_box, i, j):
        # Record the selection only when tid == 0 (guard for callers that
        # invoke this from multiple threads).
        with ib.if_scope(tid + 0 == 0):
            box_indices[i, num_current_valid_box] = sorted_indices[i, j]

            if selected_scores is not None:
                selected_scores[i, num_current_valid_box] = sorted_scores[i, j]

    def on_new_invalidated_box(*_):
        pass

    def needs_bbox_check(*_):
        # All-class NMS compares every pair within a (batch, class) row.
        return tvm.tir.const(True)

    return nms_loop(
        ib,
        batch_class,
        tvm.tir.IntImm("int32", -1),  # top_k
        iou_threshold,
        max_output_size_per_class,
        valid_count,
        on_new_valid_box,
        on_new_invalidated_box,
        needs_bbox_check,
        calc_overlap,
        sorted_scores,
        num_valid_boxes,
    )
def run_all_class_nms(
    boxes,
    sorted_scores,
    sorted_indices,
    valid_count,
    max_output_size_per_class,
    iou_threshold,
    nms_loop,
    return_scores=False,
):
    """The core all class NMS routine

    Parameters
    ----------
    boxes : tvm.te.Tensor
        3-D tensor with shape (batch_size, num_boxes, 4)

    sorted_scores: tvm.te.Tensor
        2-D tensor with shape (batch_size * num_classes, num_boxes)
        One of the outputs from argsort

    sorted_indices: tvm.te.Tensor
        2-D tensor with shape (batch_size * num_classes, num_boxes)
        The other output from argsort

    valid_count: tvm.te.Tensor
        1-D tensor with shape (batch_size * num_classes,), representing
        the number of boxes whose score is above score_threshold, per batch and class

    max_output_size_per_class : int or tvm.te.Tensor, optional
        The maximum number of output selected boxes per class

    iou_threshold : float or tvm.te.Tensor, optional
        IoU test threshold

    nms_loop : function
        A core NMS loop, see its usage in vision/nms.py and cuda/nms.py

    return_scores : bool, optional
        Whether or not to return selected scores, needed by the tensorflow output format.

    Returns
    -------
    out : a list of tvm.te.Tensor
        The output is three tensors, the first and second are indices and scores of size
        (batch_size * num_class, num_boxes), and the third is a tensor
        num_selected_boxes of shape (batch_size * num_class,) representing the total number of
        selected boxes per batch and class. If return_scores is False, the second output is
        None.
    """
    batch, num_boxes, _ = boxes.shape
    batch_class = sorted_scores.shape[0]
    num_class = batch_class // batch

    if return_scores is False:
        # Two outputs: per-row selected indices and per-row selection counts.
        selected_indices, num_detections = te.extern(
            [(batch_class, num_boxes), (1, batch_class)],
            [boxes, sorted_scores, sorted_indices, valid_count],
            lambda ins, outs: _all_class_nms_ir(
                ins[0],  # boxes
                ins[1],  # sorted_scores
                ins[2],  # sorted_indices
                ins[3],  # valid_count
                batch_class,
                num_class,
                num_boxes,
                iou_threshold,
                max_output_size_per_class,
                outs[0],  # box_indices
                None,  # scores
                outs[1],  # num_selected_boxes
                nms_loop,
            ),
            dtype=["int32", "int32"],
            name="all_class_nms",
            tag="all_class_nms",
        )
        return selected_indices, None, num_detections

    # Three outputs: indices, scores and counts (tensorflow output format).
    return te.extern(
        [(batch_class, num_boxes), (batch_class, num_boxes), (1, batch_class)],
        [boxes, sorted_scores, sorted_indices, valid_count],
        lambda ins, outs: _all_class_nms_ir(
            ins[0],  # boxes
            ins[1],  # sorted_scores
            ins[2],  # sorted_indices
            ins[3],  # valid_count
            batch_class,
            num_class,
            num_boxes,
            iou_threshold,
            max_output_size_per_class,
            outs[0],  # box_indices
            outs[1],  # selected scores
            outs[2],  # num_selected_boxes
            nms_loop,
        ),
        dtype=["int32", "float32", "int32"],
        name="all_class_nms",
        tag="all_class_nms",
    )
| 11,318 | 32.389381 | 96 | py |
tvm | tvm-main/python/tvm/topi/vision/reorg.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
REORG Operator
====================
Reorg operator, used in darknet.
"""
from __future__ import absolute_import as _abs
from .. import cpp
def reorg(data, stride):
    """Reorg forward operators.

    Thin wrapper dispatching to the C++ implementation
    (``cpp.vision.reorg``).

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]

    stride : int
        Stride value for reorganization

    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    return cpp.vision.reorg(data, stride)
| 1,335 | 30.069767 | 66 | py |
tvm | tvm-main/python/tvm/topi/vision/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""VISION network operators"""
from __future__ import absolute_import as _abs
from . import ssd
from .reorg import *
from .nms import *
from .rcnn import *
| 977 | 36.615385 | 62 | py |
tvm | tvm-main/python/tvm/topi/vision/ssd/multibox.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, undefined-variable
"""SSD multibox operators"""
import tvm
from tvm.te import hybrid
from tvm.tir import exp, sqrt
from tvm import topi
from ..nms import non_max_suppression
@hybrid.script
def hybrid_multibox_prior(data, sizes, ratios, steps, offsets):
    """Hybrid routing for multibox_prior operator.

    Parameters
    ----------
    data : tvm.te.Tensor or numpy NDArray
        4-D tensor with shape [batch, channel, height, width]]

    sizes : tvm ConsExpr
        Sizes for anchor boxes.

    ratios : tvm ConsExpr
        Ratios for anchor boxes.

    steps : tvm ConsExpr
        Priorbox step across y and x, -1 for auto calculation.

    offsets : tvm ConsExpr
        Priorbox center offsets, y and x respectively.

    Returns
    -------
    output : tvm.te.Tensor or numpy NDArray
        3-D tensor with shape [1, h_in * w_in * (num_sizes + num_ratios - 1), 4]
    """
    in_height = data.shape[2]
    in_width = data.shape[3]
    num_sizes = len(sizes)
    num_ratios = len(ratios)
    num_boxes = in_height * in_width * (num_sizes + num_ratios - 1)
    output = output_tensor((1, num_boxes, 4), "float32")
    # Auto-computed step places one anchor center per input pixel.
    steps_h = steps[0] * 1.0 if steps[0] > 0 else 1.0 / in_height
    steps_w = steps[1] * 1.0 if steps[1] > 0 else 1.0 / in_width
    offset_h = offsets[0]
    offset_w = offsets[1]

    # Need to define var out of const_range + if
    w = 0.0
    h = 0.0

    for i in parallel(in_height):
        center_h = (i + offset_h) * steps_h
        for j in range(in_width):
            center_w = (j + offset_w) * steps_w
            for k in const_range(num_sizes + num_ratios - 1):
                if k < num_sizes:
                    # One anchor per entry of `sizes`; the height/width ratio
                    # of the feature map rescales the horizontal extent.
                    w = float32(sizes[k] * in_height) / in_width / 2.0
                    h = sizes[k] / 2.0
                else:
                    # Remaining anchors come from `ratios`, based on sizes[0].
                    w = (
                        float32(sizes[0] * in_height)
                        / in_width
                        * sqrt(ratios[k - num_sizes + 1] * 1.0)
                        / 2.0
                    )
                    h = sizes[0] / sqrt(ratios[k - num_sizes + 1] * 1.0) / 2.0
                count = (
                    i * in_width * (num_sizes + num_ratios - 1)
                    + j * (num_sizes + num_ratios - 1)
                    + k
                )
                # Box stored as (left, top, right, bottom) around the center.
                output[0, count, 0] = center_w - w
                output[0, count, 1] = center_h - h
                output[0, count, 2] = center_w + w
                output[0, count, 3] = center_h + h

    return output
def multibox_prior(data, sizes=(1,), ratios=(1,), steps=(-1, -1), offsets=(0.5, 0.5), clip=False):
    """Generate prior(anchor) boxes from data, sizes and ratios.

    Thin wrapper over :func:`hybrid_multibox_prior` that converts the Python
    tuples into TVM runtime objects and optionally clips the result to [0, 1].

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, c_in, h_in, w_in]
    sizes : tuple of float
        Tuple of sizes for anchor boxes.
    ratios : tuple of float
        Tuple of ratios for anchor boxes.
    steps : Tuple of float
        Priorbox step across y and x, -1 for auto calculation.
    offsets : tuple of int
        Priorbox center offsets, y and x respectively.
    clip : boolean
        Whether to clip out-of-boundary boxes.

    Returns
    -------
    out : tvm.te.Tensor
        3-D tensor with shape [1, h_in * w_in * (num_sizes + num_ratios - 1), 4]
    """
    # Hybrid script arguments must be TVM objects, not plain tuples.
    converted = [tvm.runtime.convert(arg) for arg in (sizes, ratios, steps, offsets)]
    boxes = hybrid_multibox_prior(data, *converted)
    return topi.clip(boxes, 0, 1) if clip else boxes
@hybrid.script
def _hybridy_transform_loc(anchor, pred_loc, variance, clip, batch_idx, anchor_idx):
    """Transform prior anchor box to output box through location predictions.

    Decodes one anchor: (px, py) shift the anchor center and (pw, ph) scale
    its extent via exp, each weighted by the corresponding variance term.
    Returns a length-4 tensor [xmin, ymin, xmax, ymax]; when `clip` is true
    every coordinate is clamped into [0, 1].
    """
    # Anchor corners: left, top, right, bottom.
    al = anchor[0, anchor_idx, 0]
    at = anchor[0, anchor_idx, 1]
    ar = anchor[0, anchor_idx, 2]
    ab = anchor[0, anchor_idx, 3]
    # Predicted location deltas for this batch element.
    px = pred_loc[batch_idx, 0]
    py = pred_loc[batch_idx, 1]
    pw = pred_loc[batch_idx, 2]
    ph = pred_loc[batch_idx, 3]
    # Per-coordinate decode variances.
    vx = variance[0]
    vy = variance[1]
    vw = variance[2]
    vh = variance[3]
    output = output_tensor((4,), pred_loc.dtype)
    # Anchor in center/size form.
    aw = ar - al
    ah = ab - at
    ax = (al + ar) / 2.0
    ay = (at + ab) / 2.0
    # Decoded center and half extents.
    ox = px * vx * aw + ax
    oy = py * vy * ah + ay
    ow = exp(pw * vw) * aw / 2.0
    oh = exp(ph * vh) * ah / 2.0
    output[0] = max(0.0, min(1.0, ox - ow)) if clip else ox - ow
    output[1] = max(0.0, min(1.0, oy - oh)) if clip else oy - oh
    output[2] = max(0.0, min(1.0, ox + ow)) if clip else ox + ow
    output[3] = max(0.0, min(1.0, oy + oh)) if clip else oy + oh
    return output
@hybrid.script
def hybrid_multibox_transform_loc(cls_prob, loc_pred, anchor, clip, threshold, variances):
    """Hybrid routing for transform location in multibox_detection operator.

    For every anchor, picks the best-scoring non-background class, drops
    detections below `threshold`, decodes the box via
    `_hybridy_transform_loc`, and compacts valid detections to the front of
    each batch row.

    Parameters
    ----------
    cls_prob : tvm.te.Tensor or numpy NDArray
        3-D tensor of class probabilities.
    loc_pred : tvm.te.Tensor or numpy NDArray
        2-D tensor of location regression predictions.
    anchor : tvm.te.Tensor or numpy NDArray
        3-D tensor of prior anchor boxes.
    clip : tvm.tir.const
        Whether to clip out-of-boundary boxes.
    threshold : tvm.tir.const
        Threshold to be a positive prediction.
    variances : tvm.nd.NDArray
        Variances to be decoded from box regression output.

    Returns
    -------
    out_loc : tvm.te.Tensor or numpy NDArray
        3-D tensor of transformed location.
    valid_count : tvm.te.Tensor or numpy NDArray
        1-D tensor of valid counts for boxes.
    """
    batch_size = cls_prob.shape[0]
    num_classes = cls_prob.shape[1]
    num_anchors = cls_prob.shape[2]
    # Scratch buffer holding the 4 location deltas for the current anchor.
    pred_coord = allocate(
        (
            batch_size,
            4,
        ),
        loc_pred.dtype,
    )
    out_loc = output_tensor((batch_size, num_anchors, 6), loc_pred.dtype)
    valid_count = output_tensor((batch_size,), "int32")
    for i in parallel(batch_size):
        valid_count[i] = 0
        for j in range(num_anchors):
            # Find the predicted class id and probability
            score = -1.0
            cls_id = 0
            for k in range(num_classes):
                # Class 0 is background; only foreground classes compete.
                if k > 0:
                    temp = cls_prob[i, k, j]
                    cls_id = k if temp > score else cls_id
                    score = max(temp, score)
            # Low-confidence detections are demoted to background.
            if cls_id > 0 and score < threshold:
                cls_id = 0
            # [id, prob, xmin, ymin, xmax, ymax]
            # Remove background, restore original id
            if cls_id > 0:
                out_loc[i, valid_count[i], 0] = cls_id - 1.0
                out_loc[i, valid_count[i], 1] = score
                for l in range(4):
                    pred_coord[i, l] = loc_pred[i, j * 4 + l]
                out_coord = _hybridy_transform_loc(anchor, pred_coord, variances, clip, i, j)
                out_loc[i, valid_count[i], 2] = out_coord[0]
                out_loc[i, valid_count[i], 3] = out_coord[1]
                out_loc[i, valid_count[i], 4] = out_coord[2]
                out_loc[i, valid_count[i], 5] = out_coord[3]
                valid_count[i] += 1
    return out_loc, valid_count
def multibox_transform_loc(
    cls_prob, loc_pred, anchor, clip=True, threshold=0.01, variances=(0.1, 0.1, 0.2, 0.2)
):
    """Location transformation for multibox detection

    Wraps :func:`hybrid_multibox_transform_loc`, converting the plain Python
    arguments into the TVM constants/objects the hybrid script expects.

    Parameters
    ----------
    cls_prob : tvm.te.Tensor
        Class probabilities.
    loc_pred : tvm.te.Tensor
        Location regression predictions.
    anchor : tvm.te.Tensor
        Prior anchor boxes.
    clip : boolean
        Whether to clip out-of-boundary boxes.
    threshold : float
        Threshold to be a positive prediction.
    variances : tuple of float
        Variances to be decoded from box regression output.

    Returns
    -------
    ret : tuple of tvm.te.Tensor
    """
    clip_flag = tvm.tir.const(clip, "bool")
    score_threshold = tvm.tir.const(threshold, "float32")
    variance_expr = tvm.runtime.convert(variances)
    return hybrid_multibox_transform_loc(
        cls_prob, loc_pred, anchor, clip_flag, score_threshold, variance_expr
    )
def multibox_detection(
    cls_prob,
    loc_pred,
    anchor,
    clip=True,
    threshold=0.01,
    nms_threshold=0.5,
    force_suppress=False,
    variances=(0.1, 0.1, 0.2, 0.2),
    nms_topk=-1,
):
    """Convert multibox detection predictions.

    Runs location transformation followed by non-maximum suppression.

    Parameters
    ----------
    cls_prob : tvm.te.Tensor
        Class probabilities.
    loc_pred : tvm.te.Tensor
        Location regression predictions.
    anchor : tvm.te.Tensor
        Prior anchor boxes.
    clip : boolean
        Whether to clip out-of-boundary boxes.
    threshold : float
        Threshold to be a positive prediction.
    nms_threshold : float
        Non-maximum suppression threshold.
    force_suppress : boolean
        Whether to suppress all detections regardless of class_id.
    variances : tuple of float
        Variances to be decoded from box regression output.
    nms_topk : int
        Keep maximum top k detections before nms, -1 for no limit.

    Returns
    -------
    out : tvm.te.Tensor
        3-D tensor with shape (batch_size, num_anchors, 6)
    """
    transformed = multibox_transform_loc(cls_prob, loc_pred, anchor, clip, threshold, variances)
    out_loc, valid_count = transformed[0], transformed[1]
    # valid_count doubles as the "indices" argument for the NMS kernel.
    return non_max_suppression(
        out_loc,
        valid_count,
        valid_count,
        max_output_size=-1,
        iou_threshold=nms_threshold,
        force_suppress=force_suppress,
        top_k=nms_topk,
        return_indices=False,
    )
| 10,388 | 28.939481 | 98 | py |
tvm | tvm-main/python/tvm/topi/vision/ssd/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""VISION network operators"""
from __future__ import absolute_import as _abs
from .multibox import *
| 923 | 39.173913 | 62 | py |
tvm | tvm-main/python/tvm/topi/vision/rcnn/roi_pool.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""ROI pool operator"""
import tvm
from tvm import te
from ...utils import get_const_tuple
def roi_pool_nchw(data, rois, pooled_size, spatial_scale):
    """ROI pool operator in NCHW layout.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, channel, height, width]
    rois : tvm.te.Tensor
        2-D with shape [num_roi, 5]. The last dimension should be in format of
        [batch_index, w_start, h_start, w_end, h_end]
    pooled_size : int or list/tuple of two ints
        output size, or [out_height, out_width]
    spatial_scale : float
        Ratio of input feature map height (or w) to raw image height (or w). Equals the reciprocal
        of total stride in convolutional layers, which should be in range (0.0, 1.0]

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [num_roi, channel, pooled_size, pooled_size]
    """
    dtype = rois.dtype
    _, channel, height, width = get_const_tuple(data.shape)
    num_roi, _ = get_const_tuple(rois.shape)
    # Scalar pooled_size means a square output window.
    if isinstance(pooled_size, int):
        pooled_size_h = pooled_size_w = pooled_size
    else:
        pooled_size_h, pooled_size_w = pooled_size
    def _pool(i, c, ph, pw):
        # Compute the max over the (ph, pw) bin of ROI i, channel c.
        roi = rois[i]
        batch_index = roi[0].astype("int32")
        roi_start_w, roi_start_h, roi_end_w, roi_end_h = roi[1], roi[2], roi[3], roi[4]
        # Scale ROI corners from image space to feature-map space.
        roi_start_h = te.round(roi_start_h * spatial_scale).astype("int32")
        roi_start_w = te.round(roi_start_w * spatial_scale).astype("int32")
        roi_end_h = te.round(roi_end_h * spatial_scale).astype("int32")
        roi_end_w = te.round(roi_end_w * spatial_scale).astype("int32")
        # force malformed ROIs to be 1x1
        roi_h = tvm.te.max(roi_end_h - roi_start_h + 1, tvm.tir.const(1, "int32"))
        roi_w = tvm.te.max(roi_end_w - roi_start_w + 1, tvm.tir.const(1, "int32"))
        bin_h = roi_h.astype(dtype) / pooled_size_h
        bin_w = roi_w.astype(dtype) / pooled_size_w
        # use epsilon to prevent floating point precision loss in floor/ceil
        epsilon = tvm.tir.const(0.00001, dtype)
        hstart = te.floor(ph * bin_h + epsilon).astype("int32")
        wstart = te.floor(pw * bin_w + epsilon).astype("int32")
        hend = te.ceil((ph + 1) * bin_h - epsilon).astype("int32")
        wend = te.ceil((pw + 1) * bin_w - epsilon).astype("int32")
        # Clamp the bin to the feature-map bounds.
        hstart = tvm.te.min(tvm.te.max(hstart + roi_start_h, 0), height)
        wstart = tvm.te.min(tvm.te.max(wstart + roi_start_w, 0), width)
        hend = tvm.te.min(tvm.te.max(hend + roi_start_h, 0), height)
        wend = tvm.te.min(tvm.te.max(wend + roi_start_w, 0), width)
        non_empty = tvm.tir.all(hstart < hend, wstart < wend)
        # Empty bins reduce over zero instead of dtype's min value.
        min_value = lambda dtype: tvm.tir.if_then_else(
            non_empty, tvm.te.min_value(dtype), tvm.tir.const(0.0, dtype)
        )
        # pylint: disable=unnecessary-lambda
        _max = te.comm_reducer(lambda x, y: tvm.te.max(x, y), min_value, name="max")
        rh = te.reduce_axis((0, hend - hstart), "rh")
        rw = te.reduce_axis((0, wend - wstart), "rw")
        return _max(data[batch_index, c, hstart + rh, wstart + rw], axis=[rh, rw])
    return te.compute((num_roi, channel, pooled_size_h, pooled_size_w), _pool, tag="pool,roi_pool")
| 4,084 | 41.552083 | 99 | py |
tvm | tvm-main/python/tvm/topi/vision/rcnn/roi_align.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Roi align operator"""
import tvm
from tvm import te
from ...utils import get_const_tuple
from ...cpp.utils import bilinear_sample_nchw, bilinear_sample_nhwc
def _sample_common(
    i,
    c,
    ph,
    pw,
    rois,
    pooled_size_h,
    pooled_size_w,
    spatial_scale,
    sample_ratio,
    dtype,
    avg_mode,
    bilinear_func,
):
    """Compute one pooled ROI-align output value (shared NCHW/NHWC core).

    Splits ROI `i` into a pooled_size_h x pooled_size_w grid, bilinearly
    samples roi_bin_grid_h x roi_bin_grid_w points inside bin (ph, pw) via
    `bilinear_func`, and reduces them with mean (avg_mode) or max.
    """
    roi = rois[i]
    batch_index = roi[0].astype("int32")
    roi_start_w, roi_start_h, roi_end_w, roi_end_h = roi[1], roi[2], roi[3], roi[4]
    # Scale ROI corners from image space to feature-map space.
    roi_start_h *= spatial_scale
    roi_end_h *= spatial_scale
    roi_start_w *= spatial_scale
    roi_end_w *= spatial_scale
    # force malformed ROIs to be 1x1
    roi_h = tvm.te.max(roi_end_h - roi_start_h, tvm.tir.const(1.0, dtype))
    roi_w = tvm.te.max(roi_end_w - roi_start_w, tvm.tir.const(1.0, dtype))
    bin_h = roi_h / pooled_size_h
    bin_w = roi_w / pooled_size_w
    # sample_ratio <= 0 means adaptive: one sample per feature-map cell.
    if sample_ratio > 0:
        roi_bin_grid_h = roi_bin_grid_w = tvm.tir.const(sample_ratio, "int32")
    else:
        roi_bin_grid_h = te.ceil(roi_h / pooled_size_h).astype("int32")
        roi_bin_grid_w = te.ceil(roi_w / pooled_size_w).astype("int32")
    count = roi_bin_grid_h * roi_bin_grid_w
    rh = te.reduce_axis((0, roi_bin_grid_h), name="rh")
    rw = te.reduce_axis((0, roi_bin_grid_w), name="rw")
    # Move the origin to the top-left corner of bin (ph, pw).
    roi_start_h += ph * bin_h
    roi_start_w += pw * bin_w
    if avg_mode:
        return te.sum(
            bilinear_func(
                batch_index,
                c,
                # +0.5 centers each sample inside its sub-cell.
                roi_start_h + (rh + 0.5) * bin_h / roi_bin_grid_h,
                roi_start_w + (rw + 0.5) * bin_w / roi_bin_grid_w,
            )
            / count,
            axis=[rh, rw],
        )
    # max mode
    return te.max(
        bilinear_func(
            batch_index,
            c,
            roi_start_h + (rh + 0.5) * bin_h / roi_bin_grid_h,
            roi_start_w + (rw + 0.5) * bin_w / roi_bin_grid_w,
        ),
        axis=[rh, rw],
    )
def roi_align_nchw(data, rois, pooled_size, spatial_scale, mode, sample_ratio=-1):
    """ROI align operator in NCHW layout.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, channel, height, width]
    rois : tvm.te.Tensor
        2-D with shape [num_roi, 5]. The last dimension should be in format of
        [batch_index, w_start, h_start, w_end, h_end]
    pooled_size : int or list/tuple of two ints
        output size, or [out_height, out_width]
    spatial_scale : float
        Ratio of input feature map height (or w) to raw image height (or w). Equals the reciprocal
        of total stride in convolutional layers, which should be in range (0.0, 1.0]
    mode : int or str
        There are two modes, average and max. For the average mode, you can pass b'avg' or 0, and
        for the max mode, you can pass b'max' or 1.
    sample_ratio : int
        Optional sampling ratio of ROI align, using adaptive size by default.

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [num_roi, channel, pooled_size, pooled_size]
    """
    avg_mode = mode in (b"avg", 0)
    max_mode = mode in (b"max", 1)
    assert avg_mode or max_mode, "Mode must be avg or max. Please pass in a valid mode."
    dtype = rois.dtype
    _, channel, height, width = get_const_tuple(data.shape)
    num_roi, _ = get_const_tuple(rois.shape)
    if isinstance(pooled_size, int):
        pooled_h = pooled_w = pooled_size
    else:
        pooled_h, pooled_w = pooled_size

    def _interp(n, c, y, x):
        # Samples more than one pixel outside the image contribute zero;
        # in-range samples are clamped and bilinearly interpolated.
        out_of_bounds = tvm.tir.any(y < -1.0, x < -1.0, y > height, x > width)
        yc = tvm.te.min(tvm.te.max(y, 0.0), height - 1)
        xc = tvm.te.min(tvm.te.max(x, 0.0), width - 1)
        sampled = bilinear_sample_nchw(data, (n, c, yc, xc), height - 1, width - 1)
        return tvm.tir.if_then_else(out_of_bounds, 0.0, sampled)

    def _pool(n, c, ph, pw):
        return _sample_common(
            n,
            c,
            ph,
            pw,
            rois,
            pooled_h,
            pooled_w,
            spatial_scale,
            sample_ratio,
            dtype,
            avg_mode,
            _interp,
        )

    return te.compute((num_roi, channel, pooled_h, pooled_w), _pool, tag="pool,roi_align_nchw")
def roi_align_nhwc(data, rois, pooled_size, spatial_scale, mode, sample_ratio=-1):
    """ROI align operator in NHWC layout.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, height, width, channel]
    rois : tvm.te.Tensor
        2-D with shape [num_roi, 5]. The last dimension should be in format of
        [batch_index, w_start, h_start, w_end, h_end]
    pooled_size : int or list/tuple of two ints
        output size, or [out_height, out_width]
    spatial_scale : float
        Ratio of input feature map height (or w) to raw image height (or w). Equals the reciprocal
        of total stride in convolutional layers, which should be in range (0.0, 1.0]
    mode : int or str
        There are two modes, average and max. For the average mode, you can pass b'avg' or 0, and
        for the max mode, you can pass b'max' or 1.
    sample_ratio : int
        Optional sampling ratio of ROI align, using adaptive size by default.

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [num_roi, pooled_size, pooled_size, channel]
    """
    avg_mode = mode in (b"avg", 0)
    max_mode = mode in (b"max", 1)
    assert avg_mode or max_mode, "Mode must be avg or max. Please pass in a valid mode."
    dtype = rois.dtype
    _, height, width, channel = get_const_tuple(data.shape)
    num_roi, _ = get_const_tuple(rois.shape)
    if isinstance(pooled_size, int):
        pooled_size_h = pooled_size_w = pooled_size
    else:
        pooled_size_h, pooled_size_w = pooled_size

    def _bilinear(i, c, y, x):
        # Samples more than one pixel outside the image contribute zero.
        outside = tvm.tir.any(y < -1.0, x < -1.0, y > height, x > width)
        y = tvm.te.min(tvm.te.max(y, 0.0), height - 1)
        x = tvm.te.min(tvm.te.max(x, 0.0), width - 1)
        val = bilinear_sample_nhwc(data, (i, y, x, c), height - 1, width - 1)
        return tvm.tir.if_then_else(outside, 0.0, val)

    def _sample(i, ph, pw, c):
        return _sample_common(
            i,
            c,
            ph,
            pw,
            rois,
            pooled_size_h,
            pooled_size_w,
            spatial_scale,
            sample_ratio,
            dtype,
            avg_mode,
            _bilinear,
        )

    # Fix: the tag previously read "pool,roi_align_nchw" — a copy-paste from
    # the NCHW variant. NOTE(review): confirm no schedule matches the exact
    # old tag string before relying on this rename.
    return te.compute(
        (num_roi, pooled_size_h, pooled_size_w, channel), _sample, tag="pool,roi_align_nhwc"
    )
| 7,429 | 31.445415 | 98 | py |
tvm | tvm-main/python/tvm/topi/vision/rcnn/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Faster R-CNN and Mask R-CNN operators"""
from .roi_align import *
from .roi_pool import *
from .proposal import *
| 937 | 39.782609 | 62 | py |
tvm | tvm-main/python/tvm/topi/vision/rcnn/proposal.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, singleton-comparison, bad-continuation
"""Proposal operator"""
import math
import tvm
from tvm import te
from ...utils import get_const_tuple, get_const_int
from ...sort import argsort
def generate_anchor(ratio, scale, base_size):
    """Return one base anchor as (x1, y1, x2, y2) corners.

    The anchor is a base_size x base_size square reshaped to the requested
    aspect ratio (area-preserving, with rounding) and then scaled, centered
    on the square's midpoint.
    """
    base = float(base_size)
    ctr = 0.5 * (base - 1.0)
    # Area-preserving reshape: pick a width whose squared value is the
    # original area divided by the ratio, then derive the height from it.
    ratio_area = math.floor(base * base / ratio)
    anchor_w = math.floor(math.sqrt(ratio_area) + 0.5) * scale
    anchor_h = math.floor((anchor_w / scale * ratio) + 0.5) * scale
    half_w = 0.5 * (anchor_w - 1.0)
    half_h = 0.5 * (anchor_h - 1.0)
    return (ctr - half_w, ctr - half_h, ctr + half_w, ctr + half_h)
def reg_bbox(x1, y1, x2, y2, dx, dy, dw, dh):
    """Bounding box regression function

    Decodes (dx, dy, dw, dh) deltas against a corner-format box: the center
    is shifted proportionally to the box extent and the extent is scaled by
    exp of the size deltas. Returns the decoded corners.
    """
    box_w = x2 - x1 + 1.0
    box_h = y2 - y1 + 1.0
    cx = x1 + 0.5 * (box_w - 1.0)
    cy = y1 + 0.5 * (box_h - 1.0)
    new_cx = dx * box_w + cx
    new_cy = dy * box_h + cy
    new_w = te.exp(dw) * box_w
    new_h = te.exp(dh) * box_h
    half_w = 0.5 * (new_w - 1.0)
    half_h = 0.5 * (new_h - 1.0)
    return new_cx - half_w, new_cy - half_h, new_cx + half_w, new_cy + half_h
def reg_iou(x1, y1, x2, y2, dx1, dy1, dx2, dy2):
    """Bounding box regression function

    IoU-loss variant: each corner delta is added directly to the matching
    anchor corner. Returns the decoded (x1, y1, x2, y2) corners.
    """
    return x1 + dx1, y1 + dy1, x2 + dx2, y2 + dy2
def predict_bbox_ir(
    cls_prob_buf,
    bbox_pred_buf,
    im_info_buf,
    out_buf,
    scales,
    ratios,
    feature_stride,
    rpn_min_size,
    iou_loss,
):
    """Predict bounding boxes based on anchors, scores and deltas.

    Parameters
    ----------
    cls_prob_buf : tvm.te.schedule.Buffer
        4-D with shape [batch, 2 * num_anchors, height, width]
    bbox_pred_buf : tvm.te.schedule.Buffer
        4-D with shape [batch, 4 * num_anchors, height, width]
    im_info_buf : tvm.te.schedule.Buffer
        2-D with shape [batch, 3]
    out_buf : tvm.te.schedule.Buffer
        3-D with shape [batch, num_bbox, 5]
        The last dimension is in format of [w_start, h_start, w_end, h_end, score]
    scales : list/tuple of float
        Scales of anchor windows.
    ratios : list/tuple of float
        Ratios of anchor windows.
    feature_stride : int
        The size of the receptive field each unit in the convolution layer of the rpn, for example
        the product of all stride's prior to this layer.
    rpn_min_size : int
        Minimum height or width in proposal.
    iou_loss : bool
        Usage of IoU loss.

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    batch, num_anchors, height, width = get_const_tuple(cls_prob_buf.shape)
    # cls_prob carries 2 channels (bg/fg score) per anchor.
    num_anchors //= 2
    ib = tvm.tir.ir_builder.create()
    p_score = ib.buffer_ptr(cls_prob_buf)
    p_delta = ib.buffer_ptr(bbox_pred_buf)
    p_im_info = ib.buffer_ptr(im_info_buf)
    p_out = ib.buffer_ptr(out_buf)
    idxm = tvm.tir.indexmod
    idxd = tvm.tir.indexdiv
    # One iteration per (batch, h, w) feature-map cell.
    with ib.for_range(0, batch * height * width) as tid:
        w = idxm(tid, width)
        h = idxm(idxd(tid, width), height)
        b = idxd(idxd(tid, width), height)
        for k in range(num_anchors):
            out_index = tid * num_anchors + k
            # Anchor k enumerates the (ratio, scale) grid.
            ratio = ratios[k // len(scales)]
            scale = scales[k % len(scales)]
            anchor = generate_anchor(ratio, scale, feature_stride)
            # im_info row: [height, width, scale].
            im_height = p_im_info[b * 3]
            im_width = p_im_info[b * 3 + 1]
            # Shift the base anchor to this cell's position in image space.
            x1 = anchor[0] + w * feature_stride
            y1 = anchor[1] + h * feature_stride
            x2 = anchor[2] + w * feature_stride
            y2 = anchor[3] + h * feature_stride
            delta = [
                p_delta[((((b * num_anchors + k) * 4 + i) * height + h) * width + w)]
                for i in range(4)
            ]
            regression_func = reg_iou if iou_loss else reg_bbox
            pred_x1, pred_y1, pred_x2, pred_y2 = regression_func(x1, y1, x2, y2, *delta)
            # Clamp the decoded box to the image bounds.
            pred_x1 = tvm.te.max(tvm.te.min(pred_x1, im_width - 1.0), 0.0)
            pred_y1 = tvm.te.max(tvm.te.min(pred_y1, im_height - 1.0), 0.0)
            pred_x2 = tvm.te.max(tvm.te.min(pred_x2, im_width - 1.0), 0.0)
            pred_y2 = tvm.te.max(tvm.te.min(pred_y2, im_height - 1.0), 0.0)
            real_height = (im_height / feature_stride).astype("int32")
            real_width = (im_width / feature_stride).astype("int32")
            bbox_w = pred_x2 - pred_x1 + 1.0
            bbox_h = pred_y2 - pred_y1 + 1.0
            min_size = p_im_info[b * 3 + 2] * rpn_min_size
            # Foreground score lives in the second half of the channel dim.
            pred_score = p_score[((b * num_anchors * 2 + num_anchors + k) * height + h) * width + w]
            # Cells beyond the real (unpadded) feature map get score -1.
            pred_score = tvm.tir.Select(
                tvm.tir.any(h >= real_height, w >= real_width), -1.0, pred_score
            )
            p_out[out_index * 5 + 0] = pred_x1
            p_out[out_index * 5 + 1] = pred_y1
            p_out[out_index * 5 + 2] = pred_x2
            p_out[out_index * 5 + 3] = pred_y2
            p_out[out_index * 5 + 4] = pred_score
            # Undersized boxes are inflated to min_size and marked invalid
            # via score -1 so later stages sort them to the back.
            with ib.if_scope(tvm.tir.any(bbox_w < min_size, bbox_h < min_size)):
                p_out[out_index * 5 + 0] -= min_size / 2.0
                p_out[out_index * 5 + 1] -= min_size / 2.0
                p_out[out_index * 5 + 2] += min_size / 2.0
                p_out[out_index * 5 + 3] += min_size / 2.0
                p_out[out_index * 5 + 4] = -1.0
    return ib.get()
def argsort_ir(data_buf, out_index_buf):
    """Batched odd-even transposition sort.

    Sorts each batch row of `data_buf` in place in descending order and
    writes the resulting permutation of original positions to
    `out_index_buf`.

    Parameters
    ----------
    data_buf : tvm.te.schedule.Buffer
        2-D with shape [batch, num_bbox]
    out_index_buf : tvm.te.schedule.Buffer
        2-D with shape [batch, num_bbox]. Indices of data in sorted order.

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    batch, num_bbox = get_const_tuple(data_buf.shape)
    ib = tvm.tir.ir_builder.create()
    p_data = ib.buffer_ptr(data_buf)
    index_out = ib.buffer_ptr(out_index_buf)
    # One-element scratch slots used for swapping.
    temp_data = ib.allocate("float32", (1,), name="temp_data", scope="local")
    temp_index = ib.allocate("int32", (1,), name="temp_index", scope="local")
    idxm = tvm.tir.indexmod
    with ib.for_range(0, batch, kind="unroll") as b:
        start = b * num_bbox
        # Initialize the index row with the identity permutation.
        for i in range(2):
            with ib.for_range(0, (num_bbox + 1) // 2) as tid:
                bbox_id = tid * 2 + i
                with ib.if_scope(bbox_id < num_bbox):
                    index_out[start + bbox_id] = bbox_id
        # num_bbox alternating odd/even passes guarantee a full sort.
        with ib.for_range(0, num_bbox) as k:
            with ib.for_range(0, (num_bbox + 1) // 2) as tid:
                # Even passes compare (0,1),(2,3),...; odd passes (1,2),(3,4),...
                offset = start + 2 * tid + idxm(k, 2)
                # Swap when the earlier element is smaller → descending order.
                with ib.if_scope(
                    tvm.tir.all(offset + 1 < num_bbox, p_data[offset] < p_data[offset + 1])
                ):
                    temp_data[0] = p_data[offset]
                    p_data[offset] = p_data[offset + 1]
                    p_data[offset + 1] = temp_data[0]
                    temp_index[0] = index_out[offset]
                    index_out[offset] = index_out[offset + 1]
                    index_out[offset + 1] = temp_index[0]
    return ib.get()
def nms_ir(sorted_bbox_buf, out_buf, nms_threshold):
    """Non-maximum suppression.

    Boxes are assumed pre-sorted by score; a box is suppressed when its IoU
    with any earlier non-suppressed box exceeds `nms_threshold`.

    Parameters
    ----------
    sorted_bbox_buf : tvm.te.schedule.Buffer
        3-D with shape [batch, num_bbox, 5]. The last dimension is in format of
        [w_start, h_start, w_end, h_end, score].
    out_buf : tvm.te.schedule.Buffer
        2-D with shape [batch, num_bbox]. Boolean mask of whether a bounding box should be removed.
    nms_threshold : float
        Non-maximum suppression threshold.

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    def calculate_overlap(out_tensor, box_a_idx, box_b_idx):
        """Calculate overlap (IoU) of two boxes stored flat at the given offsets."""
        # Intersection extent; +1.0 because corners are inclusive pixels.
        w = tvm.te.max(
            0.0,
            tvm.te.min(out_tensor[box_a_idx + 2], out_tensor[box_b_idx + 2])
            - tvm.te.max(out_tensor[box_a_idx], out_tensor[box_b_idx])
            + 1.0,
        )
        h = tvm.te.max(
            0.0,
            tvm.te.min(out_tensor[box_a_idx + 3], out_tensor[box_b_idx + 3])
            - tvm.te.max(out_tensor[box_a_idx + 1], out_tensor[box_b_idx + 1])
            + 1.0,
        )
        i = w * h
        # Union = area(a) + area(b) - intersection.
        u = (
            (out_tensor[box_a_idx + 2] - out_tensor[box_a_idx] + 1.0)
            * (out_tensor[box_a_idx + 3] - out_tensor[box_a_idx + 1] + 1.0)
            + (out_tensor[box_b_idx + 2] - out_tensor[box_b_idx] + 1.0)
            * (out_tensor[box_b_idx + 3] - out_tensor[box_b_idx + 1] + 1.0)
            - i
        )
        return i / u
    batch, num_bbox = get_const_tuple(out_buf.shape)
    ib = tvm.tir.ir_builder.create()
    p_data = ib.buffer_ptr(sorted_bbox_buf)
    p_out = ib.buffer_ptr(out_buf)
    with ib.for_range(0, batch, kind="unroll", name="n") as b:
        base_idx = b * num_bbox
        # Start with every box kept (False = not removed).
        for i in range(num_bbox):
            p_out[base_idx + i] = False
        # For each still-kept box l, remove later boxes overlapping it.
        with ib.for_range(0, num_bbox - 1) as l:
            with ib.for_range(0, num_bbox) as i:
                with ib.if_scope(tvm.tir.all(i < num_bbox, i > l, p_out[base_idx + l] == False)):
                    iou = calculate_overlap(p_data, (base_idx + l) * 5, (base_idx + i) * 5)
                    with ib.if_scope(iou > nms_threshold):
                        p_out[base_idx + i] = True
    return ib.get()
def prepare_output_ir(sorted_bbox_buf, remove_mask_buf, out_buf):
    """Copy output after applying nms to continuous memory.

    Kept boxes are packed to the front of each batch's output slice; when
    fewer than rpn_post_nms_top_n boxes survive, the kept boxes are
    replicated (multiple passes over the input) until the slice is full.

    Parameters
    ----------
    sorted_bbox_buf : tvm.te.schedule.Buffer
        3-D with shape [batch, num_bbox, 5]. The last dimension is in format of
        [w_start, h_start, w_end, h_end, score].
    remove_mask_buf : tvm.te.schedule.Buffer
        2-D with shape [batch, num_bbox]. Boolean mask of whether a bounding box should be removed.
    out_buf : tvm.te.schedule.Buffer
        2-D with shape [batch * rpn_post_nms_top_n, 5]. The last dimension is in format of
        [batch_index, w_start, h_start, w_end, h_end].

    Returns
    -------
    stmt : Stmt
        The result IR statement.
    """
    batch, num_bbox, _ = get_const_tuple(sorted_bbox_buf.shape)
    rpn_post_nms_top_n = get_const_int(out_buf.shape[0]) // batch
    ib = tvm.tir.ir_builder.create()
    # i[b]: next free output slot for batch b.
    i = ib.allocate("int32", (batch,), "i", scope="local")
    p_sorted_bbox = ib.buffer_ptr(sorted_bbox_buf)
    p_remove = ib.buffer_ptr(remove_mask_buf)
    p_out = ib.buffer_ptr(out_buf)
    # nkeep[b]: number of boxes surviving NMS for batch b.
    nkeep = ib.allocate("int32", (batch,), "nkeep", scope="local")
    with ib.for_range(0, batch) as b:
        nkeep[b] = 0
        i[b] = 0
    with ib.for_range(0, num_bbox) as j:
        with ib.for_range(0, batch) as b:
            with ib.if_scope(p_remove[b * num_bbox + j] == False):
                nkeep[b] += 1
    with ib.for_range(0, batch) as b:
        with ib.if_scope(nkeep[b] > 0):
            # ceil(top_n / nkeep) passes so replication fills the slice.
            with ib.for_range(
                0, te.ceil(tvm.tir.const(rpn_post_nms_top_n, "float32") / nkeep[b]).astype("int32")
            ):
                with ib.for_range(0, num_bbox) as j:
                    offset_j = (b * num_bbox + j) * 5
                    offset_i = (b * rpn_post_nms_top_n + i[b]) * 5
                    with ib.if_scope(
                        tvm.tir.all(
                            i[b] < rpn_post_nms_top_n, p_remove[(b * num_bbox + j)] == False
                        )
                    ):
                        # Column 0 stores the batch index; 1..4 the corners.
                        p_out[offset_i] = tvm.tir.Cast("float32", b)
                        with ib.for_range(0, 4, kind="unroll") as k:
                            p_out[offset_i + k + 1] = p_sorted_bbox[offset_j + k]
                        i[b] = i[b] + 1
    body = ib.get()
    return body
def proposal(
    cls_prob,
    bbox_pred,
    im_info,
    scales,
    ratios,
    feature_stride,
    threshold,
    rpn_pre_nms_top_n,
    rpn_post_nms_top_n,
    rpn_min_size,
    iou_loss,
):
    """Proposal operator.

    Pipeline: decode anchors+deltas into scored boxes, sort by score, keep
    the top rpn_pre_nms_top_n, run NMS, then pack rpn_post_nms_top_n boxes
    per batch into the output.

    Parameters
    ----------
    cls_prob : tvm.te.Tensor
        4-D with shape [batch, 2 * num_anchors, height, width]
    bbox_pred : tvm.te.Tensor
        4-D with shape [batch, 4 * num_anchors, height, width]
    im_info : tvm.te.Tensor
        2-D with shape [batch, 3]
    scales : list/tuple of float
        Scales of anchor windows.
    ratios : list/tuple of float
        Ratios of anchor windows.
    feature_stride : int
        The size of the receptive field each unit in the convolution layer of the rpn, for example
        the product of all stride's prior to this layer.
    threshold : float
        Non-maximum suppression threshold.
    rpn_pre_nms_top_n : int
        Number of top scoring boxes to apply NMS. -1 to use all boxes.
    rpn_post_nms_top_n : int
        Number of top scoring boxes to keep after applying NMS to RPN proposals.
    rpn_min_size : int
        Minimum height or width in proposal.
    iou_loss : bool
        Usage of IoU loss.

    Returns
    -------
    out : tvm.te.Tensor
        2-D tensor with shape [batch * rpn_post_nms_top_n, 5]. The last dimension is in format of
        [batch_index, w_start, h_start, w_end, h_end].
    """
    # pylint: disable=unused-argument
    batch, _, height, width = get_const_tuple(cls_prob.shape)
    num_anchors = len(scales) * len(ratios)
    num_bbox = height * width * num_anchors
    rpn_pre_nms_top_n = min(rpn_pre_nms_top_n, num_bbox) if rpn_pre_nms_top_n > 0 else num_bbox
    # Stage 1: decode every anchor into [x1, y1, x2, y2, score].
    bbox = te.extern(
        (batch, num_bbox, 5),
        [cls_prob, bbox_pred, im_info],
        lambda ins, outs: predict_bbox_ir(
            ins[0], ins[1], ins[2], outs[0], scales, ratios, feature_stride, rpn_min_size, iou_loss
        ),
        dtype=bbox_pred.dtype,
    )
    # Stage 2: sort boxes by score (descending) and keep the top-k.
    score = te.compute((batch, num_bbox), lambda b, i: bbox[b, i, 4], tag="bbox_score")
    valid_count_shape = (1,)
    valid_count = te.compute(valid_count_shape, lambda i: num_bbox)
    sorted_index = argsort(score, valid_count=valid_count, axis=1, is_ascend=False)
    sorted_bbox = te.compute(
        (batch, rpn_pre_nms_top_n, 5),
        lambda b, i, j: bbox[b, sorted_index[b, i], j],
        tag="sorted_bbox",
    )
    # Stage 3: NMS produces a per-box removal mask.
    nms_remove_mask = te.extern(
        (batch, rpn_pre_nms_top_n),
        [sorted_bbox],
        lambda ins, outs: nms_ir(ins[0], outs[0], threshold),
        dtype="bool",
    )
    # Stage 4: pack surviving boxes into [batch * post_nms_top_n, 5].
    nms_out = te.extern(
        (batch * rpn_post_nms_top_n, 5),
        [sorted_bbox, nms_remove_mask],
        lambda ins, outs: prepare_output_ir(ins[0], ins[1], outs[0]),
        dtype=sorted_bbox.dtype,
    )
    return nms_out
| 15,572 | 33.683742 | 100 | py |
tvm | tvm-main/python/tvm/topi/adreno/injective.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable,
"""Schedule for composition of injective operator"""
import tvm
from tvm import te
from .utils import bind_data_copy
from .. import utils
def schedule_injective_from_existing(sch, out):
    """Schedule for injective op from existing schedule.

    Binds the output stage of an existing schedule via the Adreno data-copy
    binding helper and returns the same schedule object.

    Parameters
    ----------
    sch: Schedule
        The schedule to update.
    out: Tensor
        The tensor representing the injective op.

    Returns
    -------
    sch: Schedule
        The updated schedule.
    """
    stage = sch[out]
    bind_data_copy(stage)
    return sch
def schedule_injective(outs):
    """Build a schedule for an injective op (or a list of them).

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of injective in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sched = te.create_schedule([tensor.op for tensor in outs])
    # Inline every injective stage into its consumer before binding.
    tvm.te.schedule.AutoInlineInjective(sched)
    # Empty-shape tensors produce no work, so they are not bound.
    for tensor in outs:
        if utils.is_empty_shape(tensor.shape):
            continue
        schedule_injective_from_existing(sched, tensor)
    return sched
| 1,973 | 28.462687 | 70 | py |
tvm | tvm-main/python/tvm/topi/adreno/pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""pooling schedules for Qualcomm Adreno GPU"""
import tvm
from tvm import te
from .. import tag
from .utils import get_div
def schedule_adaptive_pool(outs, layout="NCHW"):
    """Schedule for adaptive_pool.
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of adaptive_pool
        in the format of an array of tensors.
    layout: str
        Data layout ("NCHW", "NCHW4c", "NHWC" or "NHWC4c"); selects which
        axes are treated as channel/height/width below.
    Returns
    -------
    s: Schedule
        The computation schedule for adaptive_pool.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    def _schedule_global(Pool, layout):
        # Schedule one adaptive/global pooling stage.
        # examples of latest pool op is global max pool and non latest is global avg pooling
        # OL - an Expr will be used for rfactor
        # Out - programming of the parallelizm on the global level
        # shared is not required, local could be enough but shared scope gives quite significant
        # perf boost
        if Pool.op in s.outputs:
            Out = Pool
            OL = s.cache_write(Pool, "shared")
        else:
            Out = outs[0].op.output(0)
            s[Pool].set_scope("shared")
            OL = Pool
        PaddedInput = Pool.op.input_tensors[0]
        # detect axis for later reorder and binding of batch/channel to blocks and
        # spatial to threads
        if layout in ("NCHW", "NCHW4c"):
            channel_index = 1
            height_index = 2
            width_index = 3
        else:
            channel_index = 3
            height_index = 1
            width_index = 2
        if isinstance(PaddedInput.op, tvm.te.ComputeOp):
            # fold the padding stage into the pooling computation
            s[PaddedInput].compute_inline()
        # collapse all reduce axes so the reduction can be split across threads
        fused_reduce = s[OL].fuse(*s[OL].op.reduce_axis)
        spatial = PaddedInput.shape[height_index].value * PaddedInput.shape[width_index].value
        # below values were selected empirically assuming that we should have some work in each
        # thread (currently from 25-49) and number of threads not exceeding some threshold that
        # was selected as 256 from performance point of view after experiments on Adreno 660
        max_threads = spatial // 25 if spatial > 25 else 1
        max_threads = 256 if max_threads > 256 else max_threads
        num_thread = get_div(spatial, max_threads)
        thread_y = te.thread_axis((0, num_thread), "threadIdx.y")
        # rfactor the inner reduction chunk so partial results run in parallel
        # on threadIdx.y, then combine them in OL
        _, ki = s[OL].split(fused_reduce, factor=num_thread)
        data_out_rf = s.rfactor(OL, ki)
        s[data_out_rf].compute_at(s[OL], s[OL].op.reduce_axis[0])
        s[OL].bind(s[OL].op.reduce_axis[0], thread_y)
        naxis = s[Out].op.axis[0]
        caxis = s[Out].op.axis[channel_index]
        haxis = s[Out].op.axis[height_index]
        waxis = s[Out].op.axis[width_index]
        if layout in ("NHWC4c", "NCHW4c"):
            # blocked layouts carry a trailing 4-wide axis which is vectorized
            texture_axis = s[Out].op.axis[-1]
            s[Out].reorder(naxis, caxis, haxis, waxis, texture_axis)
            s[Out].vectorize(texture_axis)
        else:
            texture_axis = None
            s[Out].reorder(naxis, caxis, haxis, waxis)
        # one GPU block per fused output coordinate; reduction runs on threadIdx.y
        bx = s[Out].fuse(naxis, caxis, haxis, waxis)
        s[Out].bind(bx, te.thread_axis("blockIdx.x"))
        s[OL].compute_at(s[Out], bx)
    scheduled_ops = []
    def traverse(OP):
        """Internal traverse function"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_injective(OP.tag):
            if OP not in s.outputs:
                s[OP].compute_inline()
            for tensor in OP.input_tensors:
                if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
                    traverse(tensor.op)
        # schedule global_pool
        elif OP.tag.startswith("adaptive_pool"):
            Pool = OP.output(0)
            _schedule_global(Pool, layout)
        else:
            raise RuntimeError(f"Unsupported operator: {OP.tag}")
        scheduled_ops.append(OP)
    traverse(outs[0].op)
    return s
def schedule_pool(outs, layout):
    """Schedule for various pooling operators.
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of pool
        in the format of an array of tensors.
    layout: str
        Data layout.
    Returns
    -------
    s: Schedule
        The computation schedule for pool.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    def _schedule(PaddedInput, Pool):
        # Schedule one windowed pooling stage: fuse all but the innermost
        # output axis, split over blockIdx.x/threadIdx.x, vectorize the rest.
        if isinstance(PaddedInput.op, tvm.te.ComputeOp):
            # fold padding into the pooling computation
            s[PaddedInput].compute_inline()
        num_thread = tvm.target.Target.current(allow_none=False).max_num_threads
        # doubled relative to the target's reported limit; the split below
        # only uses it as a tiling factor
        num_thread = int(num_thread * 2)
        if Pool.op in s.outputs:
            Out = Pool
            OL = s.cache_write(Pool, "local")
        else:
            Out = outs[0].op.output(0)
            s[Pool].set_scope("local")
        # fuse everything except the innermost (vectorized) axis
        fused = s[Out].fuse(*s[Out].op.axis[:-1])
        bx, tx = s[Out].split(fused, factor=num_thread)
        s[Out].bind(bx, te.thread_axis("blockIdx.x"))
        s[Out].bind(tx, te.thread_axis("threadIdx.x"))
        s[Out].vectorize(s[Out].op.axis[-1])
        # compute the pooling body per-thread in local scope
        if Pool.op in s.outputs:
            s[OL].compute_at(s[Out], tx)
            s[OL].vectorize(s[OL].op.axis[-1])
        else:
            s[Pool].compute_at(s[Out], tx)
            s[Pool].vectorize(s[Pool].op.axis[-1])
    scheduled_ops = []
    def traverse(OP):
        """Internal traverse function"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(OP.tag):
            if OP not in s.outputs:
                s[OP].compute_inline()
            for tensor in OP.input_tensors:
                if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
                    traverse(tensor.op)
        # schedule pool
        elif OP.tag.startswith("pool"):
            PaddedInput = OP.input_tensors[0]
            Pool = OP.output(0)
            _schedule(PaddedInput, Pool)
        else:
            raise RuntimeError(f"Unsupported operator: {OP.tag}")
        scheduled_ops.append(OP)
    traverse(outs[0].op)
    return s
| 7,044 | 34.761421 | 97 | py |
tvm | tvm-main/python/tvm/topi/adreno/reduction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,too-many-locals,len-as-condition
"""Schedule for reduce operators"""
import numpy
from tvm import te
from ..utils import get_const_tuple
from .injective import schedule_injective_from_existing
from .utils import get_div
from ..cuda.reduction import schedule_reduce_impl
def _schedule_reduce_adreno(op, sch, is_idx_reduce=False):
    """Schedule a single reduce stage for Adreno.

    Parameters
    ----------
    op : ComputeOp
        The reduce op to schedule; when ``is_idx_reduce`` is True this is the
        index-extraction op whose input produces (index, value) outputs.
    sch : Schedule
        The schedule, updated in place.
    is_idx_reduce : bool
        True for argmin/argmax-style reductions.
    """
    sch_output = sch.outputs[0].output(0)
    use_rfactor = False
    if not is_idx_reduce:
        # total extent of the reduction domain decides whether rfactor pays off
        rdomain = 1
        whole_rop_output = op.output(0)
        for axis in sch[whole_rop_output].op.reduce_axis:
            rdomain = rdomain * axis.dom.extent
        if rdomain > 50:
            use_rfactor = True
            # shared gives better perf, but works only for rfactor flow
            scope = "shared"
        else:
            # in case of direct scheduling, shared is failed to be compiled
            scope = "local"
        if op in sch.outputs:
            whole_rop_output = sch.cache_write(sch_output, scope)
        else:
            # no change for whole_rop_output def, but need to set proper scope
            sch[whole_rop_output].set_scope(scope)
    else:
        # argmin/argmax: the fused reduce op has two outputs (index, value)
        temp_idx_input = op.input_tensors[0].op.output(0)
        temp_val_input = op.input_tensors[0].op.output(1)
        sch[temp_idx_input].set_scope("local")
        sch[temp_val_input].set_scope("local")
    shape = get_const_tuple(sch_output.shape)
    # latest4: innermost output axis is exactly 4 wide (vectorize directly);
    # div4: total element count divides by 4 (vectorize via a split by 4)
    latest4 = shape[-1] == 4
    div4 = numpy.prod(shape) % 4 == 0
    # Fuse and split the axis
    if latest4:
        fused_outer = sch[sch_output].fuse(
            *[sch[sch_output].op.axis[i] for i in range(len(sch[sch_output].op.axis) - 1)]
        )
    else:
        fused_outer = sch[sch_output].fuse(
            *[sch[sch_output].op.axis[i] for i in range(len(sch[sch_output].op.axis))]
        )
    ftc = numpy.prod(shape)
    a = fused_outer
    if not is_idx_reduce:
        if use_rfactor:
            # below values were selected empirically assuming that we should have some work in each
            # thread (currently from 25-49) and number of threads not exceeding some threshold that
            # was selected as 256 from performance point of view after experiments on Adreno 660
            max_threads = rdomain.value // 25 if rdomain > 25 else 1
            max_threads = 256 if max_threads > 256 else max_threads
            num_thread = get_div(rdomain, max_threads)
            fused_reduce = sch[whole_rop_output].fuse(*sch[whole_rop_output].op.reduce_axis)
            thread_y = te.thread_axis((0, num_thread), "threadIdx.y")
            # split the reduction and rfactor so partial sums run on threadIdx.y
            _, ki = sch[whole_rop_output].split(fused_reduce, factor=num_thread)
            data_out_rf = sch.rfactor(whole_rop_output, ki)
            sch[data_out_rf].compute_at(
                sch[whole_rop_output], sch[whole_rop_output].op.reduce_axis[0]
            )
            sch[whole_rop_output].bind(sch[whole_rop_output].op.reduce_axis[0], thread_y)
    if div4:
        if latest4:
            b = sch[sch_output].op.axis[-1]
        else:
            a, b = sch[sch_output].split(fused_outer, factor=4)
        sch[sch_output].vectorize(b)
        if not use_rfactor:
            if is_idx_reduce:
                sch[temp_idx_input].compute_at(sch[sch_output], b)
                sch[temp_val_input].compute_at(sch[sch_output], b)
            else:
                sch[whole_rop_output].compute_at(sch[sch_output], b)
    if not use_rfactor:
        # direct flow: spread output elements over blockIdx.x / threadIdx.x
        num_thread = get_div(ftc, 128)
        bx, outer_in = sch[sch_output].split(a, factor=num_thread)
        sch[sch_output].bind(bx, te.thread_axis("blockIdx.x"))
        sch[sch_output].bind(outer_in, te.thread_axis("threadIdx.x"))
        if not div4:
            if is_idx_reduce:
                sch[temp_idx_input].compute_at(sch[sch_output], outer_in)
                sch[temp_val_input].compute_at(sch[sch_output], outer_in)
            else:
                sch[whole_rop_output].compute_at(sch[sch_output], outer_in)
    else:
        # rfactor flow: one block per output element; threadIdx.y reduces
        sch[sch_output].bind(a, te.thread_axis("blockIdx.x"))
        if not div4 or use_rfactor:
            if is_idx_reduce:
                sch[temp_idx_input].compute_at(sch[sch_output], a)
                sch[temp_val_input].compute_at(sch[sch_output], a)
            else:
                sch[whole_rop_output].compute_at(sch[sch_output], a)
def schedule_reduce(outs):
    """Create an Adreno schedule for the reduce ops in ``outs``."""
    # Delegate to the shared reduction scheduler, plugging in the
    # Adreno-specific stage scheduler and the injective fallback.
    args = (outs, _schedule_reduce_adreno, schedule_injective_from_existing, True)
    return schedule_reduce_impl(*args)
| 5,266 | 40.472441 | 99 | py |
tvm | tvm-main/python/tvm/topi/adreno/conv2d_nchw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""conv2d nchw schedule on Qualcomm Adreno GPU"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple, traverse_inline
from .utils import (
split_to_chunks,
pack_input,
pack_filter,
expand_spatial_dimensions,
add_pad,
bind_data_copy,
get_default_conv2d_config,
get_texture_storage,
)
@autotvm.register_topi_schedule("conv2d_nchwc.image2d")
def schedule_conv2d_nchwc(cfg, outs):
    """Create the schedule for conv2d_nchw"""
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _schedule_op(op):
        # Only the tagged final conv2d stage needs explicit scheduling;
        # everything else is inlined by traverse_inline.
        if op.tag == "adreno_conv2d_latest_op":
            schedule_conv2d_NCHWc_KCRSk(cfg, sch, op.output(0))

    traverse_inline(sch, outs[0].op, _schedule_op)
    return sch
@autotvm.register_topi_compute("conv2d_nchwc.image2d")
def conv2d_nchwc(cfg, Input, Filter, stride, padding, dilation, out_dtype):
    """
    Convolution operator in NCHWc layout.

    Parameters
    ----------
    cfg : autotvm config
        Tuning configuration (not read here; consumed by the schedule).
    Input : te.Tensor
        4-D NCHW input, or already-blocked 5-D [N, C//4, H, W, 4] input.
    Filter : te.Tensor
        4-D OIHW weights, or blocked 5-D [O//4, I, KH, KW, 4] weights.
    stride : int or tuple of two ints
        Spatial strides (h, w).
    padding : int / str / tuple
        Padding specification, forwarded to the padding helper.
    dilation : int or tuple of two ints
        Kernel dilation (h, w).
    out_dtype : str or None
        Accumulation/output dtype; defaults to the input dtype.

    Algo:
    1. Convert into blocked format if we have 4d original tensor.
       In case of AutoTVM we override the convert by just tensors since such conversion
       will be absent for real blocked convolution, no sense to include into tuning
    2. Expand spatial dimensions to have width and height be dividable by factor 4
       This leads to slightly bigger amount of compute but allow utilize GPU much better
    3. Add paddings. This happens even if we do not need pad originaly. This is useful
       due to work arounding of the gaps of texture annotation between Primary Functions
       and limited support of textures in schedules. Later on this pad will be executed
       separately and will produce texture
    4. 5d Convolution compute with accumulating into out_dtype
    5. Cast to the origin output data type
    6. For case of 4d convolution: convert of output from 5d to 4d
    """
    if out_dtype is None:
        out_dtype = Input.dtype
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilation, int) or len(dilation) == 2
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    # tracks whether the result must be unpacked back to 4-D at the end
    convert_from4d = False
    if len(Input.shape) == 4:
        batch, in_channels, in_height, in_width = Input.shape
        # split channels into chunks of 4 (texture lane width)
        in_channel_chunks, in_channel_block, in_channel_tail = split_to_chunks(in_channels, 4)
        if autotvm.GLOBAL_SCOPE.in_tuning:
            # during tuning, pretend the input already arrives blocked
            dshape = (batch, in_channel_chunks, in_height, in_width, in_channel_block)
            Input = tvm.te.placeholder(dshape, Input.dtype, name="data_placeholder")
        else:
            Input = pack_input(
                Input,
                "NCHW",
                batch,
                in_channel_chunks,
                in_channel_block,
                in_channel_tail,
                in_height,
                in_width,
            )
    else:
        batch, in_channel_chunks, in_height, in_width, in_channel_block = Input.shape
    if len(Filter.shape) == 4:
        out_channles, in_filter_channels, kernel_h, kernel_w = Filter.shape
        out_channel_chunks, out_channel_block, out_channel_tail = split_to_chunks(out_channles, 4)
        if autotvm.GLOBAL_SCOPE.in_tuning:
            kshape = (out_channel_chunks, in_filter_channels, kernel_h, kernel_w, out_channel_block)
            Filter = tvm.te.placeholder(kshape, Filter.dtype, name="kernel_placeholder")
        else:
            convert_from4d = True
            Filter = pack_filter(
                Filter,
                "OIHW",
                out_channel_chunks,
                out_channel_block,
                out_channel_tail,
                in_filter_channels,
                in_channel_chunks,
                in_channel_block,
                in_channel_tail,
                kernel_h,
                kernel_w,
            )
    else:
        out_channel_chunks, in_filter_channels, kernel_h, kernel_w, out_channel_block = Filter.shape
    out_height_orig, out_height, out_width_orig, out_width = expand_spatial_dimensions(
        in_height, in_width, kernel_h, kernel_w, dilation_h, dilation_w, padding, stride_h, stride_w
    )
    # pad unconditionally so the pad stage can later be scheduled into a texture
    temp = add_pad(
        Input,
        "NCHW",
        out_height_orig,
        out_width_orig,
        kernel_h,
        kernel_w,
        dilation_h,
        dilation_w,
        padding,
        stride_h,
        stride_w,
    )
    # NOTE(review): both chunk and block reduce axes are created with the same
    # IR name "rc"; presumably harmless for codegen, but confusing in IR dumps.
    rcc = te.reduce_axis((0, in_channel_chunks), name="rc")
    rcb = te.reduce_axis((0, in_channel_block), name="rc")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    conv = te.compute(
        (batch, out_channel_chunks, out_height, out_width, out_channel_block),
        lambda nn, ffc, yy, xx, ffb: te.sum(
            (
                temp[nn, rcc, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rcb]
                * Filter[ffc, rcc * in_channel_block + rcb, ry, rx, ffb]
            ).astype(out_dtype),
            axis=[rcc, rcb, ry, rx],
        ),
        tag="conv2d_nchwc",
    )
    if convert_from4d and not autotvm.GLOBAL_SCOPE.in_tuning:
        # cast in blocked form first, then unpack 5-D -> 4-D for the caller
        dummy_cast = te.compute(
            (batch, out_channel_chunks, out_height_orig, out_width_orig, out_channel_block),
            lambda n, fc, y, x, fb: conv[n, fc, y, x, fb].astype(out_dtype),
            tag="dummy_cast",
        )
        return te.compute(
            (batch, out_channles, out_height_orig, out_width_orig),
            lambda n, c, y, x: dummy_cast[n, c // out_channel_block, y, x, c % out_channel_block],
            tag="adreno_conv2d_latest_op",
        )
    else:
        return te.compute(
            (batch, out_channel_chunks, out_height_orig, out_width_orig, out_channel_block),
            lambda n, ffc, y, x, ffb: conv[n, ffc, y, x, ffb].astype(out_dtype),
            tag="adreno_conv2d_latest_op",
        )
def schedule_conv2d_NCHWc_KCRSk(cfg, s, output):
    """
    schedule optimized for batch size = 1

    Parameters
    ----------
    cfg : autotvm config
        Tuning-space container; the tile_* splits and unroll knobs live here.
    s : Schedule
        Schedule updated in place.
    output : te.Tensor
        The stage tagged "adreno_conv2d_latest_op".

    Algo:
    1. Split output axis to three parts: global work size, vthread, local worksize.
       The limitations for tuning includes heuristics from some tuned networks to limit
       search space and not pay much time for useless configurations.
    2. In case of 4d convolution schedule copying of the input (and filter) into
       5d tensors
    3. pad should be scheduled separately to create independent opencl kernel. If pad is
       inlined into convolution, this gives 1.5x performance drop
    4. We are using cache_read for intermediate tensors to produce texture and guarantee
       the best performance on the next stage.
       The weights are managed through static texture planning mechanism and guarantied come
       in texture memory scope.
       Thus way we are calling cache_read only for data tensor
    5. For 5d convolution we schedule the latest op with binding 5d axis and vectorize
       for textures
       For 4d tensor we are doing the same for the latest blocked stage, i.e. conversion
       of data type
    6. In case of 4d conv we need to schedule postops as well
    """
    # latest is the final output stage; in the 4-D flow the compute chain is
    # conv -> dummy_cast (blocked) -> unpack (4-D), otherwise conv feeds latest.
    latest = s.outputs[0].output(0)
    if len(latest.op.axis) == 4:
        latest_blocked = dummy = output.op.input_tensors[0]
        conv = dummy.op.input_tensors[0]
    else:
        conv = output.op.input_tensors[0]
        latest_blocked = latest
    pad_data, kernel = s[conv].op.input_tensors
    # runtime-repack stages are recognized by the tags set in pack_filter/pack_input
    filter_pack_rt = bool(
        isinstance(kernel.op, tvm.te.ComputeOp) and "filter_pack" in kernel.op.tag
    )
    if "pad_temp" in pad_data.op.name:
        input_pad_temp = pad_data.op.input_tensors[0]
    else:
        input_pad_temp = pad_data
    input_pack_rt = bool(
        isinstance(input_pad_temp.op, tvm.te.ComputeOp) and "input_pack" in input_pad_temp.op.tag
    )
    ##### space definition begin #####
    n, fc, y, x, fb = s[conv].op.axis
    rcc, rcb, ry, rx = s[conv].op.reduce_axis
    # conv.shape[1] is the number of output-channel chunks
    if conv.shape[1] % 2 == 0:
        min_threads_div = 2
    else:
        min_threads_div = 1
    cfg.define_split(
        "tile_fc",
        fc,
        num_outputs=3,
        filter=lambda entity: entity.size[1] <= 8
        and entity.size[2] >= min_threads_div
        and entity.size[2] < 256,
    )
    cfg.define_split(
        "tile_y",
        y,
        num_outputs=3,
        filter=lambda entity: entity.size[1] <= 8 and entity.size[2] <= 16,
    )
    cfg.define_split(
        "tile_x",
        x,
        num_outputs=3,
        filter=lambda entity: entity.size[1] <= 8 and entity.size[2] <= 16,
    )
    cfg.define_split("tile_rcc", rcc, num_outputs=2)
    cfg.define_split("tile_ry", ry, num_outputs=2)
    cfg.define_split("tile_rx", rx, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    cfg.define_knob("unroll_explicit", [0, 1])
    # joint constraint: bounded vthread product and 32..1023 threads per block
    cfg.multi_filter(
        filter=lambda entity: (  # pylint: disable=chained-comparison
            entity["tile_fc"].size[1] * entity["tile_y"].size[1] * entity["tile_x"].size[1]
        )
        <= 24
        and 32
        <= (entity["tile_fc"].size[2] * entity["tile_y"].size[2] * entity["tile_x"].size[2])
        < 1024
    )
    if cfg.is_fallback:
        get_default_conv2d_config(cfg, conv.shape[1], conv.shape[2], conv.shape[3])
    ##### space definition end #####
    pad_data, kernel = s[conv].op.input_tensors
    # There are several conditions that have to be handled:
    # 1. If we are in the tuning, we always add cache read for data to main conv kernel
    #    to get texture in tuning opencl kernel
    # 2. If we are repacking input in runtime, we should always explicit schedule this one more
    #    stage of data copy from 4d to 5d (referred as pack_data).
    # 3. If we have pad (independently if we have runtime repack or not) we should inline it in the
    #    cache_read("texture")
    if autotvm.GLOBAL_SCOPE.in_tuning or input_pack_rt:
        if autotvm.GLOBAL_SCOPE.in_tuning:
            if "pad_temp" in pad_data.op.name:
                s[pad_data].compute_inline()
        else:
            if "pad_temp" in pad_data.op.name:
                pack_data = pad_data.op.input_tensors[0]
                bind_data_copy(s[pack_data])
                s[pad_data].compute_inline()
            else:
                pack_data = pad_data
                bind_data_copy(s[pack_data])
        AT = s.cache_read(pad_data, get_texture_storage(pad_data.shape), [conv])
        bind_data_copy(s[AT])
    elif "pad_temp" in pad_data.op.name:
        s[pad_data].compute_inline()
        # create cache stage
        AT = s.cache_read(pad_data, get_texture_storage(pad_data.shape), [conv])
        bind_data_copy(s[AT])
    if autotvm.GLOBAL_SCOPE.in_tuning or filter_pack_rt:
        if not autotvm.GLOBAL_SCOPE.in_tuning:
            bind_data_copy(s[kernel])
        # 1x1 kernels additionally get an explicit texture cache stage
        if kernel.shape[2] == 1 and kernel.shape[3] == 1:
            WT = s.cache_read(kernel, get_texture_storage(kernel.shape), [conv])
            bind_data_copy(s[WT])
    s[conv].set_scope("local")
    if latest_blocked == latest and output != latest:
        s[output].compute_inline()
    # tile and bind spatial axes
    n, fc, y, x, fb = s[latest_blocked].op.axis
    kernel_scope, n = s[latest_blocked].split(n, nparts=1)
    bf, vf, tf = cfg["tile_fc"].apply(s, latest_blocked, fc)
    by, vy, ty = cfg["tile_y"].apply(s, latest_blocked, y)
    bx, vx, tx = cfg["tile_x"].apply(s, latest_blocked, x)
    bf = s[latest_blocked].fuse(n, bf)
    s[latest_blocked].bind(bf, te.thread_axis("blockIdx.z"))
    s[latest_blocked].bind(by, te.thread_axis("blockIdx.y"))
    s[latest_blocked].bind(bx, te.thread_axis("blockIdx.x"))
    s[latest_blocked].bind(vf, te.thread_axis("vthread"))
    s[latest_blocked].bind(vy, te.thread_axis("vthread"))
    s[latest_blocked].bind(vx, te.thread_axis("vthread"))
    s[latest_blocked].bind(tf, te.thread_axis("threadIdx.z"))
    s[latest_blocked].bind(ty, te.thread_axis("threadIdx.y"))
    s[latest_blocked].bind(tx, te.thread_axis("threadIdx.x"))
    s[latest_blocked].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fb)
    # innermost 4-wide block axis maps to texture lanes
    s[latest_blocked].vectorize(fb)
    s[conv].compute_at(s[latest_blocked], tx)
    # tile reduction axes
    n, fc, y, x, fb = s[conv].op.axis
    rcc, rcb, ry, rx = s[conv].op.reduce_axis
    rco, rci = cfg["tile_rcc"].apply(s, conv, rcc)
    ryo, ryi = cfg["tile_ry"].apply(s, conv, ry)
    rxo, rxi = cfg["tile_rx"].apply(s, conv, rx)
    s[conv].reorder(rco, ryo, rxo, rci, ryi, rxi, rcb, n, fc, y, x, fb)
    s[conv].vectorize(fb)
    s[conv].unroll(rcb)
    # unroll
    s[latest_blocked].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[latest_blocked].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
    if latest_blocked != latest:
        # 4-D flow: the final unpack (plus any fused postops) is scheduled
        # as a separate data-copy kernel
        s[latest].compute_root()
        bind_data_copy(s[latest], 1)
        if latest != output:
            s[output].compute_inline()
    # report FLOPs to AutoTVM (2 ops per MAC) when shapes are static
    N, OCC, OH, OW, OCB = get_const_tuple(latest_blocked.shape)
    _, IC, KH, KW, _ = get_const_tuple(kernel.shape)
    ICKHKW = IC * KH * KW
    if isinstance(N, int):
        cfg.add_flop(2 * N * OH * OW * OCC * OCB * ICKHKW)
| 14,174 | 37.519022 | 100 | py |
tvm | tvm-main/python/tvm/topi/adreno/depthwise_conv2d_nhwc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""depthwise_conv2d_nhwc(c) schedule on Qualcomm Adreno GPU"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple, traverse_inline
from .utils import (
split_to_chunks,
pack_input,
pack_filter,
expand_spatial_dimensions,
add_pad,
bind_data_copy,
get_texture_storage,
get_default_conv2d_config,
)
@autotvm.register_topi_schedule("depthwise_conv2d_nhwc.image2d")
def schedule_depthwise_conv2d_nhwc(cfg, outs):
    """Create the schedule for depthwise conv2d_nchw4c_ohwi4o"""
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _schedule_op(op):
        # Only the tagged final depthwise-conv stage is scheduled explicitly;
        # traverse_inline handles the rest.
        if op.tag == "adreno_dw_conv2d_latest_op":
            schedule_depthwise_conv2d_NHWC_HWOI(cfg, sch, op.output(0))

    traverse_inline(sch, outs[0].op, _schedule_op)
    return sch
@autotvm.register_topi_compute("depthwise_conv2d_nhwc.image2d")
def depthwise_conv2d_nhwc(cfg, Input, Filter, stride, padding, dilation, out_dtype):
    """
    Depthwise convolution operator in NHWC(4c) layout.

    Parameters
    ----------
    cfg : autotvm config
        Tuning configuration (not read here; consumed by the schedule).
    Input : te.Tensor
        4-D NHWC input, or blocked 5-D [N, H, W, C//4, 4] input.
    Filter : te.Tensor
        4-D HWOI weights, or blocked 5-D weights.
    stride, padding, dilation
        Standard conv2d attributes.
    out_dtype : str or None
        Accumulation/output dtype; defaults to the input dtype.

    Algo:
    1. Convert into blocked format if we have 4d original tensor.
       In case of AutoTVM we override the convert by just tensors since such conversion
       will be absent for real blocked convolution, no sense to include into tuning
    2. Expand spatial dimensions to have width and height be dividable by factor 4
       This leads to slightly bigger amount of compute but allow utilize GPU much better
    3. Add paddings. This happens even if we do not need pad originaly. This is useful
       due to work arounding of the gaps of texture annotation between Primary Functions
       and limited support of textures in schedules. Later on this pad will be executed
       separately and will produce texture
    4. 5d Convolution compute with accumulating into out_dtype
    5. Cast to the origin output data type
    6. For case of 4d convolution: convert of output from 5d to 4d
    """
    if out_dtype is None:
        out_dtype = Input.dtype
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilation, int) or len(dilation) == 2
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    # tracks whether the result must be unpacked back to 4-D at the end
    convert_from4d = False
    if len(Input.shape) == 4:
        batch, in_height, in_width, in_channels = Input.shape
        kernel_h, kernel_w, out_channles, in_filter_channels = Filter.shape
        # split channels into chunks of 4 (texture lane width)
        in_channel_chunks, in_channel_block, in_channel_tail = split_to_chunks(in_channels, 4)
        out_channel_chunks, out_channel_block, out_channel_tail = split_to_chunks(out_channles, 4)
        if autotvm.GLOBAL_SCOPE.in_tuning:
            dshape = (batch, in_height, in_width, in_channel_chunks, in_channel_block)
            Input = tvm.te.placeholder(dshape, Input.dtype, name="data_placeholder")
            # NOTE(review): this placeholder puts out_channel_block at dim 2 and
            # out_channel_chunks at dim 4, while the 5-D unpack below expects
            # chunks at dim 2 and block at dim 4 — verify the tuning-path shape.
            kshape = (kernel_h, kernel_w, out_channel_block, in_filter_channels, out_channel_chunks)
            Filter = tvm.te.placeholder(kshape, Filter.dtype, name="kernel_placeholder")
        else:
            convert_from4d = True
            Input = pack_input(
                Input,
                "NHWC",
                batch,
                in_channel_chunks,
                in_channel_block,
                in_channel_tail,
                in_height,
                in_width,
            )
            Filter = pack_filter(
                Filter,
                "HWOI",
                out_channel_chunks,
                out_channel_block,
                out_channel_tail,
                in_filter_channels,
                in_channel_chunks,
                in_channel_block,
                in_channel_tail,
                kernel_h,
                kernel_w,
            )
    else:
        batch, in_height, in_width, in_channel_chunks, in_channel_block = Input.shape
        kernel_h, kernel_w, out_channel_chunks, in_filter_channels, out_channel_block = Filter.shape
    out_height_orig, out_height, out_width_orig, out_width = expand_spatial_dimensions(
        in_height, in_width, kernel_h, kernel_w, dilation_h, dilation_w, padding, stride_h, stride_w
    )
    # pad unconditionally so the pad stage can later produce a texture
    temp = add_pad(
        Input,
        "NHWC",
        out_height_orig,
        out_width_orig,
        kernel_h,
        kernel_w,
        dilation_h,
        dilation_w,
        padding,
        stride_h,
        stride_w,
    )
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    # depthwise: each output channel reads only its own input channel,
    # so the reduction runs over the kernel window only
    conv = te.compute(
        (batch, out_height, out_width, out_channel_chunks, out_channel_block),
        lambda nn, yy, xx, ffc, ffb: te.sum(
            (
                temp[nn, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, ffc, ffb]
                * Filter[ry, rx, ffc, 0, ffb]
            ).astype(out_dtype),
            axis=[ry, rx],
        ),
        tag="depthwise_conv2d_nhwc",
    )
    if convert_from4d and not autotvm.GLOBAL_SCOPE.in_tuning:
        # cast in blocked form first, then unpack 5-D -> 4-D for the caller
        dummy_cast = te.compute(
            (batch, out_height_orig, out_width_orig, out_channel_chunks, out_channel_block),
            lambda n, y, x, fc, fb: conv[n, y, x, fc, fb].astype(out_dtype),
            tag="dummy_cast",
        )
        return te.compute(
            (batch, out_height_orig, out_width_orig, out_channles),
            lambda n, y, x, c: dummy_cast[n, y, x, c // out_channel_block, c % out_channel_block],
            tag="adreno_dw_conv2d_latest_op",
        )
    else:
        return te.compute(
            (batch, out_height_orig, out_width_orig, out_channel_chunks, out_channel_block),
            lambda n, y, x, ffc, ffb: conv[n, y, x, ffc, ffb].astype(out_dtype),
            tag="adreno_dw_conv2d_latest_op",
        )
def schedule_depthwise_conv2d_NHWC_HWOI(cfg, s, output):
    """
    schedule optimized for batch size = 1

    Parameters
    ----------
    cfg : autotvm config
        Tuning-space container; the tile_* splits and unroll knobs live here.
    s : Schedule
        Schedule updated in place.
    output : te.Tensor
        The stage tagged "adreno_dw_conv2d_latest_op".

    Algo:
    1. Split output axis to three parts: global work size, vthread, local worksize.
       The limitations for tuning includes heuristics from some tuned networks to limit
       search space and not pay much time for useless configurations.
    2. In case of 4d convolution schedule copying of the input (and filter) into
       5d tensors
    3. For depthwise convolution it's better to inline pad into the conv2d compute, the
       divergence in opencl kernel will not so significant as for regular conv2d.
    4. For 5d convolution we schedule the latest op with binding 5d axis and vectorize
       for textures
       For 4d tensor we are doing the same for the latest blocked stage, i.e. conversion
       of data type
    5. In case of 4d conv we need to schedule postops as well
    """
    # latest is the final output stage; in the 4-D flow the compute chain is
    # conv -> dummy_cast (blocked) -> unpack (4-D), otherwise conv feeds latest.
    latest = s.outputs[0].output(0)
    if len(latest.op.axis) == 4:
        latest_blocked = dummy = output.op.input_tensors[0]
        conv = dummy.op.input_tensors[0]
    else:
        conv = output.op.input_tensors[0]
        latest_blocked = latest
    ##### space definition begin #####
    n, y, x, fc, fb = s[conv].op.axis
    ry, rx = s[conv].op.reduce_axis
    cfg.define_split("tile_fc", fc, num_outputs=3)
    cfg.define_split("tile_y", y, num_outputs=3)
    cfg.define_split("tile_x", x, num_outputs=3)
    cfg.define_split("tile_ry", ry, num_outputs=2)
    cfg.define_split("tile_rx", rx, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    cfg.define_knob("unroll_explicit", [0, 1])
    # joint constraint: bounded vthread product and 32..1023 threads per block
    cfg.multi_filter(
        filter=lambda entity: (  # pylint: disable=chained-comparison
            entity["tile_fc"].size[1] * entity["tile_y"].size[1] * entity["tile_x"].size[1]
        )
        <= 32
        and 32
        <= (entity["tile_fc"].size[2] * entity["tile_y"].size[2] * entity["tile_x"].size[2])
        < 1024
    )
    if cfg.is_fallback:
        get_default_conv2d_config(cfg, conv.shape[3], conv.shape[1], conv.shape[2])
    ##### space definition end #####
    pad_data, kernel = s[conv].op.input_tensors
    if (
        isinstance(kernel.op, tvm.te.ComputeOp) and "filter_pack" in kernel.op.tag
    ):  # len(latest.op.axis) == 4:
        # manage scheduling of datacopy
        pad_data, kernel = s[conv].op.input_tensors
        if "pad_temp" in pad_data.op.name:
            pack_data = pad_data.op.input_tensors[0]
            bind_data_copy(s[pack_data])
        else:
            bind_data_copy(s[pad_data])
        bind_data_copy(s[kernel])
    pad_data, kernel = s[conv].op.input_tensors
    if "pad_temp" in pad_data.op.name:
        # unlike regular conv2d, pad is inlined into the conv compute (see Algo 3)
        s[pad_data].compute_inline()
    s[conv].set_scope("local")
    if latest_blocked == latest and output != latest:
        s[output].compute_inline()
    if autotvm.GLOBAL_SCOPE.in_tuning or len(latest.op.axis) == 4:
        # create cache stage for tuning only or in case of 4d case
        AT = s.cache_read(pad_data, get_texture_storage(pad_data.shape), [conv])
        bind_data_copy(s[AT])
        if kernel.shape[0] == 1 and kernel.shape[1] == 1:
            WT = s.cache_read(kernel, get_texture_storage(kernel.shape), [conv])
            bind_data_copy(s[WT])
    # tile and bind spatial axes
    n, y, x, fc, fb = s[latest_blocked].op.axis
    kernel_scope, n = s[latest_blocked].split(n, nparts=1)
    bf, vf, tf = cfg["tile_fc"].apply(s, latest_blocked, fc)
    by, vy, ty = cfg["tile_y"].apply(s, latest_blocked, y)
    bx, vx, tx = cfg["tile_x"].apply(s, latest_blocked, x)
    by = s[latest_blocked].fuse(n, by)
    s[latest_blocked].bind(bf, te.thread_axis("blockIdx.z"))
    s[latest_blocked].bind(by, te.thread_axis("blockIdx.y"))
    s[latest_blocked].bind(bx, te.thread_axis("blockIdx.x"))
    s[latest_blocked].bind(vf, te.thread_axis("vthread"))
    s[latest_blocked].bind(vy, te.thread_axis("vthread"))
    s[latest_blocked].bind(vx, te.thread_axis("vthread"))
    s[latest_blocked].bind(tf, te.thread_axis("threadIdx.z"))
    s[latest_blocked].bind(ty, te.thread_axis("threadIdx.y"))
    s[latest_blocked].bind(tx, te.thread_axis("threadIdx.x"))
    s[latest_blocked].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fb)
    # innermost 4-wide block axis maps to texture lanes
    s[latest_blocked].vectorize(fb)
    s[conv].compute_at(s[latest_blocked], tx)
    # tile reduction axes
    n, y, x, fc, fb = s[conv].op.axis
    ry, rx = s[conv].op.reduce_axis
    ryo, ryi = cfg["tile_ry"].apply(s, conv, ry)
    rxo, rxi = cfg["tile_rx"].apply(s, conv, rx)
    s[conv].reorder(ryo, rxo, ryi, rxi, n, fc, y, x, fb)
    s[conv].vectorize(fb)
    # unroll
    s[latest_blocked].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[latest_blocked].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
    if latest_blocked != latest:
        # 4-D flow: the final unpack (plus any fused postops) is scheduled
        # as a separate data-copy kernel
        s[latest].compute_root()
        bind_data_copy(s[latest], 1)
        if latest != output:
            s[output].compute_inline()
    # report FLOPs to AutoTVM (2 ops per MAC) when shapes are static
    N, OH, OW, OCC, OCB = get_const_tuple(latest_blocked.shape)
    KH, KW, _, _, _ = get_const_tuple(kernel.shape)
    KHKW = KH * KW
    if isinstance(N, int):
        cfg.add_flop(2 * N * OH * OW * OCC * OCB * KHKW)
| 11,995 | 38.331148 | 100 | py |
tvm | tvm-main/python/tvm/topi/adreno/depthwise_conv2d_nchw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""depthwise_conv2d_nchw(c) schedule on Qualcomm Adreno GPU"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple, traverse_inline
from .utils import (
split_to_chunks,
pack_input,
pack_filter,
expand_spatial_dimensions,
add_pad,
bind_data_copy,
get_texture_storage,
get_default_conv2d_config,
)
@autotvm.register_topi_schedule("depthwise_conv2d_nchwc.image2d")
def schedule_depthwise_conv2d_nchwc(cfg, outs):
    """Create the schedule for depthwise conv2d_nchw4c_ohwi4o."""
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _schedule_tagged(op):
        # Only the tagged final op of the depthwise pipeline gets the real schedule.
        if op.tag == "adreno_dw_conv2d_latest_op":
            schedule_depthwise_conv2d_NCHWc_KCRSk(cfg, sch, op.output(0))

    traverse_inline(sch, outs[0].op, _schedule_tagged)
    return sch
@autotvm.register_topi_compute("depthwise_conv2d_nchwc.image2d")
def depthwise_conv2d_nchwc(cfg, Input, Filter, stride, padding, dilation, out_dtype):
    """
    Depthwise convolution operator in NCHWc layout.
    Algo:
      1. Convert into blocked format if we have 4d original tensor.
         In case of AutoTVM we override the convert by just tensors since such conversion
         will be absent for real blocked convolution, no sense to include into tuning
      2. Expand spatial dimensions to have width and height be dividable by factor 4
         This leads to slightly bigger amount of compute but allow utilize GPU much better
      3. Add paddings. This happens even if we do not need pad originally. This is useful
         due to working around of the gaps of texture annotation between Primary Functions
         and limited support of textures in schedules. Later on this pad will be executed
         separately and will produce texture
      4. 5d Convolution compute with accumulating into out_dtype
      5. Cast to the origin output data type
      6. For case of 4d convolution: convert of output from 5d to 4d
    """
    if out_dtype is None:
        out_dtype = Input.dtype
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilation, int) or len(dilation) == 2

    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride

    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation

    convert_from4d = False
    if len(Input.shape) == 4:
        batch, in_channels, in_height, in_width = Input.shape
        out_channles, in_filter_channels, kernel_h, kernel_w = Filter.shape
        # Round channel counts up to whole blocks of 4 (the texture vector width).
        in_channel_chunks, in_channel_block, in_channel_tail = split_to_chunks(in_channels, 4)
        out_channel_chunks, out_channel_block, out_channel_tail = split_to_chunks(out_channles, 4)
        if autotvm.GLOBAL_SCOPE.in_tuning:
            # During tuning skip the runtime repack: pretend the tensors are already blocked.
            dshape = (batch, in_channel_chunks, in_height, in_width, in_channel_block)
            Input = tvm.te.placeholder(dshape, Input.dtype, name="data_placeholder")
            kshape = (out_channel_chunks, in_filter_channels, kernel_h, kernel_w, out_channel_block)
            Filter = tvm.te.placeholder(kshape, Filter.dtype, name="kernel_placeholder")
        else:
            convert_from4d = True
            Input = pack_input(
                Input,
                "NCHW",
                batch,
                in_channel_chunks,
                in_channel_block,
                in_channel_tail,
                in_height,
                in_width,
            )
            Filter = pack_filter(
                Filter,
                "OIHW",
                out_channel_chunks,
                out_channel_block,
                out_channel_tail,
                in_filter_channels,
                in_channel_chunks,
                in_channel_block,
                in_channel_tail,
                kernel_h,
                kernel_w,
            )
    else:
        # Already-blocked 5d input: just read the shapes.
        batch, in_channel_chunks, in_height, in_width, in_channel_block = Input.shape
        out_channel_chunks, in_filter_channels, kernel_h, kernel_w, out_channel_block = Filter.shape

    # out_*_orig is the semantically correct output size; out_* is padded up to a
    # multiple of 4 for better GPU work-group utilization.
    out_height_orig, out_height, out_width_orig, out_width = expand_spatial_dimensions(
        in_height, in_width, kernel_h, kernel_w, dilation_h, dilation_w, padding, stride_h, stride_w
    )

    temp = add_pad(
        Input,
        "NCHW",
        out_height_orig,
        out_width_orig,
        kernel_h,
        kernel_w,
        dilation_h,
        dilation_w,
        padding,
        stride_h,
        stride_w,
    )

    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    conv = te.compute(
        (batch, out_channel_chunks, out_height, out_width, out_channel_block),
        lambda nn, ffc, yy, xx, ffb: te.sum(
            (
                temp[
                    nn,
                    ffc // in_filter_channels,
                    yy * stride_h + ry * dilation_h,
                    xx * stride_w + rx * dilation_w,
                    ffb,
                ]
                * Filter[ffc // in_filter_channels, ffc % in_filter_channels, ry, rx, ffb]
            ).astype(out_dtype),
            axis=[ry, rx],
        ),
        tag="depthwise_conv2d_nchwc",
    )

    if convert_from4d and not autotvm.GLOBAL_SCOPE.in_tuning:
        # Cast stage kept separate so the 5d->4d unpack below stays a pure reshuffle.
        dummy_cast = te.compute(
            (batch, out_channel_chunks, out_height_orig, out_width_orig, out_channel_block),
            lambda n, fc, y, x, fb: conv[n, fc, y, x, fb].astype(out_dtype),
            tag="dummy_cast",
        )
        return te.compute(
            (batch, out_channles, out_height_orig, out_width_orig),
            lambda n, c, y, x: dummy_cast[n, c // out_channel_block, y, x, c % out_channel_block],
            tag="adreno_dw_conv2d_latest_op",
        )
    else:
        return te.compute(
            (batch, out_channel_chunks, out_height_orig, out_width_orig, out_channel_block),
            lambda n, ffc, y, x, ffb: conv[n, ffc, y, x, ffb].astype(out_dtype),
            tag="adreno_dw_conv2d_latest_op",
        )
def schedule_depthwise_conv2d_NCHWc_KCRSk(cfg, s, output):
    """
    schedule optimized for batch size = 1
    Algo:
      1. Split output axis to three parts: global work size, vthread, local worksize.
         The limitations for tuning includes heuristics from some tuned networks to limit
         search space and not pay much time for useless configurations.
      2. For depthwise convolution it's better to inline pad into the conv2d compute, the
         divergence in opencl kernel will not so significant as for regular conv2d.
      3. For 5d convolution we schedule the latest op with binding 5d axis and vectorize
         for textures
         For 4d tensor we are doing the same for the latest blocked stage, i.e. conversion
         of data type
      4. In case of 4d conv we need to schedule postops as well
    """
    latest = s.outputs[0].output(0)
    if len(latest.op.axis) == 4:
        # 4d case: `output` is the unpacked NCHW tensor; its producer is the blocked
        # cast stage ("dummy_cast") and that stage's producer is the conv itself.
        latest_blocked = dummy = output.op.input_tensors[0]
        conv = dummy.op.input_tensors[0]
    else:
        conv = output.op.input_tensors[0]
        latest_blocked = latest

    ##### space definition begin #####
    n, fc, y, x, fb = s[conv].op.axis
    ry, rx = s[conv].op.reduce_axis
    cfg.define_split("tile_fc", fc, num_outputs=3)
    cfg.define_split("tile_y", y, num_outputs=3)
    cfg.define_split("tile_x", x, num_outputs=3)
    cfg.define_split("tile_ry", ry, num_outputs=2)
    cfg.define_split("tile_rx", rx, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    cfg.define_knob("unroll_explicit", [0, 1])
    # Heuristic limits: at most 32 vthreads and a work-group size in [32, 1024).
    cfg.multi_filter(
        filter=lambda entity: (  # pylint: disable=chained-comparison
            entity["tile_fc"].size[1] * entity["tile_y"].size[1] * entity["tile_x"].size[1]
        )
        <= 32
        and 32
        <= (entity["tile_fc"].size[2] * entity["tile_y"].size[2] * entity["tile_x"].size[2])
        < 1024
    )
    if cfg.is_fallback:
        get_default_conv2d_config(cfg, conv.shape[1], conv.shape[2], conv.shape[3])
    ##### space definition end #####

    pad_data, kernel = s[conv].op.input_tensors
    if (
        isinstance(kernel.op, tvm.te.ComputeOp) and "filter_pack" in kernel.op.tag
    ):  # len(latest.op.axis) == 4:
        # manage scheduling of datacopy (the runtime 4d->5d repack stages)
        pad_data, kernel = s[conv].op.input_tensors
        if "pad_temp" in pad_data.op.name:
            pack_data = pad_data.op.input_tensors[0]
            bind_data_copy(s[pack_data])
        else:
            bind_data_copy(s[pad_data])
        bind_data_copy(s[kernel])

    pad_data, kernel = s[conv].op.input_tensors

    # Pad is inlined into the conv compute (see docstring, point 2).
    if "pad_temp" in pad_data.op.name:
        s[pad_data].compute_inline()

    s[conv].set_scope("local")
    if latest_blocked == latest and output != latest:
        s[output].compute_inline()

    if autotvm.GLOBAL_SCOPE.in_tuning or len(latest.op.axis) == 4:
        # create cache stage for tuning only or in case of 4d case
        AT = s.cache_read(pad_data, get_texture_storage(pad_data.shape), [conv])
        bind_data_copy(s[AT])
        # NOTE(review): the kernel texture cache is created only for 1x1 kernels
        # (shape[2]/shape[3] are KH/KW here) — presumably intentional; confirm.
        if kernel.shape[2] == 1 and kernel.shape[3] == 1:
            WT = s.cache_read(kernel, get_texture_storage(kernel.shape), [conv])
            bind_data_copy(s[WT])

    # tile and bind spatial axes
    n, fc, y, x, fb = s[latest_blocked].op.axis

    # kernel_scope: a degenerate outer axis used only to attach unroll pragmas below.
    kernel_scope, n = s[latest_blocked].split(n, nparts=1)

    bf, vf, tf = cfg["tile_fc"].apply(s, latest_blocked, fc)
    by, vy, ty = cfg["tile_y"].apply(s, latest_blocked, y)
    bx, vx, tx = cfg["tile_x"].apply(s, latest_blocked, x)

    bf = s[latest_blocked].fuse(n, bf)
    s[latest_blocked].bind(bf, te.thread_axis("blockIdx.z"))
    s[latest_blocked].bind(by, te.thread_axis("blockIdx.y"))
    s[latest_blocked].bind(bx, te.thread_axis("blockIdx.x"))
    s[latest_blocked].bind(vf, te.thread_axis("vthread"))
    s[latest_blocked].bind(vy, te.thread_axis("vthread"))
    s[latest_blocked].bind(vx, te.thread_axis("vthread"))
    s[latest_blocked].bind(tf, te.thread_axis("threadIdx.z"))
    s[latest_blocked].bind(ty, te.thread_axis("threadIdx.y"))
    s[latest_blocked].bind(tx, te.thread_axis("threadIdx.x"))
    s[latest_blocked].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fb)
    # Innermost block axis (size 4) maps onto the texture vector.
    s[latest_blocked].vectorize(fb)

    s[conv].compute_at(s[latest_blocked], tx)

    # tile reduction axes
    n, fc, y, x, fb = s[conv].op.axis
    ry, rx = s[conv].op.reduce_axis
    ryo, ryi = cfg["tile_ry"].apply(s, conv, ry)
    rxo, rxi = cfg["tile_rx"].apply(s, conv, rx)

    s[conv].reorder(ryo, rxo, ryi, rxi, n, fc, y, x, fb)
    s[conv].vectorize(fb)

    # unroll
    s[latest_blocked].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[latest_blocked].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)

    if latest_blocked != latest:
        # 4d case: the 5d->4d unpack (and any postops) run as a separate copy stage.
        s[latest].compute_root()
        bind_data_copy(s[latest], 1)
        if latest != output:
            s[output].compute_inline()

    N, OCC, OH, OW, OCB = get_const_tuple(latest_blocked.shape)
    _, _, KH, KW, ICB = get_const_tuple(kernel.shape)
    KHKW = KH * KW

    # FLOP count is only reported for static batch sizes.
    if isinstance(N, int):
        cfg.add_flop(2 * N * OH * OW * OCC * OCB * KHKW * ICB)
| 12,100 | 38.161812 | 100 | py |
tvm | tvm-main/python/tvm/topi/adreno/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""util functions to be reused in different compute/schedule on Qualcomm Adreno GPU"""
import numpy
import tvm
from tvm import te
from tvm._ffi.registry import register_func
from tvm.topi.utils import simplify
from tvm.topi import nn
from tvm.autotvm.task.space import SplitEntity
from ..utils import get_const_tuple
def get_div(value, start):
    """Return the largest divisor of ``value`` that is not greater than ``start``.

    Scans candidates from ``start`` down to 1 and returns the first that divides
    ``value`` evenly; defaults to 1 when none matches (1 always divides a
    positive integer, so the default is a safety net).
    """
    return next((cand for cand in range(start, 0, -1) if value % cand == 0), 1)
def split_to_chunks(extent, block):
    """Split a trip count into (chunks, block, tail).

    The blocked extent covers or overlaps the original one:
      * extent divisible by block:  extent == chunks * block and tail == block
      * otherwise:                  extent == (chunks - 1) * block + tail

    Parameters
    ----------
    extent: int
        tripcount of the original compute
    block: int
        size of one block

    Returns
    -------
    out: tuple of (chunks, block, tail)
        chunks = ceildiv(extent, block); tail is the number of original
        elements living in the last chunk.
    """
    leftover = extent % block
    if leftover == 0:
        return extent // block, block, block
    return extent // block + 1, block, leftover
def pack_input(Input, layout, batch, chunks, block, original_tail, in_height, in_width):
    """
    Adds compute stages for packing of the data in runtime. Extends channel dimensions
    to be dividable by factor 4

    This function should be substituted by Schedule.transform_layout() in the future: see
    https://github.com/apache/tvm-rfcs/blob/main/rfcs/0039-buffer-physical-layout.md

    Parameters
    ----------
    Input: tvm.te.Tensor
        Input tensor to be repacked in runtime

    layout: string
        Layout of origin 4d tensor
        NCHW or NHWC are acceptable

    batch: int
        Batch size

    chunks: int
        Number of channel chunks been in the final tensor

    block: int
        size of the channel block

    original_tail: int
        Tail in the latest chunk diffing original number of channels vs blocked one
        If original_tail != block:
            original_channels = chunks * block - original_tail
        else
            original_channels = chunks * block

    in_height: int
        Height of the feature map

    in_width: int
        Width of the feature map
    """
    pad_value = tvm.tir.const(0, Input.dtype)

    def _reorder_data_nchw(*indices):
        # Zero-fill the channels of the last chunk that lie past the original
        # channel count; otherwise read through from the 4d tensor.
        condition = []
        condition.append(indices[1] == chunks - 1)
        condition.append(indices[4] >= original_tail)
        condition = tvm.tir.all(*condition)
        return tvm.tir.if_then_else(
            condition,
            pad_value,
            Input[indices[0], indices[1] * block + indices[4], indices[2], indices[3]],
        )

    def _reorder_data_nhwc(*indices):
        # Same zero-fill rule as NCHW, with the chunk axis at position 3.
        condition = []
        condition.append(indices[3] == chunks - 1)
        condition.append(indices[4] >= original_tail)
        condition = tvm.tir.all(*condition)
        return tvm.tir.if_then_else(
            condition,
            pad_value,
            Input[indices[0], indices[1], indices[2], indices[3] * block + indices[4]],
        )

    # compute:
    if layout == "NCHW":
        reordered_data = te.compute(
            [batch, chunks, in_height, in_width, block],
            _reorder_data_nchw,
            name="input_pack",
            tag="input_pack",
        )
    elif layout == "NHWC":
        reordered_data = te.compute(
            [batch, in_height, in_width, chunks, block],
            _reorder_data_nhwc,
            name="input_pack",
            tag="input_pack",
        )
    else:
        assert False, "Adreno util function pack_input does not accept unknown layout"
    return reordered_data
def pack_filter(
    Filter,
    layout,
    out_chunks,
    out_block,
    out_original_tail,
    in_filter_channels,
    in_chunks,
    in_block,
    in_original_tail,
    kernel_h,
    kernel_w,
):
    """
    Adds compute stages for packing of the filter in runtime. Extends channels dimensions
    to be dividable by factor 4

    This function should be substituted by Schedule.transform_layout() in the future: see
    https://github.com/apache/tvm-rfcs/blob/main/rfcs/0039-buffer-physical-layout.md

    Parameters
    ----------
    Filter: tvm.te.Tensor
        Filter tensor to be repacked in runtime

    layout: string
        Layout of origin 4d tensor
        OIHW, HWOI or HWIO are acceptable

    out_chunks: int
        Number of chunks for filters

    out_block: int
        Size of the block for output channels

    out_original_tail: int
        Original size of the latest chunk of output filters

    in_filter_channels: int
        Number of filter channels. might be different vs input channels in the
        data due to groups/depthwise nature

    in_chunks: int
        Number of input data channel chunks

    in_block: int
        Size of the block for input data channels

    in_original_tail: int
        Original size of the latest chunk for input data channels

    kernel_h: int
        Height of the conv2d kernel

    kernel_w: int
        Width of the conv2d kernel
    """
    pad_value = tvm.tir.const(0, Filter.dtype)

    # Depthwise variants (in_filter_channels == 1) only need to zero-pad the tail
    # of the output-channel axis; the regular variants additionally zero-pad
    # reads past the original input-channel extent.

    def _reorder_weights_depthwise_oihw(*indices):
        conditionA = []
        conditionA.append(indices[0] == out_chunks - 1)
        conditionA.append(indices[4] >= out_original_tail)
        conditionAT = tvm.tir.all(*conditionA)
        return tvm.tir.if_then_else(
            conditionAT,
            pad_value,
            Filter[indices[0] * out_block + indices[4], indices[1], indices[2], indices[3]],
        )

    def _reorder_weights_depthwise_hwoi(*indices):
        conditionA = []
        conditionA.append(indices[2] == out_chunks - 1)
        conditionA.append(indices[4] >= out_original_tail)
        conditionAT = tvm.tir.all(*conditionA)
        return tvm.tir.if_then_else(
            conditionAT,
            pad_value,
            Filter[indices[0], indices[1], indices[2] * out_block + indices[4], indices[3]],
        )

    def _reorder_weights_depthwise_hwio(*indices):
        conditionA = []
        conditionA.append(indices[3] == out_chunks - 1)
        conditionA.append(indices[4] >= out_original_tail)
        conditionAT = tvm.tir.all(*conditionA)
        return tvm.tir.if_then_else(
            conditionAT,
            pad_value,
            Filter[indices[0], indices[1], indices[2], indices[3] * out_block + indices[4]],
        )

    def _reorder_weights_oihw(*indices):
        conditionA = []
        conditionA.append(indices[0] == out_chunks - 1)
        conditionA.append(indices[4] >= out_original_tail)
        conditionAT = tvm.tir.all(*conditionA)

        conditionO = []
        conditionO.append(conditionAT)
        conditionO.append(indices[1] >= in_chunks * in_block + in_original_tail)
        conditionOT = tvm.tir.any(*conditionO)
        return tvm.tir.if_then_else(
            conditionOT,
            pad_value,
            Filter[indices[0] * out_block + indices[4], indices[1], indices[2], indices[3]],
        )

    def _reorder_weights_hwio(*indices):
        conditionA = []
        conditionA.append(indices[3] == out_chunks - 1)
        conditionA.append(indices[4] >= out_original_tail)
        conditionAT = tvm.tir.all(*conditionA)

        conditionO = []
        conditionO.append(conditionAT)
        conditionO.append(indices[2] >= in_chunks * in_block + in_original_tail)
        conditionOT = tvm.tir.any(*conditionO)
        return tvm.tir.if_then_else(
            conditionOT,
            pad_value,
            Filter[indices[0], indices[1], indices[2], indices[3] * out_block + indices[4]],
        )

    if in_filter_channels == 1:
        if layout == "OIHW":
            reordered_filter = te.compute(
                [out_chunks, in_filter_channels, kernel_h, kernel_w, out_block],
                _reorder_weights_depthwise_oihw,
                name="filter_pack",
                tag="filter_pack",
            )
        elif layout == "HWOI":
            reordered_filter = te.compute(
                [kernel_h, kernel_w, out_chunks, in_filter_channels, out_block],
                _reorder_weights_depthwise_hwoi,
                name="filter_pack",
                tag="filter_pack",
            )
        elif layout == "HWIO":
            reordered_filter = te.compute(
                [kernel_h, kernel_w, in_filter_channels, out_chunks, out_block],
                _reorder_weights_depthwise_hwio,
                name="filter_pack",
                tag="filter_pack",
            )
        else:
            assert False, "Adreno util function def pack_filter does not accept unknown layout"
    else:
        if layout == "OIHW":
            reordered_filter = te.compute(
                [out_chunks, in_filter_channels, kernel_h, kernel_w, out_block],
                _reorder_weights_oihw,
                name="filter_pack",
                tag="filter_pack",
            )
        elif layout == "HWIO":
            reordered_filter = te.compute(
                [kernel_h, kernel_w, in_filter_channels, out_chunks, out_block],
                _reorder_weights_hwio,
                name="filter_pack",
                tag="filter_pack",
            )
        else:
            assert False, "Adreno util function def pack_filter does not accept unknown layout"
    return reordered_filter
def expand_spatial_dimensions(
    in_height, in_width, kernel_h, kernel_w, dilation_h, dilation_w, padding, stride_h, stride_w
):
    """
    Expands spatial dimensions to be dividable by factor 4. This will allow us to do extremely
    better parallel computation on GPU. The drawback of this solution - it will be number of
    useless computations. By fact the speed-up of parallelism significantly overcomes the
    slowdown of extra compute and eventually this is a useful approach, at least for GPU

    Parameters
    ----------
    in_height: int
        Height of the feature map

    in_width: int
        Width of the feature map

    kernel_h: int
        Height of the conv2d kernel

    kernel_w: int
        Width of the conv2d kernel

    dilation_h: int
        Vertical dilation of the conv2d kernel

    dilation_w: int
        Horizontal dilation of the conv2d kernel

    padding: tuple or list
        Conv2d paddings

    stride_h: int
        Vertical stride of the conv2d kernel

    stride_w: int
        Horizontal stride of the conv2d kernel

    Returns
    -------
    out: tuple of (out_height_orig, out_height, out_width_orig, out_width)
        the original conv2d output sizes and the sizes rounded up to a multiple of 4
    """
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1

    pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    out_height_orig = out_height = simplify(
        (in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1
    )
    out_width_orig = out_width = simplify(
        (in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1
    )

    # can output shape be divided by 2 or even 4?
    # if it cannot be divided, need to extend for further help with split
    # theoretically there should be addition padding for inputs, but it will be optimized by
    # cache_read InferBound. We must proceed pad here exactly to produce tensor which is
    # required for calculation of original out size, not more! In other case intermediate
    # tensor might be allocated with less sizes while compute will try to fill the expanded
    # one - data discrepancy as a result
    # And in case of textures it is not a problem if we provide texture of less size because
    # 1. It is not important which values would be for extra calc - these calculations are
    #    required only for better utilization of GPU fit to working groups
    # 2. When we request pixel out of bound, texture will handle this correctly. As mentioned
    #    above, the value itself is not important
    if out_height % 2 != 0:
        out_height += 1
    if out_width % 2 != 0:
        out_width += 1

    if out_height % 4 != 0:
        out_height += 2
    if out_width % 4 != 0:
        out_width += 2
    return out_height_orig, out_height, out_width_orig, out_width
def add_pad(
    data,
    layout,
    out_height,
    out_width,
    kernel_h,
    kernel_w,
    dilation_h,
    dilation_w,
    padding,
    stride_h,
    stride_w,
):
    """Computes required padding values by the parameters of conv2d and adds
    compute for extending of original tensor

    Parameters
    ----------
    data: tvm.te.Tensor
        5d tensor, the layout of spatial dimensions are defined as separate argument

    layout: string
        Layout of origin 4d tensor

    out_height: int
        Height of the output feature map

    out_width: int
        Width of the output feature map

    kernel_h: int
        Height of the conv2d kernel

    kernel_w: int
        Width of the conv2d kernel

    dilation_h: int
        Height dilation value from conv2d attributes

    dilation_w: int
        Width dilation value from conv2d attributes

    padding: list / tuple of n ints
        Padding values from conv2d attributes

    stride_h: int
        Height stride value from conv2d attributes

    stride_w: int
        Width stride value from conv2d attributes

    Returns
    -------
    Output : tvm.te.Tensor
        n-D, the same layout as Input.
    """
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )

    # compute graph: locate the spatial axes for the given layout
    # (works for both 4d and blocked 5d tensors).
    if layout == "NCHW":
        y_axis = 2
        x_axis = 3
        if len(data.shape) == 4:
            _, _, in_height, in_width = data.shape
        else:
            _, _, in_height, in_width, _ = data.shape
    elif layout == "NHWC":
        y_axis = 1
        x_axis = 2
        if len(data.shape) == 4:
            _, in_height, in_width, _ = data.shape
        else:
            _, in_height, in_width, _, _ = data.shape
    else:
        assert False, "not supported layout in adreno util add_pad"

    pad_before = [0, 0, 0, 0, 0]
    pad_after = [0, 0, 0, 0, 0]
    pad_before[y_axis] = pad_top
    pad_before[x_axis] = pad_left
    pad_after[y_axis] = pad_down
    pad_after[x_axis] = pad_right

    # calculation of real used input size: trim the trailing pad so the padded
    # extent matches exactly what the (non-expanded) output actually reads.
    input_latest_w = (out_width - 1) * stride_w + (kernel_w - 1) * dilation_w + 1
    input_latest_h = (out_height - 1) * stride_h + (kernel_h - 1) * dilation_h + 1
    if input_latest_w < in_width + pad_before[x_axis] + pad_after[x_axis]:
        pad_after[x_axis] -= in_width + pad_before[x_axis] + pad_after[x_axis] - input_latest_w
    if input_latest_h < in_height + pad_before[y_axis] + pad_after[y_axis]:
        pad_after[y_axis] -= in_height + pad_before[y_axis] + pad_after[y_axis] - input_latest_h

    # Skip the pad stage entirely when no padding is needed.
    if (
        pad_before[0] == 0
        and pad_before[1] == 0
        and pad_before[2] == 0
        and pad_before[3] == 0
        and pad_after[0] == 0
        and pad_after[1] == 0
        and pad_after[2] == 0
        and pad_after[3] == 0
    ):
        return data
    else:
        return nn.pad(data, pad_before, pad_after, name="pad_temp")
def bind_data_copy(stage, axis_to_vectorize=None):
    """
    Schedules the eltwise stages like copying of data or postops

    Parameters
    ----------
    stage: tvm.te.Tensor
        the stage (copy / cast / postop) to bind to GPU threads

    axis_to_vectorize:
        Causes to split certain axis, moves inner part to the end of schedule
        and enable vectorization by this axis
        If parameter is not pointed, the schedule will be vectorized if the most inner
        dim is eq to 4 (size of the vector in texture)
    """
    shape = get_const_tuple(stage.op.output(0).shape)
    if axis_to_vectorize and len(shape) == 4 and shape[axis_to_vectorize] % 4 == 0:
        ax0, ax1, ax2, ax3 = stage.op.axis
        if axis_to_vectorize == 1:
            oax1, iax1 = stage.split(ax1, factor=4)
            stage.reorder(ax0, oax1, ax2, ax3, iax1)
            stage.vectorize(iax1)
            fused = stage.fuse(ax0, oax1, ax2, ax3)
        elif axis_to_vectorize == 3:
            oax3, iax3 = stage.split(ax3, factor=4)
            stage.reorder(ax0, ax1, ax2, oax3, iax3)
            stage.vectorize(iax3)
            fused = stage.fuse(ax0, ax1, ax2, oax3)

        # One quarter of the elements remain after vectorization by 4.
        ftc = numpy.prod(shape) / 4
        div = get_div(ftc, 128)
        block, thread = stage.split(fused, factor=div)

        stage.bind(block, te.thread_axis("blockIdx.z"))
        stage.bind(thread, te.thread_axis("threadIdx.z"))
    else:
        if shape[-1] == 4:
            # Innermost dim already matches the texture vector size - vectorize it.
            axes = stage.op.axis
            fused = stage.fuse(*axes[:-1])
            ftc = numpy.prod(shape[:-1])
            div = get_div(ftc, 64)
            block, thread = stage.split(fused, factor=div)
            stage.bind(block, te.thread_axis("blockIdx.x"))
            stage.bind(thread, te.thread_axis("threadIdx.x"))
            stage.vectorize(axes[-1])
        else:
            ftc = numpy.prod(shape)
            vthread = get_div(ftc, 8)
            fused = stage.fuse(*stage.op.axis)
            ftc = ftc / vthread
            # 1024 is a maximum work group size on the most Adreno GPU
            num_thread = get_div(ftc, 1024 // vthread)
            a, b = stage.split(fused, factor=num_thread)
            a, c = stage.split(a, factor=vthread)
            stage.bind(c, te.thread_axis("vthread"))
            stage.bind(a, te.thread_axis("blockIdx.x"))
            stage.bind(b, te.thread_axis("threadIdx.x"))
def get_texture_storage(shape):
    """
    Returns the texture layout acceptable for the shape

    Parameters
    ----------
    shape: array
        Shape of the tensor to be packed to texture
    """
    # Certain limitation of the Qualcomm devices. Subject to be determined for a
    # certain device individually, but until we have access to a remote device
    # during compilation, we define it uniformly for all target devices.
    # limit = 16384
    limit = tvm.target.Target.current().attrs["texture_spatial_limit"]
    front_plane = shape[0] * shape[1] * shape[2]
    if front_plane < limit and shape[3] < limit:
        return "global.texture"
    front_pair = shape[0] * shape[1]
    back_pair = shape[2] * shape[3]
    if front_pair < limit and back_pair < limit:
        return "global.texture-nhwc"
    return "global.texture-weight"
@register_func("tvm.info.mem.global.texture")
@register_func("tvm.info.mem.global.texture-nhwc")
@register_func("tvm.info.mem.global.texture-weight")
def mem_info_global_texture_variants():
    """Return the MemoryInfo node shared by all Adreno texture memory scopes.

    All three texture scopes report identical limits: 16-bit units with a
    capacity bound of 16384*16384*4*32 bits (presumably derived from the
    maximum 16384x16384 RGBA texture size — confirm against the target specs).
    """
    return tvm.ir.make_node(
        "MemoryInfo",
        unit_bits=16,
        max_num_bits=16384 * 16384 * 4 * 32,
        max_simd_bits=4 * 32,
        head_address=None,
    )
def infer_tile_size(data, layout):
    """Compute the tile size for Winograd algorithm

    Parameters
    ----------
    data: tvm.te.Tensor
        Data tensor

    layout: string
        Layout of data tensor
        NCHW, NCHW4c, NHWC or NHWC4c are acceptable

    Returns
    -------
    tile_size : int
        Calculated tile size
    """
    assert layout in ("NCHW", "NCHW4c", "NHWC", "NHWC4c"), "Incompatible layout"
    # Height lives at axis 2 for channel-first layouts and axis 1 otherwise.
    height_axis = 2 if layout in ("NCHW", "NCHW4c") else 1
    height = get_const_tuple(data.shape)[height_axis]
    return 4 if height % 8 == 0 else 2
def get_default_conv2d_config(cfg, fc, y, x):
    """Defines conv2d default parameters for split axis for Adreno conv2d and depthwise conv2d"""
    # look for vthread params: largest divisor of y up to 5, then the largest
    # divisor of x keeping the vthread product below 9.
    vy = next((n for n in range(5, 0, -1) if y % n == 0), 1)
    vx = next((n for n in range(5, 0, -1) if x % n == 0 and vy * n < 9), 1)
    y = y // vy
    x = x // vx

    # thread-block sizes: biggest divisors keeping the work-group size <= 512.
    tfc = next((n for n in range(64, 0, -1) if fc % n == 0), 1)
    ty = next((n for n in range(16, 0, -1) if y % n == 0 and tfc * n <= 512), 1)
    tx = next((n for n in range(16, 0, -1) if x % n == 0 and tfc * ty * n <= 512), 1)

    fc = fc // tfc
    y = y // ty
    x = x // tx

    cfg["tile_fc"] = SplitEntity([fc, 1, tfc])
    cfg["tile_y"] = SplitEntity([y, vy, ty])
    cfg["tile_x"] = SplitEntity([x, vx, tx])
| 21,312 | 30.574815 | 98 | py |
tvm | tvm-main/python/tvm/topi/adreno/conv2d_nchw_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Winograd NCHW template for Adreno backend"""
import logging
from tvm import autotvm
from .conv2d_winograd_common import conv2d_winograd_comp, schedule_conv2d_winograd_impl
logger = logging.getLogger("conv2d_nchw_winograd")
@autotvm.register_topi_compute("conv2d_nchw_winograd.image2d")
def conv2d_nchw_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """NCHW Winograd conv2d compute with the weight transform performed at runtime."""
    return conv2d_nchw_winograd_comp(
        cfg,
        data,
        kernel,
        strides,
        padding,
        dilation,
        out_dtype,
        pre_computed=False,
    )
@autotvm.register_topi_schedule("conv2d_nchw_winograd.image2d")
def schedule_conv2d_nchw_winograd(cfg, outs):
    """Create the schedule for conv2d_nchw_winograd."""
    return schedule_conv2d_winograd_impl(cfg, outs, tag="dummy_compute_at")
@autotvm.register_topi_compute("conv2d_nchw_winograd_without_weight_transform.image2d")
def conv2d_nchw_winograd_without_weight_transform(
    cfg, data, kernel, strides, padding, dilation, out_dtype
):
    """NCHW Winograd conv2d compute for weights that were transformed ahead of time."""
    return conv2d_nchw_winograd_comp(
        cfg,
        data,
        kernel,
        strides,
        padding,
        dilation,
        out_dtype,
        pre_computed=True,
    )
@autotvm.register_topi_schedule("conv2d_nchw_winograd_without_weight_transform.image2d")
def schedule_conv2d_nchw_winograd_without_weight_transform(cfg, outs):
    """Create the schedule for conv2d_nchw_winograd with pre-transformed weights."""
    return schedule_conv2d_winograd_impl(cfg, outs, tag="dummy_compute_at", pre_computed=True)
def conv2d_nchw_winograd_comp(
    cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed
):
    """Compute declaration for winograd in NCHW layout.

    Thin layout-specific wrapper that forwards every argument to the shared
    ``conv2d_winograd_comp`` implementation with the layout fixed to "NCHW".

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template

    data: tvm.te.Tensor
        4-D or 5-D Data tensor with shape NCHW or NCHW4c

    kernel: tvm.te.Tensor
        4-D or 5-D tensor with shape OIHW or OIHW4o

    strides: int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]

    padding: int or a list/tuple of 2 or 4 ints
        padding size, or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints

    dilation: int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]

    out_dtype: str
        The output type. This is used for mixed precision.

    pre_computed: bool
        True when the weights were already transformed ahead of time; False when
        the transform must be computed at runtime.

    Returns
    -------
    output: tvm.te.Tensor
        4-D or 5-D with shape NCHW or NCHW4c
    """
    layout = "NCHW"
    return conv2d_winograd_comp(
        cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed, layout
    )
| 3,361 | 34.020833 | 94 | py |
tvm | tvm-main/python/tvm/topi/adreno/conv2d_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D alter op for Qualcomm Adreno GPU"""
import logging
import re
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from ..utils import get_const_tuple
from .utils import infer_tile_size
from ..nn import conv2d_alter_layout
logger = logging.getLogger("topi")
# Matchers for blocked layout strings (e.g. "NCHW4c", "OIHW4o"); the numeric
# wildcard is the channel block size. Presumably used to detect layouts that
# have already been transformed — confirm against the rest of this module.
_NCHWc_matcher = re.compile("^NCHW[0-9]+c$")
_OIHWo_matcher = re.compile("^OIHW[0-9]+o$")
_NHWCc_matcher = re.compile("^NHWC[0-9]+c$")
_HWIOo_matcher = re.compile("^HWIO[0-9]+o$")
_HWOIo_matcher = re.compile("^HWOI[0-9]+o$")
@conv2d_alter_layout.register("adreno")
def _alter_conv2d_layout(attrs, inputs, tinfos, out_type):
    """
    Prepare the new conv2d with proper target blocked layout attributes.

    OpenCL textures support 1d/2d/3d/4d textures but a read always happens for
    4 elements in a line. That is why only 4-element blocked conversions are
    supported for now:
        NCHW -> NCHW4c  &  OIHW -> OIHW4o
        NHWC -> NHWC4c  &  HWIO -> HWIO4o  &  HWOI -> HWOI4o

    Parameters
    ----------
    attrs
        Attributes of the current conv2d (padding, strides, dilation, layouts, ...).
    inputs
        Relay args of the conv2d: inputs[0] is data, inputs[1] is weight.
    tinfos
        te.Tensor placeholders describing input shapes/dtypes.
    out_type
        Relay type of the conv2d output.

    Returns
    -------
    The rewritten conv2d expression, or None when no rewrite is applied.
    """
    target = tvm.target.Target.current(allow_none=False)
    dispatch_ctx = autotvm.task.DispatchContext.current
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    # Parse the attributes.
    padding = attrs.get_int_tuple("padding")
    strides = attrs.get_int_tuple("strides")
    dilation = attrs.get_int_tuple("dilation")
    data_layout = attrs["data_layout"]
    kernel_layout = attrs["kernel_layout"]
    data_tensor, kernel_tensor = tinfos
    data_dtype = data_tensor.dtype
    out_dtype = out_type.dtype
    # Obtain the tuned config (and its workload) for this conv2d.
    if isinstance(dispatch_ctx, autotvm.task.ApplyGraphBest):
        cfg = dispatch_ctx.query(target, None)
        workload = cfg.workload
    else:
        impl, outs = relay.backend.te_compiler.select_implementation(
            relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target
        )
        workload = autotvm.task.get_workload(outs)
        if workload is None:
            # No AutoTVM workload: still pre-transform weights for winograd
            # implementations, otherwise leave the op untouched.
            if impl.name.find("winograd") != -1:
                if dilation != (1, 1):
                    logger.warning("Does not support weight pre-transform for dilated convolution.")
                    return None
                assert (data_layout == "NCHW" and kernel_layout == "OIHW") or (
                    data_layout == "NHWC" and kernel_layout == "HWIO"
                )
                if data_layout == "NCHW":
                    N, CI, H, W = get_const_tuple(data_tensor.shape)
                    CO, _, KH, KW = get_const_tuple(kernel_tensor.shape)
                    weight = inputs[1]
                else:
                    N, H, W, CI = get_const_tuple(data_tensor.shape)
                    KH, KW, _, CO = get_const_tuple(kernel_tensor.shape)
                    weight = relay.layout_transform(inputs[1], "HWIO", "OIHW")
                # Pre-compute weight transformation in winograd
                tile_size = infer_tile_size(data_tensor, data_layout)
                # alpha, alpha, CO, CI
                weight = relay.nn.contrib_conv2d_winograd_weight_transform(
                    weight, tile_size=tile_size
                )
                new_attrs["tile_size"] = tile_size
                new_attrs["channels"] = CO
                return relay.nn.contrib_conv2d_winograd_without_weight_transform(
                    inputs[0], weight, **new_attrs
                )
            return None
    cfg = dispatch_ctx.query(target, workload)
    topi_tmpl = workload[0]

    # ---- NCHW winograd: pre-transform weights and (if divisible by 4) block the layouts.
    if "conv2d_nchw_winograd" in topi_tmpl:
        suffix = "_acc32" if "acc32" in topi_tmpl else ""
        wkl_name = "conv2d_nchw_winograd_without_weight_transform" + suffix + ".image2d"
        if dilation != (1, 1):
            logger.warning("Does not support weight pre-transform for dilated convolution.")
            return None
        tile_size = infer_tile_size(data_tensor, data_layout)
        if len(data_tensor.shape) == 5:
            # Already blocked (NCHW4c/OIHW4o): only pre-transform the weights.
            assert data_layout == "NCHW4c" and kernel_layout == "OIHW4o"
            N, CI, H, W, CB = get_const_tuple(data_tensor.shape)
            CO, _, KH, KW, COB = get_const_tuple(kernel_tensor.shape)
            weight = relay.layout_transform(inputs[1], "OIHW4o", "OIHW")
            weight = relay.nn.contrib_conv2d_winograd_weight_transform(weight, tile_size=tile_size)
            weight = relay.layout_transform(weight, "HWOI", "HWIO4o")
            new_attrs["tile_size"] = tile_size
            new_attrs["channels"] = CO * COB
            # Register the altered workload so the tuned config is found later.
            new_data = data_tensor
            new_weight = te.placeholder(
                (KH + tile_size - 1, KW + tile_size - 1, CI * CB, CO, COB),
                dtype=kernel_tensor.dtype,
            )
            new_workload = autotvm.task.args_to_workload(
                [new_data, new_weight, strides, padding, dilation, out_dtype], wkl_name
            )
            dispatch_ctx.update(target, new_workload, cfg)
            return relay.nn.contrib_conv2d_winograd_without_weight_transform(
                inputs[0], weight, **new_attrs
            )
        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        N, CI, H, W = get_const_tuple(data_tensor.shape)
        CO, _, KH, KW = get_const_tuple(kernel_tensor.shape)
        # pre-compute weight transformation in winograd
        # alpha, alpha, CO, CI
        weight = relay.nn.contrib_conv2d_winograd_weight_transform(inputs[1], tile_size=tile_size)
        weight = relay.transpose(weight, axes=[2, 3, 0, 1])  # HWOI -> OIHW
        # (oc, ic, h, w) -> (h, w, ic, oc)
        new_attrs["kernel_layout"] = "HWIO"
        new_attrs["tile_size"] = tile_size
        new_attrs["channels"] = CO
        # Store the same config for the altered operator (workload)
        new_data = data_tensor
        new_weight = te.placeholder(
            (KH + tile_size - 1, KW + tile_size - 1, CI, CO), dtype=kernel_tensor.dtype
        )
        # Blocking factor is 4 only when the channel count divides evenly by 4.
        in_channel_block = CI % 4
        if in_channel_block == 0:
            in_channel_block = 4
        num_filter_block = CO % 4
        if num_filter_block == 0:
            num_filter_block = 4
        if in_channel_block != 4 or num_filter_block != 4:
            # Channels not divisible by 4: keep unblocked layouts.
            new_workload = autotvm.task.args_to_workload(
                [new_data, new_weight, strides, padding, dilation, out_dtype], wkl_name
            )
            dispatch_ctx.update(target, new_workload, cfg)
            return relay.nn.contrib_conv2d_winograd_without_weight_transform(
                inputs[0], weight, **new_attrs
            )
        new_attrs["data_layout"] = f"NCHW{in_channel_block}c"
        # (oc, ic, h, w) -> (h, w, ic, oc // 4, oc % 4)
        new_attrs["kernel_layout"] = f"HWIO{num_filter_block}o"
        new_attrs["out_layout"] = f"NCHW{num_filter_block}c"
        # Store altered operator's config
        new_data = te.placeholder(
            (N, CI // in_channel_block, H, W, in_channel_block), dtype=data_dtype
        )
        new_weight = te.placeholder(
            (KH + tile_size - 1, KW + tile_size - 1, CI, CO // num_filter_block, num_filter_block),
            dtype=kernel_tensor.dtype,
        )
        new_workload = autotvm.task.args_to_workload(
            [new_data, new_weight, strides, padding, dilation, out_dtype], wkl_name
        )
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.contrib_conv2d_winograd_without_weight_transform(
            inputs[0], weight, **new_attrs
        )

    # ---- NHWC winograd: mirror of the NCHW winograd branch above.
    if "conv2d_nhwc_winograd" in topi_tmpl:
        suffix = "_acc32" if "acc32" in topi_tmpl else ""
        wkl_name = "conv2d_nhwc_winograd_without_weight_transform" + suffix + ".image2d"
        if dilation != (1, 1):
            logger.warning("Does not support weight pre-transform for dilated convolution.")
            return None
        tile_size = infer_tile_size(data_tensor, data_layout)
        if len(data_tensor.shape) == 5:
            # Already blocked (NHWC4c/HWIO4o): only pre-transform the weights.
            assert data_layout == "NHWC4c" and kernel_layout == "HWIO4o"
            N, CI, H, W, CB = get_const_tuple(data_tensor.shape)
            KH, KW, _, CO, COB = get_const_tuple(kernel_tensor.shape)
            weight = relay.layout_transform(inputs[1], "HWIO4o", "OIHW")
            weight = relay.nn.contrib_conv2d_winograd_weight_transform(weight, tile_size=tile_size)
            weight = relay.layout_transform(weight, "HWOI", "HWIO4o")
            new_attrs["tile_size"] = tile_size
            new_attrs["channels"] = CO * COB
            # Register the altered workload so the tuned config is found later.
            new_data = data_tensor
            new_weight = te.placeholder(
                (KH + tile_size - 1, KW + tile_size - 1, CI * CB, CO, COB),
                dtype=kernel_tensor.dtype,
            )
            new_workload = autotvm.task.args_to_workload(
                [new_data, new_weight, strides, padding, dilation, out_dtype], wkl_name
            )
            dispatch_ctx.update(target, new_workload, cfg)
            return relay.nn.contrib_conv2d_winograd_without_weight_transform(
                inputs[0], weight, **new_attrs
            )
        assert data_layout == "NHWC" and kernel_layout == "HWIO"
        N, H, W, CI = get_const_tuple(data_tensor.shape)
        KH, KW, _, CO = get_const_tuple(kernel_tensor.shape)
        # pre-compute weight transformation in winograd
        weight = relay.layout_transform(inputs[1], "HWIO", "OIHW")
        weight = relay.nn.contrib_conv2d_winograd_weight_transform(weight, tile_size=tile_size)
        weight = relay.transpose(weight, axes=[0, 1, 3, 2])  # HWOI -> HWIO
        new_attrs["tile_size"] = tile_size
        new_attrs["channels"] = CO
        # Store the same config for the altered operator (workload)
        new_data = data_tensor
        new_weight = te.placeholder(
            (KH + tile_size - 1, KW + tile_size - 1, CI, CO), dtype=kernel_tensor.dtype
        )
        # Blocking factor is 4 only when the channel count divides evenly by 4.
        in_channel_block = CI % 4
        if in_channel_block == 0:
            in_channel_block = 4
        num_filter_block = CO % 4
        if num_filter_block == 0:
            num_filter_block = 4
        if in_channel_block != 4 or num_filter_block != 4:
            # Channels not divisible by 4: keep unblocked layouts.
            new_workload = autotvm.task.args_to_workload(
                [new_data, new_weight, strides, padding, dilation, out_dtype], wkl_name
            )
            dispatch_ctx.update(target, new_workload, cfg)
            return relay.nn.contrib_conv2d_winograd_without_weight_transform(
                inputs[0], weight, **new_attrs
            )
        new_attrs["data_layout"] = f"NHWC{in_channel_block}c"
        # (oc, ic, h, w) -> (h, w, ic, oc // 4, oc % 4)
        new_attrs["kernel_layout"] = f"HWIO{num_filter_block}o"
        new_attrs["out_layout"] = f"NHWC{num_filter_block}c"
        # Store altered operator's config
        new_data = te.placeholder(
            (N, H, W, CI // in_channel_block, in_channel_block), dtype=data_dtype
        )
        new_weight = te.placeholder(
            (KH + tile_size - 1, KW + tile_size - 1, CI, CO // num_filter_block, num_filter_block),
            dtype=kernel_tensor.dtype,
        )
        new_workload = autotvm.task.args_to_workload(
            [new_data, new_weight, strides, padding, dilation, out_dtype], wkl_name
        )
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.contrib_conv2d_winograd_without_weight_transform(
            inputs[0], weight, **new_attrs
        )

    # ---- Direct NCHW conv: switch to blocked NCHW4c/OIHW4o layouts when possible.
    if "conv2d_nchwc" in topi_tmpl:  # covers both conv2d_nchwc and depthwise_conv2d_nchwc
        if data_layout == "NCHW" and kernel_layout == "OIHW":
            batch, in_channels, in_height, in_width = data_tensor.shape
            out_channles, _, kernel_h, kernel_w = kernel_tensor.shape
            in_channel_block = in_channels % 4
            if in_channel_block == 0:
                in_channel_block = 4
            num_filter_block = out_channles % 4
            if num_filter_block == 0:
                num_filter_block = 4
            # no support yet for tensors that cannot be divisible by factor 4
            if num_filter_block != 4:
                return None
            batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape)
            out_channel, in_filter_channel, kh, kw = get_const_tuple(kernel_tensor.shape)
            # update new attrs
            new_attrs["channels"] = out_channel
            if in_channel_block == 4:
                new_attrs["data_layout"] = f"NCHW{in_channel_block}c"
            else:
                new_attrs["data_layout"] = "NCHW"
            # (oc, ic, h, w) -> (OC, ic, h, w, oc)
            new_attrs["kernel_layout"] = f"OIHW{num_filter_block}o"
            new_attrs["out_layout"] = f"NCHW{num_filter_block}c"
            # Store altered operator's config for applying of tuned AutoTVM statistics
            if in_channel_block == 4:
                new_data = te.placeholder(
                    (batch_size, in_channel // in_channel_block, height, width, in_channel_block),
                    dtype=data_dtype,
                )
            else:
                new_data = data_tensor
            new_kernel = te.placeholder(
                (out_channel // num_filter_block, in_filter_channel, kh, kw, num_filter_block),
                dtype=kernel_tensor.dtype,
            )
            new_workload = autotvm.task.args_to_workload(
                [new_data, new_kernel, strides, padding, dilation, out_dtype],
                topi_tmpl,  # "conv2d_nchwc.image2d",
            )
            dispatch_ctx.update(target, new_workload, cfg)
        else:
            # Layouts already blocked: just verify they match the expected pattern.
            assert _NCHWc_matcher.match(data_layout)
            assert _OIHWo_matcher.match(kernel_layout)
        return relay.nn.conv2d(*inputs, **new_attrs)

    # ---- Direct NHWC conv: switch to blocked NHWC4c/HWIO4o (or HWOI4o) layouts.
    if "conv2d_nhwc" in topi_tmpl:  # covers both conv2d_nhwcc and depthwise_conv2d_nhwcc
        if (data_layout == "NHWC" and kernel_layout == "HWIO") or (
            data_layout == "NHWC" and kernel_layout == "HWOI"
        ):
            if kernel_layout == "HWIO":
                batch_size, in_height, in_width, in_channels = data_tensor.shape
                kernel_h, kernel_w, in_filter_channel, out_channles = kernel_tensor.shape
            else:
                batch_size, in_height, in_width, in_channels = data_tensor.shape
                kernel_h, kernel_w, out_channles, in_filter_channel = kernel_tensor.shape
            in_channel_block = in_channels % 4
            if in_channel_block == 0:
                in_channel_block = 4
            num_filter_block = out_channles % 4
            if num_filter_block == 0:
                num_filter_block = 4
            # no support yet for tensors cannot be divisible by factor 4
            if num_filter_block != 4:
                return None
            # update new attrs
            new_attrs["channels"] = out_channles
            if in_channel_block == 4:
                new_attrs["data_layout"] = f"NHWC{in_channel_block}c"
            else:
                new_attrs["data_layout"] = "NHWC"
            # (h, w, ic, oc) -> (h, w, ic, OC, oc)
            if kernel_layout == "HWIO":
                new_attrs["kernel_layout"] = f"HWIO{num_filter_block}o"
            else:
                new_attrs["kernel_layout"] = f"HWOI{num_filter_block}o"
            new_attrs["out_layout"] = f"NHWC{num_filter_block}c"
            # Store altered operator's config for applying of tuned AutoTVM statistics
            if in_channel_block == 4:
                new_data = te.placeholder(
                    (
                        batch_size,
                        in_height,
                        in_width,
                        in_channels // in_channel_block,
                        in_channel_block,
                    ),
                    dtype=data_dtype,
                )
            else:
                new_data = data_tensor
            if kernel_layout == "HWIO":
                new_kernel = te.placeholder(
                    (
                        kernel_h,
                        kernel_w,
                        in_filter_channel,
                        out_channles // num_filter_block,
                        num_filter_block,
                    ),
                    dtype=kernel_tensor.dtype,
                )
            else:
                new_kernel = te.placeholder(
                    (
                        kernel_h,
                        kernel_w,
                        out_channles // num_filter_block,
                        in_filter_channel,
                        num_filter_block,
                    ),
                    dtype=kernel_tensor.dtype,
                )
            new_workload = autotvm.task.args_to_workload(
                [new_data, new_kernel, strides, padding, dilation, out_dtype], topi_tmpl
            )
            dispatch_ctx.update(target, new_workload, cfg)
        else:
            # Layouts already blocked: just verify they match the expected pattern.
            assert _NHWCc_matcher.match(data_layout)
            assert _HWIOo_matcher.match(kernel_layout) or _HWOIo_matcher.match(kernel_layout)
        return relay.nn.conv2d(*inputs, **new_attrs)

    return None
| 17,860 | 42.776961 | 100 | py |
tvm | tvm-main/python/tvm/topi/adreno/conv2d_nhwc_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Winograd NHWC template for Adreno backend"""
import logging
from tvm import autotvm
from .conv2d_winograd_common import conv2d_winograd_comp, schedule_conv2d_winograd_impl
logger = logging.getLogger("conv2d_nhwc_winograd")
@autotvm.register_topi_compute("conv2d_nhwc_winograd.image2d")
def conv2d_nhwc_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Winograd conv2d compute for NHWC layout with weights transformed at run time."""
    # pre_computed=False: the winograd weight transform has not been applied yet.
    return conv2d_nhwc_winograd_comp(
        cfg,
        data,
        kernel,
        strides,
        padding,
        dilation,
        out_dtype,
        pre_computed=False,
    )
@autotvm.register_topi_schedule("conv2d_nhwc_winograd.image2d")
def schedule_conv2d_nhwc_winograd(cfg, outs):
    """Create the schedule for conv2d_nhwc_winograd."""
    compute_at_tag = "dummy_compute_at"
    return schedule_conv2d_winograd_impl(cfg, outs, tag=compute_at_tag)
@autotvm.register_topi_compute("conv2d_nhwc_winograd_without_weight_transform.image2d")
def conv2d_nhwc_winograd_without_weight_transform(
    cfg, data, kernel, strides, padding, dilation, out_dtype
):
    """Winograd conv2d compute for NHWC layout with pre-transformed weights."""
    # pre_computed=True: the caller already supplied winograd-transformed weights.
    return conv2d_nhwc_winograd_comp(
        cfg,
        data,
        kernel,
        strides,
        padding,
        dilation,
        out_dtype,
        pre_computed=True,
    )
@autotvm.register_topi_schedule("conv2d_nhwc_winograd_without_weight_transform.image2d")
def schedule_conv2d_nhwc_winograd_without_weight_transform(cfg, outs):
    """Create the schedule for conv2d_nhwc_winograd_without_weight_transform."""
    compute_at_tag = "dummy_compute_at"
    return schedule_conv2d_winograd_impl(cfg, outs, tag=compute_at_tag, pre_computed=True)
def conv2d_nhwc_winograd_comp(
    cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed
):
    """Compute declaration for winograd convolution in NHWC layout.

    Thin wrapper over the shared winograd implementation with the layout
    argument fixed to "NHWC".

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template
    data: tvm.te.Tensor
        4-D or 5-D Data tensor with shape NHWC or NHWC4c
    kernel: tvm.te.Tensor
        4-D or 5-D tensor with shape HWIO or HWIO4o
    strides: int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding: int or a list/tuple of 2 or 4 ints
        padding size, or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    dilation: int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    out_dtype: str
        The output type. This is used for mixed precision.
    pre_computed: bool
        Flag if weights were pre computed if true or the weights should be
        computed in runtime

    Returns
    -------
    output: tvm.te.Tensor
        4-D or 5-D with shape NHWC or NHWC4c
    """
    return conv2d_winograd_comp(
        cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed, "NHWC"
    )
| 3,361 | 34.020833 | 94 | py |
tvm | tvm-main/python/tvm/topi/adreno/conv2d_nhwc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""conv2d nhwc schedule on Qualcomm Adreno GPU"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple, traverse_inline
from .utils import (
split_to_chunks,
pack_input,
pack_filter,
expand_spatial_dimensions,
add_pad,
bind_data_copy,
get_texture_storage,
get_default_conv2d_config,
)
@autotvm.register_topi_schedule("conv2d_nhwc.image2d")
def schedule_conv2d_nhwc(cfg, outs):
    """Create the schedule for conv2d_nhwc."""
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _schedule_main_op(op):
        # Only the tagged final conv stage receives the tuned schedule.
        if op.tag == "adreno_conv2d_latest_op":
            schedule_conv2d_NHWC(cfg, sch, op.output(0))

    traverse_inline(sch, outs[0].op, _schedule_main_op)
    return sch
@autotvm.register_topi_compute("conv2d_nhwc.image2d")
def conv2d_nhwc(cfg, Input, Filter, stride, padding, dilation, out_dtype):
    """
    Convolution operator in NHWC layout.

    Algo:
      1. Convert into blocked format if we have 4d original tensor.
         In case of AutoTVM we override the convert by just tensors since such conversion
         will be absent for real blocked convolution, no sense to include into tuning
      2. Expand spatial dimensions to have width and height be dividable by factor 4
         This leads to slightly bigger amount of compute but allows to utilize GPU much better
      3. Add paddings. This happens even if we do not need pad originally. This is useful
         due to working around of the gaps of texture annotation between Primary Functions
         and limited support of textures in schedules. Later on this pad will be executed
         separately and will produce texture
      4. 5d Convolution compute with accumulating into out_dtype
      5. Cast to the origin output data type
      6. For case of 4d convolution: convert of output from 5d to 4d
    """
    if out_dtype is None:
        out_dtype = Input.dtype
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilation, int) or len(dilation) == 2
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride

    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation

    # convert_from4d marks that the op was fed 4d tensors and the output must be
    # unpacked back from the blocked 5d form at the end.
    convert_from4d = False
    if len(Input.shape) == 4:
        batch, in_height, in_width, in_channels = Input.shape
        in_channel_chunks, in_channel_block, in_channel_tail = split_to_chunks(in_channels, 4)
        if autotvm.GLOBAL_SCOPE.in_tuning:
            # During tuning: pretend the input is already blocked; the real
            # repack stage will not exist for genuinely blocked convolutions.
            dshape = (batch, in_height, in_width, in_channel_chunks, in_channel_block)
            Input = tvm.te.placeholder(dshape, Input.dtype, name="data_placeholder")
        else:
            Input = pack_input(
                Input,
                "NHWC",
                batch,
                in_channel_chunks,
                in_channel_block,
                in_channel_tail,
                in_height,
                in_width,
            )
    else:
        batch, in_height, in_width, in_channel_chunks, in_channel_block = Input.shape

    if len(Filter.shape) == 4:
        kernel_h, kernel_w, in_filter_channels, out_channles = Filter.shape
        out_channel_chunks, out_channel_block, out_channel_tail = split_to_chunks(out_channles, 4)
        if autotvm.GLOBAL_SCOPE.in_tuning:
            kshape = (kernel_h, kernel_w, in_filter_channels, out_channel_chunks, out_channel_block)
            Filter = tvm.te.placeholder(kshape, Filter.dtype, name="kernel_placeholder")
        else:
            convert_from4d = True
            Filter = pack_filter(
                Filter,
                "HWIO",
                out_channel_chunks,
                out_channel_block,
                out_channel_tail,
                in_filter_channels,
                in_channel_chunks,
                in_channel_block,
                in_channel_tail,
                kernel_h,
                kernel_w,
            )
    else:
        kernel_h, kernel_w, in_filter_channels, out_channel_chunks, out_channel_block = Filter.shape

    # Round spatial output dims up so the GPU can be utilized better; the
    # original (unexpanded) sizes are kept for the final output shape.
    out_height_orig, out_height, out_width_orig, out_width = expand_spatial_dimensions(
        in_height, in_width, kernel_h, kernel_w, dilation_h, dilation_w, padding, stride_h, stride_w
    )

    # Unconditional pad stage: executed separately so it produces a texture.
    temp = add_pad(
        Input,
        "NHWC",
        out_height_orig,
        out_width_orig,
        kernel_h,
        kernel_w,
        dilation_h,
        dilation_w,
        padding,
        stride_h,
        stride_w,
    )

    # Reduction axes: input channel chunk/block and kernel spatial dims.
    rcc = te.reduce_axis((0, in_channel_chunks), name="rcc")
    rcb = te.reduce_axis((0, in_channel_block), name="rcb")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")

    conv = te.compute(
        (batch, out_height, out_width, out_channel_chunks, out_channel_block),
        lambda nn, yy, xx, fc, fb: te.sum(
            (
                temp[nn, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rcc, rcb]
                * Filter[ry, rx, rcc * in_channel_block + rcb, fc, fb]
            ).astype(out_dtype),
            axis=[ry, rx, rcc, rcb],
        ),
        tag="conv2d_nhwc",
    )

    if convert_from4d and not autotvm.GLOBAL_SCOPE.in_tuning:
        # Cast stage first, then unpack the blocked 5d output back to 4d NHWC.
        dummy_cast = te.compute(
            (batch, out_height_orig, out_width_orig, out_channel_chunks, out_channel_block),
            lambda n, y, x, fc, fb: conv[n, y, x, fc, fb].astype(out_dtype),
            tag="dummy_cast",
        )
        return te.compute(
            (batch, out_height_orig, out_width_orig, out_channles),
            lambda n, y, x, c: dummy_cast[n, y, x, c // out_channel_block, c % out_channel_block],
            tag="adreno_conv2d_latest_op",
        )
    else:
        return te.compute(
            (batch, out_height_orig, out_width_orig, out_channel_chunks, out_channel_block),
            lambda n, y, x, ffc, ffb: conv[n, y, x, ffc, ffb].astype(out_dtype),
            tag="adreno_conv2d_latest_op",
        )
def schedule_conv2d_NHWC(cfg, s, output):
    """
    Schedule optimized for batch size = 1.

    Algo:
    1. Split output axis to three parts: global work size, vthread, local worksize.
       The limitations for tuning include heuristics from some tuned networks to limit
       search space and not pay much time for useless configurations.
    2. In case of 4d convolution schedule copying of the input (and filter) into
       5d tensors
    3. pad should be scheduled separately to create independent opencl kernel. If pad is
       inlined into convolution, this gives 1.5x performance drop
    4. We are using cache_read for intermediate tensors to produce texture and guarantee
       the best performance on the next stage.
       The weights are managed through static texture planning mechanism and guaranteed to come
       in texture memory scope.
       Thus we are calling cache_read only for data tensor
    5. For 5d convolution we schedule the latest op with binding 5d axis and vectorize
       for textures
       For 4d tensor we are doing the same for the latest blocked stage, i.e. conversion
       of data type
    6. In case of 4d conv we need to schedule postops as well
    """
    # Identify the compute stages: `conv` is the main reduction, `latest_blocked`
    # is the last stage that is still in blocked 5d form.
    latest = s.outputs[0].output(0)
    if len(latest.op.axis) == 4:
        latest_blocked = dummy = output.op.input_tensors[0]
        conv = dummy.op.input_tensors[0]
    else:
        conv = output.op.input_tensors[0]
        latest_blocked = latest

    pad_data, kernel = s[conv].op.input_tensors
    # Detect whether filter/input are repacked 4d->5d at run time.
    filter_pack_rt = bool(
        isinstance(kernel.op, tvm.te.ComputeOp) and "filter_pack" in kernel.op.tag
    )

    if "pad_temp" in pad_data.op.name:
        input_pad_temp = pad_data.op.input_tensors[0]
    else:
        input_pad_temp = pad_data

    input_pack_rt = bool(
        isinstance(input_pad_temp.op, tvm.te.ComputeOp) and "input_pack" in input_pad_temp.op.tag
    )

    ##### space definition begin #####
    n, y, x, fc, fb = s[conv].op.axis
    ry, rx, rcc, rcb = s[conv].op.reduce_axis

    if conv.shape[3] % 2 == 0:
        min_threads_div = 2
    else:
        min_threads_div = 1

    cfg.define_split(
        "tile_fc",
        fc,
        num_outputs=3,
        filter=lambda entity: entity.size[1] <= 8
        and entity.size[2] >= min_threads_div
        and entity.size[2] < 256,
    )
    cfg.define_split(
        "tile_y",
        y,
        num_outputs=3,
        filter=lambda entity: entity.size[1] <= 8 and entity.size[2] <= 16,
    )
    cfg.define_split(
        "tile_x",
        x,
        num_outputs=3,
        filter=lambda entity: entity.size[1] <= 8 and entity.size[2] <= 16,
    )

    cfg.define_split("tile_rcc", rcc, num_outputs=2)
    cfg.define_split("tile_ry", ry, num_outputs=2)
    cfg.define_split("tile_rx", rx, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    cfg.define_knob("unroll_explicit", [0, 1])
    # Joint constraint on vthread product and total local work size.
    cfg.multi_filter(
        filter=lambda entity: (  # pylint: disable=chained-comparison
            entity["tile_fc"].size[1] * entity["tile_y"].size[1] * entity["tile_x"].size[1]
        )
        <= 24
        and 32
        <= (entity["tile_fc"].size[2] * entity["tile_y"].size[2] * entity["tile_x"].size[2])
        < 1024
    )
    if cfg.is_fallback:
        get_default_conv2d_config(cfg, conv.shape[3], conv.shape[1], conv.shape[2])
    ##### space definition end #####

    pad_data, kernel = s[conv].op.input_tensors
    # There are several conditions that have to be handled:
    # 1. If we are in the tuning, we always add cache read for data to main conv kernel
    #    to get texture in tuning opencl kernel
    # 2. If we are repacking input in runtime, we should always explicit schedule this one more
    #    stage of data copy from 4d to 5d (referred as pack_data).
    # 3. If we have pad (independently if we have runtime repack or not) we should inline it in the
    #    cache_read("texture")
    if autotvm.GLOBAL_SCOPE.in_tuning or input_pack_rt:
        if autotvm.GLOBAL_SCOPE.in_tuning:
            if "pad_temp" in pad_data.op.name:
                s[pad_data].compute_inline()
        else:
            if "pad_temp" in pad_data.op.name:
                s[pad_data].compute_inline()
                pack_data = pad_data.op.input_tensors[0]
                bind_data_copy(s[pack_data])
            else:
                pack_data = pad_data
                bind_data_copy(s[pack_data])

        AT = s.cache_read(pad_data, get_texture_storage(pad_data.shape), [conv])
        bind_data_copy(s[AT])
    elif "pad_temp" in pad_data.op.name:
        s[pad_data].compute_inline()
        # create cache stage
        AT = s.cache_read(pad_data, get_texture_storage(pad_data.shape), [conv])
        bind_data_copy(s[AT])

    if autotvm.GLOBAL_SCOPE.in_tuning or filter_pack_rt:
        if not autotvm.GLOBAL_SCOPE.in_tuning:
            bind_data_copy(s[kernel])
        if kernel.shape[0] == 1 and kernel.shape[1] == 1:
            WT = s.cache_read(kernel, get_texture_storage(kernel.shape), [conv])
            bind_data_copy(s[WT])

    s[conv].set_scope("local")
    if latest_blocked == latest and output != latest:
        s[output].compute_inline()

    # tile and bind spatial axes
    n, y, x, fc, fb = s[latest_blocked].op.axis

    kernel_scope, n = s[latest_blocked].split(n, nparts=1)

    bf, vf, tf = cfg["tile_fc"].apply(s, latest_blocked, fc)
    by, vy, ty = cfg["tile_y"].apply(s, latest_blocked, y)
    bx, vx, tx = cfg["tile_x"].apply(s, latest_blocked, x)

    by = s[latest_blocked].fuse(n, by)
    s[latest_blocked].bind(bf, te.thread_axis("blockIdx.z"))
    s[latest_blocked].bind(by, te.thread_axis("blockIdx.y"))
    s[latest_blocked].bind(bx, te.thread_axis("blockIdx.x"))
    s[latest_blocked].bind(vf, te.thread_axis("vthread"))
    s[latest_blocked].bind(vy, te.thread_axis("vthread"))
    s[latest_blocked].bind(vx, te.thread_axis("vthread"))
    s[latest_blocked].bind(tf, te.thread_axis("threadIdx.z"))
    s[latest_blocked].bind(ty, te.thread_axis("threadIdx.y"))
    s[latest_blocked].bind(tx, te.thread_axis("threadIdx.x"))
    s[latest_blocked].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fb)
    # Vectorize the innermost 4-element channel block for texture reads.
    s[latest_blocked].vectorize(fb)

    s[conv].compute_at(s[latest_blocked], tx)

    # tile reduction axes
    n, y, x, fc, fb = s[conv].op.axis

    ry, rx, rcc, rcb = s[conv].op.reduce_axis
    rco, rci = cfg["tile_rcc"].apply(s, conv, rcc)
    ryo, ryi = cfg["tile_ry"].apply(s, conv, ry)
    rxo, rxi = cfg["tile_rx"].apply(s, conv, rx)

    s[conv].reorder(rco, ryo, rxo, rci, ryi, rxi, rcb, n, fc, y, x, fb)
    s[conv].vectorize(fb)
    s[conv].unroll(rcb)

    # unroll
    s[latest_blocked].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[latest_blocked].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)

    if latest_blocked != latest:
        # 4d case: schedule the 5d->4d unpack (and post-ops) as a data copy.
        s[latest].compute_root()
        bind_data_copy(s[latest], 1)
        if latest != output:
            s[output].compute_inline()

    # Report FLOPs to AutoTVM (only when shapes are static ints).
    N, OH, OW, OCC, OCB = get_const_tuple(latest_blocked.shape)
    KH, KW, IC, _, _ = get_const_tuple(kernel.shape)
    ICKHKW = IC * KH * KW

    if isinstance(N, int):
        cfg.add_flop(2 * N * OH * OW * OCC * OCB * ICKHKW)
| 14,150 | 37.663934 | 100 | py |
tvm | tvm-main/python/tvm/topi/adreno/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""Qualcomm Adreno GPU specific declaration and schedules."""
from .conv2d_nchw import *
from .depthwise_conv2d_nchw import *
from .conv2d_nhwc import *
from .depthwise_conv2d_nhwc import *
from .pooling import *
from .conv2d_alter_op import *
from .conv2d_nchw_winograd import *
from .conv2d_nhwc_winograd import *
from .injective import schedule_injective
from .reduction import *
| 1,222 | 39.766667 | 62 | py |
tvm | tvm-main/python/tvm/topi/adreno/conv2d_winograd_common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Common Winograd implementation for Adreno backend"""
import tvm
from tvm import te
from tvm import autotvm
from tvm.topi import nn
from tvm.topi.utils import get_const_int, get_const_tuple, traverse_inline
from ..nn.winograd_util import winograd_transform_matrices
from .utils import (
split_to_chunks,
pack_input,
pack_filter,
bind_data_copy,
get_texture_storage,
infer_tile_size,
)
def conv2d_winograd_comp(
    cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed, layout
):
    """Compute declaration for winograd convolution on Adreno GPUs.

    Handles both NCHW and NHWC layouts, both raw (4-D) and blocked (5-D,
    channels packed by 4) inputs, and both runtime and pre-computed kernel
    transforms.

    Fix vs. previous revision: the second reduce axis of the data transform
    and of the inverse transform was created with the duplicate debug name
    "r_a"; it is now named "r_b" so lowered IR and tuning logs are readable.

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template
    data: tvm.te.Tensor
        4-D or 5-D Data tensor with shape NCHW or NCHW4c
    kernel: tvm.te.Tensor
        4-D or 5-D tensor with shape OIHW or OIHW4o
    strides: int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding: int or a list/tuple of 2 or 4 ints
        padding size, or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    dilation: int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    out_dtype: str
        The output type. This is used for mixed precision.
    pre_computed: bool
        Flag if weights were pre computed if true or the weights should be
        computed in runtime
    layout: str
        NHWC or NCHW values are accepted

    Returns
    -------
    output: tvm.te.Tensor
        4-D or 5-D with shape NCHW or NCHW4c
    """
    assert layout in ("NCHW", "NHWC")
    tile_size = infer_tile_size(data, layout)
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    HSTR, WSTR = (strides, strides) if isinstance(strides, int) else strides

    # If the input is the raw 4-D form, repack channels into blocks of 4
    # (NCHW -> NCHW4c / NHWC -> NHWC4c) before declaring the compute.
    convert_from4d = False
    if len(data.shape) == 4:
        convert_from4d = True
        if layout == "NCHW":
            N, DCI, H, W = get_const_tuple(data.shape)
        else:
            N, H, W, DCI = get_const_tuple(data.shape)
        if not pre_computed:
            if layout == "NCHW":
                out_channels, CI, KH, KW = get_const_tuple(kernel.shape)
            else:
                KH, KW, CI, out_channels = get_const_tuple(kernel.shape)
        else:
            # Pre-computed kernels arrive already transformed: (alpha, alpha, CI, CO);
            # recover the spatial kernel size from alpha and the tile size.
            alpha, _, CI, out_channels = get_const_tuple(kernel.shape)
            KH = KW = alpha + 1 - tile_size

        in_channel_chunks, in_channel_block, in_channel_tail = split_to_chunks(CI, 4)
        out_channel_chunks, out_channel_block, out_channel_tail = split_to_chunks(out_channels, 4)
        if autotvm.GLOBAL_SCOPE.in_tuning is True:
            # During tuning we avoid the real repacking cost and just declare
            # placeholders with the blocked shapes.
            if layout == "NCHW":
                dshape = (N, in_channel_chunks, H, W, in_channel_block)
            else:
                dshape = (N, H, W, in_channel_chunks, in_channel_block)
            if not pre_computed:  # kernel tensor is raw tensor, do strict check
                if layout == "NCHW":
                    kshape = (out_channel_chunks, CI, KH, KW, out_channel_block)
                else:
                    kshape = (KH, KW, CI, out_channel_chunks, out_channel_block)
            else:
                kshape = (alpha, alpha, CI, out_channel_chunks, out_channel_block)
            data = tvm.te.placeholder(dshape, data.dtype, name="data_placeholder")
            kernel = tvm.te.placeholder(kshape, kernel.dtype, name="kernel_placeholder")
        else:
            data = pack_input(
                data, layout, N, in_channel_chunks, in_channel_block, in_channel_tail, H, W
            )
            kernel_layout = "OIHW" if layout == "NCHW" else "HWIO"
            if not pre_computed:  # kernel tensor is raw tensor, do strict check
                kernel = pack_filter(
                    kernel,
                    kernel_layout,
                    out_channel_chunks,
                    out_channel_block,
                    out_channel_tail,
                    CI,
                    in_channel_chunks,
                    in_channel_block,
                    in_channel_tail,
                    KH,
                    KW,
                )
            else:
                # Pre-transformed kernels are always in HWIO-like (alpha, alpha, I, O).
                kernel = pack_filter(
                    kernel,
                    "HWIO",
                    out_channel_chunks,
                    out_channel_block,
                    out_channel_tail,
                    CI,
                    in_channel_chunks,
                    in_channel_block,
                    in_channel_tail,
                    alpha,
                    alpha,
                )

    # From here on, data/kernel are guaranteed to be in 5-D blocked form.
    if layout == "NCHW":
        N, DCI, H, W, CB = get_const_tuple(data.shape)
    else:
        N, H, W, DCI, CB = get_const_tuple(data.shape)
    if not pre_computed:  # kernel tensor is raw tensor, do strict check
        if layout == "NCHW":
            CO, CI, KH, KW, COB = get_const_tuple(kernel.shape)
        else:
            KH, KW, CI, CO, COB = get_const_tuple(kernel.shape)
        alpha = KW + tile_size - 1
        assert HSTR == 1 and WSTR == 1 and KH == KW
    else:
        alpha, _, CI, CO, COB = get_const_tuple(kernel.shape)
        KH = KW = alpha + 1 - tile_size
        assert HSTR == 1 and WSTR == 1 and dilation_h == 1 and dilation_w == 1

    # Only the batch dimension may be dynamic.
    if isinstance(N, tvm.tir.Any):
        N = tvm.te.size_var("n")

    if not isinstance(H, int) or not isinstance(W, int):
        raise RuntimeError(
            "adreno winograd conv2d doesn't support dynamic input\
                           height or width."
        )

    pt, pl, pb, pr = nn.get_pad_tuple(padding, (KH, KW))
    if layout == "NCHW":
        data_pad = nn.pad(data, (0, 0, pt, pl, 0), (0, 0, pb, pr, 0), name="data_pad")
    else:
        data_pad = nn.pad(data, (0, pt, pl, 0, 0), (0, pb, pr, 0, 0), name="data_pad")

    r = KW
    m = tile_size
    # A: inverse transform, B: data transform, G: kernel transform matrices.
    A, B, G = winograd_transform_matrices(m, r, data.dtype)

    # Output spatial extents and the number of winograd tiles per image.
    H = (H + pt + pb - KH) // HSTR + 1
    W = (W + pl + pr - KW) // WSTR + 1
    nH, nW = (H + m - 1) // m, (W + m - 1) // m
    P = N * nH * nW if isinstance(N, int) else nH * nW

    # transform kernel: U[eps, nu, ci, co, cob] = sum_kh_kw G * kernel * G^T
    if not pre_computed:
        r_kh = te.reduce_axis((0, KH), name="r_kh")
        r_kw = te.reduce_axis((0, KW), name="r_kw")
        if layout == "NCHW":
            kernel_pack = te.compute(
                (alpha, alpha, CI, CO, COB),
                lambda eps, nu, ci, co, cob: te.sum(
                    kernel[co][ci][r_kh][r_kw][cob] * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
                ),
                name="kernel_pack",
            )
        else:
            kernel_pack = te.compute(
                (alpha, alpha, CI, CO, COB),
                lambda eps, nu, ci, co, cob: te.sum(
                    kernel[r_kh][r_kw][ci][co][cob] * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
                ),
                name="kernel_pack",
            )
    else:
        kernel_pack = kernel

    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod

    if layout == "NCHW":
        N, CI, _, _, CB = get_const_tuple(data.shape)
    else:
        N, _, _, CI, CB = get_const_tuple(data.shape)

    # pack input tile: gather each alpha x alpha input window per output tile
    if layout == "NCHW":
        input_tile = te.compute(
            (alpha, alpha, CI, P, CB),
            lambda eps, nu, c, p, cb: data_pad[idxdiv(p, (nH * nW))][c][
                idxmod(idxdiv(p, nW), nH) * m + eps
            ][idxmod(p, nW) * m + nu][cb],
            name="d",
        )
    else:
        input_tile = te.compute(
            (alpha, alpha, CI, P, CB),
            lambda eps, nu, c, p, cb: data_pad[idxdiv(p, (nH * nW))][
                idxmod(idxdiv(p, nW), nH) * m + eps
            ][idxmod(p, nW) * m + nu][c][cb],
            name="d",
        )

    # transform data: V = B^T * d * B
    r_a = te.reduce_axis((0, alpha), "r_a")
    r_b = te.reduce_axis((0, alpha), "r_b")  # was mis-named "r_a"
    data_pack = te.compute(
        (P, CI, alpha, alpha, CB),
        lambda p, ci, eps, nu, cb: te.sum(
            input_tile[r_a][r_b][ci][p][cb] * B[r_a][eps] * B[r_b][nu], axis=[r_a, r_b]
        ),
        name="data_pack",
    )

    # repack transformed data into (alpha, alpha, C, P, CB) for the batched gemm
    data_pack_trans = te.compute(
        (alpha, alpha, CI, P, CB),
        lambda eps, nu, c, p, cb: data_pack[p][c][eps][nu][cb],
        name="data_pack_trans",
    )

    # do batch gemm: one (CO x CI) x (CI x P) matmul per (eps, nu) position
    ci = te.reduce_axis((0, CI), name="ci")
    cb = te.reduce_axis((0, CB), name="cb")
    bgemm = te.compute(
        (alpha, alpha, CO, P, COB),
        lambda eps, nu, co, p, cob: te.sum(
            (
                kernel_pack[eps][nu][ci * CB + cb][co][cob] * data_pack_trans[eps][nu][ci][p][cb]
            ).astype(out_dtype),
            axis=[ci, cb],
        ),
        name="bgemm",
    )

    # inverse transform: Y = A^T * bgemm * A, yielding the m x m output tile
    r_a = te.reduce_axis((0, alpha), "r_a")
    r_b = te.reduce_axis((0, alpha), "r_b")  # was mis-named "r_a"
    inverse = te.compute(
        (CO, P, m, m, COB),
        lambda co, p, vh, vw, cob: te.sum(
            bgemm[r_a][r_b][co][p][cob] * (A[r_a][vh] * A[r_b][vw]).astype(out_dtype),
            axis=[r_a, r_b],
        ),
        name="inverse",
    )

    # output: scatter tiles back; unpack to 4-D if the caller provided 4-D input
    if layout == "NCHW":
        if convert_from4d and autotvm.GLOBAL_SCOPE.in_tuning is False:
            output = te.compute(
                (N, out_channels, H, W),
                lambda n, c, h, w: inverse[c // CB][n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m)][
                    idxmod(h, m)
                ][idxmod(w, m)][c % CB].astype(out_dtype),
                name="output",
                tag="dummy_compute_at",
            )
        else:
            output = te.compute(
                (N, CO, H, W, COB),
                lambda n, co, h, w, cob: inverse[co][
                    n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m)
                ][idxmod(h, m)][idxmod(w, m)][cob].astype(out_dtype),
                name="output",
                tag="dummy_compute_at",
            )
    else:
        if convert_from4d and autotvm.GLOBAL_SCOPE.in_tuning is False:
            output = te.compute(
                (N, H, W, out_channels),
                lambda n, h, w, c: inverse[c // CB][n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m)][
                    idxmod(h, m)
                ][idxmod(w, m)][c % CB].astype(out_dtype),
                name="output",
                tag="dummy_compute_at",
            )
        else:
            output = te.compute(
                (N, H, W, CO, COB),
                lambda n, h, w, co, cob: inverse[co][
                    n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m)
                ][idxmod(h, m)][idxmod(w, m)][cob].astype(out_dtype),
                name="output",
                tag="dummy_compute_at",
            )

    # FLOP count is only meaningful for a static batch dimension.
    if isinstance(N, int):
        cfg.add_flop(2 * N * CO * COB * H * W * CI * CB * KH * KW)

    return output
def schedule_conv2d_winograd_impl(cfg, outs, tag, pre_computed=False):
    """Create a schedule, applying the winograd schedule to every op whose tag matches."""
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _visit(op):
        # Only ops carrying the requested winograd tag get the full schedule.
        if op.tag == tag:
            schedule_conv2d_winograd(cfg, sch, op.output(0), pre_computed=pre_computed)

    traverse_inline(sch, outs[0].op, _visit)
    return sch
def schedule_conv2d_winograd(cfg, s, output, pre_computed):
    """Schedule winograd template.

    Walks the compute graph declared by ``conv2d_winograd_comp`` from the
    output backwards, then schedules each stage: data transform, optional
    kernel transform, batched gemm, inverse transform, and final output.

    Parameters
    ----------
    cfg : ConfigEntity
        The autotvm config; split knobs are defined and applied here.
    s : Schedule
        The schedule to mutate in place.
    output : tvm.te.Tensor
        The tensor tagged as the winograd conv2d output.
    pre_computed : bool
        True if the kernel transform was done ahead of time (no kernel_pack
        compute to schedule).
    """
    # Recover the intermediate stages by walking input_tensors backwards
    # from the output (names mirror conv2d_winograd_comp).
    inverse = s[output].op.input_tensors[0]
    bgemm, A = s[inverse].op.input_tensors
    kernel_pack, data_pack_trans = s[bgemm].op.input_tensors
    data_pack = s[data_pack_trans].op.input_tensors[0]
    input_tile, B = s[data_pack].op.input_tensors
    pad_data = s[input_tile].op.input_tensors[0]

    # data transform
    s[B].compute_inline()
    s[A].compute_inline()

    # probably will improve real topology execution
    if autotvm.GLOBAL_SCOPE.in_tuning:
        # Padding to texture
        AA = s.cache_read(pad_data, get_texture_storage(pad_data.shape), [input_tile])
        bind_data_copy(s[AA])

    s[input_tile].compute_inline()

    # Compute the data transform into a local cache, then flatten and bind
    # the packed output over 128-thread blocks with the channel block vectorized.
    OL = s.cache_write(data_pack, "local")
    c, p, eps, nu, cb = s[data_pack].op.axis
    fused = s[data_pack].fuse(c, p, eps, nu)
    bx, tx = s[data_pack].split(fused, 128)
    s[data_pack].vectorize(cb)
    s[data_pack].bind(bx, te.thread_axis("blockIdx.x"))
    s[data_pack].bind(tx, te.thread_axis("threadIdx.x"))

    # Fully unroll the small alpha x alpha transform in the local stage.
    _, _, eps, nu, cb = s[OL].op.axis
    r_a, r_b = s[OL].op.reduce_axis
    s[OL].unroll(eps)
    s[OL].unroll(nu)
    s[OL].unroll(r_a)
    s[OL].unroll(r_b)
    s[OL].vectorize(cb)
    s[OL].compute_at(s[data_pack], tx)
    s[data_pack].set_scope(get_texture_storage(data_pack.shape))
    s[data_pack_trans].compute_inline()

    # transform kernel
    if not pre_computed:
        kernel, G = s[kernel_pack].op.input_tensors
        eps, nu, ci, co, cob = s[kernel_pack].op.axis
        if autotvm.GLOBAL_SCOPE.in_tuning:
            # skip this part during tuning to make records accurate
            # this part will be pre-computed during pre-compute optimization pass
            s[G].pragma(s[G].op.axis[0], "debug_skip_region")
            s[kernel_pack].pragma(eps, "debug_skip_region")
        else:
            s[G].compute_inline()
            r_a, r_b = s[kernel_pack].op.reduce_axis
            for axis in [eps, nu, r_a, r_b]:
                s[kernel_pack].unroll(axis)

            fused = s[kernel_pack].fuse(ci, co)
            bb, tt = s[kernel_pack].split(fused, 128)
            s[kernel_pack].reorder(bb, tt, eps, nu, r_a, r_b, cob)
            s[kernel_pack].vectorize(cob)
            s[kernel_pack].bind(bb, te.thread_axis("blockIdx.x"))
            s[kernel_pack].bind(tt, te.thread_axis("threadIdx.x"))
    else:
        kernel = kernel_pack

    if isinstance(kernel.op, tvm.te.ComputeOp) and "filter_pack" in kernel.op.tag:
        # manage scheduling of datacopy
        pack_data = pad_data.op.input_tensors[0]
        bind_data_copy(s[pack_data])
        bind_data_copy(s[kernel])
    elif isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()
    s[pad_data].compute_inline()

    ##### space definition begin #####
    cfg.define_knob("auto_unroll_max_step", [0, 4, 16])
    b1, b2, y, x, cb = s[bgemm].op.axis
    rcc = s[bgemm].op.reduce_axis[0]
    alpha = get_const_int(b1.dom.extent)

    cfg.define_split(
        "tile_y", y, num_outputs=3, filter=lambda entry: entry.size[2] <= 64 and entry.size[1] <= 16
    )

    # Largest divisor of P in [1, 4]; used as a lower bound for the vthread split.
    min_x_div = 1
    for bn in range(4, 0, -1):
        if bgemm.shape[3] % bn == 0:
            min_x_div = bn
            break

    cfg.define_split(
        "tile_x",
        x,
        num_outputs=3,
        filter=lambda entry: entry.size[2] <= 64
        and entry.size[1] >= min_x_div
        and entry.size[1] <= 16,
    )
    cfg.define_split("tile_rc", rcc, num_outputs=2)
    # Keep the thread-block size within a reasonable GPU range.
    cfg.multi_filter(
        filter=lambda entity: 32 <= (entity["tile_y"].size[2] * entity["tile_x"].size[2]) < 1024
    )
    ##### space definition end #####

    # batch gemm
    OL = s.cache_write(bgemm, "local")
    # Cache the kernel through texture memory during tuning, or for 1x1
    # packed filters in real execution.
    if (
        autotvm.GLOBAL_SCOPE.in_tuning
        or isinstance(kernel.op, tvm.te.ComputeOp)
        and "filter_pack" in kernel.op.tag
        and kernel.shape[2] == 1
        and kernel.shape[3] == 1
    ):
        BB = s.cache_read(kernel_pack, get_texture_storage(kernel_pack.shape), [OL])
        bind_data_copy(s[BB])

    by = s[bgemm].fuse(b1, b2, y)

    # tile and bind spatial axes
    bgemm_scope, by = s[bgemm].split(by, nparts=1)
    by, vy, ty = cfg["tile_y"].apply(s, bgemm, by)
    bx, vx, tx = cfg["tile_x"].apply(s, bgemm, x)
    s[bgemm].bind(by, te.thread_axis("blockIdx.y"))
    s[bgemm].bind(bx, te.thread_axis("blockIdx.x"))
    s[bgemm].bind(vy, te.thread_axis("vthread"))
    s[bgemm].bind(vx, te.thread_axis("vthread"))
    s[bgemm].bind(ty, te.thread_axis("threadIdx.y"))
    s[bgemm].bind(tx, te.thread_axis("threadIdx.x"))
    s[bgemm].reorder(bgemm_scope, by, bx, vy, vx, ty, tx, cb)
    s[bgemm].vectorize(cb)
    s[bgemm].set_scope(get_texture_storage(bgemm.shape))

    # tile reduction axes
    s[OL].compute_at(s[bgemm], tx)
    b1, b2, y, x, cb = s[OL].op.axis
    (rcc, rcb) = s[OL].op.reduce_axis
    b = s[OL].fuse(b1, b2)
    s[OL].reorder(b, y, x, rcc, rcb, cb)
    # s[OL].unroll(rcb)
    s[OL].pragma(rcb, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[OL].pragma(rcb, "unroll_explicit", True)
    s[OL].vectorize(cb)

    # schedule inverse, output and fusion
    # When fused with a following op (e.g. bias add), the winograd output is
    # computed locally and the fused op becomes the real output.
    if output.op in s.outputs:
        OL = None
    else:
        OL = output
        s[OL].set_scope("local")
        output = s.outputs[0]

    # 4 axes -> unpacked 4-D output (no channel-block axis to vectorize).
    if len(s[output].op.axis) == 4:
        n, co, h, w = s[output].op.axis
        cb = None
    else:
        n, co, h, w, cb = s[output].op.axis
    inverse_scope, n = s[output].split(n, nparts=1)

    fused = s[output].fuse(n, co, h, w)
    bb, tt = s[output].split(fused, 128)
    if cb is not None:
        s[output].reorder(bb, tt, cb)
        s[output].vectorize(cb)
    s[output].bind(bb, te.thread_axis("blockIdx.x"))
    s[output].bind(tt, te.thread_axis("threadIdx.x"))

    if OL is not None:
        s[OL].compute_at(s[output], tt)

    # Inverse transform: unroll the small tile loops, vectorize channel block,
    # and compute at the output's thread axis.
    co, p, vh, vw, cb = s[inverse].op.axis
    r_a, r_b = s[inverse].op.reduce_axis
    for axis in [vh, vw, r_a, r_b]:
        s[inverse].unroll(axis)
    s[inverse].vectorize(cb)
    s[inverse].compute_at(s[output], tt)

    return s
| 18,204 | 33.875479 | 100 | py |
tvm | tvm-main/python/tvm/topi/bifrost/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""conv2d schedule on ARM Mali (Bifrost) GPU"""
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from .gemm import decl_winograd_gemm, schedule_gemm
from .transforms import tile_and_bind, tile_and_bind3d
from ..utils import traverse_inline, get_const_int, get_const_tuple
from .. import nn
from ..nn.winograd_util import winograd_transform_matrices
# reuse some compute declarations from ARM CPU
from ..arm_cpu.conv2d_spatial_pack import conv2d_spatial_pack_nchw
@autotvm.register_topi_compute("conv2d_nchw_spatial_pack.bifrost")
def conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Conv2d compute for Bifrost GPUs in NCHW layout via spatial packing.

    Delegates to the shared ARM-CPU spatial-pack declaration with a
    three-level tiling scheme.

    Parameters
    ----------
    cfg : ConfigEntity
        The config for this template.
    data : tvm.te.Tensor
        4-D input with shape [batch, in_channel, in_height, in_width].
    kernel : tvm.te.Tensor
        4-D filter with shape [num_filter, in_channel, filter_height,
        filter_width], or the pre-packed 5-D form [num_filter_chunk,
        in_channel, filter_height, filter_width, num_filter_block].
    strides : list of two ints
        [stride_height, stride_width].
    padding : list of two ints
        [pad_height, pad_width].
    dilation : list of two ints
        [dilation_height, dilation_width].
    out_dtype : str
        The output type; used for mixed precision.

    Returns
    -------
    tvm.te.Tensor
        4-D output with shape [batch, out_channel, out_height, out_width].
    """
    conv = conv2d_spatial_pack_nchw(
        cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile=3
    )
    return conv
@autotvm.register_topi_schedule("conv2d_nchw_spatial_pack.bifrost")
def schedule_conv2d_nchw_spatial_pack(cfg, outs):
    """TOPI schedule callback for conv2d

    Parameters
    ----------
    cfg: ConfigEntity
        The configuration of this template
    outs: Array of Tensor
        The computation graph description of convolution2d
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for conv2d
    """
    s = te.create_schedule([x.op for x in outs])

    def _callback(op):
        # schedule conv2d
        if "spatial_conv2d_output" in op.tag:
            output = op.output(0)
            conv = op.input_tensors[0]

            # Walk back to the padded data stage and inline it.
            data_vec = conv.op.input_tensors[0]
            data_pad = data_vec.op.input_tensors[0]
            s[data_pad].compute_inline()

            # The kernel may or may not be pre-packed; unwrap one level if
            # the packing stage ("kernel_vec") is present.
            kernel_vec = conv.op.input_tensors[1]
            if kernel_vec.op.name == "kernel_vec":
                kernel = kernel_vec.op.input_tensors[0]
            else:
                kernel = kernel_vec
            # Inline a dilation stage so it fuses into the packing/compute.
            if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
                s[kernel].compute_inline()

            _schedule_spatial_pack(cfg, s, output, conv, data_vec, kernel_vec)

    traverse_inline(s, outs[0].op, _callback)
    return s
def _schedule_spatial_pack(cfg, s, output, conv, data_vec, kernel_vec):
    """schedule the spatial packing for conv2d

    Schedules the four stages of the spatial-pack template: padding,
    data packing, kernel packing, the convolution itself, and the output.
    The tile sizes come from cfg knobs defined by the compute declaration.
    """
    data = s[data_vec].op.input_tensors[0]

    max_unroll = 16
    vec_size = [1, 2, 4, 8, 16]
    # get tunable parameters (they are defined in compute)
    BC, TC, VC = cfg["tile_co"].size
    BH, TH, VH = cfg["tile_oh"].size
    BW, TW, VW = cfg["tile_ow"].size

    # schedule padding
    if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
        data_pad = data
        s[data_pad].compute_inline()

    # schedule data packing
    # The undilated variant carries two extra unit axes from the dilation
    # decomposition, hence the different axis unpacking.
    if isinstance(data_vec.op, te.tensor.ComputeOp) and data_vec.op.name == "data_vec_undilated":
        _, h, w, ci, _, _, vh, vw = s[data_vec].op.axis
    else:
        _, h, w, ci, vh, vw = s[data_vec].op.axis
    tile_and_bind3d(s, data_vec, h, w, ci, 1)
    # Unroll the small inner tile loops only when they are short enough.
    if vh.dom.extent.value < max_unroll:
        s[data_vec].unroll(vh)
    if vw.dom.extent.value < max_unroll:
        s[data_vec].unroll(vw)

    # Schedule the kernel packing copy (skipped during tuning, where the
    # packed kernel is a placeholder).
    if isinstance(kernel_vec.op, tvm.te.ComputeOp) and kernel_vec.name == "kernel_vec":
        if not autotvm.GLOBAL_SCOPE.in_tuning:
            max_threads = tvm.target.Target.current(allow_none=False).max_num_threads
            co, ci, kh, kw, vc = s[kernel_vec].op.axis
            fused = s[kernel_vec].fuse(co, ci, kh, kw, vc)
            fused, vec = s[kernel_vec].split(fused, VC)
            bb, tt = s[kernel_vec].split(fused, max_threads)
            s[kernel_vec].bind(bb, te.thread_axis("blockIdx.x"))
            s[kernel_vec].bind(tt, te.thread_axis("threadIdx.x"))
            if VC in vec_size:
                s[kernel_vec].vectorize(vec)

    # schedule convolution
    n, c, h, w, vh, vw, vc = s[conv].op.axis
    kc, kh, kw = s[conv].op.reduce_axis

    cfg["reorder_0"].apply(s, conv, [n, c, h, w, kc, kh, kw, vh, vw, vc])
    tile_and_bind3d(s, conv, c, h, w, TC, TH, TW)

    # Annotate (unroll/vectorize) the reduction and spatial micro-kernel axes
    # according to the tuned config.
    cfg["ann_reduce"].apply(
        s,
        conv,
        [kh, kw],
        axis_lens=[get_const_int(kernel_vec.shape[2]), get_const_int(kernel_vec.shape[3])],
        max_unroll=max_unroll,
    )
    cfg["ann_spatial"].apply(
        s,
        conv,
        [vh, vw, vc],
        axis_lens=[VH, VW, VC],
        max_unroll=max_unroll,
        vec_size=vec_size,
        cfg=cfg,
    )

    # schedule output
    if output.op not in s.outputs:  # has bias
        s[output].compute_inline()
        output = s.outputs[0]

    _, co, oh, ow = s[output].op.axis
    tile_and_bind3d(s, output, co, oh, ow, TC, TH, TW)

    return s
@autotvm.register_topi_compute("conv2d_nchw_winograd.bifrost")
def conv2d_nchw_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Winograd-based conv2d compute for Bifrost in NCHW layout.

    Thin wrapper that forwards all arguments to the winograd declaration.
    """
    winograd_out = _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype)
    return winograd_out
@autotvm.register_topi_schedule("conv2d_nchw_winograd.bifrost")
def schedule_conv2d_nchw_winograd(cfg, outs):
    """Build the schedule for every winograd conv2d op found in *outs*."""
    sch = te.create_schedule([tensor.op for tensor in outs])

    def _visit(op):
        # Only the op tagged as the winograd output needs explicit scheduling.
        if "winograd_conv2d_output" in op.tag:
            _schedule_winograd(cfg, sch, op)

    traverse_inline(sch, outs[0].op, _visit)
    return sch
def _decl_winograd_kernel_transform(kernel, tile_size, G):
    """Declare a Winograd kernel transform
    This exists separately to allow for precomputation
    The precomputation will most often happen on CPU

    Parameters
    ----------
    kernel : tvm.te.Tensor
        The kernel to transform
    tile_size : int
        The size of the tile to use for the Winograd filter
        (currently unused here; alpha is taken from G's shape)
    G : tvm.te.Tensor
        The Winograd kernel-transform matrix, shape (alpha, KH)

    Returns
    -------
    U : tvm.te.Tensor
        Transformed kernel, shape (alpha, alpha, K_round, C)
    """
    CO, CI, KH, KW = [get_const_int(x) for x in kernel.shape]
    # Only support 32 bit floats
    out_dtype = "float32"

    alpha = G.shape[0]
    K = CO
    C = CI

    def upround(x, align):
        # Round x up to the nearest multiple of align.
        return (x + align - 1) // align * align

    ALIGN = 16
    K_round = upround(K, ALIGN)

    # Padded Kernel [K_round, C, KH, KW]
    # Pad the number of kernels to multiple of ALIGN
    padded_kernel = te.compute(
        (K_round, C, KH, KW),
        lambda k, c, h, w: tvm.tir.if_then_else(
            k < K, kernel[k][c][h][w], tvm.tir.const(0, out_dtype)
        ),
        name="padded_kernel",
    )

    # U [alpha, alpha, K_round, C]
    # Perform the kernel transform: U = G * kernel * G^T
    r_kh = te.reduce_axis((0, KH), "r_kh")
    r_kw = te.reduce_axis((0, KW), "r_kw")
    U = te.compute(
        (alpha, alpha, K_round, C),
        lambda eps, nu, k, c: te.sum(
            padded_kernel[k][c][r_kh][r_kw] * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
        ),
        name="U",
    )

    return U
def _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype, tile_size=2):
    """Declare a winograd convolution - only tile_size=2 is currently supported

    Parameters
    ----------
    cfg : ConfigEntity
        The autotvm config for this template.
    data : tvm.te.Tensor
        4-D input in NCHW layout.
    kernel : tvm.te.Tensor
        Either a raw 4-D OIHW kernel with KH == 3, or a pre-transformed
        4-D (H_CAT, W_CAT, CO, CI) kernel.
    strides, padding, dilation
        Standard conv2d parameters; only stride 1 and 3x3 kernels supported.
    out_dtype : str
        Output dtype.
    tile_size : int
        Winograd output tile size (only 2 is supported).

    Returns
    -------
    output : tvm.te.Tensor
        4-D output in NCHW layout, tagged "winograd_conv2d_output".
    """
    N, CI, IH, IW = get_const_tuple(data.shape)
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation

    # A raw kernel (spatial dim 3) needs runtime transform; anything else is
    # assumed to be already winograd-transformed.
    if int(kernel.shape[2]) == 3:
        if dilation_h != 1 or dilation_w != 1:
            kernel = nn.dilate(kernel, (1, 1, dilation_h, dilation_w))
        pre_computed = False
        CO, _, KH, KW = get_const_tuple(kernel.shape)
    else:
        assert (dilation_h, dilation_w) == (1, 1), "Does not support dilation"
        pre_computed = True
        H_CAT, W_CAT, CO, CI = get_const_tuple(kernel.shape)
        KH, KW = H_CAT - tile_size + 1, W_CAT - tile_size + 1
    HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)

    pt, pl, pb, pr = nn.get_pad_tuple(padding, (KH, KW))

    assert KH == 3 and KW == 3 and HSTR == 1 and WSTR == 1
    data_pad = nn.pad(data, (0, 0, pt, pl), (0, 0, pb, pr), name="data_pad")

    r = KW
    m = tile_size
    alpha = m + r - 1
    # A: inverse transform, B: data transform, G: kernel transform matrices.
    A, B, G = winograd_transform_matrices(m, r, out_dtype)

    K = CO
    C = CI

    # Output spatial extents and number of winograd tiles.
    H = (IH + pt + pb - 3) // HSTR + 1
    W = (IW + pl + pr - 3) // WSTR + 1
    nH, nW = (H + m - 1) // m, (W + m - 1) // m
    P = N * nH * nW

    def upround(x, align):
        # Round x up to the nearest multiple of align.
        return (x + align - 1) // align * align

    ALIGN = 16
    P_round = upround(P, ALIGN)
    K_round = upround(K, ALIGN)

    # CONFIG

    cfg.define_knob("data_transform_wgx", [1, 2, 4, 8, 16, 32, 64])
    cfg.define_knob("data_transform_wgy", [1, 2, 4, 8, 16, 32, 64])

    # Pack input tile
    input_tile = te.compute((N, C, H + 2, W + 2), lambda n, c, h, w: data_pad[n][c][h][w], name="d")

    if autotvm.GLOBAL_SCOPE.in_tuning:
        # NOTE(review): assumes the "tile_k" split is already defined in cfg
        # (presumably by the gemm template) before this point — TODO confirm.
        VC = cfg["tile_k"].size[-1]
        kvshape = (KH + tile_size - 1, KW + tile_size - 1, tvm.tir.indexdiv(CO, VC), CI, VC)
        U = tvm.te.placeholder(kvshape, kernel.dtype, name="U")
    else:
        if pre_computed:
            U = kernel
        else:
            U = _decl_winograd_kernel_transform(kernel, tile_size, G)

    # V [alpha * alpha, C, P_round)
    # Perform the image transform
    r_eps = te.reduce_axis((0, alpha), "r_eps")
    r_nu = te.reduce_axis((0, alpha), "r_nu")
    V = te.compute(
        (alpha * alpha, C, P_round),
        lambda epsnu, c, b: te.sum(
            input_tile[b // (nH * nW)][c][b // nW % nH * m + r_eps][b % nW * m + r_nu]
            * B[r_eps][epsnu // alpha]
            * B[r_nu][epsnu % alpha],
            axis=[r_eps, r_nu],
        ),
        name="V",
    )

    # Winograd GEMM is a wrapper around batched GEMM to convert U to a 3D Tensor
    _, M = decl_winograd_gemm(cfg, U, V)

    # Y [K, P, m, m]
    # Winograd output transform
    r_eps = te.reduce_axis((0, alpha), "r_eps")
    r_nu = te.reduce_axis((0, alpha), "r_nu")
    Y = te.compute(
        (K, P, m, m),
        lambda k, b, vh, vw: te.sum(
            M[r_eps * alpha + r_nu][k][b] * A[r_eps][vh] * A[r_nu][vw], axis=[r_eps, r_nu]
        ),
        name="Y",
    )

    # Output [N, K, H, W]
    # Unpack back to NCHW format
    # The last term ensures alignment is not lost to bound inference
    output = te.compute(
        (N, K, H, W),
        lambda n, k, h, w: Y[k][n * nH * nW + (h // m) * nW + w // m][h % m][w % m]
        + tvm.tir.const(0, out_dtype) * M[(alpha * alpha) - 1][K_round - 1][P_round - 1],
        name="output",
        tag="winograd_conv2d_output",
    )

    return output
def _schedule_winograd(cfg, s, op):
    """Schedule Winograd convolution for Bifrost

    Walks the compute graph declared by ``_decl_winograd`` backwards from
    the tagged output op and schedules each stage: kernel transform, data
    packing/transform, batched GEMM, inverse transform, and the output.
    """
    # Get ops and tensors (names mirror _decl_winograd).
    output = op.output(0)

    Y = op.input_tensors[0]
    M, A = s[Y].op.input_tensors
    U_3D, V = s[M].op.input_tensors
    U = s[U_3D].op.input_tensors[0]
    d, B = s[V].op.input_tensors
    data_pad = s[d].op.input_tensors[0]

    # U is a ComputeOp only when the kernel transform happens at runtime
    # (a pre-computed kernel or tuning placeholder has no inputs to schedule).
    if isinstance(U.op, tvm.te.ComputeOp):
        padded_kernel, G = s[U].op.input_tensors
        kernel = s[padded_kernel].op.input_tensors[0]
        s[G].compute_inline()
        eps, _, _, _ = s[U].op.axis
        y, _, _, _ = s[padded_kernel].op.axis
        if not autotvm.GLOBAL_SCOPE.in_tuning:
            # Pad kernel
            y, x, ky, kx = s[padded_kernel].op.axis
            s[padded_kernel].unroll(ky)
            s[padded_kernel].unroll(kx)
            tile_and_bind(s, padded_kernel, y, x, 1, 8)

            # Transform kernel
            eps, nu, k, c = s[U].op.axis
            s[U].reorder(k, c, eps, nu)
            r_kh, r_kw = s[U].op.reduce_axis
            _ = [s[U].unroll(x) for x in [eps, nu, r_kh, r_kw]]
            yo, xo, yi, xi = tile_and_bind(s, U, k, c, 1, 4)

        # Dilation
        if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
            s[kernel].compute_inline()

    # Pad data
    s[data_pad].compute_inline()

    # Pack data
    n, c, h, w = s[d].op.axis
    w, wi = s[d].split(w, 4)
    s[d].unroll(wi)
    b = s[d].fuse(n, c)
    tile_and_bind3d(s, d, b, h, w, 1, 4, 2)

    # Transform data: stage the packed input through local memory.
    bIL_d = s.cache_read(d, "local", [V])

    s[B].compute_inline()
    epsnu, c, b = s[V].op.axis
    r_eps, r_nu = s[V].op.reduce_axis
    s[V].reorder(b, c, epsnu, r_nu, r_eps)
    _ = [s[V].unroll(x) for x in [epsnu, r_eps, r_nu]]
    # Work-group sizes come from the tuned knobs defined in _decl_winograd.
    yo, xo, yi, xi = tile_and_bind(
        s, V, b, c, cfg["data_transform_wgy"].val, cfg["data_transform_wgx"].val
    )

    s[bIL_d].compute_at(s[V], xi)
    n, c, h, w = s[bIL_d].op.axis
    s[bIL_d].unroll(h)
    s[bIL_d].vectorize(w)

    # Batched GEMM
    # Inline the 4D -> 3D tensor transform on the kernel
    s[U_3D].compute_inline()
    U_transform, V_transform = schedule_gemm(
        cfg, s, U_3D, V, M, batched=True, schedule_transforms=True
    )

    # Inverse transform: cache the GEMM result locally and write Y via a
    # local staging buffer.
    CR_M = s.cache_read(M, "local", [Y])
    CW_Y = s.cache_write(Y, "local")

    s[A].compute_inline()
    k, b, vh, vw = s[Y].op.axis
    fused = s[Y].fuse(vh, vw)
    s[Y].vectorize(fused)
    yo, xo, yi, xi = tile_and_bind(s, Y, k, b, 1, 4)

    s[CR_M].compute_at(s[Y], xi)
    k, b, epsnu = s[CR_M].op.axis
    s[CR_M].unroll(k)

    s[CW_Y].compute_at(s[Y], xi)
    k, b, vh, vw = s[CW_Y].op.axis
    r_eps, r_nu = s[CW_Y].op.reduce_axis
    _ = [s[CW_Y].unroll(x) for x in [vh, vw, r_eps, r_nu]]

    # Schedule output and fusion (if fused with e.g. bias add, the fused op
    # becomes the real output and the winograd output is inlined).
    if output.op not in s.outputs:
        s[output].compute_inline()
        output = s.outputs[0]

    _, k, h, w = s[output].op.axis
    tile_and_bind3d(s, output, k, h, w, 1, 2, 2)
##### REGISTER ALTER OP LAYOUT #####
@nn.conv2d_alter_layout.register("bifrost")
def _alter_conv2d_layout(attrs, inputs, tinfos, out_type):
    """Alter conv2d layouts for the Bifrost target.

    For the spatial-pack template, switches the kernel layout to the packed
    OIHW{VC}o form. For the winograd template, pre-transforms the weights
    and rewrites the call to the without-weight-transform op. Returns None
    when no alteration applies (non-AutoTVM impl or fallback config).
    """
    target = tvm.target.Target.current(allow_none=False)
    dispatch_ctx = autotvm.task.DispatchContext.current

    _, outs = relay.backend.te_compiler.select_implementation(
        relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target
    )
    workload = autotvm.task.get_workload(outs)
    if workload is None:
        # The best implementation is not an AutoTVM template,
        # we then assume it's not necessary to alter this op.
        return None

    cfg = dispatch_ctx.query(target, workload)
    if cfg.is_fallback:  # if is fallback, clear query cache and return None
        autotvm.task.clear_fallback_cache(target, workload)
        return None

    topi_tmpl = workload[0]
    new_attrs = {k: attrs[k] for k in attrs.keys()}

    strides = attrs.get_int_tuple("strides")
    padding = attrs.get_int_tuple("padding")
    dilation = attrs.get_int_tuple("dilation")
    data_layout = attrs["data_layout"]
    kernel_layout = attrs["kernel_layout"]
    data, kernel = tinfos
    out_dtype = out_type.dtype

    idxd = tvm.tir.indexdiv

    if topi_tmpl == "conv2d_nchw_spatial_pack.bifrost":
        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        N, CI, H, W = get_const_tuple(data.shape)
        CO, _, KH, KW = get_const_tuple(kernel.shape)
        # VC is the innermost output-channel block chosen by the tuner.
        VC = cfg["tile_co"].size[-1]

        new_attrs["kernel_layout"] = f"OIHW{VC}o"

        # Register the tuned config under the new (packed-kernel) workload
        # so dispatch finds it after the layout change.
        new_data = data
        new_kernel = te.placeholder((idxd(CO, VC), CI, KH, KW, VC), dtype=kernel.dtype)
        new_workload = autotvm.task.args_to_workload(
            [new_data, new_kernel, strides, padding, dilation, out_dtype],
            "conv2d_nchw_spatial_pack.bifrost",
        )
        dispatch_ctx.update(target, new_workload, cfg)

        return relay.nn.conv2d(*inputs, **new_attrs)
    if topi_tmpl == "conv2d_nchw_winograd.bifrost":
        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        N, CI, H, W = get_const_tuple(data.shape)
        CO, _, KH, KW = get_const_tuple(kernel.shape)
        tile_size = 2

        # Pre-transform the weights at compile time and reshape to the
        # (alpha, alpha, CO, CI) layout expected by _decl_winograd.
        weight_expr = inputs[1]
        weight_expr = relay.nn.contrib_conv2d_winograd_weight_transform(
            weight_expr, tile_size=tile_size
        )
        weight_expr = relay.reshape(
            weight_expr, newshape=(KH + tile_size - 1, KW + tile_size - 1, CO, CI)
        )

        new_attrs["tile_size"] = tile_size

        new_data = data
        new_kernel = te.placeholder((KH + tile_size - 1, KW + tile_size - 1, CO, CI), kernel.dtype)
        new_workload = autotvm.task.args_to_workload(
            [new_data, new_kernel, strides, padding, dilation, out_dtype],
            "conv2d_nchw_winograd.bifrost",
        )
        dispatch_ctx.update(target, new_workload, cfg)

        return relay.nn.contrib_conv2d_winograd_without_weight_transform(
            inputs[0], weight_expr, **new_attrs
        )

    return None
| 18,009 | 31.567812 | 100 | py |
tvm | tvm-main/python/tvm/topi/bifrost/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable
"""dense schedule on ARM Mali Biforst GPU"""
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import traverse_inline
@autotvm.register_topi_compute("dense.bifrost")
def dense(_, data, weight, bias=None, out_dtype=None):
    """Dense (fully connected) operator on Bifrost.

    Forwards to the generic topi dense declaration; the config argument is
    unused at declaration time.
    """
    result = nn.dense(data, weight, bias, out_dtype)
    return result
@autotvm.register_topi_schedule("dense.bifrost")
def schedule_dense(cfg, outs):
    """Schedule for dense operator.

    Parameters
    ----------
    cfg: ConfigEntity
        The config entity for this template
    outs: Array of Tensor
        The computation graph description of dense
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for dense.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])

    def _callback(op):
        if op.tag == "dense":
            # Vector widths supported by the vectorize step and the unroll cap.
            vec_size = [1, 2, 4, 8, 16]
            max_unroll = 32

            dense_out = op.output(0)
            output = outs[0]

            y, x = s[output].op.axis
            c = s[dense_out].op.reduce_axis[0]

            ##### space definition begin #####
            cfg.define_split("tile_y", y, num_outputs=3)
            cfg.define_split("tile_x", x, num_outputs=3)
            cfg.define_split("c_unroll", c, num_outputs=2, max_factor=64)

            # fallback support: seed from the mali/rk3399 reference tophub log
            if cfg.is_fallback:
                ref_log = autotvm.tophub.load_reference_log("mali", "rk3399", "dense.bifrost")
                cfg.fallback_with_reference_log(ref_log)
            ##### space definition end #####

            # If dense is the final op, compute it into a local cache so the
            # bound output stage can be tiled independently.
            if dense_out.op in s.outputs:
                dense_out = s.cache_write(output, "local")

            # Tile output and bind to GPU block/thread axes.
            by, ty, yi = cfg["tile_y"].apply(s, output, y)
            bx, tx, xi = cfg["tile_x"].apply(s, output, x)

            s[output].bind(by, te.thread_axis("blockIdx.y"))
            s[output].bind(bx, te.thread_axis("blockIdx.x"))
            s[output].bind(ty, te.thread_axis("threadIdx.y"))
            s[output].bind(tx, te.thread_axis("threadIdx.x"))

            if cfg["tile_y"].size[-1] < max_unroll:
                s[output].unroll(yi)
            if cfg["tile_x"].size[-1] in vec_size:
                s[output].vectorize(xi)
            s[dense_out].compute_at(s[output], tx)

            # Split the reduction and unroll its inner chunk.
            k = s[dense_out].op.reduce_axis[0]
            y, x = s[dense_out].op.axis
            k, k_unroll = cfg["c_unroll"].apply(s, dense_out, k)
            s[dense_out].reorder(k, k_unroll, y, x)
            s[dense_out].unroll(k_unroll)
            if cfg["tile_y"].size[-1] < max_unroll:
                s[dense_out].unroll(y)
            if cfg["tile_x"].size[-1] in vec_size:
                s[dense_out].vectorize(x)

    traverse_inline(s, outs[0].op, _callback)
    return s
def fuse_and_bind(s, tensor, axis=None, num_thread=None):
    """Collapse the given axes of *tensor* into one and map it onto the GPU.

    Falls back to all axes of the tensor when *axis* is not supplied.  The
    fused axis is split by *num_thread* and bound to blockIdx.x/threadIdx.x.

    NOTE(review): unlike transforms.fuse_and_bind, no default split factor
    is derived from the target when num_thread is None — callers must pass it.
    """
    stage = s[tensor]
    axes = axis if axis else stage.op.axis
    fused = stage.fuse(*axes)
    block_x, thread_x = stage.split(fused, num_thread)
    stage.bind(block_x, te.thread_axis("blockIdx.x"))
    stage.bind(thread_x, te.thread_axis("threadIdx.x"))
    return block_x, thread_x
| 4,031 | 34.681416 | 94 | py |
tvm | tvm-main/python/tvm/topi/bifrost/gemm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""GEMM schedules for Mali Bifrost"""
from tvm import te
from .transforms import tile_and_bind, tile_and_bind3d, interleave_transpose, transpose_interleave
from .. import utils
def decl_gemm(cfg, A, B):
    """Declare a single GEMM computation for Mali Bifrost GPUs

    Optionally splits the shared k axis into ``split_k_factor`` chunks,
    computing a batched GEMM over the chunks followed by a reduction,
    which exposes more parallelism for large k.

    Parameters
    ----------
    cfg : Config
        Schedule configuration
    A : tvm.te.Tensor
        2D Tensor, shape [n, k]
    B : tvm.te.Tensor
        2D Tensor, shape [k, m]

    Returns
    -------
    C : tvm.te.Tensor
        2D Tensor, shape [n, m]
    """
    # Tuning knobs consumed later by schedule_gemm / schedule_unrollable_gemm.
    cfg.define_knob("work_group_x", [1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 64])
    cfg.define_knob("work_group_y", [1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 64])
    cfg.define_knob("unroll_k_factor", [1, 2, 4])
    cfg.define_knob("A_interleave", [1, 4, 8, 16, 24, 32, 48, 64])
    cfg.define_knob("B_interleave", [1, 4, 8, 16, 32])
    cfg.define_knob("split_k_factor", [1, 4, 16])
    # Mutual k axis must be of equal extent
    assert utils.get_const_int(A.shape[1]) == utils.get_const_int(B.shape[0])
    n = A.shape[0]
    m = B.shape[1]
    k_size = utils.get_const_int(A.shape[1])
    unroll_gemm = cfg["split_k_factor"].val
    if unroll_gemm == 1:
        # No unrolling case must have the same set of tensors to keep scheduling consistent
        # Create identity tensors to take the place of A_unrolled, B_unrolled and R
        A_unrolled = te.compute((n, k_size), lambda i, j: A[i, j], name="A_unrolled")
        B_unrolled = te.compute((k_size, m), lambda i, j: B[i, j], name="B_unrolled")
        # Declare standard GEMM
        k = te.reduce_axis((0, A.shape[1]), name="k")
        C = te.compute(
            (n, m), lambda i, j: te.sum(A_unrolled[i, k] * B_unrolled[k, j], axis=k), name="C"
        )
        R = te.compute((n, m), lambda i, j: C[i, j], name="R")
    else:
        # NOTE(review): assumes split_k_factor divides k evenly; a remainder
        # would be silently dropped here — confirm the tuning space enforces this.
        unrolled_k_size = k_size // unroll_gemm
        # Unroll the two input matrices along the shared k axis
        A_unrolled = te.compute(
            (unroll_gemm, n, unrolled_k_size),
            lambda b, i, j: A[i][unrolled_k_size * b + j],
            name="A_unrolled",
        )
        B_unrolled = te.compute(
            (unroll_gemm, unrolled_k_size, m),
            lambda b, i, j: B[unrolled_k_size * b + i][j],
            name="B_unrolled",
        )
        # Declare a batched GEMM
        k = te.reduce_axis((0, unrolled_k_size), name="k")
        C = te.compute(
            (unroll_gemm, n, m),
            lambda b, i, j: te.sum(A_unrolled[b][i][k] * B_unrolled[b][k][j], axis=k),
            name="C",
        )
        # Then declare a reduction to reduce the sub matrices
        k = te.reduce_axis((0, unroll_gemm), name="k")
        R = te.compute((n, m), lambda i, j: te.sum(C[k][i][j], axis=k), name="R")
    return R
def decl_batched_gemm(cfg, A, B):
    """Declare a batched GEMM computation for Mali Bifrost GPUs

    Parameters
    ----------
    cfg : Config
        Schedule configuration
    A : tvm.te.Tensor
        3D Tensor, shape [b, n, k]
    B : tvm.te.Tensor
        3D Tensor, shape [b, k, m]

    Returns
    -------
    C : tvm.te.Tensor
        3D Tensor, shape [b, n, m]
    """
    # Mutual b and k axis must be of equal extent
    assert utils.get_const_int(A.shape[2]) == utils.get_const_int(B.shape[1])
    assert utils.get_const_int(A.shape[0]) == utils.get_const_int(B.shape[0])
    # Tuning knobs consumed by schedule_gemm.  Unlike decl_gemm there is no
    # split_k_factor knob, and A_interleave has a smaller candidate set.
    cfg.define_knob("work_group_x", [1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 64])
    cfg.define_knob("work_group_y", [1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 64])
    cfg.define_knob("unroll_k_factor", [1, 2, 4])
    cfg.define_knob("A_interleave", [1, 4, 8, 16, 32, 64])
    cfg.define_knob("B_interleave", [1, 4, 8, 16, 32])
    n = A.shape[1]
    m = B.shape[2]
    k_size = utils.get_const_int(A.shape[2])
    b_size = utils.get_const_int(A.shape[0])
    # Declare a batched GEMM
    k = te.reduce_axis((0, k_size), name="k")
    C = te.compute(
        (b_size, n, m), lambda b, i, j: te.sum(A[b][i][k] * B[b][k][j], axis=k), name="C"
    )
    return C
def decl_winograd_gemm(cfg, A, B):
    """Declare a winograd GEMM for Mali Bifrost GPUs

    Winograd uses a batched GEMM, but its first input tensor is 4D.  This
    helper flattens the two leading tile axes of ``A`` into a single batch
    axis and forwards the result to :func:`decl_batched_gemm`.

    Parameters
    ----------
    cfg : Config
        Schedule configuration
    A : tvm.te.Tensor
        4D Tensor, shape [a, a, n, k]
    B : tvm.te.Tensor
        4D Tensor, shape [a * a, k, m]

    Returns
    -------
    A_3D, C : tvm.te.Tensor
        The reshaped A tensor and the batched GEMM result.
    """
    tile = utils.get_const_int(A.shape[0])
    rows = utils.get_const_int(A.shape[2])
    cols = utils.get_const_int(A.shape[3])
    # Fold the (a, a) tile axes into one batch axis of extent a * a.
    A_3D = te.compute(
        (tile * tile, rows, cols),
        lambda b, i, j: A[b // tile][b % tile][i][j],
        name="A_3D",
    )
    return A_3D, decl_batched_gemm(cfg, A_3D, B)
def schedule_gemm(cfg, s, A, B, C, batched=False, schedule_transforms=True):
    """Schedule GEMM, single and batched

    Rewrites A and B into interleaved/transposed layouts, then tiles the
    output across the GPU grid with local cache stages for the operands
    and the accumulator.

    Parameters
    ----------
    cfg : Config
        Schedule configuration
    s : tvm.te.schedule.Schedule
        Operator schedule
    A : tvm.te.Tensor
        2D/3D Tensor, shape [n, k]/[b, n, k]
    B : tvm.te.Tensor
        2D/3D Tensor, shape [k, m]/[b, k, m]
    C : tvm.te.Tensor
        2D/3D Tensor, shape [n, m]/[b, n, m]
    batched : bool
        Whether the GEMM is batched
    schedule_transforms : bool
        Whether to also schedule the A/B layout-transform stages here.

    Returns
    -------
    trans_inter, inter_trans : te stages
        The cache-write stages of the A and B layout transforms.
    """
    # Fixed per-thread tile and warp shape; work-group shape and the k
    # unroll factor come from the tuning config.
    block_size_x = 4
    block_size_y = 4
    warp_size_x = 2
    warp_size_y = 2
    work_group_x = cfg["work_group_x"].val
    work_group_y = cfg["work_group_y"].val
    k_unroll = cfg["unroll_k_factor"].val
    # Batched tensors carry a leading batch axis, shifting the (y, x) indices.
    if not batched:
        y_index, x_index = (0, 1)
    else:
        y_index, x_index = (1, 2)
    trans_inter, A_transposed_interleaved = transpose_interleave(
        s, A, cfg["A_interleave"].val, y_index, x_index, [C], batched=batched
    )
    inter_trans, B_interleaved_transposed = interleave_transpose(
        s, B, cfg["B_interleave"].val, y_index, x_index, [C], batched=batched
    )
    if schedule_transforms:
        # Schedule A
        y, x = s[trans_inter].op.axis
        y, x, yi, xi = s[trans_inter].tile(y, x, 1, 8)
        s[trans_inter].unroll(yi)
        s[trans_inter].unroll(xi)
        tile_and_bind(s, trans_inter, y, x, 1, 4)
        # Schedule B
        y, x = s[inter_trans].op.axis
        xo, xi = s[inter_trans].split(x, 4)
        s[inter_trans].vectorize(xi)
        tile_and_bind(s, inter_trans, y, xo, 4, 4)
    # Schedule C
    CR_A = s.cache_read(A_transposed_interleaved, "local", [C])
    CR_B = s.cache_read(B_interleaved_transposed, "local", [C])
    CW_C = s.cache_write(C, "local")
    if not batched:
        y, x = s[C].op.axis
    else:
        z, y, x = s[C].op.axis
    # Per-thread output tile: unroll rows, vectorize columns.
    y, x, yt, xt = s[C].tile(y, x, block_size_y, block_size_x)
    s[C].unroll(yt)
    s[C].vectorize(xt)
    # Tile the global work space to generate 'square' warps -> 2x2 for warp size of 4
    y, x, wy, wx = s[C].tile(y, x, warp_size_y, warp_size_x)
    x = s[C].fuse(x, wy, wx)
    if not batched:
        yo, xo, yi, xi = tile_and_bind(s, C, y, x, work_group_y, work_group_x)
    else:
        # For batched GEMM bind batch to z axis
        zo, yo, xo, zi, yi, xi = tile_and_bind3d(s, C, z, y, x, 1, work_group_y, work_group_x)
    # Accumulate into the local cache stage at the innermost bound axis.
    s[CW_C].compute_at(s[C], xi)
    if not batched:
        y, x = s[CW_C].op.axis
    else:
        _, y, x = s[CW_C].op.axis
    y, x, yt, xt = s[CW_C].tile(y, x, block_size_y, block_size_x)
    k = s[CW_C].op.reduce_axis[0]
    s[CW_C].reorder(k, yt, xt)
    # Unroll the reduction by k_unroll and fully unroll the output tile.
    ko, ki = s[CW_C].split(k, k_unroll)
    s[CW_C].unroll(ki)
    s[CW_C].unroll(yt)
    s[CW_C].unroll(xt)
    # Local read cache for A: transposed access (j before i), vectorized load.
    if not batched:
        i, j = s[CR_A].op.axis
    else:
        _, i, j = s[CR_A].op.axis
    s[CR_A].reorder(j, i)
    s[CR_A].compute_at(s[CW_C], ki)
    s[CR_A].unroll(j)
    s[CR_A].vectorize(i)
    # Local read cache for B: vectorized along the inner axis.
    if not batched:
        i, j = s[CR_B].op.axis
    else:
        _, i, j = s[CR_B].op.axis
    s[CR_B].compute_at(s[CW_C], ki)
    s[CR_B].unroll(i)
    s[CR_B].vectorize(j)
    return trans_inter, inter_trans
def schedule_unrollable_gemm(cfg, s, A, B, C, R):
    """Schedule a GEMM that can be unrolled by a constant factor
    along its inner dimension

    Parameters
    ----------
    cfg : Config
        Schedule configuration
    s : tvm.te.schedule.Schedule
        Operator schedule
    A : tvm.te.Tensor
        2D/3D Tensor, shape [n, k]/[b, n, k]
    B : tvm.te.Tensor
        2D/3D Tensor, shape [k, m]/[b, k, m]
    C : tvm.te.Tensor
        2D/3D Tensor, shape [n, m]/[b, n, m]
    R : tvm.te.Tensor
        2D Tensor, shape [n, m]
    """
    # If the GEMM is 2D, no unrolling has taken place
    # Use non-batched GEMM schedule and inline identity matrices A, B and R
    if len(C.op.axis) == 2:
        s[A].compute_inline()
        s[B].compute_inline()
        schedule_gemm(cfg, s, A, B, C)
        s[R].compute_inline()
    # GEMM is 3D, use batched GEMM schedule, inline A and B and schedule R
    else:
        s[A].compute_inline()
        s[B].compute_inline()
        schedule_gemm(cfg, s, A, B, C, batched=True)
        # Schedule the final reduction over the split-k batch axis:
        # cache C locally, unroll the inner tiles and bind the rest to GPU.
        CR_C = s.cache_read(C, "local", [R])
        y, x = s[R].op.axis
        xo, xi = s[R].split(x, 4)
        k = s[R].op.reduce_axis[0]
        s[R].reorder(k, xi)
        ko, ki = s[R].split(k, 4)
        s[R].unroll(xi)
        s[R].unroll(ki)
        tile_and_bind(s, R, y, xo, 1, 2)
        s[CR_C].compute_at(s[R], ko)
        _, y, x = s[CR_C].op.axis
        s[CR_C].unroll(y)
        s[CR_C].vectorize(x)
def get_unrollable_gemm_ops(R):
    """Recover every GEMM stage tensor from the final reduction *R*.

    Walks the producer chain of ``R`` back through the batched GEMM stage
    to its two unrolled operands, so callers get all four tensors without
    re-deriving them from the declaration.

    Parameters
    ----------
    R : tvm.te.Tensor
        Reduced tensor, final stage of GEMM

    Returns
    -------
    A_unrolled : tvm.te.Tensor
        Matrix A unrolled along k
    B_unrolled : tvm.te.Tensor
        Matrix B unrolled along k
    C : tvm.te.Tensor
        Result of batched GEMM
    R : tvm.te.Tensor
        Reduction of C, result of unrollable GEMM
    """
    batched_gemm = R.op.input_tensors[0]
    lhs_unrolled, rhs_unrolled = batched_gemm.op.input_tensors
    return lhs_unrolled, rhs_unrolled, batched_gemm, R
| 11,006 | 28.119048 | 98 | py |
tvm | tvm-main/python/tvm/topi/bifrost/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""ARM Mali GPU specific declaration and schedules."""
from __future__ import absolute_import as _abs
from .gemm import *
from .conv2d import *
from .dense import *
from .depthwise_conv2d import *
| 1,037 | 38.923077 | 62 | py |
tvm | tvm-main/python/tvm/topi/bifrost/depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""depthwise_conv2d schedule on ARM Mali GPU"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from .. import utils
from .. import tag
def schedule_depthwise_conv2d_nchw(outs):
    """Schedule for depthwise_conv2d nchw forward.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of depthwise_conv2d
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for depthwise_conv2d nchw.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])

    def _schedule(pad_data, kernel, conv):
        # Schedule one depthwise conv stage plus its padding and output.
        raw_data = s[pad_data].op.input_tensors[0]
        if conv.op not in s.outputs:  # has bias or relu
            output = outs[0]
        else:  # no bias or relu
            output = conv

        def tile_and_bind3d(tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):
            """tile and bind 3d"""
            # Unspecified factors cascade from the previous axis.
            y_factor = y_factor or z_factor
            x_factor = x_factor or y_factor
            zo, zi = s[tensor].split(z, z_factor)
            yo, yi = s[tensor].split(y, y_factor)
            xo, xi = s[tensor].split(x, x_factor)
            s[tensor].bind(zo, te.thread_axis("blockIdx.z"))
            s[tensor].bind(zi, te.thread_axis("threadIdx.z"))
            s[tensor].bind(yo, te.thread_axis("blockIdx.y"))
            s[tensor].bind(yi, te.thread_axis("threadIdx.y"))
            s[tensor].bind(xo, te.thread_axis("blockIdx.x"))
            s[tensor].bind(xi, te.thread_axis("threadIdx.x"))
            # NOTE: returns (zo, zi, yo, yi, xo, xi) — a different order than
            # transforms.tile_and_bind3d, which returns outers before inners.
            return zo, zi, yo, yi, xo, xi

        # set tunable parameters: VW/VH grow while they evenly divide the
        # output width/height, capped at 4 and 2 respectively.
        VH = 1
        VW = 1
        num_thread = 4
        while utils.get_const_int(conv.shape[3]) % (VW * 2) == 0 and VW * 2 <= 4:
            VW = VW * 2
        while utils.get_const_int(conv.shape[2]) % (VH * 2) == 0 and VH * 2 <= 2:
            VH = VH * 2
        # float16 inputs get wider vectors and/or more threads.
        if raw_data.dtype == "float16":
            if utils.get_const_int(conv.shape[3]) % (VW * 2) == 0:
                VW *= 2
                num_thread *= 2
            else:
                num_thread *= 2
        # schedule padding
        _, c, y, x = s[pad_data].op.axis
        tile_and_bind3d(pad_data, c, y, x, num_thread, 1, 1)
        # schedule conv: fully unroll the small filter reduction axes.
        di, dj = s[conv].op.reduce_axis
        s[conv].unroll(di)
        s[conv].unroll(dj)
        _, c, y, x = s[output].op.axis
        y, x, yi, xi = s[output].tile(y, x, VH, VW)
        s[output].unroll(yi)
        s[output].vectorize(xi)
        _, _, _, _, _, ji = tile_and_bind3d(output, c, y, x, num_thread, 1, 1)
        # When conv feeds a fused bias/relu output, schedule conv at the
        # output's innermost bound axis.
        if conv.op not in s.outputs:
            _, c, y, x = s[conv].op.axis
            y, x, yi, xi = s[conv].tile(y, x, VH, VW)
            s[conv].unroll(yi)
            s[conv].vectorize(xi)
            s[conv].compute_at(s[output], ji)

    def traverse(op):
        """Internal traverse function"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(op.tag):
            if op not in s.outputs:
                s[op].compute_inline()
            for tensor in op.input_tensors:
                if tensor.op.input_tensors:
                    traverse(tensor.op)
        # schedule depthwise_conv2d
        if op.tag == "depthwise_conv2d_nchw":
            pad_data = op.input_tensors[0]
            kernel = op.input_tensors[1]
            # Inline dilation of the kernel when present.
            if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
                s[kernel].compute_inline()
            conv = op.output(0)
            _schedule(pad_data, kernel, conv)

    traverse(outs[0].op)
    return s
| 4,550 | 34.554688 | 87 | py |
tvm | tvm-main/python/tvm/topi/bifrost/transforms.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Utility scheduling functions for the Bifrost schedules"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
def fuse_and_bind(s, tensor, axis=None, num_thread=None):
    """Collapse *axis* (default: all axes of *tensor*) into a single axis
    and distribute it over GPU blocks and threads.

    When *num_thread* is not given, the current target's maximum thread
    count per block is used as the split factor.  Requires a current
    target to be set.
    """
    stage = s[tensor]
    axes = axis if axis else stage.op.axis
    fused = stage.fuse(*axes)
    default_threads = tvm.target.Target.current(allow_none=False).max_num_threads
    block_x, thread_x = stage.split(fused, num_thread or default_threads)
    stage.bind(block_x, te.thread_axis("blockIdx.x"))
    stage.bind(thread_x, te.thread_axis("threadIdx.x"))
    return block_x, thread_x
def tile_and_bind(s, tensor, y, x, y_factor, x_factor=None):
    """Tile the (y, x) axes of *tensor* and bind them to the 2D GPU grid.

    *x_factor* defaults to *y_factor*, giving square tiles.
    Returns (y_outer, x_outer, y_inner, x_inner).
    """
    stage = s[tensor]
    if not x_factor:
        x_factor = y_factor
    y_outer, x_outer, y_inner, x_inner = stage.tile(y, x, y_factor, x_factor)
    stage.bind(x_outer, te.thread_axis("blockIdx.x"))
    stage.bind(x_inner, te.thread_axis("threadIdx.x"))
    stage.bind(y_outer, te.thread_axis("blockIdx.y"))
    stage.bind(y_inner, te.thread_axis("threadIdx.y"))
    return y_outer, x_outer, y_inner, x_inner
def tile_and_bind3d(s, tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):
    """Tile the (z, y, x) axes of *tensor* and bind them to the 3D GPU grid.

    Missing factors cascade: *y_factor* defaults to *z_factor* and
    *x_factor* defaults to *y_factor*.  Returns outer axes followed by
    inner axes: (zo, yo, xo, zi, yi, xi).
    """
    stage = s[tensor]
    y_factor = y_factor or z_factor
    x_factor = x_factor or y_factor
    z_outer, z_inner = stage.split(z, z_factor)
    y_outer, y_inner = stage.split(y, y_factor)
    x_outer, x_inner = stage.split(x, x_factor)
    for dim, outer, inner in (
        ("z", z_outer, z_inner),
        ("y", y_outer, y_inner),
        ("x", x_outer, x_inner),
    ):
        stage.bind(outer, te.thread_axis("blockIdx." + dim))
        stage.bind(inner, te.thread_axis("threadIdx." + dim))
    return z_outer, y_outer, x_outer, z_inner, y_inner, x_inner
def pack_tensor(s, tensor, factor, readers):
    """Repack X[n, m] into X[n / factor, m, factor] through a cache stage.

    The cache-read placeholder is reordered and inlined so the returned
    cache-write stage materialises the packed layout.

    Returns the cache-write stage and the inlined cache-read stage.
    """
    packed = s.cache_read(tensor, "global", readers)
    stage = s[packed]
    rows, cols = stage.op.axis
    rows_outer, rows_inner = stage.split(rows, factor)
    stage.reorder(rows_outer, cols, rows_inner)
    stage.compute_inline()
    return s.cache_write(packed, "global"), packed
def transpose(s, tensor, y_index, x_index, readers):
    """Do transform X[n, m] -> X[m, n]

    Builds a transposed copy of *tensor* via a cache-read/cache-write pair
    and schedules the copy with 4x4 tiles, local staging and GPU binding.
    Returns the inlined cache-read placeholder.
    """
    tmp = s.cache_read(tensor, "global", readers)
    y, x = s[tmp].op.axis[y_index], s[tmp].op.axis[x_index]
    # Swap the axis order and inline, so the cache-write below performs
    # the actual transposition.
    s[tmp].reorder(x, y)
    s[tmp].compute_inline()
    A_transpose = s.cache_write(tmp, "global")
    # Local staging for the source read and the transposed write.
    CR_A = s.cache_read(tensor, "local", [A_transpose])
    CW_A_transpose = s.cache_write(A_transpose, "local")
    y, x = s[A_transpose].op.axis[y_index], s[A_transpose].op.axis[x_index]
    yo, xo, yi, xi = s[A_transpose].tile(y, x, 4, 4)
    s[A_transpose].unroll(yi)
    s[A_transpose].vectorize(xi)
    _, _, _, xi = tile_and_bind(s, A_transpose, yo, xo, 32, 2)
    # Fully unroll the local write tile.
    s[CW_A_transpose].compute_at(s[A_transpose], xi)
    y, x = s[CW_A_transpose].op.axis[y_index], s[CW_A_transpose].op.axis[x_index]
    s[CW_A_transpose].unroll(x)
    s[CW_A_transpose].unroll(y)
    # Vectorized local read of the source.
    s[CR_A].compute_at(s[A_transpose], xi)
    y, x = s[CR_A].op.axis[y_index], s[CR_A].op.axis[x_index]
    s[CR_A].unroll(y)
    s[CR_A].vectorize(x)
    return tmp
def interleave_transpose(s, tensor, width, y_index, x_index, readers, batched=False):
    """Interleave *tensor* in blocks of *width* along x, then transpose it.

    A cache-read placeholder is split/reordered/fused and inlined, so the
    returned cache-write stage materialises the interleaved-and-transposed
    layout.  For batched tensors the batch axis is fused into the outer
    column axis.

    Returns the cache-write stage and the inlined cache-read stage.
    """
    staged = s.cache_read(tensor, "global", readers)
    stage = s[staged]
    row_axis = stage.op.axis[y_index]
    col_axis = stage.op.axis[x_index]
    col_outer, col_inner = stage.split(col_axis, width)
    stage.reorder(col_outer, row_axis, col_inner)
    stage.fuse(row_axis, col_inner)
    if batched:
        stage.fuse(stage.op.axis[0], col_outer)
    stage.compute_inline()
    return s.cache_write(staged, "global"), staged
def transpose_interleave(s, tensor, width, y_index, x_index, readers, batched=False):
    """Transpose *tensor*, then interleave it in blocks of *width* along y.

    Mirror image of :func:`interleave_transpose`: the row axis is split,
    reordered and fused with the column axis, and the placeholder is
    inlined so the returned cache-write stage materialises the
    transposed-and-interleaved layout.

    Returns the cache-write stage and the inlined cache-read stage.
    """
    staged = s.cache_read(tensor, "global", readers)
    stage = s[staged]
    row_axis = stage.op.axis[y_index]
    col_axis = stage.op.axis[x_index]
    row_outer, row_inner = stage.split(row_axis, width)
    stage.reorder(row_outer, col_axis, row_inner)
    stage.fuse(col_axis, row_inner)
    if batched:
        stage.fuse(stage.op.axis[0], row_outer)
    stage.compute_inline()
    return s.cache_write(staged, "global"), staged
| 4,877 | 36.813953 | 85 | py |
tvm | tvm-main/python/tvm/topi/intel_graphics/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return, too-many-arguments, too-many-locals, too-many-statements, no-member, too-many-branches, too-many-boolean-expressions
"""conv2d schedule on Intel Graphics"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from .. import nn
from .. import utils
from ..utils import simplify, get_const_tuple, traverse_inline
def _get_default_config(cfg, data, kernel, strides, padding, out_dtype, is_depthwise=False):
    """Populate *cfg* with a fallback schedule configuration.

    Chooses the output-channel block as the largest divisor of out_channel
    not exceeding 16, keeps the input-channel block at 1, and picks output
    block height/width from a small heuristic table keyed on stride and
    kernel size.

    Raises
    ------
    RuntimeError
        If *is_depthwise* is set — depthwise is not supported here.
    """
    if is_depthwise:
        raise RuntimeError("Depthwise not supported for intel graphics.")
    _, in_channel, _, _ = get_const_tuple(data.shape)
    out_channel, _, hkernel, _ = get_const_tuple(kernel.shape)
    stride_h, _ = strides
    ic_bn = 1
    # Largest divisor of out_channel that is <= 16 (1 always divides).
    oc_bn = next((i for i in range(16, 0, -1) if out_channel % i == 0), 16)
    if stride_h == 2:
        block_oh = 4
        block_ow = 4 if out_channel + hkernel == 515 else 5
    elif hkernel == 3:
        block_oh = 2
        block_ow = 7 if out_channel == 512 else 14
    else:
        block_oh, block_ow = 1, 16
    cfg["tile_ic"] = SplitEntity([in_channel // ic_bn, ic_bn])
    cfg["tile_oc"] = SplitEntity([out_channel // oc_bn, oc_bn])
    cfg["block_oh"] = OtherOptionEntity(block_oh)
    cfg["block_ow"] = OtherOptionEntity(block_ow)
def _create_schedule_template(cfg, dshape, kshape, strides, padding, dilation):
    """Create schedule configuration from input arguments.

    Defines four tuning knobs on *cfg*:

    - ``tile_ic`` / ``tile_oc``: candidate inner-block sizes — divisors of
      ``ic`` (up to 32) and of ``oc`` (within [min(oc, 8), 64]), with a
      {1, full} fallback when no divisor qualifies.
    - ``block_oh`` / ``block_ow``: candidate output-block heights/widths —
      divisors of the output height/width up to 16, padded with extra
      values when fewer than ``blk_candidates_low_limits`` exist.

    Parameters
    ----------
    cfg : ConfigEntity
        The autotvm config to populate.
    dshape : tuple
        Data shape (n, ic, h, w).
    kshape : tuple
        Kernel shape (oc, _, kh, kw).
    strides, padding, dilation
        Convolution attributes used to derive the output size.
    """
    n, ic, h, w = dshape
    oc, _, kh, kw = kshape
    pt, pl, pb, pr = nn.get_pad_tuple(padding, (kh, kw))
    sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    oh = (h - kh + pt + pb) // sh + 1
    ow = (w - kw + pl + pr) // sw + 1
    ic_bn_upper = 32
    oc_bn_upper = 64
    oc_bn_lower = min(oc, 8)
    ic_bn_candidates, oc_bn_candidates = [], []
    for i in range(1, ic + 1):
        if ic % i == 0 and i <= ic_bn_upper:
            ic_bn_candidates.append(i)
    if not ic_bn_candidates:
        ic_bn_candidates.append(1)
        ic_bn_candidates.append(ic)
    for i in range(1, oc + 1):
        if oc % i == 0 and oc_bn_lower <= i <= oc_bn_upper:
            oc_bn_candidates.append(i)
    if not oc_bn_candidates:
        oc_bn_candidates.append(1)
        oc_bn_candidates.append(oc)
    blk_candidates_low_limits = 5
    blk_oh_list, blk_ow_list = [], []
    # NOTE(review): zip truncates at min(oh, ow), so divisors of the larger
    # dimension beyond that range are missed here; the padding loops below
    # partially compensate — confirm this is intended.
    for i, j in zip(range(oh, 0, -1), range(ow, 0, -1)):
        if i <= 16 and oh % i == 0:
            blk_oh_list.append(i)
        if j <= 16 and ow % j == 0:
            blk_ow_list.append(j)
    # Pad the candidate lists up to the minimum count.  Fix: the loop limit
    # previously repeated the magic literal 5 instead of reusing
    # blk_candidates_low_limits; same value, now kept consistent.
    if len(blk_oh_list) < blk_candidates_low_limits:
        for i in range(2, oh):
            if i not in blk_oh_list:
                blk_oh_list.append(i)
                if len(blk_oh_list) >= blk_candidates_low_limits:
                    break
    if len(blk_ow_list) < blk_candidates_low_limits:
        for i in range(min(ow - 1, 16), 1, -1):
            if i not in blk_ow_list:
                blk_ow_list.append(i)
                if len(blk_ow_list) >= blk_candidates_low_limits:
                    break
    # Create schedule config
    cfg.define_knob("tile_ic", ic_bn_candidates)
    cfg.define_knob("tile_oc", oc_bn_candidates)
    cfg.define_knob("block_oh", blk_oh_list)
    cfg.define_knob("block_ow", blk_ow_list)
##### SCHEDULE UTILITIES #####
def tile_and_bind3d(s, tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):
    """Tile the (z, y, x) axes of *tensor* and bind the tiles to the 3D grid.

    Unspecified factors cascade from the previous axis.  Returns the
    innermost x axis together with the three ranged threadIdx axes, so
    callers can compute_at on the former and reuse the latter.
    """
    stage = s[tensor]
    y_factor = y_factor or z_factor
    x_factor = x_factor or y_factor
    z_outer, z_inner = stage.split(z, z_factor)
    y_outer, y_inner = stage.split(y, y_factor)
    x_outer, x_inner = stage.split(x, x_factor)
    stage.reorder(z_outer, y_outer, x_outer, z_inner, y_inner, x_inner)
    thread_z = te.thread_axis((0, z_factor), "threadIdx.z")
    thread_y = te.thread_axis((0, y_factor), "threadIdx.y")
    thread_x = te.thread_axis((0, x_factor), "threadIdx.x")
    for outer, inner, block_name, thread in (
        (z_outer, z_inner, "blockIdx.z", thread_z),
        (y_outer, y_inner, "blockIdx.y", thread_y),
        (x_outer, x_inner, "blockIdx.x", thread_x),
    ):
        stage.bind(outer, te.thread_axis(block_name))
        stage.bind(inner, thread)
    return x_inner, thread_z, thread_y, thread_x
def _pack_data(data, kernel, ic_bn, oc_bn):
    """Rewrite 4-D NCHW data and OIHW kernel into their blocked layouts.

    Produces ``data_vec`` with shape (n, ic_chunk, h, w, ic_bn) and
    ``kernel_vec`` with shape (oc_chunk, ic_chunk, kh, kw, ic_bn, oc_bn),
    where chunk counts are the channel extents divided by the block sizes.
    """
    batch, _, height, width = get_const_tuple(data.shape)
    out_chan, in_chan, k_h, k_w = get_const_tuple(kernel.shape)
    ic_chunk = in_chan // ic_bn
    oc_chunk = out_chan // oc_bn
    packed_data = te.compute(
        (batch, ic_chunk, height, width, ic_bn),
        lambda bs, c, h, w, vc: data[bs, c * ic_bn + vc, h, w],
        name="data_vec",
    )
    packed_kernel = te.compute(
        (oc_chunk, ic_chunk, k_h, k_w, ic_bn, oc_bn),
        lambda occ, icc, k_h, k_w, icb, ocb: kernel[occ * oc_bn + ocb, icc * ic_bn + icb, k_h, k_w],
        name="kernel_vec",
    )
    return packed_data, packed_kernel
@autotvm.register_topi_compute("conv2d_NCHWc.intel_graphics")
def conv2d_NCHWc(
    cfg, data, kernel, strides, padding, dilation, layout, out_layout, out_dtype="float32"
):
    """Conv2D operator for Intel Graphics backend.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    kernel : tvm.te.Tensor
        5-D with shape [num_filter, in_channel, filter_height, filter_width, nnum_filter_vec]
    stride : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of two ints
        padding size, or [pad_height, pad_width]
    layout : str
        layout of data

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    # Accept either the blocked 5-D layout or the plain 4-D NCHW layout.
    if len(data.shape) == 5:
        batch, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape)
        oc_chunk, _, kernel_height, kernel_width, _, oc_bn = get_const_tuple(kernel.shape)
        in_channel = ic_chunk * ic_bn
        num_filter = oc_chunk * oc_bn
    else:
        batch, in_channel, ih, iw = get_const_tuple(data.shape)
        num_filter, _, kernel_height, kernel_width = get_const_tuple(kernel.shape)
    dh, dw = dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation)
    pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
        padding, (kernel_height, kernel_width)
    )
    assert (dh, dw) == (1, 1), "Does not support dilation"
    if isinstance(strides, (tuple, list)):
        stride_h, stride_w = strides
    else:
        stride_h, stride_w = strides, strides
    # Define the tuning space from the unblocked shapes; fall back to the
    # heuristic default config when no tuned record is available.
    data_shape = (batch, in_channel, ih, iw)
    kernel_shape = (num_filter, in_channel, kernel_height, kernel_width)
    _create_schedule_template(cfg, data_shape, kernel_shape, strides, padding, dilation)
    if cfg.is_fallback:
        _get_default_config(
            cfg,
            te.placeholder((batch, in_channel, ih, iw), dtype=data.dtype),
            te.placeholder(
                (num_filter, in_channel, kernel_height, kernel_width), dtype=kernel.dtype
            ),
            strides,
            padding,
            out_dtype,
        )
    # Knob entities expose .val; fallback SplitEntity exposes .size.
    ic_bn = cfg["tile_ic"].val if hasattr(cfg["tile_ic"], "val") else cfg["tile_ic"].size[-1]
    oc_bn = cfg["tile_oc"].val if hasattr(cfg["tile_oc"], "val") else cfg["tile_oc"].size[-1]
    # Pack data if raw 4-D data is provided.
    if len(data.shape) == 4:
        data, kernel = _pack_data(data, kernel, ic_bn, oc_bn)
    out_channel = num_filter
    out_height = simplify((ih - kernel_height + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((iw - kernel_width + pad_left + pad_right) // stride_w + 1)
    oshape = (batch, out_channel // oc_bn, out_height, out_width, oc_bn)
    rc = te.reduce_axis((0, in_channel), name="rc")
    ry = te.reduce_axis((0, kernel_height), name="ry")
    rx = te.reduce_axis((0, kernel_width), name="rx")
    block_h = cfg["block_oh"].val
    block_w = cfg["block_ow"].val
    # Round the computed output region up to a multiple of the block size;
    # the extra rows/columns are computed and then dropped by the unpack stage.
    c_h = out_height
    c_w = out_width
    if out_height % block_h != 0:
        c_h = (out_height // block_h + 1) * block_h
    if out_width % block_w != 0:
        c_w = (out_width // block_w + 1) * block_w
    cshape = (batch, out_channel // oc_bn, c_h, c_w, oc_bn)
    # Pad for the convolution window plus the block rounding above.
    pad_before = [0, 0, pad_top, pad_left, 0]
    pad_after = [0, 0, pad_down + c_h - out_height, pad_right + c_w - out_width, 0]
    DOPAD = (
        pad_top != 0
        or pad_left != 0
        or pad_down + c_h - out_height != 0
        or pad_right + c_w - out_width != 0
    )
    DOUNPACK = c_h - out_height != 0 or c_w - out_width != 0
    if DOPAD:
        temp = nn.pad(data, pad_before, pad_after, name="pad_temp")
    else:
        temp = data
    conv = te.compute(
        cshape,
        lambda nn, ff, yy, xx, ff_v: te.sum(
            temp[nn, rc // ic_bn, yy * stride_h + ry, xx * stride_w + rx, rc % ic_bn].astype(
                out_dtype
            )
            * kernel[ff, rc // ic_bn, ry, rx, rc % ic_bn, ff_v].astype(out_dtype),
            axis=[rc, ry, rx],
        ),
        tag="conv2d_NCHWc",
        name="conv2d_NCHWc",
    )
    # Crop the block-rounded result back to the true output shape.
    if DOUNPACK:
        output = te.compute(
            oshape,
            lambda nn, ff, yy, xx, ff_v: conv[nn][ff][yy][xx][ff_v],
            name="output_unpack",
            tag="conv2d_NCHWc_unpack",
        )
    else:
        output = conv
    return output
@autotvm.register_topi_schedule("conv2d_NCHWc.intel_graphics")
def schedule_conv2d_NCHWc(cfg, outs):
    """Schedule for conv2d_nchw for Intel Graphics

    Parameters
    ----------
    cfg: ConfigEntity
        The autotvm configuration for this workload.
    outs: Array of Tensor
        The computation graph description of conv2d_nchw
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for conv2d_nchw.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([t.op for t in outs])

    def _traverse(op):
        """Apply the spatial-pack schedule to every conv2d_NCHWc op; other
        one-to-one ops are inlined by traverse_inline."""
        if "conv2d_NCHWc" in op.tag:
            _schedule_cl_spatialpack_NCHWc(cfg, sch, op)

    traverse_inline(sch, outs[0].op, _traverse)
    return sch
def _schedule_cl_spatialpack_NCHWc(cfg, s, op):
    """Spatial-pack schedule for one conv2d_NCHWc (or its unpack) op.

    Locates the conv stage, its padded input and kernel, adds warp/local
    cache stages, and binds the blocked output space to the GPU grid.
    """
    output = op.output(0)
    # The dispatched op is either the conv itself or the unpack stage that
    # crops the block-rounded result; recover the conv stage accordingly.
    if op.name == "conv2d_NCHWc":
        temp = op.input_tensors[0]
        kernel = op.input_tensors[1]
        temp_W = s.cache_read(temp, "warp", [output])
        conv_L = s.cache_write(output, "local")
        if output.op in s.outputs:
            conv = output
        else:
            s[output].compute_inline()
            conv = s.outputs[0]
        SCHEDULE_OUTPUT = False
    else:  # conv2d_NCHWc_unpack
        conv = op.input_tensors[0]
        temp = s[conv].op.input_tensors[0]
        kernel = s[conv].op.input_tensors[1]
        temp_W = s.cache_read(temp, "warp", [conv])
        conv_L = s.cache_write(conv, "local")
        SCHEDULE_OUTPUT = True
    kernel_L = s.cache_read(kernel, "local", [conv_L])
    if temp.name == "pad_temp":
        data = temp.op.input_tensors[0]
        # TODO(@Laurawly): Do we need to schedule pad op here?
    else:
        data = temp
    if autotvm.GLOBAL_SCOPE.in_tuning:
        # only in autotuning, input data of conv2d_NCHWc will be 4-D.
        # skip this part during tuning to make records accurate.
        # this part will be folded during Relay fold_constant pass.
        s[data].pragma(s[data].op.axis[0], "debug_skip_region")
        s[kernel].pragma(s[kernel].op.axis[0], "debug_skip_region")
    elif isinstance(kernel.op, tvm.te.ComputeOp) and kernel.name == "kernel_vec":
        # data and kernel are not pre-computed, schedule layout transform here.
        # TODO(@Laurawly): Add schedule for data and kernel pack
        pass
    OUTPUT_BLOCK_HEIGHT = cfg["block_oh"].val
    OUTPUT_BLOCK_WIDTH = cfg["block_ow"].val
    # schedule conv: fixed thread shape (1, 1, 16) over the blocked output.
    z_factor = 1
    y_factor = 1
    x_factor = 16
    thread_z = te.thread_axis((0, z_factor), "threadIdx.z")
    thread_y = te.thread_axis((0, y_factor), "threadIdx.y")
    thread_x = te.thread_axis((0, x_factor), "threadIdx.x")
    _, co, oh, ow, vc = s[conv].op.axis
    ooh, ioh = s[conv].split(oh, factor=OUTPUT_BLOCK_HEIGHT)
    oow, iow = s[conv].split(ow, factor=OUTPUT_BLOCK_WIDTH)
    s[conv].reorder(_, co, ooh, oow, vc, ioh, iow)
    coo, coi = s[conv].split(co, nparts=1)
    ooho, oohi = s[conv].split(ooh, factor=z_factor)
    oowo, oowi = s[conv].split(oow, factor=y_factor)
    vco, vci = s[conv].split(vc, factor=x_factor)
    s[conv].reorder(_, coo, vco, ooho, oowo, coi, oohi, oowi, vci, ioh, iow)
    s[conv].bind(oohi, thread_z)
    s[conv].bind(oowi, thread_y)
    s[conv].bind(vci, thread_x)
    s[conv].bind(ooho, te.thread_axis("blockIdx.z"))
    s[conv].bind(oowo, te.thread_axis("blockIdx.y"))
    s[conv].bind(coi, te.thread_axis("blockIdx.x"))
    # schedule conv_L: accumulate locally; reduction axes come before the
    # output block so each thread iterates the whole window.
    s[conv_L].compute_at(s[conv], vci)
    i, oc, h, w, vc = s[conv_L].op.axis
    rc, ry, rx = s[conv_L].op.reduce_axis
    s[conv_L].reorder(i, oc, rc, ry, rx, vc, h, w)
    s[temp_W].compute_at(s[conv_L], rc)
    # Skip unrolling the window loops for 7-wide kernels.
    if kernel.shape[3].value != 7:
        s[conv_L].unroll(ry)
        s[conv_L].unroll(rx)
    # schedule temp
    if temp.op.name == "pad_temp":
        _, ci, h, w, vci = s[temp].op.axis
        tile_and_bind3d(s, temp, ci, h, w, 1, 16, 16)
    # schedule temp_W: warp-level staging of the padded input.
    _, ci, h, w, vci = s[temp_W].op.axis
    zo, zi = s[temp_W].split(vci, 1)
    yo, yi = s[temp_W].split(h, 1)
    xo, xi = s[temp_W].split(w, 16)
    s[temp_W].reorder(zo, yo, xo, zi, yi, xi)
    s[temp_W].bind(zi, thread_z)
    s[temp_W].bind(yi, thread_y)
    s[temp_W].bind(xi, thread_x)
    s[temp_W].storage_align(s[temp_W].op.axis[2], 16, 0)
    # schedule kernel_L: stage the kernel deeper for the 2x14 block shape.
    if OUTPUT_BLOCK_HEIGHT == 2 and OUTPUT_BLOCK_WIDTH == 14:
        s[kernel_L].compute_at(s[conv_L], ry)
    else:
        s[kernel_L].compute_at(s[conv_L], rx)
    # schedule output (only needed when the unpack stage exists)
    if SCHEDULE_OUTPUT:
        if output.op in s.outputs:
            out = output
        else:
            s[output].compute_inline()
            out = s.outputs[0]
        _, co, h, w, vc = s[out].op.axis
        tile_and_bind3d(s, out, w, h, vc, 4, 8, 8)
def conv2d_nchw(data, kernel, stride, padding, dilation, out_dtype="float32"):
    """Declare a conv2d computation for the Intel Graphics backend.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    kernel : tvm.te.Tensor
        4-D with shape [num_filter, in_channel, filter_height, filter_width]
    stride : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of two ints
        padding size, or [pad_height, pad_width]
    dilation : int or a list/tuple of two ints
        dilation size; note it is not forwarded to the spatial-pack declaration.
    out_dtype : str
        output data type

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    batch_size = data.shape[0].value
    assert batch_size == 1, "only support batch size=1 convolution on intel gpu"
    assert data.dtype == kernel.dtype, "Do not support inputs with different data types now."
    return _decl_cl_spatialpack(data, kernel, stride, padding, out_dtype)
def schedule_conv2d_nchw(outs):
    """Create a schedule for conv2d_nchw on Intel Graphics.

    Parameters
    ----------
    outs : Array of Tensor
        The computation graph description of conv2d_nchw
        in the format of an array of tensors.

    Returns
    -------
    s : Schedule
        The computation schedule for conv2d_nchw.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([t.op for t in outs])

    def _apply_schedule(op):
        # Only the spatial-pack conv2d stage needs explicit scheduling;
        # traverse_inline inlines the surrounding one-to-one stages.
        if "conv2d" in op.tag:
            _schedule_cl_spatialpack(sch, op)

    traverse_inline(sch, outs[0].op, _apply_schedule)
    return sch
def _decl_cl_spatialpack(data, kernel, stride, padding, out_dtype="float16"):
    """Declare the spatial-pack conv2d pipeline:
    pad -> repack kernel by 16 output channels -> blocked conv -> unpack to NCHW.

    Dilation is not supported by this declaration (no dilation parameter).
    """
    batch, in_channel, in_height, in_width = [utils.get_const_int(x) for x in data.shape]
    num_filter, channel, kernel_h, kernel_w = [utils.get_const_int(x) for x in kernel.shape]
    pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(padding, (kernel_h, kernel_w))
    if isinstance(stride, (tuple, list)):
        stride_h, stride_w = stride
    else:
        stride_h, stride_w = stride, stride
    out_channel = num_filter
    out_height = simplify((in_height - kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - kernel_w + pad_left + pad_right) // stride_w + 1)
    oshape = (batch, out_channel, out_height, out_width)
    rc = te.reduce_axis((0, in_channel), name="rc")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    # Pick an output tile (block_h x block_w) from a hand-tuned table keyed on
    # stride/kernel/filter combinations -- presumably tuned for common CNN
    # workloads; TODO confirm the intent of the `num_filter + kernel_h == 515` key.
    if stride_h == 2:
        if num_filter + kernel_h == 515:
            block_h = 4
            block_w = 4
        else:
            block_h = 4
            block_w = 5
    elif kernel_h == 3:
        if num_filter == 512:
            block_h = 2
            block_w = 7
        else:
            block_h = 2
            block_w = 14
    elif kernel_h == 7 and padding == 3 and stride == 1:
        block_h = 3
        block_w = 4
    else:
        block_h = 1
        block_w = 16
    # The tile sizes are stashed on the conv stage so the schedule can recover them.
    attrs = {"block_h": block_h, "block_w": block_w}
    # Round the blocked compute extents up to a multiple of the tile size.
    c_h = out_height
    c_w = out_width
    if out_height % block_h != 0:
        c_h = (out_height // block_h + 1) * block_h
    if out_width % block_w != 0:
        c_w = (out_width // block_w + 1) * block_w
    # Extra bottom/right padding sized from the rounded extents (c_h - block_h,
    # c_w - block_w) -- presumably so every full tile reads in-bounds data.
    pad_before = [0, 0, pad_top, pad_left]
    pad_after = [0, 0, pad_down + c_h - block_h, pad_right + c_w - block_w]
    temp = nn.pad(data, pad_before, pad_after, name="pad_temp")
    nv = 16  # output-channel vector width
    if num_filter % nv != 0:
        num_filter = (num_filter // nv + 1) * nv
        out_channel = num_filter
    cshape = (batch, out_channel // nv, c_h, c_w, nv)
    kvshape = (num_filter // nv, channel, kernel_h, kernel_w, nv)
    # Repack the kernel as [CO//16, CI, KH, KW, 16] so 16 output channels are contiguous.
    kernel_vec = te.compute(
        kvshape, lambda co, ci, kh, kw, vc: kernel[co * nv + vc][ci][kh][kw], name="kernel_vec"
    )
    # Blocked convolution; note the lambda's `nn` argument shadows the `nn`
    # module inside this expression only.
    conv = te.compute(
        cshape,
        lambda nn, ff, yy, xx, vc: te.sum(
            temp[nn, rc, yy * stride_h + ry, xx * stride_w + rx].astype(out_dtype)
            * kernel_vec[ff, rc, ry, rx, vc].astype(out_dtype),
            axis=[rc, ry, rx],
        ),
        name="conv",
        attrs=attrs,
    )
    # Unpack the blocked layout back to plain NCHW (this also crops the
    # rounded-up extents back to oshape).
    output = te.compute(
        oshape,
        lambda nn, ff, yy, xx: conv[nn][ff // nv][yy][xx][ff % nv],
        name="output_unpack",
        tag="conv2d",
    )
    return output
def _schedule_cl_spatialpack(s, op):
    """Schedule the spatial-pack conv2d stages (pad -> kernel_vec -> conv -> unpack).

    `op` is the "output_unpack" op tagged "conv2d"; the tile sizes chosen at
    declaration time are recovered from the conv stage's "block_h"/"block_w" attrs.
    """
    output = op.output(0)
    _, _, out_height, out_width = [utils.get_const_int(x) for x in output.shape]
    conv = op.input_tensors[0]
    temp = s[conv].op.input_tensors[0]
    kernel_vec = s[conv].op.input_tensors[1]
    kernel = s[kernel_vec].op.input_tensors[0]
    # Cache stages: padded input in shared memory, accumulator and kernel in local.
    temp_W = s.cache_read(temp, "shared", [conv])
    conv_L = s.cache_write(conv, "local")
    kernel_L = s.cache_read(kernel_vec, "local", [conv_L])
    _, in_channel, temp_h, temp_w = [utils.get_const_int(x) for x in temp.shape]
    attrs = s[conv].op.attrs
    OUTPUT_BLOCK_HEIGHT = attrs["block_h"]
    OUTPUT_BLOCK_WIDTH = attrs["block_w"]
    # schedule conv: each thread (threadIdx.x over the 16-wide channel vector)
    # computes one block_h x block_w output tile.
    z_factor = 1
    y_factor = 1
    x_factor = 16
    thread_z = te.thread_axis((0, z_factor), "threadIdx.z")
    thread_y = te.thread_axis((0, y_factor), "threadIdx.y")
    thread_x = te.thread_axis((0, x_factor), "threadIdx.x")
    _, co, oh, ow, vc = s[conv].op.axis
    ooh, ioh = s[conv].split(oh, factor=OUTPUT_BLOCK_HEIGHT)
    oow, iow = s[conv].split(ow, factor=OUTPUT_BLOCK_WIDTH)
    s[conv].reorder(_, co, ooh, oow, vc, ioh, iow)
    coo, coi = s[conv].split(co, nparts=1)
    ooho, oohi = s[conv].split(ooh, factor=z_factor)
    oowo, oowi = s[conv].split(oow, factor=y_factor)
    vco, vci = s[conv].split(vc, factor=x_factor)
    s[conv].reorder(_, coo, vco, ooho, oowo, coi, oohi, oowi, vci, ioh, iow)
    s[conv].bind(oohi, thread_z)
    s[conv].bind(oowi, thread_y)
    s[conv].bind(vci, thread_x)
    s[conv].bind(ooho, te.thread_axis("blockIdx.z"))
    s[conv].bind(oowo, te.thread_axis("blockIdx.y"))
    s[conv].bind(coi, te.thread_axis("blockIdx.x"))
    # schedule conv_L: accumulate in local scope at the innermost bound axis.
    s[conv_L].compute_at(s[conv], vci)
    i, oc, h, w, vc = s[conv_L].op.axis
    rc, ry, rx = s[conv_L].op.reduce_axis
    s[conv_L].reorder(i, oc, rc, ry, rx, vc, h, w)
    s[temp_W].compute_at(s[conv_L], rc)
    # 7x7 kernels are left rolled -- presumably to bound code size; TODO confirm.
    if kernel.shape[3].value != 7:
        s[conv_L].unroll(ry)
        s[conv_L].unroll(rx)
    # schedule temp (padded input)
    _, ci, h, w = s[temp].op.axis
    tile_and_bind3d(s, temp, ci, h, w, 1, 16, 16)
    # schedule temp_W (shared-memory copy of the padded input)
    _, ci, h, w = s[temp_W].op.axis
    zo, zi = s[temp_W].split(ci, 1)
    yo, yi = s[temp_W].split(h, 1)
    xo, xi = s[temp_W].split(w, 16)
    s[temp_W].reorder(zo, yo, xo, zi, yi, xi)
    s[temp_W].bind(zi, thread_z)
    s[temp_W].bind(yi, thread_y)
    s[temp_W].bind(xi, thread_x)
    s[temp_W].storage_align(s[temp_W].op.axis[2], 16, 0)
    s[kernel_vec].compute_inline()
    # schedule kernel_L: hoist the kernel read one reduction loop up for the 2x14 tile.
    if OUTPUT_BLOCK_HEIGHT == 2 and OUTPUT_BLOCK_WIDTH == 14:
        s[kernel_L].compute_at(s[conv_L], ry)
    else:
        s[kernel_L].compute_at(s[conv_L], rx)
    # schedule output: inline the unpack when it is not the graph's final op.
    if output.op in s.outputs:
        out = output
    else:
        s[output].compute_inline()
        out = s.outputs[0]
    _, co, h, w = s[out].op.axis
    tile_and_bind3d(s, out, w, h, co, 4, 8, 8)
| 22,340 | 32.952888 | 195 | py |
tvm | tvm-main/python/tvm/topi/intel_graphics/conv2d_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D alter op and legalize functions for x86"""
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from ..utils import get_const_tuple
from ..nn import conv2d_alter_layout, conv2d_infer_layout
from .conv2d import _get_default_config
@conv2d_alter_layout.register(["intel_graphics"])
def _alter_conv2d_layout(attrs, inputs, tinfos, out_type):
    """Alter conv2d to the blocked NCHWc form used by the intel_graphics template.

    Queries the autotvm dispatch context for the tuned (or fallback) config,
    rewrites data/kernel/out layouts to NCHW{ic}c / OIHW{ic}i{oc}o / NCHW{oc}c,
    and registers the transformed workload so the same config is found again.
    Returns None to leave the op unchanged when no AutoTVM template applies.
    """
    target = tvm.target.Target.current(allow_none=False)
    dispatch_ctx = autotvm.task.DispatchContext.current
    if isinstance(dispatch_ctx, autotvm.task.ApplyGraphBest):
        # ApplyGraphBest tracks the op order itself; query with a None workload.
        cfg = dispatch_ctx.query(target, None)
        workload = cfg.workload
    else:
        _, outs = relay.backend.te_compiler.select_implementation(
            relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target
        )
        workload = autotvm.task.get_workload(outs)
        if workload is None:
            # The best implementation is not an AutoTVM template,
            # we then assume it's not necessary to alter this op.
            return None
        cfg = dispatch_ctx.query(target, workload)
    topi_tmpl = workload[0]
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    padding = attrs.get_int_tuple("padding")
    strides = attrs.get_int_tuple("strides")
    dilation = attrs.get_int_tuple("dilation")
    data_layout = attrs["data_layout"]
    kernel_layout = attrs["kernel_layout"]
    data_tensor, kernel_tensor = tinfos
    data_dtype = data_tensor.dtype
    kernel_dtype = kernel_tensor.dtype
    out_dtype = out_type.dtype
    if topi_tmpl == "conv2d_NCHWc.intel_graphics":
        assert data_layout == "NCHW" and kernel_layout == "OIHW"
        if cfg.is_fallback:
            _get_default_config(cfg, data_tensor, kernel_tensor, strides, padding, out_dtype, False)
        batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape)
        out_channel, _, kh, kw = get_const_tuple(kernel_tensor.shape)
        # Block sizes may be stored directly as `.val` or as the last split
        # factor (`.size[-1]`) depending on the config entity kind.
        ic_bn = cfg["tile_ic"].val if hasattr(cfg["tile_ic"], "val") else cfg["tile_ic"].size[-1]
        oc_bn = cfg["tile_oc"].val if hasattr(cfg["tile_oc"], "val") else cfg["tile_oc"].size[-1]
        # update new attrs
        new_attrs["channels"] = out_channel
        new_attrs["data_layout"] = f"NCHW{ic_bn}c"
        # (oc, ic, h, w) -> (OC, IC, h, w, ic, oc)
        new_attrs["kernel_layout"] = f"OIHW{ic_bn}i{oc_bn}o"
        new_attrs["out_layout"] = f"NCHW{oc_bn}c"
        # Store altered operator's config
        new_data = te.placeholder(
            (batch_size, in_channel // ic_bn, height, width, ic_bn), dtype=data_dtype
        )
        new_kernel = te.placeholder(
            (out_channel // oc_bn, in_channel // ic_bn, kh, kw, ic_bn, oc_bn), dtype=kernel_dtype
        )
        new_workload = autotvm.task.args_to_workload(
            [
                new_data,
                new_kernel,
                strides,
                padding,
                dilation,
                new_attrs["data_layout"],
                new_attrs["out_layout"],
                out_dtype,
            ],
            "conv2d_NCHWc.intel_graphics",
        )
        dispatch_ctx.update(target, new_workload, cfg)
        return relay.nn.contrib_conv2d_nchwc(*inputs, **new_attrs)
    return None
@conv2d_infer_layout.register("intel_graphics")
def _conv2d_infer_layout(workload, cfg):
_, data, kernel, strides, padding, dilation, layout, dtype = workload
batch_size, in_channel, in_height, in_width = data[1]
out_channel, _, k_height, k_width = kernel[1]
out_height = (in_height + 2 * padding[0] - k_height) // strides[0] + 1
out_width = (in_width + 2 * padding[1] - k_width) // strides[1] + 1
tile_ic, tile_oc = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
in_shape = (batch_size, in_channel // tile_ic, in_height, in_width, tile_ic)
in_layout = f"NCHW{tile_ic}c"
out_shape = (batch_size, out_channel // tile_oc, out_height, out_width, tile_oc)
out_layout = f"NCHW{tile_oc}c"
return ((in_shape, in_layout),), ((out_shape, out_layout),)
| 4,884 | 41.112069 | 100 | py |
tvm | tvm-main/python/tvm/topi/intel_graphics/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""Intel Gen9 GPU specific declaration and schedules."""
from __future__ import absolute_import as _abs
from .conv2d import *
from . import conv2d_alter_op
from .depthwise_conv2d import *
| 1,028 | 40.16 | 62 | py |
tvm | tvm-main/python/tvm/topi/intel_graphics/depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Schedule for depthwise_conv2d with auto fusion"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import traverse_inline
from .. import nn
from ..nn.depthwise_conv2d import depthwise_conv2d_infer_layout
# register original implementation of depthwise_conv2d_nchw since we don't need to change this part
@autotvm.register_topi_compute("depthwise_conv2d_nchw.intel_graphics")
def depthwise_conv2d_nchw(_, data, kernel, strides, padding, dilation, out_dtype):
    """Delegate to the generic NCHW depthwise conv2d compute (the cfg argument is unused)."""
    return nn.depthwise_conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("depthwise_conv2d_nchw.intel_graphics")
def schedule_depthwise_conv2d_nchw(cfg, outs):
    """Schedule for depthwise_conv2d nchw forward.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of depthwise_conv2d
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for depthwise_conv2d nchw.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])

    def _callback(op):
        if op.tag == "depthwise_conv2d_nchw":
            pad_data = op.input_tensors[0]
            kernel = op.input_tensors[1]
            conv = op.output(0)
            ##### space definition begin #####
            n, f, y, x = s[conv].op.axis
            cfg.define_split("tile_f", f, num_outputs=4)
            cfg.define_split("tile_y", y, num_outputs=4)
            cfg.define_split("tile_x", x, num_outputs=4)
            cfg.define_knob("auto_unroll_max_step", [0, 256, 1500])
            target = tvm.target.Target.current()
            if target.kind.name in ["nvptx", "rocm"]:
                cfg.define_knob("unroll_explicit", [1])
            else:
                cfg.define_knob("unroll_explicit", [0, 1])
            # fallback support
            if cfg.is_fallback:
                ref_log = autotvm.tophub.load_reference_log(
                    target.kind.name, target.model, "depthwise_conv2d_nchw.intel_graphics"
                )
                cfg.fallback_with_reference_log(ref_log)
                cfg["unroll_explicit"].val = 0
            ##### space definition end #####
            s[pad_data].compute_inline()
            if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
                s[kernel].compute_inline()
            # If conv is the final op, write it through a local cache stage;
            # otherwise keep conv itself in local scope and schedule the real output.
            if conv.op in s.outputs:
                output = conv
                OL = s.cache_write(conv, "local")
            else:
                output = s.outputs[0].output(0)
                s[conv].set_scope("local")
                OL = conv
            # create cache stage
            AA = s.cache_read(pad_data, "shared", [OL])
            WW = s.cache_read(kernel, "shared", [OL])
            AL = s.cache_read(AA, "local", [OL])
            WL = s.cache_read(WW, "local", [OL])
            # tile and bind spatial axes
            n, f, y, x = s[output].op.axis
            bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
            by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
            bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
            # kernel_scope is a zero-extent outer axis used to attach the unroll pragmas.
            kernel_scope, n = s[output].split(n, nparts=1)
            bf = s[output].fuse(n, bf)
            s[output].bind(bf, te.thread_axis("blockIdx.z"))
            s[output].bind(by, te.thread_axis("blockIdx.y"))
            s[output].bind(bx, te.thread_axis("blockIdx.x"))
            s[output].bind(vf, te.thread_axis("vthread"))
            s[output].bind(vy, te.thread_axis("vthread"))
            s[output].bind(vx, te.thread_axis("vthread"))
            s[output].bind(tf, te.thread_axis("threadIdx.z"))
            s[output].bind(ty, te.thread_axis("threadIdx.y"))
            s[output].bind(tx, te.thread_axis("threadIdx.x"))
            s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi)
            s[OL].compute_at(s[output], tx)
            # cooperative fetching
            s[AA].compute_at(s[output], bx)
            s[WW].compute_at(s[output], bx)
            s[AL].compute_at(s[output], tx)
            s[WL].compute_at(s[output], tx)
            for load in [AA, WW]:
                fused = s[load].fuse(*list(s[load].op.axis))
                fused, tx = s[load].split(fused, cfg["tile_x"].size[2])
                fused, ty = s[load].split(fused, cfg["tile_y"].size[2])
                fused, tz = s[load].split(fused, cfg["tile_f"].size[2])
                s[load].bind(tz, te.thread_axis("threadIdx.z"))
                s[load].bind(ty, te.thread_axis("threadIdx.y"))
                s[load].bind(tx, te.thread_axis("threadIdx.x"))
            s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
            s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)

    traverse_inline(s, outs[0].op, _callback)
    return s
@depthwise_conv2d_infer_layout.register("intel_graphics")
def _depthwise_conv2d_infer_layout(workload, _):
"""Infer input/output shapes and layouts from a workload and cfg.
Parameters
----------
workload : tuple
conv2d workload
cfg : tuple
tvm.autotvm config
Returns
-------
Output : [tuple of tuple and str, tuple of tuple and str]
Input shapes and layouts, and output shapes and layouts
"""
_, data, kernel, strides, padding, _, _ = workload
batch_size, in_channel, in_height, in_width = data[1]
filter_channel, channel_multiplier, k_height, k_width = kernel[1]
out_channel = filter_channel * channel_multiplier
out_height = (in_height + 2 * padding[0] - k_height) // strides[0] + 1
out_width = (in_width + 2 * padding[1] - k_width) // strides[1] + 1
in_shape = (batch_size, in_channel, in_height, in_width)
out_shape = (batch_size, out_channel, out_height, out_width)
in_layout = out_layout = "NCHW"
return ((in_shape, in_layout),), ((out_shape, out_layout),)
| 6,819 | 40.333333 | 99 | py |
tvm | tvm-main/python/tvm/topi/rocm/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Compute definition for conv2d with rocm backend"""
from tvm import autotvm
from tvm.contrib import miopen
from .. import generic
from ..utils import get_const_tuple
from ..nn.utils import get_pad_tuple
@autotvm.register_topi_compute("conv2d_nchw_miopen.rocm")
def conv2d_nchw_miopen(
    cfg, data, kernel, strides, padding, dilation, layout="NCHW", out_dtype="float32"
):
    """Conv2D operator for rocm backend, delegated to MIOpen.

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    kernel : tvm.te.Tensor
        4-D with shape [num_filter, in_channel, filter_height, filter_width]
    strides : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of 2 or 4 ints
        padding size, or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    dilation : int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    layout : str
        layout of data; only "NCHW" is supported
    out_dtype : str
        output data type

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    CO, CI, KH, KW = get_const_tuple(kernel.shape)
    N, _, H, W = get_const_tuple(data.shape)
    assert layout == "NCHW"
    # handle dilation
    stride_h, stride_w = (strides, strides) if isinstance(strides, int) else strides
    pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW))
    # pad_h/pad_w already sum BOTH sides of each spatial dimension.
    pad_h, pad_w = pt + pb, pl + pr
    dilation_h, dilation_w = (dilation, dilation) if isinstance(dilation, int) else dilation
    # MIOpen takes a single per-side padding value, so padding must be symmetric.
    assert (pt == pb) and (pl == pr)
    # Output extents used only for the flop estimate below. NOTE: the previous
    # code used `2 * pad_h` / `2 * pad_w` here, double-counting the padding
    # (pad_h is already top+bottom); the total padding must be added once, as in
    # the cuDNN counterpart of this template.
    OH = (H + pad_h - KH) // stride_h + 1
    OW = (W + pad_w - KW) // stride_w + 1
    # Flop estimate (2 ops per multiply-accumulate), using the dilated kernel
    # extents for consistency with the cuDNN template's accounting.
    cfg.add_flop(
        2 * N * OH * OW * CO * CI * ((KH - 1) * dilation_h + 1) * ((KW - 1) * dilation_w + 1)
    )
    return miopen.conv2d_forward(
        data, kernel, stride_h, stride_w, pt, pl, dilation_h, dilation_w, conv_mode=0, data_type=1
    )
@autotvm.register_topi_schedule("conv2d_nchw_miopen.rocm")
def schedule_conv2d_nchw_miopen(cfg, outs):
    """TOPI schedule callback of conv2d for rocm.

    The conv itself is an extern MIOpen call, so the generic extern schedule
    is all that is required.

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template (unused by the extern schedule)
    outs: Array of Tensor
        The computation graph description of conv2d
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for conv2d.
    """
    return generic.schedule_extern(outs)
| 3,300 | 31.362745 | 98 | py |
tvm | tvm-main/python/tvm/topi/rocm/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Schedule for dense operator"""
from tvm import te
from tvm import autotvm
from tvm.contrib import rocblas
from .. import generic
from .. import tag
@autotvm.register_topi_compute("dense_rocblas.rocm")
def dense_rocblas(cfg, data, weight, bias=None, out_dtype=None):
    """Dense (fully connected) operator on rocm backed by rocBLAS.

    Parameters
    ----------
    cfg : ConfigEntity
        Autotvm config; used only to record the flop count.
    data : tvm.te.Tensor
        2-D with shape [batch, in_dim]
    weight : tvm.te.Tensor
        2-D with shape [out_dim, in_dim]
    bias : tvm.te.Tensor, optional
        1-D with shape [out_dim]
    out_dtype : str, optional
        The output type; must match the input type (mixed precision unsupported).

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [batch, out_dim]
    """
    if out_dtype is None:
        out_dtype = data.dtype
    assert out_dtype == data.dtype, "Mixed precision not supported."
    # data @ weight.T via rocBLAS (weight is stored as [out_dim, in_dim]).
    result = rocblas.matmul(data, weight, False, True)
    batch, in_dim = data.shape
    out_dim, _ = weight.shape
    # One multiply + one add per reduction element.
    cfg.add_flop(batch * in_dim * out_dim * 2)
    if bias is None:
        return result
    return te.compute(
        (batch, out_dim), lambda i, j: result[i, j] + bias[j], tag=tag.BROADCAST
    )
@autotvm.register_topi_schedule("dense_rocblas.rocm")
def schedule_dense_rocblas(_, outs):
    """Schedule for dense operator with rocm cblas.

    The compute is an extern rocBLAS call, so the generic extern schedule suffices.
    """
    return generic.schedule_extern(outs)
| 2,229 | 32.283582 | 84 | py |
tvm | tvm-main/python/tvm/topi/rocm/batch_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Schedule for batch_matmul operator"""
from tvm import autotvm
from tvm.contrib import rocblas
from .. import generic
from ..utils import get_const_tuple
@autotvm.register_topi_compute("batch_matmul_rocblas.rocm")
def batch_matmul_rocblas(
    cfg, x, y, out_shape=None, out_dtype=None, transpose_a=False, transpose_b=True
):
    """Batched matrix multiplication of `x` and `y` via rocBLAS.

    Parameters
    ----------
    cfg : ConfigSpace
        Autotvm tuning space config file
    x : tvm.te.Tensor
        3-D with shape [batch, M, K]
    y : tvm.te.Tensor
        3-D with shape [batch, N, K]
    out_shape : tuple, optional
        Expected output shape, validated against the inferred shape.
    out_dtype : optional
        Ignored; the rocBLAS path keeps the input dtype.
    transpose_a, transpose_b : bool
        Transpose flags forwarded to rocBLAS. NOTE: the shape unpacking and
        validation below assume the default transpose_a=False, transpose_b=True.

    Returns
    -------
    output : tvm.te.Tensor
        3-D with shape [batch, M, N]
    """
    del out_dtype  # not supported by this extern path
    batch, M, K = get_const_tuple(x.shape)
    _, N, _ = get_const_tuple(y.shape)
    if out_shape is not None:
        assert out_shape[0] == batch, "Input and output batch sizes must match"
        assert out_shape[1] == M and out_shape[2] == N, "Invalid output shape"
    product = rocblas.batch_matmul(x, y, transpose_a, transpose_b)
    # One multiply-add (2 flops) per reduction element.
    cfg.add_flop(batch * M * N * K * 2)
    return product
@autotvm.register_topi_schedule("batch_matmul_rocblas.rocm")
def schedule_batch_matmul_rocblas(_, outs):
    """Schedule for batch_matmul operator with rocm cblas.

    The compute is an extern rocBLAS call, so the generic extern schedule suffices.
    """
    return generic.schedule_extern(outs)
| 2,218 | 35.983333 | 82 | py |
tvm | tvm-main/python/tvm/topi/rocm/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""rocm specific declaration and schedules."""
from __future__ import absolute_import as _abs
from .batch_matmul import *
from .conv2d import *
from .dense import *
| 1,005 | 39.24 | 62 | py |
tvm | tvm-main/python/tvm/micro/base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Base definitions for MicroTVM"""
import tvm
import tvm._ffi

# Bind the global functions registered under the "tvm.micro" prefix into this
# module's namespace ("tvm.micro.base").
tvm._ffi._init_api("tvm.micro", "tvm.micro.base")
| 900 | 38.173913 | 62 | py |
tvm | tvm-main/python/tvm/micro/debugger.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=consider-using-with
"""Defines functions for controlling debuggers for micro TVM binaries."""
import atexit
import abc
import errno
import logging
import os
import shlex
import signal
import subprocess
import sys
import termios
import threading
import time
import psutil
from .._ffi import register_func
from . import class_factory
from . import transport
from .transport.file_descriptor import FdTransport
_LOG = logging.getLogger(__name__)
class Debugger(metaclass=abc.ABCMeta):
    """An interface for controlling micro TVM debuggers."""

    @abc.abstractmethod
    def start(self):
        """Start the debugger, but do not block on it.

        The runtime will continue to be driven in the background.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def stop(self):
        """Terminate the debugger."""
        raise NotImplementedError()
class GdbDebugger(Debugger):
    """Handles launching, suspending signals, and potentially dealing with terminal issues."""

    # Number of seconds to wait in stop() for a graceful shutdown. After this time has elapsed,
    # the debugger is kill()'d.
    _GRACEFUL_SHUTDOWN_TIMEOUT_SEC = 5.0

    # The instance of GdbDebugger that's currently started.
    _STARTED_INSTANCE = None

    @classmethod
    def _stop_all(cls):
        # Stop the started instance, if any; registered with atexit at module bottom.
        if cls._STARTED_INSTANCE:
            cls._STARTED_INSTANCE.stop()

    def __init__(self):
        super(GdbDebugger, self).__init__()
        self._is_running = False
        self._is_running_lock = threading.RLock()
        # Set once the child has exited and cleanup has finished.
        self._child_exited_event = threading.Event()
        # Set once _sigusr1_handler has restored the original signal handlers.
        self._signals_reset_event = threading.Event()

    @abc.abstractmethod
    def popen_kwargs(self):
        """Return the kwargs used to subprocess.Popen the debugger process."""
        raise NotImplementedError()

    def _internal_stop(self):
        """Tear down the debugger process tree; callers hold _is_running_lock."""
        if not self._is_running:
            return
        # Signal ourselves so _sigusr1_handler restores the original SIGINT/SIGUSR1
        # handlers before the terminal state is restored.
        os.kill(os.getpid(), signal.SIGUSR1)
        self._signals_reset_event.wait()
        # Restore terminal settings modified while gdb/lldb was in control.
        termios.tcsetattr(sys.stdin.fileno(), termios.TCSAFLUSH, self.old_termios)
        try:
            # Terminate children gracefully first, then kill any survivors.
            children = psutil.Process(self.popen.pid).children(recursive=True)
            for c in children:
                c.terminate()
            _, alive = psutil.wait_procs(children, timeout=self._GRACEFUL_SHUTDOWN_TIMEOUT_SEC)
            for a in alive:
                a.kill()
        except psutil.NoSuchProcess:
            pass
        finally:
            self.__class__._STARTED_INSTANCE = None
            self._is_running = False
            self._child_exited_event.set()

    def _wait_for_child(self):
        # Background thread: block until the debugger exits, then clean up.
        self.popen.wait()
        with self._is_running_lock:
            self._internal_stop()

    @classmethod
    def _sigusr1_handler(cls, signum, stack_frame):  # pylint: disable=unused-argument
        """Restore the original SIGINT/SIGUSR1 handlers; raised from _internal_stop."""
        assert (
            cls._STARTED_INSTANCE is not None
        ), "overridden sigusr1 handler should not be invoked when GDB not started"
        signal.signal(signal.SIGINT, cls._STARTED_INSTANCE.old_sigint_handler)
        signal.signal(signal.SIGUSR1, cls._STARTED_INSTANCE.old_sigusr1_handler)
        cls._STARTED_INSTANCE._signals_reset_event.set()

    @classmethod
    def _sigint_handler(cls, signum, stack_frame):  # pylint: disable=unused-argument
        """Forward Ctrl+C to the debugger's process group instead of this process."""
        assert (
            cls._STARTED_INSTANCE is not None
        ), "overridden sigint handler should not be invoked when GDB not started"
        with cls._STARTED_INSTANCE._is_running_lock:
            exists = cls._STARTED_INSTANCE._is_running
        if exists:
            try:
                os.killpg(cls._STARTED_INSTANCE.child_pgid, signal.SIGINT)
            except ProcessLookupError:
                pass

    def start(self):
        """Launch the debugger subprocess and install signal/terminal bookkeeping."""
        with self._is_running_lock:
            assert not self._is_running
            assert not self._STARTED_INSTANCE
            kwargs = self.popen_kwargs()
            # Run the child in its own session/process group so SIGINT can be
            # forwarded to the whole group via killpg.
            self.did_start_new_session = kwargs.setdefault("start_new_session", True)
            self.old_termios = termios.tcgetattr(sys.stdin.fileno())
            self.popen = subprocess.Popen(**kwargs)
            self._is_running = True
            self.old_sigint_handler = signal.signal(signal.SIGINT, self._sigint_handler)
            self.old_sigusr1_handler = signal.signal(signal.SIGUSR1, self._sigusr1_handler)
            self.__class__._STARTED_INSTANCE = self
        try:
            self.child_pgid = os.getpgid(self.popen.pid)
        except Exception:
            self.stop()
            raise
        with self._is_running_lock:
            self._is_child_alive = True
            t = threading.Thread(target=self._wait_for_child)
            t.daemon = True
            t.start()

    def stop(self):
        """Block until the debugger process has exited and cleanup has run."""
        self._child_exited_event.wait()
# Ensure any still-running debugger is torn down when the interpreter exits.
atexit.register(GdbDebugger._stop_all)
class GdbTransportDebugger(GdbDebugger):
    """A debugger that uses a single GDB subprocess as both the transport and the debugger.

    Opens pipes for the target's stdin and stdout, launches GDB and configures GDB's target
    arguments to read and write from the pipes using /dev/fd.
    """

    def __init__(self, args, **popen_kw):
        super(GdbTransportDebugger, self).__init__()
        self.args = args
        self.popen_kw = popen_kw

    def popen_kwargs(self):
        """Build subprocess kwargs that launch gdb/lldb wired to fresh pipes.

        Creates one pipe pair for the debuggee's stdin and one for its stdout, and
        returns Popen kwargs whose command line redirects the debuggee's I/O through
        /dev/fd entries for those pipes. The parent-side ends are wrapped in an
        FdTransport stored on ``self.fd_transport``.
        """
        stdin_read, stdin_write = os.pipe()
        stdout_read, stdout_write = os.pipe()

        # The child-side fds must survive exec() so /dev/fd/<n> resolves in the debuggee.
        os.set_inheritable(stdin_read, True)
        os.set_inheritable(stdout_write, True)

        sysname = os.uname()[0]
        if sysname == "Darwin":
            args = [
                "lldb",
                "-O",
                f"target create {self.args[0]}",
                "-O",
                f"settings set target.input-path /dev/fd/{stdin_read}",
                "-O",
                f"settings set target.output-path /dev/fd/{stdout_write}",
            ]
            if len(self.args) > 1:
                args.extend(
                    ["-O", "settings set target.run-args {}".format(" ".join(self.args[1:]))]
                )
        elif sysname == "Linux":
            args = [
                "gdb",
                "-ex",
                f"file {self.args[0]}",
                "-ex",
                (
                    f"set args {' '.join(shlex.quote(a) for a in self.args[1:])} "
                    f"</dev/fd/{stdin_read} >/dev/fd/{stdout_write}"
                ),
            ]
        else:
            raise NotImplementedError(f"System {sysname} is not yet supported")

        self.fd_transport = FdTransport(
            stdout_read, stdin_write, transport.debug_transport_timeouts()
        )
        self.fd_transport.open()

        return {
            "args": args,
            "pass_fds": [stdin_read, stdout_write],
        }

    def _internal_stop(self):
        # Close the parent-side pipe fds before the regular teardown.
        self.fd_transport.close()
        super(GdbTransportDebugger, self)._internal_stop()

    class _Transport(transport.Transport):
        """Transport implementation over the pipes created by popen_kwargs()."""

        def __init__(self, gdb_transport_debugger):
            self.gdb_transport_debugger = gdb_transport_debugger

        def timeouts(self):
            return transport.debug_transport_timeouts()

        def open(self):
            pass  # Pipes opened by parent class.

        def write(self, data, timeout_sec):
            end_time = time.monotonic() + timeout_sec if timeout_sec is not None else None
            while True:
                try:
                    return self.gdb_transport_debugger.fd_transport.write(data, timeout_sec)
                except OSError as exc:
                    # NOTE: this error sometimes happens when writes are initiated before the child
                    # process launches; retry until the deadline expires.
                    if exc.errno == errno.EAGAIN:
                        if end_time is None or time.monotonic() < end_time:
                            time.sleep(0.1)  # sleep to avoid excessive CPU usage
                            continue

                        # BUGFIX: previously IoTimeoutError was unreachable and an expired
                        # deadline re-raised EAGAIN; signal the timeout explicitly instead.
                        raise base.IoTimeoutError()

                    raise exc

        def read(self, n, timeout_sec):
            end_time = time.monotonic() + timeout_sec if timeout_sec is not None else None
            while True:
                try:
                    return self.gdb_transport_debugger.fd_transport.read(n, timeout_sec)
                except OSError as exc:
                    # NOTE: this error sometimes happens when reads are initiated before the child
                    # process launches; retry until the deadline expires.
                    if exc.errno == errno.EAGAIN:
                        if end_time is None or time.monotonic() < end_time:
                            time.sleep(0.1)  # sleep to avoid excessive CPU usage
                            continue

                        raise base.IoTimeoutError()

                    raise exc

        def close(self):
            pass  # Pipes closed by parent class (DebugWrapperTransport calls stop() next).

    def transport(self):
        return self._Transport(self)
class GdbRemoteDebugger(GdbDebugger):
    """A Debugger that launches GDB locally and attaches it to a remote GDBserver target."""

    def __init__(
        self, gdb_binary, remote_hostport, debug_binary, wrapping_context_manager=None, **popen_kw
    ):
        super(GdbRemoteDebugger, self).__init__()
        self.gdb_binary = gdb_binary
        self.remote_hostport = remote_hostport
        self.debug_binary = debug_binary
        self.wrapping_context_manager = wrapping_context_manager
        self.popen_kw = popen_kw

    def popen_kwargs(self):
        """Return Popen kwargs: load symbols, then connect to the remote stub."""
        launch_args = [
            self.gdb_binary,
            "-iex",
            f"file {self.debug_binary}",
            "-iex",
            f"target remote {self.remote_hostport}",
        ]
        kwargs = {"args": launch_args}
        kwargs.update(self.popen_kw)
        return kwargs

    def start(self):
        """Enter the wrapping context manager (if any), then launch GDB."""
        if self.wrapping_context_manager is not None:
            self.wrapping_context_manager.__enter__()
        super(GdbRemoteDebugger, self).start()

    def stop(self):
        """Stop GDB, always exiting the wrapping context manager afterwards."""
        try:
            super(GdbRemoteDebugger, self).stop()
        finally:
            if self.wrapping_context_manager is not None:
                self.wrapping_context_manager.__exit__(None, None, None)
GLOBAL_DEBUGGER = None
class DebuggerFactory(class_factory.ClassFactory):
    """JSON-serializable factory for Debugger instances, used over RPC."""

    # Instantiated classes must derive from Debugger.
    SUPERCLASS = Debugger
def launch_debugger(debugger_factory, *args, **kw):
    """Instantiate and start a debugger from a factory, replacing any running one."""
    global GLOBAL_DEBUGGER
    if GLOBAL_DEBUGGER is not None:
        stop_debugger()
    debugger = debugger_factory.instantiate(*args, **kw)
    GLOBAL_DEBUGGER = debugger
    debugger.start()
@register_func("tvm.micro.debugger.launch_debugger")
def _launch_debugger(debugger_factory_json):
    # RPC endpoint: rebuild the DebuggerFactory from its JSON form and launch it.
    launch_debugger(DebuggerFactory.from_json(debugger_factory_json))
@register_func("tvm.micro.debugger.stop_debugger")
def stop_debugger():
    """Stop the module-level debugger, if one is active, and clear the singleton."""
    global GLOBAL_DEBUGGER
    if GLOBAL_DEBUGGER is None:
        return
    try:
        GLOBAL_DEBUGGER.stop()
    finally:
        GLOBAL_DEBUGGER = None
class RpcDebugger(Debugger):
    """A Debugger instance that launches the actual debugger on a remote TVM RPC server."""

    def __init__(self, rpc_session, factory, wrapping_context_manager=None):
        super(RpcDebugger, self).__init__()
        self._factory = factory
        self.launch_debugger = rpc_session.get_function("tvm.micro.debugger.launch_debugger")
        self.stop_debugger = rpc_session.get_function("tvm.micro.debugger.stop_debugger")
        self.wrapping_context_manager = wrapping_context_manager

    def start(self):
        """Launch the debugger remotely, then wait for the user to attach."""
        ctx = self.wrapping_context_manager
        if ctx is not None:
            ctx.__enter__()

        try:
            self.launch_debugger(self._factory.to_json)
        except Exception:
            if ctx is not None:
                ctx.__exit__(None, None, None)
            raise

        try:
            input("Press [Enter] when debugger is set")
        except Exception:
            self.stop()
            raise

        self._is_running = True

    def stop(self):
        """Stop the remote debugger, always exiting the wrapping context manager."""
        try:
            self.stop_debugger()
        finally:
            if self.wrapping_context_manager is not None:
                self.wrapping_context_manager.__exit__(None, None, None)
| 12,966 | 32.33419 | 99 | py |
tvm | tvm-main/python/tvm/micro/project.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines glue wrappers around the Project API which mate to TVM interfaces."""
import pathlib
from typing import Union
from .. import __version__
from ..contrib import utils
from .build import get_standalone_crt_dir
from .model_library_format import ExportableModule, export_model_library_format
from .project_api import client
from .transport import Transport, TransportTimeouts
def add_unspecified_options(options: dict, server_project_options: list) -> dict:
    """Fill in defaults for any project option the user did not supply.

    Parameters
    ----------
    options : dict
        User-provided option values; may be None or empty.
    server_project_options : list
        Option descriptors reported by the project API server; each entry is a
        dict carrying at least "name" and "default" keys.

    Returns
    -------
    dict :
        ``options`` augmented in place (or a new dict when none was given) so
        that every server-declared option has a value.
    """
    if not options:
        options = dict()
    for descriptor in server_project_options:
        options.setdefault(descriptor["name"], descriptor["default"])
    return options
class ProjectTransport(Transport):
    """A Transport implementation that delegates to a Project API client."""

    def __init__(self, api_client, options):
        self._api_client = api_client
        self._options = options
        self._timeouts = None

    def timeouts(self):
        """Return the timeouts reported by the server; valid only after open()."""
        assert self._timeouts is not None, "Transport not yet opened"
        return self._timeouts

    def open(self):
        """Open the server-side transport and capture its timeout settings."""
        response = self._api_client.open_transport(self._options)
        self._timeouts = TransportTimeouts(**response["timeouts"])

    def close(self):
        """Close the server-side transport and shut the client down."""
        if self._api_client.is_shutdown:
            return
        self._api_client.close_transport()
        self._api_client.shutdown()

    def write(self, data, timeout_sec):
        self._api_client.write_transport(data, timeout_sec)

    def read(self, n, timeout_sec):
        return self._api_client.read_transport(n, timeout_sec)["data"]
class TemplateProjectError(Exception):
    """Raised when the Project API server given to GeneratedProject reports is_template=True.

    Template projects must be driven through TemplateProject instead.
    """
class GeneratedProject:
    """Defines a glue interface to interact with a generated project through the API server."""

    @classmethod
    def from_directory(cls, project_dir: Union[pathlib.Path, str], options: dict):
        """Attach to the Project API server found in ``project_dir``."""
        return cls(client.instantiate_from_dir(project_dir), options)

    def __init__(self, api_client, options):
        self._api_client = api_client
        info = api_client.server_info_query(__version__)
        # A template server cannot be driven as a generated project.
        if info["is_template"]:
            raise TemplateProjectError()
        self._info = info
        self._options = add_unspecified_options(options, info["project_options"])

    def build(self):
        """Build the project via the API server."""
        self._api_client.build(self._options)

    def flash(self):
        """Flash the built firmware via the API server."""
        self._api_client.flash(self._options)

    def transport(self):
        """Return a Transport that tunnels through the API server."""
        return ProjectTransport(self._api_client, self._options)

    def info(self):
        """Return the server info reply captured at construction time."""
        return self._info

    @property
    def options(self):
        return self._options

    @options.setter
    def options(self, options):
        self._options = options
class NotATemplateProjectError(Exception):
    """Raised when the API server given to TemplateProject reports is_template=false.

    Non-template projects must be driven through GeneratedProject instead.
    """
class TemplateProject:
    """Defines a glue interface to interact with a template project through the API Server."""

    @classmethod
    def from_directory(cls, template_project_dir):
        """Connect to the Project API server found in ``template_project_dir``."""
        return cls(client.instantiate_from_dir(template_project_dir))

    def __init__(self, api_client):
        self._api_client = api_client
        self._info = api_client.server_info_query(__version__)
        if not self._info["is_template"]:
            raise NotATemplateProjectError()

    def _check_project_options(self, options: dict):
        """Raise ValueError when ``options`` names an option the server does not declare."""
        available_options = [option["name"] for option in self.info()["project_options"]]
        if options and not set(options.keys()).issubset(available_options):
            raise ValueError(
                f"""options:{list(options)} include non valid ProjectOptions.
                Here is a list of available options:{list(available_options)}."""
            )

    def generate_project_from_mlf(self, model_library_format_path, project_dir, options: dict):
        """Generate a concrete project from an exported Model Library Format archive."""
        self._check_project_options(options)
        options = add_unspecified_options(options, self._info["project_options"])

        self._api_client.generate_project(
            model_library_format_path=str(model_library_format_path),
            standalone_crt_dir=get_standalone_crt_dir(),
            project_dir=project_dir,
            options=options,
        )

        return GeneratedProject.from_directory(project_dir, options)

    def info(self):
        """Return the server info reply captured at construction time."""
        return self._info

    def generate_project(self, graph_executor_factory, project_dir, options):
        """Generate a project given GraphRuntimeFactory."""
        build_dir = utils.tempdir()
        mlf_path = build_dir.relpath("model.tar")
        export_model_library_format(graph_executor_factory, mlf_path)
        return self.generate_project_from_mlf(mlf_path, project_dir, options)
def generate_project(
    template_project_dir: Union[pathlib.Path, str],
    module: ExportableModule,
    generated_project_dir: Union[pathlib.Path, str],
    options: dict = None,
):
    """Generate a project for an embedded platform that contains the given model.

    Parameters
    ----------
    template_project_dir : pathlib.Path or str
        Path to a template project containing a microTVM Project API server.
    module : ExportableModule
        A runtime.Module exportable as Model Library Format (the value returned
        from tvm.relay.build or tvm.build).
    generated_project_dir : pathlib.Path or str
        Path to a directory to be created and filled with the built project.
    options : dict
        If given, Project API options forwarded to the microTVM API server found
        in both the template and generated project directories.

    Returns
    -------
    GeneratedProject :
        A class that wraps the generated project and which can be used to further
        interact with it.
    """
    return TemplateProject.from_directory(str(template_project_dir)).generate_project(
        module, str(generated_project_dir), options
    )
def generate_project_from_mlf(
    template_project_dir: Union[pathlib.Path, str],
    project_dir: Union[pathlib.Path, str],
    mlf_path: Union[pathlib.Path, str],
    options: dict,
):
    """Generate a project from a platform template and an existing Model Library Format archive.

    Parameters
    ----------
    template_project_dir : pathlib.Path or str
        Path to a template project containing a microTVM Project API server.
    project_dir : pathlib.Path or str
        Path to a directory where the project will be created.
    mlf_path : pathlib.Path or str
        Path to the Model Library Format archive used to create the new project;
        the archive file is copied into project_dir.
    options : dict
        Project API options given to the microTVM API server for the specified platform.

    Returns
    -------
    GeneratedProject :
        A class that wraps the generated project and which can be used to further
        interact with it.
    """
    return TemplateProject.from_directory(str(template_project_dir)).generate_project_from_mlf(
        str(mlf_path), str(project_dir), options
    )
| 8,163 | 35.284444 | 100 | py |
tvm | tvm-main/python/tvm/micro/model_library_format.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=cell-var-from-loop, use-list-literal
"""Defines functions for exporting to Model Library Format."""
import datetime
import json
import os
import pathlib
import re
import tarfile
import typing
import tvm
from tvm.micro import get_standalone_crt_dir, get_microtvm_template_projects
from .._ffi import get_global_func
from ..contrib import utils
from ..driver import build_module
from ..relay import param_dict
from ..relay.backend import executor_factory
from ..relay.backend.name_transforms import prefix_generated_name, to_c_variable_style
from ..tir import expr
# This should be kept identical to runtime::symbol::tvm_module_main
MAIN_FUNC_NAME_STR = "__tvm_main__"
STANDALONE_CRT_URL = "./runtime"
CRT_TEMPLATE_FILES_URL = "./templates"
METADATA_FILE = "metadata.json"
class UnsupportedInModelLibraryFormatError(Exception):
    """Raised when export_model_library_format does not support the given Module tree.

    For example, when the tree contains modules that are not DSO-exportable
    (neither "c" nor "llvm" type_key).
    """
def generate_c_interface_header(
    module_name,
    inputs,
    outputs,
    pools,
    io_pool_allocations,
    devices,
    workspace_size,
    include_path,
    input_sizes,
    output_sizes,
):
    """Generate a C interface header for ``module_name`` under ``include_path``.

    Returns the path of the header file that was written.
    """
    mangled_name = to_c_variable_style(prefix_generated_name(module_name))
    header_path = os.path.join(include_path, f"{mangled_name}.h")

    # The header body is produced by the C interface codegen registered in the runtime.
    create_interface_module = tvm._ffi.get_global_func("runtime.InterfaceCCreate")
    interface_module = create_interface_module(
        module_name,
        inputs,
        outputs,
        pools,
        io_pool_allocations,
        devices,
        workspace_size,
        input_sizes,
        output_sizes,
    )

    with open(header_path, "w") as header_file:
        header_file.write(interface_module.get_source())

    return header_path
# List of type_key for modules which are ephemeral and do not need to be exported.
# These are filtered out before deciding whether a module tree is exportable.
EPHEMERAL_MODULE_TYPE_KEYS = ("metadata_module",)
def _populate_codegen_dir(
    mods: typing.Union[
        typing.List[executor_factory.ExecutorFactoryModule],
        typing.List[tvm.runtime.Module],
    ],
    codegen_dir: str,
):
    """Populate the codegen sub-directory as part of a Model Library Format export.

    Parameters
    ----------
    mods : List[tvm.relay.backend.executor_factory.ExecutorFactoryModule], List[tvm.runtime.Module]
        A list of the return value of tvm.relay.build, which
        will be exported into Model Library Format.
    codegen_dir : str
        Path to the codegen directory on disk.
    """
    dso_modules = []
    for mod in mods:
        if isinstance(mod, executor_factory.ExecutorFactoryModule):
            lib = mod.lib
        elif isinstance(mod, tvm.runtime.Module):
            lib = mod
        else:
            raise RuntimeError(f"Not supported module type: {type(mod)}")

        dso_modules = lib._collect_dso_modules()
        non_dso_modules = lib._collect_from_import_tree(lambda m: m not in dso_modules)

        # Filter ephemeral modules which cannot be exported.
        dso_modules = [m for m in dso_modules if m.type_key not in EPHEMERAL_MODULE_TYPE_KEYS]
        non_dso_modules = [
            m for m in non_dso_modules if m.type_key not in EPHEMERAL_MODULE_TYPE_KEYS
        ]
        if non_dso_modules:
            raise UnsupportedInModelLibraryFormatError(
                f"Don't know how to export non-c or non-llvm modules; found: {non_dso_modules!r}"
            )

        # Per-module counters give each saved artifact a unique, stable index.
        mod_indices = {"lib": 0, "src": 0}
        host_codegen_dir = os.path.join(codegen_dir, "host")
        lib_name = (
            f"{mod.libmod_name}_lib"
            if isinstance(mod, executor_factory.ExecutorFactoryModule)
            else "lib"
        )

        for dso_mod in dso_modules:
            if dso_mod.type_key == "c":
                # C source modules go under src/ with their native extension.
                assert dso_mod.format in ["c", "cc", "cpp"]
                ext = dso_mod.format
                index = mod_indices["src"]
                mod_indices["src"] += 1
                parent_dir = os.path.join(host_codegen_dir, "src")
                file_name = os.path.join(parent_dir, f"{lib_name}{index}.{ext}")
            elif dso_mod.type_key == "llvm":
                # LLVM modules are saved as object files under lib/.
                index = mod_indices["lib"]
                mod_indices["lib"] += 1
                parent_dir = os.path.join(host_codegen_dir, "lib")
                file_name = os.path.join(parent_dir, f"{lib_name}{index}.o")
            else:
                assert (
                    False
                ), f"do not expect module with type_key={lib.type_key} from _collect_dso_modules"

            if not os.path.exists(parent_dir):
                os.makedirs(parent_dir)
            dso_mod.save(file_name)
def _build_memory_map(mod):
    """Summarize memory usage (storage ids plus per-function workspace) for a built module."""
    memory_summary = dict()
    if isinstance(mod, executor_factory.GraphExecutorFactoryModule):
        # The graph executor exposes its storage plan via the serialized graph JSON.
        memory_summary["sids"] = _build_sid_map(mod.graph_json)
    memory_summary["functions"] = _build_function_memory_map(mod.function_metadata)
    return memory_summary
def _build_sid_map(graph_json):
    """Build a simpler storage id info map from graph JSON.

    Parameters
    ----------
    graph_json : str
        String representation of the graph_json created from tvm.relay.build().

    Returns
    -------
    list :
        A list with one entry per storage id describing that memory.
    """
    graph = json.loads(graph_json)

    memory_map = []
    visited_sids = set()
    dtype_re = re.compile(r"^[a-zA-Z]+([0-9]+)$")
    for node_id, storage_id in enumerate(graph["attrs"]["storage_id"][1]):
        # Several nodes may share a storage id; describe each id only once.
        if storage_id in visited_sids:
            continue
        visited_sids.add(storage_id)

        dltype = graph["attrs"]["dltype"][1][storage_id]
        match = dtype_re.match(dltype)
        assert match, f"Exported graph contains unknown dltype {dltype}"
        elem_bits = int(match.group(1))

        num_elements = 1
        for dim in graph["attrs"]["shape"][1][storage_id]:
            num_elements *= dim

        entry = {
            "storage_id": storage_id,
            # Round the total bit count up to whole bytes.
            "size_bytes": (num_elements * elem_bits + 7) // 8,
        }
        if node_id in graph["arg_nodes"]:
            entry["input_binding"] = graph["nodes"][node_id]["name"]

        memory_map.append(entry)

    return memory_map
def _create_type_metadata(input_type):
    """Describe a tensor type as a JSON-serializable {size, dtype} record."""
    meta = dict()
    meta["size"] = int(_shape_to_size(input_type.shape, input_type.dtype))
    meta["dtype"] = str(input_type.dtype)
    return meta
def _flatten_tuple_outputs(ret_type, predefined_names, offset=0):
    """Recursively flatten a (possibly nested) tuple return type into {name: TensorType}."""
    if isinstance(ret_type, tvm.ir.tensor_type.TensorType):
        # Leaf tensor: use the caller-supplied name, or synthesize one by position.
        if predefined_names:
            return {predefined_names[offset]: ret_type}
        return {f"output{offset}": ret_type}

    outputs = {}
    for field in ret_type.fields:
        # Offset each recursive call by the number of leaves flattened so far.
        outputs.update(_flatten_tuple_outputs(field, predefined_names, offset + len(outputs)))
    return outputs
def _get_outputs_from_ret_type(ret_type, predefined_names):
    """Map a Relay return type to an ordered {name: TensorType} dict."""
    if isinstance(ret_type, tvm.ir.tensor_type.TensorType):
        # Single-tensor return: "output" unless the model supplies a name.
        if predefined_names:
            return {predefined_names[0]: ret_type}
        return {"output": ret_type}
    return _flatten_tuple_outputs(ret_type, predefined_names)
def _build_function_memory_map(function_metadata):
    """Build a simple map that shows how much workspace is required to execute
    each primitive function. The main_func describes how much memory is required
    to execute the main control code.

    Parameters
    ----------
    function_metadata : Map<String, FunctionInfo>
        This contains all the compiled metadata on a function basis

    Returns
    -------
    dict :
        This will have two entries:
        1.) A list with one entry per function describing local memory it is using.
        2.) A global memory requirement if all functions are executed sequentially
    """
    device_max_workspace = dict()
    main_func_metadata = function_metadata[MAIN_FUNC_NAME_STR]
    func_entries = []
    target_local_entries = dict()

    for func_name, finfo in function_metadata.items():
        # Skip a few unsupported cases:
        # 1. The main function metadata is exported elsewhere.
        # 2. BYOC operator implementations do not currently export useful FunctionInfo.
        if func_name == MAIN_FUNC_NAME_STR or not finfo.tir_primfuncs:
            continue
        if func_name not in target_local_entries.keys():
            target_local_entries[func_name] = list()
        for target in dict(finfo.workspace_sizes).keys():
            workspace_size = finfo.workspace_sizes[target]
            target_entry = {
                "device": int(target.get_target_device_type()),
                "workspace_size_bytes": int(workspace_size),
            }
            target_local_entries[func_name].append(target_entry)
            # Track the largest per-device operator workspace; main's total below
            # assumes operators run sequentially, so only the max matters.
            if workspace_size >= device_max_workspace.get(int(target.get_target_device_type()), 0):
                device_max_workspace[int(target.get_target_device_type())] = workspace_size

    for func_name, target_entries_ in target_local_entries.items():
        func_entry = {
            "function_name": str(func_name),
            "workspace": target_entries_,
        }
        func_entries.append(func_entry)

    target_main_entries = dict()

    def _create_empty_entry(target_device_type):
        # Zeroed per-device record for the main function's totals.
        return {
            "device": int(target_device_type),
            "workspace_size_bytes": 0,
            "constants_size_bytes": 0,
            "io_size_bytes": 0,
        }

    # main's workspace = largest operator workspace + main's own local workspace.
    for target in dict(main_func_metadata.workspace_sizes).keys():
        main_func_local_workspace = main_func_metadata.workspace_sizes[target]
        target_main_entries[int(target.get_target_device_type())] = _create_empty_entry(
            int(target.get_target_device_type())
        )
        target_main_entries[int(target.get_target_device_type())]["workspace_size_bytes"] = int(
            device_max_workspace.get(int(target.get_target_device_type()), 0)
        ) + int(main_func_local_workspace)

    for target in dict(main_func_metadata.constant_sizes).keys():
        if int(target.get_target_device_type()) not in target_main_entries.keys():
            target_main_entries[int(target.get_target_device_type())] = _create_empty_entry(
                int(target.get_target_device_type())
            )
        target_main_entries[int(target.get_target_device_type())]["constants_size_bytes"] = int(
            main_func_metadata.constant_sizes[target]
        )

    for target in dict(main_func_metadata.io_sizes).keys():
        if int(target.get_target_device_type()) not in target_main_entries.keys():
            target_main_entries[int(target.get_target_device_type())] = _create_empty_entry(
                int(target.get_target_device_type())
            )
        target_main_on_device = target_main_entries[int(target.get_target_device_type())]
        target_main_on_device["io_size_bytes"] = int(main_func_metadata.io_sizes[target])

        # Describe main's inputs/outputs from the Relay signature for this target.
        main_relay_func = main_func_metadata.relay_primfuncs[target]
        target_main_on_device["inputs"] = {
            input_param.name_hint: _create_type_metadata(input_param.checked_type)
            for input_param in main_relay_func.params
        }
        predefined_names = (
            main_relay_func.attrs["output_tensor_names"]
            if "output_tensor_names" in main_relay_func.attrs
            else None
        )
        target_main_on_device["outputs"] = {
            name: _create_type_metadata(output_type)
            for name, output_type in _get_outputs_from_ret_type(
                main_relay_func.ret_type, predefined_names
            ).items()
        }

    ret = {
        "operator_functions": func_entries,
        "main": list(target_main_entries.values()),
    }
    return ret
def _get_pools_from_module(mod):
    # Return the memory pool descriptors recorded by the executor codegen.
    return list(dict(mod.executor_codegen_metadata.pool_inputs).values())
def _get_io_pool_allocation_from_module(mod):
    # Return the I/O tensor -> pool allocation mapping from the executor codegen.
    return dict(mod.executor_codegen_metadata.io_pool_allocations)
def _should_generate_interface_header(mod):
    """Return True when the module was built with the C interface API enabled."""
    executor = mod.executor
    return "interface-api" in executor and executor["interface-api"] == "c"
def _make_tar(source_dir, tar_file_path, modules):
    """Build a tar file from source_dir."""
    with tarfile.open(tar_file_path, "w") as tar_f:

        def reset(tarinfo):
            # Normalize ownership so archives are reproducible regardless of the build user.
            tarinfo.uid = tarinfo.gid = 0
            tarinfo.uname = tarinfo.gname = "root"
            return tarinfo

        tar_f.add(str(source_dir), arcname=".", filter=reset)

        # When any AOT+CRT module is present, bundle the standalone CRT and its
        # template files once, then stop scanning.
        for mod in modules:
            is_aot = isinstance(mod, executor_factory.AOTExecutorFactoryModule)
            if is_aot and str(mod.runtime) == "crt":
                crt_template_path = pathlib.Path(get_microtvm_template_projects("crt"))
                tar_f.add(get_standalone_crt_dir(), arcname=STANDALONE_CRT_URL)
                # Add template files from CRT template project
                for file in [
                    "templates/crt_config.h.template",
                    "templates/platform.c.template",
                ]:
                    tar_f.add(
                        crt_template_path / pathlib.Path(file),
                        arcname=f"{CRT_TEMPLATE_FILES_URL}/{pathlib.Path(file).name}",
                    )
                break
_GENERATED_VERSION = 7
def _is_module_names_unique(mods: typing.List[executor_factory.ExecutorFactoryModule]):
    """Check if built modules have unique names.

    Parameters
    ----------
    mods : List[tvm.relay.backend.executor_factory.ExecutorFactoryModule]
        A list of the return value of tvm.relay.build,
        which will be exported into Model Library Format.
    """
    names = [m.libmod_name for m in mods]
    return len(set(names)) == len(names)
def _export_graph_model_library_format(
    mods: typing.List[executor_factory.ExecutorFactoryModule], tempdir: pathlib.Path
):
    """Export a tvm.relay.build artifact in Model Library Format.

    Parameters
    ----------
    mods : List[tvm.relay.backend.executor_factory.ExecutorFactoryModule]
        A list of the return value of tvm.relay.build,
        which will be exported into Model Library Format.
    tempdir : pathlib.Path
        Temporary directory to populate with Model Library Format contents.
    """
    assert _is_module_names_unique(mods), "Multiple modules should have unique names."

    metadata = {
        "version": _GENERATED_VERSION,
    }
    metadata["modules"] = {}
    for mod in mods:
        is_aot = isinstance(mod, executor_factory.AOTExecutorFactoryModule)
        executor = ["aot"] if is_aot else ["graph"]
        module_name = mod.libmod_name
        metadata["modules"][module_name] = {
            "model_name": module_name,
            "export_datetime": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%SZ"),
            "memory": _build_memory_map(mod),
            "target": [str(t) for t in mod.target],
            "executors": executor,
            "style": "full-model",
        }

        # AOT modules built against the CRT depend on the bundled standalone CRT.
        if is_aot and (str(mod.runtime) == "crt"):
            standalone_crt = {
                "short_name": "tvm_standalone_crt",
                "url": f"{STANDALONE_CRT_URL}",
                "url_type": "mlf_path",
                "version_spec": f"{tvm.__version__}",
            }
            external_dependencies = [standalone_crt]
            metadata["modules"][module_name]["external_dependencies"] = external_dependencies

    with open(tempdir / METADATA_FILE, "w") as json_f:
        json.dump(metadata, json_f, indent=2, sort_keys=True)

    codegen_dir = tempdir / "codegen"
    codegen_dir.mkdir()
    _populate_codegen_dir(mods, codegen_dir)

    parameters_dir = tempdir / "parameters"
    parameters_dir.mkdir()
    src_dir = tempdir / "src"
    src_dir.mkdir()
    graph_config_dir = tempdir / "executor-config" / "graph"
    for mod in mods:
        if _should_generate_interface_header(mod):
            include_path = codegen_dir / "host" / "include"
            if not include_path.exists():
                include_path.mkdir()

            devices = mod.get_devices()
            pools = _get_pools_from_module(mod)
            io_pool_allocations = _get_io_pool_allocation_from_module(mod)
            # Sizes and names come from the memory map computed above for this module.
            main_func = metadata["modules"][mod.libmod_name]["memory"]["functions"]["main"][0]
            workspace_size = int(main_func["workspace_size_bytes"])
            inputs = main_func["inputs"]
            outputs = main_func["outputs"]
            inputs_sizes = {name: property_map["size"] for name, property_map in inputs.items()}
            output_sizes = {name: property_map["size"] for name, property_map in outputs.items()}
            input_names = list(inputs.keys())
            output_names = list(outputs.keys())
            generate_c_interface_header(
                mod.libmod_name,
                input_names,
                output_names,
                pools,
                io_pool_allocations,
                devices,
                workspace_size,
                include_path,
                inputs_sizes,
                output_sizes,
            )

        is_aot = isinstance(mod, executor_factory.AOTExecutorFactoryModule)
        param_filename = parameters_dir / f"{mod.libmod_name}.params"
        with open(param_filename, "wb") as f:
            f.write(param_dict.save_param_dict(mod.params))
        with open(src_dir / f"{mod.libmod_name}.relay", "w") as f:
            f.write(str(mod.ir_mod))

        # Only the graph executor needs its serialized graph config exported.
        if not is_aot:
            if not graph_config_dir.exists():
                graph_config_dir.mkdir(parents=True)
            with open(graph_config_dir / f"{mod.libmod_name}.graph", "w") as f:
                f.write(mod.get_executor_config())
class NonStaticShapeError(Exception):
    """Raised when a shape has elements other than IntImm.

    Such shapes are dynamic and have no fixed byte size, so they cannot be
    described in the exported memory map.
    """
def _shape_to_size(shape, dtype):
    """Return the size in bytes of a tensor with the given shape and dtype.

    Parameters
    ----------
    shape : list
        Tensor shape; elements must be plain ints.
    dtype : str
        Numeric dtype string such as "float32", "int8" or "uint16".

    Returns
    -------
    int :
        Number of bytes needed to store the tensor, rounding the total bit
        count up to whole bytes.
    """
    # BUGFIX: previously .group() was called on the match result before any check,
    # so an unrecognized dtype raised AttributeError and the subsequent assert
    # (`int(...) is not None`, always true) was dead. Check the match object first.
    m = re.match(r"((float)|(int)|(uint))(?P<width_bits>[0-9]+)", dtype)
    assert m is not None, f"don't know how to compute size of type {dtype}"
    bits_per_item = int(m.group("width_bits"))

    total_bits = bits_per_item
    for s in shape:
        total_bits *= s

    # Round up to whole bytes.
    return (total_bits + 7) // 8
def _write_tir_and_build_operator_memory_map(src_dir, targets, ir_module_by_target):
    """Write per-target TIR dumps under ``src_dir`` and build the operator memory map.

    Returns a dict mapping each global function name to a list of buffer
    descriptions (size, shape, dtype, bound variable name).
    """

    def _eval_shape(param_name, buffer_shape):
        # Convert a TIR shape to plain ints; dynamic dims are not representable.
        shape = []
        for x in buffer_shape:
            if not isinstance(x, expr.IntImm):
                raise NonStaticShapeError(
                    f"Parameter {param_name} has shape with non-IntImm elements: {buffer_shape}"
                )
            shape.append(x.value)
        return shape

    memory_map = {}
    for target in targets:
        # TODO(mbs): The device type is not unique, better would be to use target.kind.name
        target_device_type = target.get_target_device_type()
        ir_mod = ir_module_by_target[target]
        printer = get_global_func("relay.ir.ModelLibraryFormatPrinter")(False, None, False)
        with open(src_dir / f"tir-{target_device_type}.txt", "w") as f:
            f.write(printer["print"](ir_mod))

        for v in ir_mod.get_global_vars():
            map_entry = []
            for p, b in ir_mod[v.name_hint].buffer_map.items():
                shape = _eval_shape(p.name, b.shape)
                buffer_size_bytes = _shape_to_size(shape, str(b.dtype))
                # NOTE: cannot tell what is an input or output at this point.
                map_entry.append(
                    {
                        "size_bytes": buffer_size_bytes,
                        "shape": [int(x) for x in b.shape],
                        "dtype": b.dtype,
                        "input_binding": printer["get_var_name"](p),
                    }
                )
            memory_map[v.name_hint] = map_entry

    return memory_map
def _export_operator_model_library_format(mod: build_module.OperatorModule, tempdir):
    """Export the result of tvm.build() in Model Library Format.

    Parameters
    ----------
    mod : runtime.Module
        The Module returned from tvm.build().
    tempdir : pathlib.Path
        Temporary directory to populate with Model Library Format contents.
    """
    targets = []
    for target in mod.ir_module_by_target.keys():
        # Only DSO-exportable targets can be written to disk as source/objects.
        if str(target.kind) not in ("llvm", "c"):
            raise UnsupportedInModelLibraryFormatError(
                f"Operator has non-DSO-exportable target {target!s}, which is not yet supported in "
                "Model Library Format"
            )

        targets.append(target)

    src_dir = tempdir / "src"
    src_dir.mkdir()
    memory_map = _write_tir_and_build_operator_memory_map(src_dir, targets, mod.ir_module_by_target)

    metadata = {
        "version": _GENERATED_VERSION,
        "model_name": mod.name,
        "export_datetime": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%SZ"),
        "memory": memory_map,
        "target": [str(t) for t in targets],
        "executors": [],
        "style": "operator",
    }
    with open(tempdir / METADATA_FILE, "w") as metadata_f:
        json.dump(metadata, metadata_f)

    codegen_dir = tempdir / "codegen"
    codegen_dir.mkdir()
    _populate_codegen_dir(list([mod]), codegen_dir)
# Union of the module types that export_model_library_format() accepts.
ExportableModule = typing.Union[
    build_module.OperatorModule,
    executor_factory.AOTExecutorFactoryModule,
    executor_factory.GraphExecutorFactoryModule,
]
def export_model_library_format(
    mods: typing.Union[ExportableModule, typing.List[ExportableModule]],
    file_name: typing.Union[str, pathlib.Path],
):
    """Export the build artifact in Model Library Format.

    This function creates a .tar archive containing the build artifacts in a standardized
    layout. It's intended to allow downstream automation to build TVM artifacts against the C
    runtime.

    Parameters
    ----------
    mods : ExportableModule, List[ExportableModule]
        The return value of tvm.build or tvm.relay.build.
    file_name : str
        Path to the .tar archive to generate.

    Returns
    -------
    file_name : str
        The path to the generated .tar archive.
    """
    modules = mods if isinstance(mods, list) else list([mods])

    # The export style depends on whether every module is an operator module
    # (tvm.build) or every module is an executor factory (tvm.relay.build).
    all_operator_modules = all(isinstance(m, build_module.OperatorModule) for m in modules)
    all_graph_modules = all(
        isinstance(
            m,
            (
                executor_factory.AOTExecutorFactoryModule,
                executor_factory.GraphExecutorFactoryModule,
            ),
        )
        for m in modules
    )

    file_name = pathlib.Path(file_name)
    tempdir = utils.tempdir()

    if all_operator_modules:
        if len(modules) != 1:
            raise RuntimeError("Multiple operator is not supported.")
        _export_operator_model_library_format(modules[0], tempdir.path)
    elif all_graph_modules:
        _export_graph_model_library_format(modules, tempdir.path)
    else:
        raise NotImplementedError(
            f"Don't know how to export module of type {modules[0].__class__!r}"
        )

    _make_tar(tempdir.path, file_name, modules)

    return file_name
| 23,962 | 34.765672 | 100 | py |
tvm | tvm-main/python/tvm/micro/class_factory.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines a utility for representing deferred class instatiations as JSON."""
import importlib
import json
import typing
# The JSON-representable scalar types accepted as deferred constructor arguments.
JsonSerializable = typing.Union[int, float, str, None, bool]
class SerializedFactoryError(Exception):
    """Raised when ClassFactory.from_json is invoked with an invalid JSON blob.

    "Invalid" here means the payload does not decode to a dict, or the dict is
    missing one of the keys in ClassFactory.EXPECTED_KEYS.
    """
class ClassFactory:
    """A deferred, JSON-serializable class instantiation, for use with the RPC server.

    Holds a callable together with the positional and keyword arguments needed to
    construct an instance later, and can round-trip that description through JSON.
    """

    # When not None, the superclass from which all cls must derive.
    SUPERCLASS = None

    def __init__(
        self,
        cls: typing.Callable,
        init_args: typing.List[JsonSerializable],
        init_kw: typing.Dict[str, JsonSerializable],
    ):
        self.cls = cls
        self.init_args = init_args
        self.init_kw = init_kw

    def override_kw(self, **kw_overrides):
        """Return a new factory with the given keyword arguments overridden."""
        merged_kw = self.init_kw
        if kw_overrides:
            # Copy before merging so the original factory's kwargs are untouched.
            merged_kw = {**self.init_kw, **kw_overrides}
        return self.__class__(self.cls, self.init_args, merged_kw)

    def instantiate(self):
        """Construct and return the instance this factory describes."""
        return self.cls(*self.init_args, **self.init_kw)

    @property
    def to_json(self):
        """This factory serialized as a JSON string."""
        payload = {
            "cls": f"{self.cls.__module__}.{self.cls.__name__}",
            "init_args": self.init_args,
            "init_kw": self.init_kw,
        }
        return json.dumps(payload)

    EXPECTED_KEYS = ("cls", "init_args", "init_kw")

    @classmethod
    def from_json(cls, data):
        """Reconstruct a ClassFactory instance from its JSON representation.

        Parameters
        ----------
        data : str
            The JSON representation of the ClassFactory.

        Returns
        -------
        ClassFactory :
            The reconstructed ClassFactory instance.

        Raises
        ------
        SerializedFactoryError :
            If the JSON object represented by `data` is malformed.
        """
        payload = json.loads(data)
        if not isinstance(payload, dict):
            raise SerializedFactoryError(f"deserialized json payload: want dict, got: {payload!r}")
        for expected_key in cls.EXPECTED_KEYS:
            if expected_key not in payload:
                raise SerializedFactoryError(
                    f"deserialized json payload: expect key {expected_key}, got: {payload!r}"
                )
        # The "cls" entry is a dotted path: everything before the last "." is the
        # module, the remainder is the attribute to fetch from it.
        module_name, class_name = payload["cls"].rsplit(".", 1)
        module = importlib.import_module(module_name)
        factory_cls = getattr(module, class_name)
        return cls(factory_cls, payload["init_args"], payload["init_kw"])
| 3,350 | 30.914286 | 95 | py |
tvm | tvm-main/python/tvm/micro/session.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines a top-level glue class that operates the Transport and Flasher classes."""
import json
import logging
import sys
import os
import pathlib
import shutil
from typing import Union
from tvm.runtime.executor.aot_executor import AotModule
from ..error import register_error
from .._ffi import get_global_func, register_func
from ..contrib import graph_executor
from ..contrib import utils
from ..contrib.debugger import debug_executor
from ..rpc import RPCSession
from . import project
from .transport import IoTimeoutError
from .transport import TransportLogger
try:
from .base import _rpc_connect
except ImportError:
raise ImportError("micro tvm is not enabled. Set USE_MICRO to ON in config.cmake")
# NOTE(review): @register_error presumably registers this exception with TVM's
# FFI error-translation machinery (it is imported from ..error) — confirm.
@register_error
class SessionTerminatedError(Exception):
    """Raised when a transport read operation discovers that the remote session is terminated."""
class Session:
    """MicroTVM Device Session
    Parameters
    ----------
    config : dict
        configuration for this session (as generated by
        `tvm.micro.device.host.default_config()`, for example)
    Example
    --------
    .. code-block:: python
        c_mod = ... # some module generated with "c" as the target
        dev_config = micro.device.arm.stm32f746xx.default_config('127.0.0.1', 6666)
        with tvm.micro.Session(dev_config) as sess:
            micro_mod = sess.create_micro_mod(c_mod)
    """
    def __init__(
        self,
        transport_context_manager=None,
        session_name="micro-rpc",
        timeout_override=None,
    ):
        """Configure a new session.
        Parameters
        ----------
        transport_context_manager : ContextManager[transport.Transport]
            If given, `flasher` and `binary` should not be given. On entry, this context manager
            should establish a transport between this TVM instance and the device.
        session_name : str
            Name of the session, used for debugging.
        timeout_override : TransportTimeouts
            If given, TransportTimeouts that govern the way Receive() behaves. If not given, this is
            determined by calling has_flow_control() on the transport.
        """
        self.transport_context_manager = transport_context_manager
        self.session_name = session_name
        self.timeout_override = timeout_override
        # Populated lazily in __enter__ once the RPC link is established.
        self._rpc = None
        self._graph_executor = None
        self._enable_rpc_logger = False
        # Guards __exit__ so the teardown path runs at most once (it can be
        # reached both via the context manager and via the _cleanup callback).
        self._exit_called = False
    def get_system_lib(self):
        """Return the remote device's system library module."""
        return self._rpc.get_function("runtime.SystemLib")()
    def create_aot_executor(self):
        """Create an AOT executor on the remote device, backed by the system lib."""
        return self._rpc.get_function("tvm.aot_executor.create")(
            self.get_system_lib(), self.device, "default"
        )
    def _wrap_transport_read(self, n, timeout_microsec):
        # Adapter: the RPC layer passes timeouts in microseconds (or None);
        # Transport.read expects seconds. A timeout is reported as an empty read.
        try:
            return self.transport.read(
                n, float(timeout_microsec) / 1e6 if timeout_microsec is not None else None
            )
        except IoTimeoutError:
            return bytes([])
    def _wrap_transport_write(self, data, timeout_microsec):
        # Adapter: microseconds -> seconds, and report the full length as written.
        self.transport.write(
            data, float(timeout_microsec) / 1e6 if timeout_microsec is not None else None
        )
        return len(data) # TODO(areusch): delete
    def __enter__(self):
        """Initialize this session and establish an RPC session with the on-device RPC server.
        Returns
        -------
        Session :
            Returns self.
        """
        # Wrap the underlying transport so all traffic is logged at DEBUG level.
        self.transport = TransportLogger(
            self.session_name, self.transport_context_manager, level=logging.DEBUG
        ).__enter__()
        try:
            timeouts = self.timeout_override
            if timeouts is None:
                timeouts = self.transport.timeouts()
            self._rpc = RPCSession(
                _rpc_connect(
                    self.session_name,
                    self._wrap_transport_write,
                    self._wrap_transport_read,
                    int(timeouts.session_start_retry_timeout_sec * 1e6),
                    int(timeouts.session_start_timeout_sec * 1e6),
                    int(timeouts.session_established_timeout_sec * 1e6),
                    self._cleanup,
                    self._enable_rpc_logger,
                )
            )
            self.device = self._rpc.cpu(0)
            return self
        # Deliberately broad: on any failure to establish the RPC session, close
        # the transport we just opened, then re-raise the original exception.
        except:
            self.transport.__exit__(*sys.exc_info())
            raise
    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Tear down this session and associated RPC session resources."""
        if not self._exit_called:
            self._exit_called = True
            self.transport.__exit__(exc_type, exc_value, exc_traceback)
            shutdown_func = self._rpc._sess.get_function("CloseRPCConnection")
            shutdown_func()
    def _cleanup(self):
        # Callback handed to _rpc_connect so the RPC layer can trigger teardown.
        self.__exit__(None, None, None)
def lookup_remote_linked_param(mod, storage_id, template_tensor, device):
    """Lookup a parameter that has been pre-linked into a remote (i.e. over RPC) Module.

    This function matches the parameter-lookup callback signature expected by the
    graph executor factory; create_local_graph_executor and
    create_local_debug_executor below pass it through to the executor's create
    function.

    Parameters
    ----------
    mod : tvm.runtime.Module
        The remote Module containing the pre-linked parameters.
    storage_id : int
        An integer identifying the pre-linked paramter to find
    template_tensor : DLTensor
        A DLTensor containing metadata that should be filled-in to the returned NDArray. This
        function should mostly not inspect this, and just pass it along to
        NDArrayFromRemoteOpaqueHandle.
    device : Device
        The remote CPU device to be used with the returned NDArray.

    Returns
    -------
    tvm.nd.NDArray :
        NDArray containing the pre-linked parameter, or None if the module has no
        linked parameters (or none for this storage_id).
    """
    # Modules without linked params have no "_lookup_linked_param" function.
    try:
        lookup_linked_param = mod.get_function("_lookup_linked_param")
    except AttributeError:
        return None
    remote_data = lookup_linked_param(storage_id)
    if remote_data is None:
        return None
    return get_global_func("tvm.rpc.NDArrayFromRemoteOpaqueHandle")(
        mod, remote_data, template_tensor, device, None
    )
def create_local_graph_executor(graph_json_str, mod, device):
    """Create a local graph executor driving execution on the remote CPU device given.

    Parameters
    ----------
    graph_json_str : str
        A string containing the graph representation.
    mod : tvm.runtime.Module
        The remote module containing functions in graph_json_str.
    device : tvm.runtime.Device
        The remote CPU execution device.

    Returns
    -------
    tvm.contrib.GraphExecutor :
        A local graph executor instance that executes on the remote device.
    """
    create_fn = get_global_func("tvm.graph_executor.create")
    executor_handle = create_fn(
        graph_json_str,
        mod,
        lookup_remote_linked_param,
        device.device_type,
        device.device_id,
    )
    return graph_executor.GraphModule(executor_handle)
def create_local_debug_executor(graph_json_str, mod, device, dump_root=None):
    """Create a local debug runtime driving execution on the remote CPU device given.

    Parameters
    ----------
    graph_json_str : str
        A string containing the graph representation.
    mod : tvm.runtime.Module
        The remote module containing functions in graph_json_str.
    device : tvm.runtime.Device
        The remote CPU execution device.
    dump_root : Optional[str]
        If given, passed as dump_root= to GraphModuleDebug.

    Returns
    -------
    tvm.contrib.GraphExecutor :
        A local graph executor instance that executes on the remote device.
    """
    create_fn = get_global_func("tvm.graph_executor_debug.create")
    executor_handle = create_fn(
        graph_json_str,
        mod,
        lookup_remote_linked_param,
        device.device_type,
        device.device_id,
    )
    return debug_executor.GraphModuleDebug(
        executor_handle,
        [device],
        graph_json_str,
        dump_root=dump_root,
    )
def create_local_aot_executor(session: Session):
    """Create a local AoT executor driving execution on the remote CPU device given.

    Parameters
    ----------
    session : Session
        A microTVM device session.

    Returns
    -------
    tvm.runtime.executor.aot_executor.AotModule :
        A local AoT executor instance that executes on the remote device.
    """
    remote_aot_executor = session.create_aot_executor()
    return AotModule(remote_aot_executor)
@register_func("tvm.micro.compile_and_create_micro_session")
def compile_and_create_micro_session(
    mod_src_bytes: bytes,
    template_project_dir: str,
    # NOTE(review): annotation corrected from `dict` to `str` — this value is
    # decoded with json.loads below, and AutoTvmModuleLoader.__call__ passes
    # json.dumps(...) through the RPC session constructor args.
    project_options: str = None,
    project_dir: Union[os.PathLike, str] = None,
    use_existing: bool = False,
):
    """Compile the given libraries and sources into a MicroBinary, then invoke create_micro_session.

    Parameters
    ----------
    mod_src_bytes : bytes
        The content of a tarfile which contains the TVM-generated sources which together form the
        SystemLib. This tar is expected to be created by export_library. The tar will be extracted
        into a directory and the sources compiled into a MicroLibrary using the Compiler.
    template_project_dir: str
        The path to a template microTVM Project API project which is used to generate the embedded
        project that is built and flashed onto the target device.
    project_options: str
        JSON-encoded options for the microTVM API Server contained in template_project_dir.
    project_dir: Union[os.PathLike, str]
        if use_existing is False: The path to save the generated microTVM Project.
        if use_existing is True: The path to a generated microTVM Project for debugging.
    use_existing: bool
        skips the project generation and opens transport to the project at the project_dir address.
    """
    if use_existing:
        project_dir = pathlib.Path(project_dir)
        assert project_dir.is_dir(), f"{project_dir} does not exist."
        # Force a clean rebuild of the existing project by removing its build tree.
        build_dir = project_dir / "generated-project" / "build"
        shutil.rmtree(build_dir)
        generated_project = project.GeneratedProject.from_directory(
            project_dir / "generated-project",
            options=json.loads(project_options),
        )
    else:
        if project_dir:
            temp_dir = utils.tempdir(custom_path=project_dir, keep_for_debug=True)
        else:
            temp_dir = utils.tempdir()
        # Write the Model Library Format archive to disk so the template project
        # generator can consume it.
        model_library_format_path = temp_dir / "model.tar.gz"
        with open(model_library_format_path, "wb") as mlf_f:
            mlf_f.write(mod_src_bytes)
        try:
            template_project = project.TemplateProject.from_directory(template_project_dir)
            generated_project = template_project.generate_project_from_mlf(
                model_library_format_path,
                str(temp_dir / "generated-project"),
                options=json.loads(project_options),
            )
        except Exception as exception:
            # Log for remote diagnosis (this runs server-side over RPC), then re-raise.
            logging.error("Project Generate Error: %s", str(exception))
            raise exception
    generated_project.build()
    generated_project.flash()
    transport = generated_project.transport()
    rpc_session = Session(transport_context_manager=transport)
    # RPC exit is called by cleanup function.
    rpc_session.__enter__()
    return rpc_session._rpc._sess
| 12,041 | 33.405714 | 100 | py |
tvm | tvm-main/python/tvm/micro/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""MicroTVM module for bare-metal backends"""
from .build import autotvm_build_func
from .build import AutoTvmModuleLoader
from .build import get_standalone_crt_dir
from .build import get_microtvm_template_projects
from .build import copy_crt_config_header
from .model_library_format import (
export_model_library_format,
UnsupportedInModelLibraryFormatError,
)
from .project import generate_project, GeneratedProject, TemplateProject
from .session import (
create_local_graph_executor,
create_local_debug_executor,
create_local_aot_executor,
Session,
SessionTerminatedError,
)
from .transport import TransportLogger
| 1,431 | 37.702703 | 72 | py |
tvm | tvm-main/python/tvm/micro/transport.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines abstractions and implementations of the RPC transport used with micro TVM."""
import abc
import logging
import string
import typing
from .project_api.server import IoTimeoutError, TransportTimeouts
from .project_api.server import TransportClosedError
_ = TransportClosedError # work around pylint unused-import error
_LOG = logging.getLogger(__name__)
def debug_transport_timeouts(session_start_retry_timeout_sec=0):
    """Return TransportTimeouts with all timeouts disabled, for debugging.

    Only the session-start retry timeout is configurable; the remaining
    timeouts are zero (i.e. wait indefinitely).
    """
    timeout_kw = {
        "session_start_retry_timeout_sec": session_start_retry_timeout_sec,
        "session_start_timeout_sec": 0,
        "session_established_timeout_sec": 0,
    }
    return TransportTimeouts(**timeout_kw)
class Transport(metaclass=abc.ABCMeta):
    """The abstract Transport class used for micro TVM.

    A Transport is a bidirectional byte channel between this TVM instance and a
    device. Subclasses implement timeouts/open/close/read/write; the context
    manager protocol maps directly onto open() and close().
    """
    def __enter__(self):
        self.open()
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Exceptions are never suppressed (implicit None return).
        self.close()
    @abc.abstractmethod
    def timeouts(self):
        """Return TransportTimeouts suitable for use with this transport.
        See the TransportTimeouts documentation in python/tvm/micro/session.py.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def open(self):
        """Open any resources needed to send and receive RPC protocol data for a single session."""
        raise NotImplementedError()
    @abc.abstractmethod
    def close(self):
        """Release resources associated with this transport."""
        raise NotImplementedError()
    @abc.abstractmethod
    def read(self, n, timeout_sec):
        """Read up to n bytes from the transport.
        Parameters
        ----------
        n : int
            Maximum number of bytes to read from the transport.
        timeout_sec : Union[float, None]
            Number of seconds to wait for all `n` bytes to be received before timing out. The
            transport can wait additional time to account for transport latency or bandwidth
            limitations based on the selected configuration and number of bytes being received. If
            timeout_sec is 0, read should attempt to service the request in a non-blocking fashion.
            If timeout_sec is None, read should block until at least 1 byte of data can be returned.
        Returns
        -------
        bytes :
            Data read from the channel. Less than `n` bytes may be returned, but 0 bytes should
            never be returned. If returning less than `n` bytes, the full timeout_sec, plus any
            internally-added timeout, should be waited. If a timeout or transport error occurs,
            an exception should be raised rather than simply returning empty bytes.
        Raises
        ------
        TransportClosedError :
            When the transport layer determines that the transport can no longer send or receive
            data due to an underlying I/O problem (i.e. file descriptor closed, cable removed, etc).
        IoTimeoutError :
            When `timeout_sec` elapses without receiving any data.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def write(self, data, timeout_sec):
        """Write data to the transport channel.
        Parameters
        ----------
        data : bytes
            The data to write over the channel.
        timeout_sec : Union[float, None]
            Number of seconds to wait for at least one byte to be written before timing out. The
            transport can wait additional time to account for transport latency or bandwidth
            limitations based on the selected configuration and number of bytes being received. If
            timeout_sec is 0, write should attempt to service the request in a non-blocking fashion.
            If timeout_sec is None, write should block until at least 1 byte of data can be
            returned.
        Returns
        -------
        int :
            The number of bytes written to the underlying channel. This can be less than the length
            of `data`, but cannot be 0 (raise an exception instead).
        Raises
        ------
        TransportClosedError :
            When the transport layer determines that the transport can no longer send or receive
            data due to an underlying I/O problem (i.e. file descriptor closed, cable removed, etc).
        IoTimeoutError :
            When `timeout_sec` elapses without receiving any data.
        """
        raise NotImplementedError()
class TransportLogger(Transport):
    """Wraps a Transport implementation and logs traffic to the Python logging infrastructure.

    Every open/close/read/write on the wrapped transport is delegated to `child`
    and recorded via `logger` at `level`, with read/write payloads rendered as a
    hex dump.
    """
    def __init__(self, name, child, logger=None, level=logging.INFO):
        self.name = name
        self.child = child
        self.logger = logger or _LOG
        self.level = level
    # Construct PRINTABLE to exclude whitespace from string.printable.
    PRINTABLE = string.digits + string.ascii_letters + string.punctuation
    @classmethod
    def _to_hex(cls, data):
        """Render `data` as hex-dump rows: offset, hex bytes, printable ASCII.

        Returns one string per 16-byte row. For a single-row dump the leading
        offset column is stripped, since it is always 0000.
        """
        lines = []
        if not data:
            lines.append("")
            return lines
        for i in range(0, (len(data) + 15) // 16):
            chunk = data[i * 16 : (i + 1) * 16]
            hex_chunk = " ".join(f"{c:02x}" for c in chunk)
            ascii_chunk = "".join((chr(c) if chr(c) in cls.PRINTABLE else ".") for c in chunk)
            lines.append(f"{i * 16:04x} {hex_chunk:47} {ascii_chunk}")
        if len(lines) == 1:
            # Strip the offset column and its separator. The previous fixed-width
            # slice ([6:]) assumed a two-space separator; with the single-space
            # format above it also ate the first hex digit.
            lines[0] = lines[0].partition(" ")[2]
        return lines
    def timeouts(self):
        return self.child.timeouts()
    def open(self):
        self.logger.log(self.level, "%s: opening transport", self.name)
        self.child.open()
    def close(self):
        self.logger.log(self.level, "%s: closing transport", self.name)
        return self.child.close()
    def read(self, n, timeout_sec):
        """Read from the child transport, logging the outcome (data, timeout, or error)."""
        timeout_str = f"{timeout_sec:5.2f}s" if timeout_sec is not None else " None "
        try:
            data = self.child.read(n, timeout_sec)
        except IoTimeoutError:
            self.logger.log(
                self.level,
                "%s: read {%s} %4d B -> [IoTimeoutError %s]",
                self.name,
                timeout_str,
                n,
                timeout_str,
            )
            raise
        except Exception as err:
            self.logger.log(
                self.level,
                "%s: read {%s} %4d B -> [err: %s]",
                self.name,
                timeout_str,
                n,
                err.__class__.__name__,
                exc_info=1,
            )
            raise err
        hex_lines = self._to_hex(data)
        # Multi-row dumps go on their own lines; single rows stay inline.
        if len(hex_lines) > 1:
            self.logger.log(
                self.level,
                "%s: read {%s} %4d B -> [%3d B]:\n%s",
                self.name,
                timeout_str,
                n,
                len(data),
                "\n".join(hex_lines),
            )
        else:
            self.logger.log(
                self.level,
                "%s: read {%s} %4d B -> [%3d B]: %s",
                self.name,
                timeout_str,
                n,
                len(data),
                hex_lines[0],
            )
        return data
    def write(self, data, timeout_sec):
        """Write to the child transport, logging the outcome (data, timeout, or error)."""
        timeout_str = f"{timeout_sec:5.2f}s" if timeout_sec is not None else " None "
        try:
            self.child.write(data, timeout_sec)
        except IoTimeoutError:
            self.logger.log(
                self.level,
                "%s: write {%s} <- [%3d B]: [IoTimeoutError %s]",
                self.name,
                timeout_str,
                len(data),
                timeout_str,
            )
            raise
        except Exception as err:
            self.logger.log(
                self.level,
                "%s: write {%s} <- [%3d B]: [err: %s]",
                self.name,
                timeout_str,
                len(data),
                err.__class__.__name__,
                exc_info=1,
            )
            raise err
        hex_lines = self._to_hex(data)
        if len(hex_lines) > 1:
            self.logger.log(
                self.level,
                "%s: write {%s} <- [%3d B]:\n%s",
                self.name,
                timeout_str,
                len(data),
                "\n".join(hex_lines),
            )
        else:
            self.logger.log(
                self.level,
                "%s: write {%s} <- [%3d B]: %s",
                self.name,
                timeout_str,
                len(data),
                hex_lines[0],
            )
# Type alias: a context manager whose __enter__ yields a Transport.
TransportContextManager = typing.ContextManager[Transport]
| 9,424 | 32.781362 | 100 | py |
tvm | tvm-main/python/tvm/micro/build.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines top-level glue functions for building microTVM artifacts."""
import json
import logging
import os
import contextlib
import enum
from pathlib import Path
import shutil
from typing import Union
from .._ffi import libinfo
from .. import rpc as _rpc
_LOG = logging.getLogger(__name__)
STANDALONE_CRT_DIR = None
class MicroTVMTemplateProject(enum.Enum):
    """The microTVM template project platforms shipped with TVM."""

    ZEPHYR = "zephyr"
    ARDUINO = "arduino"
    CRT = "crt"

    @classmethod
    def list(cls):
        """Return the platform names as a list of strings."""
        return [member.value for member in cls]
class CrtNotFoundError(Exception):
    """Raised when the standalone CRT dirtree cannot be found."""
class MicroTVMTemplateProjectNotFoundError(Exception):
    """Raised when the microTVM template project dirtree cannot be found."""
def get_standalone_crt_dir() -> str:
    """Find the standalone_crt directory.

    Though the C runtime source lives in the tvm tree, it is intended to be distributed with any
    binary build of TVM. This source tree is intended to be integrated into user projects to run
    models targeted with --runtime=c.

    Returns
    -------
    str :
        The path to the standalone_crt

    Raises
    ------
    CrtNotFoundError :
        If no standalone_crt directory exists next to any discovered library path.
    """
    global STANDALONE_CRT_DIR  # cached across calls; resolved at most once
    if STANDALONE_CRT_DIR is None:
        candidates = (
            os.path.join(os.path.dirname(lib_path), "standalone_crt")
            for lib_path in libinfo.find_lib_path()
        )
        located = next((candidate for candidate in candidates if os.path.isdir(candidate)), None)
        if located is None:
            raise CrtNotFoundError()
        STANDALONE_CRT_DIR = located
    return STANDALONE_CRT_DIR
def get_microtvm_template_projects(platform: str) -> str:
    """Find microTVM template project directory for specific platform.

    Parameters
    ----------
    platform : str
        Platform type which should be defined in MicroTVMTemplateProject.

    Returns
    -------
    str :
        Path to template project directory for platform.

    Raises
    ------
    ValueError :
        If `platform` is not a known template project platform.
    MicroTVMTemplateProjectNotFoundError :
        If no microtvm_template_projects directory exists next to any discovered library path.
    """
    if platform not in MicroTVMTemplateProject.list():
        raise ValueError(f"platform {platform} is not supported.")
    candidates = (
        os.path.join(os.path.dirname(lib_path), "microtvm_template_projects")
        for lib_path in libinfo.find_lib_path()
    )
    template_root = next((candidate for candidate in candidates if os.path.isdir(candidate)), None)
    if template_root is None:
        raise MicroTVMTemplateProjectNotFoundError()
    return os.path.join(template_root, platform)
def copy_crt_config_header(platform: str, output_path: Path):
    """Copy crt_config header file for a platform to destinatin.

    Parameters
    ----------
    platform : str
        Platform type which should be defined in MicroTVMTemplateProject.
    output_path: Path
        Output path for crt_config header file.
    """
    template_root = Path(get_microtvm_template_projects(platform))
    source_header = template_root / "crt_config" / "crt_config.h"
    shutil.copy(source_header, output_path)
class AutoTvmModuleLoader:
    """MicroTVM AutoTVM Module Loader
    Parameters
    ----------
    template_project_dir : Union[os.PathLike, str]
        project template path
    project_options : dict
        project generation option
    project_dir: str
        if use_existing is False: The path to save the generated microTVM Project.
        if use_existing is True: The path to a generated microTVM Project for debugging.
    use_existing: bool
        skips the project generation and opens transport to the project at the project_dir address.
    """
    def __init__(
        self,
        template_project_dir: Union[os.PathLike, str],
        project_options: dict = None,
        project_dir: Union[os.PathLike, str] = None,
        use_existing: bool = False,
    ):
        self._project_options = project_options
        self._use_existing = use_existing
        if isinstance(template_project_dir, (os.PathLike, str)):
            self._template_project_dir = str(template_project_dir)
        # NOTE(review): this branch is reached only when template_project_dir is
        # neither PathLike nor str, so the condition is always true here —
        # effectively a plain `else: raise`.
        elif not isinstance(template_project_dir, str):
            raise TypeError(f"Incorrect type {type(template_project_dir)}.")
        if isinstance(project_dir, (os.PathLike, str)):
            self._project_dir = str(project_dir)
        else:
            self._project_dir = None
    @contextlib.contextmanager
    def __call__(self, remote_kw, build_result):
        # Read the compiled artifact so it can be shipped to the RPC server as bytes.
        with open(build_result.filename, "rb") as build_file:
            build_result_bin = build_file.read()
        # In case we are tuning on multiple physical boards (with Meta-schedule), the tracker
        # device_key is the serial_number of the board that will be used in generating micro
        # session. For CRT projects, and in cases that the serial number is not provided
        # (including tuning with AutoTVM), the serial number field doesn't change.
        if "board" in self._project_options and "$local$device" not in remote_kw["device_key"]:
            self._project_options["serial_number"] = remote_kw["device_key"]
        tracker = _rpc.connect_tracker(remote_kw["host"], remote_kw["port"])
        # The session constructor runs server-side: it compiles/flashes the project
        # and opens a micro session (see compile_and_create_micro_session).
        remote = tracker.request(
            remote_kw["device_key"],
            priority=remote_kw["priority"],
            session_timeout=remote_kw["timeout"],
            session_constructor_args=[
                "tvm.micro.compile_and_create_micro_session",
                build_result_bin,
                self._template_project_dir,
                json.dumps(self._project_options),
                self._project_dir,
                self._use_existing,
            ],
        )
        system_lib = remote.get_function("runtime.SystemLib")()
        yield remote, system_lib
def autotvm_build_func():
    """A dummy build function which causes autotvm to use a different export format."""
    # Intentionally empty: only the function object (and its attribute below) matters.
# A sentinel value for the output format.
autotvm_build_func.output_format = ".model-library-format"
| 6,590 | 31.791045 | 100 | py |
tvm | tvm-main/python/tvm/micro/testing/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines the test methods used with microTVM."""
import io
from functools import lru_cache
import json
import logging
from pathlib import Path
import tarfile
import time
from typing import Union
import numpy as np
import tvm
from tvm import relay
from tvm.micro.project_api.server import IoTimeoutError
# Timeout in seconds for AOT transport.
TIMEOUT_SEC = 10
@lru_cache(maxsize=None)
def get_supported_platforms():
    """Return the platform names supported by microTVM testing."""
    supported_platforms = ["arduino", "zephyr"]
    return supported_platforms
@lru_cache(maxsize=None)
def get_supported_boards(platform: str):
    """Load the boards.json description shipped with the platform's template project."""
    template_dir = Path(tvm.micro.get_microtvm_template_projects(platform))
    boards_json = template_dir / "boards.json"
    return json.loads(boards_json.read_text())
def get_target(platform: str, board: str = None) -> tvm.target.Target:
    """Intentionally simple function for making Targets for microcontrollers.
    If you need more complex arguments, one should call target.micro directly. Note
    that almost all, but not all, supported microcontrollers are Arm-based."""
    # The host-emulated CRT runtime needs no board.
    if platform == "crt":
        return tvm.target.target.micro("host")
    if not board:
        raise ValueError(f"`board` type is required for {platform} platform.")
    board_info = get_supported_boards(platform)[board]
    return tvm.target.target.micro(board_info["model"], options=["-device=arm_cpu"])
def check_tune_log(log_path: Union[Path, str]):
    """Read the tuning log and check each result.

    Parameters
    ----------
    log_path : Union[Path, str]
        Path to a JSON-lines tuning log.

    Raises
    ------
    AssertionError
        If any record's first measured cost is not below 1e9.
    json.JSONDecodeError
        If a non-blank line is not valid JSON.
    """
    with open(log_path, "r") as f:
        lines = f.readlines()

    for line in lines:
        # Skip blank/whitespace-only lines (e.g. a trailing newline at EOF).
        # The previous check, `len(line) > 0`, passed them through — a blank
        # line still contains "\n" — and json.loads then raised JSONDecodeError.
        if line.strip():
            tune_result = json.loads(line)
            assert tune_result["result"][0][0] < 1000000000.0
def aot_transport_init_wait(transport):
    """Send init message to microTVM device until it receives wakeup sequence."""
    while True:
        try:
            aot_transport_find_message(transport, "wakeup", timeout_sec=TIMEOUT_SEC)
        except IoTimeoutError:
            # No wakeup yet: poke the device again and keep waiting.
            transport.write(b"init%", timeout_sec=TIMEOUT_SEC)
        else:
            return
def aot_transport_find_message(transport, expression: str, timeout_sec: int) -> str:
    """Read transport lines until one contains `expression`; return that line."""
    began_at = time.monotonic()
    budget = timeout_sec
    while True:
        line = _read_line(transport, budget)
        logging.debug("new line: %s", line)
        if expression in line:
            return line
        # Shrink the remaining budget so the overall wait honors timeout_sec.
        budget = max(0, timeout_sec - (time.monotonic() - began_at))
def _read_line(transport, timeout_sec: int) -> str:
data = bytearray()
while True:
new_data = transport.read(1, timeout_sec=timeout_sec)
logging.debug("read data: %s", new_data)
for item in new_data:
data.append(item)
if str(chr(item)) == "\n":
return data.decode(encoding="utf-8")
def mlf_extract_workspace_size_bytes(mlf_tar_path: Union[Path, str]) -> int:
    """Extract an MLF archive file and read workspace size from metadata file.

    Sums the main-function workspace_size_bytes entries across all modules
    recorded in the archive's metadata.json.
    """
    with tarfile.open(mlf_tar_path, "r:*") as tar_file:
        member_names = [member.name for member in tar_file.getmembers()]
        assert "./metadata.json" in member_names
        with tar_file.extractfile("./metadata.json") as metadata_file:
            metadata = json.load(metadata_file)
    per_module = metadata["modules"]
    return sum(
        per_module[mod_name]["memory"]["functions"]["main"][0]["workspace_size_bytes"]
        for mod_name in per_module
    )
def get_conv2d_relay_module():
    """Generate a conv2d Relay module for testing."""
    input_type = relay.TensorType((1, 3, 64, 64), "int8")
    kernel_type = relay.TensorType((8, 3, 5, 5), "int8")
    data = relay.var("data", input_type)
    weight = relay.var("weight", kernel_type)
    conv = relay.nn.conv2d(
        data,
        weight,
        padding=(2, 2),
        channels=8,
        kernel_size=(5, 5),
        data_layout="NCHW",
        kernel_layout="OIHW",
        out_dtype="int32",
    )
    func = relay.Function([data, weight], conv)
    module = tvm.IRModule.from_expr(func)
    return relay.transform.InferType()(module)
def _npy_dtype_to_ctype(data: np.ndarray) -> str:
if data.dtype == "int8":
return "int8_t"
elif data.dtype == "int32":
return "int32_t"
elif data.dtype == "uint8":
return "uint8_t"
elif data.dtype == "float32":
return "float"
else:
raise ValueError(f"Data type {data.dtype} not expected.")
def create_header_file(
    tensor_name: str, npy_data: np.array, output_path: str, tar_file: tarfile.TarFile
):
    """
    Render ``npy_data`` as a C array declaration in a generated header file and
    append that header (as ``<output_path>/<tensor_name>.h``) to ``tar_file``.
    It is used to capture the tensor data (for both inputs and output).
    """
    pieces = [
        "#include <stddef.h>\n",
        "#include <stdint.h>\n",
        "#include <dlpack/dlpack.h>\n",
        f"const size_t {tensor_name}_len = {npy_data.size};\n",
        f"{_npy_dtype_to_ctype(npy_data)} {tensor_name}[] =",
        "{",
    ]
    # Emit every element in index order as one flat C initializer list.
    pieces.extend(f"{npy_data[index]}, " for index in np.ndindex(npy_data.shape))
    pieces.append("};\n\n")
    encoded = "".join(pieces).encode("utf-8")
    header_path = Path(output_path) / f"{tensor_name}.h"
    member = tarfile.TarInfo(name=str(header_path))
    member.size = len(encoded)
    member.mode = 0o644
    member.type = tarfile.REGTYPE
    tar_file.addfile(member, io.BytesIO(encoded))
| 6,414 | 33.489247 | 98 | py |
tvm | tvm-main/python/tvm/micro/testing/aot_test_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This file provides utilities for running AOT tests, especially for Corstone.
"""
import logging
import itertools
import shutil
import pytest
import tvm
from tvm.testing.aot import AOTTestRunner
pytest.importorskip("tvm.micro")
_LOG = logging.getLogger(__name__)
AOT_DEFAULT_RUNNER = AOTTestRunner()
# AOT Test Runner using the Arm® Corstone™-300 Reference Systems
# see: https://developer.arm.com/ip-products/subsystem/corstone/corstone-300
AOT_CORSTONE300_RUNNER = AOTTestRunner(
makefile="corstone300",
prologue="""
UartStdOutInit();
""",
includes=["uart_stdout.h"],
pass_config={
"relay.ext.cmsisnn.options": {
"mcpu": "cortex-m55",
}
},
)
AOT_USMP_CORSTONE300_RUNNER = AOTTestRunner(
makefile="corstone300",
prologue="""
UartStdOutInit();
""",
includes=["uart_stdout.h"],
pass_config={
"relay.ext.cmsisnn.options": {
"mcpu": "cortex-m55",
},
"tir.usmp.enable": True,
},
)
def parametrize_aot_options(test):
    """Parametrize over valid option combinations"""
    requires_arm_eabi = pytest.mark.skipif(
        shutil.which("arm-none-eabi-gcc") is None, reason="ARM embedded toolchain unavailable"
    )
    combos = itertools.product(
        ["packed", "c"],  # interface_api
        [True, False],  # use_unpacked_api
        [AOT_DEFAULT_RUNNER, AOT_CORSTONE300_RUNNER],  # test_runner
    )
    # Packed operators are not valid with the C interface.
    combos = [
        (api, unpacked, runner)
        for api, unpacked, runner in combos
        if not (api == "c" and not unpacked)
    ]
    # The reference system only supports the C interface with unpacked API calls.
    combos = [
        (api, unpacked, runner)
        for api, unpacked, runner in combos
        if not (runner == AOT_CORSTONE300_RUNNER and (api == "packed" or not unpacked))
    ]
    # Reference-system combinations additionally need the ARM toolchain, so
    # mark them for skipping when it is unavailable.
    marked_combos = [
        pytest.param(api, unpacked, runner, marks=[requires_arm_eabi])
        if runner == AOT_CORSTONE300_RUNNER
        else (api, unpacked, runner)
        for api, unpacked, runner in combos
    ]
    wrapped = pytest.mark.parametrize(
        ["interface_api", "use_unpacked_api", "test_runner"],
        marked_combos,
    )(test)
    # Skip reference system tests if running in i386 container
    return tvm.testing.skip_if_32bit(reason="Reference system unavailable in i386 container")(
        wrapped
    )
| 3,277 | 28.531532 | 99 | py |
tvm | tvm-main/python/tvm/micro/testing/pytest_plugin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,redefined-outer-name
""" microTVM testing fixtures used to deduce testing argument
values from testing parameters """
import pathlib
import os
import datetime
import pytest
from tvm.contrib.utils import tempdir
from .utils import get_supported_platforms, get_supported_boards
def pytest_addoption(parser):
    """Register the microTVM-specific command line options with pytest."""
    parser.addoption(
        "--platform",
        choices=get_supported_platforms(),
        help="microTVM platform for tests.",
    )
    # Boards from every supported platform are valid choices.
    board_choices = list(get_supported_boards("zephyr").keys()) + list(
        get_supported_boards("arduino").keys()
    )
    parser.addoption(
        "--board",
        choices=board_choices,
        help=(
            "microTVM boards for tests. Board refers to instances"
            "of microcontrollers/emulators defined in a platform."
        ),
    )
    parser.addoption(
        "--test-build-only",
        action="store_true",
        default=False,
        help="Only run tests that don't require physical hardware.",
    )
    parser.addoption(
        "--microtvm-debug",
        action="store_true",
        default=False,
        help=(
            "If set true, it will keep the project directory for debugging."
            "Also, it will enable debug level logging in project generation."
        ),
    )
    parser.addoption(
        "--serial-number",
        default=None,
        help=(
            "Board serial number. This is used to run test on a "
            "specific board when multiple boards with the same type exist."
        ),
    )
def pytest_generate_tests(metafunc):
    """Hooks into pytest to add platform and board fixtures to tests that
    require them. To make sure that "platform" and "board" are treated as
    parameters for the appropriate tests (and included in the test names),
    we add them as function level parametrizations. This prevents data
    from being overwritten in Junit XML files if multiple platforms
    or boards are tested."""
    for fixture in ("platform", "board"):
        if fixture not in metafunc.fixturenames:
            continue
        option_value = metafunc.config.getoption(f"--{fixture}", default=None)
        if not option_value:
            raise ValueError(
                f"Test {metafunc.function.__name__} in module {metafunc.module.__name__} "
                f"requires a --{fixture} argument, but none was given."
            )
        metafunc.parametrize(fixture, [metafunc.config.getoption(f"--{fixture}")])
@pytest.fixture(scope="session")
def microtvm_debug(request):
    """Session-scoped fixture: True when --microtvm-debug was passed on the command line."""
    return request.config.getoption("--microtvm-debug")
def pytest_collection_modifyitems(config, items):
    """Skip tests marked ``requires_hardware`` when --test-build-only is set."""
    if not config.getoption("--test-build-only"):
        return
    skip_marker = pytest.mark.skip(reason="--test-build-only was passed")
    for item in items:
        if "requires_hardware" in item.keywords:
            item.add_marker(skip_marker)
@pytest.fixture
def workspace_dir(request, board, microtvm_debug):
    """Creates workspace directory for each test.

    The directory lives next to the test module, named after the board and the
    current timestamp; a numeric suffix is appended if the path already exists.
    When --microtvm-debug is passed, the directory is kept after the test for
    inspection.
    """
    parent_dir = pathlib.Path(os.path.dirname(request.module.__file__))
    board_workspace = (
        parent_dir / f"workspace_{board}" / datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
    )
    board_workspace_base = str(board_workspace)
    number = 1
    while board_workspace.exists():
        board_workspace = pathlib.Path(board_workspace_base + f"-{number}")
        number += 1
    # mkdir(exist_ok=True) avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() sequence.
    board_workspace.parent.mkdir(parents=True, exist_ok=True)
    return tempdir(custom_path=board_workspace, keep_for_debug=microtvm_debug or None)
@pytest.fixture(autouse=True)
def skip_by_board(request, board):
    """Skip test if board is in the list."""
    marker = request.node.get_closest_marker("skip_boards")
    if marker and board in marker.args[0]:
        pytest.skip(f"skipped on this board: {board}")
def pytest_configure(config):
    """Register the custom ``skip_boards`` marker used by the microTVM tests."""
    config.addinivalue_line(
        "markers",
        "skip_boards(board): skip test for the given board",
    )
@pytest.fixture
def serial_number(request):
    """Return --serial-number, split into a list when comma-separated."""
    raw_value = request.config.getoption("--serial-number")
    if raw_value:
        parts = raw_value.split(",")
        if len(parts) > 1:
            return parts
    return raw_value
| 5,268 | 33.89404 | 97 | py |
tvm | tvm-main/python/tvm/micro/testing/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Allows the tools specified below to be imported directly from tvm.micro.testing"""
from .evaluation import tune_model, create_aot_session, predict_labels_aot
from .utils import get_supported_boards, get_target
| 999 | 46.619048 | 85 | py |
tvm | tvm-main/python/tvm/micro/testing/evaluation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Provides high-level functions for instantiating and timing AOT models. Used
by autotuning tests in tests/micro, and may be used for more performance
tests in the future.
"""
import logging
from io import StringIO
from pathlib import Path
from contextlib import ExitStack
import tempfile
import shutil
import tvm
from tvm.relay.op.contrib import cmsisnn
def tune_model(
    platform,
    board,
    target,
    mod,
    params,
    num_trials,
    tuner_cls=tvm.autotvm.tuner.GATuner,
    project_options=None,
):
    """Autotunes a model with microTVM and returns a StringIO with the tuning logs

    Each AutoTVM task extracted from ``mod`` is tuned for ``num_trials`` trials
    using ``tuner_cls``; candidate kernels are built locally and measured on the
    device described by ``platform``/``board``.
    """
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        tasks = tvm.autotvm.task.extract_from_program(mod["main"], {}, target)
    assert len(tasks) > 0
    assert isinstance(params, dict)
    # Caller-provided options override the defaults below.
    project_options = {
        "board": board,
        "project_type": "host_driven",
        **(project_options or {}),
    }
    # Loads each candidate module onto the device through a generated project.
    module_loader = tvm.micro.AutoTvmModuleLoader(
        template_project_dir=tvm.micro.get_microtvm_template_projects(platform),
        project_options=project_options,
    )
    builder = tvm.autotvm.LocalBuilder(
        n_parallel=1,
        build_kwargs={"build_option": {"tir.disable_vectorize": True}},
        do_fork=False,
        build_func=tvm.micro.autotvm_build_func,
        runtime=tvm.relay.backend.Runtime("crt", {"system-lib": True}),
    )
    runner = tvm.autotvm.LocalRunner(number=1, repeat=1, timeout=100, module_loader=module_loader)
    measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner)
    # Tuning records are accumulated in-memory rather than in a log file.
    results = StringIO()
    for task in tasks:
        tuner = tuner_cls(task)
        tuner.tune(
            n_trial=num_trials,
            measure_option=measure_option,
            callbacks=[
                tvm.autotvm.callback.log_to_file(results),
                tvm.autotvm.callback.progress_bar(num_trials, si_prefix="M"),
            ],
            si_prefix="M",
        )
    # Note that we might not find a working schedule at all, in which case
    # tuner.best_flops would equal zero. This is not good, but checking for
    # this case will happen elsewhere.
    return results
def create_aot_session(
    platform,
    board,
    target,
    mod,
    params,
    build_dir=None,
    tune_logs=None,
    timeout_override=None,
    use_cmsis_nn=False,
    project_options=None,
    use_existing=False,
):
    """AOT-compiles and uploads a model to a microcontroller, and returns the RPC session

    When ``build_dir`` is None a fresh temporary directory is created per call.
    (The previous default, ``Path(tempfile.mkdtemp())``, was evaluated once at
    import time, so every call relying on the default eagerly created and then
    shared a single directory — the classic mutable-default pitfall.)
    """
    if build_dir is None:
        build_dir = Path(tempfile.mkdtemp())
    executor = tvm.relay.backend.Executor("aot")
    crt_runtime = tvm.relay.backend.Runtime("crt", {"system-lib": True})
    with ExitStack() as stack:
        config = {"tir.disable_vectorize": True}
        if use_cmsis_nn:
            config["relay.ext.cmsisnn.options"] = {"mcpu": target.mcpu}
        stack.enter_context(tvm.transform.PassContext(opt_level=3, config=config))
        if use_cmsis_nn:
            mod = cmsisnn.partition_for_cmsisnn(mod, params, mcpu=target.mcpu)
        if tune_logs is not None:
            stack.enter_context(tvm.autotvm.apply_history_best(tune_logs))
        lowered = tvm.relay.build(
            mod,
            target=target,
            params=params,
            runtime=crt_runtime,
            executor=executor,
        )
    parameter_size = len(tvm.runtime.save_param_dict(lowered.get_params()))
    print(f"Model parameter size: {parameter_size}")
    project_options = {
        "board": board,
        "project_type": "host_driven",
        # {} shouldn't be the default value for project options ({}
        # is mutable), so we use this workaround
        **(project_options or {}),
    }
    if use_existing:
        # Reuse a previously generated project, but force a clean rebuild.
        shutil.rmtree(build_dir / "project" / "build")
        project = tvm.micro.GeneratedProject.from_directory(
            build_dir / "project",
            options=project_options,
        )
    else:
        project = tvm.micro.generate_project(
            str(tvm.micro.get_microtvm_template_projects(platform)),
            lowered,
            build_dir / "project",
            project_options,
        )
    project.build()
    project.flash()
    return tvm.micro.Session(project.transport(), timeout_override=timeout_override)
def predict_labels_aot(session, aot_executor, input_data, runs_per_sample=1):
    """Predicts labels for each sample in input_data using host-driven AOT.
    Yields one (label, runtime) tuple per sample; the label is the argmax of
    the model's single output, so the model must emit per-class confidences."""
    assert aot_executor.get_num_inputs() == 1
    assert aot_executor.get_num_outputs() == 1
    assert runs_per_sample > 0
    for sample_index, sample in enumerate(input_data):
        logging.info("Evaluating sample %d", sample_index)
        aot_executor.get_input(0).copyfrom(sample)
        timing = aot_executor.module.time_evaluator("run", session.device, number=runs_per_sample)()
        label = aot_executor.get_output(0).numpy().argmax()
        yield label, timing.mean
| 5,904 | 32.742857 | 100 | py |
tvm | tvm-main/python/tvm/micro/project_api/server.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Defines a basic Project API server template.
This file is meant to be imported or copied into Project API servers, so it should not have any
imports or dependencies outside of things strictly required to run the API server.
"""
import abc
import argparse
import base64
import collections
import enum
import io
import json
import logging
import os
import pathlib
import re
import select
import sys
import time
import traceback
import typing
_LOG = logging.getLogger(__name__)
# Raw namedtuple backing ProjectOption; holds all metadata fields describing
# one project option exposed by an API server.
_ProjectOption = collections.namedtuple(
    "ProjectOption", ("name", "choices", "default", "type", "required", "optional", "help")
)
class ProjectOption(_ProjectOption):
    """Keeps the metadata associated with a single project option."""

    def __new__(cls, name, **kw):
        """Force every field other than ``name`` to be given as a keyword."""
        assert "name" not in kw
        assert (
            "required" in kw or "optional" in kw
        ), "at least one of 'required' or 'optional' must be specified."
        assert "type" in kw, "'type' field must be specified."
        kw["name"] = name
        # Unspecified metadata fields default to None.
        for optional_field in ("choices", "default", "required", "optional"):
            kw.setdefault(optional_field, None)
        return super().__new__(cls, **kw)

    def replace(self, attributes):
        """Return a copy of this option with ``attributes`` overridden."""
        return self._replace(**attributes)
# Metadata returned by server_info_query: the platform's name, whether this
# server represents a template (not yet generated) project, the path to the
# Model Library Format archive (if any), and the options the server accepts.
ServerInfo = collections.namedtuple(
    "ServerInfo", ("platform_name", "is_template", "model_library_format_path", "project_options")
)
# Timeouts supported by the underlying C++ MicroSession.
#
# session_start_retry_timeout_sec : float
# Number of seconds to wait for the device to send a kSessionStartReply after sending the
# initial session start message. After this time elapses another
# kSessionTerminated-kSessionStartInit train is sent. 0 disables this.
# session_start_timeout_sec : float
# Total number of seconds to wait for the session to be established. After this time, the
# client gives up trying to establish a session and raises an exception.
# session_established_timeout_sec : float
# Number of seconds to wait for a reply message after a session has been established. 0
# disables this.
# See the comment block above for the meaning of each timeout field.
TransportTimeouts = collections.namedtuple(
    "TransportTimeouts",
    [
        "session_start_retry_timeout_sec",
        "session_start_timeout_sec",
        "session_established_timeout_sec",
    ],
)
class ErrorCode(enum.IntEnum):
    """Enumerates error codes which can be returned. Includes JSON-RPC standard and custom codes."""
    # Custom (in reserved error code space).
    SERVER_ERROR = -32000  # A generic error was raised while processing the request.
    # JSON-RPC standard codes, per the JSON-RPC 2.0 specification (section 5.1).
    PARSE_ERROR = -32700
    INVALID_REQUEST = -32600
    METHOD_NOT_FOUND = -32601
    INVALID_PARAMS = -32602
    INTERNAL_ERROR = -32603
class JSONRPCError(Exception):
    """An error class with properties that meet the JSON-RPC error spec."""
    def __init__(self, code, message, data, client_context=None):
        Exception.__init__(self)
        self.code = code  # numeric JSON-RPC error code (see ErrorCode)
        self.message = message  # short human-readable summary
        self.data = data  # optional structured payload (e.g. {"traceback": ...})
        self.client_context = client_context  # label prepended to __str__ on the client side
    def to_json(self):
        """Serialize this error into the JSON-RPC "error" object shape."""
        return {
            "code": self.code,
            "message": self.message,
            "data": self.data,
        }
    def __str__(self):
        data_str = ""
        if self.data:
            # Prefer the embedded server-side traceback when present.
            if isinstance(self.data, dict) and self.data.get("traceback"):
                data_str = f'\n{self.data["traceback"]}'
            else:
                data_str = f"\n{self.data!r}"
        return f"JSON-RPC error # {self.code}: {self.message}" + data_str
    @classmethod
    def from_json(cls, client_context, json_error):
        """Convert an encapsulated ServerError into JSON-RPC compliant format."""
        found_server_error = False
        try:
            if ErrorCode(json_error["code"]) == ErrorCode.SERVER_ERROR:
                found_server_error = True
        except ValueError:
            # NOTE(review): this call's return value is discarded, and
            # ServerError.from_json asserts code == SERVER_ERROR, which cannot
            # hold here (the code failed ErrorCode conversion). Looks like a
            # leftover — confirm intent before changing.
            ServerError.from_json(client_context, json_error)
        if found_server_error:
            return ServerError.from_json(client_context, json_error)
        return cls(
            json_error["code"],
            json_error["message"],
            json_error.get("data", None),
            client_context=client_context,
        )
class ServerError(JSONRPCError):
    """Superclass for JSON-RPC errors which occur while processing valid requests."""
    @classmethod
    def from_exception(cls, exc, **kw):
        """Wrap an arbitrary exception, capturing its formatted traceback."""
        to_return = cls(**kw)
        to_return.set_traceback(traceback.TracebackException.from_exception(exc).format())
        return to_return
    def __init__(self, message=None, data=None, client_context=None):
        # Subclasses use their class name as the message so from_json can
        # round-trip them back into the right subclass.
        if self.__class__ == ServerError:
            assert message is not None, "Plain ServerError must have message="
        else:
            assert (
                message is None
            ), f"ServerError subclasses must not supply message=; got {message!r}"
            message = self.__class__.__name__
        super(ServerError, self).__init__(ErrorCode.SERVER_ERROR, message, data)
        self.client_context = client_context
    def __str__(self):
        context_str = f"{self.client_context}: " if self.client_context is not None else ""
        super_str = super(ServerError, self).__str__()
        return context_str + super_str
    def set_traceback(self, traceback):  # pylint: disable=redefined-outer-name
        """Format a traceback to be embedded in the JSON-RPC format."""
        if self.data is None:
            self.data = {}
        if "traceback" not in self.data:
            # NOTE: TVM's FFI layer reorders Python stack traces several times and strips
            # intermediary lines that start with "Traceback". This logic adds a comment to the first
            # stack frame to explicitly identify the first stack frame line that occurs on the
            # server.
            traceback_list = list(traceback)
            # The traceback list contains one entry per stack frame, and each entry contains 1-2
            # lines:
            #    File "path/to/file", line 123, in <method>:
            #      <copy of the line>
            # We want to place a comment on the first line of the outermost frame to indicate this
            # is the server-side stack frame.
            first_frame_list = traceback_list[1].split("\n")
            self.data["traceback"] = (
                traceback_list[0]
                + f"{first_frame_list[0]} # <--- Outermost server-side stack frame\n"
                + "\n".join(first_frame_list[1:])
                + "".join(traceback_list[2:])
            )
    @classmethod
    def from_json(cls, client_context, json_error):
        """Rebuild the matching ServerError subclass from its JSON form.

        The subclass is recovered by matching the error message against
        subclass names (see __init__); falls back to a plain ServerError.
        """
        assert json_error["code"] == ErrorCode.SERVER_ERROR
        for sub_cls in cls.__subclasses__():
            if sub_cls.__name__ == json_error["message"]:
                return sub_cls(
                    data=json_error.get("data"),
                    client_context=client_context,
                )
        return cls(
            json_error["message"], data=json_error.get("data"), client_context=client_context
        )
# Raised by transport read/write implementations; see ProjectAPIHandler docs.
class TransportClosedError(ServerError):
    """Raised when a transport can no longer be used due to underlying I/O problems."""
class IoTimeoutError(ServerError):
    """Raised when the I/O operation could not be completed before the timeout.
    Specifically:
     - when no data could be read before the timeout
     - when some of the write data could be written before the timeout
    Note the asymmetric behavior of read() vs write(), since in one case the total length of the
    data to transfer is known.
    """
# Raised from server_info_query when the client's TVM version is incompatible.
class UnsupportedTVMVersionError(ServerError):
    """Raised when the version of TVM supplied to server_info_query is unsupported."""
class ProjectAPIHandler(metaclass=abc.ABCMeta):
    """The interface class for all Project API implementations.
    Extend this class in your microtvm_api_server.py and implement each function defined here.
    """
    @abc.abstractmethod
    def server_info_query(self, tvm_version: str) -> ServerInfo:
        """Initial request issued by TVM to retrieve metadata about this API server and project.
        Should this API server not support tvm_version, raise UnsupportedTVMVersionError.
        Parameters
        ----------
        tvm_version : str
            The value of tvm.__version__.
        Returns
        -------
        ServerInfo :
            A ServerInfo namedtuple containing the metadata needed by TVM.
        Raises
        ------
        UnsupportedTVMVersionError :
            When tvm_version indicates a known-unsupported version of TVM.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def generate_project(
        self,
        model_library_format_path: pathlib.Path,
        standalone_crt_dir: pathlib.Path,
        project_dir: pathlib.Path,
        options: dict,
    ):
        """Generate a project from the given artifacts, copying ourselves to that project.
        Parameters
        ----------
        model_library_format_path : pathlib.Path
            Path to the Model Library Format tar archive.
        standalone_crt_dir : pathlib.Path
            Path to the root directory of the "standalone_crt" TVM build artifact. This contains the
            TVM C runtime.
        project_dir : pathlib.Path
            Path to a nonexistent directory which should be created and filled with the generated
            project.
        options : dict
            Dict mapping option name to ProjectOption.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def build(self, options: dict):
        """Build the project, enabling the flash() call to made.
        Parameters
        ----------
        options : Dict[str, ProjectOption]
            ProjectOption which may influence the build, keyed by option name.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def flash(self, options: dict):
        """Program the project onto the device.
        Parameters
        ----------
        options : Dict[str, ProjectOption]
            ProjectOption which may influence the programming process, keyed by option name.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def open_transport(self, options: dict) -> TransportTimeouts:
        """Open resources needed for the transport layer.
        This function might e.g. open files or serial ports needed in write_transport or
        read_transport.
        Calling this function enables the write_transport and read_transport calls. If the
        transport is not open, this method is a no-op.
        Parameters
        ----------
        options : Dict[str, ProjectOption]
            ProjectOption which may influence the programming process, keyed by option name.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def close_transport(self):
        """Close resources needed to operate the transport layer.
        This function might e.g. close files or serial ports needed in write_transport or
        read_transport.
        Calling this function disables the write_transport and read_transport calls. If the
        transport is not open, this method is a no-op.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    # pylint: disable=unidiomatic-typecheck
    def read_transport(self, n: int, timeout_sec: typing.Union[float, type(None)]) -> bytes:
        """Read data from the transport.
        Parameters
        ----------
        n : int
            The exact number of bytes to read from the transport.
        timeout_sec : Union[float, None]
            Number of seconds to wait for at least one byte to be available before timing out. If
            timeout_sec is 0, read should attempt to service the request in a non-blocking fashion.
            If timeout_sec is None, read should block until all `n` bytes of data can be returned.
        Returns
        -------
        bytes :
            Data read from the channel. Should be exactly `n` bytes long.
        Raises
        ------
        TransportClosedError :
            When the transport layer determines that the transport can no longer send or receive
            data due to an underlying I/O problem (i.e. file descriptor closed, cable removed, etc).
        IoTimeoutError :
            When `timeout_sec` elapses without receiving any data.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def write_transport(self, data: bytes, timeout_sec: float):
        """Write data to the transport.
        This function should either write all bytes in `data` or raise an exception.
        Parameters
        ----------
        data : bytes
            The data to write over the channel.
        timeout_sec : Union[float, None]
            Number of seconds to wait for all bytes to be written before timing out. If timeout_sec
            is 0, write should attempt to service the request in a non-blocking fashion. If
            timeout_sec is None, write should block until it has written all data.
        Raises
        ------
        TransportClosedError :
            When the transport layer determines that the transport can no longer send or receive
            data due to an underlying I/O problem (i.e. file descriptor closed, cable removed, etc).
        IoTimeoutError :
            When `timeout_sec` elapses before all of `data` could be written.
        """
        raise NotImplementedError()
class ProjectAPIServer:
    """Base class for Project API Servers.
    This API server implements communication using JSON-RPC 2.0:
        https://www.jsonrpc.org/specification
    Suggested use of this class is to import this module or copy this file into Project Generator
    implementations, then instantiate it with server.start().
    This RPC server is single-threaded, blocking, and one-request-at-a-time. Don't get anxious.
    """
    _PROTOCOL_VERSION = 1
    def __init__(
        self, read_file: typing.BinaryIO, write_file: typing.BinaryIO, handler: ProjectAPIHandler
    ):
        """Initialize a new ProjectAPIServer.
        Parameters
        ----------
        read_file : BinaryIO
            A file-like object used to read binary data from the client.
        write_file : BinaryIO
            A file-like object used to write binary data to the client.
        handler : ProjectAPIHandler
            A class which extends the abstract class ProjectAPIHandler and implements the server RPC
            functions.
        """
        # Wrap the binary streams so the JSON-RPC protocol is spoken in UTF-8 text;
        # write_through avoids buffering replies.
        self._read_file = io.TextIOWrapper(read_file, encoding="UTF-8", errors="strict")
        self._write_file = io.TextIOWrapper(
            write_file, encoding="UTF-8", errors="strict", write_through=True
        )
        self._handler = handler
    def serve_forever(self):
        """Serve requests until no more are available."""
        has_more = True
        while has_more:
            has_more = self.serve_one_request()
    def serve_one_request(self):
        """Read, process, and reply to a single request from read_file.
        When errors occur reading the request line or loading the request into JSON, they are
        propagated to the caller (the stream is then likely corrupted and no further requests
        should be served. When errors occur past this point, they are caught and send back to the
        client.
        Return
        ----------
        bool :
            True when more data could be read from read_file, False otherwise.
        """
        try:
            line = self._read_file.readline()
            _LOG.debug("read request <- %s", line)
            if not line:
                return False
            request = json.loads(line)
        except EOFError:
            _LOG.error("EOF")
            return False
        except Exception:  # pylint: disable=broad-except
            _LOG.error("Caught error reading request", exc_info=1)
            return False
        did_validate = False
        try:
            self._validate_request(request)
            did_validate = True
            self._dispatch_request(request)
        except JSONRPCError as exc:
            if isinstance(exc, ServerError):
                exc.set_traceback(traceback.TracebackException.from_exception(exc).format())
            # Per JSON-RPC, the id is only echoed back once the request validated.
            request_id = None if not did_validate else request.get("id")
            self._reply_error(request_id, exc)
            return did_validate
        except Exception as exc:  # pylint: disable=broad-except
            message = "validating request"
            if did_validate:
                message = f"calling method {request['method']}"
            exc = ServerError.from_exception(exc, message=message)
            request_id = None if not isinstance(request, dict) else request.get("id")
            self._reply_error(request_id, exc)
            return did_validate
        return True
    VALID_METHOD_RE = re.compile("^[a-zA-Z0-9_]+$")
    def _validate_request(self, request):
        # Enforce the structural requirements of a JSON-RPC 2.0 request object.
        if not isinstance(request, dict):
            raise JSONRPCError(
                ErrorCode.INVALID_REQUEST, f"request: want dict; got {request!r}", None
            )
        jsonrpc = request.get("jsonrpc")
        if jsonrpc != "2.0":
            raise JSONRPCError(
                ErrorCode.INVALID_REQUEST, f'request["jsonrpc"]: want "2.0"; got {jsonrpc!r}', None
            )
        method = request.get("method")
        if not isinstance(method, str):
            raise JSONRPCError(
                ErrorCode.INVALID_REQUEST, f'request["method"]: want str; got {method!r}', None
            )
        if not self.VALID_METHOD_RE.match(method):
            raise JSONRPCError(
                ErrorCode.INVALID_REQUEST,
                f'request["method"]: should match regex {self.VALID_METHOD_RE.pattern}; '
                f"got {method!r}",
                None,
            )
        params = request.get("params")
        if not isinstance(params, dict):
            raise JSONRPCError(
                ErrorCode.INVALID_REQUEST, f'request["params"]: want dict; got {type(params)}', None
            )
        request_id = request.get("id")
        # pylint: disable=unidiomatic-typecheck
        if not isinstance(request_id, (str, int, type(None))):
            raise JSONRPCError(
                ErrorCode.INVALID_REQUEST,
                f'request["id"]: want str, number, null; got {request_id!r}',
                None,
            )
    def _dispatch_request(self, request):
        # The abstract interface (ProjectAPIHandler) defines the canonical
        # signature; a _dispatch_<method> override on this class, when present,
        # pre-processes parameters before calling the handler.
        method = request["method"]
        interface_method = getattr(ProjectAPIHandler, method, None)
        if interface_method is None:
            raise JSONRPCError(
                ErrorCode.METHOD_NOT_FOUND, f'{request["method"]}: no such method', None
            )
        has_preprocessing = True
        dispatch_method = getattr(self, f"_dispatch_{method}", None)
        if dispatch_method is None:
            dispatch_method = getattr(self._handler, method)
            has_preprocessing = False
        request_params = request["params"]
        params = {}
        # Validate the request's params against the interface method's type hints.
        for var_name, var_type in typing.get_type_hints(interface_method).items():
            if var_name in ("self", "return"):
                continue
            # NOTE: types can only be JSON-compatible types, so var_type is expected to be of type
            # 'type'.
            if var_name not in request_params:
                raise JSONRPCError(
                    ErrorCode.INVALID_PARAMS,
                    f'method {request["method"]}: parameter {var_name} not given',
                    None,
                )
            param = request_params[var_name]
            if not has_preprocessing and not isinstance(param, var_type):
                raise JSONRPCError(
                    ErrorCode.INVALID_PARAMS,
                    f'method {request["method"]}: parameter {var_name}: want {var_type!r}, '
                    f"got {type(param)!r}",
                    None,
                )
            params[var_name] = param
        extra_params = [p for p in request["params"] if p not in params]
        if extra_params:
            raise JSONRPCError(
                ErrorCode.INVALID_PARAMS,
                f'{request["method"]}: extra parameters: {", ".join(extra_params)}',
                None,
            )
        return_value = dispatch_method(**params)
        self._write_reply(request["id"], result=return_value)
    def _write_reply(self, request_id, result=None, error=None):
        # Emit a JSON-RPC response carrying either a result or an error, never both.
        reply_dict = {
            "jsonrpc": "2.0",
            "id": request_id,
        }
        if error is not None:
            assert (
                result is None
            ), f"Want either result= or error=, got result={result!r} and error={error!r})"
            reply_dict["error"] = error
        else:
            reply_dict["result"] = result
        reply_str = json.dumps(reply_dict)
        _LOG.debug("write reply -> %r", reply_dict)
        self._write_file.write(reply_str)
        self._write_file.write("\n")
    def _reply_error(self, request_id, exception):
        self._write_reply(request_id, error=exception.to_json())
    def _dispatch_generate_project(
        self, model_library_format_path, standalone_crt_dir, project_dir, options
    ):
        # Paths arrive as strings over JSON; convert before handing to the handler.
        return self._handler.generate_project(
            pathlib.Path(model_library_format_path),
            pathlib.Path(standalone_crt_dir),
            pathlib.Path(project_dir),
            options,
        )
    def _dispatch_server_info_query(self, tvm_version):
        # ServerInfo is a namedtuple; serialize to a plain dict for JSON-RPC.
        query_reply = self._handler.server_info_query(tvm_version)
        to_return = query_reply._asdict()
        if to_return["model_library_format_path"] is not None:
            to_return["model_library_format_path"] = str(to_return["model_library_format_path"])
        to_return.setdefault("protocol_version", self._PROTOCOL_VERSION)
        to_return["project_options"] = [o._asdict() for o in query_reply.project_options]
        return to_return
    def _dispatch_open_transport(self, options):
        reply = self._handler.open_transport(options)
        return {"timeouts": reply._asdict()}
    def _dispatch_read_transport(self, n, timeout_sec):
        # Binary payloads are base85-encoded so they fit in the JSON text protocol.
        reply_data = self._handler.read_transport(n, timeout_sec)
        return {"data": str(base64.b85encode(reply_data), "utf-8")}
    def _dispatch_write_transport(self, data, timeout_sec):
        self._handler.write_transport(base64.b85decode(data), timeout_sec)
def _await_nonblocking_ready(rlist, wlist, timeout_sec=None, end_time=None):
if end_time is None:
return True
if timeout_sec is None:
timeout_sec = max(0, end_time - time.monotonic())
rlist, wlist, xlist = select.select(rlist, wlist, rlist + wlist, timeout_sec)
if not rlist and not wlist and not xlist:
raise IoTimeoutError()
return True
def read_with_timeout(fd, n, timeout_sec):  # pylint: disable=invalid-name
    """Read data from a file descriptor, with timeout.

    Intended as a helper for ProjectAPIHandler.read_transport implementations.
    Tested on Linux and OS X; not tested on Windows.

    Parameters
    ----------
    fd : int
        File descriptor to read from. Must be opened in non-blocking mode
        (e.g. with O_NONBLOCK) if timeout_sec is not None.
    n : int
        Maximum number of bytes to read.
    timeout_sec : float or None
        If not None, maximum number of seconds to wait before raising
        IoTimeoutError.

    Returns
    -------
    bytes :
        If at least one byte was received before timeout_sec, a bytes object
        of length in [1, n]. If timeout_sec is None, the equivalent of
        os.read(fd, n).

    Raises
    ------
    IoTimeoutError :
        When timeout_sec is not None and elapses before any data is read.
    TransportClosedError :
        When EOF is reached; fd is closed first.
    """
    deadline = None if timeout_sec is None else time.monotonic() + timeout_sec

    data = None
    while data is None:
        _await_nonblocking_ready([fd], [], end_time=deadline)
        try:
            data = os.read(fd, n)
        except BlockingIOError:
            continue

    # os.read returning b"" means EOF: close the descriptor and signal closure.
    if not data:
        os.close(fd)
        raise TransportClosedError()

    return data
def write_with_timeout(fd, data, timeout_sec):  # pylint: disable=invalid-name
    """Write data to a file descriptor, with timeout.

    Intended as a helper for ProjectAPIHandler.write_transport implementations.
    Tested on Linux and OS X; not tested on Windows.

    Parameters
    ----------
    fd : int
        File descriptor to write to. Must be opened in non-blocking mode
        (e.g. with O_NONBLOCK) if timeout_sec is not None.
    data : bytes
        Data to write.
    timeout_sec : float or None
        If not None, maximum number of seconds to wait before raising
        IoTimeoutError.

    Returns
    -------
    int :
        The number of bytes written, a value in [0, len(data)]. If
        timeout_sec is None, the equivalent of looping os.write(fd, data).

    Raises
    ------
    IoTimeoutError :
        When timeout_sec is not None and elapses before any data is written.
    TransportClosedError :
        When os.write reports 0 bytes written (peer closed); fd is closed first.
    """
    end_time = None if timeout_sec is None else time.monotonic() + timeout_sec

    num_written = 0
    while data:
        try:
            _await_nonblocking_ready([], [fd], end_time=end_time)
        except IoTimeoutError as exc:
            # Partial writes are reported as success; only raise when nothing
            # at all was written before the deadline.
            if num_written:
                return num_written

            raise exc

        num_written_this_cycle = os.write(fd, data)

        if not num_written_this_cycle:
            os.close(fd)
            # Bug fix: was `base.TransportClosedError()`, but no `base` name
            # exists in this module (read_with_timeout raises the bare name).
            raise TransportClosedError()

        data = data[num_written_this_cycle:]
        num_written += num_written_this_cycle

    return num_written
def default_project_options(**kw) -> typing.List[ProjectOption]:
    """Return the standard list of ProjectOptions, with optional overrides.

    Each keyword argument names a default option and maps it to a dict of
    attribute overrides. For example::

        default_project_options(verbose={"optional": ["build"]})

    replaces the `optional` attribute of the `verbose` option with `["build"]`.

    Returns
    -------
    options: List[ProjectOption]
        A list of default ProjectOption with modifications.

    Raises
    ------
    ValueError
        If a keyword does not name one of the default options.
    """
    options = [
        ProjectOption(
            "verbose",
            optional=["generate_project"],
            type="bool",
            default=False,
            help="Run build with verbose output.",
        ),
        ProjectOption(
            "project_type",
            required=["generate_project"],
            type="str",
            help="Type of project to generate.",
        ),
        ProjectOption(
            "board",
            required=["generate_project"],
            type="str",
            help="Name of the board to build for.",
        ),
        ProjectOption(
            "cmsis_path",
            optional=["generate_project"],
            type="str",
            default=None,
            help="Path to the CMSIS directory.",
        ),
        ProjectOption(
            "warning_as_error",
            optional=["generate_project"],
            type="bool",
            default=False,
            help="Treat warnings as errors and raise an Exception.",
        ),
        ProjectOption(
            "compile_definitions",
            optional=["generate_project"],
            type="str",
            default=None,
            help="Extra definitions added project compile.",
        ),
        ProjectOption(
            "extra_files_tar",
            optional=["generate_project"],
            type="str",
            default=None,
            help="If given, during generate_project, "
            "uncompress the tarball at this path into the project dir.",
        ),
    ]

    for name, config in kw.items():
        for index, option in enumerate(options):
            if option.name == name:
                options[index] = option.replace(config)
                break
        else:
            raise ValueError("Option {} was not found in default ProjectOptions.".format(name))

    return options
def main(handler: ProjectAPIHandler, argv: typing.List[str] = None):
    """Start a Project API server.

    Parameters
    ----------
    handler : ProjectAPIHandler
        Handler class that implements the API server RPC calls.
    argv : list[str]
        Command-line parameters to this program. If not given, sys.argv is used.
    """
    if argv is None:
        argv = sys.argv[1:]

    parser = argparse.ArgumentParser(description="Generic TVM Project API server entry point")
    parser.add_argument(
        "--read-fd",
        type=int,
        required=True,
        help="Numeric file descriptor where RPC requests should be read.",
    )
    parser.add_argument(
        "--write-fd",
        type=int,
        required=True,
        help="Numeric file descriptor where RPC replies should be written.",
    )
    parser.add_argument(
        "--debug", action="store_true", help="When given, configure logging at DEBUG level."
    )
    # Bug fix: previously parse_args() was called with no arguments, so the
    # documented `argv` parameter was computed but silently ignored.
    args = parser.parse_args(argv)

    logging.basicConfig(level="DEBUG" if args.debug else "INFO", stream=sys.stderr)

    read_file = os.fdopen(args.read_fd, "rb", buffering=0)
    write_file = os.fdopen(args.write_fd, "wb", buffering=0)

    server = ProjectAPIServer(read_file, write_file, handler)
    server.serve_forever()
| 30,727 | 33.720904 | 100 | py |
tvm | tvm-main/python/tvm/micro/project_api/client.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=consider-using-with
"""
Project API client.
"""
import base64
import io
import json
import logging
import platform
import os
import pathlib
import subprocess
import sys
import typing
from . import server
_LOG = logging.getLogger(__name__)
class ProjectAPIErrorBase(Exception):
    """Base class for all Project API errors.

    Catch this type to handle any error raised by this module.
    """
class ConnectionShutdownError(ProjectAPIErrorBase):
    """Raised when a request is made but the connection has been closed.

    Also raised when EOF is seen while waiting for a server reply.
    """
class MalformedReplyError(ProjectAPIErrorBase):
    """Raised when the server responds with an invalid reply.

    For example, a reply missing the 'jsonrpc' or 'result' keys.
    """
class MismatchedIdError(ProjectAPIErrorBase):
    """Raised when the reply ID does not match the request.

    Requests and replies are correlated by the JSON-RPC 'id' field.
    """
class ProjectAPIServerNotFoundError(ProjectAPIErrorBase):
    """Raised when the Project API server can't be found in the repo.

    Neither the launch script nor the python server file exists in project_dir.
    """
class UnsupportedProtocolVersionError(ProjectAPIErrorBase):
    """Raised when the protocol version returned by the API server is unsupported.

    The client requires the server's protocol version to match exactly.
    """
class RPCError(ProjectAPIErrorBase):
    """Raised when a JSON-RPC request produced an error reply.

    Attributes
    ----------
    request : dict
        The JSON-RPC request that triggered the error.
    error : object
        The error payload returned by the server.
    """

    def __init__(self, request, error):
        # Bug fix: the base constructor was previously invoked unbound with no
        # arguments (ProjectAPIErrorBase.__init__()), which raises TypeError
        # the moment RPCError is instantiated.
        super().__init__()
        self.request = request
        self.error = error

    def __str__(self):
        return f"Calling project API method {self.request['method']}:" "\n" f"{self.error}"
class ProjectAPIClient:
    """A client for the Project API.

    Wraps a pair of binary streams connected to a Project API server and
    exposes each server RPC as a method. Requests and replies follow JSON-RPC
    2.0, one JSON object per line, UTF-8 encoded.
    """

    def __init__(
        self,
        read_file: typing.BinaryIO,
        write_file: typing.BinaryIO,
        testonly_did_write_request: typing.Optional[typing.Callable] = None,
    ):
        # The protocol is line-oriented UTF-8; write_through=True ensures each
        # request reaches the server without additional buffering.
        self.read_file = io.TextIOWrapper(read_file, encoding="UTF-8", errors="strict")
        self.write_file = io.TextIOWrapper(
            write_file, encoding="UTF-8", errors="strict", write_through=True
        )
        self.testonly_did_write_request = testonly_did_write_request
        self.next_request_id = 1

    @property
    def is_shutdown(self):
        """True when the connection to the server has been closed."""
        return self.read_file.closed

    def shutdown(self):
        """Close the connection to the server. Safe to call more than once."""
        if self.is_shutdown:  # pylint: disable=using-constant-test
            return

        self.read_file.close()
        self.write_file.close()

    def _request_reply(self, method, params):
        """Send one JSON-RPC request and block until its reply arrives.

        Parameters
        ----------
        method : str
            Name of the server method to invoke.
        params : dict
            Keyword parameters for the server method.

        Returns
        -------
        The value of the reply's "result" key.

        Raises
        ------
        ConnectionShutdownError, MalformedReplyError, MismatchedIdError, or a
        server.JSONRPCError reconstructed from the reply's "error" key.
        """
        if self.is_shutdown:  # pylint: disable=using-constant-test
            raise ConnectionShutdownError("connection already closed")

        request = {
            "jsonrpc": "2.0",
            "method": method,
            "params": params,
            "id": self.next_request_id,
        }
        self.next_request_id += 1

        request_str = json.dumps(request)
        self.write_file.write(request_str)
        _LOG.debug("send -> %s", request_str)
        self.write_file.write("\n")
        if self.testonly_did_write_request:
            self.testonly_did_write_request()  # Allow test to assert on server processing.
        reply_line = self.read_file.readline()
        _LOG.debug("recv <- %s", reply_line)
        if not reply_line:
            self.shutdown()
            raise ConnectionShutdownError("got EOF reading reply from API server")

        reply = json.loads(reply_line)

        if reply.get("jsonrpc") != "2.0":
            raise MalformedReplyError(
                f"Server reply should include 'jsonrpc': '2.0'; "
                f"saw jsonrpc={reply.get('jsonrpc')!r}"
            )

        if reply["id"] != request["id"]:
            # Bug fix: the message previously lacked its closing parenthesis.
            raise MismatchedIdError(
                f"Reply id ({reply['id']}) does not equal request id ({request['id']})"
            )

        if "error" in reply:
            raise server.JSONRPCError.from_json(f"calling method {method}", reply["error"])
        if "result" not in reply:
            raise MalformedReplyError(f"Expected 'result' key in server reply, got {reply!r}")

        return reply["result"]

    def server_info_query(self, tvm_version: str):
        """Query server capabilities; raise if the protocol versions differ."""
        reply = self._request_reply("server_info_query", {"tvm_version": tvm_version})
        if reply["protocol_version"] != server.ProjectAPIServer._PROTOCOL_VERSION:
            raise UnsupportedProtocolVersionError(
                f'microTVM API Server supports protocol version {reply["protocol_version"]}; '
                f"want {server.ProjectAPIServer._PROTOCOL_VERSION}"
            )

        return reply

    def generate_project(
        self,
        model_library_format_path: str,
        standalone_crt_dir: str,
        project_dir: str,
        options: dict = None,
    ):
        """Ask the server to generate a project from a Model Library Format tarball."""
        return self._request_reply(
            "generate_project",
            {
                "model_library_format_path": model_library_format_path,
                "standalone_crt_dir": standalone_crt_dir,
                "project_dir": project_dir,
                "options": (options if options is not None else {}),
            },
        )

    def build(self, options: dict = None):
        """Ask the server to build the generated project."""
        return self._request_reply("build", {"options": (options if options is not None else {})})

    def flash(self, options: dict = None):
        """Ask the server to flash the built firmware onto the device."""
        return self._request_reply("flash", {"options": (options if options is not None else {})})

    def open_transport(self, options: dict = None):
        """Open the device transport; returns its timeout configuration."""
        return self._request_reply(
            "open_transport", {"options": (options if options is not None else {})}
        )

    def close_transport(self):
        """Close the device transport."""
        return self._request_reply("close_transport", {})

    def read_transport(self, n, timeout_sec):
        """Read up to n bytes from the transport; the wire format is base85."""
        reply = self._request_reply("read_transport", {"n": n, "timeout_sec": timeout_sec})
        reply["data"] = base64.b85decode(reply["data"])
        return reply

    def write_transport(self, data, timeout_sec):
        """Write bytes to the transport; the wire format is base85."""
        return self._request_reply(
            "write_transport",
            {"data": str(base64.b85encode(data), "utf-8"), "timeout_sec": timeout_sec},
        )
# NOTE: windows support untested
# Bug fix: the Windows branch previously produced "launch_microtvm_api_server..bat"
# (a doubled dot) because the extension already contained a leading '.'.
SERVER_LAUNCH_SCRIPT_FILENAME = (
    f"launch_microtvm_api_server.{'sh' if platform.system() != 'Windows' else 'bat'}"
)

SERVER_PYTHON_FILENAME = "microtvm_api_server.py"
def instantiate_from_dir(project_dir: typing.Union[pathlib.Path, str], debug: bool = False):
    """Launch the Project API server found in project_dir and return a client
    connected to it."""
    project_dir = pathlib.Path(project_dir)

    # Prefer the launch script when present; fall back to running the python
    # server file directly with the current interpreter.
    python_script = project_dir / SERVER_PYTHON_FILENAME
    launch_script = project_dir / SERVER_LAUNCH_SCRIPT_FILENAME
    server_args = None
    if python_script.is_file():
        server_args = [sys.executable, str(python_script)]
    if launch_script.is_file():
        server_args = [str(launch_script), str(python_script)]

    if server_args is None:
        raise ProjectAPIServerNotFoundError(
            f"No Project API server found in project directory: {project_dir}"
            "\n"
            f"Tried: {SERVER_LAUNCH_SCRIPT_FILENAME}, {SERVER_PYTHON_FILENAME}"
        )

    # One pipe per direction: the server reads what TVM writes, and vice versa.
    api_server_read_fd, tvm_write_fd = os.pipe()
    tvm_read_fd, api_server_write_fd = os.pipe()

    server_args.extend(
        ["--read-fd", str(api_server_read_fd), "--write-fd", str(api_server_write_fd)]
    )
    if debug:
        server_args.append("--debug")

    api_server_proc = subprocess.Popen(  # pylint: disable=unused-variable
        server_args, bufsize=0, pass_fds=(api_server_read_fd, api_server_write_fd), cwd=project_dir
    )
    # The child process now owns its ends of the pipes.
    os.close(api_server_read_fd)
    os.close(api_server_write_fd)

    return ProjectAPIClient(
        os.fdopen(tvm_read_fd, "rb", buffering=0), os.fdopen(tvm_write_fd, "wb", buffering=0)
    )
| 8,189 | 32.842975 | 100 | py |
tvm | tvm-main/python/tvm/micro/project_api/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""MicroTVM Project API Client and Server"""
| 830 | 45.166667 | 62 | py |
tvm | tvm-main/python/tvm/micro/contrib/stm32/emitter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long
"""Code emission for the STM32 targets."""
import contextlib
import json
import os
import re
import shutil
import tarfile
import textwrap
from datetime import datetime
import numpy as np
import tvm
from tvm.contrib import utils
# Version triple of the emitted ai_runtime API (major.minor.micro).
AI_API_VERSION_MAJOR = 1
AI_API_VERSION_MINOR = 0
AI_API_VERSION_MICRO = 0
# Revision tag embedded in the generated ai_model_info structure.
AI_TOOLS_REVISION = "v1"
# 60-character "=" separator string.
DBAR = "=" * 60
def _fix_name(node_name):
"""Replace ':' with '_' in names like 'InputImg:0'"""
return node_name.replace(":", "_")
def get_input_tensor_name(node_name):
    """Return the sanitized tensor name for a graph input/parameter node."""
    return _fix_name(node_name)
def get_output_tensor_name(node_name, idx):
    """Return the sanitized tensor name for output `idx` of a graph node."""
    return f"{_fix_name(node_name)}_{idx}"
def _get_node_args_name(node_name):
    """Return the C identifier of a node's argument array."""
    return f"{_fix_name(node_name)}_args"
def _get_node_arg_types_name(node_name):
    """Return the C identifier of a node's argument type-id array."""
    return f"{_fix_name(node_name)}_arg_type_ids"
def _get_type_size(dltype):
if dltype in ("uint64", "int64"):
return 8
if dltype in ("uint32", "int32", "float32"):
return 4
if dltype in ("uint16", "int16"):
return 2
if dltype in ("uint8", "int8"):
return 1
raise ValueError(f"Data type {dltype} is not supported")
# Maps a dtype string to the "<DLDataTypeCode>, <bits>, <lanes>" fields of a
# C DLDataType struct initializer.
C_TYPE_TO_DLTYPE = {
    "uint64": "kDLUInt, 64, 1",
    "int64": "kDLInt, 64, 1",
    "float32": "kDLFloat, 32, 1",
    "uint32": "kDLUInt, 32, 1",
    "int32": "kDLInt, 32, 1",
    "uint16": "kDLUInt, 16, 1",
    "int16": "kDLInt, 16, 1",
    "uint8": "kDLUInt, 8, 1",
    "int8": "kDLInt, 8, 1",
}
def _get_type_data(dltype):
    """Return the DLDataType C initializer fields for dtype string `dltype`.

    Raises
    ------
    ValueError
        If `dltype` is not one of the supported data types.
    """
    try:
        return C_TYPE_TO_DLTYPE[dltype]
    except KeyError:
        # Suppress the internal KeyError so callers see a clean ValueError,
        # matching the error style of _get_type_size.
        raise ValueError(f"Data type {dltype} is not supported") from None
def _get_aligned_offset(offset, dltype):
    """Round `offset` up to the natural alignment of `dltype`."""
    align = _get_type_size(dltype)
    remainder = offset % align
    if remainder:
        return offset + align - remainder
    return offset
def _get_num_tensor_elts(shape):
size = 1
for dim in shape:
size = size * dim
return size
def _get_tensor_size_bytes(dims, dltype):
    """Return the total storage size in bytes for a tensor of shape `dims`
    and dtype `dltype`."""
    return _get_num_tensor_elts(dims) * _get_type_size(dltype)
def _preprocess_code(src):
"""Hack the C code implementing the model."""
dst = "#include <stdio.h>\n" "#include <math.h>\n\n"
dst = dst + src
return dst
class CodeEmitter(object):
"""Code emitter class."""
DATA_ALIGNMENT_BYTES = 8
def __init__(self, include_activations=True, include_inputs=True, include_outputs=True):
"""Initialize the Emitter instance.
Parameters
----------
include_activations:
The Emitter allocates the storage for the activations data
and places it in a specific data section. If Falsr, the
main application is responsible for allocating the activations
storage. Default: True.
include_inputs/include_outputs:
The Emitter allocates the storage for the input/output data.
This storage is shared with the activations and placed in the
specific activations data section. If False, the main
application is responsible for allocating the input/output
data storage. Default: True.
Returns
-------
CodeEmitter object.
"""
# Static model: activations placed into a nn_data_act section
# Dynamic model: activations need to be malloc'ed by the
# applications.
self.activations_static = include_activations
# Inputs/outputs may be allocated within the activations or
# separately.
# TODO: Separate the inputs from activations inside TVM.
if include_inputs:
assert (
self.activations_static == True
), "###Error: Static inputs are not allowed without activations."
self.inputs_static = include_inputs
if include_outputs:
assert (
self.activations_static == True
), "###Error: Static outputs are not allowed without activations."
self.outputs_static = include_outputs
# Parsed graph
self._nodes = []
self._arg_nodes = []
self._outputs = []
self._attrs = {}
self._node_row_ptr = []
# Parameters
self._params = {}
# Filled by data_placement()
self._weights = {}
self._activations = {}
self._input_data = {}
self._output_data = {}
self._nodes_size = 0
self._weights_size = 0
self._activations_size = 0
self._quantization = {}
def _extract_quantization_info(self, quantization):
"""Build dictionary with quantization infos."""
for dl_tensor_name in self._input_data:
if dl_tensor_name in quantization:
self._quantization[dl_tensor_name] = quantization[dl_tensor_name]
# Matching outputs is more difficult because TVM does not preserve
# output tensor names.
# We only support models with a single output now.
assert len(self._output_data) == 1, "Multiple outputs models are not yet supported."
for dl_tensor_name in self._output_data:
for name in quantization:
if name not in self._input_data:
self._quantization["output"] = quantization[name]
break
def _get_node_arg_name(self, arg):
arg_nid = arg[0]
arg_idx = arg[1]
arg_node = self._nodes[arg_nid]
arg_name = self._nodes[arg_nid]["name"]
if arg_node["op"] == "null":
# parameter
dl_tensor_name = get_input_tensor_name(arg_name)
elif arg_node["name"] == "reshape_nop":
# Handle __nop
src = arg_node["inputs"][0]
dl_tensor_name = self._get_node_arg_name(src)
else:
# activation
dl_tensor_name = get_output_tensor_name(arg_name, arg_idx)
return dl_tensor_name
def _tensor_is_output(self, nid, idx):
for out in self._outputs:
out_nid = out[0]
out_idx = out[1]
if out_nid == nid and out_idx == idx:
return True
return False
def _get_tensor_from_node(self, nid, idx):
# 'eid' is index into the dltype', 'shape', etc.
eid = self._node_row_ptr[nid] + idx
dltype = self._attrs["dltype"][1][eid]
dims = self._attrs["shape"][1][eid]
storage_id = self._attrs["storage_id"][1][eid]
ndim = len(dims)
size = _get_tensor_size_bytes(dims, dltype)
tensor = {
"dltype": dltype,
"ndim": ndim,
"dims": dims,
"strides": None,
"storage_id": storage_id,
"byte_offset": 0,
"offset": 0,
"size": size,
}
return tensor
    def _compute_data_placement(self):
        """Compute inputs, outputs, weight, activation sizes.

        Fills self._weights, self._activations, self._input_data,
        self._output_data and the *_size totals, assigning a byte offset to
        every tensor. Tensors sharing a storage_id alias the same buffer.
        """
        self._inputs = self._arg_nodes.copy()
        # weights:
        # Weights are laid out sequentially, each aligned to its element size.
        offset = 0
        for key in self._params:
            # First, find the node in graph
            nid = 0
            for node in self._nodes:
                if node["name"] == key:
                    break
                nid += 1
            dl_tensor_name = get_input_tensor_name(key)
            tensor = self._get_tensor_from_node(nid, 0)
            # Compute the offset
            dltype = tensor["dltype"]
            aligned_offset = _get_aligned_offset(offset, dltype)
            tensor["offset"] = aligned_offset
            # A parameter node is not a real model input: remove it from _inputs.
            for idx in self._arg_nodes:
                node = self._nodes[idx]
                node_name = node["name"]
                if node_name == key:
                    self._inputs.remove(idx)
            self._weights[dl_tensor_name] = tensor
            # Next offset
            offset = aligned_offset + tensor["size"]
        self._weights_size = offset
        # activations:
        # Group activation tensors by storage_id: same id => same buffer.
        buffer_list_ = {}
        nid = 0
        for node in self._nodes:
            if node["op"] == "null":
                nid += 1
                continue
            if node["op"] != "tvm_op":
                raise ValueError(f"Only TVM ops are supported")
            node_name = node["name"]
            node_attrs = node["attrs"]
            func_name = node_attrs["func_name"]
            num_outputs = int(node_attrs["num_outputs"])
            if func_name == "__nop":
                # Only the single-output reshape_nop alias is handled.
                assert node_name == "reshape_nop", f"Unsupported __nop operator {node_name}."
                assert num_outputs == 1
                assert not self._tensor_is_output(nid, 0)
                nid += 1
                continue
            for idx in range(num_outputs):
                # Do not count the '_outputs'
                if self._tensor_is_output(nid, idx):
                    continue
                dl_tensor_name = get_output_tensor_name(node_name, idx)
                tensor = self._get_tensor_from_node(nid, idx)
                # Remember this tensor with the storage id
                storage_id = tensor["storage_id"]
                if storage_id not in buffer_list_:
                    buffer_list_[storage_id] = []
                buffer_entry = buffer_list_[storage_id]
                buffer_entry.append(tensor)
                self._activations[dl_tensor_name] = tensor
            self._nodes_size = self._nodes_size + 1
            nid += 1
        # Compute '_input_data'
        offset = 0
        for nid in self._inputs:
            node = self._nodes[nid]
            node_name = node["name"]
            # Arthur: I suppose that input nodes only have a single
            # output dependency
            dl_tensor_name = get_input_tensor_name(node_name)
            # This tensor is at some index inside '_input_data' dictionary
            # depending on the '_inputs' list order. We refer to this position
            # when generating the XXX.h file.
            tensor = self._get_tensor_from_node(nid, 0)
            if self.inputs_static:
                # Remember this tensor with the storage id
                storage_id = tensor["storage_id"]
                if storage_id not in buffer_list_:
                    buffer_list_[storage_id] = []
                buffer_entry = buffer_list_[storage_id]
                buffer_entry.append(tensor)
            else:
                # Compute the offset
                dltype = tensor["dltype"]
                aligned_offset = _get_aligned_offset(offset, dltype)
                tensor["offset"] = aligned_offset
            self._input_data[dl_tensor_name] = tensor
            # Next offset
            # NOTE(review): when self.inputs_static is True, `aligned_offset`
            # here is stale (left over from the weights loop) -- the resulting
            # `offset` is unused on that path, but this is fragile; confirm.
            offset = aligned_offset + tensor["size"]
        # Compute '_output_data'
        offset = 0
        for output in self._outputs:
            nid = output[0]
            idx = output[1]
            node = self._nodes[nid]
            node_name = node["name"]
            dl_tensor_name = get_output_tensor_name(node_name, idx)
            tensor = self._get_tensor_from_node(nid, idx)
            if self.outputs_static:
                # Remember this tensor with the storage id
                storage_id = tensor["storage_id"]
                if storage_id not in buffer_list_:
                    buffer_list_[storage_id] = []
                buffer_entry = buffer_list_[storage_id]
                buffer_entry.append(tensor)
            else:
                # Compute the offset
                dltype = tensor["dltype"]
                aligned_offset = _get_aligned_offset(offset, dltype)
                tensor["offset"] = aligned_offset
            self._output_data[dl_tensor_name] = tensor
            # Next offset
            # NOTE(review): same stale-`aligned_offset` caveat as the inputs
            # loop above when self.outputs_static is True.
            offset = aligned_offset + tensor["size"]
        # Go over all storage IDs and compute offsets and _activations_size
        offset = 0
        for storage_id in buffer_list_:
            buffer_entry = buffer_list_[storage_id]
            new_offset = offset
            for tensor in buffer_entry:
                assert tensor["storage_id"] == storage_id
                dltype = tensor["dltype"]
                aligned_offset = _get_aligned_offset(offset, dltype)
                tensor["offset"] = aligned_offset
                size = tensor["size"]
                if (aligned_offset + size) > new_offset:
                    new_offset = aligned_offset + size
            offset = new_offset
        self._activations_size = offset
def _parse_model(self, quantization=None):
"""Parse the module. Build internal data structures.
Parameters
----------
module : TVM module or ModuleLibraryFormat object
The module to parse
quantization: Dictionary
The quantization information for model inputs/outputs.
"""
for key in self._graph:
if key == "nodes":
self._nodes = self._graph["nodes"]
elif key == "arg_nodes":
self._arg_nodes = self._graph["arg_nodes"]
elif key == "node_row_ptr":
self._node_row_ptr = self._graph["node_row_ptr"]
elif key == "heads":
self._outputs = self._graph["heads"]
elif key == "attrs":
self._attrs = self._graph["attrs"]
elif key == "metadata":
continue
else:
print("### Error: JSON key {} not supported".format(key))
assert False
# Build all tensor lists
self._compute_data_placement()
# Extract quantization info for inputs/outputs
if quantization is not None:
self._extract_quantization_info(quantization)
    def parse_library_format(self, model_library_format_path, quantization=None):
        """Parse a Model Library Format tarball. Build internal data structures.

        Parameters
        ----------
        model_library_format_path :
            Path to the Model Library Format tarball to parse.
        quantization: Dictionary
            The quantization information for model inputs/outputs.
        """
        temp_dir = utils.tempdir()
        extract_path = temp_dir.relpath("extract")
        os.mkdir(extract_path)
        # NOTE(review): extractall on an untrusted tarball is vulnerable to
        # path traversal ("tar slip") -- confirm inputs are trusted here.
        with tarfile.TarFile(model_library_format_path) as f:
            f.extractall(extract_path)
        with open(os.path.join(extract_path, "metadata.json")) as metadata_f:
            metadata = json.load(metadata_f)
        all_module_names = []
        for name in metadata["modules"].keys():
            all_module_names.append(name)
        assert len(metadata["modules"]) == 1, "Multiple modules is not supported."
        # Extract informations from the Model Library Format
        graph_file = os.path.join(
            extract_path, "executor-config", "graph", f"{all_module_names[0]}.graph"
        )
        with open(graph_file, "r") as f:
            # returns JSON object as a dictionary
            graph_dict = json.load(f)
        params_dict = {}
        param_file = os.path.join(extract_path, "parameters", "default.params")
        with open(param_file, "rb") as f:
            params = tvm.runtime.load_param_dict(f.read())
            # Map -> Python Dict
            tmp_dict = {}
            for (k, v) in params.items():
                tmp_dict[k] = v
            # Sort params for debugging
            for k in sorted(tmp_dict.keys()):
                params_dict[k] = tmp_dict[k]
        src_dir = os.path.join(extract_path, "codegen", "host", "src")
        # List of strings from Model Library Format C files
        src_files = []
        for filename in os.listdir(src_dir):
            with open(os.path.join(src_dir, filename), "r") as fin:
                src = fin.read()
            src_files.append(src)
        self._graph = graph_dict
        self._params = params_dict
        self._lib = src_files
        self._parse_model(quantization)
def parse_module(self, module, quantization=None):
"""Parse the module. Build internal data structures.
Parameters
----------
module : TVM Runtime Module
The module to parse.
quantization: Dictionary
The quantization information for model inputs/outputs.
"""
graph = module.get_json()
if not isinstance(graph, (str,)):
try:
graph = graph._tvm_graph_json()
except AttributeError:
raise ValueError("Type %s is not supported" % type(graph))
# Sort params for debugging
params_dict = {}
tmp_params = module.get_params()
for k in sorted(tmp_params.keys()):
params_dict[k] = tmp_params[k]
self._graph = json.loads(graph)
self._params = params_dict
self._lib = module.get_lib()
self._parse_model(quantization)
    def _emit_params_data(self, name, out_h, out_c):
        """Emits the network_data[c,h] files with parameters.

        Writes the weights accessor declaration to out_h and a byte array,
        laid out per self._weights offsets, plus its accessor to out_c.
        """
        name_upper = name.upper()
        # XXX_data.h
        out_h.write(
            textwrap.dedent(
                f"""\
            #ifndef __{name_upper}_DATA_H_
            #define __{name_upper}_DATA_H_
            #include \"ai_runtime_api.h\"
            AI_API_ENTRY
            const ai_ptr ai_{name}_data_weights_get (void);
            #endif /* __{name_upper}_DATA_H_ */
            """
            )
        )
        # XXX_data.cc
        out_c.write(
            textwrap.dedent(
                f"""
            #include \"{name}_data.h\"
            const ai_ptr ai_{name}_data_weights_get (void)
            {{
              AI_ALIGNED({self.DATA_ALIGNMENT_BYTES}) static const __attribute__ ((section(\".nn_weights\"))) uint8_t s_{name}_weights[] = {{
            """
            )
        )
        # Weights are arranged in the order of 'params_'
        offset = 0
        for key in self._params:
            data = self._params[key]  # ND Array
            # NOTE(review): asnumpy() is the legacy NDArray accessor (newer
            # tvm versions use numpy()) -- confirm against the tvm version
            # this file targets.
            npdata = data.asnumpy()
            blob = npdata.tobytes()
            out_c.write(f'// "{key}": \n')
            out_c.write(f"\t")
            count = 0
            # Align by emitting garbage between un-aligned data
            # (zero padding bytes until this tensor's aligned offset).
            dl_tensor_name = get_input_tensor_name(key)
            tensor = self._weights[dl_tensor_name]
            tensor_offset = tensor["offset"]
            tensor_size = tensor["size"]
            while offset < tensor_offset:
                count += 1
                out_c.write("0x{:02X}, ".format(0))
                if count == 12:
                    out_c.write("\n\t")
                    count = 0
                offset += 1
            # Emit the tensor bytes, 12 per line.
            for val in blob:
                count += 1
                out_c.write("0x{:02X}, ".format(val))
                if count == 12:
                    out_c.write("\n\t")
                    count = 0
            offset += tensor_size
            out_c.write(f"\n")
        out_c.write(
            textwrap.dedent(
                f"""\
            }};
            return (const ai_ptr)s_{name}_weights;
            }}
            """
            )
        )
    def _emit_open(self, name, out_h, out_c):
        """Emits the network.h file with a few network defines and
        writes the header part of the network.c file.

        The .h file gets include guards and input/output/activation size
        macros; the .c file gets its #include preamble.
        """
        name_upper = name.upper()
        input_size = len(self._input_data)
        output_size = len(self._output_data)
        # XXX.h
        out_h.write(
            textwrap.dedent(
                f"""\
            #ifndef __AI_{name_upper}_H__
            #define __AI_{name_upper}_H__
            #include \"ai_runtime_api.h\"
            #define _{name_upper}_INPUTS_COUNT_ ({input_size})
            #define _{name_upper}_OUTPUTS_COUNT_ ({output_size})
            #define _{name_upper}_ACTIVATION_BYTES_ ({self._activations_size})
            """
            )
        )
        # XXX.c
        out_c.write(
            textwrap.dedent(
                f"""\
            #include <stdio.h>
            #include \"dlpack/dlpack.h\"
            #include \"tvm/runtime/c_runtime_api.h\"
            #include \"{name}.h\"
            #include \"{name}_data.h\"
            """
            )
        )
def _emit_close(self, name, out_h, out_c):
    """Emits the ai_model_info structure.

    Closes the header's include guard and writes the model descriptor
    (placed in the ".nn_models" linker section) into the C file.
    """
    name_upper = name.upper()

    # datetime object containing current date and time
    now = datetime.now()
    # dd/mm/YY H:M:S
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")

    # XXX.h : close the include guard opened by _emit_open.
    out_h.write(f"#endif /*__AI_{name_upper}_H__*/ \n")

    # XXX.c
    # Activation arena: either statically allocated in a dedicated linker
    # section, or left NULL so the caller must supply it at create time.
    if self.activations_static:
        out_c.write(
            f'AI_ALIGNED({self.DATA_ALIGNMENT_BYTES}) __attribute__ ((section(".{name}.nn_data_act"))) uint8_t {name}_activations[{self._activations_size}];\n'
        )
    else:
        out_c.write(f"AI_STATIC ai_ptr {name}_activations = NULL;")

    # Emit network structure
    num_inputs = len(self._input_data)
    num_outputs = len(self._output_data)
    tool_version = tvm.__version__
    api_version = f"{AI_API_VERSION_MAJOR}.{AI_API_VERSION_MINOR}.{AI_API_VERSION_MICRO}.0"
    out_c.write(
        textwrap.dedent(
            f"""
            AI_API_ENTRY __attribute__ ((section(".nn_models"))) ai_model_info {name}_network = {{
            .name = \"{name}\",
            .datetime = \"{dt_string}\",
            .revision = \"{AI_TOOLS_REVISION}\",
            .tool_version = \"{tool_version}\",
            .api_version = \"{api_version}\",
            .n_nodes = {self._nodes_size},
            .n_inputs = {num_inputs},
            .n_outputs = {num_outputs},
            .activations_size = {self._activations_size},
            .params_size = {self._weights_size},
            .activations = {name}_activations,
            .inputs = _InputsList,
            .outputs = _OutputsList,
            .ai_get_params = &ai_{name}_data_weights_get,
            .ai_create = &ai_{name}_create,
            .ai_destroy = &ai_{name}_destroy,
            .ai_run = &ai_{name}_run
            }};
            """
        )
    )
def _emit_tensor_shape(self, dl_tensor_name, ndim, shape, strides, out_c):
out_c.write(f"AI_STATIC int64_t {dl_tensor_name}_shape[{ndim}] = {{{shape[1:-1]}}}; \n")
assert strides is None, f"###Error: non-compact tensors are not handled yet."
out_c.write(f"AI_STATIC int64_t {dl_tensor_name}_strides[{ndim}] = {{}}; \n")
def _emit_tensor_quant(self, dl_tensor_name, out_c):
    """Emits the quantization arrays and info struct for a tensor, if any.

    Returns
    -------
    quant_name : str or None
        Name of the emitted `ai_quantization_info` C variable, or None
        when the tensor carries no usable quantization info.
    """
    if dl_tensor_name in self._quantization:
        quantization = self._quantization[dl_tensor_name]
    # At this time, TVM only supports quantization info with
    # single output models.
    elif dl_tensor_name in self._output_data and "output" in self._quantization.keys():
        quantization = self._quantization["output"]
    else:
        quantization = None

    if quantization is not None:
        scale = quantization["scale"]
        zero_point = quantization["zero_point"]

        # Sometimes we get a scalar with ScaleAsNumpy.
        # This seem to mean not quantized ?
        if not isinstance(scale, np.ndarray):
            assert scale == 0.0, f"Non-quantized tensor with scale != 0.0"
            assert (
                not isinstance(zero_point, np.ndarray) and zero_point == 0
            ), f"Non-quantized tensor with zero_point != 0"
            return None

        scale_size = len(scale)
        zero_point_size = len(zero_point)

        assert len(scale) == len(
            zero_point
        ), f"Inconsistent quantizations scale:{scale} vs zero-point:{zero_point}"

        # Only per-tensor (single element) quantization is emitted; any
        # per-channel data falls through to the final `return None`.
        if len(scale) == 1:
            quant_name = dl_tensor_name + "_quant"

            out_c.write(f"AI_STATIC float {quant_name}_scale[{scale_size}] = {{ ")
            for val in scale:
                out_c.write(f"{val}, ")
            out_c.write(f"}};\n")
            out_c.write(f"AI_STATIC int32_t {quant_name}_zero_point[{zero_point_size}] = {{ ")
            for val in zero_point:
                out_c.write(f"{val}, ")
            out_c.write(f"}};")
            # .dim = -1 marks per-tensor (not per-axis) quantization.
            out_c.write(
                textwrap.dedent(
                    f"""
                    AI_STATIC ai_quantization_info {quant_name} = {{
                    .scale = {quant_name}_scale,
                    .zero_point = {quant_name}_zero_point,
                    .dim = -1
                    }};
                    """
                )
            )
            return quant_name

    return None
def _emit_tensor_init(self, dl_tensor_name, tensor, out_c):
    """Emits the tensor instantiation code.

    Writes the shape/strides arrays, any quantization info, and the
    `ai_tensor` struct itself. The `.data` pointer is left NULL here and
    bound later by the configure_activations/configure_weights code.
    """
    dltype = tensor["dltype"]
    dims = tensor["dims"]
    strides = tensor["strides"]
    byte_offset = tensor["byte_offset"]
    dtype = _get_type_data(dltype)
    ndim = len(dims)
    shape = str(dims)
    self._emit_tensor_shape(dl_tensor_name, ndim, shape, strides, out_c)

    # Quantization
    quant_name = self._emit_tensor_quant(dl_tensor_name, out_c)

    # Contents
    #
    # TODO: use the 'storage_id':
    #   " .ctx = {{ {} }}, \n".format(str(storage_id)[1:-1])
    out_c.write(
        textwrap.dedent(
            f"""
            AI_ALIGNED({self.DATA_ALIGNMENT_BYTES}) AI_STATIC ai_tensor {dl_tensor_name} = {{
            .dltensor = {{
            .data = (ai_ptr)(NULL),
            .device = {{kDLCPU,0}},
            .ndim = {ndim},
            .dtype = {{{dtype}}},
            .shape = {dl_tensor_name}_shape,
            .strides = {dl_tensor_name}_strides,
            .byte_offset = {byte_offset}
            }},
            """
        )
    )
    # Figure out quantization, if exists
    if quant_name is not None:
        out_c.write(f"  .quant = &{quant_name} \n")
    else:
        out_c.write(f"  .quant = NULL \n")
    out_c.write(f"}}; \n")
def _emit_activation_buffers(self, name, out_c):
    # pylint: disable=unused-argument
    """Emits activation tensors, including inputs/outputs.

    Emits three sections: input tensors plus the _InputsList pointer
    array, intermediate activation tensors, then output tensors plus
    the _OutputsList pointer array.
    """
    out_c.write(
        textwrap.dedent(
            f"""\
            //
            // Inputs:
            //
            """
        )
    )
    # shape/buffer
    for dl_tensor_name in self._input_data:
        tensor = self._input_data[dl_tensor_name]
        self._emit_tensor_init(dl_tensor_name, tensor, out_c)
        out_c.write(f"\n")
    out_c.write(f"\n")

    # tensor
    idx = 0
    out_c.write(f"AI_STATIC ai_tensor * _InputsList[] = {{ \n")
    for dl_tensor_name in self._input_data:
        out_c.write(f"  &{dl_tensor_name}, // [{idx}]\n")
        idx = idx + 1
    out_c.write(f"}}; \n")
    out_c.write(f"\n")

    out_c.write(
        textwrap.dedent(
            f"""\
            //
            // Activations:
            //
            """
        )
    )
    for dl_tensor_name in self._activations:
        tensor = self._activations[dl_tensor_name]
        self._emit_tensor_init(dl_tensor_name, tensor, out_c)
    out_c.write(f"\n")

    # Outputs:
    out_c.write(
        textwrap.dedent(
            f"""\
            //
            // Outputs:
            //
            """
        )
    )
    for dl_tensor_name in self._output_data:
        tensor = self._output_data[dl_tensor_name]
        self._emit_tensor_init(dl_tensor_name, tensor, out_c)
        out_c.write(f"\n")
    out_c.write(f"\n")

    idx = 0
    out_c.write(f"AI_STATIC ai_tensor * _OutputsList[] = {{ \n")
    for dl_tensor_name in self._output_data:
        out_c.write(f"  &{dl_tensor_name}, // [{idx}]\n")
        idx = idx + 1
    out_c.write(f"}}; \n")
    out_c.write(f"\n")
def _emit_params_buffers(self, name, out_c):
    """Emits all parameter tensors.

    One `ai_tensor` declaration per model weight; the data pointers are
    bound later by <name>_configure_weights.
    """
    out_c.write(
        textwrap.dedent(
            f"""
            //
            // Weights: {name}
            //
            """
        )
    )
    for dl_tensor_name in self._weights:
        tensor = self._weights[dl_tensor_name]
        self._emit_tensor_init(dl_tensor_name, tensor, out_c)
    out_c.write(f"\n")
def _emit_network(self, name, out_c):
    """Emits prototypes for the network operator functions.

    One extern prototype per fused TVM operator. 'null' graph nodes are
    placeholders (inputs/params) and produce nothing.
    """
    out_c.write(
        textwrap.dedent(
            f"""
            //
            // Network: {name}
            //
            """
        )
    )
    for node in self._nodes:
        if node["op"] == "null":
            continue
        assert node["op"] == "tvm_op", f"###Error: Only TVM ops are supported."
        node_attrs = node["attrs"]
        func_name = node_attrs["func_name"]
        # "__nop" functions have no body, so no prototype is declared.
        if func_name == "__nop":
            continue
        out_c.write(
            f"TVM_DLL int32_t {func_name}(void * args, void * arg_type_ids, int32_t num_args); \n"
        )
    out_c.write(f"\n")
def _emit_tensor_activation(self, dl_tensor_name, tensor, out_c):
    """Emits code binding one tensor's data pointer into the activation
    arena at its precomputed offset."""
    storage_id = tensor["storage_id"]
    offset = tensor["offset"]
    out_c.write(
        textwrap.indent(
            textwrap.dedent(
                f"""
                //
                // {dl_tensor_name}: storage_id:{storage_id}
                //
                {dl_tensor_name}.dltensor.data = (ai_ptr)(activations + {offset});
                """
            ),
            "  ",
        )
    )
def _emit_activation_init(self, name, out_c):
    """Emits buffer initialization code for activation tensors.

    Generates the C function <name>_configure_activations, which binds
    every activation tensor (and, in the static-I/O configuration, the
    input/output tensors as well) to offsets in the activation arena.
    """
    out_c.write(
        textwrap.dedent(
            f"""
            // {DBAR}
            // {name}_configure_activations
            // {DBAR}
            AI_STATIC AI_INLINE
            ai_status {name}_configure_activations (
            const ai_ptr activations
            )
            {{
            if (activations == NULL) {{
            TVMAPISetLastError (\"Non-null activations arena is required for this model.\");
            return AI_STATUS_ERROR;
            }}
            """
        )
    )
    # Allocate inputs with the static model
    if self.inputs_static:
        for dl_tensor_name in self._input_data:
            tensor = self._input_data[dl_tensor_name]
            self._emit_tensor_activation(dl_tensor_name, tensor, out_c)
    # Prepare activation buffers
    for dl_tensor_name in self._activations:
        tensor = self._activations[dl_tensor_name]
        self._emit_tensor_activation(dl_tensor_name, tensor, out_c)
    # Allocate outputs with the static model
    if self.outputs_static:
        for dl_tensor_name in self._output_data:
            tensor = self._output_data[dl_tensor_name]
            self._emit_tensor_activation(dl_tensor_name, tensor, out_c)
    out_c.write(
        textwrap.dedent(
            f"""
            return AI_STATUS_OK;
            }}
            """
        )
    )
def _emit_params_init(self, name, out_c):
    """Emits buffer initialization code for params tensors.

    Generates the C function <name>_configure_weights, which binds every
    weight tensor's data pointer to its offset in the weights blob.
    """
    out_c.write(
        textwrap.dedent(
            f"""
            // {DBAR}
            // {name}_configure_weights
            // {DBAR}
            AI_STATIC AI_INLINE
            ai_status {name}_configure_weights (
            const ai_ptr weights
            )
            {{
            if (weights == NULL) {{
            TVMAPISetLastError(\"Non-null weights arena is required for this model.\");
            return AI_STATUS_ERROR;
            }}
            """
        )
    )
    for dl_tensor_name in self._weights:
        tensor = self._weights[dl_tensor_name]
        offset = tensor["offset"]
        out_c.write(
            textwrap.indent(
                textwrap.dedent(
                    f"""\
                    //
                    // {dl_tensor_name}
                    //
                    {dl_tensor_name}.dltensor.data = (ai_ptr)(weights + {offset});
                    """
                ),
                "  ",
            )
        )
    out_c.write(
        textwrap.dedent(
            f"""
            return AI_STATUS_OK;
            }}
            """
        )
    )
def _emit_init(self, name, out_c):
    """Emits buffer initialization code.

    Thin aggregator: emits the activation-configuration function followed
    by the weights-configuration function.
    """
    self._emit_activation_init(name, out_c)
    self._emit_params_init(name, out_c)
def _emit_run(self, name, out_h, out_c):
    """Emits the run function code.

    Declares ai_<name>_run in the header and generates its body: for each
    graph node, a TVMValue argument array is filled with pointers to the
    node's input/output DLTensors and the corresponding packed function
    is invoked.
    """
    out_h.write(
        textwrap.dedent(
            f"""
            AI_API_ENTRY
            ai_status ai_{name}_run (
            ai_tensor *inputs[],
            ai_tensor *outputs[]
            );
            """
        )
    )
    out_c.write(
        textwrap.dedent(
            f"""
            // {DBAR}
            // ai_{name}_run
            // {DBAR}
            AI_API_ENTRY
            ai_status ai_{name}_run (
            ai_tensor *inputs[],
            ai_tensor *outputs[]
            )
            {{
            """
        )
    )
    # Execute nodes one by one
    nid = 0
    for node in self._nodes:
        node_name = node["name"]
        node_name_upper = node_name.upper()
        nid += 1

        if node["op"] == "null":
            continue

        assert node["op"] == "tvm_op", f"###Error: Only TVM ops are supported."
        node_attrs = node["attrs"]
        func_name = node_attrs["func_name"]

        if func_name == "__nop":
            continue

        out_c.write(f" // \n")
        out_c.write(f" // {func_name}\n")
        out_c.write(f" // \n")

        # Prepare TVM packed function - this is the one called
        #
        # NOTE(review): the two checks below compare the *model* name
        # (`name`) against "__nop"/"__copy"; the per-node `func_name` was
        # already filtered above, so these branches look dead — presumably
        # `func_name` was intended. Confirm before changing.
        if name == "__nop":
            print(" exec: __nop")
            continue

        if name == "__copy":
            print(" exec: __copy")
            continue

        # Get function from the TVM module
        #
        # void * args : arg_values.data()
        # void * arg_type_ids : arg_tcodes.data()
        # int32_t num_args : arg_values.size()
        dl_args_name = _get_node_args_name(node_name)
        dl_arg_types_name = _get_node_arg_types_name(node_name)

        num_inputs = len(node["inputs"])
        num_outputs = int(node_attrs["num_outputs"])
        num_args = num_inputs + num_outputs

        out_c.write(f" TVMValue {dl_args_name}[{num_args}]; \n")
        out_c.write(f" int32_t {dl_arg_types_name}[{num_args}]; \n")

        curr_idx = 0

        for arg in node["inputs"]:
            dl_tensor_name = self._get_node_arg_name(arg)
            #
            # If this input is not an activation or a parameter => find the input
            #
            if dl_tensor_name not in self._weights and dl_tensor_name not in self._activations:
                assert dl_tensor_name in self._input_data, "Tensor {} not registered ?".format(
                    dl_tensor_name
                )
                # Linear scan: map the tensor name to its position in the
                # inputs[] array passed to ai_<name>_run.
                input_idx = 0
                for dl_entry_name in self._input_data:
                    if dl_entry_name == dl_tensor_name:
                        break
                    input_idx += 1
                out_c.write(
                    f" {dl_args_name}[{curr_idx}].v_handle = &inputs[{input_idx}]->dltensor; \n"
                )
            else:
                out_c.write(
                    f" {dl_args_name}[{curr_idx}].v_handle = &{dl_tensor_name}.dltensor; \n"
                )
            out_c.write(f" {dl_arg_types_name}[{curr_idx}] = kTVMNDArrayHandle; \n")
            curr_idx += 1

        for idx in range(num_outputs):
            dl_tensor_name = get_output_tensor_name(node_name, idx)
            # If this output is not an activation => find the output
            if dl_tensor_name not in self._activations:
                assert dl_tensor_name in self._output_data
                # Linear scan: map the tensor name to its position in the
                # outputs[] array passed to ai_<name>_run.
                output_idx = 0
                for dl_exit_name in self._output_data:
                    if dl_exit_name == dl_tensor_name:
                        break
                    output_idx += 1
                out_c.write(
                    f" {dl_args_name}[{curr_idx}].v_handle = &outputs[{output_idx}]->dltensor; \n"
                )
            else:
                out_c.write(
                    f" {dl_args_name}[{curr_idx}].v_handle = &{dl_tensor_name}.dltensor; \n"
                )
            out_c.write(f" {dl_arg_types_name}[{curr_idx}] = kTVMNDArrayHandle; \n")
            out_c.write(f"\n")
            curr_idx += 1

        # call this function
        out_c.write(
            textwrap.dedent(
                f"""
                #if (_VERBOSE_ > 0)
                printf (\" {func_name} ... \\r\\n\");
                #endif
                if ({func_name} ({dl_args_name}, {dl_arg_types_name}, {num_args})) {{
                TVMAPISetLastError("Invalid handle");
                return AI_STATUS_ERROR;
                }}
                #if (_VERBOSE_ > 0)
                printf (\" {func_name} Done.\\r\\n\");
                #endif
                """
            )
        )
        out_c.write(f"\n")

    out_c.write(
        textwrap.dedent(
            f"""
            return AI_STATUS_OK;
            }}
            """
        )
    )
    out_c.write(f"\n")
def _emit_create_destroy(self, name, out_h, out_c):
    """Emits the create/destroy functions.

    ai_<name>_create wires the caller-provided weights and activation
    arenas into the tensors; ai_<name>_destroy is currently a no-op.
    """
    out_h.write(
        textwrap.dedent(
            f"""
            AI_API_ENTRY
            ai_status ai_{name}_create (
            const ai_ptr weights,
            const ai_ptr activations
            );
            """
        )
    )
    out_h.write(
        textwrap.dedent(
            f"""
            AI_API_ENTRY
            ai_status ai_{name}_destroy ();
            """
        )
    )
    out_c.write(
        textwrap.dedent(
            f"""
            // {DBAR}
            // ai_{name}_create
            // {DBAR}
            AI_API_ENTRY
            ai_status ai_{name}_create(
            const ai_ptr weights,
            const ai_ptr activations
            )
            {{
            ai_status status = AI_STATUS_OK;
            status = {name}_configure_weights (weights);
            if (status != AI_STATUS_OK) {{
            return status;
            }}
            status = {name}_configure_activations (activations);
            if (status != AI_STATUS_OK) {{
            return status;
            }}
            return AI_STATUS_OK;
            }}
            """
        )
    )
    out_c.write(
        textwrap.dedent(
            f"""
            // {DBAR}
            // ai_{name}_destroy
            // {DBAR}
            AI_API_ENTRY
            ai_status ai_{name}_destroy ()
            {{
            return AI_STATUS_OK;
            }}
            """
        )
    )
def emit_code(self, dest_dir, model_name):
    """Emits the C code implementing the model.

    Parameters
    ----------
    dest_dir : str
        Output directory; must not already exist (raises ValueError).
    model_name : str
        Model name; sanitized to a lowercase C identifier before use.

    Writes <name>_lib*.c (operator code), <name>.params, <name>.json,
    <name>_data.[ch] (weights blob) and <name>.[ch] (network glue).
    """
    # Build the directory structure
    if os.path.exists(dest_dir):
        raise ValueError(f"emit_code.Error: {dest_dir} exists.")
    # Make a new one
    os.makedirs(dest_dir)

    # Fix the model name
    model_name = re.sub("[^0-9a-zA-Z_]+", "_", model_name)
    model_name = model_name.lower()

    # Write the C code: we can parse the string
    if isinstance(self._lib, list):
        # List of strings from Model Library Format C files
        for idx, src in enumerate(self._lib):
            code = _preprocess_code(src)
            filename = os.path.join(dest_dir, f"{model_name}_lib{idx}.c")
            with open(filename, "w") as fout:
                fout.write(code)
    else:
        # a TVM RuntimeGraphFactory
        src = self._lib.get_source(fmt="c")
        code = _preprocess_code(src)
        filename = os.path.join(dest_dir, f"{model_name}_lib.c")
        with open(filename, "w") as fout:
            fout.write(code)

    # Save params as binary data
    saved_params = tvm.runtime.save_param_dict(self._params)
    params_name = os.path.join(dest_dir, model_name + ".params")
    with open(params_name, "wb") as f:
        f.write(saved_params)

    # Write the .json
    graph_name = os.path.join(dest_dir, model_name + ".json")
    json_string = json.dumps(self._graph, indent=4)
    with open(graph_name, "w") as f:
        print(json_string, file=f)

    # emit X_data[c,h]
    data_h_name = os.path.join(dest_dir, model_name + "_data.h")
    data_c_name = os.path.join(dest_dir, model_name + "_data.c")
    model_h_name = os.path.join(dest_dir, model_name + ".h")
    model_c_name = os.path.join(dest_dir, model_name + ".c")

    # ExitStack keeps all four output files open for the whole emission
    # and guarantees they are closed on any error.
    with contextlib.ExitStack() as exit_stack:
        # emit X[c,h]
        data_h = exit_stack.enter_context(open(data_h_name, "w"))
        data_c = exit_stack.enter_context(open(data_c_name, "w"))
        out_h = exit_stack.enter_context(open(model_h_name, "w"))
        out_c = exit_stack.enter_context(open(model_c_name, "w"))

        self._emit_params_data(model_name, data_h, data_c)
        self._emit_open(model_name, out_h, out_c)
        self._emit_params_buffers(model_name, out_c)
        self._emit_activation_buffers(model_name, out_c)
        self._emit_network(model_name, out_c)
        self._emit_init(model_name, out_c)
        self._emit_create_destroy(model_name, out_h, out_c)
        self._emit_run(model_name, out_h, out_c)
        self._emit_close(model_name, out_h, out_c)
| 42,638 | 29.965142 | 163 | py |
tvm | tvm-main/python/tvm/micro/contrib/stm32/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Module container of STM32 code generator."""
from .emitter import CodeEmitter, get_input_tensor_name, get_output_tensor_name
| 915 | 42.619048 | 79 | py |
tvm | tvm-main/python/tvm/script/highlight.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Highlight printed TVM script.
"""
import os
import sys
import warnings
from typing import Any, Optional, Union
def cprint(
    printable: Union[Any, str],
    style: Optional[str] = None,
    black_format: bool = True,
) -> None:
    """Print TVMScript string with Pygments highlight and Black auto-formatting.

    Parameters
    ----------
    printable : Union[IRModule, PrimFunc, str]
        The TVMScript to be printed
    style : str, optional
        Pygmentize printing style, auto-detected if None.
    black_format: bool
        If true (default), use the formatter Black to format the TVMScript

    Notes
    -----
    The style parameter follows the Pygments style names or Style objects. Three
    built-in styles are extended: "light", "dark" and "ansi". By default, "light"
    will be used for notebook environment and terminal style will be "ansi" for
    better style consistency. As an fallback when the optional Pygment library is
    not installed, plain text will be printed with a one-time warning to suggest
    installing the Pygment library. Other Pygment styles can be found in
    https://pygments.org/styles/

    The default pygmentize style can also be set with the environment
    variable "TVM_PYGMENTIZE_STYLE".
    """
    # Normalize the input to a plain string first.
    if hasattr(printable, "script") and callable(getattr(printable, "script")):
        printable = printable.script()
    elif not isinstance(printable, str):
        raise TypeError(
            f"Only can print strings or objects with `script` method, but got: {type(printable)}"
        )

    if black_format:
        printable = _format(printable)

    in_notebook = "ipykernel" in sys.modules  # in notebook env (support html display).
    resolved_style = _get_pygments_style(style, in_notebook)

    # Pygments unavailable: fall back to plain text (warning already issued).
    if resolved_style is None:
        print(printable)
        return

    # pylint: disable=import-outside-toplevel
    from pygments import highlight
    from pygments.formatters import HtmlFormatter, Terminal256Formatter
    from pygments.lexers.python import Python3Lexer

    lexer = Python3Lexer()
    if in_notebook:
        from IPython import display  # pylint: disable=import-outside-toplevel

        formatter = HtmlFormatter(style=resolved_style)
        formatter.noclasses = True  # inline styles
        display.display(display.HTML(highlight(printable, lexer, formatter)))
    else:
        print(highlight(printable, lexer, Terminal256Formatter(style=resolved_style)))
def _format(code_str: str) -> str:
"""Format a code string using Black.
Parameters
----------
code_str: str
The string containing Python/TVMScript code to format
Returns
-------
formatted: str
The formatted Python/TVMScript code
"""
try:
# pylint: disable=import-outside-toplevel
import black
except ImportError as err:
with warnings.catch_warnings():
warnings.simplefilter("once", UserWarning)
install_cmd = sys.executable + ' -m pip install "black==22.3.0" --upgrade --user'
warnings.warn(
str(err)
+ "\n"
+ "To print formatted TVM script, please install the formatter 'Black':\n"
+ install_cmd,
category=UserWarning,
)
return code_str
else:
return black.format_str(code_str, mode=black.FileMode())
def _get_pygments_style(
    style: Optional[str], is_in_notebook: bool
) -> Optional[Union["pygments.style.Style", str]]:
    """Select a pygments style to use

    Parameters
    ----------
    style: str
        The style specifier to use. If None, auto-select a style.
    is_in_notebook: bool
        Whether python is currently running in a jupyter notebook.
        Used for automatic selection.

    Returns
    -------
    style: Optional[Union['pygments.style.Style',str]]
        If pygments is installed, the style object or string, suitable
        for use as the "style" argument to pygments formatters. If
        pygments is not installed, returns None.
    """
    # Pygments (>= 2.4.0, for ANSI color tokens) and packaging are optional;
    # on failure warn once with an install hint and return None.
    try:
        # pylint: disable=import-outside-toplevel
        import pygments
        from packaging import version
        from pygments.style import Style
        from pygments.token import Comment, Keyword, Name, Number, Operator, String

        if version.parse(pygments.__version__) < version.parse("2.4.0"):
            raise ImportError("Required Pygments version >= 2.4.0 but got " + pygments.__version__)
    except ImportError as err:
        # Tailor the install hint to whichever dependency is missing.
        if err.name == "packaging":
            name = "packaging"
        elif err.name == "pygments":
            name = "Pygments>=2.4.0"
        else:
            raise ValueError(f'Package "{err.name}" should not be used')
        with warnings.catch_warnings():
            warnings.simplefilter("once", UserWarning)
            install_cmd = sys.executable + f' -m pip install "{name}" --upgrade --user'
            warnings.warn(
                str(err)
                + "\n"
                + f"To print highlighted TVM script, please install {name}:\n"
                + install_cmd,
                category=UserWarning,
            )
        return None

    class JupyterLight(Style):
        """A Jupyter-Notebook-like Pygments style configuration (aka. "light")"""

        background_color = ""
        styles = {
            Keyword: "bold #008000",
            Keyword.Type: "nobold #008000",
            Name.Function: "#0000FF",
            Name.Class: "bold #0000FF",
            Name.Decorator: "#AA22FF",
            String: "#BA2121",
            Number: "#008000",
            Operator: "bold #AA22FF",
            Operator.Word: "bold #008000",
            Comment: "italic #007979",
        }

    class VSCDark(Style):
        """A VSCode-Dark-like Pygments style configuration (aka. "dark")"""

        background_color = ""
        styles = {
            Keyword: "bold #c586c0",
            Keyword.Type: "#82aaff",
            Keyword.Namespace: "#4ec9b0",
            Name.Class: "bold #569cd6",
            Name.Function: "bold #dcdcaa",
            Name.Decorator: "italic #fe4ef3",
            String: "#ce9178",
            Number: "#b5cea8",
            Operator: "#bbbbbb",
            Operator.Word: "#569cd6",
            Comment: "italic #6a9956",
        }

    class AnsiTerminalDefault(Style):
        """The default style for terminal display with ANSI colors (aka. "ansi")"""

        background_color = ""
        styles = {
            Keyword: "bold ansigreen",
            Keyword.Type: "nobold ansigreen",
            Name.Class: "bold ansiblue",
            Name.Function: "bold ansiblue",
            Name.Decorator: "italic ansibrightmagenta",
            String: "ansiyellow",
            Number: "ansibrightgreen",
            Operator: "bold ansimagenta",
            Operator.Word: "bold ansigreen",
            Comment: "italic ansibrightblack",
        }

    # Resolution order: named built-in style, explicit style passed through,
    # environment override, then environment-based default.
    if style == "light":
        return JupyterLight
    elif style == "dark":
        return VSCDark
    elif style == "ansi":
        return AnsiTerminalDefault

    if style is not None:
        return style

    style_from_environment = os.environ.get("TVM_PYGMENTIZE_STYLE", "").strip()
    if style_from_environment:
        return style_from_environment

    if is_in_notebook:
        return JupyterLight
    return AnsiTerminalDefault
| 8,169 | 31.29249 | 99 | py |
tvm | tvm-main/python/tvm/script/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM Script APIs of TVM Python Package"""
from .parser import ir, ir_module
from .parser import parse as from_source
from .parser import tir
| 928 | 43.238095 | 62 | py |
tvm | tvm-main/python/tvm/script/printer/doc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Doc types for TVMScript Unified Printer"""
from enum import IntEnum, unique
from typing import Dict, List, Optional, Sequence, Tuple, Union
from tvm._ffi import register_object
from tvm.runtime import Object, ObjectPath
from tvm.tir import FloatImm, IntImm
from . import _ffi_api
class Doc(Object):
    """Base class of all Docs"""

    @property
    def source_paths(self) -> Sequence[ObjectPath]:
        """
        The list of object paths of the source IR node.

        This is used to trace back to the IR node position where
        this Doc is generated, in order to position the diagnostic
        message.
        """
        # Must go through __getattr__ explicitly: plain attribute access
        # would re-enter this property and recurse forever.
        return self.__getattr__("source_paths")  # pylint: disable=unnecessary-dunder-call

    @source_paths.setter
    def source_paths(self, value):
        # The node is FFI-managed, so the setter is forwarded to C++.
        return _ffi_api.DocSetSourcePaths(self, value)  # type: ignore # pylint: disable=no-member
class ExprDoc(Doc):
    """Base class of all expression Docs"""

    def attr(self, name: str) -> "AttrAccessDoc":
        """
        Create a doc that represents attribute access on self.

        Parameters
        ----------
        name : str
            The attribute name to access

        Returns
        -------
        doc : AttrAccessDoc
        """
        return _ffi_api.ExprDocAttr(self, name)  # type: ignore # pylint: disable=no-member

    def call(self, *args: Tuple["ExprDoc"], **kwargs: Dict[str, "ExprDoc"]) -> "CallDoc":
        """
        Create a doc that represents function call, with self as callee.

        Parameters
        ----------
        *args : ExprDoc
            The positional arguments of the function call.
        **kwargs
            The keyword arguments of the function call.

        Returns
        -------
        doc : CallDoc
        """
        # The FFI layer takes keyword arguments as parallel key/value lists.
        kwargs_keys = list(kwargs.keys())
        kwargs_values = list(kwargs.values())
        return _ffi_api.ExprDocCall(self, args, kwargs_keys, kwargs_values)  # type: ignore # pylint: disable=no-member

    # Type accepted as a single index by __getitem__.
    _IndexType = Union["ExprDoc", "SliceDoc"]

    def __getitem__(self, indices: Union[Tuple[_IndexType], _IndexType]) -> "IndexDoc":
        """
        Create a doc that represents index access on self.

        Parameters
        ----------
        indices : Union[Tuple[Union["ExprDoc", "SliceDoc"]], Union["ExprDoc", "SliceDoc"]]
            The indices to access

        Returns
        -------
        doc : IndexDoc
        """
        # Normalize a single index into a one-element tuple for the FFI call.
        if not isinstance(indices, tuple):
            indices = (indices,)
        return _ffi_api.ExprDocIndex(self, indices)  # type: ignore # pylint: disable=no-member

    def __iter__(self):
        """
        This is implemented to prevent confusing error message when trying to use ExprDoc
        as iterable. According to PEP-234, An object can be iterated over if it
        implements __iter__() or __getitem__(). If an object has only __getitem__
        but not __iter__, interpreter will iterate the object by calling
        __getitem__ with 0, 1, 2, ..., until an IndexError is raised.

        https://peps.python.org/pep-0234/#python-api-specification
        """
        raise RuntimeError(f"{self.__class__} cannot be used as iterable.")
class StmtDoc(Doc):
    """Base class of statement doc"""

    @property
    def comment(self) -> Optional[str]:
        """
        The comment of this doc.

        The actual position of the comment depends on the type of Doc
        and also the DocPrinter implementation. It could be on the same
        line as the statement, or the line above, or inside the statement
        if it spans over multiple lines.
        """
        # It has to call the dunder method to avoid infinite recursion
        # (normal attribute access would resolve to this property again).
        return self.__getattr__("comment")  # pylint: disable=unnecessary-dunder-call

    @comment.setter
    def comment(self, value):
        # The node is FFI-managed, so the setter is forwarded to C++.
        return _ffi_api.StmtDocSetComment(self, value)  # type: ignore # pylint: disable=no-member
@register_object("script.printer.StmtBlockDoc")
class StmtBlockDoc(Doc):
    """The container doc that holds a list of StmtDoc.

    Note: `StmtBlockDoc` is never used in the IR, but a temporary container that allows holding a
    list of StmtDoc.
    """

    # The contained statements, in order.
    stmts: Sequence[StmtDoc]

    def __init__(self, stmts: List[StmtDoc]):
        """Construct a StmtBlockDoc holding the given statement docs."""
        self.__init_handle_by_constructor__(_ffi_api.StmtBlockDoc, stmts)  # type: ignore # pylint: disable=no-member
@register_object("script.printer.LiteralDoc")
class LiteralDoc(ExprDoc):
    """Doc that represents literal value"""

    # Literal payload, held by the underlying C++ node.
    value: Union[str, IntImm, FloatImm, None]

    def __init__(
        self,
        value: Union[str, float, bool, int, None],
        path: Optional[ObjectPath] = None,
    ):
        """Construct a LiteralDoc from a Python literal.

        Dispatches on the runtime type of `value`. The check order matters:
        `bool` must be tested before `int` because `bool` is a subclass of
        `int` in Python.
        """
        if value is None:
            self.__init_handle_by_constructor__(_ffi_api.LiteralDocNone, path)  # type: ignore # pylint: disable=no-member
            return
        dispatch = (
            (str, "LiteralDocStr"),
            (float, "LiteralDocFloat"),
            (bool, "LiteralDocBoolean"),
            (int, "LiteralDocInt"),
        )
        for python_type, ffi_ctor_name in dispatch:
            if isinstance(value, python_type):
                ctor = getattr(_ffi_api, ffi_ctor_name)  # pylint: disable=no-member
                self.__init_handle_by_constructor__(ctor, value, path)
                return
        raise TypeError(f"Unsupported type {type(value)} for LiteralDoc")
@register_object("script.printer.IdDoc")
class IdDoc(ExprDoc):
    """Doc that represents identifier"""

    # The identifier text, e.g. a variable or function name.
    name: str

    def __init__(self, name: str):
        """Construct an IdDoc for the given identifier name."""
        self.__init_handle_by_constructor__(_ffi_api.IdDoc, name)  # type: ignore # pylint: disable=no-member
@register_object("script.printer.AttrAccessDoc")
class AttrAccessDoc(ExprDoc):
    """Doc that represents attribute access on an expression"""

    # The expression whose attribute is accessed.
    value: ExprDoc
    # The attribute name.
    name: str

    def __init__(self, value: ExprDoc, name: str):
        """Construct a doc for `value.name`."""
        self.__init_handle_by_constructor__(_ffi_api.AttrAccessDoc, value, name)  # type: ignore # pylint: disable=no-member
@register_object("script.printer.IndexDoc")
class IndexDoc(ExprDoc):
    """Doc that represents index access on an expression"""

    # The expression being indexed.
    value: ExprDoc
    # Indices; each may be a plain expression or a slice.
    indices: Sequence[Union[ExprDoc, "SliceDoc"]]

    def __init__(self, value: ExprDoc, indices: List[Union[ExprDoc, "SliceDoc"]]):
        """Construct a doc for `value[indices...]`."""
        self.__init_handle_by_constructor__(_ffi_api.IndexDoc, value, indices)  # type: ignore # pylint: disable=no-member
@register_object("script.printer.CallDoc")
class CallDoc(ExprDoc):
    """Doc that represents function call"""

    # The callee expression.
    callee: ExprDoc
    # Positional arguments.
    args: Sequence[ExprDoc]
    # Keyword arguments as parallel key/value sequences.
    kwargs_keys: Sequence[str]
    kwargs_values: Sequence[ExprDoc]

    def __init__(self, callee: ExprDoc, *args: Tuple[ExprDoc], **kwargs: Dict[str, ExprDoc]):
        """Construct a doc for `callee(*args, **kwargs)`."""
        # The FFI layer takes keyword arguments as parallel key/value lists.
        kwargs_keys = list(kwargs.keys())
        kwargs_values = list(kwargs.values())
        self.__init_handle_by_constructor__(
            _ffi_api.CallDoc,  # type: ignore # pylint: disable=no-member
            callee,
            args,
            kwargs_keys,
            kwargs_values,
        )
@unique
class OperationKind(IntEnum):
    """
    This enum represents the kind of operation (operator) in OperationDoc

    It's mirrored from OperationDocNode::Kind at include/tvm/script/printer/doc.h

    The `_XxxStart`/`_XxxEnd` members are range sentinels delimiting the
    unary, binary and special operator groups; they are not real operators.
    """

    # The name convention follows https://docs.python.org/3/library/ast.html
    # pylint: disable=invalid-name
    _UnaryStart = 0
    USub = 1
    Invert = 2
    Not = 3
    _UnaryEnd = 4
    _BinaryStart = 5
    Add = 6
    Sub = 7
    Mult = 8
    Div = 9
    FloorDiv = 10
    Mod = 11
    Pow = 12
    LShift = 13
    RShift = 14
    BitAnd = 15
    BitOr = 16
    BitXor = 17
    Lt = 18
    LtE = 19
    Eq = 20
    NotEq = 21
    Gt = 22
    GtE = 23
    And = 24
    Or = 25
    _BinaryEnd = 26
    _SpecialStart = 27
    IfThenElse = 28
    _SpecialEnd = 29
    # pylint: enable=invalid-name
@register_object("script.printer.OperationDoc")
class OperationDoc(ExprDoc):
    """
    Doc that represents operation

    It can be unary, binary and other special operators (for example, the
    if-then-else expression).
    """

    # The operator kind (see OperationKind).
    kind: OperationKind
    # Operands, in operator-specific order.
    operands: Sequence[ExprDoc]

    def __init__(self, kind: OperationKind, operands: List[ExprDoc]):
        """Construct a doc for the given operator applied to `operands`."""
        self.__init_handle_by_constructor__(_ffi_api.OperationDoc, kind, operands)  # type: ignore # pylint: disable=no-member
@register_object("script.printer.LambdaDoc")
class LambdaDoc(ExprDoc):
    """Doc that represents lambda function"""

    # Lambda parameters.
    args: Sequence[IdDoc]
    # The single body expression.
    body: ExprDoc

    def __init__(self, args: List[IdDoc], body: ExprDoc):
        """Construct a doc for `lambda args...: body`."""
        self.__init_handle_by_constructor__(_ffi_api.LambdaDoc, args, body)  # type: ignore # pylint: disable=no-member
@register_object("script.printer.TupleDoc")
class TupleDoc(ExprDoc):
"""Doc that represents tuple literal"""
elements: Sequence[ExprDoc]
def __init__(self, elements: List[ExprDoc]):
self.__init_handle_by_constructor__(_ffi_api.TupleDoc, elements) # type: ignore # pylint: disable=no-member
@register_object("script.printer.ListDoc")
class ListDoc(ExprDoc):
"""Doc that represents list literal"""
elements: Sequence[ExprDoc]
def __init__(self, elements: List[ExprDoc]):
self.__init_handle_by_constructor__(_ffi_api.ListDoc, elements) # type: ignore # pylint: disable=no-member
@register_object("script.printer.DictDoc")
class DictDoc(ExprDoc):
    """Doc that represents dict literal"""
    keys: Sequence[ExprDoc]
    values: Sequence[ExprDoc]
    def __init__(self, content: Dict[ExprDoc, ExprDoc]):
        # The FFI constructor takes parallel key/value lists rather than a
        # dict; iterating `content` once fixes a single, consistent order.
        entry_keys = list(content)
        entry_values = [content[key] for key in entry_keys]
        self.__init_handle_by_constructor__(_ffi_api.DictDoc, entry_keys, entry_values) # type: ignore # pylint: disable=no-member
@register_object("script.printer.SliceDoc")
class SliceDoc(ExprDoc):
    """
    Doc that represents slice in Index expression
    This doc can only appear in `IndexDoc.indices`.
    """
    # Any of start/stop/step may be None, i.e. omitted in the slice syntax.
    start: Optional[ExprDoc]
    stop: Optional[ExprDoc]
    step: Optional[ExprDoc]
    def __init__(
        self,
        start: Optional[ExprDoc] = None,
        stop: Optional[ExprDoc] = None,
        step: Optional[ExprDoc] = None,
    ):
        self.__init_handle_by_constructor__(_ffi_api.SliceDoc, start, stop, step) # type: ignore # pylint: disable=no-member
@register_object("script.printer.AssignDoc")
class AssignDoc(StmtDoc):
    """Doc that represents assign statement."""
    # Left-hand side of the assignment.
    lhs: ExprDoc
    # Right-hand side; may be None (presumably for annotation-only form `x: T` — confirm).
    rhs: Optional[ExprDoc]
    # Optional type annotation on the assignment target.
    annotation: Optional[ExprDoc]
    def __init__(self, lhs: ExprDoc, rhs: Optional[ExprDoc], annotation: Optional[ExprDoc] = None):
        self.__init_handle_by_constructor__(
            _ffi_api.AssignDoc,  # type: ignore # pylint: disable=no-member
            lhs,
            rhs,
            annotation,
        )
@register_object("script.printer.IfDoc")
class IfDoc(StmtDoc):
    """Doc that represent if-then-else statement."""
    predicate: ExprDoc
    then_branch: Sequence[StmtDoc]
    else_branch: Sequence[StmtDoc]
    def __init__(self, predicate: ExprDoc, then_branch: List[StmtDoc], else_branch: List[StmtDoc]):
        self.__init_handle_by_constructor__(
            _ffi_api.IfDoc,  # type: ignore # pylint: disable=no-member
            predicate,
            then_branch,
            else_branch,
        )
@register_object("script.printer.WhileDoc")
class WhileDoc(StmtDoc):
    """Doc that represents while statement."""
    predicate: ExprDoc
    body: Sequence[StmtDoc]
    def __init__(self, predicate: ExprDoc, body: List[StmtDoc]):
        self.__init_handle_by_constructor__(_ffi_api.WhileDoc, predicate, body) # type: ignore # pylint: disable=no-member
@register_object("script.printer.ForDoc")
class ForDoc(StmtDoc):
    """Doc that represents for statement."""
    # Loop target (the `lhs` in `for lhs in rhs:`).
    lhs: ExprDoc
    # Iterable expression.
    rhs: ExprDoc
    body: Sequence[StmtDoc]
    def __init__(self, lhs: ExprDoc, rhs: ExprDoc, body: List[StmtDoc]):
        self.__init_handle_by_constructor__(_ffi_api.ForDoc, lhs, rhs, body) # type: ignore # pylint: disable=no-member
@register_object("script.printer.ScopeDoc")
class ScopeDoc(StmtDoc):
    """
    Doc that represents special scopes.
    Specifically, this means the with statement in Python:
    with <rhs> as <lhs>:
        <body...>
    """
    # Optional `as` target; None when the with statement has no `as` clause.
    lhs: Optional[ExprDoc]
    rhs: ExprDoc
    body: Sequence[StmtDoc]
    def __init__(self, lhs: Optional[ExprDoc], rhs: ExprDoc, body: List[StmtDoc]):
        self.__init_handle_by_constructor__(_ffi_api.ScopeDoc, lhs, rhs, body) # type: ignore # pylint: disable=no-member
@register_object("script.printer.ExprStmtDoc")
class ExprStmtDoc(StmtDoc):
    """Doc that represents an expression as statement."""
    expr: ExprDoc
    def __init__(self, expr: ExprDoc):
        self.__init_handle_by_constructor__(_ffi_api.ExprStmtDoc, expr) # type: ignore # pylint: disable=no-member
@register_object("script.printer.AssertDoc")
class AssertDoc(StmtDoc):
    """Doc that represents assert statement."""
    test: ExprDoc
    # Optional assertion message (the second operand of `assert`).
    msg: Optional[ExprDoc]
    def __init__(self, test: ExprDoc, msg: Optional[ExprDoc] = None):
        self.__init_handle_by_constructor__(_ffi_api.AssertDoc, test, msg) # type: ignore # pylint: disable=no-member
@register_object("script.printer.ReturnDoc")
class ReturnDoc(StmtDoc):
    """Doc that represents return statement."""
    value: ExprDoc
    def __init__(self, value: ExprDoc):
        self.__init_handle_by_constructor__(_ffi_api.ReturnDoc, value) # type: ignore # pylint: disable=no-member
@register_object("script.printer.FunctionDoc")
class FunctionDoc(StmtDoc):
    """Doc that represents function definition."""
    name: IdDoc
    # Parameters are AssignDoc so each can carry an annotation and/or default.
    args: Sequence[AssignDoc]
    decorators: Sequence[ExprDoc]
    return_type: Optional[ExprDoc]
    body: Sequence[StmtDoc]
    def __init__(
        self,
        name: IdDoc,
        args: List[AssignDoc],
        decorators: List[ExprDoc],
        return_type: Optional[ExprDoc],
        body: List[StmtDoc],
    ):
        self.__init_handle_by_constructor__(
            _ffi_api.FunctionDoc,  # type: ignore # pylint: disable=no-member
            name,
            args,
            decorators,
            return_type,
            body,
        )
@register_object("script.printer.ClassDoc")
class ClassDoc(StmtDoc):
    """Doc that represents class definition."""
    name: IdDoc
    decorators: Sequence[ExprDoc]
    body: Sequence[StmtDoc]
    def __init__(self, name: IdDoc, decorators: List[ExprDoc], body: List[StmtDoc]):
        self.__init_handle_by_constructor__(
            _ffi_api.ClassDoc,  # type: ignore # pylint: disable=no-member
            name,
            decorators,
            body,
        )
@register_object("script.printer.CommentDoc")
class CommentDoc(StmtDoc):
    """Doc that represents comment."""
    def __init__(self, comment: str):
        self.__init_handle_by_constructor__(
            _ffi_api.CommentDoc, comment # type: ignore # pylint: disable=no-member
        )
@register_object("script.printer.DocStringDoc")
class DocStringDoc(StmtDoc):
    """Doc that represents docstring."""
    def __init__(self, docs: str):
        self.__init_handle_by_constructor__(
            _ffi_api.DocStringDoc, docs # type: ignore # pylint: disable=no-member
        )
| 16,534 | 29.395221 | 126 | py |
tvm | tvm-main/python/tvm/script/printer/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.script.printer"""
import tvm._ffi
tvm._ffi._init_api("script.printer", __name__) # pylint: disable=protected-access
| 923 | 43 | 82 | py |
tvm | tvm-main/python/tvm/script/printer/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMScript Unified Printer
This package provides a set of APIs to print supported TVM IR into TVMScript
in a roundtrippable way.
"""
| 921 | 40.909091 | 76 | py |
tvm | tvm-main/python/tvm/script/printer/doc_printer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Functions to print doc into text format"""
from typing import List, Optional
from tvm.runtime import ObjectPath
from tvm.runtime.script_printer import PrinterConfig
from . import _ffi_api
from .doc import Doc
def to_python_script(
    doc: Doc,
    indent_spaces: int = 4,
    print_line_numbers: bool = False,
    num_context_lines: Optional[int] = None,
    path_to_underline: Optional[List[ObjectPath]] = None,
) -> str:
    """Render a Doc tree as Python source text.

    Parameters
    ----------
    doc : Doc
        The doc to convert into Python script
    indent_spaces : int
        The number of indent spaces to use in the output
    print_line_numbers: bool
        Whether to print line numbers
    num_context_lines : Optional[int]
        Number of context lines to print around the underlined text
    path_to_underline : Optional[ObjectPath]
        Object path to be underlined

    Returns
    -------
    script : str
        The text representation of Doc in Python syntax
    """
    # Bundle the formatting options into a PrinterConfig, which is what the
    # FFI printer consumes alongside the doc itself.
    options = {
        "indent_spaces": indent_spaces,
        "print_line_numbers": print_line_numbers,
        "num_context_lines": num_context_lines,
        "path_to_underline": path_to_underline,
    }
    return _ffi_api.DocToPythonScript(doc, PrinterConfig(**options))  # type: ignore # pylint: disable=no-member
| 2,086 | 32.66129 | 91 | py |
tvm | tvm-main/python/tvm/script/ir_builder/base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A generic IRBuilder across the TVM stack"""
from typing import Any, Callable, List
from tvm._ffi import register_object as _register_object
from tvm.runtime import Object as _Object
from . import _ffi_api
@_register_object("script.ir_builder.IRBuilderFrame")
class IRBuilderFrame(_Object):
    """A stack frame of the IRBuilder used to keep track of the current scope.
    Furthermore, the information stored in each stack frame can be useful for context-dependent
    IR construction.
    Examples
    --------
    The `T.match_buffer` below adds an element in the buffer map of `PrimFuncFrame`:
    .. code-block:: python
    from tvm.script.ir_builder import tir as T
    from tvm.script.ir_builder import IRBuilder
    with IRBuilder() as builder:
        with T.prim_func(...):  # pushes a PrimFuncFrame (subclass of IRBuilderFrame)
                                # to `builder`'s stack of frames
            buffer = T.match_buffer(...)
    The `T.match_buffer` below instead generates `MatchBufferRegion` in a TIR block:
    .. code-block:: python
    from tvm.script.ir_builder import tir as T
    from tvm.script.ir_builder import IRBuilder
    with IRBuilder() as builder:
        with T.prim_func(...):  # pushes a PrimFuncFrame (subclass of IRBuilderFrame)
                                # to `builder`'s stack of frames
            with T.block(...):  # pushes a BlockFrame (subclass of IRBuilderFrame)
                                # to `builder`'s stack of frames
                buffer = T.match_buffer(...)
    """
    def __enter__(self) -> "IRBuilderFrame":
        # Push this frame onto the active IRBuilder's frame stack.
        _ffi_api.IRBuilderFrameEnter(self) # type: ignore[attr-defined] # pylint: disable=no-member
        return self
    def __exit__(self, exc_type, exc_value, trace) -> None: # pylint: disable=unused-argument
        if exc_type is None and exc_value is None:
            # Do not execute `FrameExit` if the with scope exits because of exceptions
            _ffi_api.IRBuilderFrameExit(self) # type: ignore[attr-defined] # pylint: disable=no-member
    def add_callback(self, callback: Callable[[], None]) -> None:
        """Add a callback method invoked when exiting the with-scope.
        Parameters
        ----------
        callback : Callable[[], None]
            The callback method to be invoked.
        """
        _ffi_api.IRBuilderFrameAddCallback( # type: ignore[attr-defined] # pylint: disable=no-member
            self, callback
        )
@_register_object("script.ir_builder.IRBuilder")
class IRBuilder(_Object):
    """A dialect-agnostic IRBuilder that constructs any IR of TVM.
    Examples
    --------
    An idiomatic use of this class is to put this inside the with-scope,
    call dialect-specific methods accordingly. Upon exiting the scope.
    .. code-block:: python
    from tvm.script.ir_builder import tir as T
    from tvm.script.ir_builder import IRBuilder
    with IRBuilder() as builder:
        with T.prim_func(...):  # pushes a PrimFuncFrame (subclass of IRBuilderFrame)
                                # to `builder`'s stack of frames
            buffer = T.match_buffer(...)
    return builder.get()  # returns the constructed IR, i.e. tir.PrimFunc
    """
    def __init__(self) -> None:
        """Construct an IRBuilder."""
        self.__init_handle_by_constructor__(
            _ffi_api.IRBuilder # type: ignore[attr-defined] # pylint: disable=no-member
        )
    def __enter__(self) -> "IRBuilder":
        """Enter the with-scope for IRBuilder, which allows the IRBuilder to be discoverable
        using `IRBuilder.current()`.
        Examples
        --------
        .. code-block:: python
        from tvm.script.ir_builder import IRBuilder
        with IRBuilder() as builder:
            assert IRBuilder.current() == builder
        """
        _ffi_api.IRBuilderEnter(self) # type: ignore[attr-defined] # pylint: disable=no-member
        return self
    def __exit__(self, ptype, value, trace) -> None: # pylint: disable=unused-argument
        # Pop this builder from the thread-local scope unconditionally.
        _ffi_api.IRBuilderExit(self) # type: ignore[attr-defined] # pylint: disable=no-member
    @staticmethod
    def current() -> "IRBuilder":
        """Get the current IRBuilder put in the with-scope.
        Returns
        -------
        builder : IRBuilder
            The current IRBuilder.
        """
        return _ffi_api.IRBuilderCurrent() # type: ignore[attr-defined] # pylint: disable=no-member
    @staticmethod
    def is_in_scope() -> bool:
        """See if the current thread-local scope has an IRBuilder.
        Returns
        -------
        bool
            Whether the current thread-local scope has an IRBuilder
        """
        return _ffi_api.IRBuilderIsInScope() # type: ignore[attr-defined] # pylint: disable=no-member
    def get(self) -> _Object:
        """Get the constructed IR."""
        return _ffi_api.IRBuilderGet(self) # type: ignore[attr-defined] # pylint: disable=no-member
    @staticmethod
    def name(s: str, v: Any) -> Any:
        """Set the name of an object.
        Parameters
        ----------
        s : str
            The name of the object.
        v : Any
            The object to name.
        Returns
        -------
        v : Any
            The same object with the name set.
        """
        return _ffi_api.IRBuilderName(s, v) # type: ignore[attr-defined] # pylint: disable=no-member
    @staticmethod
    def name_many( # pylint: disable=invalid-name
        s: List[str],
        vs: List[Any],
    ) -> List[Any]:
        """Set the name of a list of objects.
        Parameters
        ----------
        s : List[str]
            The names of the objects.
        vs : List[Any]
            The objects to name.
        Returns
        -------
        vs : List[Any]
            The same objects with the names set.
        """
        assert len(s) == len(vs)
        return [IRBuilder.name(i, v) for i, v in zip(s, vs)]
| 6,743 | 33.584615 | 103 | py |
tvm | tvm-main/python/tvm/script/ir_builder/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.script.ir_builder"""
import tvm._ffi
tvm._ffi._init_api("script.ir_builder", __name__) # pylint: disable=protected-access
| 929 | 43.285714 | 85 | py |
tvm | tvm-main/python/tvm/script/ir_builder/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""tvm.script.ir_builder is a generic IR builder for TVM."""
from .base import IRBuilder
| 874 | 45.052632 | 62 | py |
tvm | tvm-main/python/tvm/script/ir_builder/ir/frame.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Package tvm.script.ir_builder.ir.frame"""
from tvm._ffi import register_object as _register_object
from ..base import IRBuilderFrame
@_register_object("script.ir_builder.IRModuleFrame")
class IRModuleFrame(IRBuilderFrame):
    """IRBuilder frame scoping the construction of an IRModule."""
    ...
| 1,023 | 36.925926 | 62 | py |
tvm | tvm-main/python/tvm/script/ir_builder/ir/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs"""
import tvm._ffi
tvm._ffi._init_api("script.ir_builder.ir", __name__) # pylint: disable=protected-access
| 906 | 42.190476 | 88 | py |
tvm | tvm-main/python/tvm/script/ir_builder/ir/ir.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Package tvm.script.ir_builder.ir.ir"""
from typing import Dict
from tvm.runtime import Object as tvm_Object
from tvm.ir import BaseFunc, GlobalVar
from . import _ffi_api
from .frame import IRModuleFrame
def ir_module() -> IRModuleFrame:
    """Open a new ir_module frame.

    Returns
    -------
    frame: IRModuleFrame
        The constructed frame.
    """
    # Construction happens on the FFI side; this is a thin forwarding wrapper.
    return _ffi_api.IRModule()  # type: ignore[attr-defined] # pylint: disable=no-member
def decl_function(func_name: str, func_signature: BaseFunc) -> GlobalVar:
    """Declare a Function without given the specific function implementation.
    Parameters
    ----------
    func_name : str
        The function unique name.
    func_signature: BaseFunc
        A Function w/o body, which used to specify the function signature
        (i.e. func params and func return type/shape).
    Note
    ----
    It is usually used in cross-function call. And we can specify the function by `DefFunction`
    Returns
    -------
    gv : GlobalVar
        The corresponding GlobalVar.
    """
    return _ffi_api.DeclFunction( # type: ignore[attr-defined] # pylint: disable=no-member
        func_name, func_signature
    )
def def_function(func_name: str, func: BaseFunc) -> None:
    """Provide the implementation for a function previously declared
    with `decl_function`.

    Parameters
    ----------
    func_name : str
        The function unique name.
    func: BaseFunc
        The given function implementation
    """
    # The FFI call returns nothing; nothing to propagate to the caller.
    _ffi_api.DefFunction(func_name, func)  # type: ignore[attr-defined] # pylint: disable=no-member
def module_attrs(attrs: Dict[str, tvm_Object]) -> None:
    """Attach attributes to the enclosing ir_module frame.

    Parameters
    ----------
    attrs: Dict[str, Object]
        The module attrs.
    """
    # Forward the attribute dict to the FFI side; no return value.
    _ffi_api.ModuleAttrs(attrs)  # type: ignore[attr-defined] # pylint: disable=no-member
| 2,653 | 30.595238 | 106 | py |
tvm | tvm-main/python/tvm/script/ir_builder/ir/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Package tvm.script.ir_builder.ir"""
from .frame import IRModuleFrame
from .ir import (
decl_function,
def_function,
ir_module,
module_attrs,
)
| 947 | 36.92 | 62 | py |
tvm | tvm-main/python/tvm/script/ir_builder/tir/frame.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""IRBuilder for TIR"""
from typing import List, Union
from tvm._ffi import register_object as _register_object
from tvm.tir import Buffer, Var
from ..base import IRBuilderFrame
@_register_object("script.ir_builder.tir.TIRFrame")
class TIRFrame(IRBuilderFrame):
    """Base class for all TIR IRBuilder frames."""
    ...
@_register_object("script.ir_builder.tir.PrimFuncFrame")
class PrimFuncFrame(TIRFrame):
    """Frame for a TIR PrimFunc scope."""
    ...
@_register_object("script.ir_builder.tir.BlockFrame")
class BlockFrame(TIRFrame):
    """Frame for a TIR block scope."""
    ...
@_register_object("script.ir_builder.tir.BlockInitFrame")
class BlockInitFrame(TIRFrame):
    """Frame for the init section of a TIR block."""
    ...
@_register_object("script.ir_builder.tir.ForFrame")
class ForFrame(TIRFrame):
    """Frame for TIR loops; entering the with-scope yields the loop variable(s)."""
    def __enter__(self) -> Union[Var, List[Var]]:  # type: ignore[override]
        super().__enter__()
        loop_vars = self.vars
        # Multi-loop frames yield the full list of loop vars; otherwise the
        # single Var is returned directly.
        if len(loop_vars) > 1:
            return loop_vars
        return loop_vars[0]
@_register_object("script.ir_builder.tir.AssertFrame")
class AssertFrame(TIRFrame):
    """Frame for a TIR assert statement scope."""
    ...
@_register_object("script.ir_builder.tir.LetFrame")
class LetFrame(TIRFrame):
    """Frame for a TIR let binding; entering yields the bound variable."""
    def __enter__(self) -> Var:
        super().__enter__()
        return self.var
@_register_object("script.ir_builder.tir.RealizeFrame")
class RealizeFrame(TIRFrame):
    """Frame for a TIR realize scope."""
    ...
@_register_object("script.ir_builder.tir.AllocateFrame")
class AllocateFrame(TIRFrame):
    """Frame for a TIR allocate scope; entering yields the allocated buffer."""
    def __enter__(self) -> Buffer:
        super().__enter__()
        # NOTE(review): the attribute is named `buffer_var` but annotated Buffer — confirm.
        return self.buffer_var
@_register_object("script.ir_builder.tir.AllocateConstFrame")
class AllocateConstFrame(TIRFrame):
    """Frame for a TIR allocate_const scope; entering yields the constant buffer."""
    def __enter__(self) -> Buffer:
        super().__enter__()
        return self.buffer_var
@_register_object("script.ir_builder.tir.AttrFrame")
class AttrFrame(TIRFrame):
    """Frame for a TIR attr statement scope."""
    ...
@_register_object("script.ir_builder.tir.WhileFrame")
class WhileFrame(TIRFrame):
    """Frame for a TIR while loop scope."""
    ...
@_register_object("script.ir_builder.tir.IfFrame")
class IfFrame(TIRFrame):
    """Frame for a TIR if statement scope."""
    ...
@_register_object("script.ir_builder.tir.ThenFrame")
class ThenFrame(TIRFrame):
    """Frame for the then-branch of a TIR if statement."""
    ...
@_register_object("script.ir_builder.tir.ElseFrame")
class ElseFrame(TIRFrame):
    """Frame for the else-branch of a TIR if statement."""
    ...
@_register_object("script.ir_builder.tir.DeclBufferFrame")
class DeclBufferFrame(TIRFrame):
    """Frame for a TIR decl_buffer scope; entering yields the declared buffer."""
    def __enter__(self) -> Buffer:
        super().__enter__()
        return self.buffer
@_register_object("script.ir_builder.tir.LaunchThreadFrame")
class LaunchThreadFrame(TIRFrame):
    """Frame for a thread launch scope; entering yields the thread IterVar's Var."""
    def __enter__(self) -> Var:
        super().__enter__()
        return self.iter_var.var
| 3,153 | 25.066116 | 75 | py |
tvm | tvm-main/python/tvm/script/ir_builder/tir/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs"""
import tvm._ffi
tvm._ffi._init_api("script.ir_builder.tir", __name__) # pylint: disable=protected-access
| 907 | 42.238095 | 89 | py |
tvm | tvm-main/python/tvm/script/ir_builder/tir/ir.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""IRBuilder for TIR"""
import functools
import inspect
from numbers import Integral
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
# isort: off
from typing_extensions import Literal
# isort: on
import numpy as np # type: ignore
from tvm import tir
from tvm import ir
from tvm.ir import Type
from tvm.ir.base import deprecated
from tvm.runtime import String, convert, ndarray
from tvm.target import Target
# pylint: disable=unused-import
from tvm.target.codegen import llvm_lookup_intrinsic_id
from tvm.tir import Buffer, BufferRegion, IndexMap, PrimExpr
from tvm.tir import op as _tir_op
from tvm.tir import type_annotation
# import tir.expr for direct ir construction to pass structural_equal comparison
from tvm.tir.expr import (
EQ,
GE,
GT,
LE,
LT,
NE,
Add,
And,
Broadcast,
BufferLoad,
Call,
CallEffectKind,
Cast,
CommReducer,
Div,
FloatImm,
FloorDiv,
FloorMod,
IntImm,
IterVar,
Max,
Min,
Mod,
Mul,
Not,
Or,
ProducerLoad,
Ramp,
Reduce,
Select,
Shuffle,
SizeVar,
StringImm,
Sub,
Var,
)
from tvm.tir.generic import cast
from . import _ffi_api, frame
# pylint: enable=unused-import
def buffer(
    shape: Union[List[PrimExpr], Tuple[PrimExpr], PrimExpr, Integral],
    dtype: str = "float32",
    data: Var = None,
    strides: List[PrimExpr] = None,
    elem_offset: PrimExpr = None,
    scope: str = "global",
    align: int = 0,
    offset_factor: int = 0,
    buffer_type: str = "",
    axis_separators: List[int] = None,
) -> Buffer:
    """Declare a buffer.

    Parameters
    ----------
    shape : Union[List[PrimExpr], Tuple[PrimExpr], PrimExpr, Integral]
        The type of the buffer prior to flattening.
    dtype : str
        The data type in the content of the buffer.
    data : Var
        The pointer to the head of the data.
    strides : List[PrimExpr]
        The strides of each dimension.
    elem_offset : PrimExpr
        The offset in terms of number of dtype elements (including lanes).
    scope : str
        The optional storage scope of buffer data pointer.
    align : int
        The alignment requirement of data pointer in bytes.
    offset_factor : int
        The factor of elem_offset field.
    buffer_type : str
        The buffer type.
    axis_separators : List[int]
        The separators between input axes when generating flattened output axes.

    Returns
    -------
    res : Buffer
        The declared buffer.
    """
    # A scalar shape is promoted to a one-element tuple.
    if isinstance(shape, (PrimExpr, Integral)):
        shape = (shape,)
    # A string stride names a fresh int32 Var; no strides means an empty list.
    if strides is None:
        normalized_strides = []
    else:
        normalized_strides = [
            Var(stride, "int32") if isinstance(stride, str) else stride for stride in strides
        ]
    return _ffi_api.Buffer(  # type: ignore[attr-defined] # pylint: disable=no-member
        shape,
        dtype,
        "",  # buffer name is passed empty here
        data,
        normalized_strides,
        elem_offset,
        scope,
        align,
        offset_factor,
        buffer_type,
        axis_separators,
    )
@deprecated("T.buffer_decl(...)", "T.Buffer(...)")
def buffer_decl(*args, **kwargs):
    """Deprecated alias of `buffer`; forwards all arguments unchanged."""
    return buffer(*args, **kwargs)
def prim_func() -> frame.PrimFuncFrame:
    """Open a new PrimFunc frame.

    Returns
    -------
    res : frame.PrimFuncFrame
        The PrimFuncFrame.
    """
    # Frame construction is delegated to the FFI side.
    return _ffi_api.PrimFunc()  # type: ignore[attr-defined] # pylint: disable=no-member
def arg(name: str, obj: Union[Var, Buffer]) -> Union[Var, Buffer]:
    """The PrimFunc arguments adding function.
    Parameters
    ----------
    name : str
        The name of the argument.
    obj : Union[Var, Buffer]
        The argument of Var or Buffer.
    Returns
    -------
    res : Union[Var, Buffer]
        The argument.
    """
    return _ffi_api.Arg(name, obj) # type: ignore[attr-defined] # pylint: disable=no-member
def func_name(name: str) -> None:
    """The PrimFunc naming statement.
    Parameters
    ----------
    name : str
        The name of the PrimFunc.
    """
    # NOTE(review): presumably applies to the innermost PrimFunc frame — confirm.
    _ffi_api.FuncName(name) # type: ignore[attr-defined] # pylint: disable=no-member
def func_attr(attrs: Dict[str, Any]) -> None:
    """The PrimFunc annotation statement.
    Parameters
    ----------
    attrs : Dict[str, Any]
        The annotations of the PrimFunc.
    """
    _ffi_api.FuncAttrs(attrs) # type: ignore[attr-defined] # pylint: disable=no-member
def func_ret(ret_type: Type) -> Type:
    """The PrimFunc return type statement.
    Parameters
    ----------
    ret_type : Type
        The return type of the PrimFunc.
    Returns
    -------
    res : Type
        The return type.
    """
    # Declares the return type on the currently-open PrimFunc frame.
    return _ffi_api.FuncRet(ret_type)  # type: ignore[attr-defined] # pylint: disable=no-member
def match_buffer(
    param: Union[Var, BufferLoad, BufferRegion],
    shape: Union[List[PrimExpr], Tuple[PrimExpr], PrimExpr, Integral] = None,
    dtype: str = "float32",
    data: Var = None,
    strides: List[PrimExpr] = None,
    elem_offset: PrimExpr = None,
    scope: str = "global",
    align: int = -1,
    offset_factor: int = 0,
    buffer_type: str = "default",
    axis_separators: List[int] = None,
) -> Buffer:
    """The buffer match function.
    Note
    ----
    This function will perform different behavior, depending on the type of param.
    If the param is a var in function parameter, it will create a buffer from DLTensor.
    Else if the param is a subregion of other buffers, then create a subregion match inside a block.
    Example
    -------
    Match buffer from function parameter
    .. code-block:: python
        A = T.match_buffer(a, (128, 128), dtype="float32")
    Match buffer from Buffer subregion
    .. code-block:: python
        A = T.match_buffer(B[0:128, i * 128 : i * 128 + 128], (128, 128), dtype="float32")
    Parameters
    ----------
    param : Union[Var, BufferLoad, BufferRegion]
        The parameter of the PrimFunc to match.
    shape : Union[List[PrimExpr], Tuple[PrimExpr], PrimExpr, Integral]
        The type of the buffer prior to flattening.
    dtype : str
        The data type in the content of the buffer.
    data : Var
        The pointer to the head of the data.
    strides : List[PrimExpr]
        The strides of each dimension.
    elem_offset : PrimExpr
        The offset in terms of number of dtype elements (including lanes).
    scope : str
        The optional storage scope of buffer data pointer.
    align : int
        The alignment requirement of data pointer in bytes.
    offset_factor : int
        The factor of elem_offset field.
    buffer_type : str
        The buffer type.
    axis_separators : List[int]
        The separators between input axes when generating flattened output axes.
    Returns
    -------
    res : Buffer
        The matched buffer.
    """
    if shape is None:
        # When matching a subregion, shape and dtype can be inferred from the
        # region itself; a bare Var parameter carries no such information.
        if isinstance(param, BufferRegion):
            dtype = param.buffer.dtype
            shape = [region.extent for region in param.region]
        else:
            raise ValueError("Shape must be specified when binding input param")
    # Allow a scalar shape, e.g. T.match_buffer(a, 128).
    shape = (shape,) if isinstance(shape, (PrimExpr, Integral)) else shape
    if strides is not None:
        # A string stride becomes a fresh symbolic int32 variable of that name.
        strides = [Var(s, "int32") if isinstance(s, str) else s for s in strides]
    else:
        strides = []
    return _ffi_api.MatchBuffer(  # type: ignore[attr-defined] # pylint: disable=no-member
        param,
        shape,
        dtype,
        data,
        strides,
        elem_offset,
        scope,
        align,
        offset_factor,
        buffer_type,
        axis_separators,
    )
def block(name: str = "", no_realize: bool = False) -> frame.BlockFrame:
    """The block declaration statement.
    Parameters
    ----------
    name : str
        The name of the block.
    no_realize : bool
        The flag whether to construct BlockRealize or Block.
    Returns
    -------
    res : frame.BlockFrame
        The BlockFrame.
    """
    # When no_realize is True the block is emitted without a BlockRealize wrapper.
    return _ffi_api.Block(name, no_realize)  # type: ignore[attr-defined] # pylint: disable=no-member
def init() -> frame.BlockInitFrame:
    """The block initialization statement.
    Returns
    -------
    res : frame.BlockInitFrame
        The BlockInitFrame.
    """
    # Opens the init scope of a reduction block (T.init()).
    return _ffi_api.Init()  # type: ignore[attr-defined] # pylint: disable=no-member
def where(predicate: Union[PrimExpr, int]) -> None:
    """Attach a predicate to the enclosing block.
    Parameters
    ----------
    predicate : Union[PrimExpr, Literal[0, 1]]
        The predicate condition.
    """
    # bool is a subclass of int, so normalize it before the int branch runs.
    if isinstance(predicate, bool):
        predicate = IntImm("bool", predicate)
    if isinstance(predicate, int):
        if predicate not in (0, 1):
            raise ValueError(f"Invalid value for predicate: {predicate}")
        predicate = IntImm("bool", predicate)
    _ffi_api.Where(predicate)  # type: ignore[attr-defined] # pylint: disable=no-member
def reads(*buffer_slices: List[Union[BufferRegion, BufferLoad]]) -> None:
    """Declare the buffer regions read by the enclosing block.
    Parameters
    ----------
    buffer_slices : List[Union[BufferRegion, BufferLoad]]
        The array of buffer regions to read.
    """
    # Accept a single tuple/list argument, a single region, or varargs.
    if len(buffer_slices) == 1:
        sole = buffer_slices[0]
        if isinstance(sole, tuple):
            regions = list(sole)
        elif isinstance(sole, list):
            regions = sole
        else:
            regions = [sole]
    else:
        regions = list(buffer_slices)
    _ffi_api.Reads(regions)  # type: ignore[attr-defined] # pylint: disable=no-member
def writes(*buffer_slices: List[Union[BufferRegion, BufferLoad]]) -> None:
    """Declare the buffer regions written by the enclosing block.
    Parameters
    ----------
    buffer_slices : List[Union[BufferRegion, BufferLoad]]
        The array of buffer regions to write.
    """
    # Accept a single tuple/list argument, a single region, or varargs.
    if len(buffer_slices) == 1:
        sole = buffer_slices[0]
        if isinstance(sole, tuple):
            regions = list(sole)
        elif isinstance(sole, list):
            regions = sole
        else:
            regions = [sole]
    else:
        regions = list(buffer_slices)
    _ffi_api.Writes(regions)  # type: ignore[attr-defined] # pylint: disable=no-member
def block_attr(attrs: Dict[str, Any]) -> None:
    """The block annotation statement.
    Parameters
    ----------
    attrs : Dict[str, Any]
        The annotation of the block.
    """
    # Attaches annotations to the currently-open block frame.
    return _ffi_api.BlockAttrs(attrs)  # type: ignore[attr-defined] # pylint: disable=no-member
def alloc_buffer(
    shape: Union[List[PrimExpr], Tuple[PrimExpr], PrimExpr, Integral],
    dtype: str = "float32",
    data: Var = None,
    strides: List[PrimExpr] = None,
    elem_offset: PrimExpr = None,
    scope: str = "global",
    align: int = -1,
    offset_factor: int = 0,
    buffer_type: str = "default",
    axis_separators: List[int] = None,
) -> Buffer:
    """The buffer allocation function.
    Parameters
    ----------
    shape : Union[List[PrimExpr], Tuple[PrimExpr], PrimExpr, Integral]
        The type of the buffer prior to flattening.
    dtype : str
        The data type in the content of the buffer.
    data : Var
        The pointer to the head of the data.
    strides : List[PrimExpr]
        The strides of each dimension.
    elem_offset : PrimExpr
        The offset in terms of number of dtype elements (including lanes).
    scope : str
        The optional storage scope of buffer data pointer.
    align : int
        The alignment requirement of data pointer in bytes.
    offset_factor : int
        The factor of elem_offset field.
    buffer_type : str
        The buffer type.
    axis_separators : List[int]
        The separators between input axes when generating flattened output axes.
    Returns
    -------
    res : Buffer
        The allocated buffer.
    """
    # Allow a scalar shape, e.g. T.alloc_buffer(128).
    shape = (shape,) if isinstance(shape, (PrimExpr, Integral)) else shape
    if strides is not None:
        # A string stride becomes a fresh symbolic int32 variable of that name.
        strides = [Var(s, "int32") if isinstance(s, str) else s for s in strides]
    else:
        strides = []
    return _ffi_api.AllocBuffer(  # type: ignore[attr-defined] # pylint: disable=no-member
        shape,
        dtype,
        data,
        strides,
        elem_offset,
        scope,
        align,
        offset_factor,
        buffer_type,
        axis_separators,
    )
def _as_range(dom: Union[ir.Range, List[PrimExpr]]) -> ir.Range:
    """Normalize a user-supplied domain into an ir.Range.
    Parameters
    ----------
    dom : Union[Range, List[PrimExpr]]
        The domain: an existing Range, a (min, extent) pair, or a bare extent.
    Returns
    -------
    res : Range
        The Range.
    """
    if isinstance(dom, ir.Range):
        return dom
    if isinstance(dom, (list, tuple)):
        begin, end = dom[0], dom[1]
        return ir.Range(begin, end)
    # Bare extent: start from a zero of matching dtype when one is available.
    lower = IntImm(dom.dtype, 0) if hasattr(dom, "dtype") else 0
    return ir.Range(lower, dom)
class axis:  # pylint: disable=invalid-name
    """The axis class"""
    # Each helper wraps the matching IRBuilder FFI call; `dom` may be a Range,
    # a (min, extent) pair, or a bare extent (see _as_range).
    @staticmethod
    def spatial(
        dom: Union[ir.Range, List[PrimExpr], Tuple[PrimExpr]],
        binding: PrimExpr,
        dtype: str = "int32",
    ) -> Var:
        """The spatial block axis defining function.
        Parameters
        ----------
        dom : Union[Range, List[PrimExpr], Tuple[PrimExpr]]
            The domain of the iteration variable.
        binding : PrimExpr
            The binding value of the iteration variable.
        dtype : str
            The data type of the iteration variable.
        Returns
        -------
        res : Var
            The iteration variable.
        """
        return _ffi_api.AxisSpatial(  # type: ignore[attr-defined] # pylint: disable=no-member
            _as_range(dom), binding, dtype
        )
    @staticmethod
    def reduce(
        dom: Union[ir.Range, List[PrimExpr], Tuple[PrimExpr]],
        binding: PrimExpr,
        dtype: str = "int32",
    ) -> Var:
        """The reduced block axis defining function.
        Parameters
        ----------
        dom : Union[Range, List[PrimExpr], Tuple[PrimExpr]]
            The domain of the iteration variable.
        binding : PrimExpr
            The binding value of the iteration variable.
        dtype : str
            The data type of the iteration variable.
        Returns
        -------
        res : Var
            The iteration variable.
        """
        return _ffi_api.AxisReduce(  # type: ignore[attr-defined] # pylint: disable=no-member
            _as_range(dom), binding, dtype
        )
    @staticmethod
    def scan(
        dom: Union[ir.Range, List[PrimExpr], Tuple[PrimExpr]],
        binding: PrimExpr,
        dtype: str = "int32",
    ) -> Var:
        """The scanning block axis defining function.
        Parameters
        ----------
        dom : Union[Range, List[PrimExpr], Tuple[PrimExpr]]
            The domain of the iteration variable.
        binding : PrimExpr
            The binding value of the iteration variable.
        dtype : str
            The data type of the iteration variable.
        Returns
        -------
        res : Var
            The iteration variable.
        """
        return _ffi_api.AxisScan(  # type: ignore[attr-defined] # pylint: disable=no-member
            _as_range(dom), binding, dtype
        )
    @staticmethod
    def opaque(
        dom: Union[ir.Range, List[PrimExpr], Tuple[PrimExpr]],
        binding: PrimExpr,
        dtype: str = "int32",
    ) -> Var:
        """The opaque block axis defining function.
        Parameters
        ----------
        dom : Union[Range, List[PrimExpr], Tuple[PrimExpr]]
            The domain of the iteration variable.
        binding : PrimExpr
            The binding value of the iteration variable.
        dtype : str
            The data type of the iteration variable.
        Returns
        -------
        res : Var
            The iteration variable.
        """
        return _ffi_api.AxisOpaque(  # type: ignore[attr-defined] # pylint: disable=no-member
            _as_range(dom), binding, dtype
        )
    @staticmethod
    def remap(kinds: str, bindings: List[PrimExpr], dtype: str = "int32") -> Union[List[Var], Var]:
        """The block axis remapping function.
        Parameters
        ----------
        kinds : str
            The types of the iteration variables.
        bindings : List[PrimExpr]
            The binding values of the iteration variables.
        dtype : str
            The data types of the iteration variables.
        Returns
        -------
        res : Var
            The iteration variables.
        """
        # `kinds` is one character per binding, e.g. "SSR" for two spatial
        # axes followed by one reduction axis.
        iter_vars = _ffi_api.AxisRemap(  # type: ignore[attr-defined] # pylint: disable=no-member
            kinds, bindings, dtype
        )
        # A single remapped axis is returned unwrapped for convenience.
        return iter_vars[0] if len(iter_vars) == 1 else iter_vars
    S = spatial  # pylint: disable=invalid-name
    R = reduce  # pylint: disable=invalid-name
def serial(
    start: PrimExpr, stop: PrimExpr = None, *, annotations: Dict[str, Any] = None
) -> frame.ForFrame:
    """The serial For statement.
    Parameters
    ----------
    start : PrimExpr
        The minimum value of iteration.
    stop : PrimExpr
        The maximum value of iteration.
    annotations : Dict[str, Any]
        The optional annotations of the For statement.
    Returns
    -------
    res : frame.ForFrame
        The ForFrame.
    """
    # Single-argument form: serial(n) iterates [0, n).
    if stop is None:
        start, stop = 0, start
        if hasattr(stop, "dtype"):
            start = IntImm(stop.dtype, 0)
    return _ffi_api.Serial(start, stop, annotations)  # type: ignore[attr-defined] # pylint: disable=no-member
def parallel(
    start: PrimExpr, stop: PrimExpr = None, *, annotations: Dict[str, Any] = None
) -> frame.ForFrame:
    """The parallel For statement.
    Parameters
    ----------
    start : PrimExpr
        The minimum value of iteration.
    stop : PrimExpr
        The maximum value of iteration.
    annotations : Dict[str, Any]
        The optional annotations of the For statement.
    Returns
    -------
    res : frame.ForFrame
        The ForFrame.
    """
    # Single-argument form: parallel(n) iterates [0, n).
    if stop is None:
        start, stop = 0, start
        if hasattr(stop, "dtype"):
            start = IntImm(stop.dtype, 0)
    return _ffi_api.Parallel(start, stop, annotations)  # type: ignore[attr-defined] # pylint: disable=no-member
def vectorized(
    start: PrimExpr, stop: PrimExpr = None, *, annotations: Dict[str, Any] = None
) -> frame.ForFrame:
    """The vectorized For statement.
    Parameters
    ----------
    start : PrimExpr
        The minimum value of iteration.
    stop : PrimExpr
        The maximum value of iteration.
    annotations : Dict[str, Any]
        The optional annotations of the For statement.
    Returns
    -------
    res : frame.ForFrame
        The ForFrame.
    """
    # Single-argument form: vectorized(n) iterates [0, n).
    if stop is None:
        start, stop = 0, start
        if hasattr(stop, "dtype"):
            start = IntImm(stop.dtype, 0)
    return _ffi_api.Vectorized(start, stop, annotations)  # type: ignore[attr-defined] # pylint: disable=no-member
def unroll(
    start: PrimExpr, stop: PrimExpr = None, *, annotations: Dict[str, Any] = None
) -> frame.ForFrame:
    """The unrolled For statement.
    Parameters
    ----------
    start : PrimExpr
        The minimum value of iteration.
    stop : PrimExpr
        The maximum value of iteration.
    annotations : Dict[str, Any]
        The optional annotations of the For statement.
    Returns
    -------
    res : frame.ForFrame
        The ForFrame.
    """
    # Single-argument form: unroll(n) iterates [0, n).
    if stop is None:
        start, stop = 0, start
        if hasattr(stop, "dtype"):
            start = IntImm(stop.dtype, 0)
    return _ffi_api.Unroll(start, stop, annotations)  # type: ignore[attr-defined] # pylint: disable=no-member
def thread_binding(
    start: PrimExpr,
    stop: PrimExpr = None,
    thread: str = None,
    *,
    annotations: Dict[str, Any] = None,
) -> frame.ForFrame:
    """The thread-binding For statement.
    Parameters
    ----------
    start : PrimExpr
        The minimum value of iteration.
    stop : PrimExpr
        The maximum value of iteration.
    thread : str
        The thread for loop variable to bind.
    annotations : Dict[str, Any]
        The optional annotations of the For statement.
    Returns
    -------
    res : frame.ForFrame
        The ForFrame.
    """
    # Two calling conventions: thread_binding(extent, "threadIdx.x") or
    # thread_binding(min, extent, "threadIdx.x").
    if thread is None:
        if not isinstance(stop, str):
            raise ValueError("Thread cannot be None for thread_binding")
        # The second positional argument was actually the thread tag.
        thread, stop = stop, None
    if stop is None:
        stop = start
        start = IntImm(stop.dtype, 0) if hasattr(stop, "dtype") else 0
    return _ffi_api.ThreadBinding(  # type: ignore[attr-defined] # pylint: disable=no-member
        start, stop, thread, annotations
    )
def grid(*extents: PrimExpr) -> frame.ForFrame:
    """The grid For statement.
    Parameters
    ----------
    extents : PrimExpr
        The extents of the iteration.
    Returns
    -------
    res : frame.ForFrame
        The ForFrame.
    """
    # Emits one nested serial loop per extent, each starting at zero.
    return _ffi_api.Grid(extents)  # type: ignore[attr-defined] # pylint: disable=no-member
def Assert(condition: PrimExpr, message: str) -> frame.AssertFrame:  # pylint: disable=invalid-name
    """Create an assertion statement.
    Parameters
    ----------
    condition : PrimExpr
        The PrimExpr to test.
    message : str
        The output error message when the assertion fails.
    Returns
    -------
    res : frame.AssertFrame
        The result AssertFrame.
    """
    # Python bools are lifted to boolean immediates before crossing the FFI.
    cond = IntImm("bool", condition) if isinstance(condition, bool) else condition
    return _ffi_api.Assert(cond, message)  # type: ignore[attr-defined] # pylint: disable=no-member
def LetStmt(  # pylint: disable=invalid-name
    value: PrimExpr,
    type_annotation: Optional[Type] = None,  # pylint: disable=redefined-outer-name
    *,
    var: Optional[Var] = None,  # pylint: disable=redefined-outer-name
) -> frame.LetFrame:
    """Create a LetStmt binding
    Parameters
    ----------
    value : PrimExpr
        The value to be bound.
    type_annotation : Optional[Type] = None
        The type annotation of the let binding. Usually it is used for fine-grained var typing,
        particularly, PointerType.
    var : Optional[Var] = None
        The variable to bind. If not specified, a new variable will be created.
    Returns
    -------
    let_frame : frame.LetFrame
        The result LetFrame.
    """
    if type_annotation is not None:
        # Accept a dtype factory (e.g. T.int32) or an existing Var as shorthand.
        resolved = type_annotation() if callable(type_annotation) else type_annotation
        if isinstance(resolved, Var):
            resolved = resolved.type_annotation
        type_annotation = resolved
    return _ffi_api.LetStmt(value, type_annotation, var)  # type: ignore[attr-defined] # pylint: disable=no-member
def Let(  # pylint: disable=invalid-name
    expr: PrimExpr,
    where: Dict[Var, PrimExpr],  # pylint: disable=redefined-outer-name
) -> PrimExpr:
    """Create a Let expression binding"""
    assert len(where) == 1, "T.Let only allows `where` to have exactly one element"
    # Destructure the single (var, value) pair from the mapping.
    ((bound_var, bound_value),) = where.items()
    return tir.Let(bound_var, bound_value, expr)
def let(
    v: Var,
    value: PrimExpr,
    body: PrimExpr = None,
) -> frame.LetFrame:
    """Create a new let binding.
    Parameters
    ----------
    v : Var
        The variable to bind.
    value : PrimExpr
        The value to be bound.
    body : PrimExpr
        The body expression, None will be used if it was not specified.
    Returns
    -------
    res : frame.LetFrame
        The result LetFrame.
    """
    # Both forms are deprecated; each inner helper carries its own warning.
    @deprecated("T.let", "T.Let")
    def _expr_form(v: Var, value: PrimExpr, body: PrimExpr) -> PrimExpr:
        return tir.Let(v, value, body)

    @deprecated("T.let", "T.LetStmt")
    def _stmt_form(v: Var, value: PrimExpr) -> frame.LetFrame:
        return _ffi_api.LegacyLetStmt(v, value)  # type: ignore[attr-defined] # pylint: disable=no-member

    if body is None:
        return _stmt_form(v, value)
    return _expr_form(v, value, body)
def realize(
    buffer_slice: BufferRegion,
    storage_scope: str,
    condition: PrimExpr = True,
) -> frame.RealizeFrame:
    """Create a realization.
    Parameters
    ----------
    buffer_slice : BufferRegion
        The region of buffer access.
    storage_scope : str
        The storage scope associated with this realization.
    condition: PrimExpr
        The condition expression, the default is True.
    Returns
    -------
    res : frame.RealizeFrame
        The result RealizeFrame.
    """
    return _ffi_api.Realize(  # type: ignore[attr-defined] # pylint: disable=no-member
        buffer_slice, storage_scope, condition
    )
def allocate(
    extents: List[PrimExpr],
    dtype: str,
    scope: str = "global",
    condition: PrimExpr = None,
    annotations=None,
) -> frame.AllocateFrame:
    """Allocate node.
    Parameters
    ----------
    extents : List[PrimExpr]
        The extents of the allocate.
    dtype : str
        The data type of the buffer.
    scope : str
        The storage scope.
    condition : PrimExpr
        The condition.
    annotations: Optional[Mapping[str, Object]]
        Additional annotation hints.
    """
    # Python bools are lifted to boolean immediates before crossing the FFI.
    cond = IntImm("bool", condition) if isinstance(condition, bool) else condition
    return _ffi_api.Allocate(  # type: ignore[attr-defined] # pylint: disable=no-member
        extents, dtype, scope, cond, annotations
    )
def allocate_const(
    data: List[PrimExpr],
    dtype: str,
    extents: List[PrimExpr],
    annotations=None,
) -> frame.AllocateConstFrame:
    """Allocate constant node.
    Parameters
    ----------
    data : List[PrimExpr]
        The data associated with the constant.
    dtype : str
        The data type of the buffer.
    extents : List[PrimExpr]
        The extents of the allocate.
    annotations : Optional[Map]
        Additional annotations about the allocation.
    """
    np_data = np.asarray(data, dtype=dtype)
    # Reshape the flat data to `extents` only when the element counts agree;
    # symbolic extents simply fail the equality check and skip the reshape.
    requested_elems = 1
    for extent in extents:
        requested_elems *= extent
    actual_elems = 1
    for dim in np_data.shape:
        actual_elems *= dim
    if requested_elems == actual_elems:
        np_data = np_data.reshape(extents)
    return _ffi_api.AllocateConst(  # type: ignore[attr-defined] # pylint: disable=no-member
        ndarray.array(np_data), dtype, extents, annotations
    )
def attr(node: Any, attr_key: str, value: Union[PrimExpr, str]) -> frame.AttrFrame:
    """Create an attribute node.
    Parameters
    ----------
    node : Any
        The node to annotate the attribute.
    attr_key : str
        Attribute type key.
    value : Union[PrimExpr, str]
        The value of the attribute.
    Returns
    -------
    res : frame.AttrFrame
        The result AttrFrame.
    """
    # Both node and value are converted to TVM objects; the key stays a str.
    return _ffi_api.Attr(convert(node), attr_key, convert(value))  # type: ignore[attr-defined] # pylint: disable=no-member
def While(condition: PrimExpr) -> frame.WhileFrame:  # pylint: disable=invalid-name
    """Create a while node.
    Parameters
    ----------
    condition : PrimExpr
        The termination condition of the loop.
    Returns
    -------
    res : frame.WhileFrame
        The result WhileFrame.
    """
    # Python bools are lifted to boolean immediates before crossing the FFI.
    cond = IntImm("bool", condition) if isinstance(condition, bool) else condition
    return _ffi_api.While(cond)  # type: ignore[attr-defined] # pylint: disable=no-member
def If(condition: PrimExpr) -> frame.IfFrame:  # pylint: disable=invalid-name
    """Create an if node.
    Parameters
    ----------
    condition : PrimExpr
        The condition of if statement, executes the true branch if the condition is true,
        otherwise jump into the false branch.
    Returns
    -------
    res : frame.IfFrame
        The result IfFrame.
    """
    # Python bools are lifted to boolean immediates before crossing the FFI.
    cond = IntImm("bool", condition) if isinstance(condition, bool) else condition
    return _ffi_api.If(cond)  # type: ignore[attr-defined] # pylint: disable=no-member
def Then() -> frame.ThenFrame:  # pylint: disable=invalid-name
    """Create a then.
    Returns
    -------
    res : frame.ThenFrame
        The result ThenFrame.
    """
    # Opens the true branch of the innermost T.If frame.
    return _ffi_api.Then()  # type: ignore[attr-defined] # pylint: disable=no-member
def Else() -> frame.ElseFrame:  # pylint: disable=invalid-name
    """Create an else.
    Returns
    -------
    res : frame.ElseFrame
        The result ElseFrame.
    """
    # Opens the false branch of the innermost T.If frame.
    return _ffi_api.Else()  # type: ignore[attr-defined] # pylint: disable=no-member
def decl_buffer(
    shape,
    dtype="float32",
    data=None,
    strides=None,
    elem_offset=None,
    scope="global",
    align=0,
    offset_factor=0,
    buffer_type="",
    axis_separators=None,
) -> frame.DeclBufferFrame:
    """Create a buffer declaration node.
    Parameters
    ----------
    shape : Union[List[PrimExpr], Tuple[PrimExpr], PrimExpr, Integral]
        The type of the buffer prior to flattening.
    dtype : str
        The data type in the content of the buffer.
    data : Var
        The pointer to the head of the data.
    strides : List[PrimExpr]
        The strides of each dimension.
    elem_offset : PrimExpr
        The offset in terms of number of dtype elements (including lanes).
    scope : str
        The optional storage scope of buffer data pointer.
    align : int
        The alignment requirement of data pointer in bytes.
    offset_factor : int
        The factor of elem_offset field.
    buffer_type : str
        The buffer type.
    axis_separators : List[int]
        The separators between input axes when generating flattened output axes.
    Returns
    -------
    res : frame.DeclBufferFrame
        The result DeclBufferFrame.
    """
    # Allow a scalar shape, e.g. T.decl_buffer(128).
    shape = (shape,) if isinstance(shape, (PrimExpr, Integral)) else shape
    if strides is not None:
        # A string stride becomes a fresh symbolic int32 variable of that name.
        strides = [Var(s, "int32") if isinstance(s, str) else s for s in strides]
    else:
        strides = []
    return _ffi_api.DeclBuffer(  # type: ignore[attr-defined] # pylint: disable=no-member
        shape,
        dtype,
        "",  # the buffer name is filled in later by the IRBuilder
        data,
        strides,
        elem_offset,
        scope,
        align,
        offset_factor,
        buffer_type,
        axis_separators,
    )
def launch_thread(
    thread: Union[IterVar, str],  # pylint: disable=redefined-outer-name
    extent: PrimExpr,
) -> frame.LaunchThreadFrame:
    """Launch a thread.
    Parameters
    ----------
    thread : Union[IterVar, str]
        The iteration variable.
    extent : PrimExpr
        The extent of environment thread.
    Returns
    -------
    res : frame.LaunchThreadFrame
        The result LaunchThreadFrame.
    Examples
    --------
    .. code-block:: python
        from tvm.script.ir_builder import tir as T
        brow = T.env_thread("blockIdx.y")
        T.launch_thread(brow, 1)
    """
    # A bare thread tag string is wrapped into a runtime String object.
    thread_arg = String(thread) if isinstance(thread, str) else thread
    return _ffi_api.LaunchThread(thread_arg, extent)  # type: ignore[attr-defined] # pylint: disable=no-member
def env_thread(thread_tag: str) -> IterVar:
    """Bind a var to thread env
    Parameters
    ----------
    thread_tag : str
        The thread type tag.
    Returns
    -------
    res : IterVar
        The result iteration variable gets bound to the thread env.
    """
    # e.g. thread_tag = "blockIdx.x" / "threadIdx.y"
    return _ffi_api.EnvThread(thread_tag)  # type: ignore[attr-defined] # pylint: disable=no-member
def buffer_store(
    buffer: Buffer,  # pylint: disable=redefined-outer-name
    value: PrimExpr,
    indices: List[Union[PrimExpr, slice]],
) -> None:
    """Buffer store node.
    Parameters
    ----------
    buffer : Buffer
        The buffer.
    value : PrimExpr
        The value to be stored.
    indices : List[Union[PrimExpr, slice]]
        The indices location to be stored.
    """
    from tvm.arith import Analyzer  # pylint: disable=import-outside-toplevel

    expr_indices = []
    for index in indices:
        if not isinstance(index, slice):
            expr_indices.append(index)
            continue
        # A slice stores a vector: compute the lane count and emit a ramp,
        # degenerating to the scalar start index when only one lane remains.
        step = 1 if index.step is None else index.step
        lanes = Analyzer().simplify((index.stop - index.start + step - 1) // step)
        if lanes == 1:
            expr_indices.append(index.start)
        else:
            expr_indices.append(ramp(index.start, step, int(lanes)))
    if isinstance(value, bool) and buffer.dtype == "bool":
        value = IntImm("bool", value)
    return _ffi_api.BufferStore(  # type: ignore[attr-defined] # pylint: disable=no-member
        buffer, value, expr_indices
    )
def prefetch(
    buffer: Buffer,  # pylint: disable=redefined-outer-name
    bounds: List[ir.Range],
) -> None:
    """The prefetch hint for a buffer.
    Parameters
    ----------
    buffer : Buffer
        The buffer to be prefetched.
    bounds : List[Range]
        The range to be prefetched.
    """
    return _ffi_api.Prefetch(buffer, bounds)  # type: ignore[attr-defined] # pylint: disable=no-member
def evaluate(value: PrimExpr) -> None:
    """Evaluate the input expression.
    Parameters
    ----------
    value: PrimExpr
        The input expression to evaluate.
    """
    # A str and a bool can never both match, so the two checks are exclusive.
    if isinstance(value, str):
        value = StringImm(value)
    elif isinstance(value, bool):
        value = cast(value, "bool")
    return _ffi_api.Evaluate(value)  # type: ignore[attr-defined] # pylint: disable=no-member
def func_gen(name: str):
    """Generate a function for each PrimExpr dtype.
    Parameters
    ----------
    name: str
        The ffi function name to call.
    """

    def func(
        expr: Union[
            None,
            PrimExpr,
            Literal["inf", "-inf", "nan"],
            int,
            float,
        ] = None,
        *,
        is_size_var: bool = False,
    ) -> PrimExpr:
        # "inf"/"-inf"/"nan" string literals are parsed into floats; the FFI
        # symbol is looked up lazily at call time, not when func_gen runs.
        value = float(expr) if isinstance(expr, str) else expr
        return getattr(_ffi_api, name)(value, is_size_var)

    return func
# pylint: disable=invalid-name
# One constructor per TIR scalar/vector dtype: T.int32() makes a fresh var,
# T.int32(x) casts x; the "xN" suffix denotes an N-lane vector type.
int8 = func_gen(("Int8"))
int16 = func_gen(("Int16"))
int32 = func_gen(("Int32"))
int64 = func_gen(("Int64"))
int8x4 = func_gen(("Int8x4"))
int16x4 = func_gen(("Int16x4"))
int32x4 = func_gen(("Int32x4"))
int64x4 = func_gen(("Int64x4"))
int8x8 = func_gen(("Int8x8"))
int16x8 = func_gen(("Int16x8"))
int32x8 = func_gen(("Int32x8"))
int64x8 = func_gen(("Int64x8"))
int8x16 = func_gen(("Int8x16"))
int16x16 = func_gen(("Int16x16"))
int32x16 = func_gen(("Int32x16"))
int64x16 = func_gen(("Int64x16"))
int8x32 = func_gen(("Int8x32"))
int16x32 = func_gen(("Int16x32"))
int32x32 = func_gen(("Int32x32"))
int64x32 = func_gen(("Int64x32"))
int8x64 = func_gen(("Int8x64"))
int16x64 = func_gen(("Int16x64"))
int32x64 = func_gen(("Int32x64"))
int64x64 = func_gen(("Int64x64"))
uint8 = func_gen(("UInt8"))
uint16 = func_gen(("UInt16"))
uint32 = func_gen(("UInt32"))
uint64 = func_gen(("UInt64"))
uint8x4 = func_gen(("UInt8x4"))
uint16x4 = func_gen(("UInt16x4"))
uint32x4 = func_gen(("UInt32x4"))
uint64x4 = func_gen(("UInt64x4"))
uint8x8 = func_gen(("UInt8x8"))
uint16x8 = func_gen(("UInt16x8"))
uint32x8 = func_gen(("UInt32x8"))
uint64x8 = func_gen(("UInt64x8"))
uint8x16 = func_gen(("UInt8x16"))
uint16x16 = func_gen(("UInt16x16"))
uint32x16 = func_gen(("UInt32x16"))
uint64x16 = func_gen(("UInt64x16"))
uint8x32 = func_gen(("UInt8x32"))
uint16x32 = func_gen(("UInt16x32"))
uint32x32 = func_gen(("UInt32x32"))
uint64x32 = func_gen(("UInt64x32"))
uint8x64 = func_gen(("UInt8x64"))
uint16x64 = func_gen(("UInt16x64"))
uint32x64 = func_gen(("UInt32x64"))
uint64x64 = func_gen(("UInt64x64"))
float8 = func_gen(("Float8"))
float16 = func_gen(("Float16"))
float32 = func_gen(("Float32"))
float64 = func_gen(("Float64"))
float8x4 = func_gen(("Float8x4"))
float16x4 = func_gen(("Float16x4"))
float32x4 = func_gen(("Float32x4"))
float64x4 = func_gen(("Float64x4"))
float8x8 = func_gen(("Float8x8"))
float16x8 = func_gen(("Float16x8"))
float32x8 = func_gen(("Float32x8"))
float64x8 = func_gen(("Float64x8"))
float8x16 = func_gen(("Float8x16"))
float16x16 = func_gen(("Float16x16"))
float32x16 = func_gen(("Float32x16"))
float64x16 = func_gen(("Float64x16"))
float8x32 = func_gen(("Float8x32"))
float16x32 = func_gen(("Float16x32"))
float32x32 = func_gen(("Float32x32"))
float64x32 = func_gen(("Float64x32"))
float8x64 = func_gen(("Float8x64"))
float16x64 = func_gen(("Float16x64"))
float32x64 = func_gen(("Float32x64"))
float64x64 = func_gen(("Float64x64"))
# pylint: enable=invalid-name
def boolean(expr: Optional[PrimExpr] = None, is_size_var: bool = False) -> PrimExpr:
    """Construct a new tir.Var with type boolean or cast expression to type boolean.
    Parameters
    ----------
    expr: PrimExpr
        The expression to be cast.
    is_size_var: bool
        Whether or not to return a SizeVar instead of Var.
    Returns
    -------
    res : PrimExpr
        The new tir.Var with type boolean or casted expression with type boolean.
    """
    return _ffi_api.Boolean(expr, is_size_var)  # type: ignore[attr-defined] # pylint: disable=no-member
def handle(
    dtype: Optional[str] = None, storage_scope: str = "global", *, is_size_var: bool = False
) -> Var:
    """Create a TIR var that represents a pointer.
    Parameters
    ----------
    dtype: str
        The data type of the pointer.
    storage_scope: str
        The storage scope of the pointer.
    is_size_var: bool
        Whether or not to return a SizeVar instead of Var.
    Returns
    -------
    res : PrimExpr
        The new tir.Var with type handle or casted expression with type handle.
    """
    # Record whether the caller left the pointee type unspecified; the FFI
    # side needs that flag even after "void" is substituted as the dtype.
    is_unknown_type = dtype is None
    pointee_dtype = "void" if dtype is None else dtype
    return _ffi_api.Handle(  # type: ignore[attr-defined] # pylint: disable=no-member
        pointee_dtype,
        storage_scope,
        is_size_var,
        is_unknown_type,
    )
def void(expr: Optional[PrimExpr] = None, *, is_size_var: bool = False) -> PrimExpr:
    """Construct a new tir.Var with type void or cast expression to type void.
    Parameters
    ----------
    expr: PrimExpr
        The expression to be cast.
    is_size_var: bool
        Whether or not to return a SizeVar instead of Var.
    Returns
    -------
    res : PrimExpr
        The new tir.Var with type void or casted expression with type void.
    """
    return _ffi_api.Void(expr, is_size_var)  # type: ignore[attr-defined] # pylint: disable=no-member
@deprecated("T.var", "T.{dtype}")
def var(dtype: str, name: str = "") -> Var:
    """Construct a new tir.Var.
    Parameters
    ----------
    dtype: str
        The dtype of the Var.
    name: str
        The name of the Var.
    Returns
    -------
    res : Var
        The result tir.Var.
    """
    # Note the argument order swap: Var's constructor takes (name, dtype).
    return Var(name, dtype)  # pylint: disable=no-member
def ptr(dtype: str, storage_scope: str = "global", is_size_var: bool = False) -> Var:
    """The pointer declaration function.
    Parameters
    ----------
    dtype : str
        The data type of the pointer.
    storage_scope : str
        The storage scope of the pointer.
    is_size_var: bool
        Whether or not to return a SizeVar instead of Var.
    Returns
    -------
    res : Var
        The pointer.
    """
    return _ffi_api.Ptr(dtype, storage_scope, is_size_var)  # type: ignore[attr-defined] # pylint: disable=no-member
@deprecated("T.buffer_var", "T.handle")
def buffer_var(dtype: str, storage_scope: str = "global") -> Var:
    """The pointer declaration function.
    Parameters
    ----------
    dtype : str
        The data type of the pointer.
    storage_scope : str
        The storage scope of the pointer.
    Returns
    -------
    res : Var
        The pointer.
    """
    # Deprecated spelling of T.ptr/T.handle; kept for backwards compatibility.
    return _ffi_api.Ptr(dtype, storage_scope)  # type: ignore[attr-defined] # pylint: disable=no-member
def min(a: PrimExpr, b: PrimExpr) -> PrimExpr:  # pylint: disable=redefined-builtin
    """Compute the minimum value of two expressions.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand
    b : PrimExpr
        The right hand operand
    Returns
    -------
    res : PrimExpr
        The result expression.
    """
    # Shadows the builtin `min` inside TVMScript on purpose.
    return _ffi_api.min(a, b)  # type: ignore[attr-defined] # pylint: disable=no-member
def max(a: PrimExpr, b: PrimExpr) -> PrimExpr:  # pylint: disable=redefined-builtin
    """Compute the maximum value of two expressions.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand
    b : PrimExpr
        The right hand operand
    Returns
    -------
    res : PrimExpr
        The result expression.
    """
    # Shadows the builtin `max` inside TVMScript on purpose.
    return _ffi_api.max(a, b)  # type: ignore[attr-defined] # pylint: disable=no-member
def iter_var(v: Union[Var, str], dom: ir.Range, iter_type: str, thread_tag: str) -> IterVar:
    """The iteration variable.
    Parameters
    ----------
    v : Union[Var, str]
        The internal variable that is used for iteration.
    dom : Range
        The domain of the iteration.
    iter_type : str
        The iteration type, given by its IterVar attribute name
        (e.g. "DataPar").
    thread_tag : str
        The thread type tag.
    Returns
    -------
    res : IterVar
        The iteration variable.
    """
    # Resolve the textual iteration type to the IterVar enum value.
    kind = getattr(IterVar, iter_type)
    return IterVar(dom, v, kind, thread_tag)
def comm_reducer(combiner: Callable, identity: List[PrimExpr]) -> CommReducer:
    """
    Create a CommReducer from lambda inputs/outputs and the identities
    Parameters
    ----------
    combiner : Callable
        A binary function which takes two PrimExpr as input to return a PrimExpr.
    identity : List[PrimExpr]
        A list of types of output PrimExpr.
    Returns
    -------
    res : CommReducer
        The CommReducer.
    """
    params = inspect.signature(combiner).parameters
    num_args = len(params)
    # Build one fresh Var per combiner parameter; `identity + identity`
    # supplies a dtype source for both the lhs half and the rhs half.
    args = []
    for param_name, ident in zip(params.keys(), identity + identity):
        dtype = "int32" if isinstance(ident, int) else ident.dtype
        args.append(Var(param_name, dtype))
    res = combiner(*args)
    if not isinstance(res, tuple):
        res = (res,)
    half = num_args // 2
    return CommReducer(args[:half], args[half:], res, identity)
def index_map(
    mapping: Callable,
    *,
    inverse_index_map: Optional[Callable] = None,
) -> IndexMap:
    """Create a TIR Index mapping from a lambda over index variables.
    An explicit ``inverse_index_map`` may be supplied when the inverse
    cannot be derived automatically.
    """
    return IndexMap.from_func(mapping, inverse_index_map=inverse_index_map)
def target(
    target_config: Union[Dict, str],
    host: Optional[Union[Dict, str, Target]] = None,
) -> Target:
    """
    Create a target

    Parameters
    ----------
    target_config : Union[Dict, str]
        The target configuration.

    host : Optional[Union[Dict, str, Target]]
        The host configuration, if given separately from the target config.

    Returns
    -------
    res : Target
        The target.
    """
    # Validate both arguments up front so the error messages point at T.target
    # rather than at the Target constructor.
    if not isinstance(target_config, (str, dict)):
        raise ValueError(
            f"T.target expected a config dict or string, but got {type(target_config)}"
        )
    if host is not None and not isinstance(host, (str, dict, Target)):
        raise ValueError(
            "T.target expected the host to be "
            "a config dict, string, or T.target, "
            f"but got {type(host)}"
        )
    host_in_config = isinstance(target_config, dict) and "host" in target_config
    if host_in_config and host is not None:
        raise ValueError(
            "T.target expects to either receive the host "
            "as part of the target's config dictionary, "
            "or as a separate argument, but not both."
        )
    return Target(target_config, host)
def Range(begin: PrimExpr, end: PrimExpr) -> ir.Range: # pylint: disable=invalid-name
    """
    Create a Range object.
    Parameters
    ----------
    begin : PrimExpr
        The begin value of the range.
    end : PrimExpr
        The end value of the range.
    Returns
    -------
    res : ir.Range
        The created Range.
    """
    return ir.Range(begin, end)
class meta_var:  # pylint: disable=invalid-name
    """A meta variable used in TVMScript metaprogramming.

    The wrapped value never appears in the final TIR; it only exists while the
    parser is running.

    Parameters
    ----------
    value: Any
        The meta variable.
    """

    def __init__(self, value: Any) -> None:
        self.value = value

    def __iter__(self):
        # Keep iteration at the meta level: every element comes back wrapped
        # in its own meta_var.
        return (meta_var(item) for item in self.value)
# pylint: disable=invalid-name
def _op_wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if "dtype" in kwargs:
kwargs.pop("dtype")
return func(*args, **kwargs)
return wrapped
# TIR intrinsics re-exported for TVMScript.  Most are wrapped with
# ``_op_wrapper`` so an incidental ``dtype`` keyword argument is dropped before
# delegating to ``_tir_op``; a few (``tvm_struct_get``, ``tvm_storage_sync``
# and the warp-shuffle ops) are re-exported unwrapped.
abs = _op_wrapper(_tir_op.abs) # pylint: disable=redefined-builtin
acos = _op_wrapper(_tir_op.acos)
acosh = _op_wrapper(_tir_op.acosh)
address_of = _op_wrapper(_tir_op.address_of)
asin = _op_wrapper(_tir_op.asin)
asinh = _op_wrapper(_tir_op.asinh)
atan = _op_wrapper(_tir_op.atan)
atan2 = _op_wrapper(_tir_op.atan2)
atanh = _op_wrapper(_tir_op.atanh)
bitwise_and = _op_wrapper(_tir_op.bitwise_and)
bitwise_not = _op_wrapper(_tir_op.bitwise_not)
bitwise_or = _op_wrapper(_tir_op.bitwise_or)
bitwise_xor = _op_wrapper(_tir_op.bitwise_xor)
ceil = _op_wrapper(_tir_op.ceil)
clz = _op_wrapper(_tir_op.clz)
copysign = _op_wrapper(_tir_op.copysign)
cos = _op_wrapper(_tir_op.cos)
cosh = _op_wrapper(_tir_op.cosh)
erf = _op_wrapper(_tir_op.erf)
exp = _op_wrapper(_tir_op.exp)
exp2 = _op_wrapper(_tir_op.exp2)
exp10 = _op_wrapper(_tir_op.exp10)
floor = _op_wrapper(_tir_op.floor)
ceildiv = _op_wrapper(_tir_op.ceildiv)
floordiv = _op_wrapper(_tir_op.floordiv)
floormod = _op_wrapper(_tir_op.floormod)
fmod = _op_wrapper(_tir_op.fmod)
hypot = _op_wrapper(_tir_op.hypot)
if_then_else = _op_wrapper(_tir_op.if_then_else)
infinity = _op_wrapper(_tir_op.infinity)
isfinite = _op_wrapper(_tir_op.isfinite)
isinf = _op_wrapper(_tir_op.isinf)
isnan = _op_wrapper(_tir_op.isnan)
isnullptr = _op_wrapper(_tir_op.isnullptr)
ldexp = _op_wrapper(_tir_op.ldexp)
likely = _op_wrapper(_tir_op.likely)
log = _op_wrapper(_tir_op.log)
log1p = _op_wrapper(_tir_op.log1p)
log2 = _op_wrapper(_tir_op.log2)
log10 = _op_wrapper(_tir_op.log10)
lookup_param = _op_wrapper(_tir_op.lookup_param)
max_value = _op_wrapper(_tir_op.max_value)
min_value = _op_wrapper(_tir_op.min_value)
nearbyint = _op_wrapper(_tir_op.nearbyint)
nextafter = _op_wrapper(_tir_op.nextafter)
popcount = _op_wrapper(_tir_op.popcount)
pow = _op_wrapper(_tir_op.pow) # pylint: disable=redefined-builtin
q_multiply_shift = _op_wrapper(_tir_op.q_multiply_shift)
q_multiply_shift_per_axis = _op_wrapper(_tir_op.q_multiply_shift_per_axis)
ret = _op_wrapper(_tir_op.ret)
round = _op_wrapper(_tir_op.round) # pylint: disable=redefined-builtin
rsqrt = _op_wrapper(_tir_op.rsqrt)
shift_left = _op_wrapper(_tir_op.shift_left)
shift_right = _op_wrapper(_tir_op.shift_right)
sigmoid = _op_wrapper(_tir_op.sigmoid)
sin = _op_wrapper(_tir_op.sin)
sinh = _op_wrapper(_tir_op.sinh)
sqrt = _op_wrapper(_tir_op.sqrt)
tan = _op_wrapper(_tir_op.tan)
tanh = _op_wrapper(_tir_op.tanh)
trunc = _op_wrapper(_tir_op.trunc)
truncdiv = _op_wrapper(_tir_op.truncdiv)
truncmod = _op_wrapper(_tir_op.truncmod)
tvm_access_ptr = _op_wrapper(_tir_op.tvm_access_ptr)
tvm_throw_last_error = _op_wrapper(_tir_op.tvm_throw_last_error)
tvm_stack_alloca = _op_wrapper(_tir_op.tvm_stack_alloca)
tvm_stack_make_shape = _op_wrapper(_tir_op.tvm_stack_make_shape)
tvm_stack_make_array = _op_wrapper(_tir_op.tvm_stack_make_array)
tvm_check_return = _op_wrapper(_tir_op.tvm_check_return)
call_packed = _op_wrapper(_tir_op.call_packed)
call_cpacked = _op_wrapper(_tir_op.call_cpacked)
call_packed_lowered = _op_wrapper(_tir_op.call_packed_lowered)
call_cpacked_lowered = _op_wrapper(_tir_op.call_cpacked_lowered)
tvm_tuple = _op_wrapper(_tir_op.tvm_tuple)
tvm_struct_set = _op_wrapper(_tir_op.tvm_struct_set)
tvm_struct_get = _tir_op.tvm_struct_get
tvm_thread_allreduce = _op_wrapper(_tir_op.tvm_thread_allreduce)
tvm_load_matrix_sync = _op_wrapper(_tir_op.tvm_load_matrix_sync)
tvm_mma_sync = _op_wrapper(_tir_op.tvm_mma_sync)
tvm_bmma_sync = _op_wrapper(_tir_op.tvm_bmma_sync)
tvm_fill_fragment = _op_wrapper(_tir_op.tvm_fill_fragment)
tvm_store_matrix_sync = _op_wrapper(_tir_op.tvm_store_matrix_sync)
tvm_storage_sync = _tir_op.tvm_storage_sync
tvm_warp_shuffle = _tir_op.tvm_warp_shuffle
tvm_warp_shuffle_up = _tir_op.tvm_warp_shuffle_up
tvm_warp_shuffle_down = _tir_op.tvm_warp_shuffle_down
tvm_warp_activemask = _tir_op.tvm_warp_activemask
ptx_wait_group = _op_wrapper(_tir_op.ptx_wait_group)
ptx_commit_group = _op_wrapper(_tir_op.ptx_commit_group)
assume = _op_wrapper(_tir_op.assume)
undef = _op_wrapper(_tir_op.undef)
TVMBackendAllocWorkspace = _op_wrapper(_tir_op.TVMBackendAllocWorkspace)
TVMBackendFreeWorkspace = _op_wrapper(_tir_op.TVMBackendFreeWorkspace)
start_profile_intrinsic = _op_wrapper(_tir_op.start_profile_intrinsic)
end_profile_intrinsic = _op_wrapper(_tir_op.end_profile_intrinsic)
def _dtype_forward(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if "dtype" in kwargs:
args = (kwargs.pop("dtype"),) + args
return func(*args, **kwargs)
return wrapped
# Intrinsics whose first positional argument is a dtype: ``_dtype_forward``
# turns a ``dtype=...`` keyword into that leading positional argument.
reinterpret = _dtype_forward(_tir_op.reinterpret)
call_extern = _dtype_forward(_tir_op.call_extern)
call_intrin = _dtype_forward(_tir_op.call_intrin)
call_llvm_intrin = _dtype_forward(_tir_op.call_llvm_intrin)
call_llvm_pure_intrin = _dtype_forward(_tir_op.call_llvm_pure_intrin)
call_pure_extern = _dtype_forward(_tir_op.call_pure_extern)
ptx_mma = _dtype_forward(_tir_op.ptx_mma)
ptx_mma_sp = _dtype_forward(_tir_op.ptx_mma_sp)
ptx_ldmatrix = _dtype_forward(_tir_op.ptx_ldmatrix)
ptx_cp_async = _dtype_forward(_tir_op.ptx_cp_async)
mma_store = _dtype_forward(_tir_op.mma_store)
mma_fill = _dtype_forward(_tir_op.mma_fill)
vectorlow = _dtype_forward(_tir_op.vectorlow)
vectorhigh = _dtype_forward(_tir_op.vectorhigh)
vectorcombine = _dtype_forward(_tir_op.vectorcombine)
# Spelling aliases kept for TVMScript compatibility.
broadcast = Broadcast
ramp = Ramp
fabs = abs
tvm_call_packed = call_packed
tvm_call_cpacked = call_cpacked
tvm_call_packed_lowered = call_packed_lowered
tvm_call_cpacked_lowered = call_cpacked_lowered
# pylint: enable=invalid-name
# Names exported via ``from ... import *`` — keep in sync with the definitions
# above.
__all__ = [
    "int8",
    "int16",
    "int32",
    "int64",
    "int8x4",
    "int16x4",
    "int32x4",
    "int64x4",
    "int8x8",
    "int16x8",
    "int32x8",
    "int64x8",
    "int8x16",
    "int16x16",
    "int32x16",
    "int64x16",
    "int8x32",
    "int16x32",
    "int32x32",
    "int64x32",
    "int8x64",
    "int16x64",
    "int32x64",
    "int64x64",
    "uint8",
    "uint16",
    "uint32",
    "uint64",
    "uint8x4",
    "uint16x4",
    "uint32x4",
    "uint64x4",
    "uint8x8",
    "uint16x8",
    "uint32x8",
    "uint64x8",
    "uint8x16",
    "uint16x16",
    "uint32x16",
    "uint64x16",
    "uint8x32",
    "uint16x32",
    "uint32x32",
    "uint64x32",
    "uint8x64",
    "uint16x64",
    "uint32x64",
    "uint64x64",
    "float8",
    "float16",
    "float32",
    "float64",
    "float8x4",
    "float16x4",
    "float32x4",
    "float64x4",
    "float8x8",
    "float16x8",
    "float32x8",
    "float64x8",
    "float8x16",
    "float16x16",
    "float32x16",
    "float64x16",
    "float8x32",
    "float16x32",
    "float32x32",
    "float64x32",
    "float8x64",
    "float16x64",
    "float32x64",
    "float64x64",
    "buffer",
    "buffer_decl",
    "prim_func",
    "arg",
    "func_name",
    "func_attr",
    "func_ret",
    "match_buffer",
    "block",
    "init",
    "where",
    "reads",
    "writes",
    "block_attr",
    "alloc_buffer",
    "axis",
    "serial",
    "parallel",
    "vectorized",
    "unroll",
    "thread_binding",
    "grid",
    "Assert",
    "realize",
    "allocate",
    "allocate_const",
    "attr",
    "While",
    "If",
    "Then",
    "Else",
    "decl_buffer",
    "launch_thread",
    "env_thread",
    "buffer_store",
    "prefetch",
    "evaluate",
    "boolean",
    "handle",
    "void",
    "var",
    "ptr",
    "min",
    "max",
    "iter_var",
    "comm_reducer",
    "index_map",
    "target",
    "buffer_var",
    "abs",
    "fabs",
    "acos",
    "acosh",
    "address_of",
    "asin",
    "asinh",
    "atan",
    "atan2",
    "atanh",
    "bitwise_and",
    "bitwise_not",
    "bitwise_or",
    "bitwise_xor",
    "ceil",
    "clz",
    "copysign",
    "cos",
    "cosh",
    "erf",
    "exp",
    "exp2",
    "exp10",
    "floor",
    "ceildiv",
    "floordiv",
    "floormod",
    "fmod",
    "hypot",
    "if_then_else",
    "infinity",
    "isfinite",
    "isinf",
    "isnan",
    "isnullptr",
    "ldexp",
    "likely",
    "log",
    "log1p",
    "log2",
    "log10",
    "lookup_param",
    "max_value",
    "min_value",
    "nearbyint",
    "nextafter",
    "popcount",
    "pow",
    "q_multiply_shift",
    "q_multiply_shift_per_axis",
    "ret",
    "reinterpret",
    "round",
    "rsqrt",
    "shift_left",
    "shift_right",
    "sigmoid",
    "sin",
    "sinh",
    "sqrt",
    "tan",
    "tanh",
    "trunc",
    "truncdiv",
    "truncmod",
    "tvm_access_ptr",
    "tvm_throw_last_error",
    "tvm_stack_alloca",
    "tvm_stack_make_shape",
    "tvm_stack_make_array",
    "tvm_check_return",
    "call_packed",
    "call_cpacked",
    "call_packed_lowered",
    "call_cpacked_lowered",
    "call_extern",
    "call_intrin",
    "call_llvm_intrin",
    "call_llvm_pure_intrin",
    "call_pure_extern",
    "tvm_tuple",
    "tvm_struct_set",
    "tvm_struct_get",
    "tvm_thread_allreduce",
    "tvm_load_matrix_sync",
    "tvm_mma_sync",
    "tvm_bmma_sync",
    "tvm_fill_fragment",
    "tvm_store_matrix_sync",
    "tvm_storage_sync",
    "tvm_warp_shuffle",
    "tvm_warp_shuffle_up",
    "tvm_warp_shuffle_down",
    "tvm_warp_activemask",
    "ptx_mma",
    "ptx_mma_sp",
    "ptx_ldmatrix",
    "ptx_cp_async",
    "ptx_wait_group",
    "ptx_commit_group",
    "mma_store",
    "mma_fill",
    "vectorlow",
    "vectorhigh",
    "vectorcombine",
    "assume",
    "undef",
    "tvm_call_packed",
    "tvm_call_cpacked",
    "tvm_call_packed_lowered",
    "tvm_call_cpacked_lowered",
    "TVMBackendAllocWorkspace",
    "TVMBackendFreeWorkspace",
    "start_profile_intrinsic",
    "end_profile_intrinsic",
    "meta_var",
    "llvm_lookup_intrinsic_id",
    "type_annotation",
    "broadcast",
    "ramp",
    "cast",
    # tvm.tir.expr
    "Var",
    "SizeVar",
    "Reduce",
    "FloatImm",
    "IntImm",
    "StringImm",
    "Cast",
    "Add",
    "Sub",
    "Mul",
    "Div",
    "Mod",
    "FloorDiv",
    "FloorMod",
    "Min",
    "Max",
    "EQ",
    "NE",
    "LT",
    "LE",
    "GT",
    "GE",
    "And",
    "Or",
    "Not",
    "Select",
    "BufferLoad",
    "ProducerLoad",
    "Ramp",
    "Broadcast",
    "Shuffle",
    "Call",
    "CallEffectKind",
    "let",
    "LetStmt",
    "Let",
    "IterVar",
    "CommReducer",
    "Range",
]
| 55,947 | 24.806273 | 116 | py |
tvm | tvm-main/python/tvm/script/ir_builder/tir/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Package tvm.script.ir_builder.tir"""
from .ir import * # pylint: disable=wildcard-import,redefined-builtin
from .ir import boolean as bool # pylint: disable=redefined-builtin
from .ir import buffer as Buffer
| 998 | 46.571429 | 70 | py |
tvm | tvm-main/python/tvm/script/parser/_core.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The core parser infra"""
# pylint: disable=unused-import
from .core import dispatch, doc, utils
from .core.dispatch import OpMethod, register_op
from .core.entry import parse, parse_macro
from .core.parser import Parser
| 1,007 | 42.826087 | 62 | py |
tvm | tvm-main/python/tvm/script/parser/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The parser"""
from . import _core, ir, tir
from ._core import parse
from .ir import ir_module
from .tir import prim_func
| 908 | 40.318182 | 62 | py |
tvm | tvm-main/python/tvm/script/parser/core/error.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Error classes for diagnostics."""
from . import doc
class ParserError(Exception):
    """Diagnostic error that remembers the doc AST node where it occurred."""

    def __init__(self, node: doc.AST, msg: str):
        super().__init__(msg)
        # Keep the offending node so callers can render source context.
        self.node = node
| 1,016 | 36.666667 | 62 | py |
tvm | tvm-main/python/tvm/script/parser/core/parser.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The core parser"""
from collections import defaultdict
from contextlib import contextmanager
from typing import Any, Callable, Dict, List, Optional, Set, Union
import numpy as np
from tvm._ffi.base import TVMError
from tvm.error import DiagnosticError
from tvm.ir import GlobalVar
from . import dispatch, doc
from .diagnostics import Diagnostics, Source
from .evaluator import eval_assign, eval_expr
# AST node type names that the parser handles with ``generic_visit`` instead of
# a dedicated ``visit_*`` method.
DEFAULT_VISIT = {
    "Interactive",
    "Module",
    "Expression",
    "Pass",
}
def _deferred(exit_f: Callable[[], None]):
"""Created context with certain exit function.
Parameters
----------
exit_f : Callable[[], None]
The function to call when exiting the context.
Returns
-------
res : Any
The created context.
"""
@contextmanager
def context():
try:
yield
finally:
exit_f()
return context()
def _do_nothing(*args, **kwargs): # pylint: disable=unused-argument
pass
class VarTableFrame:
    """One scope's worth of variable names.

    A frame records every variable defined inside a single block or scope so
    they can all be undone together when the scope closes.

    Parameters
    ----------
    vars : Set[str]
        The set of variable names in the variable table frame.
    """

    vars: Set[str]

    def __init__(self):
        self.vars = set()

    def add(self, var: str):
        """Record a newly defined variable name in this frame.

        Parameters
        ----------
        var : str
            The name of new variable.

        Raises
        ------
        ValueError
            If the name is already defined in the current scope.
        """
        if var in self.vars:
            raise ValueError(f"Variable {var} already defined in current scope")
        self.vars.add(var)

    def pop_all(self, fn_pop: Callable[[str], None]):
        """Drop every variable in this frame, calling ``fn_pop`` per name.

        Parameters
        ----------
        fn_pop : Callable[[str], None]
            The methods to call when popping each variable.
        """
        for name in self.vars:
            fn_pop(name)
        self.vars.clear()
class VarTable:
    """Stack-scoped table of all variables seen while parsing TVMScript.

    Parameters
    ----------
    frames : List[VarTableFrame]
        The list or stack of variable table frame.

    name2value : Dict[str, List[Any]]
        The dictionary for variable table name-based query.
    """

    frames: List[VarTableFrame]
    name2value: Dict[str, List[Any]]

    def __init__(self):
        self.frames = []
        self.name2value = defaultdict(list)

    def with_frame(self):
        """Push a fresh frame and return a context that pops it on exit.

        Returns
        -------
        res : Any
            The context with new variable table frame.
        """

        def _pop_frame():
            frame = self.frames.pop()
            frame.pop_all(lambda name: self.name2value[name].pop())

        self.frames.append(VarTableFrame())
        return _deferred(_pop_frame)

    def add(self, var: str, value: Any, allow_shadowing: bool = False):
        """Bind ``value`` to ``var`` in the innermost frame.

        Parameters
        ----------
        var : str
            The name of variable.

        value : Any
            The value of variable.

        allow_shadowing : bool
            Whether variable shadowing is allowed for this variable.
        """
        # Re-binding a name to an equal value is a no-op.
        stack = self.name2value[var]
        if stack and isinstance(stack[-1], type(value)):
            if isinstance(value, np.ndarray) and (stack[-1] == value).all():
                return
            elif stack[-1] == value:
                return
        if allow_shadowing and var in self.frames[-1].vars:
            # Shadowing: replace the current binding in place.
            stack[-1] = value
        else:
            self.frames[-1].add(var)
            stack.append(value)

    def get(self) -> Dict[str, Any]:
        """Return a snapshot mapping each live name to its newest value.

        Returns
        -------
        res : Any
            The variable dictionary copy of latest variables.
        """
        return {name: stack[-1] for name, stack in self.name2value.items() if stack}

    def exist(self, value: Any) -> bool:
        """Check whether ``value`` is already bound anywhere in the table.

        Parameters
        ----------
        value : Any
            The value of variable.

        Returns
        -------
        res : bool
            The existence of the value.
        """
        return any(
            value.same_as(candidate)
            for stack in self.name2value.values()
            for candidate in stack
        )
def _dispatch_wrapper(func: dispatch.ParseMethod) -> dispatch.ParseMethod:
    """Wrap a parse method so unexpected exceptions become parser diagnostics."""

    def _wrapper(self: "Parser", node: doc.AST) -> None:
        try:
            return func(self, node)
        except DiagnosticError:
            # Already reported; just propagate.
            raise
        except Exception as err:  # pylint: disable=broad-except
            self.report_error(node, err)
            raise

    return _wrapper
def _dispatch(self: "Parser", type_name: str) -> dispatch.ParseMethod:
    """Look up the parse method for ``type_name``, preferring the active token
    and falling back to "default", then to ``generic_visit``."""
    for candidate in (self.dispatch_tokens[-1], "default"):
        method = dispatch.get(token=candidate, type_name=type_name, default=None)
        if method is not None:
            return _dispatch_wrapper(method)
    return _dispatch_wrapper(lambda self, node: self.generic_visit(node))
class Parser(doc.NodeVisitor):
    """The TVMScript parser
    Parameters
    ----------
    diag : Diagnostics
        The diagnostics for error reporting.
    dispatch_tokens : List[str]
        The list of dispatching tokens to dispatching parsing method
        of different IRs and different doc AST structure.
    var_table : VarTable
        The variable table for parsing.
    """
    diag: Diagnostics
    dispatch_tokens: List[str]
    function_annotations: Optional[Dict[str, Dict[str, Any]]]
    var_table: VarTable
    def __init__(
        self,
        source: Source,
        function_annotations: Dict[str, Dict[str, Any]],
    ) -> None:
        self.diag = Diagnostics(source)
        self.dispatch_tokens = ["default"]
        self.function_annotations = function_annotations
        self.var_table = VarTable()
    def parse(self, extra_vars: Optional[Dict[str, Any]] = None) -> Any:
        """The main parse method for parser.
        Parameters
        ----------
        extra_vars : Optional[Dict[str, Any]]
            The optional global value table for parsing.
        Returns
        -------
        res : Any
            Always ``None``; visiting builds the parse result through side
            effects on the parser state.
        """
        if extra_vars is None:
            extra_vars = {}
        with self.var_table.with_frame():
            for k, v in extra_vars.items():
                self.var_table.add(k, v)
            node = self.diag.source.as_ast()
            self.visit(node)
    def get_dispatch_token(self, node: doc.FunctionDef) -> str:
        """Return the dispatch token declared by the function's decorator."""
        if not isinstance(node, doc.FunctionDef):
            self.report_error(node, "Only can get dispatch token for function.")
        if not node.decorator_list:
            self.report_error(node, "Function must be decorated")
        # TODO: only the last decorator is parsed
        decorator = self.eval_expr(node.decorator_list[-1])
        if not hasattr(decorator, "dispatch_token"):
            self.report_error(node, "The parser does not understand the decorator")
        return decorator.dispatch_token
    def with_dispatch_token(self, token: str):
        """Add a new dispatching token as with statement.
        Parameters
        ----------
        token : str
            The dispatching token.
        Returns
        -------
        res : Any
            The context with new dispatching token.
        """
        def pop_token():
            self.dispatch_tokens.pop()
        self.dispatch_tokens.append(token)
        return _deferred(pop_token)
    def eval_expr(
        self,
        node: Union[doc.Expression, doc.expr],
        extra_vars: Optional[Dict[str, Any]] = None,
    ) -> Any:
        """Expression evaluation when parsing.
        Parameters
        ----------
        node : Union[doc.expr, doc.Expression]
            The root node of AST tree node of expression to evaluate.
        extra_vars : Optional[Dict[str, Any]]
            The optional global value table for expression evaluation.
        Returns
        -------
        res : Any
            The evaluation result.
        """
        var_values = self.var_table.get()
        if extra_vars is not None:
            for k, v in extra_vars.items():
                var_values[k] = v
        return eval_expr(self, node, var_values)
    def _duplicate_lhs_check(self, target: doc.expr) -> Union[bool, Set[str]]:
        """Check whether duplicate lhs exists in assignment.
        Parameters
        ----------
        target : doc.expr
            The doc AST expr node for lhs.
        Returns
        -------
        res : Union[bool, Set[str]]
            The result of true if duplicate lhs exists,
            or the set of lhs names if no duplicate lhs exists.
        """
        if isinstance(target, (doc.Tuple, doc.List)):
            vars: Set[str] = set()  # pylint: disable=redefined-builtin
            for i in target.elts:
                res = self._duplicate_lhs_check(i)
                if isinstance(res, bool) and res:
                    return True
                assert isinstance(res, set)
                if vars & res:
                    return True
                vars = vars.union(res)
            return vars
        elif isinstance(target, doc.Name):
            return {target.id}
        else:
            self.report_error(target, "Invalid type in assign statement")
            # report_error does not return; this raise is a safety net.
            raise NotImplementedError
    def eval_assign(
        self,
        target: doc.expr,
        source: Any,
        bind_value: Callable[["Parser", doc.expr, str, Any], Any],
        allow_shadowing: bool = False,
    ) -> Dict[str, Any]:
        """Expression assignment evaluation when parsing.
        Parameters
        ----------
        target : doc.expr
            The root node of AST tree node of assigned expression to evaluate.
        source : Any
            The source to be assigned with evaluated expression.
        bind_value : Callable[["Parser", doc.expr, str, Any], Any]
            The value binding method when assigning the values to variables.
        allow_shadowing : bool
            The options of whether variable shadowing allowed for assignment.
        Returns
        -------
        res : Dict[str, Any]
            The dictionary of assignment result.
        """
        if self._duplicate_lhs_check(target) is True:
            self.report_error(target, "Duplicate vars assigned.")
        var_values = eval_assign(self, target, source)
        for k, v in var_values.items():
            var = bind_value(self, target, k, v)
            self.var_table.add(k, var, allow_shadowing)
        return var_values
    def report_error(
        self, node: doc.AST, err: Union[Exception, str]
    ) -> None:  # pylint: disable=no-self-use
        """The error reporting when parsing.
        Parameters
        ----------
        node : doc.AST
            The doc AST node with errors.
        err: Union[Exception, str]
            The error to report.
        """
        # Only take the last line of the error message
        if isinstance(err, TVMError):
            msg = list(filter(None, str(err).split("\n")))[-1]
        elif isinstance(err, KeyError):
            msg = "KeyError: " + str(err)
        else:
            msg = str(err)
        self.diag.error(node, msg)
    def visit(self, node: doc.AST) -> None:
        """The general visiting method.
        Parameters
        ----------
        node : doc.AST
            The doc AST node.
        Returns
        -------
        res : Any
            The visiting result.
        """
        if isinstance(node, (list, tuple)):
            for item in node:
                self.visit(item)
            return
        if not isinstance(node, doc.AST):
            return
        # Dispatch by the node's class name, e.g. ``visit_FunctionDef``.
        name = node.__class__.__name__.split(".")[-1]
        if name in DEFAULT_VISIT:
            func = self.generic_visit
        else:
            func = getattr(self, "visit_" + name, None)
        if func is None:
            raise NotImplementedError(f"Visitor of AST node is not implemented: {name}")
        try:
            func(node)
        except DiagnosticError:
            raise
        except Exception as e:  # pylint: disable=broad-except,invalid-name
            self.report_error(node, str(e))
            raise
    def visit_body(self, node: List[doc.stmt]) -> Any:
        """The general body visiting method.
        Parameters
        ----------
        node : List[doc.stmt]
            The list of statements in body.
        Returns
        -------
        res : Any
            The visiting result.
        """
        for stmt in node:
            self.visit(stmt)
    def visit_tvm_annotation(self, node: doc.expr) -> Any:
        """The general TVM annotation visiting method.
        Parameters
        ----------
        node : doc.expr
            The doc AST expr node.
        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "tvm_annotation")(self, node)
    def visit_FunctionDef(self, node: doc.FunctionDef) -> None:  # pylint: disable=invalid-name
        """The general function definition visit method.
        Parameters
        ----------
        node : doc.FunctionDef
            The doc FunctionDef node.
        """
        token = self.get_dispatch_token(node)
        current_token = self.dispatch_tokens[-1]
        func = dispatch.get(token=token, type_name="FunctionDef", default=None)
        if func is None:
            self.report_error(node, "The parser does not understand the decorator")
        # pre/post hooks let the currently-active token react around a
        # function parsed under a (possibly different) token.
        pre_func = dispatch.get(
            token=current_token, type_name="pre_token_switch", default=_do_nothing
        )
        post_func = dispatch.get(
            token=current_token, type_name="post_token_switch", default=_do_nothing
        )
        pre_func(self, node)
        _dispatch_wrapper(func)(self, node)
        post_func(self, node)
    def visit_tvm_declare_function(self, node: doc.FunctionDef) -> GlobalVar:
        """Declare the function under its own dispatch token and return the
        GlobalVar produced by the dispatched handler."""
        token = self.get_dispatch_token(node)
        with self.with_dispatch_token(token):
            return _dispatch(self, "tvm_declare_function")(self, node)
    def visit_ClassDef(self, node: doc.ClassDef) -> Any:  # pylint: disable=invalid-name
        """The general class definition visiting method.
        Parameters
        ----------
        node : doc.ClassDef
            The doc AST class definition node.
        Returns
        -------
        res : Any
            The visiting result.
        """
        func = dispatch.get(token="ir", type_name="ClassDef", default=None)
        if func is None:
            self.report_error(node, "The parser does not understand the decorator")
        _dispatch_wrapper(func)(self, node)
    def visit_arguments(self, node: doc.arguments) -> Any:
        """The general arguments visiting method.
        Parameters
        ----------
        node : doc.arguments
            The doc AST arguments node.
        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "arguments")(self, node)
    def visit_For(self, node: doc.For) -> Any:  # pylint: disable=invalid-name
        """The general for visiting method.
        Parameters
        ----------
        node : doc.For
            The doc AST for node.
        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "For")(self, node)
    def visit_While(self, node: doc.While) -> Any:  # pylint: disable=invalid-name
        """The general while visiting method.
        Parameters
        ----------
        node : doc.While
            The doc AST while node.
        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "While")(self, node)
    def visit_With(self, node: doc.With) -> Any:  # pylint: disable=invalid-name
        """The general with visiting method.
        Parameters
        ----------
        node : doc.With
            The doc AST with node.
        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "With")(self, node)
    def visit_Assign(self, node: doc.Assign) -> Any:  # pylint: disable=invalid-name
        """The general assign visiting method.
        Parameters
        ----------
        node : doc.Assign
            The doc AST assign node.
        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "Assign")(self, node)
    def visit_AnnAssign(self, node: doc.AnnAssign) -> Any:  # pylint: disable=invalid-name
        """The general annotated assign visiting method.
        Parameters
        ----------
        node : doc.Assign
            The doc AST annotated assign node.
        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "AnnAssign")(self, node)
    def visit_Expr(self, node: doc.Expr) -> Any:  # pylint: disable=invalid-name
        """The general expression visiting method.
        Parameters
        ----------
        node : doc.Expr
            The doc AST expression node.
        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "Expr")(self, node)
    def visit_If(self, node: doc.If) -> Any:  # pylint: disable=invalid-name
        """The general if visiting method.
        Parameters
        ----------
        node : doc.If
            The doc AST if node.
        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "If")(self, node)
    def visit_AugAssign(self, node: doc.AugAssign) -> Any:  # pylint: disable=invalid-name
        """The general augmented assignment visiting method.
        Parameters
        ----------
        node : doc.AugAssign
            The doc AST augmented assignment node.
        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "AugAssign")(self, node)
    def visit_Assert(self, node: doc.Assert) -> Any:  # pylint: disable=invalid-name
        """The general assert visiting method.
        Parameters
        ----------
        node : doc.Assert
            The doc AST assert node.
        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "Assert")(self, node)
    def visit_Return(self, node: doc.Return) -> Any:  # pylint: disable=invalid-name
        """The general return visiting method.
        Parameters
        ----------
        node : doc.Return
            The doc AST return node.
        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "Return")(self, node)
| 20,176 | 28.031655 | 95 | py |
tvm | tvm-main/python/tvm/script/parser/core/doc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM Script Parser doc AST"""
import ast
import inspect
import sys
import typing
from collections import defaultdict
from . import doc_core as doc
from .doc_core import * # pylint: disable=unused-import,wildcard-import,redefined-builtin,W0614
# Converter signatures: native python ``ast`` node -> TVMScript ``doc`` node,
# and back.
FnToDoc = typing.Callable[[ast.AST], doc.AST]
FnFromDoc = typing.Callable[[doc.AST], ast.AST]
class Entry:
    """One conversion record for a single python AST node type.

    Attributes
    ----------
    to_doc : typing.Optional[FnToDoc]
        Converter from a python AST node to the doc AST, or None if unset.
    from_doc : typing.Optional[FnFromDoc]
        Converter from the doc AST back to a python AST node, or None if unset.
    """

    to_doc: typing.Optional[FnToDoc]
    from_doc: typing.Optional[FnFromDoc]

    def __init__(self):
        # Both directions start unregistered.
        self.to_doc = self.from_doc = None
class Registry:
    """Registration map from python AST node type name to its conversion Entry.

    Attributes
    ----------
    _inst : typing.Optional["Registry"]
        The singleton instance, installed by `_register_default`.
    table : typing.Dict[str, Entry]
        Maps a python AST node type name to the to_doc/from_doc pair.
    """

    _inst: typing.Optional["Registry"] = None
    table: typing.Dict[str, Entry]

    def __init__(self):
        # defaultdict so that registering either direction first
        # transparently creates the Entry.
        self.table = defaultdict(Entry)
def register_to_doc(name: str):
    """Build a registrar for the to_doc converter of one python AST node type.

    Parameters
    ----------
    name : str
        The type of python AST node.

    Returns
    -------
    f : Callable[[FnToDoc], None]
        Call it with the converter to install it in the global Registry.
    """

    def _do_register(converter: FnToDoc):
        Registry._inst.table[name].to_doc = converter  # pylint: disable=protected-access

    return _do_register
def register_from_doc(name: str):
    """Build a registrar for the from_doc converter of one python AST node type.

    Parameters
    ----------
    name : str
        The type of python AST node.

    Returns
    -------
    f : Callable[[FnFromDoc], None]
        Call it with the converter to install it in the global Registry.
    """

    def _do_register(converter: FnFromDoc):
        Registry._inst.table[name].from_doc = converter  # pylint: disable=protected-access

    return _do_register
def _is_atomic_type(node):
return (
node is None
or node in [..., True, False]
or isinstance(
node,
(
int,
float,
str,
bool,
bytes,
complex,
),
)
)
def _get_registry_entry(cls_name, attr):
    """Look up the `attr` ("to_doc" / "from_doc") converter registered for a class name.

    Returns None when the class name is unregistered or that direction is unset.
    """
    short_name = cls_name.split(".")[-1]
    table = Registry._inst.table  # pylint: disable=protected-access
    # Membership test first so the defaultdict is not polluted with empty entries.
    if short_name not in table:
        return None
    return getattr(table[short_name], attr, None)
def from_doc(node):
    """Convert a doc AST node back into the original python AST node.

    Parameters
    ----------
    node : doc.AST
        The doc AST node.

    Returns
    -------
    res : ast.AST
        The corresponding AST node.
    """
    if _is_atomic_type(node):
        return node
    if isinstance(node, (tuple, list)):
        # Convert children recursively, preserving the container type.
        children = [from_doc(child) for child in node]
        return tuple(children) if isinstance(node, tuple) else children
    converter = _get_registry_entry(node.__class__.__name__, "from_doc")
    if not converter:
        raise NotImplementedError(f"from_doc is not implemented for: {node.__class__.__name__}")
    return converter(node)
def to_doc(node):
    """Convert a python AST node into the corresponding doc AST node.

    Parameters
    ----------
    node : ast.AST
        The AST node.

    Returns
    -------
    res : doc.AST
        The corresponding doc AST node.
    """
    if _is_atomic_type(node):
        return node
    if isinstance(node, (tuple, list)):
        # Convert children recursively, preserving the container type.
        children = [to_doc(child) for child in node]
        return tuple(children) if isinstance(node, tuple) else children
    converter = _get_registry_entry(node.__class__.__name__, "to_doc")
    if not converter:
        raise NotImplementedError(f"to_doc is not implemented for: {node.__class__.__name__}")
    return converter(node)
def parse(
    source: str,
    filename: str = "<unknown>",
    mode: str = "exec",
) -> doc.AST:
    """Parse TVMScript source code str to doc AST.

    Its interface is consistent with python built-in ast.parse.
    And it will parse by python 3.8 first if possible,
    or it will parse with python version in current environment.

    Parameters
    ----------
    source : str
        The TVMScript source code.
    filename : str
        The optional filename of the file where source code locates.
    mode : str
        The parsing mode for ast.parse.

    Returns
    -------
    res : doc.AST
        The parsed doc AST.
    """
    try:
        # Prefer the python 3.8 grammar so scripts parse consistently
        # across interpreter versions.
        program = ast.parse(  # pylint: disable=unexpected-keyword-arg
            source=source,
            filename=filename,
            mode=mode,
            feature_version=(3, 8),
        )
    except Exception:  # pylint: disable=broad-except
        # Fall back to the running interpreter's grammar (e.g. the source
        # uses post-3.8 syntax, or feature_version is unsupported). A bare
        # `except:` here would also swallow KeyboardInterrupt/SystemExit;
        # keep those fatal.
        program = ast.parse(
            source=source,
            filename=filename,
            mode=mode,
        )
    return to_doc(program)
class NodeVisitor:
    """Read-only visitor over the doc AST.

    Dispatches to `visit_<ClassName>` when such a method exists, otherwise
    recurses through the node's `_FIELDS` via `generic_visit`.
    """

    def visit(self, node: doc.AST) -> None:
        if isinstance(node, (list, tuple)):
            for item in node:
                self.visit(item)
            return
        if not isinstance(node, doc.AST):
            return
        method_name = "visit_" + node.__class__.__name__.split(".")[-1]
        visitor = getattr(self, method_name, self.generic_visit)
        visitor(node)

    def generic_visit(self, node: doc.AST) -> None:
        # Walk every declared field; missing/None fields are skipped,
        # as isinstance filters them out.
        for field in node.__class__._FIELDS:  # pylint: disable=protected-access
            child = getattr(node, field, None)
            if isinstance(child, (doc.AST, list, tuple)):
                self.visit(child)
class NodeTransformer:
    """Rewriting visitor over the doc AST.

    Like NodeVisitor, but every visit returns a (possibly new) node;
    `generic_visit` rebuilds the node from its transformed fields.
    """

    def visit(self, node: doc.AST) -> doc.AST:
        if isinstance(node, list):
            return [self.visit(item) for item in node]
        if isinstance(node, tuple):
            return tuple(self.visit(item) for item in node)
        if not isinstance(node, doc.AST):
            return node
        method_name = "visit_" + node.__class__.__name__.split(".")[-1]
        transformer = getattr(self, method_name, self.generic_visit)
        return transformer(node)

    def generic_visit(self, node: doc.AST) -> doc.AST:
        new_fields: typing.Dict[str, typing.Any] = {}
        for field in node.__class__._FIELDS:  # pylint: disable=protected-access
            child = getattr(node, field, None)
            if isinstance(child, (doc.AST, list, tuple)):
                child = self.visit(child)
            new_fields[field] = child
        # Reconstruct the same node class from the transformed fields.
        return node.__class__(**new_fields)
def _register_default():
    """Install default converters for every doc class that mirrors an ast class.

    Resets the global Registry, then for each class in `doc` that also exists
    in `ast` registers field-by-field translators in both directions.
    """

    class DefaultTranslator:
        """Translate a node by recursively converting each declared field."""

        def __init__(self, doc_cls, func, fields):
            self.doc_cls = doc_cls  # class to instantiate (doc- or ast-side)
            self.func = func  # recursive converter: to_doc or from_doc
            self.fields = fields

        def __call__(self, node):
            kv = {attr: self.func(getattr(node, attr, None)) for attr in self.fields}
            return self.doc_cls(**kv)

    Registry._inst = Registry()  # pylint: disable=protected-access
    for cls_name in dir(doc):
        doc_cls = getattr(doc, cls_name)
        if not hasattr(ast, cls_name):
            continue
        if not (inspect.isclass(doc_cls) and issubclass(doc_cls, doc.AST)):
            continue
        assert "." not in cls_name
        fields = doc_cls._FIELDS  # pylint: disable=protected-access
        register_to_doc(cls_name)(DefaultTranslator(doc_cls, to_doc, fields))
        register_from_doc(cls_name)(DefaultTranslator(getattr(ast, cls_name), from_doc, fields))
def _py_version() -> typing.Tuple[int, int]:
return (sys.version_info.major, sys.version_info.minor)
def _register_constant_handling():
    """Map the legacy literal nodes (Str/Num/...) onto doc.Constant.

    Only needed on python 3.6/3.7, where `ast.parse` still produces the
    deprecated per-type literal nodes instead of `ast.Constant`.
    """
    if _py_version() not in [(3, 6), (3, 7)]:
        return

    def as_constant(f) -> doc.Constant:
        # `f` is either an attribute name to read off the node, or a
        # callable computing the constant's value from the node.
        def to_doc_func(x: ast.AST) -> doc.Constant:
            value = getattr(x, f) if isinstance(f, str) else f(x)
            return doc.Constant(
                value=value,
                kind=None,
                s=None,
                n=None,
                lineno=x.lineno,
                col_offset=x.col_offset,
                end_lineno=x.lineno,
                end_col_offset=x.col_offset,
            )

        return to_doc_func

    for legacy_name, extractor in (
        ("Str", "s"),
        ("NameConstant", "value"),
        ("Num", "n"),
        ("Bytes", "s"),
        ("Ellipsis", lambda _: ...),
    ):
        register_to_doc(legacy_name)(as_constant(extractor))
def _register_subscription_handling():
    """On python < 3.9, bridge the legacy subscript node shapes.

    Pre-3.9, `ast.Subscript.slice` is wrapped in `ast.Slice`, `ast.ExtSlice`
    or `ast.Index`; the doc AST uses the 3.9+ shape (a bare expression or a
    Tuple). These two converters translate between the representations.
    """
    if _py_version() >= (3, 9):
        return
    def subscript_to_doc(x: ast.Subscript) -> doc.Subscript:
        """Lower a legacy ast.Subscript into the 3.9+-style doc.Subscript."""
        if isinstance(x.slice, ast.Slice):
            return doc.Subscript(
                value=to_doc(x.value),
                slice=doc.Slice(
                    lower=to_doc(x.slice.lower),
                    upper=to_doc(x.slice.upper),
                    step=to_doc(x.slice.step),
                    # Pre-3.9 slice wrappers may lack location info; default to None.
                    lineno=getattr(x.slice, "lineno", None),
                    col_offset=getattr(x.slice, "col_offset", None),
                    end_lineno=getattr(x.slice, "end_lineno", None),
                    end_col_offset=getattr(x.slice, "end_col_offset", None),
                ),
                ctx=to_doc(x.ctx),
                lineno=getattr(x, "lineno", None),
                col_offset=getattr(x, "col_offset", None),
                end_lineno=getattr(x, "end_lineno", None),
                end_col_offset=getattr(x, "end_col_offset", None),
            )
        if isinstance(x.slice, ast.ExtSlice):
            # ExtSlice(dims=[...]) becomes a Tuple of the dimension exprs.
            return doc.Subscript(
                value=to_doc(x.value),
                slice=doc.Tuple(
                    elts=[to_doc(i) for i in x.slice.dims],
                    ctx=doc.Load(
                        lineno=None,
                        col_offset=None,
                        end_lineno=None,
                        end_col_offset=None,
                    ),
                    lineno=getattr(x, "lineno", None),
                    col_offset=getattr(x, "col_offset", None),
                    end_lineno=getattr(x, "end_lineno", None),
                    end_col_offset=getattr(x, "end_col_offset", None),
                ),
                ctx=to_doc(x.ctx),
                lineno=getattr(x, "lineno", None),
                col_offset=getattr(x, "col_offset", None),
                end_lineno=getattr(x, "end_lineno", None),
                end_col_offset=getattr(x, "end_col_offset", None),
            )
        if isinstance(x.slice, ast.Index):
            # Index(value) is just a wrapper: unwrap to the bare expression.
            return doc.Subscript(
                value=to_doc(x.value),
                slice=to_doc(x.slice.value),
                ctx=to_doc(x.ctx),
                lineno=getattr(x, "lineno", None),
                col_offset=getattr(x, "col_offset", None),
                end_lineno=getattr(x, "end_lineno", None),
                end_col_offset=getattr(x, "end_col_offset", None),
            )
        raise TypeError(f"Unknown subscript type: {type(x.slice)}")
    def subscript_from_doc(x: doc.Subscript) -> ast.Subscript:
        """Raise a doc.Subscript back into the legacy ast.Subscript shape."""
        if isinstance(x.slice, doc.Slice):
            result = ast.Subscript(
                value=from_doc(x.value),
                slice=from_doc(x.slice),
                ctx=from_doc(x.ctx),
            )
        elif isinstance(x.slice, doc.Tuple):
            # A Tuple slice maps back to ExtSlice on pre-3.9 interpreters.
            result = ast.Subscript(
                value=from_doc(x.value),
                slice=ast.ExtSlice(
                    dims=[from_doc(i) for i in x.slice.elts],
                ),
                ctx=from_doc(x.ctx),
            )
        else:
            # Any other expression is re-wrapped in ast.Index.
            result = ast.Subscript(
                value=from_doc(x.value),
                slice=ast.Index(value=from_doc(x.slice)),
                ctx=from_doc(x.ctx),
            )
        result.lineno = x.lineno
        result.col_offset = x.col_offset
        result.end_lineno = x.end_lineno
        result.end_col_offset = x.end_col_offset
        return result
    register_to_doc("Subscript")(subscript_to_doc)
    register_from_doc("Subscript")(subscript_from_doc)
def _register_index_handling():
    """On python < 3.9, translate between ast.Index wrappers and bare doc exprs."""
    if _py_version() >= (3, 9):
        return
    def index_to_doc(x: ast.Index) -> doc.Expr:
        # ast.Index is a pure wrapper; unwrap to the underlying expression.
        return to_doc(x.value)
    def index_from_doc(x: doc.Expr) -> ast.Index:
        # NOTE(review): `x` is an arbitrary doc expression here, and not every
        # doc expr carries a `.ctx` attribute — confirm this converter is only
        # reached with ctx-bearing nodes.
        result = ast.Index(value=from_doc(x), ctx=from_doc(x.ctx))
        result.lineno = x.lineno
        result.col_offset = x.col_offset
        result.end_lineno = x.end_lineno
        result.end_col_offset = x.end_col_offset
        return result
    register_to_doc("Index")(index_to_doc)
    register_from_doc("Index")(index_from_doc)
# Populate the registry at import time: defaults first, then the
# version-specific compatibility handlers for older interpreters.
_register_default()
_register_constant_handling()
_register_subscription_handling()
_register_index_handling()
| 14,262 | 29.805616 | 96 | py |
tvm | tvm-main/python/tvm/script/parser/core/entry.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The entry point of TVM parser."""
import inspect
from typing import Any, Dict, Union
from ...ir_builder import IRBuilder
from . import doc
from .diagnostics import Source
from .error import ParserError
from .parser import Parser
def _default_globals() -> Dict[str, Any]:
    """Build the default variable table exposed while parsing TVMScript.

    Imports are local to avoid a circular import at module load time.
    """
    import tvm  # pylint: disable=import-outside-toplevel
    from tvm.script.parser import ir  # pylint: disable=import-outside-toplevel
    from tvm.script.parser import tir  # pylint: disable=import-outside-toplevel

    # Both the short aliases (I/T) and the full names are available.
    return {"tvm": tvm, "I": ir, "ir": ir, "T": tir, "tir": tir}
def parse_macro(program: Union[Any, str], extra_vars: Dict[str, Any] = None) -> Any:
    """Generate the AST, and the source code for __repr__."""
    # The AST is converted into TIR later, at expansion time; here we only
    # capture the source text, its AST, and the closure variable table.
    src = Source(program)
    txt = src.source
    tree = src.as_ast()
    # NOTE: `or` (not `is None`) — an empty dict also falls back to defaults.
    closure_vars = extra_vars or _default_globals()
    return tree, txt, closure_vars
def parse(program: Union[doc.AST, Any, str], extra_vars: Dict[str, Any] = None) -> Any:
    """Parse a TVMScript program into its IR representation.

    Parameters
    ----------
    program : Union[doc.AST, Any, str]
        The TVMScript code to parse.
    extra_vars : Dict[str, Any]
        The extra variable table for parsing; defaults to the standard
        TVMScript globals when None.

    Returns
    -------
    func : Any
        The parsed TVMScript program.
    """
    if extra_vars is None:
        extra_vars = _default_globals()

    # Collect the type annotations of the function itself, or of every
    # method when the program is a class (an IRModule).
    ann = {}
    if inspect.isfunction(program):
        ann = {program.__name__: program.__annotations__}
    elif inspect.isclass(program):
        for name, func in program.__dict__.items():
            if inspect.isfunction(func):
                ann[name] = func.__annotations__

    source = Source(program)
    parser = Parser(source, ann)
    with IRBuilder() as builder:
        try:
            parser.parse(extra_vars=extra_vars)
        except ParserError as err:
            # Re-report with source location for a readable diagnostic.
            parser.report_error(err.node, err.args[0])
    return builder.get()
| 2,852 | 33.792683 | 87 | py |
tvm | tvm-main/python/tvm/script/parser/core/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM Script Parser utils"""
import inspect
from types import FrameType
from typing import Any, Callable, Dict, List
from .diagnostics import findsource
def get_func_nonlocals(func):
    """A modified version of `inspect.getclosurevars`"""
    if inspect.ismethod(func):
        func = func.__func__
    if not inspect.isfunction(func):
        raise TypeError("{!r} is not a Python function".format(func))
    # Free variables are named in co_freevars and resolved by positional
    # index into __closure__.
    cells = func.__closure__ or ()
    result = {}
    for name, cell in zip(func.__code__.co_freevars, cells):
        try:
            result[name] = cell.cell_contents
        except ValueError as err:
            # An empty cell raises ValueError("...empty..."); anything else
            # is a genuine error and must propagate.
            if "empty" not in str(err):
                raise
    return result
def inspect_function_capture(func: Callable) -> Dict[str, Any]:
    """Capture function non-locals and global variables.

    Parameters
    ----------
    func : Callable
        The function to inspect.

    Returns
    -------
    res : Dict[str, Any]
        The function variables map with non-local or global variables;
        non-locals shadow globals of the same name.
    """
    captured = dict(func.__globals__)  # type: ignore
    captured.update(get_func_nonlocals(func))
    return captured
def inspect_class_capture(cls: type) -> Dict[str, Any]:
    """Capture class non-locals and global variables.

    Parameters
    ----------
    cls : type
        The class to inspect.

    Returns
    -------
    res : Dict[str, Any]
        The merged variable capture of every function defined on the class.
    """
    result: Dict[str, Any] = {}
    for member in cls.__dict__.values():
        if inspect.isfunction(member):
            result.update(inspect_function_capture(member))
    return result
def is_defined_in_class(frames: List[FrameType], obj: Any) -> bool:
    """Check whether an object is being defined inside a class scope.

    Parameters
    ----------
    frames : List[FrameType]
        The frame stack of the object, obtained by `inspect.stack()`.
    obj : Any
        The object whose source is consulted when the defining line is a
        `class` statement.

    Returns
    -------
    res : bool
        True when the definition site is an `ir_module`-decorated class.
    """
    if len(frames) <= 2:
        return False
    frame_info = frames[2]
    code_context = frame_info.code_context
    if code_context is None:
        return False
    line = code_context[0].strip()
    # Directly decorated with something containing "ir_module".
    if line.startswith("@") and "ir_module" in line:
        return True
    if not line.startswith("class"):
        return False
    lineno = frame_info.lineno
    if lineno < 2:
        return False
    # For a `class` line, check whether the line just above it in the
    # object's source is the ir_module decorator.
    source, _ = findsource(obj)
    decorator_line = source[lineno - 2].strip()
    return decorator_line.startswith("@") and "ir_module" in decorator_line
| 3,702 | 30.117647 | 74 | py |
tvm | tvm-main/python/tvm/script/parser/core/doc_core.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-outer-name,missing-docstring,invalid-name
# pylint: disable=useless-super-delegation,redefined-builtin
# pylint: disable=too-few-public-methods,too-many-arguments
class AST:
    """Base class of all doc AST nodes; carries source-location info only."""

    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__()
        self.lineno, self.col_offset = lineno, col_offset
        self.end_lineno, self.end_col_offset = end_lineno, end_col_offset
# The classes below mirror python's `ast` node classes one-to-one; each
# stores its child fields plus source-location info via the AST base.
class mod(AST):
    """Base class of module-level nodes (mirrors ast.mod)."""
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class Module(mod):
    _FIELDS = ["body", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, body, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.body = body
class Interactive(mod):
    _FIELDS = ["body", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, body, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.body = body
class Expression(mod):
    _FIELDS = ["body", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, body, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.body = body
class stmt(AST):
    """Base class of statement nodes (mirrors ast.stmt)."""
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
# Definition and assignment statements, mirroring `ast` field-for-field.
class FunctionDef(stmt):
    _FIELDS = [
        "name",
        "args",
        "body",
        "decorator_list",
        "returns",
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]
    def __init__(
        self,
        name,
        args,
        body,
        decorator_list,
        returns,
        lineno,
        col_offset,
        end_lineno,
        end_col_offset,
    ):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.name = name
        self.args = args
        self.body = body
        self.decorator_list = decorator_list
        self.returns = returns
class ClassDef(stmt):
    _FIELDS = [
        "name",
        "bases",
        "keywords",
        "body",
        "decorator_list",
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]
    def __init__(
        self,
        name,
        bases,
        keywords,
        body,
        decorator_list,
        lineno,
        col_offset,
        end_lineno,
        end_col_offset,
    ):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.name = name
        self.bases = bases
        self.keywords = keywords
        self.body = body
        self.decorator_list = decorator_list
class Return(stmt):
    _FIELDS = ["value", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, value, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value
class Delete(stmt):
    _FIELDS = ["targets", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, targets, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.targets = targets
class Assign(stmt):
    _FIELDS = ["targets", "value", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, targets, value, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.targets = targets
        self.value = value
class AugAssign(stmt):
    _FIELDS = ["target", "op", "value", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, target, op, value, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.target = target
        self.op = op
        self.value = value
class AnnAssign(stmt):
    _FIELDS = [
        "target",
        "annotation",
        "value",
        "simple",
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]
    def __init__(
        self, target, annotation, value, simple, lineno, col_offset, end_lineno, end_col_offset
    ):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.target = target
        self.annotation = annotation
        self.value = value
        self.simple = simple
# Control-flow and simple statements, mirroring `ast` field-for-field.
class For(stmt):
    _FIELDS = [
        "target",
        "iter",
        "body",
        "orelse",
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]
    def __init__(self, target, iter, body, orelse, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.target = target
        self.iter = iter
        self.body = body
        self.orelse = orelse
class While(stmt):
    _FIELDS = ["test", "body", "orelse", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, test, body, orelse, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.test = test
        self.body = body
        self.orelse = orelse
class If(stmt):
    _FIELDS = ["test", "body", "orelse", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, test, body, orelse, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.test = test
        self.body = body
        self.orelse = orelse
class With(stmt):
    _FIELDS = ["items", "body", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, items, body, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.items = items
        self.body = body
class Raise(stmt):
    _FIELDS = ["exc", "cause", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, exc, cause, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.exc = exc
        self.cause = cause
class Try(stmt):
    _FIELDS = [
        "body",
        "handlers",
        "orelse",
        "finalbody",
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]
    def __init__(
        self, body, handlers, orelse, finalbody, lineno, col_offset, end_lineno, end_col_offset
    ):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.body = body
        self.handlers = handlers
        self.orelse = orelse
        self.finalbody = finalbody
class Assert(stmt):
    _FIELDS = ["test", "msg", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, test, msg, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.test = test
        self.msg = msg
class Import(stmt):
    _FIELDS = ["names", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, names, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.names = names
class ImportFrom(stmt):
    _FIELDS = ["module", "names", "level", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, module, names, level, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.module = module
        self.names = names
        self.level = level
class Global(stmt):
    _FIELDS = ["names", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, names, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.names = names
class Nonlocal(stmt):
    _FIELDS = ["names", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, names, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.names = names
class Expr(stmt):
    _FIELDS = ["value", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, value, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value
class Pass(stmt):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class Break(stmt):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class Continue(stmt):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
# Expression nodes, mirroring `ast` field-for-field.
class expr(AST):
    """Base class of expression nodes (mirrors ast.expr)."""
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class BoolOp(expr):
    _FIELDS = ["op", "values", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, op, values, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.op = op
        self.values = values
class BinOp(expr):
    _FIELDS = ["left", "op", "right", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, left, op, right, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.left = left
        self.op = op
        self.right = right
class UnaryOp(expr):
    _FIELDS = ["op", "operand", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, op, operand, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.op = op
        self.operand = operand
class Lambda(expr):
    _FIELDS = ["args", "body", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, args, body, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.args = args
        self.body = body
class IfExp(expr):
    _FIELDS = ["test", "body", "orelse", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, test, body, orelse, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.test = test
        self.body = body
        self.orelse = orelse
class Dict(expr):
    _FIELDS = ["keys", "values", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, keys, values, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.keys = keys
        self.values = values
class Set(expr):
    _FIELDS = ["elts", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, elts, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.elts = elts
class ListComp(expr):
    _FIELDS = ["elt", "generators", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, elt, generators, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.elt = elt
        self.generators = generators
class SetComp(expr):
    _FIELDS = ["elt", "generators", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, elt, generators, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.elt = elt
        self.generators = generators
class DictComp(expr):
    _FIELDS = ["key", "value", "generators", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, key, value, generators, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.key = key
        self.value = value
        self.generators = generators
class GeneratorExp(expr):
    _FIELDS = ["elt", "generators", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, elt, generators, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.elt = elt
        self.generators = generators
class Yield(expr):
    _FIELDS = ["value", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, value, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value
class YieldFrom(expr):
    _FIELDS = ["value", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, value, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value
class Compare(expr):
    _FIELDS = ["left", "ops", "comparators", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, left, ops, comparators, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.left = left
        self.ops = ops
        self.comparators = comparators
class Call(expr):
    _FIELDS = ["func", "args", "keywords", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, func, args, keywords, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.func = func
        self.args = args
        self.keywords = keywords
class FormattedValue(expr):
    _FIELDS = [
        "value",
        "conversion",
        "format_spec",
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]
    def __init__(
        self, value, conversion, format_spec, lineno, col_offset, end_lineno, end_col_offset
    ):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value
        self.conversion = conversion
        self.format_spec = format_spec
class JoinedStr(expr):
    _FIELDS = ["values", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, values, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.values = values
class Constant(expr):
    # `s`/`n` look like the legacy ast.Str/ast.Num fields kept alongside
    # `value` for pre-3.8 compatibility — see the Str/Num handlers in doc.py.
    _FIELDS = ["value", "kind", "s", "n", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, value, kind, s, n, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value
        self.kind = kind
        self.s = s
        self.n = n
class NamedExpr(expr):
    _FIELDS = ["target", "value", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, target, value, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.target = target
        self.value = value
class Attribute(expr):
    _FIELDS = ["value", "attr", "ctx", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, value, attr, ctx, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value
        self.attr = attr
        self.ctx = ctx
# Base class for subscript index nodes, mirroring the (deprecated)
# ``ast.slice`` hierarchy. Carries only source-location fields.
class slice(AST):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
# ``lower:upper:step`` slice, mirroring ``ast.Slice``.
class Slice(slice):
    _FIELDS = ["lower", "upper", "step", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lower, upper, step, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.lower = lower
        self.upper = upper
        self.step = step
# Multi-dimensional slice (deprecated ``ast.ExtSlice``); ``dims`` holds the
# per-dimension index nodes.
class ExtSlice(slice):
    _FIELDS = ["dims", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, dims, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.dims = dims
# Plain index wrapper (deprecated ``ast.Index``) around a single expression.
class Index(slice):
    _FIELDS = ["value", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, value, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value
# Subscript expression ``value[slice]``; mirrors ``ast.Subscript``.
class Subscript(expr):
    _FIELDS = ["value", "slice", "ctx", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, value, slice, ctx, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value
        self.slice = slice
        self.ctx = ctx
# Starred expression ``*value`` (e.g. in calls/unpacking); mirrors
# ``ast.Starred``.
class Starred(expr):
    _FIELDS = ["value", "ctx", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, value, ctx, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value
        self.ctx = ctx
# Identifier reference; ``id`` is the name string. Mirrors ``ast.Name``.
class Name(expr):
    _FIELDS = ["id", "ctx", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, id, ctx, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.id = id
        self.ctx = ctx
# List display ``[elts...]``; mirrors ``ast.List``.
class List(expr):
    _FIELDS = ["elts", "ctx", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, elts, ctx, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.elts = elts
        self.ctx = ctx
# Tuple display ``(elts...)``; mirrors ``ast.Tuple``.
class Tuple(expr):
    _FIELDS = ["elts", "ctx", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, elts, ctx, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.elts = elts
        self.ctx = ctx
# Base class for expression contexts (how an expression is used: load, store,
# delete, ...); mirrors ``ast.expr_context``. Subclasses add no fields.
class expr_context(AST):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
# Deprecated augmented-load context (kept for parity with old ``ast``).
class AugLoad(expr_context):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
# Deprecated augmented-store context (kept for parity with old ``ast``).
class AugStore(expr_context):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
# Deprecated parameter context (kept for parity with old ``ast``).
class Param(expr_context):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
# Deprecated ``ast.Suite`` module kind; note it subclasses ``mod``, not
# ``expr_context``, despite its position in this file.
class Suite(mod):
    _FIELDS = ["body", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, body, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.body = body
# Delete context (``del x``).
class Del(expr_context):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
# Load (read) context.
class Load(expr_context):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
# Store (write) context.
class Store(expr_context):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
# Base class for boolean operator tokens (``and``/``or``); mirrors
# ``ast.boolop``. Subclasses below carry only source-location fields.
class boolop(AST):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class And(boolop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class Or(boolop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
# Base class for binary operator tokens (``+``, ``-``, ``*``, ...); mirrors
# ``ast.operator``. Each subclass is one operator and adds no fields.
class operator(AST):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class Add(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class BitAnd(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class BitOr(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class BitXor(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class Div(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class FloorDiv(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class LShift(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class Mod(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class Mult(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class MatMult(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class Pow(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class RShift(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class Sub(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
# Base class for unary operator tokens (``~``, ``not``, ``+``, ``-``); mirrors
# ``ast.unaryop``. Subclasses add no fields.
class unaryop(AST):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class Invert(unaryop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class Not(unaryop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class UAdd(unaryop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class USub(unaryop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
# Base class for comparison operator tokens (``==``, ``<``, ``in``, ...);
# mirrors ``ast.cmpop``. Subclasses add no fields.
class cmpop(AST):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class Eq(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class Gt(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class GtE(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class In(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class Is(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class IsNot(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class Lt(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class LtE(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class NotEq(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
class NotIn(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
# One ``for target in iter if ifs...`` clause of a comprehension; ``is_async``
# marks ``async for``. Mirrors ``ast.comprehension``.
class comprehension(AST):
    _FIELDS = [
        "target",
        "iter",
        "ifs",
        "is_async",
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]
    def __init__(self, target, iter, ifs, is_async, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.target = target
        self.iter = iter
        self.ifs = ifs
        self.is_async = is_async
# Base class for exception handler nodes; mirrors ``ast.excepthandler``.
class excepthandler(AST):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
# A single ``except type as name: body`` clause; mirrors ``ast.ExceptHandler``.
class ExceptHandler(excepthandler):
    _FIELDS = ["type", "name", "body", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, type, name, body, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.type = type
        self.name = name
        self.body = body
# Full formal-parameter specification of a function definition/lambda;
# mirrors ``ast.arguments``.
class arguments(AST):
    _FIELDS = [
        "args",
        "vararg",
        "kwonlyargs",
        "kw_defaults",
        "kwarg",
        "defaults",
        "posonlyargs",
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]
    def __init__(
        self,
        args,
        vararg,
        kwonlyargs,
        kw_defaults,
        kwarg,
        defaults,
        posonlyargs,
        lineno,
        col_offset,
        end_lineno,
        end_col_offset,
    ):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.args = args
        self.vararg = vararg
        self.kwonlyargs = kwonlyargs
        self.kw_defaults = kw_defaults
        self.kwarg = kwarg
        self.defaults = defaults
        self.posonlyargs = posonlyargs
# A single formal parameter (name plus optional annotation); mirrors
# ``ast.arg``.
class arg(AST):
    _FIELDS = ["arg", "annotation", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, arg, annotation, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.arg = arg
        self.annotation = annotation
# A keyword argument at a call site (``arg=value``; ``arg`` is None for
# ``**kwargs``). Mirrors ``ast.keyword``.
class keyword(AST):
    _FIELDS = ["arg", "value", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, arg, value, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.arg = arg
        self.value = value
# An import alias ``name as asname``; mirrors ``ast.alias``.
class alias(AST):
    _FIELDS = ["name", "asname", "lineno", "col_offset", "end_lineno", "end_col_offset"]
    def __init__(self, name, asname, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.name = name
        self.asname = asname
# One item of a ``with`` statement (``context_expr as optional_vars``);
# mirrors ``ast.withitem``.
class withitem(AST):
    _FIELDS = [
        "context_expr",
        "optional_vars",
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]
    def __init__(self, context_expr, optional_vars, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.context_expr = context_expr
        self.optional_vars = optional_vars
# Explicit public API of this module: every doc AST node class defined above,
# listed in definition order.
__all__ = [
    "AST",
    "mod",
    "Module",
    "Interactive",
    "Expression",
    "stmt",
    "FunctionDef",
    "ClassDef",
    "Return",
    "Delete",
    "Assign",
    "AugAssign",
    "AnnAssign",
    "For",
    "While",
    "If",
    "With",
    "Raise",
    "Try",
    "Assert",
    "Import",
    "ImportFrom",
    "Global",
    "Nonlocal",
    "Expr",
    "Pass",
    "Break",
    "Continue",
    "expr",
    "BoolOp",
    "BinOp",
    "UnaryOp",
    "Lambda",
    "IfExp",
    "Dict",
    "Set",
    "ListComp",
    "SetComp",
    "DictComp",
    "GeneratorExp",
    "Yield",
    "YieldFrom",
    "Compare",
    "Call",
    "FormattedValue",
    "JoinedStr",
    "Constant",
    "NamedExpr",
    "Attribute",
    "slice",
    "Slice",
    "ExtSlice",
    "Index",
    "Subscript",
    "Starred",
    "Name",
    "List",
    "Tuple",
    "expr_context",
    "AugLoad",
    "AugStore",
    "Param",
    "Suite",
    "Del",
    "Load",
    "Store",
    "boolop",
    "And",
    "Or",
    "operator",
    "Add",
    "BitAnd",
    "BitOr",
    "BitXor",
    "Div",
    "FloorDiv",
    "LShift",
    "Mod",
    "Mult",
    "MatMult",
    "Pow",
    "RShift",
    "Sub",
    "unaryop",
    "Invert",
    "Not",
    "UAdd",
    "USub",
    "cmpop",
    "Eq",
    "Gt",
    "GtE",
    "In",
    "Is",
    "IsNot",
    "Lt",
    "LtE",
    "NotEq",
    "NotIn",
    "comprehension",
    "excepthandler",
    "ExceptHandler",
    "arguments",
    "arg",
    "keyword",
    "alias",
    "withitem",
]
| 35,222 | 29.870289 | 100 | py |
tvm | tvm-main/python/tvm/script/parser/core/evaluator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""AST Evaluation"""
import ast
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union
from . import dispatch, doc
from .error import ParserError
if TYPE_CHECKING:
from .parser import Parser
# Fallback evaluation rules used by ``_eval_op`` when no type-specific
# operator overload is registered in the dispatch table: one plain-Python
# lambda per doc AST operator node type, keyed by that node type.
DEFAULT_OP: Dict[Type, Callable[..., Any]] = {
    doc.Add: lambda a, b: a + b,
    doc.Sub: lambda a, b: a - b,
    doc.Mult: lambda a, b: a * b,
    doc.Div: lambda a, b: a / b,
    doc.FloorDiv: lambda a, b: a // b,
    doc.Mod: lambda a, b: a % b,
    doc.LShift: lambda a, b: a << b,
    doc.RShift: lambda a, b: a >> b,
    doc.BitOr: lambda a, b: a | b,
    doc.BitXor: lambda a, b: a ^ b,
    doc.BitAnd: lambda a, b: a & b,
    doc.MatMult: lambda a, b: a @ b,
    doc.Pow: lambda a, b: a**b,
    doc.Eq: lambda a, b: a == b,
    doc.NotEq: lambda a, b: a != b,
    doc.Lt: lambda a, b: a < b,
    doc.LtE: lambda a, b: a <= b,
    doc.Gt: lambda a, b: a > b,
    doc.GtE: lambda a, b: a >= b,
    doc.Is: lambda a, b: a is b,
    doc.IsNot: lambda a, b: a is not b,
    doc.In: lambda a, b: a in b,
    doc.NotIn: lambda a, b: a not in b,
    doc.And: lambda a, b: a and b,
    doc.Or: lambda a, b: a or b,
    doc.Invert: lambda a: ~a,
    doc.Not: lambda a: not a,
    doc.UAdd: lambda a: +a,
    doc.USub: lambda a: -a,
}
class ExprEvaluator:
    """Expression evaluator for TVMScript parser.
    Parameters
    ----------
    parser : Parser
        The parser bound with the evaluator.
    value_table : Dict[str, Any]
        The value table for expression evaluation.
    new_value_count : int
        The count for intermediate result added during evaluation.
    """
    # Attributes (also listed in the docstring above).
    parser: "Parser"
    value_table: Dict[str, Any]
    new_value_count: int
    def __init__(self, parser: "Parser", value_table: Dict[str, Any]) -> None:
        super().__init__()
        self.parser = parser
        self.value_table = value_table
        self.new_value_count = 0
    @staticmethod
    def eval(parser: "Parser", value_table: Dict[str, Any], node: doc.AST) -> Any:
        """Expression evaluation for TVMScript parser.
        Parameters
        ----------
        parser : Parser
            The parser bound with the evaluator.
        value_table : Dict[str, Any]
            The value table for expression evaluation.
        node : doc.AST
            The root node of AST tree node of expression to evaluate.
        Returns
        -------
        res : Any
            The evaluation result.
        """
        self = ExprEvaluator(parser, value_table)
        # _visit returns either a doc.Name referring into value_table or a
        # doc.Constant; unwrap it to the concrete Python value.
        result = self._visit(node)  # pylint: disable=protected-access
        if isinstance(result, doc.Name):
            if result.id not in self.value_table:
                raise ParserError(result, f"Undefined variable: {result.id}")
            return self.value_table[result.id]
        if isinstance(result, doc.Constant):
            return result.value
        raise TypeError(f"Unexpected result type: {type(result)}")
    def _add_intermediate_result(self, value: Any) -> doc.Name:
        """Add intermediate result during evaluation into value table.
        Parameters
        ----------
        value : Any
            The intermediate result.
        Returns
        -------
        name : doc.Name
            The doc AST name node with intermediate name for intermediate result.
        """
        name = f"__tvm_tmp_value_{self.new_value_count}"
        self.new_value_count += 1
        self.value_table[name] = value
        # The returned Name node is synthetic, so it carries dummy locations.
        lineno = 0
        col_offset = 0
        return doc.Name(
            id=name,
            ctx=doc.Load(
                lineno=lineno,
                col_offset=col_offset,
                end_lineno=None,
                end_col_offset=None,
            ),
            lineno=lineno,
            col_offset=col_offset,
            end_lineno=None,
            end_col_offset=None,
        )
    def _visit(self, node: doc.AST) -> Any:
        """General doc AST node visiting method for expression evaluation.
        Parameters
        ----------
        node : doc.AST
            The root node of AST tree node of expression to evaluate.
        Returns
        -------
        res : Any
            The evaluation result.
        """
        args = []
        # For operator-like nodes (and most calls, excluding a few TIR
        # buffer-region intrinsics), collect their operand sub-expressions and
        # normalize any slice operands before evaluation.
        if (
            isinstance(node, doc.Call)
            and hasattr(node.func, "attr")
            and node.func.attr not in ["reads", "writes", "match_buffer", "realize"]
        ) or isinstance(node, (doc.BinOp, doc.UnaryOp, doc.Compare, doc.BoolOp)):
            if isinstance(node, doc.BinOp):
                args = [node.left, node.right]
            elif isinstance(node, doc.UnaryOp):
                args = [node.operand]
            elif isinstance(node, doc.Compare):
                args = [node.left, *node.comparators]
            else:
                if isinstance(node, doc.Call):
                    args = node.args
                elif isinstance(node, doc.BoolOp):
                    args = node.values
            for arg in args:
                if isinstance(arg, doc.Subscript) and isinstance(arg.slice, (doc.Slice, doc.Tuple)):
                    if isinstance(arg.slice, doc.Slice):
                        check_slices = [arg.slice]
                    else:
                        check_slices = []
                        for p in arg.slice.elts:
                            if isinstance(p, doc.Slice):
                                check_slices.append(p)
                    for s in check_slices:
                        # A bounded slice without an explicit step gets a
                        # synthetic ``step = 1`` constant (placed right after
                        # the upper bound in the source), mutating the AST
                        # in place.
                        if not s.step and s.upper and s.lower:
                            s.step = doc.Constant(
                                1,
                                None,
                                1,
                                1,
                                s.upper.lineno,
                                s.upper.end_col_offset + 1,
                                s.upper.lineno,
                                s.upper.end_col_offset + 2,
                            )
        if isinstance(node, list):
            return [self._visit(n) for n in node]
        if isinstance(node, tuple):
            return tuple(self._visit(n) for n in node)
        assert isinstance(node, doc.AST)
        if isinstance(node, doc.Name):
            if node.id not in self.value_table:
                raise ParserError(node, f"Undefined variable: {node.id}")
            return node
        # Leaf nodes (constants, operator/context tokens) are returned as-is.
        if isinstance(
            node,
            (
                doc.Constant,
                doc.expr_context,
                doc.operator,
                doc.boolop,
                doc.unaryop,
                doc.cmpop,
            ),
        ):
            return node
        if not isinstance(node, (doc.expr, doc.slice)):
            return node
        if isinstance(node, doc.Lambda):
            return self._eval_lambda(node)
        # Recurse into each declared field of the node, then rebuild/evaluate
        # the node from the visited fields.
        fields = {}
        for field in node.__class__._FIELDS:  # pylint: disable=protected-access
            attr = getattr(node, field)
            if isinstance(attr, (doc.AST, tuple, list)):
                fields[field] = self._visit(attr)
            else:
                fields[field] = attr
        try:
            if isinstance(node, doc.BoolOp):
                value = self._eval_bool_op(fields)
            elif isinstance(node, doc.Compare):
                value = self._eval_compare(fields)
            elif isinstance(node, doc.UnaryOp):
                value = self._eval_unary_op(fields)
            elif isinstance(node, doc.BinOp):
                value = self._eval_bin_op(fields)
            elif isinstance(node, doc.Slice):
                value = self._eval_slice(fields)
            else:
                value = self._eval_expr(node.__class__(**fields))
        except Exception as e:  # pylint: disable=broad-except,invalid-name
            # NOTE(review): assumes report_error raises; otherwise ``value``
            # below would be unbound — confirm against Parser.report_error.
            self.parser.report_error(node, e)
        return self._add_intermediate_result(value)
    def _eval_lambda(self, node: doc.Lambda) -> Any:
        """The doc AST lambda node evaluating method.
        Parameters
        ----------
        node : doc.Lambda
            The root node of AST tree node of expression to evaluate.
        Returns
        -------
        res : Any
            The evaluation result.
        """
        try:
            value = self._eval_expr(node)
        except Exception as e:  # pylint: disable=broad-except,invalid-name
            self.parser.report_error(node, str(e))
        return self._add_intermediate_result(value)
    def _eval_bool_op(self, fields: Dict[str, Any]) -> Any:
        """The doc AST boolean operator node evaluating method.
        Parameters
        ----------
        fields : Dict[str, Any]
            The dictionary of boolean operation information,
            e.g., operator types, operand values.
        Returns
        -------
        res : Any
            The evaluation result.
        """
        op = fields["op"]
        if not isinstance(op, (doc.And, doc.Or)):
            raise TypeError(f"Unexpected operator: {op}")
        # Fold the operand list left-to-right with the same operator.
        value = self._eval_expr(fields["values"][0])
        for rhs in fields["values"][1:]:
            value = _eval_op(op, values=[value, self._eval_expr(rhs)])
        return value
    def _eval_compare(self, fields: Dict[str, Any]) -> Any:
        """The doc AST comparison operation node evaluating method.
        Parameters
        ----------
        fields : Dict[str, Any]
            The dictionary of comparison operation information,
            e.g., operator types, operand values.
        Returns
        -------
        res : Any
            The evaluation result.
        """
        # Chained comparisons pair each operator with the next comparator.
        value = self._eval_expr(fields["left"])
        for op, rhs in zip(fields["ops"], fields["comparators"]):
            value = _eval_op(op, values=[value, self._eval_expr(rhs)])
        return value
    def _eval_unary_op(self, fields: Dict[str, Any]) -> Any:
        """The doc AST unary operation node evaluating method.
        Parameters
        ----------
        fields : Dict[str, Any]
            The dictionary of unary operation information,
            e.g., operator types, operand values.
        Returns
        -------
        res : Any
            The evaluation result.
        """
        value = self._eval_expr(fields["operand"])
        value = _eval_op(fields["op"], values=[value])
        return value
    def _eval_bin_op(self, fields: Dict[str, Any]) -> Any:
        """The doc AST binary operation node evaluating method.
        Parameters
        ----------
        fields : Dict[str, Any]
            The dictionary of binary operation information,
            e.g., operator types, operand values.
        Returns
        -------
        res : Any
            The evaluation result.
        """
        return _eval_op(
            fields["op"],
            values=[
                self._eval_expr(fields["left"]),
                self._eval_expr(fields["right"]),
            ],
        )
    def _eval_slice(self, fields: Dict[str, Any]) -> slice:
        """The doc AST slice node evaluating method.
        Parameters
        ----------
        fields : Dict[str, Any]
            The dictionary of slice information,
            e.g., lower bound, upper bound, step.
        Returns
        -------
        res : slice
            The evaluation result.
        """
        lower, upper, step = fields["lower"], fields["upper"], fields["step"]
        lower = self._eval_expr(lower) if lower is not None else None
        upper = self._eval_expr(upper) if upper is not None else None
        step = self._eval_expr(step) if step is not None else None
        return slice(lower, upper, step)
    def _eval_expr(self, v: Any) -> Any:
        """The doc AST expression node evaluating method.
        Parameters
        ----------
        v : Any
            The root node of AST tree node of expression to evaluate.
        Returns
        -------
        res : Any
            The evaluation result.
        """
        return _eval_expr(v, self.value_table)
def eval_expr(
    parser: "Parser",
    node: Union[doc.expr, doc.Expression],
    dict_globals: Optional[Dict[str, Any]],
) -> Any:
    """Evaluate an expression AST node for the TVMScript parser.

    Parameters
    ----------
    parser : Parser
        The parser bound with the evaluator.
    node : Union[doc.expr, doc.Expression]
        The root node of AST tree node of expression to evaluate.
    dict_globals : Optional[Dict[str, Any]]
        The optional global value table for expression evaluation.

    Returns
    -------
    res : Any
        The evaluation result.
    """
    # Copy the caller's globals (if any) into a fresh table, since evaluation
    # records intermediate results into it.
    value_table = dict(dict_globals) if dict_globals is not None else {}
    return ExprEvaluator.eval(parser, value_table, node)
def eval_assign(
    parser: "Parser",
    target: doc.expr,
    source: Any,
) -> Dict[str, Any]:
    """Evaluate an assignment of ``source`` to the ``target`` expression.

    Parameters
    ----------
    parser : Parser
        The parser bound with the evaluator; used to report failures.
    target : doc.expr
        The root node of AST tree node of assigned expression to evaluate.
    source : Any
        The source to be assigned with evaluated expression.

    Returns
    -------
    res : Dict[str, Any]
        Mapping from each name bound by the assignment to its value.
    """
    try:
        bindings = _eval_assign(target, source)
    except Exception as e:  # pylint: disable=broad-except,invalid-name
        parser.report_error(target, f"Failed to evaluate assignment: {str(e)}")
        raise
    return bindings
def _eval_expr(
    node: Union[doc.expr, doc.Expression],
    dict_globals: Optional[Dict[str, Any]],
) -> Any:
    """Compile a doc expression back to native ``ast`` and evaluate it.

    Parameters
    ----------
    node : Union[doc.expr, doc.Expression]
        The root node of AST tree node of expression to evaluate.
    dict_globals : Optional[Dict[str, Any]]
        The optional global value table for expression evaluation.

    Returns
    -------
    res : Any
        The evaluation result.
    """
    # Convert the doc node back to a native ``ast`` node; a bare expression
    # must be wrapped so it can be compiled in ``eval`` mode.
    node = doc.from_doc(node)
    if isinstance(node, ast.expr):
        node = ast.Expression(body=node)
    assert isinstance(node, ast.Expression), "Expects an ast.Expression, but gets: " + str(node)
    globals_table = dict_globals if dict_globals is not None else {}
    code = compile(ast.fix_missing_locations(node), filename="<ast>", mode="eval")
    return eval(code, globals_table)  # pylint: disable=eval-used
def _eval_op(
    op: doc.AST,
    values: List[Any],
):
    """Apply an operator node to operand values, honoring dispatch overloads.

    Parameters
    ----------
    op : doc.AST
        The root node of AST tree node of operation expression to evaluate.
    values : List[Any]
        The list of values of operands.

    Returns
    -------
    res : Any
        The evaluation result.
    """
    node_type = type(op)  # pylint: disable=protected-access
    # The first operand whose type declares ``_dispatch_type`` and has a
    # registered overload for this operator wins.
    for index, operand in enumerate(values):
        dispatch_type = getattr(type(operand), "_dispatch_type", None)
        if dispatch_type is None:
            continue
        method = dispatch.get_op(
            operand_type=dispatch_type, op_node_type=node_type, operand_index=index, default=None
        )
        if method is not None:
            return method(*values)
    # No overload registered: fall back to plain Python semantics.
    return DEFAULT_OP[node_type](*values)
def _eval_assign(
    target: doc.expr,
    source: Any,
) -> Dict[str, Any]:
    """Execute ``target = source`` and collect the names it binds.

    Parameters
    ----------
    target : doc.expr
        The root node of AST tree node of assigned expression to evaluate.
    source : Any
        The source to be assigned with evaluated expression.

    Returns
    -------
    res : Dict[str, Any]
        Mapping from each bound name to its assigned value.
    """
    target = doc.from_doc(target)
    assert isinstance(target, ast.expr)
    # Stash the RHS value under a reserved name, then run the assignment so
    # Python itself performs any tuple/star unpacking into the locals dict.
    rhs_name = "__tvm_rhs_var__"
    local_vars = {rhs_name: source}
    assign_stmt = ast.Assign(
        targets=[target],
        value=ast.Name(
            id=rhs_name,
            ctx=ast.Load(),
        ),
    )
    module = ast.fix_missing_locations(ast.Module(body=[assign_stmt], type_ignores=[]))
    exe = compile(module, filename="<ast>", mode="exec")
    exec(exe, {}, local_vars)  # pylint: disable=exec-used
    # Drop the reserved RHS name; only the user-visible bindings remain.
    del local_vars[rhs_name]
    return local_vars
| 17,118 | 30.182149 | 96 | py |
tvm | tvm-main/python/tvm/script/parser/core/dispatch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Parser dispatching infrastructure"""
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Type
from .doc import AST
if TYPE_CHECKING:
from .parser import Parser
# Signature of a parsing method: takes the active parser and a doc AST node.
ParseMethod = Callable[["Parser", AST], None]
# Virtual table mapping (dispatch token, AST type name) -> parsing method.
ParseVTable: Dict[Tuple[str, str], ParseMethod] = {}
# Signature of an operator-overload method.
OpMethod = Callable[..., Any]
# Virtual table mapping (operand type, op node type, operand index) -> method.
OpVTable: Dict[Tuple[Type, AST, int], OpMethod] = {}
def register(token: str, type_name: str):
    """Register a method for a dispatch token and type name.

    Parameters
    ----------
    token : str
        The token for IR, e.g., T for TIR and R for Relax.
    type_name : str
        The type name of AST node, e.g., FunctionDef, With, For.

    Returns
    -------
    func : callable
        A decorator that records the parsing method in ``ParseVTable``
        under the key ``(token, type_name)``.
    """

    def func(method: ParseMethod):
        """Record ``method`` in the parser virtual table.

        Returns the method unchanged so that, when used as a decorator,
        the decorated name keeps pointing at the function instead of
        being rebound to ``None``.
        """
        ParseVTable[(token, type_name)] = method
        return method

    return func
def get(
    token: str,
    type_name: str,
    default: Optional[ParseMethod] = None,
) -> Optional[ParseMethod]:
    """Look up the parsing method registered for a dispatch token and type name.

    Parameters
    ----------
    token : str
        The token for IR, e.g., T for TIR and R for Relax.
    type_name : str
        The type name of AST node, e.g., FunctionDef, With, For.
    default : Optional[ParseMethod]
        The fallback returned when nothing is registered under the key.

    Returns
    -------
    func : Optional[ParseMethod]
        The registered parsing method, or ``default`` when absent.
    """
    key = (token, type_name)
    return ParseVTable.get(key, default)
def register_op(operand_type: Type, op_node_type: AST, operand_index: int):
    """Register a method for a operand type, AST operator node and operand index.

    Parameters
    ----------
    operand_type : Type
        The type of operands, e.g., tir.PrimExpr, tir.IterVar.
    op_node_type : AST
        The doc AST operator node type, e.g., doc.Add, doc.Eq.
    operand_index : int
        The operand index, i.e., 0 for left operand and 1 for right operand.

    Returns
    -------
    func : callable
        A decorator that records the operator method in ``OpVTable`` under
        the key ``(operand_type, op_node_type, operand_index)``.
    """

    def func(method: OpMethod):
        """Record ``method`` in the operator virtual table.

        Returns the method unchanged so that, when used as a decorator,
        the decorated name keeps pointing at the function instead of
        being rebound to ``None``.
        """
        OpVTable[(operand_type, op_node_type, operand_index)] = method
        return method

    return func
def get_op(
    operand_type: Type,
    op_node_type: Type,
    operand_index: int,
    default: Optional[OpMethod] = None,
) -> Optional[OpMethod]:
    """Get the method registered for an operand type, AST operator node and
    operand index, or return ``default`` when none is registered.

    Parameters
    ----------
    operand_type : Type
        The type of operands, e.g., tir.PrimExpr, tir.IterVar.
    op_node_type : AST
        The doc AST operator node type, e.g., doc.Add, doc.Eq.
    operand_index : int
        The operand index, i.e., 0 for left operand and 1 for right operand.
    default : Optional[OpMethod]
        The default method when no registered methods with this operand type,
        AST operator node and operand index.

    Returns
    -------
    func : Optional[OpMethod]
        The registered operator method, or ``default`` when absent.
    """
    return OpVTable.get((operand_type, op_node_type, operand_index), default)
| 4,719 | 29.063694 | 95 | py |
tvm | tvm-main/python/tvm/script/parser/core/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The core parser infra"""
from . import diagnostics, dispatch, doc, doc_core, entry, evaluator, parser, utils
| 897 | 46.263158 | 83 | py |
tvm | tvm-main/python/tvm/script/parser/core/diagnostics.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM Script Parser Source and diagnostics"""
import inspect
import sys
from typing import Union
from tvm.ir import IRModule, SourceName, Span, diagnostics
from . import doc
class Source:
    """Source code class for TVMScript.

    It is constructed from either a source code str or a doc AST tree.

    Parameters
    ----------
    source_name : str
        The filename of the file where the source code locates.

    start_line : int
        The first line number of the source code.

    start_column : int
        The first column number of the first line of the source code.

    source : str
        The source code str of source code.

    full_source : str
        The complete source code of the file where the source code locates.
    """

    source_name: str
    start_line: int
    start_column: int
    source: str
    full_source: str

    def __init__(self, program: Union[str, doc.AST]):
        if isinstance(program, str):
            # A raw string has no originating file; use placeholder metadata.
            self.source_name = "<str>"
            self.start_line = 1
            self.start_column = 0
            self.source = program
            self.full_source = program
            return

        self.source_name = inspect.getsourcefile(program)  # type: ignore
        raw_lines, self.start_line = getsourcelines(program)  # type: ignore
        if raw_lines:
            first = raw_lines[0]
            self.start_column = len(first) - len(first.lstrip())
        else:
            self.start_column = 0
        if raw_lines and self.start_column:
            # De-indent every line by the leading indent of the first line.
            dedented = [line[self.start_column :].rstrip() for line in raw_lines]
            self.source = "\n".join(dedented)
        else:
            self.source = "".join(raw_lines)
        try:
            # It will cause a problem when running in Jupyter Notebook.
            # `mod` will be <module '__main__'>, which is a built-in module
            # and `getsource` will throw a TypeError
            mod = inspect.getmodule(program)
            self.full_source = inspect.getsource(mod) if mod else self.source
        except TypeError:
            # It's a work around for Jupyter problem.
            # Since `findsource` is an internal API of inspect, we just use it
            # as a fallback method.
            src, _ = inspect.findsource(program)  # type: ignore
            self.full_source = "".join(src)

    def as_ast(self) -> doc.AST:
        """Parse the source code into AST.

        Returns
        -------
        res : doc.AST
            The AST of source code.
        """
        return doc.parse(self.source)
# Capture the stock implementations before `inspect.getfile` is monkey-patched
# later in this module, so the patched function can still delegate to them.
_getfile = inspect.getfile  # pylint: disable=invalid-name
_findsource = inspect.findsource  # pylint: disable=invalid-name


def _patched_inspect_getfile(obj):
    """Work out which source or compiled file an object was defined in.

    Same as the stock `inspect.getfile` for non-class objects.  For classes
    whose module has no usable ``__file__`` it falls back to locating the file
    of a method defined directly on the class.

    Raises
    ------
    TypeError
        If no source file can be determined for `obj`.
    """
    if not inspect.isclass(obj):
        return _getfile(obj)
    mod = getattr(obj, "__module__", None)
    if mod is not None:
        file = getattr(sys.modules[mod], "__file__", None)
        if file is not None:
            return file
    # Fall back: find a method actually defined on this class (its qualname
    # must be "<ClassName>.<method>") and report the file that method lives in.
    for _, member in inspect.getmembers(obj):
        if inspect.isfunction(member):
            if obj.__qualname__ + "." + member.__name__ == member.__qualname__:
                return inspect.getfile(member)
    # Fix: `{obj:!r}` was an invalid format spec (raised the wrong error with
    # the wrong message); `{obj!r}` is the intended repr conversion.
    raise TypeError(f"Source for {obj!r} not found")
def findsource(obj):
    """Return the entire source file and starting line number for an object.

    For non-class objects this defers to the stock ``inspect.findsource``.
    For classes it re-implements the lookup: it scans the source file while
    tracking ``def``/``class`` scopes by indentation until the scope stack
    matches the class's ``__qualname__``.

    Parameters
    ----------
    obj : Any
        The object (function, class, method, ...) to locate.

    Returns
    -------
    lines : list[str]
        All lines of the file containing the definition.

    lineno : int
        The 0-based index of the line where the definition starts.

    Raises
    ------
    OSError
        If the source file or the class definition cannot be found.
    """
    import linecache  # pylint: disable=import-outside-toplevel

    if not inspect.isclass(obj):
        return _findsource(obj)

    file = inspect.getsourcefile(obj)
    if file:
        # Refresh linecache if the file changed on disk.
        linecache.checkcache(file)
    else:
        file = inspect.getfile(obj)
        # Pseudo-filenames in angle brackets (e.g. "<ipython-input-...>") are
        # allowed through: linecache may still hold their contents.
        if not (file.startswith("<") and file.endswith(">")):
            raise OSError("source code not available")

    module = inspect.getmodule(obj, file)
    if module:
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise OSError("could not get source code")
    # Example: class C inside function f has __qualname__ "f.<locals>.C";
    # fuse "<locals>" onto the function name so it compares equal to the
    # names pushed on scope_stack below (which tag functions the same way).
    qual_names = obj.__qualname__.replace(".<locals>", "<locals>").split(".")
    in_comment = 0
    scope_stack = []
    indent_info = {}
    # NOTE(review): this is a heuristic text scan, not a real parser — it only
    # tracks '"""' delimiters and leading indentation, which is sufficient for
    # conventionally formatted files.
    for i, line in enumerate(lines):
        n_comment = line.count('"""')
        if n_comment:
            # update multi-line comments status (odd count toggles the state)
            in_comment = in_comment ^ (n_comment & 1)
            continue
        if in_comment:
            # skip lines within multi-line comments
            continue
        indent = len(line) - len(line.lstrip())
        tokens = line.split()
        if len(tokens) > 1:
            name = None
            if tokens[0] == "def":
                # Strip the "(...)" / ":" suffix and tag with "<locals>" to
                # mirror the __qualname__ convention for function scopes.
                name = tokens[1].split(":")[0].split("(")[0] + "<locals>"
            elif tokens[0] == "class":
                name = tokens[1].split(":")[0].split("(")[0]
            # pop scope if we are less indented
            while scope_stack and indent_info[scope_stack[-1]] >= indent:
                scope_stack.pop()
            if name:
                scope_stack.append(name)
                indent_info[name] = indent
            if scope_stack == qual_names:
                return lines, i
    raise OSError("could not find class definition")
def getsourcelines(obj):
    """Return ``(source_lines, start_lineno)`` for the given object.

    Mirrors ``inspect.getsourcelines`` but routes through the class-aware
    ``findsource`` defined in this module.
    """
    unwrapped = inspect.unwrap(obj)
    all_lines, index = findsource(unwrapped)
    block = inspect.getblock(all_lines[index:])
    # findsource yields a 0-based index; callers expect a 1-based line number.
    return block, index + 1
# Globally monkey-patch `inspect.getfile` with the class-aware variant above so
# that source lookup also succeeds for classes whose module lacks a usable
# __file__ (e.g. classes defined interactively).
inspect.getfile = _patched_inspect_getfile
class Diagnostics:
    """Diagnostics class for error reporting in parser.

    Parameters
    ----------
    source : Source
        The source code.

    ctx : diagnostics.DiagnosticContext
        The diagnostic context for diagnostics.
    """

    source: Source
    ctx: diagnostics.DiagnosticContext

    def __init__(self, source: Source):
        module = IRModule()
        module.source_map.add(source.source_name, source.full_source)
        self.source = source
        self.ctx = diagnostics.DiagnosticContext(module, diagnostics.get_renderer())

    def _emit(self, node: doc.AST, message: str, level: diagnostics.DiagnosticLevel) -> None:
        """Emit a diagnostic.

        Parameters
        ----------
        node : doc.AST
            The node with diagnostic information.

        message : str
            The diagnostic message.

        level : diagnostics.DiagnosticLevel
            The diagnostic level.
        """
        # Fall back to sensible defaults when the node carries no location.
        line_begin = node.lineno or 1
        col_begin = node.col_offset or self.source.start_column
        line_end = node.end_lineno or line_begin
        col_end = node.end_col_offset or col_begin
        # Translate snippet-relative positions into absolute positions in the
        # enclosing file: lines shift by start_line - 1, columns by
        # start_column + 1 (Span columns are 1-based).
        span = Span(
            source_name=SourceName(self.source.source_name),
            line=line_begin + self.source.start_line - 1,
            end_line=line_end + self.source.start_line - 1,
            column=col_begin + self.source.start_column + 1,
            end_column=col_end + self.source.start_column + 1,
        )
        self.ctx.emit(diagnostics.Diagnostic(level=level, span=span, message=message))

    def error(self, node: doc.AST, message: str) -> None:
        """Emit a diagnostic error and render the accumulated diagnostics.

        Parameters
        ----------
        node : doc.AST
            The node with diagnostic error.

        message : str
            The diagnostic message.
        """
        self._emit(node, message, diagnostics.DiagnosticLevel.ERROR)
        self.ctx.render()
| 8,363 | 31.418605 | 93 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.