repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tvm | tvm-main/python/tvm/contrib/micro/meta_schedule/local_builder_micro.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Local builder for microTVM projects that compile on the local host"""
import os
import tempfile
from typing import Optional, Dict
from tvm.ir import IRModule
from tvm.runtime import NDArray
from tvm.target import Target
from tvm.meta_schedule.builder import LocalBuilder
from tvm.driver.build_module import OperatorModule
from tvm import micro
from tvm.contrib.tar import tar
from tvm.relay.backend import Runtime
from tvm.driver import build as tvm_build
from tvm.tir.transform import RemoveWeightLayoutRewriteBlock
def get_local_builder_micro():
    """Return micro-compatible Builder for meta schedule."""

    def _build_for_micro(
        mod: IRModule, target: Target, _params: Optional[Dict[str, NDArray]]
    ) -> OperatorModule:
        """Build function for micro targets.

        Parameters
        ----------
        mod : IRModule
            The IRModule to be built.
        target : Target
            The target to be built.
        _params : Optional[Dict[str, NDArray]]
            The parameters to be used for the build. Must be None.

        Returns
        -------
        rt_mod : OperatorModule
            The built Module.
        """
        # tvm_build assigns "global_symbol" to the name of the generated C
        # function; the generated micro projects already ship their own main
        # function, so rename the entry point to avoid a symbol collision.
        renamed_entry = mod["main"].with_attr("global_symbol", "default_function")
        lowered = IRModule({"main": renamed_entry})
        lowered = RemoveWeightLayoutRewriteBlock(skip_ndarray_rewrite=True)(lowered)
        crt_runtime = Runtime("crt", {"system-lib": True})
        return tvm_build(lowered, target=target, runtime=crt_runtime)

    def _export_for_micro(mod: OperatorModule) -> str:
        """Export function for micro targets.

        Parameters
        ----------
        mod : OperatorModule
            The Module to be exported.

        Returns
        -------
        artifact_path : str
            The path to the exported Module (model library format tar).
        """
        export_path = os.path.join(tempfile.mkdtemp(), "tvm_tmp_mod." + tar.output_format)
        micro.export_model_library_format(mod, export_path)
        return export_path

    return LocalBuilder(f_build=_build_for_micro, f_export=_export_for_micro)
| 3,083 | 35.282353 | 92 | py |
tvm | tvm-main/python/tvm/contrib/micro/meta_schedule/rpc_runner_micro.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC Runner Micro"""
from contextlib import contextmanager
from typing import Callable, List, Optional, Union
from collections import namedtuple
import signal
import random
from tvm import micro
from tvm import nd
from tvm.contrib.popen_pool import PopenPoolExecutor
from tvm.rpc.server import Server
from tvm.rpc.tracker import Tracker
from tvm.meta_schedule.logging import get_logger
from tvm.meta_schedule.utils import cpu_count, derived_object
from tvm.meta_schedule.runner.config import EvaluatorConfig, RPCConfig
from tvm.meta_schedule.runner import PyRunner, RunnerFuture, RunnerInput
from tvm.meta_schedule.runner.rpc_runner import RPCRunnerFuture
from tvm.meta_schedule.runner.utils import T_ARG_INFO_JSON_OBJ_LIST
logger = get_logger(__name__) # pylint: disable=invalid-name
@derived_object
class RPCRunnerMicro(PyRunner):
    """RPC based runner for tuning micro models."""
    def __init__(
        self,
        platform: str = "crt",
        project_options: Optional[dict] = None,
        rpc_configs: Optional[List[RPCConfig]] = None,
        evaluator_config: Optional[EvaluatorConfig] = None,
        max_workers: Optional[int] = None,
        initializer: Optional[Callable[[], None]] = None,
        session_timeout_sec: int = 300,
    ) -> None:
        """Constructor
        Parameters
        ----------
        platform: str
            The platform used for project generation.
        project_options: dict
            The options for the generated micro project.
        rpc_config: RPCConfig
            The rpc configuration.
        evaluator_config: EvaluatorConfig
            The evaluator configuration.
        max_workers: Optional[int] = None
            The maximum number of connections. Defaults to number of logical CPU cores.
        initializer: Optional[Callable[[], None]]
            The initializer function.
        session_timeout_sec: int
            The session timeout, including the pending time. if the number of candidates sent to runner is larger
            than the runner workers, increase the timeout.
        """
        super().__init__()
        self.platform = platform
        if project_options is None:
            project_options = {}
        self.project_options = project_options
        self.rpc_configs = rpc_configs
        # Fill in defaults for any evaluator fields the caller left unset.
        self.evaluator_config = EvaluatorConfig._normalized(evaluator_config)
        self.session_timeout_sec = session_timeout_sec
        if max_workers is None:
            max_workers = cpu_count(logical=True)
        logger.info("RPCRunner: max_workers = %d", max_workers)
        # Each measurement runs in its own subprocess so a crashing or hanging
        # candidate cannot take down the tuning loop.
        self.pool = PopenPoolExecutor(
            max_workers=max_workers,
            timeout=session_timeout_sec,
            initializer=initializer,
        )
    def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
        """Submit one asynchronous `_worker_func` measurement job per input
        and return the corresponding futures."""
        results: List[RunnerFuture] = []
        for runner_input in runner_inputs:
            future = RPCRunnerFuture(
                future=self.pool.submit(
                    _worker_func,
                    self.platform,
                    self.project_options or {},
                    self.rpc_configs,
                    self.evaluator_config,
                    # Plain str/tuple arguments only: they must pickle cleanly
                    # across the PopenPoolExecutor process boundary.
                    str(runner_input.artifact_path),
                    str(runner_input.device_type),
                    tuple(arg_info.as_json() for arg_info in runner_input.args_info),
                ),
                timeout_sec=self.session_timeout_sec,
            )
            results.append(future)  # type: ignore
        return results
def _worker_func(
    platform: str,
    project_options: dict,
    rpc_configs: List[RPCConfig],
    evaluator_config: EvaluatorConfig,
    artifact_path: str,
    device_type: str,
    args_info: T_ARG_INFO_JSON_OBJ_LIST,
) -> List[float]:
    """Measure one compiled candidate on a micro device reached through the
    RPC tracker and return the per-repeat costs in seconds."""
    # Pick one of the registered boards at random; any free server under the
    # chosen tracker key will serve the session.
    chosen_cfg = random.choice(rpc_configs)
    session_kw = {
        "device_key": chosen_cfg.tracker_key,
        "host": chosen_cfg.tracker_host,
        "port": chosen_cfg.tracker_port,
        "priority": 0,
        "timeout": 100,
    }
    loader = micro.AutoTvmModuleLoader(
        template_project_dir=micro.get_microtvm_template_projects(platform),
        project_options=project_options,
    )
    build_result = namedtuple("BuildResult", ["filename"])(artifact_path)
    with loader(session_kw, build_result) as (remote, mod):
        dev = remote.device(device_type, 0)
        f_prepare = (
            "cache_flush_cpu_non_first_arg"
            if evaluator_config.enable_cpu_cache_flush
            else ""
        )
        time_f = mod.time_evaluator(
            mod.entry_name,
            dev,
            number=evaluator_config.number,
            repeat=evaluator_config.repeat,
            min_repeat_ms=evaluator_config.min_repeat_ms,
            f_preproc=f_prepare,
        )
        # Fill the call arguments with random data directly on the device.
        random_fill = remote.get_function("tvm.contrib.random.random_fill")
        call_args = [nd.empty(info[2], info[1], dev) for info in args_info]
        for tensor in call_args:
            random_fill(tensor)
        dev.sync()
        costs = time_f(*call_args).results
    return costs
@contextmanager
def get_rpc_runner_micro(
    platform,
    options,
    evaluator_config: EvaluatorConfig = None,
    tracker_host: Optional[str] = None,
    tracker_port: Union[None, int, str] = None,
    session_timeout_sec: int = 300,
    rpc_timeout_sec: int = 10,
    serial_numbers: List[str] = None,
):
    """Spin up an RPC tracker plus one RPC server per board and yield an
    :class:`RPCRunnerMicro` wired to them; everything is torn down on exit.

    Parameters
    ----------
    platform: str
        The platform used for project generation.
    options: dict
        The options for the generated micro project.
    evaluator_config: EvaluatorConfig
        The evaluator configuration.
    tracker_host: Optional[str]
        The host url of the rpc server.
    tracker_port: Union[None, int, str]
        The TCP port to bind to
    session_timeout_sec: int
        The session timeout. if the number of candidates sent to runner is larger
        than the runner workers, increase the timeout.
    rpc_timeout_sec:
        The rpc session timeout.
    serial_numbers:
        List of board serial numbers to be used during tuning.
        For "CRT" and "QEMU" platforms the serial numbers are not used,
        but the length of the list determines the number of runner instances.
    """
    if evaluator_config is None:
        evaluator_config = EvaluatorConfig(
            number=3,
            repeat=1,
            min_repeat_ms=100,
            enable_cpu_cache_flush=False,
        )
    if tracker_host is None:
        tracker_host = "127.0.0.1"
    if tracker_port is None:
        tracker_port = 9000
    else:
        tracker_port = int(tracker_port)
    # Allow the tracker/servers to probe up to 1000 consecutive ports.
    tracker_port_end = tracker_port + 1000
    if not serial_numbers:
        # Placeholder serial for platforms (CRT/QEMU) that do not need one;
        # a single entry means a single runner instance.
        serial_numbers = ["$local$device"]
    tracker = Tracker(
        port=tracker_port,
        port_end=tracker_port_end,
        silent=True,
        reuse_addr=True,
        timeout=60,
    )
    servers = []
    rpc_configs = []
    for serial_number in serial_numbers:
        # The serial number doubles as the tracker key that matches a server
        # with the runner sessions targeting that board.
        key = serial_number
        rpc_config = RPCConfig(
            tracker_host=tracker_host,
            tracker_port=tracker_port,
            tracker_key=key,
            session_priority=0,
            session_timeout_sec=rpc_timeout_sec,
        )
        rpc_configs.append(rpc_config)
        server = Server(
            port=tracker_port,
            port_end=tracker_port_end,
            key=key,
            silent=True,
            tracker_addr=(tracker_host, tracker_port),
            reuse_addr=True,
            timeout=60,
        )
        servers.append(server)

    def terminate():
        tracker.terminate()
        for server in servers:
            server.terminate()

    # NB: do not shadow the `signal` module with the handler's first argument.
    def handle_sigint(signum, frame):  # pylint: disable=unused-argument
        terminate()
        raise KeyboardInterrupt("Received SIGINT")

    # Bug fix: remember the previously installed SIGINT handler so it can be
    # restored on exit; the original leaked `handle_SIGINT` past the context.
    prev_sigint_handler = signal.signal(signal.SIGINT, handle_sigint)
    try:
        yield RPCRunnerMicro(
            platform=platform,
            project_options=options,
            rpc_configs=rpc_configs,
            evaluator_config=evaluator_config,
            session_timeout_sec=session_timeout_sec,
        )
    finally:
        if prev_sigint_handler is not None:
            signal.signal(signal.SIGINT, prev_sigint_handler)
        terminate()
| 8,879 | 31.888889 | 113 | py |
tvm | tvm-main/python/tvm/contrib/debugger/debug_executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Graph debug runtime executes TVM debug packed functions."""
import logging
import os
import shutil
import struct
import tempfile
import tvm._ffi
from tvm._ffi.base import string_types
from tvm.contrib import graph_executor
from tvm.runtime.module import BenchmarkResult
from ...runtime.profiling import Report
from . import debug_result
_DUMP_ROOT_PREFIX = "tvmdbg_"
_DUMP_PATH_PREFIX = "_tvmdbg_"
def create(graph_json_str, libmod, device, dump_root=None):
    """Create a runtime executor module given a graph and module.

    Parameters
    ----------
    graph_json_str : str
        The graph to be deployed in json format output by graph compiler.
        The graph can contain operator(tvm_op) that points to the name
        of PackedFunc in the libmod.
    libmod : tvm.Module
        The module of the corresponding function.
    device : Device
        The device to deploy the module, can be local or remote.
    dump_root : str
        To select which folder the outputs should be kept.
        None will make a temp folder in /tmp/tvmdbg<rand_string> and does the dumping

    Returns
    -------
    graph_module : GraphModuleDebug
        Debug Runtime graph module that can be used to execute the graph.

    Raises
    ------
    ValueError
        If the debug executor was not compiled into TVM (USE_PROFILER OFF).
    """
    assert isinstance(graph_json_str, string_types)
    try:
        dev, num_rpc_dev, device_type_id = graph_executor.get_device(libmod, device)
        if num_rpc_dev == len(dev):
            # All devices are remote: fetch the creator through the RPC session.
            fcreate = dev[0]._rpc_sess.get_function("tvm.graph_executor_debug.create")
        else:
            fcreate = tvm._ffi.get_global_func("tvm.graph_executor_debug.create")
    except ValueError as err:
        # Bug fix: chain the original error so the root cause is not lost.
        raise ValueError(
            "Please set '(USE_PROFILER ON)' in " "config.cmake and rebuild TVM to enable debug mode"
        ) from err
    func_obj = fcreate(graph_json_str, libmod, *device_type_id)
    gmod = GraphModuleDebug(func_obj, dev, graph_json_str, dump_root)
    # Automatically set params if they can be extracted from the libmod
    try:
        params = libmod["get_graph_params"]()
    except (AttributeError, tvm.error.RPCError):
        # Params can not be extracted from the libmod and must be set somewhere else manually
        # Do not set params during RPC communication
        pass
    else:
        gmod.set_input(**params)
    return gmod
class GraphModuleDebug(graph_executor.GraphModule):
    """Graph debug runtime module.

    This is a debug wrapper over the TVM runtime.
    Runtime interfaces are wrapped with debug functionalities.
    Manage the debug framework to format the debug data and
    trigger the user interfaces.

    Parameters
    ----------
    module : Module
        The internal tvm module that holds the actual graph functions.
    device : Device
        The device that this module is under.
    graph_json_str : str or graph class
        Content of graph json file in string format
    dump_root : str
        To select which folder the outputs should be kept.
        None will make a temp folder in /tmp/tvmdbg<rand_string> and does the dumping
    """

    def __init__(self, module, device, graph_json_str, dump_root):
        self._dump_root = dump_root
        self._dump_path = None
        # Bind the packed functions exposed by the C++ debug executor.
        self._run_individual = module["run_individual"]
        self._run_individual_node = module["run_individual_node"]
        self._debug_get_output = module["debug_get_output"]
        self._execute_node = module["execute_node"]
        self._get_node_output = module["get_node_output"]
        self._profile = module["profile"]
        self._profile_rpc = module["profile_rpc"]
        graph_executor.GraphModule.__init__(self, module)
        self._create_debug_env(graph_json_str, device)

    def _format_device(self, device):
        # e.g. "cuda(0)" -> "CUDA:0"; used to name the per-device dump folder.
        return str(device[0]).upper().replace("(", ":").replace(")", "")

    def _ensure_dir(self, directory):
        """Create a directory if not exists

        Parameters
        ----------
        directory : str
            File path to create
        """
        if not os.path.exists(directory):
            # 0o700: dumps may contain model data; keep them private to the user.
            os.makedirs(directory, 0o700)

    def _get_dump_path(self, device):
        """Make the graph and tensor dump folder and return the path.

        Parameters
        ----------
        device : Device
            The device that this module is under.

        Returns
        -------
        path : str
            Directory path where the graph and node outputs will be stored.
        """
        # save to file
        folder_name = _DUMP_PATH_PREFIX + "device_"
        folder_name = folder_name + device.replace(":", "_")
        path = os.path.join(self._dump_root, folder_name)
        self._ensure_dir(path)
        return path

    def _remove_dump_root(self):
        """Delete the entire dump root folder, if it exists."""
        if os.path.isdir(self._dump_root):
            shutil.rmtree(self._dump_root)

    def _create_debug_env(self, graph_json, device):
        """Create UI wrapper framework to handle multiple UI frontends for tvmdbg

        Parameters
        ----------
        graph_json : json format
            json formatted NNVM graph contain list of each node's name, shape and type.
        device : Device
            The device that this module is under.
        """
        # make the dump folder if not given
        if not self._dump_root:
            self._dump_root = tempfile.mkdtemp(prefix=_DUMP_ROOT_PREFIX)
        # format the device
        device = self._format_device(device)
        # updates the dumping directories
        self._dump_path = self._get_dump_path(device)
        # init the debug dumping environment
        self.debug_datum = debug_result.DebugResult(graph_json, self._dump_path)

    def _execute_next_node(self, node_index, output_index):
        """Execute node assuming all previous nodes has been executed.
        Return the output of this node.

        Parameters
        ----------
        node_index : int
            The node index
        output_index: int
            The node output index

        Return
        ------
        output_tensors : Array<NDarray>
            Array of output tensors
        """
        # Bug fix: this previously called self._execute_next_node_get_output,
        # an attribute that is never bound in __init__, so every call raised
        # AttributeError. Use the bound "execute_node"/"get_node_output"
        # packed functions, mirroring the pattern in _run_per_layer.
        self._execute_node(node_index)
        return self._get_node_output(node_index, output_index)

    def _run_per_layer(self):
        """Execute up to each node and each debug output will be
        copied to the buffer.
        """
        output_tensors = []
        for i, node in enumerate(self.debug_datum.get_graph_nodes()):
            self._execute_node(i)
            num_outputs = self.debug_datum.get_graph_node_output_num(node)
            for j in range(num_outputs):
                logging.info(
                    "running node=%d, output_ind=%d, with node_name: %s", i, j, node["name"]
                )
                output_tensors.append(self._get_node_output(i, j))
        self.debug_datum.update_output_tensors(output_tensors)

    def _run_debug(
        self,
        number,
        repeat,
        min_repeat_ms,
        limit_zero_time_iterations,
        cooldown_interval_ms,
        repeats_to_cooldown,
    ):
        """Execute the node specified with index will be executed.
        Each debug output will be copied to the buffer
        Time consumed for each execution will be set as debug output.
        """
        # Get timing.
        self.debug_datum._time_list = self.run_individual(
            number=number,
            repeat=repeat,
            min_repeat_ms=min_repeat_ms,
            limit_zero_time_iterations=limit_zero_time_iterations,
            cooldown_interval_ms=cooldown_interval_ms,
            repeats_to_cooldown=repeats_to_cooldown,
        )
        # Get outputs.
        self._run_per_layer()

    def debug_get_output(self, node, out=None):
        """Run graph up to node and get the output to out

        Parameters
        ----------
        node : int / str
            The node index or name
        out : NDArray
            The output array container
        """
        if isinstance(node, str):
            node_index = None
            for i, graph_node in enumerate(self.debug_datum.get_graph_nodes()):
                if graph_node["name"] == node:
                    node_index = i
                    break
            else:
                # for/else: no break means the name was never found.
                raise AttributeError(f"Could not find a node named {node} in this graph.")
        elif isinstance(node, int):
            node_index = node
        else:
            raise RuntimeError("Require node index or name only.")
        self._debug_get_output(node_index, out)

    # pylint: disable=arguments-differ
    def run(
        self,
        number=10,
        repeat=1,
        min_repeat_ms=1,
        limit_zero_time_iterations=100,
        cooldown_interval_ms=0,
        repeats_to_cooldown=1,
        sort_by_time=True,
        **input_dict,
    ):
        """Run forward execution of the graph with debug

        Parameters
        ----------
        number: int, optional
            The number of times to run this function for taking average.
            We call these runs as one `repeat` of measurement.
        repeat: int, optional
            The number of times to repeat the measurement.
            In total, the function will be invoked (1 + number x repeat) times,
            where the first one is warm up and will be discarded.
            The returned result contains `repeat` costs,
            each of which is an average of `number` costs.
        min_repeat_ms: int, optional
            The minimum duration of one `repeat` in milliseconds.
            By default, one `repeat` contains `number` runs. If this parameter is set,
            the parameters `number` will be dynamically adjusted to meet the
            minimum duration requirement of one `repeat`.
            i.e., When the run time of one `repeat` falls below this time, the `number` parameter
            will be automatically increased.
        limit_zero_time_iterations: int, optional
            The maximum number of repeats when measured time is equal to 0.
            It helps to avoid hanging during measurements.
        cooldown_interval_ms: int, optional
            The cooldown interval in milliseconds between the number of repeats defined by
            `repeats_to_cooldown`.
        repeats_to_cooldown: int, optional
            The number of repeats before the cooldown is activated.
        sort_by_time: bool, optional
            Whether to sort the debug output by time.
        input_dict : dict of str to NDArray
            List of input values to be feed to
        """
        if input_dict:
            self.set_input(**input_dict)
        # Step 1. Execute the graph
        self._run_debug(
            number=number,
            repeat=repeat,
            min_repeat_ms=min_repeat_ms,
            limit_zero_time_iterations=limit_zero_time_iterations,
            cooldown_interval_ms=cooldown_interval_ms,
            repeats_to_cooldown=repeats_to_cooldown,
        )
        # Step 2. Dump the output tensors to the dump folder
        self.debug_datum.dump_output_tensor()
        # Step 3. Dump the Chrome trace to the dump folder
        self.debug_datum.dump_chrome_trace()
        # Step 4. Display the collected information
        self.debug_datum.display_debug_result(sort_by_time)

    def run_individual(
        self,
        number,
        repeat=1,
        min_repeat_ms=0,
        limit_zero_time_iterations=100,
        cooldown_interval_ms=0,
        repeats_to_cooldown=1,
    ):
        """Run each operation in the graph and get the time per op for all ops.

        Parameters
        ----------
        number: int
            The number of times to run this function for taking average.
            We call these runs as one `repeat` of measurement.
        repeat: int, optional
            The number of times to repeat the measurement.
            In total, the function will be invoked (1 + number x repeat) times,
            where the first one is warm up and will be discarded.
            The returned result contains `repeat` costs,
            each of which is an average of `number` costs.
        min_repeat_ms: int, optional
            The minimum duration of one `repeat` in milliseconds.
            By default, one `repeat` contains `number` runs. If this parameter is set,
            the parameters `number` will be dynamically adjusted to meet the
            minimum duration requirement of one `repeat`.
            i.e., When the run time of one `repeat` falls below this time, the `number` parameter
            will be automatically increased.
        limit_zero_time_iterations: int, optional
            The maximum number of repeats when measured time is equal to 0.
            It helps to avoid hanging during measurements.
        cooldown_interval_ms: int, optional
            The cooldown interval in milliseconds between the number of repeats defined by
            `repeats_to_cooldown`.
        repeats_to_cooldown: int, optional
            The number of repeats before the cooldown is activated.

        Returns
        -------
        A 2-dimensional array where the dimensions are: the index of the operation and
        the repeat of the measurement.
        """
        res = self._run_individual(
            number,
            repeat,
            min_repeat_ms,
            limit_zero_time_iterations,
            cooldown_interval_ms,
            repeats_to_cooldown,
        )
        # The C++ side returns a packed binary blob:
        # one int64 node count, then `repeat` float64 costs per node.
        results = []
        offset = 0
        format_size = "@q"
        (nodes_count,) = struct.unpack_from(format_size, res, offset)
        offset += struct.calcsize(format_size)
        format_data = "@" + repeat * "d"
        for _ in range(0, nodes_count):
            ret = struct.unpack_from(format_data, res, offset)
            offset += struct.calcsize(format_data)
            results.append([*ret])
        return results

    def run_individual_node(
        self,
        index,
        number=10,
        repeat=1,
        min_repeat_ms=0,
        limit_zero_time_iterations=100,
        cooldown_interval_ms=0,
        repeats_to_cooldown=1,
    ):
        """Benchmark a single node in the serialized graph.

        This does not do any data transfers and uses arrays already on the device.

        Parameters
        ----------
        index : int
            The index of the node, see `self.debug_datum.get_graph_nodes`
        number: int
            The number of times to run this function for taking average.
            We call these runs as one `repeat` of measurement.
        repeat: int, optional
            The number of times to repeat the measurement.
            In total, the function will be invoked (1 + number x repeat) times,
            where the first one is warm up and will be discarded.
            The returned result contains `repeat` costs,
            each of which is an average of `number` costs.
        min_repeat_ms : int, optional
            The minimum duration of one `repeat` in milliseconds.
            By default, one `repeat` contains `number` runs. If this parameter is set,
            the parameters `number` will be dynamically adjusted to meet the
            minimum duration requirement of one `repeat`.
            i.e., When the run time of one `repeat` falls below this time, the `number` parameter
            will be automatically increased.
        limit_zero_time_iterations: int, optional
            The maximum number of repeats when measured time is equal to 0.
            It helps to avoid hanging during measurements.
        cooldown_interval_ms: int, optional
            The cooldown interval in milliseconds between the number of repeats defined by
            `repeats_to_cooldown`.
        repeats_to_cooldown: int, optional
            The number of repeats before the cooldown is activated.

        Returns
        -------
        A module BenchmarkResult
        """
        # Results are returned as serialized strings which we deserialize
        res = self._run_individual_node(
            index,
            number,
            repeat,
            min_repeat_ms,
            limit_zero_time_iterations,
            cooldown_interval_ms,
            repeats_to_cooldown,
        )
        fmt = "@" + ("d" * repeat)
        results = struct.unpack(fmt, res)
        return BenchmarkResult(list(results))

    def profile(self, collectors=None, **input_dict):
        """Run forward execution of the graph and collect overall and per-op
        performance metrics.

        Parameters
        ----------
        collectors : Optional[Sequence[MetricCollector]]
            Extra metrics to collect. If profiling over RPC, collectors must be `None`.
        input_dict : dict of str to NDArray
            List of input values to be feed to

        Return
        ------
        timing_results : str
            Per-operator and whole graph timing results in a table format.
        """
        if input_dict:
            self.set_input(**input_dict)
        if self.module.type_key == "rpc":
            # We cannot serialize MetricCollectors over RPC
            assert collectors is None, "Profiling with collectors is not supported over RPC"
            return Report.from_json(self._profile_rpc())
        return self._profile(collectors)

    def exit(self):
        """Exits the dump folder and all its contents"""
        self._remove_dump_root()
| 18,213 | 34.643836 | 100 | py |
tvm | tvm-main/python/tvm/contrib/debugger/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
tvm | tvm-main/python/tvm/contrib/debugger/debug_runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Deprecated Python API for DebugExecutor."""
import warnings
from . import debug_executor
def create(*args, **kwargs):
    """Deprecated alias for :func:`debug_executor.create`.

    Emits a deprecation warning and forwards all arguments unchanged.
    """
    # Bug fix: use DeprecationWarning (default is UserWarning) and point the
    # warning at the caller's line via stacklevel=2.
    warnings.warn(
        "This function has been moved to tvm.contrib.graph_executor and will be removed "
        "in the next TVM release",
        DeprecationWarning,
        stacklevel=2,
    )
    return debug_executor.create(*args, **kwargs)
| 1,109 | 36 | 89 | py |
tvm | tvm-main/python/tvm/contrib/debugger/debug_result.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=pointless-exception-statement, unnecessary-list-index-lookup
"""Graph debug results dumping class."""
import collections
import json
import os
import numpy as np
import tvm
GRAPH_DUMP_FILE_NAME = "_tvmdbg_graph_dump.json"
CHROME_TRACE_FILE_NAME = "_tvmdbg_execution_trace.json"
ChromeTraceEvent = collections.namedtuple("ChromeTraceEvent", ["ts", "tid", "pid", "name", "ph"])
class DebugResult(object):
"""Graph debug data module.
Data dump module manage all the debug data formatting.
Output data and input graphs are formatted and dumped to file.
Frontend read these data and graph for visualization.
Parameters
----------
graph_json : str
The graph to be deployed in json format output by graph compiler. Each operator (tvm_op)
in the graph will have a one to one mapping with the symbol in libmod which is used
to construct a "PackedFunc" .
dump_path : str
Output data path is read/provided from frontend
"""
    def __init__(self, graph_json, dump_path):
        """Parse the graph JSON, set up dump state, and write the graph dump file."""
        # Root folder where the graph json, tensors and traces get written.
        self._dump_path = dump_path
        # Flat list of per-output tensors, filled later via update_output_tensors().
        self._output_tensor_list = []
        # Per-node timing lists; populated externally by the debug executor.
        self._time_list = []
        json_obj = self._parse_graph(graph_json)
        # dump the json information
        self._dump_graph_json(json_obj)
def _parse_graph(self, graph_json):
"""Parse and extract the JSON graph and update the nodes, shapes and dltype.
Parameters
----------
graph_json : str or graph class
The graph to be deployed in json format output by JSON graph.
"""
json_obj = json.loads(graph_json)
self._nodes_list = json_obj["nodes"]
self._shapes_list = json_obj["attrs"]["shape"]
self._dtype_list = json_obj["attrs"]["dltype"]
self._update_graph_json()
return json_obj
def _update_graph_json(self):
"""update the nodes_list with name, shape and data type,
for temporarily storing the output.
"""
eid = 0
for node in self._nodes_list:
input_list = []
if node["op"] == "null":
node["attrs"] = {}
node["op"] = "param"
num_outputs = 1
elif node["op"] == "tvm_op":
for input_node in node["inputs"]:
input_list.append(self._nodes_list[input_node[0]]["name"])
node["op"] = node["attrs"]["func_name"]
num_outputs = int(node["attrs"]["num_outputs"])
else:
raise ValueError("")
node["inputs"] = input_list
dtype = str("type: " + self._dtype_list[1][eid])
node["attrs"].update({"T": dtype})
node["shape"] = self._shapes_list[1][eid]
eid += num_outputs
def _cleanup_tensors(self):
"""Remove the tensor dump file (graph wont be removed)"""
for filename in os.listdir(self._dump_path):
if os.path.isfile(filename) and not filename.endswith(".json"):
os.remove(filename)
def get_graph_nodes(self):
"""Return the nodes list"""
return self._nodes_list
def get_graph_node_shapes(self):
"""Return the nodes shapes list"""
return self._shapes_list
def get_graph_node_output_num(self, node):
"""Return the number of outputs of a node"""
return 1 if node["op"] == "param" else int(node["attrs"]["num_outputs"])
def get_graph_node_dtypes(self):
"""Return the nodes dtype list"""
return self._dtype_list
def get_output_tensors(self):
"""Get the output tensors of each operation in numpy format"""
eid = 0
output_tensors = {}
for i, node in enumerate(self._nodes_list):
num_outputs = self.get_graph_node_output_num(node)
for j in range(num_outputs):
# the node name is not unique, so we need a consistent
# indexing based on the list ordering in the nodes
key = f"{node['name']}____topo-index:{i}____output-num:{j}"
output_tensors[key] = self._output_tensor_list[eid]
eid += 1
return output_tensors
def update_output_tensors(self, tensors):
"""Update output tensors list
Parameters
----------
tensors : list[NDArray]
"""
if not isinstance(tensors, list):
AttributeError("tensors with incorrect type.")
for output_array in tensors:
self._output_tensor_list.append(output_array)
    def dump_output_tensor(self):
        """Dump the outputs to a temporary folder, the tensors are in numpy format"""
        # cleanup existing tensors before dumping
        self._cleanup_tensors()
        output_tensors = self.get_output_tensors()
        # NOTE(review): `save_tensors` is not defined in this view of the file —
        # presumably a module-level helper defined elsewhere that serializes the
        # name->tensor dict into the .params byte format; confirm.
        with open(os.path.join(self._dump_path, "output_tensors.params"), "wb") as param_f:
            param_f.write(save_tensors(output_tensors))
def dump_chrome_trace(self):
    """Write the per-op timing trace in the Chrome trace.json format."""

    def to_microseconds(seconds):
        return seconds * 10**6

    # Cumulative start offset of every node: node i begins once the mean
    # runtimes of nodes 0..i-1 have elapsed.
    starting_times = np.zeros(len(self._time_list) + 1)
    starting_times[1:] = np.cumsum([np.mean(times) for times in self._time_list])

    events = []
    for node, times, start in zip(self._nodes_list, self._time_list, starting_times):
        events.append(
            ChromeTraceEvent(
                ts=to_microseconds(start),
                tid=1,
                pid=1,
                ph="B",
                name=node["name"],
            )
        )
        events.append(
            ChromeTraceEvent(
                # Use start + duration instead of end to ensure precise timings.
                ts=to_microseconds(np.mean(times) + start),
                tid=1,
                pid=1,
                ph="E",
                name=node["name"],
            )
        )

    result = dict(displayTimeUnit="ns", traceEvents=[e._asdict() for e in events])
    with open(os.path.join(self._dump_path, CHROME_TRACE_FILE_NAME), "w") as trace_f:
        json.dump(result, trace_f)
def _dump_graph_json(self, graph):
    """Serialize the json-formatted graph to the graph dump file.

    Parameters
    ----------
    graph : json format
        JSON-serializable graph description holding each node's name,
        shape and type.
    """
    target = os.path.join(self._dump_path, GRAPH_DUMP_FILE_NAME)
    with open(target, "w") as outfile:
        json.dump(graph, outfile, indent=4, sort_keys=False)
def get_debug_result(self, sort_by_time=True):
    """Render the per-node profiling results as a column-aligned table.

    Parameters
    ----------
    sort_by_time : bool
        If True, rows are ordered by descending mean runtime so the most
        expensive ops come first.

    Returns
    -------
    str
        One row per op output (parameters are skipped), followed by a
        trailing "Total_time" summary row.
    """
    header = [
        "Node Name",
        "Ops",
        "Time(us)",
        "Time(%)",
        "Shape",
        "Inputs",
        "Outputs",
        "Measurements(us)",
    ]
    lines = [
        "---------",
        "---",
        "--------",
        "-------",
        "-----",
        "------",
        "-------",
        "----------------",
    ]
    eid = 0
    data = []
    total_time = sum([np.mean(time) for time in self._time_list])
    for node, time in zip(self._nodes_list, self._time_list):
        time_mean = np.mean(time)
        num_outputs = self.get_graph_node_output_num(node)
        for j in range(num_outputs):
            op = node["op"]
            if node["op"] == "param":
                # Parameters consume an entry id but are not profiled.
                eid += 1
                continue
            name = node["name"]
            shape = str(self._output_tensor_list[eid].shape)
            time_us = round(time_mean * 1e6, 3)
            time_percent = round(((time_mean / total_time) * 100), 3)
            inputs = str(node["attrs"]["num_inputs"])
            outputs = str(node["attrs"]["num_outputs"])
            measurements = str([round(repeat_data * 1e6, 3) for repeat_data in time])
            node_data = [name, op, time_us, time_percent, shape, inputs, outputs, measurements]
            data.append(node_data)
            eid += 1
    if sort_by_time:
        # Sort on the basis of execution time. Prints the most expensive ops in the start.
        data = sorted(data, key=lambda x: x[2], reverse=True)
    # Insert a row for total time at the end. Exactly one cell per header
    # column (the original appended a stray 9th cell that str.format
    # silently ignored).
    rounded_total_time_us = round(total_time * 1e6, 3)
    data.append(["Total_time", "-", rounded_total_time_us, "-", "-", "-", "-", "-"])
    # Build one "{:<N}" column spec per header, wide enough for the widest cell.
    fmt = ""
    for i, _ in enumerate(header):
        max_len = len(header[i])
        for j, _ in enumerate(data):
            item_len = len(str(data[j][i]))
            if item_len > max_len:
                max_len = item_len
        fmt = fmt + "{:<" + str(max_len + 2) + "}"
    log = [fmt.format(*header)]
    log.append(fmt.format(*lines))
    for row in data:
        log.append(fmt.format(*row))
    return "\n".join(log)
def display_debug_result(self, sort_by_time=True):
    """Print the profiling table produced by get_debug_result()."""
    table = self.get_debug_result(sort_by_time)
    print(table)
def save_tensors(params):
    """Serialize a str -> NDArray dictionary to binary bytes.

    The result can be loaded back by the GraphModule "load_params" API.

    Parameters
    ----------
    params : dict of str to NDArray
        The parameter dictionary.

    Returns
    -------
    param_bytes: bytearray
        Serialized parameters.
    """
    serializer = tvm.get_global_func("tvm.relay._save_param_dict")
    return serializer(params)
| 10,681 | 34.845638 | 99 | py |
tvm | tvm-main/python/tvm/contrib/cuda_graph/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
tvm | tvm-main/python/tvm/contrib/cuda_graph/cuda_graph_executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Graph executor with CUDA Graph"""
import tvm._ffi
from tvm._ffi.base import string_types
from tvm.contrib import graph_executor
def create(graph_json_str, libmod, device):
    """Create a CUDA-graph-enabled runtime executor module.

    Parameters
    ----------
    graph_json_str : str
        The graph to be deployed in json format output by json graph. The
        graph can contain operator(tvm_op) that points to the name of
        PackedFunc in the libmod.
    libmod : tvm.runtime.Module
        The module of the corresponding function.
    device : Device
        The device to deploy the module, only supports CUDA GPU.

    Returns
    -------
    graph_module : GraphModuleCudaGraph
        CUDA graph executor module that can be used to execute the graph.

    Note
    ----
    See also :py:class:`tvm.contrib.cuda_graph.cuda_graph_executor.GraphModuleCudaGraph`
    for examples to directly construct a GraphModuleCudaGraph from an
    exported relay compiled library.
    """
    assert isinstance(graph_json_str, string_types)
    # Same packed function name whether resolved over RPC or locally.
    factory_name = "tvm.graph_executor_cuda_graph.create"
    try:
        dev, num_rpc_dev, device_type_id = graph_executor.get_device(libmod, device)
        if num_rpc_dev == len(dev):
            fcreate = dev[0]._rpc_sess.get_function(factory_name)
        else:
            fcreate = tvm._ffi.get_global_func(factory_name)
    except ValueError:
        raise ValueError(
            "To enable CUDA graph support (experimental), please set "
            "'(USE_GRAPH_EXECUTOR_CUGRAPH ON)' in config.cmake and rebuild TVM"
        )
    return GraphModuleCudaGraph(fcreate(graph_json_str, libmod, *device_type_id))
class GraphModuleCudaGraph(graph_executor.GraphModule):
    """CUDA graph executor module.

    This is a CUDA graph executor wrapper over the TVM runtime.
    Runtime interfaces are wrapped with CUDA graph functionalities.

    Parameters
    ----------
    module : Module
        The internal tvm module that holds the actual graph functions.
    """

    def __init__(self, module):
        # Packed functions exposed by the underlying CUDA-graph executor.
        self._start_capture = module["start_capture"]
        self._end_capture = module["end_capture"]
        self._run_cuda_graph = module["run_cuda_graph"]
        # Becomes True once capture_cuda_graph() has instantiated the graph.
        self._cuda_graph_captured = False
        graph_executor.GraphModule.__init__(self, module)

    def capture_cuda_graph(self):
        """Capture a CUDA graph for tvm_op graph

        This should be called before run_cuda_graph() to capture and
        instantiate a CUDA graph instance.
        """
        self._run()  # call cuModuleLoadData before cudaStream API
        self._start_capture()
        self._run()
        self._end_capture()
        self._cuda_graph_captured = True

    def run_cuda_graph(self):
        """Run the CUDA graph for tvm_op graph

        Run the captured CUDA graph instance instead of the
        for-loop kernel launch of default graph executor
        """
        self._run_cuda_graph()

    def run(self, **input_dict):
        """A run wrapper for graph capture / launch, user can just
        change default graph executor to cuda graph executor, and
        the first call will capture a cuda graph for future launch

        Parameters
        ----------
        input_dict: dict of str to NDArray
            List of input values to be feed to
        """
        if input_dict:
            self.set_input(**input_dict)
        # First call captures (which also executes); later calls replay.
        if not self._cuda_graph_captured:
            self.capture_cuda_graph()
        else:
            self._run_cuda_graph()

    def debug_get_output(self, node, out):
        """Run graph up to node and get the output to out

        Parameters
        ----------
        node : int / str
            The node index or name

        out : NDArray
            The output array container
        """
        # Per-node inspection is not supported with CUDA graph capture.
        raise NotImplementedError("Please use debugger.debug_executor as graph_executor instead.")
| 4,685 | 33.711111 | 98 | py |
tvm | tvm-main/python/tvm/topi/einsum.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,consider-using-enumerate,redefined-outer-name
"""Einsum operator"""
from . import cpp
def einsum(subscripts, *operand):
    """Evaluate the Einstein summation convention on the operands.

    Parameters
    ----------
    subscripts : string
        Comma-separated list of subscript labels. A classical (implicit)
        Einstein summation is performed unless an explicit output form is
        given after the '->' indicator.

    operand : tuple of tvm.te.Tensor
        The tensors for the operation. Unlike numpy, tvm's einsum needs the
        tensors wrapped in an extra pair of brackets, e.g.
        ``topi.einsum("ij, jk -> ik", (A, B))``.

    Returns
    -------
    out : tvm.te.Tensor
        The calculation based on the Einstein summation convention.
    """
    return cpp.einsum(subscripts, operand)
| 1,772 | 38.4 | 92 | py |
tvm | tvm-main/python/tvm/topi/reduction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin,consider-using-enumerate,no-member
"""Reduce operators"""
from __future__ import absolute_import as _abs
from . import cpp
def _get_real_axis(ndim, axis):
if axis is None:
real_axis = list(range(ndim))
else:
if isinstance(axis, int):
axis = [axis]
else:
assert isinstance(axis, (list, tuple))
real_axis = []
for ele in axis:
if ele < 0:
ele += ndim
if ele >= ndim:
raise ValueError(
f"{ele} exceeds the maximum dimension {ndim}. Received axis={axis}"
)
real_axis.append(ele)
real_axis.sort()
real_axis = list(set(real_axis)) # Remove the duplicates
return real_axis
def sum(data, axis=None, keepdims=False):
    """Sum the elements of ``data`` over the given axis or axes.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input tensor.

    axis : None or int or tuple of int
        Axis (or axes) to reduce over. None reduces every element; negative
        values count from the last axis.

    keepdims : bool
        If True, keep each reduced axis as a size-1 dimension so the result
        still broadcasts correctly against the input.

    Returns
    -------
    ret : tvm.te.Tensor
    """
    return cpp.sum(data, axis, keepdims)
def all(data, axis=None, keepdims=False):
    """Logical AND of the elements of ``data`` over the given axis or axes.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input boolean tensor.

    axis : None or int or tuple of int
        Axis (or axes) to reduce over. None reduces every element; negative
        values count from the last axis.

    keepdims : bool
        If True, keep each reduced axis as a size-1 dimension so the result
        still broadcasts correctly against the input.

    Returns
    -------
    ret : tvm.te.Tensor
    """
    return cpp.all(data, axis, keepdims)
def any(data, axis=None, keepdims=False):
    """Logical OR of the elements of ``data`` over the given axis or axes.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input boolean tensor.

    axis : None or int or tuple of int
        Axis (or axes) to reduce over. None reduces every element; negative
        values count from the last axis.

    keepdims : bool
        If True, keep each reduced axis as a size-1 dimension so the result
        still broadcasts correctly against the input.

    Returns
    -------
    ret : tvm.te.Tensor
    """
    return cpp.any(data, axis, keepdims)
def max(data, axis=None, keepdims=False):
    """Maximum of the elements of ``data`` over the given axis or axes.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input tensor.

    axis : None or int or tuple of int
        Axis (or axes) to reduce over. None reduces every element; negative
        values count from the last axis.

    keepdims : bool
        If True, keep each reduced axis as a size-1 dimension so the result
        still broadcasts correctly against the input.

    Returns
    -------
    ret : tvm.te.Tensor
    """
    return cpp.max(data, axis, keepdims)
def min(data, axis=None, keepdims=False):
    """Minimum of the elements of ``data`` over the given axis or axes.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input tensor.

    axis : None or int or tuple of int
        Axis (or axes) to reduce over. None reduces every element; negative
        values count from the last axis.

    keepdims : bool
        If True, keep each reduced axis as a size-1 dimension so the result
        still broadcasts correctly against the input.

    Returns
    -------
    ret : tvm.te.Tensor
    """
    return cpp.min(data, axis, keepdims)
def argmax(data, axis=None, keepdims=False, select_last_index=False):
    """Indices of the maximum values of ``data`` along an axis.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input tensor.

    axis : None or int or tuple of int
        Axis (or axes) to reduce over. None reduces every element; negative
        values count from the last axis.

    keepdims : bool
        If True, keep each reduced axis as a size-1 dimension so the result
        still broadcasts correctly against the input.

    select_last_index : bool
        If the maximum value appears multiple times, pick the last index
        instead of the first.

    Returns
    -------
    ret : tvm.te.Tensor
    """
    return cpp.argmax(data, axis, keepdims, select_last_index)
def argmin(data, axis=None, keepdims=False, select_last_index=False):
    """Indices of the minimum values of ``data`` along an axis.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input tensor.

    axis : None or int or tuple of int
        Axis (or axes) to reduce over. None reduces every element; negative
        values count from the last axis.

    keepdims : bool
        If True, keep each reduced axis as a size-1 dimension so the result
        still broadcasts correctly against the input.

    select_last_index : bool
        If the minimum value appears multiple times, pick the last index
        instead of the first.

    Returns
    -------
    ret : tvm.te.Tensor
    """
    return cpp.argmin(data, axis, keepdims, select_last_index)
def prod(data, axis=None, keepdims=False):
    """Product of the elements of ``data`` over the given axis or axes.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input tensor.

    axis : None or int or tuple of int
        Axis (or axes) to reduce over. None reduces every element; negative
        values count from the last axis.

    keepdims : bool
        If True, keep each reduced axis as a size-1 dimension so the result
        still broadcasts correctly against the input.

    Returns
    -------
    ret : tvm.te.Tensor
    """
    return cpp.prod(data, axis, keepdims)
def collapse_sum(data, target_shape):
    """Sum ``data`` down to ``target_shape`` (the reverse of broadcasting).

    Intended as the backward operator of topi broadcast operators during
    automatic differentiation: ``data`` is expected to be the result of
    broadcasting some tensor of ``target_shape``, so the two shapes must
    follow broadcast rules. Checking axes from right to left, an axis of
    ``data`` is summed out when it either does not exist in
    ``target_shape``, or is larger than 1 while ``target_shape`` has 1
    there.

    Parameters
    ----------
    data : tvm.te.Tensor
        The input tensor.

    target_shape : Tuple[int]
        The shape to collapse to.

    Returns
    -------
    ret : tvm.te.Tensor
        The result tensor after summation.
    """
    return cpp.collapse_sum(data, target_shape)
| 9,360 | 32.195035 | 98 | py |
tvm | tvm-main/python/tvm/topi/unique.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Unique operator"""
from tvm import te, tir
from ..te import hybrid
from .scan import cumsum
from .sort import sort, argsort
def _calc_adjacent_diff_ir(data, output, binop=tir.Sub):
    """Emit low-level TIR computing the adjacent difference of a 1-D buffer.

    ``output[0]`` is set to 0 and ``output[i] = binop(data[i], data[i - 1])``
    for ``0 < i < len(data)``.

    Parameters
    ----------
    data : Buffer
        Input 1-D buffer.

    output : Buffer
        Destination buffer of the same shape as ``data``.

    binop : function, optional
        Binary associative op combining neighbouring elements; it takes two
        TIR expressions and produces a new one. Defaults to ``tvm.tir.Sub``.
    """
    builder = tir.ir_builder.create()
    src = builder.buffer_ptr(data)
    dst = builder.buffer_ptr(output)
    with builder.for_range(0, data.shape[0], kind="parallel") as idx:
        with builder.if_scope(idx == 0):
            dst[0] = 0
        with builder.else_scope():
            dst[idx] = tir.Cast(output.dtype, binop(src[idx], src[idx - 1]))
    return builder.get()
def _calc_adjacent_diff(data, out_dtype="int32", binop=tir.Sub):
    """Compute the adjacent difference of a 1-D tensor via an extern op.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input 1-D tensor.

    out_dtype : str
        The output tensor data type.

    binop : function, optional
        Binary associative op combining neighbouring elements; defaults to
        ``tvm.tir.Sub``.

    Returns
    -------
    output : tvm.te.Tensor
        1-D tensor with ``output[0] = 0`` and
        ``output[i] = binop(data[i], data[i - 1])`` for ``0 < i < len(data)``.
    """

    def _emit(ins, outs):
        return _calc_adjacent_diff_ir(ins[0], outs[0], binop=binop)

    return te.extern(
        [data.shape],
        [data],
        _emit,
        dtype=[out_dtype],
        name="_calc_adjacent_diff",
        tag="_calc_adjacent_diff_cpu",
    )
@hybrid.script
def _calc_num_unique(inc_scan):
    """Return a 1-element tensor holding the number of unique values.

    The last entry of the inclusive scan is the zero-based index of the
    final unique element, so the count is that value plus one.
    """
    output = output_tensor((1,), "int32")
    output[0] = inc_scan[inc_scan.shape[0] - 1] + int32(1)
    return output
def _calc_unique_ir(
    data, argsorted_indices, inc_scan, index_converter, unique_elements, inverse_indices, counts
):
    """Low level IR to calculate unique elements, inverse indices, and counts (optional) of
    unique elements of 1-D array.

    Parameters
    ----------
    data : Buffer
        Input 1-D Buffer.

    argsorted_indices : Buffer
        A buffer that stores the argsorted indices of the input data.

    inc_scan : Buffer
        A buffer that stores the inclusive scan of the binary tir.NE adjacent difference
        of the sorted data.

    index_converter (optional) : Buffer
        An optional index converter that transforms the unique element index
        such that new_idx = index_converter[old_idx]. Pass None to keep the
        sorted-order indices.

    unique_elements : Buffer
        A buffer that stores the unique elements.

    inverse_indices : Buffer
        A buffer that stores the index of each input data element in the unique element array.

    counts (optional) : Buffer
        A buffer that stores the count of each unique element; pass None to skip counts.
    """
    ib = tir.ir_builder.create()
    data_ptr = ib.buffer_ptr(data)
    argsorted_indices_ptr = ib.buffer_ptr(argsorted_indices)
    inc_scan_ptr = ib.buffer_ptr(inc_scan)
    unique_elements_ptr = ib.buffer_ptr(unique_elements)
    inverse_indices_ptr = ib.buffer_ptr(inverse_indices)

    index_converter_ptr = None
    if isinstance(index_converter, tir.Buffer):
        index_converter_ptr = ib.buffer_ptr(index_converter)

    if isinstance(counts, tir.Buffer):
        counts_ptr = ib.buffer_ptr(counts)
        # use indices_ptr as a tmp buffer to store tids with inc_scan[tid] != inc_scan[tid-1]
        unique_seq_indices_ptr = ib.buffer_ptr(inverse_indices)

    data_length = data.shape[0]

    # if need to return counts
    if isinstance(counts, tir.Buffer):
        num_unique = inc_scan_ptr[inc_scan.shape[0] - 1] + 1
        num_elements = data.shape[0]
        # Record, for each unique value, the (exclusive) end position of its
        # run in the sorted data; the count is the difference of neighbours.
        unique_seq_indices_ptr[num_unique - 1] = num_elements
        with ib.new_scope():
            with ib.for_range(0, data_length, kind="parallel") as i:
                with ib.if_scope(i > 0):
                    with ib.if_scope(inc_scan_ptr[i] != inc_scan_ptr[i - 1]):
                        unique_seq_indices_ptr[inc_scan_ptr[i] - 1] = i
        with ib.new_scope():
            with ib.for_range(0, num_unique, kind="parallel") as i:
                unique_idx = i if not index_converter_ptr else index_converter_ptr[i]
                with ib.if_scope(i == 0):
                    counts_ptr[unique_idx] = unique_seq_indices_ptr[i]
                with ib.else_scope():
                    counts_ptr[unique_idx] = (
                        unique_seq_indices_ptr[i] - unique_seq_indices_ptr[i - 1]
                    )
    # calculate unique elements and inverse indices
    with ib.new_scope():
        with ib.for_range(0, data_length, kind="parallel") as i:
            data_idx = argsorted_indices_ptr[i]
            unique_idx = (
                inc_scan_ptr[i] if not index_converter_ptr else index_converter_ptr[inc_scan_ptr[i]]
            )
            inverse_indices_ptr[data_idx] = unique_idx
            # Only the first element of each equal-value run writes the
            # unique element itself.
            with ib.if_scope(i == 0):
                unique_elements_ptr[unique_idx] = data_ptr[data_idx]
            with ib.else_scope():
                with ib.if_scope(inc_scan_ptr[i] != inc_scan_ptr[i - 1]):
                    unique_elements_ptr[unique_idx] = data_ptr[data_idx]
    return ib.get()
@hybrid.script
def _calc_first_occurence(argsorted_indices, inc_scan):
    """Hybrid script to calculate the first occurrence of each unique element in the input data.

    Parameters
    ----------
    argsorted_indices : tvm.te.Tensor
        A tensor that stores the argsorted indices of the input data.

    inc_scan : tvm.te.Tensor
        A tensor that stores the inclusive scan of the binary tir.NE adjacent difference
        of the sorted data.

    Returns
    -------
    first_occurence : tvm.te.Tensor
        A tensor that stores the first occurrence of each unique element in the input data.
        Slots past the number of unique elements keep the sentinel value
        (the input length).
    """
    first_occurence = output_tensor(argsorted_indices.shape, "int32")
    for i in parallel(argsorted_indices.shape[0]):
        first_occurence[i] = argsorted_indices.shape[0]
    for i in parallel(argsorted_indices.shape[0]):
        if i == 0 or inc_scan[i] != inc_scan[i - 1]:
            first_occurence[inc_scan[i]] = argsorted_indices[i]
    return first_occurence
def unique(data, is_sorted=True, return_counts=False):
    """
    Find the unique elements of a 1-D tensor. Please note `output` and `counts` are all padded to
    have the same length of `data` and element with index >= num_unique[0] has undefined value.

    Parameters
    ----------
    data : tvm.te.Tensor
        A 1-D tensor of integers.

    is_sorted : bool
        Whether to sort the unique elements in ascending order before returning as output.

    return_counts : bool
        Whether to return the count of each unique element.

    Returns
    -------
    unique : tvm.te.Tensor
        A 1-D tensor containing the unique elements of the input data tensor. The same size as
        the input data. If there are less unique elements than input data, the end of the tensor
        is padded with zeros.

    indices : tvm.te.Tensor
        A 1-D tensor. The same size as output. For each entry in output, it contains
        the index of its first occurrence in the input data. The end of the tensor is padded
        with the length of the input data.

    inverse_indices : tvm.te.Tensor
        A 1-D tensor. For each entry in data, it contains the index of that data element in
        the unique array. (Note that inverse_indices is very similar to indices if output is not
        sorted.)

    num_unique : tvm.te.Tensor
        A 1-D tensor with size=1 containing the number of unique elements in the input data tensor.

    counts (optional) : tvm.te.Tensor
        A 1-D tensor containing the count of each unique element in the output.

    Examples
    --------
    .. code-block:: python

        [output, indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], False, False)
        output          =  [4, 5, 1, 2, 3, _, _, _]
        indices         =  [0, 1, 2, 3, 4, _, _, _]
        inverse_indices =  [0, 1, 2, 3, 4, 4, 0, 1]
        num_unique      =  [5]

        [output, indices, num_unique, counts] = unique([4, 5, 1, 2, 3, 3, 4, 5], False, True)
        output          =  [4, 5, 1, 2, 3, _, _, _]
        indices         =  [0, 1, 2, 3, 4, _, _, _]
        inverse_indices =  [0, 1, 2, 3, 4, 4, 0, 1]
        num_unique      =  [5]
        counts          =  [2, 2, 1, 1, 2, _, _, _]

        [output, indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], True)
        output          =  [1, 2, 3, 4, 5, _, _, _]
        indices         =  [2, 3, 4, 0, 1, _, _, _]
        inverse_indices =  [3, 4, 0, 1, 2, 2, 3, 4]
        num_unique      =  [5]
    """
    # Sort the data; equal values become adjacent runs.
    sorted_data = sort(data)
    argsorted_indices = argsort(data, dtype="int32")
    # adjacent difference: 1 where a new run (unique value) starts
    adjacent_diff = _calc_adjacent_diff(sorted_data, "int32", tir.NE)
    # inclusive scan: maps each sorted position to its unique-value index
    inc_scan = cumsum(adjacent_diff, dtype="int32", exclusive=0)
    # total number of unique elements
    num_unique_elements = _calc_num_unique(inc_scan)
    # prepare outputs
    if return_counts:
        out_data_shape = [data.shape] * 3
        out_dtypes = [data.dtype, "int32", "int32"]
    else:
        out_data_shape = [data.shape] * 2
        out_dtypes = [data.dtype, "int32"]
    # prepare inputs and fcompute
    first_occurence = _calc_first_occurence(argsorted_indices, inc_scan)
    if is_sorted:
        in_data = [data, argsorted_indices, inc_scan]
        if return_counts:
            fcompute = lambda ins, outs: _calc_unique_ir(*ins, None, *outs)
        else:
            fcompute = lambda ins, outs: _calc_unique_ir(*ins, None, *outs, None)
        indices = first_occurence
    else:
        # calculate index converter by sorting unique elements by their first occurence
        argsorted_first_occurence = argsort(first_occurence, dtype="int32")
        index_converter = argsort(argsorted_first_occurence, dtype="int32")
        in_data = [data, argsorted_indices, inc_scan, index_converter]
        if return_counts:
            fcompute = lambda ins, outs: _calc_unique_ir(*ins, *outs)
        else:
            fcompute = lambda ins, outs: _calc_unique_ir(*ins, *outs, None)
        # First occurence is in order of sorted unique output, if we sort the first_occurence array
        # we get the correct result
        indices = sort(first_occurence)
    outs = te.extern(
        out_data_shape,
        in_data,
        fcompute,
        dtype=out_dtypes,
        name="_calc_unique",
        tag="_calc_unique_cpu",
    )
    if return_counts:
        return [outs[0], indices, outs[1], num_unique_elements, outs[2]]
    return [outs[0], indices, outs[1], num_unique_elements]
| 12,245 | 37.753165 | 100 | py |
tvm | tvm-main/python/tvm/topi/scan.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Scan (cumulative binary) operators"""
from typing import Callable, Optional
import tvm
from ..te import extern
from ..tir import decl_buffer, generic, ir_builder
from .math import cast
from . import utils
def scanop(
    data: tvm.te.Tensor,
    binop: Callable[["tvm.Expr", "tvm.Expr"], "tvm.Expr"],
    identity_value: "tvm.Expr",
    op_name: str,
    axis: Optional[int] = None,
    dtype: Optional[str] = None,
    exclusive: Optional[bool] = None,
) -> tvm.te.Tensor:
    """Cumulative binary operator (scan) with similar axis behavior as np.cumsum and np.cumprod.

    See cumprod and cumsum for an example of use.

    E.g. if * is your binary operator and the input tensor is [1, 2, 3, 4] the output may be
    [1, 1 * 2, 1 * 2 * 3, 1 * 2 * 3 * 4]

    Parameters
    ----------
    data : tvm.te.Tensor
        The input data to the operator.

    binop: Callable (tvm.Expr, tvm.Expr) -> tvm.Expr
        A binary operator which should be associative and commutative. E.g. if * is your
        operator then a * (b * c) = (a * b) * c and a * b = b * a

    identity_value: tvm.Expr
        A value for the binary operation which provides the identity property. E.g. if * is
        your operator and i is the identity_value then a * i = a for all a in the domain of
        your operation.

    op_name: str
        Name (and tag) given to the generated extern op.

    axis : int, optional
        Axis along which the operation is computed. The default (None) is to compute
        the cumulative operation over the flattened array.

    dtype : string, optional
        Type of the returned array and of the accumulator in which the elements are computed.
        If dtype is not specified, it defaults to the dtype of data.

    exclusive : bool, optional
        If True will return exclusive cumulative operation in which the first element is not
        included. In other terms, if True, the j-th output element would be
        the cumulative operation of the first (j-1) elements. Otherwise, it would be the
        cumulative operation of the first j elements. The cumulative operation of zero elements
        is assumed to be the identity_value.

    Returns
    -------
    result : tvm.te.Tensor
        The result has the same size as data, and the same shape as data if axis is not None.
        If axis is None, the result is a 1-d array.
    """
    if dtype is None or dtype == "":
        dtype = data.dtype

    if exclusive is None:
        exclusive = False

    def maybe_cast(x):
        # Only insert a cast when the accumulator dtype differs from the input dtype.
        if dtype != data.dtype:
            return cast(x, dtype)
        return x

    axis_mul_before = 1
    axis_mul_after = 1

    if axis is None:
        # Flattened scan: treat the whole tensor as a single 1-D axis.
        axis = 0
        cumsum_axis_len = utils.prod(data.shape)
        shape = (cumsum_axis_len,)
    else:
        if not isinstance(axis, int):
            axis = utils.get_const_int(axis)

        shape = data.shape
        cumsum_axis_len = shape[axis]

        if axis < 0:
            axis = len(shape) + axis

        # Product of the dimensions before/after the scan axis: every
        # (before, after) index pair is an independent 1-D scan line.
        for i, value in enumerate(shape, 0):
            if i < axis:
                axis_mul_before *= value
            elif i > axis:
                axis_mul_after *= value

    def gen_ir(data_buf, out_buf):
        # One parallel loop over all independent scan lines; each line does a
        # sequential scan of cumsum_axis_len elements along the chosen axis.
        ib = ir_builder.create()
        data_buf = ib.buffer_ptr(data_buf)
        out_buf = ib.buffer_ptr(out_buf)

        with ib.for_range(0, axis_mul_before * axis_mul_after, "fused", kind="parallel") as fused:
            i = fused // axis_mul_after
            j = fused % axis_mul_after
            base_idx = i * cumsum_axis_len * axis_mul_after + j
            if exclusive:
                # Exclusive scan starts from the identity, not the first element.
                out_buf[base_idx] = cast(identity_value, dtype)
            else:
                out_buf[base_idx] = maybe_cast(data_buf[base_idx])
            with ib.for_range(0, cumsum_axis_len - 1, "_k") as _k:
                k = _k + 1
                cur_idx = base_idx + k * axis_mul_after
                prev_idx = base_idx + (k - 1) * axis_mul_after
                if exclusive:
                    out_buf[cur_idx] = binop(out_buf[prev_idx], maybe_cast(data_buf[prev_idx]))
                else:
                    out_buf[cur_idx] = binop(out_buf[prev_idx], maybe_cast(data_buf[cur_idx]))

        return ib.get()

    out_buf = decl_buffer(shape, dtype, "out_buf")
    return extern(
        [shape],
        [data],
        lambda ins, outs: gen_ir(ins[0], outs[0]),
        dtype=dtype,
        out_buffers=[out_buf],
        name=op_name,
        tag=op_name,
    )
def cumsum(
    data: tvm.te.Tensor,
    axis: Optional[int] = None,
    dtype: Optional[str] = None,
    exclusive: Optional[bool] = None,
) -> tvm.te.Tensor:
    """Numpy style cumsum op. Return the cumulative sum of the elements along a given axis.
    Parameters
    ----------
    data : tvm.te.Tensor
        The input data to the operator.
    axis : int, optional
        Axis along which the cumulative sum is computed. The default (None) is to compute
        the cumsum over the flattened array.
    dtype : string, optional
        Type of the returned array and of the accumulator in which the elements are summed.
        If dtype is not specified, it defaults to the dtype of data.
    exclusive : bool, optional
        If True, will return exclusive sum in which the first element is not
        included. In other terms, if True, the j-th output element would be
        the sum of the first (j-1) elements. Otherwise, it would be the sum of
        the first j elements.
    Returns
    -------
    result : tvm.te.Tensor
        The result has the same size as data, and the same shape as data if axis is not None.
        If axis is None, the result is a 1-d array.
    """
    # Cumulative sum is a scan with "+" as the combiner and 0 as its identity.
    return scanop(
        data=data,
        binop=generic.add,
        identity_value=0,
        op_name="cumsum_generic",
        axis=axis,
        dtype=dtype,
        exclusive=exclusive,
    )
def cumprod(
    data: tvm.te.Tensor,
    axis: Optional[int] = None,
    dtype: Optional[str] = None,
    exclusive: Optional[bool] = None,
) -> tvm.te.Tensor:
    """Numpy style cumprod op. Return the cumulative product of the elements along a given axis.
    Parameters
    ----------
    data : tvm.te.Tensor
        The input data to the operator.
    axis : int, optional
        Axis along which the cumulative product is computed. The default (None) is to compute
        the cumproduct over the flattened array.
    dtype : string, optional
        Type of the returned array and of the accumulator in which the elements are multiplied.
        If dtype is not specified, it defaults to the dtype of data.
    exclusive : bool, optional
        If True, will return exclusive product in which the first element is not
        included. In other terms, if True, the j-th output element would be
        the product of the first (j-1) elements. Otherwise, it would be the product of
        the first j elements.
    Returns
    -------
    result : tvm.te.Tensor
        The result has the same size as data, and the same shape as data if axis is not None.
        If axis is None, the result is a 1-d array.
    """
    # Cumulative product is a scan with "*" as the combiner and 1 as its identity.
    return scanop(
        data=data,
        binop=generic.multiply,
        identity_value=1,
        op_name="cumprod_generic",
        axis=axis,
        dtype=dtype,
        exclusive=exclusive,
    )
| 8,064 | 33.029536 | 98 | py |
tvm | tvm-main/python/tvm/topi/math.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Elementwise operators"""
# pylint: disable=redefined-builtin,unused-argument
import tvm
from tvm import te
from tvm.tir import PrimExpr
from . import tag
from . import cpp
from .utils import get_const_tuple
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def identity(x):
    """Element-wise identity: copy every element of ``x`` unchanged.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        A tensor with the same values as ``x``.
    """

    def _copy(*indices):
        return x(*indices)

    return te.compute(x.shape, _copy)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def negative(x):
    """Element-wise negation of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor where every element is the negation of the corresponding input.
    """

    def _neg(*indices):
        return -x(*indices)

    return te.compute(x.shape, _neg)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def exp(x):
    """Element-wise exponential of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with the exponential of each element.
    """

    def _elem(*indices):
        return te.exp(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def erf(x):
    """Element-wise Gauss error function of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with erf applied to each element.
    """

    def _elem(*indices):
        return te.erf(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.target.generic_func
def erf_legalize(attrs, inputs, types):
    """Legalizes the ERF op.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr; ``None`` leaves the op unchanged.
    """
    # The generic implementation performs no legalization; targets may override it.
    return None
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def tanh(x):
    """Element-wise hyperbolic tangent of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with tanh applied to each element.
    """

    def _elem(*indices):
        return te.tanh(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def tan(x):
    """Element-wise tangent of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with tan applied to each element.
    """

    def _elem(*indices):
        return te.tan(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def cos(x):
    """Element-wise cosine of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with cos applied to each element.
    """

    def _elem(*indices):
        return te.cos(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def cosh(x):
    """Element-wise hyperbolic cosine of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with cosh applied to each element.
    """

    def _elem(*indices):
        return te.cosh(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def sin(x):
    """Element-wise sine of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with sin applied to each element.
    """

    def _elem(*indices):
        return te.sin(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def sinh(x):
    """Element-wise hyperbolic sine of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with sinh applied to each element.
    """

    def _elem(*indices):
        return te.sinh(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def acos(x):
    """Element-wise arc cosine of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with acos applied to each element.
    """

    def _elem(*indices):
        return te.acos(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def acosh(x):
    """Element-wise inverse hyperbolic cosine of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with acosh applied to each element.
    """

    def _elem(*indices):
        return te.acosh(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def asin(x):
    """Element-wise arc sine of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with asin applied to each element.
    """

    def _elem(*indices):
        return te.asin(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def asinh(x):
    """Element-wise inverse hyperbolic sine of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with asinh applied to each element.
    """

    def _elem(*indices):
        return te.asinh(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def atan(x):
    """Element-wise arc tangent of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with atan applied to each element.
    """

    def _elem(*indices):
        return te.atan(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def atanh(x):
    """Element-wise inverse hyperbolic tangent of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with atanh applied to each element.
    """

    def _elem(*indices):
        return te.atanh(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def floor(x):
    """Element-wise floor of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with floor applied to each element.
    """

    def _elem(*indices):
        return te.floor(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def ceil(x):
    """Element-wise ceiling of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with ceil applied to each element.
    """

    def _elem(*indices):
        return te.ceil(x(*indices))

    return te.compute(x.shape, _elem)
def sign(x):
    """Element-wise sign: -1, 0 or 1 depending on the sign of each element.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        The sign of every element of ``x``.
    """
    # Delegates to the C++ topi implementation.
    return cpp.sign(x)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def trunc(x):
    """Element-wise truncation (round toward zero) of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with trunc applied to each element.
    """

    def _elem(*indices):
        return te.trunc(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def abs(x):
    """Element-wise absolute value of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with the absolute value of each element.
    """

    def _elem(*indices):
        return te.abs(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def isnan(x):
    """Element-wise NaN test of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Boolean tensor marking the NaN elements of ``x``.
    """

    def _elem(*indices):
        return te.isnan(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def isfinite(x):
    """Element-wise finiteness test of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Boolean tensor marking the finite elements of ``x``.
    """

    def _elem(*indices):
        return te.isfinite(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def isinf(x):
    """Element-wise infinity test of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Boolean tensor marking the infinite elements of ``x``.
    """

    def _elem(*indices):
        return te.isinf(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def round(x):
    """Round each element of ``x`` to the nearest integer.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with round applied to each element.
    """

    def _elem(*indices):
        return te.round(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def log(x):
    """Element-wise natural logarithm of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with log applied to each element.
    """

    def _elem(*indices):
        return te.log(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def log2(x):
    """Element-wise base-2 logarithm of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with log2 applied to each element.
    """

    def _elem(*indices):
        return te.log2(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def log10(x):
    """Element-wise base-10 logarithm of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with log10 applied to each element.
    """

    def _elem(*indices):
        return te.log10(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def sqrt(x):
    """Element-wise square root of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with sqrt applied to each element.
    """

    def _elem(*indices):
        return te.sqrt(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def rsqrt(x):
    """Element-wise reciprocal square root of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with rsqrt applied to each element.
    """

    def _elem(*indices):
        return te.rsqrt(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def sigmoid(x):
    """Element-wise sigmoid of ``x``.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with sigmoid applied to each element.
    """

    def _elem(*indices):
        return te.sigmoid(x(*indices))

    return te.compute(x.shape, _elem)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def left_shift(x, n):
    """Shift every element of ``x`` left by ``n`` bits.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.
    n : int
        Number of bits to shift by.

    Returns
    -------
    y : tvm.te.Tensor
        The shifted tensor.
    """

    def _shift(*indices):
        return x(*indices) << n

    return te.compute(x.shape, _shift)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def right_shift(x, n):
    """Shift every element of ``x`` right by ``n`` bits.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.
    n : int
        Number of bits to shift by.

    Returns
    -------
    y : tvm.te.Tensor
        The shifted tensor.
    """

    def _shift(*indices):
        return x(*indices) >> n

    return te.compute(x.shape, _shift)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def clip(x, a_min, a_max):
    """Clip (limit) the values in an array. Given an interval, values
    outside the interval are clipped to the interval edges.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.
    a_min : tvm.tir.PrimExpr
        Minimum value.
    a_max : tvm.tir.PrimExpr
        Maximum value.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor with each element clipped to [a_min, a_max].
    """

    def _as_expr(bound, dtype):
        # Cast an existing PrimExpr; wrap a python scalar as a typed const.
        if isinstance(bound, PrimExpr):
            return tvm.tir.Cast(dtype, bound)
        return tvm.tir.const(bound, dtype)

    def _clipped(*indices):
        value = x(*indices)
        lo = _as_expr(a_min, value.dtype)
        hi = _as_expr(a_max, value.dtype)
        return tvm.te.max(tvm.te.min(value, hi), lo)

    return te.compute(x.shape, _clipped)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def fixed_point_multiply(x, multiplier, shift):
    """Fixed point multiplication between data and a fixed point
    constant expressed as multiplier * 2^(-shift), where multiplier
    is a Q-number with 31 fractional bits

    Parameters
    ----------
    x : tvm.te.Tensor or Expr
        Input argument.
    multiplier : int
        Multiplier of a fixed floating point number described as multiplier*2^(-shift).
    shift : int
        Shift of a fixed floating point number described as multiplier*2^(-shift).

    Returns
    -------
    y : tvm.te.Tensor
        The result.
    """

    def _scaled(*indices):
        # q=31: the multiplier carries 31 fractional bits.
        return tvm.tir.q_multiply_shift(
            x(*indices),
            tvm.tir.const(multiplier, "int32"),
            tvm.tir.const(31, "int32"),
            tvm.tir.const(shift, "int32"),
        )

    return te.compute(x.shape, _scaled)
@tvm.te.tag_scope(tag=tag.BROADCAST)
def fixed_point_multiply_per_axis(
    x: te.Tensor,
    y: te.Tensor,
    lshift: te.Tensor,
    rshift: te.Tensor,
    is_lshift_required: int,
    is_rshift_required: int,
    axes,
):
    """Fixed point multiplication between data and a fixed point constant expressed as
    multiplier * 2^(-shift), where multiplier is a Q-number with 31 fractional bits
    Parameters
    ----------
    x : tvm.te.Tensor
        Input argument.
    y : tvm.te.Tensor
        Multiplier of a fixed floating point number described as multiplier*2^(-shift).
    lshift : tvm.te.Tensor
        Left shifts of a fixed floating point number described as multiplier*2^(-shift).
    rshift : tvm.te.Tensor
        Right shifts of a fixed floating point number described as multiplier*2^(-shift).
    is_lshift_required : int
        Whether we need to do left shift or not.
    is_rshift_required : int
        Whether we need to do right shift or not.
    axes : list of int
        Axes of `x` along which `y`, `lshift` and `rshift` are indexed.
    Returns
    -------
    z : tvm.te.Tensor
        The result.
    """
    def _compute(*indices):
        # Select only the index components named in `axes` to address the
        # per-axis parameter tensors y/lshift/rshift.
        elements = []
        for element in get_const_tuple(axes):
            elements += [indices[element]]
        param_indices = tuple(elements)
        value = x(*indices)
        m = y(*param_indices)
        l_shift = lshift(*param_indices)
        r_shift = rshift(*param_indices)
        return tvm.tir.q_multiply_shift_per_axis(
            value,
            m,
            l_shift,
            r_shift,
            tvm.tir.const(31, "int32"),
            tvm.tir.const(is_lshift_required, "bool"),
            tvm.tir.const(is_rshift_required, "bool"),
        )
    return te.compute(x.shape, _compute)
def cast(x, dtype, span=None):
    """Cast the input to the specified data type.

    Parameters
    ----------
    x : tvm.te.Tensor or Expr
        Input argument.
    dtype : str
        Target data type.
    span : Optional[Span]
        The location of the cast in the source.

    Returns
    -------
    y : tvm.te.Tensor
        The casted result.
    """
    if isinstance(x, te.tensor.Tensor):

        def _astype(*indices):
            return x(*indices).astype(dtype)

        return te.compute(x.shape, _astype, tag=tag.ELEMWISE)
    # Scalar/PrimExpr path: defer to the tir cast FFI.
    # pylint: disable=import-outside-toplevel
    from tvm.tir import _ffi_api

    return _ffi_api._cast(dtype, x, span)
def reinterpret(x, dtype):
    """Reinterpret the bits of the input as the given data type.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.
    dtype : str
        Target data type.

    Returns
    -------
    y : tvm.te.Tensor
        Tensor whose elements carry the same bit pattern viewed as ``dtype``.
    """
    # Delegates to the C++ topi implementation.
    return cpp.reinterpret(x, dtype)
def fast_exp(x):
    """Element-wise exponential using the fast_exp approximation.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Approximate exponential of each element.
    """
    return cpp.fast_exp(x, x.dtype, tag.ELEMWISE)
def fast_tanh(x):
    """Element-wise hyperbolic tangent using the fast_tanh approximation.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Approximate tanh of each element.
    """
    return cpp.fast_tanh(x, x.dtype, tag.ELEMWISE)
def fast_erf(x):
    """Element-wise Gauss error function using the fast_erf approximation.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor.

    Returns
    -------
    y : tvm.te.Tensor
        Approximate erf of each element.
    """
    return cpp.fast_erf(x, x.dtype, tag.ELEMWISE)
def ceil_log2(x):
    """Compute integer ceil log2 with a special code path for vulkan
    SPIR-V does not support log2 on fp64. Instead, we compute integer ceil_log2 via clz
    intrinsic when the target is vulkan.
    Parameters
    ----------
    x : tvm.te.Tensor
        Input argument.
    Returns
    -------
    y : tvm.te.Tensor
        The result.
    """
    if not isinstance(x, tvm.tir.PrimExpr):
        # Wrap python scalars as tir constants so the ops below accept them.
        x = tvm.tir.const(x)
    if "float" in x.dtype:
        return tvm.tir.ceil(tvm.tir.log2(x))
    # NOTE(review): the branches below read tvm.target.Target.current(); this
    # assumes a target context is active for non-float inputs — confirm at call sites.
    if "vulkan" in tvm.target.Target.current().kind.name:
        # Integer path via count-leading-zeros:
        # ceil(log2(x)) == bits - clz(x) - [x is a power of two].
        clz = tvm.tir.clz(x)
        bits = int(x.dtype[-2:])
        res = tvm.tir.if_then_else(x & (x - 1) == 0, bits - clz - 1, bits - clz)
        if res.dtype != x.dtype:
            return cast(res, x.dtype)
        return res
    if "adreno" in tvm.target.Target.current().device_name:
        # adreno path computes in fp32 rather than fp64 — presumably because
        # fp64 is unavailable there; verify against the target's capabilities.
        return cast(tvm.tir.ceil(tvm.tir.log2(cast(x, "float32"))), x.dtype)
    return cast(tvm.tir.ceil(tvm.tir.log2(cast(x, "float64"))), x.dtype)
| 17,852 | 19.473624 | 89 | py |
tvm | tvm-main/python/tvm/topi/sparse_reshape.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks
"""Sparse_Reshape operator"""
from ..tir import decl_buffer, ir_builder, Cast
from ..te import extern, div, floordiv, floormod
def sparse_reshape(
    sparse_indices,
    prev_shape,
    new_shape,
    new_sparse_indices_shape,
    new_shape_shape,
):
    """
    Reshape a Sparse Tensor
    Parameters
    ----------
    sparse_indices : relay.Expr
        A 2-D tensor[N, n_dim] of integers containing location of sparse values, where N is the
        number of sparse values and n_dim is the number of dimensions of the dense_shape
    prev_shape : relay.Expr
        A 1-D tensor containing the previous shape of the dense tensor
    new_shape : relay.Expr
        A 1-D tensor containing the new shape of the dense tensor
    new_sparse_indices_shape : relay.Expr
        Shape of the first output (the reshaped sparse_indices tensor)
    new_shape_shape : relay.Expr
        Shape of the second output (the resolved dense shape tensor)
    Returns
    -------
    result: relay.Expr
        Output tensor.
    Examples
    --------
    .. code-block:: python
        sparse_indices = [[0, 0, 0],
                          [0, 0, 1],
                          [0, 1, 0],
                          [1, 0, 0],
                          [1, 2, 3]]
        prev_shape = [2, 3, 4]
        new_shape = [9, -1]
        new_sparse_indices, new_shape = relay.sparse_reshape(
            sparse_indices, prev_shape, new_shape)
        new_sparse_indices = [[0, 0],
                              [0, 1],
                              [1, 2],
                              [4, 2],
                              [8, 1]]
        new_shape = [9, 4]
    """

    def gen_ir(
        sparse_indices_ptr,
        prev_shape_ptr,
        new_shape_ptr,
        new_sparse_indices_ptr,
        out_new_shape_ptr,
    ):
        # Emit TIR that flattens each sparse index against the previous layout
        # and unflattens it against the resolved new layout.
        ib = ir_builder.create()
        sparse_indices = ib.buffer_ptr(sparse_indices_ptr)
        prev_shape = ib.buffer_ptr(prev_shape_ptr)
        new_shape = ib.buffer_ptr(new_shape_ptr)
        # Fix: the original bound out_new_shape twice; a single buffer_ptr suffices.
        out_new_shape = ib.buffer_ptr(out_new_shape_ptr)
        new_sparse_indices = ib.buffer_ptr(new_sparse_indices_ptr)
        prev_shape_size = prev_shape_ptr.shape[0]
        new_shape_size = new_shape_ptr.shape[0]
        # multipliers[i] / dividers[i]: row-major strides of dim i in the
        # previous / new layout respectively.
        multipliers = ib.allocate(
            new_shape_ptr.dtype, (prev_shape_size,), name="multipliers", scope="local"
        )
        dividers = ib.allocate(
            new_shape_ptr.dtype, (new_shape_size,), name="dividers", scope="local"
        )
        flattened_indices = ib.allocate(
            new_shape_ptr.dtype,
            (sparse_indices_ptr.shape[0],),
            name="flattened_indices",
            scope="local",
        )
        total_ele = ib.allocate(new_shape_ptr.dtype, (1,), name="total_ele", scope="local")
        total_ele[0] = prev_shape[0]
        # Cumulative Reverse Exclusive Multiply
        multipliers[prev_shape_size - 1] = Cast(new_shape_ptr.dtype, 1)
        with ib.for_range(0, prev_shape_size - 1) as i_:
            i = i_ + 1
            multipliers[prev_shape_size - 1 - i] = (
                prev_shape[prev_shape_size - i] * multipliers[prev_shape_size - i]
            )
            total_ele[0] *= prev_shape[prev_shape_size - i]
        division_total_ele = ib.allocate(
            new_shape_ptr.dtype, (1,), name="division_total_ele", scope="local"
        )
        division_total_ele[0] = Cast(new_shape_ptr.dtype, 1)
        with ib.for_range(0, new_shape_size) as i:
            with ib.if_scope(new_shape[i] != -1):
                division_total_ele[0] *= new_shape[i]
        # Compute true output shape (replace negative ones)
        with ib.for_range(0, new_shape_size) as i:
            with ib.if_scope(new_shape[i] == -1):
                out_new_shape[i] = Cast(
                    new_shape_ptr.dtype, div(total_ele[0], division_total_ele[0])
                )
            with ib.else_scope():
                out_new_shape[i] = new_shape[i]
        equal_shape = ib.allocate("bool", (1,), name="equal_shape", scope="local")
        # Check if prev_shape and new_shape are equal
        equal_shape[0] = True
        with ib.if_scope(prev_shape_size == new_shape_size):
            with ib.for_range(0, prev_shape_size) as i:
                with ib.if_scope(prev_shape[i] != out_new_shape[i]):
                    equal_shape[0] = False
        with ib.else_scope():
            equal_shape[0] = False
        # Return same inputs if shapes are equal
        with ib.if_scope(equal_shape[0]):
            with ib.for_range(0, sparse_indices_ptr.shape[0], kind="parallel") as i:
                with ib.for_range(0, sparse_indices_ptr.shape[1]) as j:
                    new_sparse_indices[i, j] = sparse_indices[i, j]
        # Else compute new_sparse_indices
        with ib.else_scope():
            dividers[new_shape_size - 1] = Cast(new_shape_ptr.dtype, 1)
            with ib.for_range(0, new_shape_size - 1) as i_:
                i = i_ + 1
                dividers[new_shape_size - 1 - i] = (
                    dividers[new_shape_size - i] * out_new_shape[new_shape_size - i]
                )
            # Flatten every index against the previous strides...
            with ib.for_range(0, sparse_indices_ptr.shape[0], kind="parallel") as i:
                flattened_indices[i] = Cast(new_shape_ptr.dtype, 0)
                with ib.for_range(0, sparse_indices_ptr.shape[1]) as j:
                    flattened_indices[i] += sparse_indices[i, j] * multipliers[j]
            # ...then decompose the flat offset along the new strides.
            with ib.for_range(0, new_sparse_indices_ptr.shape[0], kind="parallel") as i:
                current_element = ib.allocate(
                    new_shape_ptr.dtype, (1,), name="current_element", scope="local"
                )
                current_element[0] = flattened_indices[i]
                with ib.for_range(0, new_sparse_indices_ptr.shape[1]) as j:
                    new_sparse_indices[i, j] = Cast(
                        sparse_indices_ptr.dtype, floordiv(current_element[0], dividers[j])
                    )
                    current_element[0] = floormod(current_element[0], dividers[j])
        return ib.get()

    new_sparse_indices_buf = decl_buffer(
        new_sparse_indices_shape, sparse_indices.dtype, "new_sparse_indices_buf"
    )
    new_shape_buf = decl_buffer(new_shape_shape, prev_shape.dtype, "new_shape_buf")
    return extern(
        [new_sparse_indices_shape, new_shape_shape],
        [sparse_indices, prev_shape, new_shape],
        lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], outs[0], outs[1]),
        out_buffers=[new_sparse_indices_buf, new_shape_buf],
        name="sparse_reshape_cpu",
        tag="sparse_reshape_cpu",
    )
| 7,340 | 37.434555 | 95 | py |
tvm | tvm-main/python/tvm/topi/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,consider-using-enumerate,redefined-outer-name
"""Injective transformation operators"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te, topi
from tvm.te import hybrid
from . import cpp, tag
from .utils import const_vector, make_idx, within_index
def expand_dims(a, axis, num_newaxis=1):
    """Insert new length-one axes into the shape of an array.

    Parameters
    ----------
    a : tvm.te.Tensor
        The tensor to be expanded.
    axis : int
        Position at which the new axes are inserted.
    num_newaxis : int, optional
        Number of new axes to insert at ``axis``.

    Returns
    -------
    ret : tvm.te.Tensor
        The expanded tensor.
    """
    return cpp.expand_dims(a, axis, num_newaxis)
def expand_like(a, shape_like, axis):
    """Expand an input array with the shape of second array.
    This operation can always be composed of unsqueezing and
    expanding dims on those unsqueezed axes.
    Examples
    --------
    .. code-block::
        input = [ 12.  19.  27.]
        input.shape = (3,)
        new_shape_array = [[[1,2],[2,3],[1,3]],
                        [[1,4],[4,3],[5,2]],
                        [[7,1],[7,2],[7,3]]]
        new_shape_array.shape = (3, 3, 2)
        expand_like(input, [1,2], new_shape_array) =
                        [[[12,12],[12,12],[12,12]],
                        [[19,19],[19,19],[19,19]],
                        [[27,27],[27,27],[27,27]]]
    Parameters
    ----------
    a : tvm.te.Tensor
        The tensor to be expanded.
    shape_like : tvm.te.Tensor
        The tensor to with target shape.
    axis: list of int
        axis to be expanded on
    Returns
    -------
    ret : tvm.te.Tensor
    """
    odim = len(axis) + len(a.shape)
    if odim != len(shape_like.shape):
        if len(a.shape) == 1 and len(axis) == len(shape_like.shape):
            # A special case: `a` is a scalar represented as a 1-dim tensor
            return te.compute(shape_like.shape, lambda *idxs: a(0))
        raise ValueError(
            f"shape inconsistent when expand_like ({len(axis)}, "
            f"{len(a.shape)}, {len(shape_like.shape)})"
        )
    real_axis = topi.reduction._get_real_axis(len(shape_like.shape), axis)
    real_axis = sorted(real_axis)

    def _compute(*idxs):
        # Drop the index components of the expanded axes; the remaining
        # components index into `a`.  (The original kept an `axis_index`
        # counter that was incremented but never read — removed.)
        indices = [idx for i, idx in enumerate(idxs) if i not in real_axis]
        return a(*indices)

    return te.compute(shape_like.shape, _compute)
def transpose(a, axes=None):
    """Permute the dimensions of an array.

    Parameters
    ----------
    a : tvm.te.Tensor
        The tensor to transpose.
    axes : tuple of ints, optional
        Desired axis order; by default the dimensions are reversed.

    Returns
    -------
    ret : tvm.te.Tensor
        The transposed tensor.
    """
    return cpp.transpose(a, axes)
def flip(a, axis=0):
    """Reverse the order of elements along a given axis.

    Parameters
    ----------
    a : tvm.te.Tensor
        The tensor to flip.
    axis : int, optional
        The axis along which elements are reversed.

    Returns
    -------
    ret : tvm.te.Tensor
        The flipped tensor.
    """
    return cpp.flip(a, axis)
def reverse_sequence(a, seq_lengths, seq_axis=1, batch_axis=0):
    """Reverse the tensor for variable length slices.

    The input is sliced along ``batch_axis`` and each slice is reversed
    along ``seq_axis`` up to its own length.

    Parameters
    ----------
    a : tvm.te.Tensor
        The tensor to be reversed.
    seq_lengths : tvm.te.Tensor
        A 1D Tensor with length a.dims[batch_axis].
        Must be one of the following types: int32, int64.
        Entries larger than a.dims[seq_axis] are rounded down to it;
        entries smaller than 1 are rounded up to 1.
    seq_axis : int, optional
        The axis along which the elements will be reversed. Default is 1.
    batch_axis : int, optional
        The axis along which the tensor will be sliced. Default is 0.

    Returns
    -------
    ret : tvm.te.Tensor
        The computed result of same shape and type as of input.
    """
    return cpp.reverse_sequence(a, seq_lengths, seq_axis, batch_axis)
def strided_slice(a, begin, end, strides=None, axes=None, slice_mode="end"):
    """Slice of an array.

    Parameters
    ----------
    a : tvm.te.Tensor
        The tensor to be sliced.
    begin : list of int
        The indices to begin with in the slicing.
    end : list of int
        Indices indicating end of the slice.
    strides : list of int, optional
        Specifies the stride values, it can be negative
        in that case, the input tensor will be reversed
        in that particular axis.
    axes : list of int, optional
        Axes along which slicing is applied. When it is specified, begin, end
        strides, and axes need to a list of integers of the same length.
    slice_mode : str, optional
        The slice mode [end, size].
        end - The ending indices for the slice [default].
        size - The input strides will be ignored, input end in this mode indicates
        the sizeof a slice starting at the location specified by begin. If end[i]
        is -1, all remaining elements in that dimension are included in the slice.

    Returns
    -------
    ret : tvm.te.Tensor
        The sliced tensor.
    """
    # Dynamic path: any of begin/end/strides given as a tensor.
    dynamic = (
        isinstance(begin, tvm.te.Tensor)
        or isinstance(end, tvm.te.Tensor)
        or isinstance(strides, tvm.te.Tensor)
    )
    if dynamic:
        assert axes is None, "axes argument is not supported by dynamic strided slice yet."
        # Promote every python list to a constant tensor.
        if not isinstance(begin, tvm.te.Tensor):
            begin = const_vector(begin)
        if not isinstance(end, tvm.te.Tensor):
            end = const_vector(end)
        if strides is None:
            strides = [1] * begin.shape[0].value
        if not isinstance(strides, tvm.te.Tensor):
            strides = const_vector(strides)
        return cpp.dynamic_strided_slice(a, begin, end, strides)
    # Static path: missing strides/axes become empty lists for the C++ op.
    strides = [] if strides is None else strides
    axes = [] if axes is None else axes
    return cpp.strided_slice(a, begin, end, strides, axes, slice_mode)
@tvm.te.tag_scope(tag=tag.INJECTIVE + ",strided_set")
def strided_set(a, v, begin, end, strides=None):
    """Set slice of an array.

    Parameters
    ----------
    a : tvm.te.Tensor
        The tensor to be sliced.
    v : tvm.te.Tensor
        The values to set
    begin: tvm.te.Tensor
        The indices to begin with in the slicing.
    end: tvm.te.Tensor
        Indices indicating end of the slice.
    strides: tvm.te.Tensor, optional
        Specifies the stride values, it can be negative
        in that case, the input tensor will be reversed
        in that particular axis.

    Returns
    -------
    ret : tvm.te.Tensor
        A copy of ``a`` with the selected slice replaced by ``v``.

    Raises
    ------
    ValueError
        If ``begin``, ``end`` or ``strides`` is not a 1-D vector.
    TypeError
        If ``begin``, ``end`` or ``strides`` is not int32.
    """
    n = len(a.shape)
    # Validate the index tensors: each must be a 1-D int32 vector.
    if len(begin.shape) != 1:
        raise ValueError("begin should be a vector")
    if not begin.dtype == "int32":
        raise TypeError("begin should be int32")
    if len(end.shape) != 1:
        raise ValueError("end should be a vector")
    if not end.dtype == "int32":
        raise TypeError("end should be int32")
    if strides is not None:
        if len(strides.shape) != 1:
            raise ValueError("strides should be a vector")
        if not strides.dtype == "int32":
            raise TypeError("strides should be int32")
    # (The original defined an unused `_max` helper here — removed.)
    if strides is None:
        strides = [tvm.tir.const(1, "int32")] * n
    else:
        # Pad missing stride entries with 1 so every dimension has a stride.
        strides = [
            tvm.tir.if_then_else(strides.shape[0] > i, strides[i], tvm.tir.const(1, "int32"))
            for i in range(n)
        ]
    # Default begin: 0 for positive strides, the dimension extent for negative ones.
    begin = [
        tvm.tir.if_then_else(
            begin.shape[0] > i,
            begin[i],
            tvm.tir.Select(strides[i] > 0, tvm.tir.const(0, "int32"), a.shape[i]),
        )
        for i in range(n)
    ]
    # Default end: one past the last index, sign-adjusted for negative strides.
    end = [
        tvm.tir.if_then_else(
            end.shape[0] > i,
            end[i],
            tvm.tir.Select(strides[i] > 0, a.shape[i] + 1, -(a.shape[i] + 1)),
        )
        for i in range(n)
    ]
    # Convert negative indexes
    for i in range(n):
        begin[i] = tvm.tir.if_then_else(begin[i] < 0, begin[i] + a.shape[i], begin[i])
        end[i] = tvm.tir.if_then_else(end[i] < 0, end[i] + a.shape[i], end[i])

    def _select(*indices):
        # For each output position, pick v's element when the position lies
        # inside the strided slice, otherwise keep a's original element.
        from_val = []
        index_tuple = []
        for i in range(n):
            from_val.append(within_index(begin[i], end[i], strides[i], indices[i]))
            index_tuple.append(make_idx(begin[i], end[i], strides[i], a.shape[i], indices[i]))
        return tvm.tir.if_then_else(tvm.tir.all(*from_val), v(*index_tuple), a(*indices))

    return te.compute(a.shape, _select, name="strided_set")
def reshape(a, newshape):
    """Give a new shape to the tensor without changing its data.

    Parameters
    ----------
    a : tvm.te.Tensor
        The tensor to be reshaped.
    newshape : tuple of ints
        The shape the result should have.

    Returns
    -------
    ret : tvm.te.Tensor
        Tensor with shape ``newshape``.
    """
    return cpp.reshape(a, newshape)
def squeeze(a, axis=None):
    """Drop size-one dimensions from the shape of a tensor.

    Parameters
    ----------
    a : tvm.te.Tensor
        The input tensor.
    axis : None or int or tuple of ints, optional
        Selects a subset of the single-dimensional entries in the shape.
        Selecting an axis whose extent is greater than one is an error.

    Returns
    -------
    squeezed : tvm.te.Tensor
        Tensor with the selected size-one dimensions removed.
    """
    return cpp.squeeze(a, axis)
def concatenate(a_tuple, axis=0):
    """Join a sequence of tensors along an existing axis.

    Parameters
    ----------
    a_tuple : tuple of tvm.te.Tensor
        The tensors to concatenate.
    axis : int, optional
        The axis along which the tensors are joined (default 0).

    Returns
    -------
    ret : tvm.te.Tensor
        The concatenated tensor.
    """
    return cpp.concatenate(a_tuple, axis)
def stack(a, axis):
    """Join a sequence of tensors along a new axis.

    NOTE: the previous docstring ("Repeats the whole array multiple times")
    was copied from ``tile`` and did not describe this op.

    Parameters
    ----------
    a : tvm.te.Tensor
        The tensor(s) to be stacked.
    axis : int, optional
        The axis in the result array along which the input arrays are stacked.

    Returns
    -------
    ret : tvm.te.Tensor
        The stacked tensor with one more dimension than the inputs.
    """
    return cpp.stack(a, axis)
def split(ary, indices_or_sections, axis=0):
    """Split a tensor into multiple sub-tensors.

    Parameters
    ----------
    ary : tvm.te.Tensor
        The tensor to split.
    indices_or_sections : int or 1-D array
        Number of equal sections, or the split points along ``axis``.
    axis : int
        The axis along which to split (default 0).

    Returns
    -------
    ret : tuple of tvm.te.Tensor
        The resulting sub-tensors.
    """
    return cpp.split(ary, indices_or_sections, axis)
def take(a, indices, axis=None, batch_dims=0, mode="clip"):
    """Take elements from a tensor along an axis.

    Parameters
    ----------
    a : tvm.te.Tensor
        The source tensor.
    indices : tvm.te.Tensor
        The indices of the values to extract.
    axis : int, optional
        The axis over which to select values. When omitted, the flattened
        input tensor is used.
    batch_dims : int
        The number of batch dimensions (default 0).
    mode : str, optional
        How out-of-bound indices behave:
        ``clip`` - clip to the valid range (default);
        ``wrap`` - wrap around the indices;
        ``fast`` - no clip or wrap (caller must guarantee in-bound indices).

    Returns
    -------
    ret : tvm.te.Tensor
    """
    if axis is not None:
        return cpp.take(a, indices, int(batch_dims), int(axis), mode)
    # No axis given: operate on the flattened tensor.
    return cpp.take(a, indices, int(batch_dims), mode)
@tvm.target.generic_func
def take_legalize(attrs, inputs, types):
    """Legalizes the take op.

    NOTE: the previous docstring said "Legalizes dyn.topk op", which was a
    copy/paste error; this function legalizes ``take``.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current op
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr, or None when no legalization is required.
    """
    if tvm.relay.ty.is_dynamic(types[0]):
        # Wrap the data input in stop_fusion when its type is dynamic —
        # presumably to keep take out of fused kernels in that case; confirm.
        return tvm.relay.take(tvm.relay.annotation.stop_fusion(inputs[0]), inputs[1], **attrs)
    return None
def gather(data, axis, indices):
    """Gather values along a given axis using per-element indices.

    For a 3D tensor the output is computed as:

    .. code-block:: python

        out[i][j][k] = data[indices[i][j][k]][j][k]  # if axis == 0
        out[i][j][k] = data[i][indices[i][j][k]][k]  # if axis == 1
        out[i][j][k] = data[i][j][indices[i][j][k]]  # if axis == 2

    ``indices`` must have the same shape as ``data`` except at dimension
    ``axis``, which must simply be non-empty. The output has the shape of
    ``indices``.

    Parameters
    ----------
    data : tvm.te.Tensor
        The input data to the operator.
    axis : int
        The axis along which to index.
    indices : tvm.te.Tensor
        The indices of the values to extract.

    Returns
    -------
    ret : tvm.te.Tensor
    """
    return cpp.gather(data, axis, indices)
def gather_nd(a, indices):
    """Gather elements from an n-dimensional tensor.

    Parameters
    ----------
    a : tvm.te.Tensor
        The source tensor.
    indices : tvm.te.Tensor
        The indices of the values to extract.

    Returns
    -------
    ret : tvm.te.Tensor
        The gathered values.
    """
    return cpp.gather_nd(a, indices)
def matmul(a, b, transp_a=False, transp_b=False):
    """
    Creates an operation that calculates a matrix multiplication
    (row-major notation): A(i, k) * B(k, j), or the usual transposed
    combinations depending on the flags.

    Parameters
    ----------
    a : tvm.te.Tensor
        The matrix A
    b : tvm.te.Tensor
        The matrix B
    transp_a : bool, optional
        Is A's layout transposed?
    transp_b : bool, optional
        Is B's layout transposed?

    Returns
    -------
    A Tensor whose op member is the matmul operation
    """
    # NOTE: docstring previously documented `trans_a`/`trans_b`, which do not
    # match the actual parameter names `transp_a`/`transp_b`.
    return cpp.matmul(a, b, transp_a, transp_b)
def tensordot(a, b, axes):
    """A generalization of matrix multiplication to tensors.

    Parameters
    ----------
    a : tvm.te.Tensor
        The tensor A.
    b : tvm.te.Tensor
        The tensor B.
    axes : int or sequence
        The number of dimensions to reduce over, or a pair of axis
        specifications (one per operand).

    Returns
    -------
    A Tensor computing the result
    """
    if isinstance(axes, int):
        # Single integer: contract over that many trailing/leading dims.
        return cpp.tensordot(a, b, axes)
    lhs_axes, rhs_axes = axes[0], axes[1]
    if isinstance(lhs_axes, int):
        # A pair of single axes: wrap them as one-element tuples.
        return cpp.tensordot(a, b, (lhs_axes,), (rhs_axes,))
    return cpp.tensordot(a, b, lhs_axes, rhs_axes)
def arange(start, stop=None, step=1, dtype="float32"):
    """Create a tensor of evenly spaced values within a half-open interval.

    Parameters
    ----------
    start : tvm.Expr, optional
        Start of the interval (inclusive). Defaults to 0 when only one
        positional argument is supplied.
    stop : tvm.Expr
        End of the interval (exclusive).
    step : tvm.Expr, optional
        Spacing between values (default 1).
    dtype : str, optional
        The target data type.

    Returns
    -------
    result : tvm.te.Tensor
        The resulting tensor.
    """
    if stop is None:
        # Single-argument form: arange(n) == arange(0, n).
        start, stop = 0, start
    return cpp.arange(start, stop, step, dtype)
def meshgrid(a_tuple, indexing):
    """Build coordinate matrices from coordinate vectors.

    Parameters
    ----------
    a_tuple : tuple of tvm.te.Tensor
        The coordinate vectors or scalars.
    indexing : str
        Indexing mode, either "ij" or "xy".

    Returns
    -------
    result : tuple of tvm.te.Tensor
        One grid tensor per input axis.
    """
    return cpp.meshgrid(a_tuple, indexing)
def repeat(a, repeats, axis):
    """Repeat each element of a tensor along a given axis.

    Parameters
    ----------
    a : tvm.te.Tensor
        The tensor whose elements are repeated.
    repeats : int
        Number of repetitions for each element.
    axis : int
        The axis along which values are repeated.

    Returns
    -------
    ret : tvm.te.Tensor
    """
    return cpp.repeat(a, repeats, axis)
def tile(a, reps):
    """Construct a tensor by repeating the whole input multiple times.

    Parameters
    ----------
    a : tvm.te.Tensor
        The tensor to be tiled.
    reps : tuple of ints
        The number of repetitions along each axis.

    Returns
    -------
    ret : tvm.te.Tensor
    """
    return cpp.tile(a, reps)
def layout_transform(array, src_layout, dst_layout, schedule_rule="None"):
    """Transform a tensor from ``src_layout`` to ``dst_layout``.

    Parameters
    ----------
    array : tvm.te.Tensor
        The source tensor.
    src_layout : str
        The source layout string.
    dst_layout : str
        The destination layout string.
    schedule_rule : str
        The schedule rule to apply, if any (default "None").

    Returns
    -------
    ret : tvm.te.Tensor
        The tensor in the destination layout.
    """
    return cpp.layout_transform(array, src_layout, dst_layout, schedule_rule)
def shape(array, dtype="int32"):
    """Return the shape of the input tensor as a 1-D tensor.

    Parameters
    ----------
    array : tvm.te.Tensor
        The source tensor.
    dtype : str, optional
        The data type of the result (default "int32").

    Returns
    -------
    result : tvm.te.Tensor
        A 1-D tensor holding the shape of ``array``.
    """
    return cpp.shape(array, dtype)
def sequence_mask(data, valid_length, mask_value=0, axis=0):
    """Mask out elements beyond each sequence's valid length.

    The input is an n-dimensional tensor of shape ``[MAX_LENGTH, batch_size, ...]``
    (``axis=0``) or ``[batch_size, MAX_LENGTH, ...]`` (``axis=1``); the output
    has the same shape, with every element past a sequence's ``valid_length``
    replaced by ``mask_value``.

    Parameters
    ----------
    data : tvm.te.Tensor
        N-D tensor; the layout along the first two axes is determined by
        ``axis`` as described above.
    valid_length : tvm.te.Tensor
        1-D tensor of shape ``[batch_size,]`` with the length of each
        sequence (positive ints).
    mask_value : float, optional
        The masking value (default 0).
    axis : int, optional
        The axis of the length dimension; must be 0 or 1 (default 0).

    Returns
    -------
    output : tvm.te.Tensor
        N-D tensor with the same shape as ``data``.
    """
    assert len(data.shape) >= 2, f"only support data.ndim >= 2, received data.shape = {data.shape}"
    assert axis in (0, 1), f"only support axis = 0, 1, received axis = {axis}"
    return cpp.sequence_mask(data, valid_length, mask_value, axis)
def ndarray_size(array, dtype="int32"):
    """Return the total number of elements of the input tensor.

    Parameters
    ----------
    array : tvm.te.Tensor
        The source tensor.
    dtype : str, optional
        The data type of the result (default "int32").

    Returns
    -------
    result : tvm.te.Tensor
        Scalar tensor holding the element count.
    """
    return cpp.ndarray_size(array, dtype)
def where(condition, x, y):
    """Select elements from ``x`` or ``y`` depending on ``condition``.

    Parameters
    ----------
    condition : tvm.te.Tensor
        The condition tensor.
    x : tvm.te.Tensor
        Values chosen where the condition holds.
    y : tvm.te.Tensor
        Values chosen where the condition does not hold.

    Returns
    -------
    result : tvm.te.Tensor
        A tensor whose elements come from ``x`` or ``y``.
    """
    return cpp.where(condition, x, y)
def one_hot(indices, on_value, off_value, depth, axis, dtype):
    """
    Build a one-hot tensor: positions named by ``indices`` take ``on_value``,
    every other position takes ``off_value``. The result has shape
    ``<indices outer dims> x depth x <indices inner dims>``.

    Parameters
    ----------
    indices : tvm.te.Tensor
        Locations to set to ``on_value``.
    on_value : tvm.te.Tensor
        Value filled at the indexed positions.
    off_value : tvm.te.Tensor
        Value filled everywhere else.
    depth : int
        Depth of the one-hot dimension.
    axis : int
        Axis at which the one-hot dimension is inserted.
    dtype : relay.DataType
        Data type of the output tensor.

    Returns
    -------
    ret : relay.Expr
        The one-hot tensor.

    Examples
    --------
    .. code-block:: python

        indices = [0, 1, 2]

        relay.one_hot(indices, 3) =
            [[1, 0, 0],
             [0, 1, 0],
             [0, 0, 1]]
    """
    return cpp.one_hot(indices, on_value, off_value, depth, axis, dtype)
def unravel_index(indices, shape):
    """Convert flat indices into a tuple of coordinate arrays.

    Example::
        -  unravel_index([22, 41, 37], [7, 6]) = [[3, 6, 6], [4, 5, 1]]

    Parameters
    ----------
    indices : relay.Expr
        An integer array of flat indices.
    shape : relay.Expr
        The shape of the target array.

    Returns
    -------
    result : relay.Expr
        The tuple of coordinate arrays.
    """
    return cpp.unravel_index(indices, shape)
def sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value=0):
    """Convert a sparse representation into a dense tensor.

    Example::
        -  sparse_to_dense([[0, 0], [1, 1]], [2, 2], [3, 3], 0) = [[3, 0], [0, 3]]

    Parameters
    ----------
    sparse_indices : tvm.te.Tensor
        A 0-D, 1-D, or 2-D integer tensor giving the locations of the
        sparse values.
    output_shape : list of ints
        Shape of the dense output tensor.
    sparse_values : tvm.te.Tensor
        A 0-D or 1-D tensor with the values for the sparse indices.
    default_value : tvm.te.Tensor
        A 0-D tensor with the value for all remaining locations (default 0).

    Returns
    -------
    result : tvm.te.Tensor
        Dense tensor of shape ``output_shape`` with the same type as
        ``sparse_values``.
    """
    return cpp.sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value)
def matrix_set_diag(data, diagonal, k=0, align="RIGHT_LEFT"):
    """
    Return a tensor whose selected diagonals are replaced by the values in
    ``diagonal``; all other entries are copied from ``data``.

    Parameters
    ----------
    data : relay.Expr
        Input Tensor.
    diagonal : relay.Expr
        Values to be filled in the diagonal.
    k : int or tuple of int, optional
        Diagonal offset(s); 0 (the default) is the main diagonal, positive
        values are superdiagonals, negative values subdiagonals. A pair
        selects the band [k[0], k[1]] with k[0] <= k[1].
    align : string, optional
        How superdiagonals and subdiagonals shorter than max_diag_len are
        padded. One of "RIGHT_LEFT" (default), "LEFT_RIGHT", "LEFT_LEFT",
        "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals to the right
        (left-pads the row) and subdiagonals to the left (right-pads the
        row) — the packing format LAPACK uses; cuSPARSE uses "LEFT_RIGHT".

    Returns
    -------
    result : relay.Expr
        New tensor with given diagonal values.

    Examples
    --------
    .. code-block:: python

        data = [[[7, 7, 7, 7],
                 [7, 7, 7, 7],
                 [7, 7, 7, 7]],
                [[7, 7, 7, 7],
                 [7, 7, 7, 7],
                 [7, 7, 7, 7]]]

        diagonal = [[1, 2, 3],
                    [4, 5, 6]]

        topi.matrix_set_diag(input, diagonal) =
            [[[1, 7, 7, 7],
              [7, 2, 7, 7],
              [7, 7, 3, 7]],
             [[4, 7, 7, 7],
              [7, 5, 7, 7],
              [7, 7, 6, 7]]]
    """
    # Normalize k to a (low, high) pair of band bounds.
    if isinstance(k, (tuple, list)):
        k_one = k[0]
        k_two = k[1] if len(k) >= 2 else k[0]
    else:
        k_one = k_two = k

    # "RIGHT_LEFT" etc. encode the super/sub alignment as prefix/suffix.
    super_diag_right_align = align.startswith("RIGHT")
    sub_diag_right_align = align.endswith("RIGHT")

    return cpp.matrix_set_diag(
        data, diagonal, k_one, k_two, super_diag_right_align, sub_diag_right_align
    )
def adv_index(data, indices):
    """NumPy-style advanced indexing with tensors.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input data.
    indices : list of tvm.te.Tensor
        The index tensors.

    Returns
    -------
    result : tvm.te.Tensor
        The indexed output tensor.
    """
    return cpp.adv_index(data, indices)
@hybrid.script
def invert_permutation(data):
    """Computes the inverse permutation of data.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input data

    Returns
    -------
    result : tvm.te.Tensor
        Output tensor

    Examples
    --------
    .. code-block:: python

        data = [3, 4, 0, 2, 1]
        topi.invert_permutation(data) = [2, 4, 3, 0, 1]
    """
    # NOTE: this body is TVM hybrid-script; output_tensor is a hybrid builtin.
    result = output_tensor(data.shape, data.dtype)
    nums = data.shape[0]
    for ind in range(nums):
        # data[ind] is the destination position; record the source index there.
        r_ind = data[ind]
        result[r_ind] = ind
    return result
def sliding_window(data, axis, window_shape, strides):
    """Slide a window over the input tensor.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    axis : int
        The first axis the window slides over; the window covers this axis
        and all following ones, so ``window_shape`` and ``strides`` must both
        have length ``data.ndim - axis``.
    window_shape : List[int]
        The window extents, one per covered axis.
    strides : List[int]
        The window stride along each covered axis.

    Returns
    -------
    result : relay.Expr
        The resulting tensor of windows.
    """
    return cpp.sliding_window(data, axis, window_shape, strides)
def trilu(data, k, upper):
    """
    Given a 2-D matrix or batches of 2-D matrices, returns the
    upper or lower triangular part of the tensor.

    Parameters
    ----------
    data: tvm.te.Tensor
        The tensor that trilu will be applied to. Must be either
        a 2D matrix or a tensor of batches of 2D matrices.

    k: tvm.te.Tensor
        The number of diagonals above or below the main diagonal
        to exclude or include.

    upper: bool
        If True, only upper triangular values of input are kept,
        if False, the lower triangular values are kept.

    Returns
    -------
    ret : relay.Expr
        The new tensor with appropriate diagonals set to zero.

    Examples
    --------
    .. code-block:: python

        x = [[0, 1, 2],
             [3, 4, 5],
             [6, 7, 8]]

        relay.trilu(x, True, 0) =
            [[0, 1, 2],
             [0, 4, 5],
             [0, 0, 8]]
    """
    # Make sure datatype is consistent.
    if k.dtype != "int32":
        k = tvm.tir.Cast("int32", k)
    # Check either above or below diagonal depending on upper.
    # upper=True keeps entries with row <= col - k; upper=False keeps row >= col - k.
    check_op = tvm.tir.GE
    if upper:
        check_op = tvm.tir.LE
    def _apply_trilu(*indices):
        # The last two indices address the 2-D matrix; any leading ones are batch dims.
        row_index = indices[-2]
        col_index = indices[-1]
        # promote row & col indices
        if row_index.dtype != col_index.dtype:
            # Let the addition pick the promoted dtype, then cast the narrower side.
            target_type = (col_index + row_index).dtype
            if row_index.dtype != target_type:
                row_index = tvm.tir.Cast(target_type, row_index)
            else:
                col_index = tvm.tir.Cast(target_type, col_index)
        other_indices = indices[:-2]
        check_position = check_op(row_index, col_index - k)
        value = data(*other_indices, row_index, col_index)
        # Keep the original value inside the selected triangle; zero elsewhere.
        return tvm.tir.Select(check_position, value, tvm.tir.const(0, data.dtype))
    return te.compute(data.shape, _apply_trilu, name="trilu", tag=topi.tag.ELEMWISE)
| 28,677 | 25.826941 | 99 | py |
tvm | tvm-main/python/tvm/topi/searchsorted.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""searchsorted operator"""
from . import utils
from . import te
from ..tir import ir_builder
from .math import cast
def binary_search(ib, sequence_offset, search_range, sorted_sequence, value, right, out_dtype):
    """Common IR generator for binary search used by CPU and GPU backends.

    `sorted_sequence` is a N-D Buffer whose innermost dimension we want to search for `value`,
    and `search_range` is the size of the innermost dimension. `sequence_offset` is
    a 1-D linearlized offset specifying which of innermost sequences to search.

    So the search for `value` is performed over
    `sorted_sequence[sequence_offset:(sequence_offset + search_range)]`.

    Note that we index N-D Buffer by 1-D linearlized indices.
    """
    # lo/hi are one-element scratch buffers holding the current search window.
    lo = ib.allocate(out_dtype, (1,), name="lo", scope="local")
    hi = ib.allocate(out_dtype, (1,), name="hi", scope="local")
    lo[0] = cast(0, out_dtype)
    hi[0] = cast(search_range, out_dtype)

    # Reference: pytorch/aten/src/ATen/native/cuda/Bucketization.cu
    def condition(current_val, target_val):
        # right=True advances past equal elements (rightmost insertion point);
        # right=False stops at the first equal element (leftmost).
        if right:
            return current_val <= target_val
        return current_val < target_val

    with ib.while_loop(lo[0] < hi[0]):
        # `-` binds tighter than `>>`, so this is lo + ((hi - lo) >> 1):
        # the overflow-safe midpoint.
        mid = lo[0] + (hi[0] - lo[0] >> 1)
        with ib.if_scope(condition(sorted_sequence[sequence_offset + mid], value)):
            lo[0] = mid + 1
        with ib.else_scope():
            hi[0] = mid

    return lo[0]
def searchsorted(sorted_sequence, values, right=False, out_dtype="int64"):
    """Find indices where elements should be inserted to maintain order.
    If `sorted_sequence` is N-dimensional, the innermost dimension of
    `values` are searched in the corresponding dimension of `sorted_sequence`.

    Parameters
    ----------
    sorted_sequence : te.Tensor
        N-D or 1-D Tensor, containing monotonically increasing sequence
        on the innermost dimension.

    values : te.Tensor
        N-D Tensor containing the search values. When `sorted_sequence` is 1-D,
        the shape of `values` can be arbitrary. Otherwise, ranks of `sorted_sequence`
        and `values` must be the same, and outer N-1 axes must have the same size.

    right : bool, optional
        Controls which index is returned if a value lands exactly on one of sorted values. If
        False, the index of the first suitable location found is given. If true, return the
        last such index. If there is no suitable index, return either 0 or N (where N is the
        size of the innermost dimension).

    out_dtype : string, optional
        The data type of the output indices.

    Returns
    -------
    indices : te.Tensor
        Tensor with same shape as values, representing the indices of
        elements of `values` if they are inserted in `sorted_sequence`.
    """

    def ir(sorted_sequence, values, indices):
        ib = ir_builder.create()
        sorted_sequence_shape = sorted_sequence.shape
        values_shape = values.shape
        # One independent binary search per element of `values`.
        num_search = utils.prod(values_shape)
        search_range = sorted_sequence_shape[-1]

        sorted_sequence = ib.buffer_ptr(sorted_sequence)
        values = ib.buffer_ptr(values)
        indices = ib.buffer_ptr(indices)

        with ib.for_range(0, num_search, name="i", kind="parallel") as i:
            if len(sorted_sequence_shape) == 1:
                # Single sequence: every search starts at offset 0.
                sequence_offset = 0
            else:
                # N-D case: searches in the same innermost row of `values`
                # share the corresponding row of `sorted_sequence`.
                sequence_id = i // values_shape[-1]
                sequence_offset = sequence_id * search_range

            indices[i] = binary_search(
                ib,
                sequence_offset,
                search_range,
                sorted_sequence,
                values[i],
                right,
                out_dtype,
            )

        return ib.get()

    return te.extern(
        values.shape,
        [sorted_sequence, values],
        lambda ins, outs: ir(ins[0], ins[1], outs[0]),
        name="searchsorted",
        dtype=out_dtype,
    )
| 4,777 | 36.328125 | 95 | py |
tvm | tvm-main/python/tvm/topi/scatter_elements.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""ScatterElements operator"""
from tvm import te
from tvm import tir
from . import utils
from .math import cast
def scatter_elements(data, indices, updates, axis=0, reduction="update"):
    """Scatter elements from updates to corresponding indices of copied data.

    Data, indices, updates and output have the same shape.
    Indices can not have duplicates (if idx1 != idx2, then indices[idx1] != indices[idx2])
    if reduction == "update".

    .. code-block::

        output[indices[i][j]][j] = f(output[indices[i][j]][j], updates[i][j]) if axis = 0
        output[i][indices[i][j]] = f(output[i][indices[i][j]], updates[i][j]) if axis = 1

    where the update function f is determined by the reduction.
    Five types of the function are supported: "update", "add", "mul", "min" and "max" (see below)

    Parameters
    ----------
    data : tvm.te.Tensor
        The source array.

    indices : tvm.te.Tensor
        The indices of the values to extract.

    updates : tvm.te.Tensor
        The updates to apply at the Indices

    axis : optional, int
        The axis to scatter on. It is zero by default.

    reduction : optional, string
        The update mode for the algorithm, either "update", "add", "mul", "min" or "max"
        If update, the update values will replace the input data
        If add, the update values will be added to the input data
        If mul, the input data will be multiplied on the update values
        If mean, the input data will be mean between the update values and the input data
        If min, there is choice of minimal between the update values and the input data
        If max, there is choice of maximal between the update values and the input data
        It is "update" by default

    Returns
    -------
    ret : tvm.te.Tensor
    """
    if not isinstance(axis, int):
        axis = utils.get_const_int(axis)

    # Prepare ranges and strides
    shape = data.shape
    if axis < 0:
        # Normalize a negative axis to its positive equivalent.
        axis = len(shape) + axis
    axis_range = cast(shape[axis], indices.dtype)

    # full_range: total number of elements of `data`;
    # after_axis_range: product of the dimensions after `axis`.
    full_range = 1
    after_axis_range = 1
    for i, value in enumerate(shape, 0):
        full_range *= value
        if i > axis:
            after_axis_range *= value
    before_axis_stride = axis_range * after_axis_range

    # Same flattened-index bookkeeping for the `indices` shape.
    ind_shape = indices.shape
    ind_axis_range = ind_shape[axis]

    ind_before_axis_range = 1
    ind_after_axis_range = 1
    for i, value in enumerate(ind_shape, 0):
        if i < axis:
            ind_before_axis_range *= value
        elif i > axis:
            ind_after_axis_range *= value
    ind_before_axis_stride = ind_axis_range * ind_after_axis_range

    def gen_ir(data_ptr, indices_ptr, updates_ptr, out_ptr, reduce_func):
        # Emit TIR: copy `data` into `out`, then apply each update through
        # `reduce_func` at the position named by `indices`.
        # pylint: disable=invalid-name
        ib = tir.ir_builder.create()

        data = ib.buffer_ptr(data_ptr)
        indices = ib.buffer_ptr(indices_ptr)
        updates = ib.buffer_ptr(updates_ptr)
        out = ib.buffer_ptr(out_ptr)

        # Copy initial input data to output
        with ib.for_range(0, full_range, "i", kind="parallel") as i:
            out[i] = data[i]

        # Parallelize over all dims except the scatter axis; the axis itself is
        # walked serially (inner `k` loop), so reductions apply in k order.
        with ib.for_range(
            0, ind_before_axis_range * ind_after_axis_range, "fused", kind="parallel"
        ) as fused:
            i = fused // ind_after_axis_range
            j = fused % ind_after_axis_range
            pre_index1 = i * ind_before_axis_stride + j
            pre_index2 = i * before_axis_stride + j
            with ib.for_range(0, ind_axis_range, "k") as k:
                # Offset along indices or updates
                index1 = pre_index1 + k * ind_after_axis_range
                # Get index and shift to positive side if need
                k_new = indices[index1]
                shifted_index = k_new + (k_new < 0) * axis_range
                # Offset along data
                index2 = pre_index2 + shifted_index * after_axis_range
                reduce_func(out, index2, updates[index1])

        return ib.get()

    # One helper per supported reduction; each mutates out[dst_index] in place.
    def update_func(dst_ptr, dst_index, update):
        dst_ptr[dst_index] = update

    def add_func(dst_ptr, dst_index, update):
        dst_ptr[dst_index] += update

    def mul_func(dst_ptr, dst_index, update):
        dst_ptr[dst_index] *= update

    def mean_func(dst_ptr, dst_index, update):
        dst_ptr[dst_index] = (dst_ptr[dst_index] + update) / 2

    def min_func(dst_ptr, dst_index, update):
        dst_ptr[dst_index] = tir.min(dst_ptr[dst_index], update)

    def max_func(dst_ptr, dst_index, update):
        dst_ptr[dst_index] = tir.max(dst_ptr[dst_index], update)

    reduce_func = None
    if reduction == "update":
        reduce_func = update_func
    elif reduction == "add":
        reduce_func = add_func
    elif reduction == "mul":
        reduce_func = mul_func
    elif reduction == "mean":
        reduce_func = mean_func
    elif reduction == "min":
        reduce_func = min_func
    elif reduction == "max":
        reduce_func = max_func
    else:
        raise NotImplementedError(
            "scatter_elements reduction not in [update, add, mul, mean, min, max]:", reduction
        )

    out_buf = tir.decl_buffer(data.shape, data.dtype, "out_buf")
    return te.extern(
        [data.shape],
        [data, indices, updates],
        lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], outs[0], reduce_func),
        dtype=data.dtype,
        out_buffers=[out_buf],
        name="scatter_elements.generic",
        tag="scatter_elements.generic",
    )
| 6,243 | 34.885057 | 97 | py |
tvm | tvm-main/python/tvm/topi/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Common topi utilities"""
from __future__ import absolute_import as _abs
from numbers import Integral
import numpy as np
import tvm
from tvm import te
from tvm.tir import Any, SizeVar, bijective_layout, layout
from . import cpp, tag
class InvalidShapeError(ValueError):
    """Raised when a topi function receives a shape it cannot handle
    (e.g. invoking a winograd template with a non-3x3 kernel)."""
def ncw_pack_layout(layout_info):
    """Return True when the layout string is a packed NCWinic-style layout."""
    if layout_info[:3] != "NCW":
        return False
    # Packed layouts carry both an inner-channel ('c') and inner-batch ('n') axis.
    return "c" in layout_info and "n" in layout_info
def ncw_xc_layout(layout_info):
    """Return True when the layout string is an NCWxc layout (x numeric)."""
    return (
        layout_info[:3] == "NCW"
        and "c" in layout_info
        # Everything between the "NCW" prefix and the trailing axis letter
        # must be the numeric inner-channel factor.
        and layout_info[3:-1].isnumeric()
    )
def nchw_pack_layout(layout_info):
    """Return True when the layout string is a packed NCHWinic-style layout."""
    if layout_info[:4] != "NCHW":
        return False
    # Packed layouts carry both an inner-channel ('c') and inner-batch ('n') axis.
    return "c" in layout_info and "n" in layout_info
def nchw_xc_layout(layout_info):
    """Return True when the layout string is an NCHWxc layout (x numeric)."""
    return (
        layout_info[:4] == "NCHW"
        and "c" in layout_info
        # Everything between the "NCHW" prefix and the trailing axis letter
        # must be the numeric inner-channel factor.
        and layout_info[4:-1].isnumeric()
    )
def traverse_inline(s, final_op, callback):
    """Traverse computation graph and do auto inline

    Parameters
    ----------
    s: schedule
        The schedule

    final_op: Operation
        The final output operator.

    callback: callable
        The callback function on each op
    """
    # Guard against revisiting ops that feed multiple consumers.
    visited = set()

    def _traverse(op):
        if op in visited:
            return
        visited.add(op)
        if tag.is_injective(op.tag):
            # Inline injective producers (unless they are schedule outputs)
            # and recurse into their compute inputs.
            if op not in s.outputs:
                s[op].compute_inline()
            for tensor in op.input_tensors:
                if isinstance(tensor.op, tvm.te.ComputeOp):
                    _traverse(tensor.op)
        # Invoked after the op's inputs have been processed (post-order).
        callback(op)

    _traverse(final_op)
def prod(x):
    """Multiply together every item of the tuple.

    Parameters
    ----------
    x: tuple
        Input tuple

    Returns
    -------
    value : Expr
        The product of all items; an int32 constant 1 when ``x`` is empty.
    """
    if not x:
        return tvm.tir.const(1, "int32")
    result = x[0]
    for item in x[1:]:
        result = result * item
    return result
def get_const_int(expr):
    """Verify that the expression is an integer and return its constant value.

    Parameters
    ----------
    expr : tvm.Expr or int
        The input expression.

    Returns
    -------
    out_value : int
        The constant integer value.

    Raises
    ------
    ValueError
        If the expression does not simplify to a constant integer.
    """
    # Plain Python/NumPy integers pass straight through.
    if isinstance(expr, Integral):
        return expr
    if not isinstance(expr, tvm.tir.IntImm):
        expr = tvm.arith.Analyzer().simplify(expr)
        if not isinstance(expr, tvm.tir.IntImm):
            raise ValueError("Expect value to be constant int")
    return int(expr.value)
def get_const_float(expr):
    """Verify that the expression is a floating point and return its constant value.

    Parameters
    ----------
    expr : tvm.Expr or float
        The input expression.

    Returns
    -------
    out_value : float
        The constant float value.

    Raises
    ------
    ValueError
        If the expression does not simplify to a constant float.
    """
    # Plain Python floats pass straight through.
    if isinstance(expr, float):
        return float(expr)
    if not isinstance(expr, tvm.tir.FloatImm):
        expr = tvm.arith.Analyzer().simplify(expr)
        if not isinstance(expr, tvm.tir.FloatImm):
            raise ValueError("Expect value to be constant float")
    return float(expr.value)
def equal_const_int(expr, value):
    """Return whether the expression equals the given integer value.

    Parameters
    ----------
    expr : tvm.Expr
        The input expression.
    value : int
        The value to compare against.

    Returns
    -------
    equal : bool
        True when ``expr`` is a constant equal to ``value``;
        False when it is not constant or differs.
    """
    # Plain Python/NumPy integers compare directly.
    if isinstance(expr, Integral):
        return expr == value
    if not isinstance(expr, tvm.tir.IntImm):
        expr = tvm.arith.Analyzer().simplify(expr)
        if not isinstance(expr, tvm.tir.IntImm):
            # Not provably constant: treat as unequal.
            return False
    return expr.value == value
def get_const_tuple(in_tuple):
    """Verify that each element is IntImm or Var; return a tuple of ints/Vars.

    Parameters
    ----------
    in_tuple : tuple of Expr
        The input.

    Returns
    -------
    out_tuple : tuple of int
        Constant elements converted to int; symbolic elements kept as-is.
    """
    out = []
    analyzer = None
    for item in in_tuple:
        if isinstance(item, (tvm.tir.Var, tvm.tir.expr.Any)):
            # Symbolic dimension: keep the variable itself.
            out.append(item)
            continue
        if isinstance(item, (tvm.tir.IntImm, int)):
            out.append(get_const_int(item))
            continue
        # Not obviously constant: try simplification (build the analyzer lazily).
        if analyzer is None:
            analyzer = tvm.arith.Analyzer()
        simplified = analyzer.simplify(item)
        if isinstance(simplified, tvm.tir.IntImm):
            out.append(get_const_int(simplified))
        else:
            out.append(simplified)
    return tuple(out)
def const_vector(vector, name="const_vector"):
    """Convert a constant 1-D numpy vector to a tvm tensor.

    Parameters
    ----------
    vector: numpy.ndarray
        Const input array

    name: str, optional
        The name of output op

    Returns
    -------
    tensor: Tensor
        The created tensor
    """
    if not isinstance(vector, np.ndarray):
        vector = np.array(vector)
    row = vector.shape[0]
    dtype = str(vector.dtype)
    idxm = tvm.tir.indexmod

    def _lookup(i):
        # Build a chain of Selects so each position yields its constant;
        # later entries wrap earlier ones, so ordering must be preserved.
        acc = tvm.tir.const(0.0, dtype)
        for pos in range(row):
            acc = tvm.tir.Select(
                tvm.tir.all(idxm(i, row) == pos), tvm.tir.const(vector[pos], dtype), acc
            )
        return acc

    return te.compute(vector.shape, _lookup, name=name)
def get_float_tuple(in_tuple):
    """Verify that each element is FloatImm and return a tuple of floats.

    Parameters
    ----------
    in_tuple : tuple of Expr
        The input.

    Returns
    -------
    out_tuple : tuple of float
        The output.
    """
    return tuple(map(get_const_float, in_tuple))
def simplify(expr):
    """Simplify an Expr; non-Expr inputs are returned unchanged.

    Parameters
    ----------
    expr : Expr or int
        The input.

    Returns
    -------
    out : Expr or int
        The simplified output
    """
    if isinstance(expr, tvm.tir.PrimExpr):
        return tvm.arith.Analyzer().simplify(expr)
    return expr
def ravel_index(indices, shape):
    """Flatten the index tuple to 1D
    Parameters
    ----------
    indices : tuple of int or tvm.tir.IntImm
        The input coordinates
    shape : tuple of int
        Shape of the tensor.
    Returns
    -------
    idx : int or Expr
        The index after flattening
    """
    # Horner-style accumulation: the first coordinate seeds the index,
    # each following dimension scales it and adds the next coordinate.
    flat = None
    for dim, coord in zip(shape, indices):
        flat = coord if flat is None else flat * dim + coord
    return flat
def unravel_index(idx, shape):
    """Convert the flattened ind to the coordinate array
    Parameters
    ----------
    idx : int or tvm.tir.IntImm
        The 1D index
    shape : tuple of int
        Shape of the tensor
    Returns
    -------
    indices : tuple of int or tvm.tir.IntImm
        Corresponding coordinate of the 1D index
    """
    idxd = tvm.tir.indexdiv
    idxm = tvm.tir.indexmod
    coords = []
    last = len(shape) - 1
    # Peel dimensions from the innermost (fastest-varying) outward.
    for pos, dim in enumerate(reversed(shape)):
        if dim == 0:
            coords.append(0)
        elif pos == last:
            # Assuming the index is in-bounds, the last coordinate is
            # already less than dim, and doesn't need the be remainder
            # mod dim.
            coords.append(idx)
        else:
            coords.append(idxm(idx, dim))
            idx = idxd(idx, dim)
    coords.reverse()
    return coords
def const_matrix(matrix, name="const_matrix", attrs=None):
    """convert a const numpy 2-dimensional matrix to tvm tensor
    Parameters
    ----------
    matrix: numpy.ndarray
        Const input array
    name: str, optional
        The name of output op
    Returns
    -------
    tensor: Tensor
        The created tensor
    """
    n_rows, n_cols = matrix.shape
    dtype = str(matrix.dtype)
    idxm = tvm.tir.indexmod
    def select_array(i, j):
        # Fold the matrix into a chain of Selects keyed on (row, col).
        picked = tvm.tir.const(0.0, dtype)
        for r in range(n_rows):
            for c in range(n_cols):
                picked = tvm.tir.Select(
                    tvm.tir.all(idxm(i, n_rows) == r, idxm(j, n_cols) == c),
                    tvm.tir.const(matrix[r][c], dtype),
                    picked,
                )
        return picked
    if attrs is None:
        attrs = {"const_matrix": True, "schedule_rule": "None"}
    return te.compute(matrix.shape, select_array, name=name, attrs=attrs)
def get_max_power2_factor(n, max_value=None):
    """Get max factor of n in power of 2. If max_value is specified, the
    factor value will be no more than max_value.
    Parameters
    ----------
    n : int
        The input value
    max_value : int, optional
        The max value for the factor
    Returns
    -------
    factor : int
        The max factor in power of 2.
    """
    x = 1
    while n % 2 == 0:
        if max_value is not None and max_value < x * 2:
            break
        x *= 2
        # Integer division keeps n an exact int; the previous float division
        # (n /= 2) loses precision once n exceeds 2**53.
        n //= 2
    return x
def get_shape(src_shape, src_layout, dst_layout):
    """Given a source shape, a source layout and a destination layout, infer
    the destination shape.
    Parameter
    ---------
    src_shape : tuple of int or IntImm
        Source shape
    src_layout : str or Layout
        Source layout
    dst_layout : str or Layout
        Destination layout
    Returns
    -------
    dst_shape : tuple of int
        Destination shape
    """
    # Identical layouts need no remapping.
    if src_layout == dst_layout:
        return get_const_tuple(src_shape)
    src_layout = layout(src_layout) if isinstance(src_layout, str) else src_layout
    dst_layout = layout(dst_layout) if isinstance(dst_layout, str) else dst_layout
    assert len(src_layout) == len(dst_layout), f"Incompatible layout {src_layout} vs {dst_layout}"
    mapping = bijective_layout(src_layout, dst_layout)
    # Map each source axis position through the layout bijection.
    axes = tvm.runtime.convert(list(range(len(src_layout))))
    dst_indices = mapping.forward_index(axes)
    return get_const_tuple(tuple(src_shape[axis.value] for axis in dst_indices))
def within_index(b, e, s, i):
    """Return a boolean value that indicates if i is within the given index.
    Parameters
    ----------
    b : Expr
        beginning of the index
    e : Expr
        end of the index
    s : Expr
        strides of index
    i : Expr
        array position
    Returns
    -------
    selected: Expr
        bool expression that is True is the array position would be selected
        by the index and False otherwise
    """
    # Out-of-range checks; the comparison direction flips for negative strides.
    before_begin = tvm.tir.Select(s < 0, i <= e, i < b)
    after_end = tvm.tir.Select(s < 0, i > b, i >= e)
    # Distance from the start of the slice, modulo the stride magnitude.
    offset = te.if_then_else(s < 0, ((i - e) + (e % te.abs(s)) + 1) % te.abs(s), (i - b) % s)
    return tvm.tir.Select(
        tvm.tir.Or(before_begin, after_end), tvm.tir.const(False), offset.equal(0)
    )
def make_idx(b, e, s, z, i):
    """Return the array position in the selection that corresponds to an
    array position in the full array.
    The returned value is only meaningful if within_index() returns True
    for the same set of parameters.
    Parameters
    ----------
    b : Expr
        beginning of the index
    e : Expr
        end of the index
    s : Expr
        strides of index
    z : Expr
        size of the indexed dimension
    i : Expr
        array position
    Returns
    -------
    position: Expr
        int expression that corresponds to an array position in the selection.
    """
    # Out-of-range tests on either side of the slice; the comparison
    # direction flips for negative strides (mirrors within_index above).
    bc = tvm.tir.Select(s < 0, i <= e, i < b)
    ec = tvm.tir.Select(s < 0, i > b, i >= e)
    # Clamp to array size
    b = tvm.tir.Select(z < b, z - 1, b)
    # Position within the selection: distance from the (clamped) start,
    # divided by the stride magnitude.
    ss = tvm.tir.if_then_else(s < 0, (b - i) // te.abs(s), (i - b) // s)
    # NOTE(review): 88 looks like an arbitrary placeholder for out-of-range
    # positions -- per the docstring, callers must gate on within_index()
    # first, so this value should never be observed; confirm before relying
    # on it.
    return tvm.tir.if_then_else(tvm.tir.Or(bc, ec), 88, ss)
def is_empty_shape(shape):
    """Check whether an input shape has dimension with size 0.
    Parameter
    ---------
    shape : list of Expr
        Input shape
    Returns
    -------
    is_empty: bool
        Whether input shape is empty or has dimension with size 0.
    """
    # Thin wrapper: the actual check is implemented in C++ (cpp.utils FFI).
    return cpp.utils.is_empty_shape(shape)
def ceil_div(a, b):
    """Return ceil division of a by b"""
    # (a + b - 1) // b, built with tir.indexdiv so it also works for
    # symbolic index expressions, not just plain ints.
    return tvm.tir.indexdiv(a + (b - 1), b)
def swap(arr, axis):
    """Return a copy of ``arr`` with ``arr[axis]`` and ``arr[-1]`` exchanged.
    Parameters
    ----------
    arr : list
        The input list
    axis : int
        Position to exchange with the last element
    Returns
    -------
    out : list
        A new list of the same length with the two elements swapped.
    """
    # The previous slicing construction (arr[:axis] + [arr[-1]] + ... )
    # produced a length n+1 list when axis was the last index; a tuple swap
    # on a copy handles that case (and negative axes) correctly.
    out = list(arr)
    out[axis], out[-1] = out[-1], out[axis]
    return out
def is_target(names):
    """Return True if the name of the current target is one of provided names"""
    # Accept either a single name or a collection of names.
    if isinstance(names, str):
        names = [names]
    target = tvm.target.Target.current(allow_none=False)
    keys = target.keys
    return any(candidate in keys for candidate in names)
def is_dynamic_shape(shape):
    """Checks if any part of a shape is dynamic (i.e. a symbolic dimension).
    Parameters
    ----------
    shape : list of Expr
        Input shape to inspect.
    Returns
    -------
    is_dynamic : bool
        True when any dimension is an ``Any`` or ``SizeVar``.
    """
    # Generator avoids materializing an intermediate list and lets any()
    # short-circuit on the first dynamic dimension.
    return any(isinstance(x, (Any, SizeVar)) for x in shape)
| 13,369 | 24.274102 | 98 | py |
tvm | tvm-main/python/tvm/topi/broadcast.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Broadcast operators"""
from __future__ import absolute_import as _abs
from . import cpp as _cpp
# Every wrapper below simply forwards to the C++ implementation exposed
# through topi's FFI module (imported at the top of this file as `_cpp`).
def broadcast_to(data, shape):
    """Broadcast the src to the target shape
    We follow the numpy broadcasting rule.
    See also https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
    Parameters
    ----------
    data : tvm.te.Tensor
        The input data
    shape : list or tuple
        The target shape to be broadcasted.
    Returns
    -------
    ret : tvm.te.Tensor
    """
    return _cpp.broadcast_to(data, shape)
def add(lhs, rhs):
    """Addition with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.add(lhs, rhs)
def subtract(lhs, rhs):
    """Subtraction with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.subtract(lhs, rhs)
def multiply(lhs, rhs):
    """Multiplication with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.multiply(lhs, rhs)
def divide(lhs, rhs):
    """Division with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.divide(lhs, rhs)
def floor_divide(lhs, rhs):
    """Floor division with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.floor_divide(lhs, rhs)
# Modulus and min/max wrappers; all dispatch to the C++ topi kernels (`_cpp`).
def mod(lhs, rhs):
    """Modulus with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.mod(lhs, rhs)
def floor_mod(lhs, rhs):
    """Floor modulus with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.floor_mod(lhs, rhs)
def maximum(lhs, rhs):
    """Take element-wise maximum of two tensors with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.maximum(lhs, rhs)
def minimum(lhs, rhs):
    """Take element-wise minimum of two tensors with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.minimum(lhs, rhs)
# Power, shift and comparison wrappers; all dispatch to the C++ topi
# implementations via `_cpp`.
def power(lhs, rhs):
    """Power with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.power(lhs, rhs)
def left_shift(lhs, rhs):
    """Left shift with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.left_shift(lhs, rhs)
def right_shift(lhs, rhs):
    """Right shift with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.right_shift(lhs, rhs)
def greater(lhs, rhs):
    """Compute (lhs>rhs) with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.greater(lhs, rhs)
def less(lhs, rhs):
    """Compute (lhs<rhs) with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.less(lhs, rhs)
def equal(lhs, rhs):
    """Compute (lhs==rhs) with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.equal(lhs, rhs)
def not_equal(lhs, rhs):
    """Compute (lhs!=rhs) with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.not_equal(lhs, rhs)
def greater_equal(lhs, rhs):
    """Compute (lhs>=rhs) with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.greater_equal(lhs, rhs)
def less_equal(lhs, rhs):
    """Compute (lhs<=rhs) with auto-broadcasting
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.less_equal(lhs, rhs)
# Logical and bitwise wrappers; all dispatch to the C++ topi kernels (`_cpp`).
def logical_and(lhs, rhs):
    """Compute element-wise logical and of data.
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.logical_and(lhs, rhs)
def logical_or(lhs, rhs):
    """Compute element-wise logical or of data.
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.logical_or(lhs, rhs)
def logical_xor(lhs, rhs):
    """Compute element-wise logical xor of data.
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.logical_xor(lhs, rhs)
def bitwise_and(lhs, rhs):
    """Compute element-wise bitwise and of data.
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.bitwise_and(lhs, rhs)
def bitwise_or(lhs, rhs):
    """Compute element-wise bitwise or of data.
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.bitwise_or(lhs, rhs)
def bitwise_xor(lhs, rhs):
    """Compute element-wise bitwise xor of data.
    Parameters
    ----------
    lhs : tvm.te.Tensor or Expr
        The left operand
    rhs : tvm.te.Tensor or Expr
        The right operand
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    return _cpp.bitwise_xor(lhs, rhs)
def logical_not(data):
    """Compute element-wise logical not of data.
    Parameters
    ----------
    data : tvm.te.Tensor or Expr
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if the operand is Expr.
        Otherwise returns Tensor.
    """
    return _cpp.logical_not(data)
def bitwise_not(data):
    """Compute element-wise bitwise not of data.
    Parameters
    ----------
    data : tvm.te.Tensor or Expr
    Returns
    -------
    ret : tvm.te.Tensor or Expr
        Returns Expr if the operand is Expr.
        Otherwise returns Tensor.
    """
    return _cpp.bitwise_not(data)
tvm | tvm-main/python/tvm/topi/tag.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace of all tag system in tvm
Each operator can be tagged by a tag, which indicates its type.
Generic categories
- tag.ELEMWISE="elemwise":
Elementwise operator, for example :code:`out[i, j] = input[i, j]`
- tag.BROADCAST="broadcast":
Broadcasting operator, can always map output axis to the input in order.
for example :code:`out[i, ax1, j, ax2] = input[i, j]`.
Note that the axis need to be in order so transpose is not a bcast operator.
If an input of broadcast operator has same shape as output,
we can ensure that it is elementwise relation.
- tag.INJECTIVE="injective":
Injective operator, can always injectively map output axis to a single input axis.
All injective operator can still be safely fused similar to ewise to reduction.
- tag.COMM_REDUCE="comm_reduce":
Communicative reduction operator
- If an op does not belong to these generic categories, it should have a special tag.
Note
----
When we add a new topi operator, the op need to be tagged as generic as possible.
We can also compose tags like "injective,pad" to give generic and specific information.
When we use composed tags, we must always put generic tag in the first location.
"""
ELEMWISE = "elemwise"
BROADCAST = "broadcast"
INJECTIVE = "injective"
COMM_REDUCE = "comm_reduce"
COMM_REDUCE_IDX = "comm_reduce_idx"
def is_broadcast(tag):
    """Check if a tag is bcast
    Parameters
    ----------
    tag : str
        The input tag
    Returns
    -------
    ret : bool
        Whether a tag is broadcast
    """
    # A composed tag like "broadcast,pad" keeps the generic tag first,
    # so a prefix match is sufficient; str.startswith accepts a tuple.
    return tag in (ELEMWISE, BROADCAST) or tag.startswith((ELEMWISE, BROADCAST))
def is_injective(tag):
    """Check if a tag is injective
    Parameters
    ----------
    tag : str
        The input tag
    Returns
    -------
    ret : bool
        Whether a tag is injective
    """
    # Elementwise and broadcast ops are injective as well.
    return tag in (ELEMWISE, BROADCAST, INJECTIVE) or tag.startswith(
        (ELEMWISE, BROADCAST, INJECTIVE)
    )
| 2,823 | 31.45977 | 93 | py |
tvm | tvm-main/python/tvm/topi/signal.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks, unused-argument
"""STFT operator"""
from math import pi
from tvm import te, tir
def stft(
    data,
    n_fft,
    hop_length,
    win_length,
    window,
    normalized,
    onesided,
    output_shape,
):
    """
    The STFT computes the Fourier transform of short overlapping windows of the input.
    This gives frequency components of the signal as they change over time.
    Parameters
    ----------
    data : relay.Expr
        Either a 1-D tensor or a 2-D batch tensor.
    n_fft : int
        The size of Fourier transform
    hop_length : int
        The distance between neighboring sliding window frames
    win_length : int
        The size of window frame and STFT filter
    window : relay.Expr
        A 1-D tensor window frame
    normalized : bool
        Whether to return the normalized STFT results
    onesided : bool
        Whether to return onesided result or fill with conjugate symmetry
    output_shape : tuple
        Shape of the result; used to size the output buffer and decide the
        inner loop kind.
    Returns
    -------
    output : relay.Expr
        Tensor containing the STFT result
    Examples
    --------
    .. code-block:: python
        data = [1, 2, 3, 4, 5, 6]
        window = [4, 3, 2]
        [n_fft, hop_length, win_length, normalized, onesided] = [3, 3, 3, False, True]
        relay.stft(data, n_fft, hop_length, win_length, window, normalized, onesided)
        -> [[[15.0000, 0.0000], [34.0000, 0.0000]], [[ 4.5000, 0.8660], [ 1.0000, -1.7321]]]
    """
    # Lowered by hand with the TIR ir_builder; te.extern below invokes this
    # to produce the op body.
    def gen_ir(
        data_ptr,
        n_fft,
        hop_length,
        win_length,
        window_ptr,
        normalized,
        onesided,
        output_ptr,
        loop_kind,
    ):
        ib = tir.ir_builder.create()
        data = ib.buffer_ptr(data_ptr)
        window = ib.buffer_ptr(window_ptr)
        output = ib.buffer_ptr(output_ptr)
        # https://librosa.org/doc/0.7.2/_modules/librosa/core/spectrum.html#stft
        # The (batch, frequency-row) pair is fused into one parallel loop.
        with ib.for_range(
            0, output_ptr.shape[0] * output_ptr.shape[1], kind="parallel"
        ) as batch_row:
            with ib.for_range(0, output_ptr.shape[2], kind=loop_kind) as col:
                # NOTE(review): these two allocations are immediately shadowed
                # by the scalar rebinding below and appear unused -- confirm.
                batch = ib.allocate("int32", (1), name="batch", scope="local")
                row = ib.allocate("int32", (1), name="row", scope="local")
                batch = tir.floordiv(batch_row, output_ptr.shape[1])
                row = tir.floormod(batch_row, output_ptr.shape[1])
                # Last axis holds (real, imaginary); start both at zero.
                output[batch, row, col, 0] = tir.Cast(data_ptr.dtype, 0)
                output[batch, row, col, 1] = tir.Cast(data_ptr.dtype, 0)
                # Accumulate the windowed DFT of this frame:
                # real += w*x*cos(2*pi*row*wlen/win_length),
                # imag -= w*x*sin(...).
                with ib.for_range(0, win_length) as wlen:
                    output[batch, row, col, 0] += (
                        window[wlen]
                        * data[batch, col * hop_length + wlen]
                        * tir.cos(2 * pi * row * wlen / win_length)
                    )
                    output[batch, row, col, 1] -= (
                        window[wlen]
                        * data[batch, col * hop_length + wlen]
                        * tir.sin(2 * pi * row * wlen / win_length)
                    )
                # Optional 1/sqrt(n_fft) normalization of both components.
                with ib.if_scope(normalized):
                    output[batch, row, col, 0] /= tir.sqrt(tir.const(n_fft, "float32"))
                    output[batch, row, col, 1] /= tir.sqrt(tir.const(n_fft, "float32"))
        return ib.get()
    output_buf = tir.decl_buffer(output_shape, data.dtype, "output_buf")
    # A symbolic frame count (SizeVar) cannot be vectorized; fall back to serial.
    loop_kind = "vectorize"
    if isinstance(output_shape[2], tir.expr.SizeVar): # any_dim
        loop_kind = "serial"
    return te.extern(
        output_shape,
        [data, window],
        lambda ins, outs: gen_ir(
            ins[0], n_fft, hop_length, win_length, ins[1], normalized, onesided, outs[0], loop_kind
        ),
        dtype=[data.dtype],
        out_buffers=[output_buf],
        name="stft_cpu",
        tag="stft_cpu",
    )
def dft(
    re_data: te.Tensor,
    im_data: te.Tensor,
    inverse: tir.IntImm,
):
    """
    Computes the discrete Fourier transform of input (calculation along the last axis).
    This gives frequency components of the signal as they change over time.
    Parameters
    ----------
    re_data : relay.Expr
        N-D tensor, real part of the input signal.
    im_data : relay.Expr
        N-D tensor, imaginary part of the input signal.
        If the signal is real, then the values of this tensor are zeros.
    inverse : bool
        Whether to perform the inverse discrete fourier transform.
    Returns
    -------
    re_output : relay.Expr
        The Fourier Transform of the input (Real part).
    im_output : relay.Expr
        The Fourier Transform of the input (Imaginary part).
    """
    # Naive O(n^2) DFT per slice (nested loops over n and k) built with the
    # TIR ir_builder; te.extern below invokes this to produce the op body.
    def gen_ir(
        re_data_buf,
        im_data_buf,
        re_output_buf,
        im_output_buf,
    ):
        ib = tir.ir_builder.create()
        re_data_ptr = ib.buffer_ptr(re_data_buf)
        im_data_ptr = ib.buffer_ptr(im_data_buf)
        re_output_ptr = ib.buffer_ptr(re_output_buf)
        im_output_ptr = ib.buffer_ptr(im_output_buf)
        shape = re_data.shape
        n_fft = shape[len(shape) - 1]
        # All leading axes are flattened into one parallel batch loop.
        base_range = 1
        for i in range(len(shape) - 1):
            base_range *= shape[i]
        # The inverse transform flips the exponent sign and scales by 1/n.
        sign = -1 if inverse else 1
        factor = 1.0 / n_fft if inverse else 1.0
        with ib.for_range(0, base_range, kind="parallel") as i:
            base_idx = i * n_fft
            with ib.for_range(0, n_fft) as n:
                n_idx = base_idx + n
                re_output_ptr[n_idx] = tir.Cast(re_output_ptr.dtype, 0)
                im_output_ptr[n_idx] = tir.Cast(im_output_ptr.dtype, 0)
                _w = sign * -2 * pi * n / n_fft
                with ib.for_range(0, n_fft) as k:
                    k_idx = base_idx + k
                    w = _w * k
                    cos_w = tir.Cast(re_output_ptr.dtype, tir.cos(w))
                    sin_w = tir.Cast(re_output_ptr.dtype, tir.sin(w))
                    # Complex multiply-accumulate: (re + j*im) * e^{jw}.
                    re_output_ptr[n_idx] += re_data_ptr[k_idx] * cos_w - im_data_ptr[k_idx] * sin_w
                    im_output_ptr[n_idx] += re_data_ptr[k_idx] * sin_w + im_data_ptr[k_idx] * cos_w
                re_output_ptr[n_idx] *= tir.Cast(re_output_ptr.dtype, factor)
                im_output_ptr[n_idx] *= tir.Cast(im_output_ptr.dtype, factor)
        return ib.get()
    # Two outputs (real, imaginary), each shaped like the input.
    output_shape = [re_data.shape] * 2
    return te.extern(
        shape=output_shape,
        inputs=[re_data, im_data],
        fcompute=lambda ins, outs: gen_ir(ins[0], ins[1], outs[0], outs[1]),
        dtype=[re_data.dtype, im_data.dtype],
        name="dft_cpu",
        tag="dft_cpu",
    )
| 7,390 | 34.533654 | 99 | py |
tvm | tvm-main/python/tvm/topi/sort.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-arguments
"""Argsort operator"""
import tvm
from tvm import te
from .utils import get_const_tuple
def sort(data, axis=-1, is_ascend=1):
    """Performs sorting along the given axis and returns an array
    in sorted order.
    Parameters
    ----------
    data : tvm.te.Tensor
        The input tensor.
    axis : int, optional
        Axis along which to sort the input tensor.
        By default the flattened array is used.
    is_ascend : boolean, optional
        Whether to sort in ascending or descending order.
    dtype : string, optional
        DType of the output indices.
    Returns
    -------
    out : tvm.te.Tensor
        Sorted index tensor.
    """
    # Input/output buffers share the input's shape and dtype.
    in_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
    result_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "out_buf", data_alignment=8)
    def _dispatch(ins, outs):
        # Forward to the registered packed function that does the real work.
        return tvm.tir.call_packed("tvm.contrib.sort.sort", ins[0], outs[0], axis, is_ascend)
    return te.extern(
        data.shape,
        [data],
        _dispatch,
        dtype=data.dtype,
        in_buffers=[in_buf],
        out_buffers=result_buf,
        name="sort_cpu",
        tag="sort_cpu",
    )
def argsort(data, valid_count=None, axis=-1, is_ascend=1, dtype="float32"):
    """Performs sorting along the given axis and returns an array
    of indices having the same shape as an input array that index
    data in sorted order.
    Parameters
    ----------
    data : tvm.te.Tensor
        The input tensor.
    valid_count : tvm.te.Tensor, optional
        1-D tensor for valid number of boxes.
    axis : int, optional
        Axis along which to sort the input tensor.
        By default the flattened array is used.
    is_ascend : boolean, optional
        Whether to sort in ascending or descending order.
    dtype : string, optional
        DType of the output indices. Ignored when `valid_count` is given
        (that path always emits int32 indices).
    Returns
    -------
    out : tvm.te.Tensor
        Sorted index tensor.
    Example
    --------
    .. code-block:: python
        # An example to use argsort
        dshape = (1, 5, 6)
        data = te.placeholder(dshape, name="data")
        axis = 0
        is_ascend = False
        out = argsort(data, axis=axis, is_ascend=is_ascend)
        np_data = np.random.uniform(dshape)
        s = topi.generic.schedule_argsort(out)
        f = tvm.build(s, [data, out], "llvm")
        dev = tvm.cpu()
        tvm_data = tvm.nd.array(np_data, dev)
        tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data.dtype), dev)
        f(tvm_data, tvm_out)
    """
    data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
    if valid_count is not None:
        # NMS-style path: dispatch to argsort_nms, which only sorts the
        # first `valid_count` boxes and always returns int32 indices (the
        # `dtype` argument is not used here).
        valid_count_buf = tvm.tir.decl_buffer(
            valid_count.shape, valid_count.dtype, "valid_count_buf", data_alignment=4
        )
        out_buf = tvm.tir.decl_buffer(data.shape, "int32", "out_buf", data_alignment=8)
        out = te.extern(
            data.shape,
            [data, valid_count],
            lambda ins, outs: tvm.tir.call_packed(
                "tvm.contrib.sort.argsort_nms", ins[0], ins[1], outs[0], axis, is_ascend
            ),
            dtype="int32",
            in_buffers=[data_buf, valid_count_buf],
            out_buffers=out_buf,
            name="argsort_nms_cpu",
            tag="argsort_nms_cpu",
        )
    else:
        # Plain path: full argsort with indices in the requested dtype.
        out_buf = tvm.tir.decl_buffer(data.shape, dtype, "out_buf", data_alignment=8)
        out = te.extern(
            data.shape,
            [data],
            lambda ins, outs: tvm.tir.call_packed(
                "tvm.contrib.sort.argsort", ins[0], outs[0], axis, is_ascend
            ),
            dtype=dtype,
            in_buffers=[data_buf],
            out_buffers=out_buf,
            name="argsort_cpu",
            tag="argsort_cpu",
        )
    return out
def topk(data, k=1, axis=-1, ret_type="both", is_ascend=False, dtype="int64"):
    """Get the top k elements in an input tensor along the given axis.
    Parameters
    ----------
    data : tvm.te.Tensor
        The input tensor.
    k : int or tvm.te.Tensor, optional
        Number of top elements to select. Return all elements if k < 1.
    axis : int, optional
        Axis long which to sort the input tensor.
    ret_type: str, optional
        The return type [both, values, indices].
        "both": return both top k data and indices.
        "values": return top k data only.
        "indices": return top k indices only.
    is_ascend : boolean, optional
        Whether to sort in ascending or descending order.
    dtype : string, optional
        The data type of the indices output.
    Returns
    -------
    out : tvm.te.Tensor or List[tvm.te.Tensor]
        The computed result.
    """
    assert ret_type in ["both", "values", "indices"]
    data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
    out_shape = list(get_const_tuple(data.shape))
    # A non-int k (e.g. a tensor) makes the output extent symbolic.
    kvar = tvm.te.size_var("k")
    if not isinstance(k, int):
        out_shape[axis] = kvar
    elif k >= 1:
        out_shape[axis] = k
    # else: k < 1 keeps the full axis extent, i.e. all elements are returned.
    # One output buffer per requested result ("both" gets values + indices).
    out_bufs = []
    if ret_type in ["both", "values"]:
        out_bufs.append(tvm.tir.decl_buffer(out_shape, data.dtype, "value_buf", data_alignment=8))
    if ret_type in ["both", "indices"]:
        out_bufs.append(tvm.tir.decl_buffer(out_shape, dtype, "indices_buf", data_alignment=8))
    out_shapes = [out_shape] * len(out_bufs)
    # Pass the symbolic size_var through to the packed func when k is not int.
    kv = kvar if not isinstance(k, int) else k
    out = te.extern(
        out_shapes,
        [data],
        lambda ins, outs: tvm.tir.call_packed(
            "tvm.contrib.sort.topk", ins[0], *outs, kv, axis, ret_type, is_ascend
        ),
        in_buffers=[data_buf],
        out_buffers=out_bufs,
        name="topk_cpu",
        tag="topk_cpu",
    )
    return out
| 6,597 | 31.029126 | 98 | py |
tvm | tvm-main/python/tvm/topi/scatter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""ScatterND operator"""
from tvm import te, tir # hide redefinition of min and max
from tvm.tir import expr
def _verify_scatter_nd_inputs(data, indices, updates):
    """Sanity-check rank/shape/dtype agreement between scatter_nd operands.
    Raises AssertionError on any statically-known mismatch; symbolic (Var)
    dimensions are skipped because they cannot be compared at compile time.
    """
    mdim = int(indices.shape[0])
    assert mdim <= len(data.shape), (
        f"The first dimension of the indices ({mdim}) must be less than or equal to "
        f"the length of the shape of the output ({len(data.shape)})."
    )
    # Trailing dims of `indices` must line up with the leading dims of `updates`.
    for i in range(len(indices.shape) - 1):
        if isinstance(indices.shape[i + 1], expr.Var) or isinstance(updates.shape[i], expr.Var):
            continue
        assert indices.shape[i + 1] == updates.shape[i], (
            f"Dimension of indices[{i+1}] ({indices.shape[i+1]}) must equal dimension of "
            f"updates[{i}] ({updates.shape[i]})."
        )
    # Remaining dims of `updates` must match the tail of the output shape.
    for i in range(mdim, len(data.shape)):
        data_ind = i - mdim + len(indices.shape) - 1
        if isinstance(updates.shape[data_ind], expr.Var) or isinstance(data.shape[i], expr.Var):
            continue
        assert updates.shape[data_ind] == data.shape[i], (
            f"Dimension of updates[{data_ind}] ({updates.shape[data_ind]}) must equal dimension "
            f"of out_shape[{i}] ({data.shape[i]})."
        )
    assert (
        "int" in indices.dtype
    ), f"Indices must be a tensor of integers, but its elements are {indices.dtype}."
def scatter_nd(data, indices, updates, mode):
    """Scatter elements from a n-dimension array.
    Given updates with shape (Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1}), indices with shape
    (M, Y_0, ..., Y_{K-1}), and output copied from data with shape (X_0, X_1, ..., X_{N-1}),
    scatter_nd computes
    .. code-block::
        output[indices[0, y_0, ..., y_{K-1}],
               ...,
               indices[M-1, y_0, ..., y_{K-1}],
               x_M,
               ...,
               x_{N-1}
               ] = f(output[...], updates[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}])
    where the update function f is determined by the mode.
    Parameters
    ----------
    data : tvm.te.Tensor
        The source array.
    indices : tvm.te.Tensor
        The indices of the values to extract.
    updates : tvm.te.Tensor
        The updates to apply at the Indices
    mode : string
        The update mode for the algorithm, one of "update", "add", "mul",
        "min" or "max".
        If update, the update values will replace the input data
        If add, the update values will be added to the input data
    Returns
    -------
    ret : tvm.te.Tensor
        The result tensor; same shape and dtype as `data`.
    """
    _verify_scatter_nd_inputs(data, indices, updates)
    def gen_ir(data_ptr, indices_ptr, updates_ptr, out_ptr):
        # pylint: disable=invalid-name
        ib = tir.ir_builder.create()
        data = ib.buffer_ptr(data_ptr)
        indices = ib.buffer_ptr(indices_ptr)
        updates = ib.buffer_ptr(updates_ptr)
        out = ib.buffer_ptr(out_ptr)
        # We combine all the indices dimensions but the first one into a single
        # dimension so we can iterate it in single loop instead of an arbitrary
        # number of loops. We do the same thing for all the update dimensions.
        fused_indices_dimension = 1
        for i in indices_ptr.shape[1:]:
            fused_indices_dimension *= i
        fused_updates_dimension = 1
        for i in updates_ptr.shape[len(indices_ptr.shape) - 1 :]:
            fused_updates_dimension *= i
        fused_shape = 1
        for i in data_ptr.shape:
            fused_shape *= i
        # Initialize the output with a flat copy of data; updates are then
        # applied on top of that copy.
        with ib.for_range(0, fused_shape) as i:
            out[i] = data[i]
        with ib.for_range(0, fused_indices_dimension) as i:
            with ib.for_range(0, fused_updates_dimension, kind="parallel") as j:
                offset = fused_updates_dimension
                index = j # This is x_M, .. x_{N-1} part of the index into out.
                # Build up the indices[0, y_0, .. y_{K-1}], .. indices[M-1, y_0, .. y_{K-1}] part
                # of the index into out.
                for l in reversed(range(indices_ptr.shape[0].value)):
                    # indices[i + l * fused_indices_dimension] = indices[l, y_0, ... y_{k-1}]
                    index += offset * indices[i + l * fused_indices_dimension]
                    offset *= data_ptr.shape[l]
                if mode == "update":
                    out[index] = updates[i * fused_updates_dimension + j]
                elif mode == "add":
                    out[index] += updates[i * fused_updates_dimension + j]
                elif mode == "mul":
                    out[index] *= updates[i * fused_updates_dimension + j]
                elif mode == "min":
                    out[index] = tir.min(out[index], updates[i * fused_updates_dimension + j])
                elif mode == "max":
                    out[index] = tir.max(out[index], updates[i * fused_updates_dimension + j])
                else:
                    raise NotImplementedError(
                        "scatter_nd mode not in [update, add, mul, min, max]:", mode
                    )
        return ib.get()
    out_buf = tir.decl_buffer(data.shape, data.dtype, "out_buf")
    return te.extern(
        [data.shape],
        [data, indices, updates],
        lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], outs[0]),
        dtype=data.dtype,
        out_buffers=[out_buf],
        name="scatter_nd.generic",
        tag="scatter_nd.generic",
    )
| 6,167 | 38.793548 | 97 | py |
tvm | tvm-main/python/tvm/topi/argwhere.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks
"""Argwhere operator"""
import tvm
from tvm.te import hybrid
@hybrid.script
def hybrid_argwhere_1d(output_shape, condition):
    """Find the indices of elements of a 1-D tensor that are non-zero.
    Parameters
    ----------
    output_shape : list
        Shape to allocate for the output tensor, [rows, 1].
    condition : tvm.te.Tensor
        1-D tensor with boolean values.
    Returns
    -------
    out : tvm.te.Tensor
        Indices of non-zero elements.
    """
    a = output_tensor(output_shape, "int32")
    a1 = condition.shape[0]
    # Next free output row; advanced once per non-zero element found.
    valid_index = 0
    for i1 in range(a1):
        if condition[i1] != 0:
            a[valid_index, 0] = i1
            valid_index += 1
    return a
@hybrid.script
def hybrid_argwhere_2d(output_shape, condition):
    """Find the indices of elements of a 2-D tensor that are non-zero.
    Parameters
    ----------
    output_shape : list
        Shape to allocate for the output tensor, [rows, 2].
    condition : tvm.te.Tensor
        2-D tensor with boolean values.
    Returns
    -------
    out : tvm.te.Tensor
        Indices of non-zero elements.
    """
    a = output_tensor(output_shape, "int32")
    a1 = condition.shape[0]
    a2 = condition.shape[1]
    # Next free output row; advanced once per non-zero element found.
    valid_index = 0
    for i1 in range(a1):
        for i2 in range(a2):
            if condition[i1, i2] != 0:
                a[valid_index, 0] = i1
                a[valid_index, 1] = i2
                valid_index += 1
    return a
@hybrid.script
def hybrid_argwhere_3d(output_shape, condition):
    """Find the indices of elements of a 3-D tensor that are non-zero.
    Parameters
    ----------
    output_shape : list
        Shape to allocate for the output tensor, [rows, 3].
    condition : tvm.te.Tensor
        3-D tensor with boolean values.
    Returns
    -------
    out : tvm.te.Tensor
        Indices of non-zero elements.
    """
    a = output_tensor(output_shape, "int32")
    a1 = condition.shape[0]
    a2 = condition.shape[1]
    a3 = condition.shape[2]
    # Next free output row; advanced once per non-zero element found.
    valid_index = 0
    for i1 in range(a1):
        for i2 in range(a2):
            for i3 in range(a3):
                if condition[i1, i2, i3] != 0:
                    a[valid_index, 0] = i1
                    a[valid_index, 1] = i2
                    a[valid_index, 2] = i3
                    valid_index += 1
    return a
@hybrid.script
def hybrid_argwhere_4d(output_shape, condition):
    """Find the indices of elements of a 4-D tensor that are non-zero.
    Parameters
    ----------
    output_shape : list
        Shape to allocate for the output tensor, [rows, 4].
    condition : tvm.te.Tensor
        4-D tensor with boolean values.
    Returns
    -------
    out : tvm.te.Tensor
        Indices of non-zero elements.
    """
    a = output_tensor(output_shape, "int32")
    a1 = condition.shape[0]
    a2 = condition.shape[1]
    a3 = condition.shape[2]
    a4 = condition.shape[3]
    # Next free output row; advanced once per non-zero element found.
    valid_index = 0
    for i1 in range(a1):
        for i2 in range(a2):
            for i3 in range(a3):
                for i4 in range(a4):
                    if condition[i1, i2, i3, i4] != 0:
                        a[valid_index, 0] = i1
                        a[valid_index, 1] = i2
                        a[valid_index, 2] = i3
                        a[valid_index, 3] = i4
                        valid_index += 1
    return a
@hybrid.script
def hybrid_argwhere_5d(output_shape, condition):
    """Find the indices of elements of a 5-D tensor that are non-zero.
    Parameters
    ----------
    output_shape : list
        Shape to allocate for the output tensor, [rows, 5].
    condition : tvm.te.Tensor
        5-D tensor with boolean values.
    Returns
    -------
    out : tvm.te.Tensor
        Indices of non-zero elements.
    """
    a = output_tensor(output_shape, "int32")
    a1 = condition.shape[0]
    a2 = condition.shape[1]
    a3 = condition.shape[2]
    a4 = condition.shape[3]
    a5 = condition.shape[4]
    # Next free output row; advanced once per non-zero element found.
    valid_index = 0
    for i1 in range(a1):
        for i2 in range(a2):
            for i3 in range(a3):
                for i4 in range(a4):
                    for i5 in range(a5):
                        if condition[i1, i2, i3, i4, i5] != 0:
                            a[valid_index, 0] = i1
                            a[valid_index, 1] = i2
                            a[valid_index, 2] = i3
                            a[valid_index, 3] = i4
                            a[valid_index, 4] = i5
                            valid_index += 1
    return a
@tvm.target.generic_func
def argwhere(output_shape, condition):
    """Find the indices of elements of a tensor that are non-zero.
    Parameters
    ----------
    output_shape : tvm.te.Tensor
        Tensor whose shape describes the output, [num non-zeros, rank].
    condition : tvm.te.Tensor
        Tensor with boolean values; rank 1 through 5 is supported.
    Returns
    -------
    out : tvm.te.Tensor
        Indices of non-zero elements.
    """
    # Dispatch on the rank of the condition tensor.
    implementations = {
        1: hybrid_argwhere_1d,
        2: hybrid_argwhere_2d,
        3: hybrid_argwhere_3d,
        4: hybrid_argwhere_4d,
        5: hybrid_argwhere_5d,
    }
    rank = len(condition.shape)
    if rank not in implementations:
        raise ValueError("Does not support rank higher than 5 in argwhere")
    return implementations[rank](output_shape.shape, condition)
| 5,848 | 28.540404 | 74 | py |
tvm | tvm-main/python/tvm/topi/generic_op_impl.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Implementation of generic operators in the presence of Tensor"""
# pylint: disable=invalid-name, too-many-arguments
import tvm
from tvm import te
from . import broadcast as _broadcast
from . import math as _math
def _make_bop(broadcast_bop, orig_bop):
    """Build an overloaded binary operator that is Tensor-aware.

    For (LHS OP RHS), with R0 a scalar-like value (int, float, Expr,
    TensorSlice, or rank-zero Tensor) and R1 a Tensor of positive rank,
    the returned function applies:
    (1) broadcast_OP(LHS, RHS) when both operands are R1;
    (2) element-wise tensor-scalar OP when one operand is R1 and the
        other is R0;
    (3) the original operator otherwise.

    Parameters
    ----------
    broadcast_bop : operator function
        Operator for broadcast tensor-tensor operation, for rule (1).
    orig_bop : operator function
        Operator before overloading, for rule (3).

    Returns
    -------
    ret : operator function
        The overloaded operator function.
    """
    name = orig_bop.__name__

    def _tensor_bop_impl(lhs, rhs):
        """Overloaded {op} operator.

        Performs tensor-tensor {op} with broadcasting when both operands are
        non-zero-rank Tensors, element-wise tensor-scalar {op} when exactly
        one operand is such a Tensor, and the default generic.{op} operation
        (from tvm.generic) otherwise.

        Parameters
        ----------
        lhs : object
            Left operand.
        rhs : object
            Right operand.

        Returns
        -------
        ret : tvm.te.Tensor (if at least one operand is non-zero-rank Tensor)
            tvm.Expr (otherwise)
            The result of {op} operation.
        """
        # Dispatch to the broadcast op as soon as either operand is a
        # te.Tensor (De Morgan of "neither is a Tensor").
        if isinstance(lhs, te.tensor.Tensor) or isinstance(rhs, te.tensor.Tensor):
            return broadcast_bop(lhs, rhs)
        return orig_bop(lhs, rhs)

    # The docstring is a template; substitute the concrete operator name.
    _tensor_bop_impl.__doc__ = _tensor_bop_impl.__doc__.format(op=name)
    return _tensor_bop_impl
def _bind_generic_ops():
    """Bind generic operators for Tensor.

    Replaces tvm.tir.generic's add/subtract/multiply/divide with the
    Tensor-aware overloads produced by `_make_bop`, and routes `cast`
    to this package's math.cast.
    """
    # Check __op_priority__ to make sure the binding happens only once.
    __op_priority__ = 1
    if __op_priority__ > tvm.tir.generic.__op_priority__:
        tvm.tir.generic.__op_priority__ = __op_priority__
        tvm.tir.generic.add = _make_bop(_broadcast.add, tvm.tir.generic.add)
        tvm.tir.generic.subtract = _make_bop(_broadcast.subtract, tvm.tir.generic.subtract)
        tvm.tir.generic.multiply = _make_bop(_broadcast.multiply, tvm.tir.generic.multiply)
        tvm.tir.generic.divide = _make_bop(_broadcast.divide, tvm.tir.generic.divide)
        tvm.tir.generic.cast = _math.cast
# Bind at import time so the overloads are active once this module loads.
_bind_generic_ops()
| 3,915 | 36.653846 | 91 | py |
tvm | tvm-main/python/tvm/topi/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""TVM Operator Inventory.
TOPI is the operator collection library for TVM, to provide sugars
for constructing compute declaration as well as optimized schedules.
Some of the schedule function may have been specially optimized for a
specific workload.
"""
from tvm._ffi.libinfo import __version__
# Ensure C++ schedules get registered first, so python schedules can
# override them.
from . import cpp
from .math import *
from .tensor import *
from .generic_op_impl import *
from .reduction import *
from .transform import *
from .broadcast import *
from .sort import *
from .scatter import *
from .scatter_elements import *
from .sparse_fill_empty_rows import *
from .sparse_reshape import *
from .argwhere import *
from .scan import *
from .einsum import *
from .unique import *
from .searchsorted import *
from .signal import *
from . import generic
from . import nn
from . import x86
from . import cuda
from . import gpu
from . import arm_cpu
from . import mali
from . import bifrost
from . import intel_graphics
from . import utils
from . import rocm
from . import vision
from . import image
from . import sparse
from . import hls
from . import random
from . import hexagon
from . import adreno
# error reporting
from .utils import InvalidShapeError
# not import testing by default
# because testing can have extra deps that are not necessary
# we can import them from test cases explicitly
# from . import testing
| 2,264 | 28.802632 | 69 | py |
tvm | tvm-main/python/tvm/topi/sparse_fill_empty_rows.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, too-many-locals, too-many-arguments, too-many-branches
# pylint: disable=undefined-variable, invalid-name
"""SparseFillEmptyRows operator"""
from ..te import hybrid
@hybrid.script
def _sparse_fill_empty_rows(
    sparse_indices,
    sparse_values,
    dense_shape,
    default_value,
    new_sparse_indices_shape,
    new_sparse_values_shape,
    empty_row_indicator_shape,
):
    """Hybrid-script kernel filling empty rows of a sparse tensor.

    Rows in [0, dense_shape[0]) that have no entry in `sparse_indices`
    receive a single entry holding default_value[0] and are flagged with 1
    in `empty_row_indicator`. Input entries are assumed to be sorted in
    row-major order.
    """
    default_value_ = int64(default_value[0])
    new_sparse_indices = output_tensor(new_sparse_indices_shape, "int64")
    new_sparse_values = output_tensor(new_sparse_values_shape, "int64")
    empty_row_indicator = output_tensor(empty_row_indicator_shape, "int64")
    # Write cursor into the output indices/values tensors.
    new_sparse_indices_row_id = 0
    if int64(sparse_indices.shape[0]) == int64(0):  # Handle Empty Case
        #  Fill all rows with default values
        for i in range(0, new_sparse_indices_shape[0]):
            new_sparse_indices[i, 0] = int64(i)
            new_sparse_values[i] = default_value_
            empty_row_indicator[i] = int64(1)
            for k in range(1, int64(new_sparse_indices_shape[1])):
                new_sparse_indices[i, k] = int64(0)
        return (new_sparse_indices, new_sparse_values, empty_row_indicator)
    else:
        # Iterate through sparse_indices and add rows if/when required
        for i in range(0, int64(sparse_indices.shape[0])):
            if i == 0:
                prev_row_id = int64(0)
            else:
                prev_row_id = int64(sparse_indices[i - 1, 0] + 1)
            row_id = int64(sparse_indices[i, 0])
            # Since input is in row-major order, add rows between prev_row_id and row_id
            for j in range(prev_row_id, row_id):
                new_sparse_indices[new_sparse_indices_row_id, 0] = int64(j)
                for k in range(1, int64(new_sparse_indices_shape[1])):
                    new_sparse_indices[new_sparse_indices_row_id, k] = int64(0)
                empty_row_indicator[prev_row_id] = int64(1)
                new_sparse_values[new_sparse_indices_row_id] = default_value_
                new_sparse_indices_row_id += 1
            # Add current element to output
            new_sparse_indices[new_sparse_indices_row_id, 0] = row_id
            for k in range(1, int64(new_sparse_indices_shape[1])):
                new_sparse_indices[new_sparse_indices_row_id, k] = int64(sparse_indices[i, k])
            new_sparse_values[new_sparse_indices_row_id] = int64(sparse_values[i])
            empty_row_indicator[row_id] = int64(0)
            new_sparse_indices_row_id += 1
        # Add rows with default value if last row id of sparse_indices is not dense_shape[0] - 1
        for i in range(
            int64(sparse_indices[sparse_indices.shape[0] - 1, 0] + 1), int64(dense_shape[0])
        ):
            new_sparse_indices[new_sparse_indices_row_id, 0] = int64(i)
            for k in range(1, int64(new_sparse_indices_shape[1])):
                new_sparse_indices[new_sparse_indices_row_id, k] = int64(0)
            empty_row_indicator[i] = int64(1)
            new_sparse_values[new_sparse_indices_row_id] = default_value_
            new_sparse_indices_row_id += 1
        return (new_sparse_indices, new_sparse_values, empty_row_indicator)
def sparse_fill_empty_rows(
    sparse_indices,
    sparse_values,
    dense_shape,
    default_value,
    new_sparse_indices_shape,
    new_sparse_values_shape,
    empty_row_indicator_shape,
):
    """Fill empty rows of a sparse tensor with a default value.

    Thin wrapper over the hybrid-script implementation; see
    `_sparse_fill_empty_rows` for the algorithm. Returns the new sparse
    indices, the new sparse values, and the empty-row indicator tensor.
    """
    outputs = _sparse_fill_empty_rows(
        sparse_indices,
        sparse_values,
        dense_shape,
        default_value,
        new_sparse_indices_shape,
        new_sparse_values_shape,
        empty_row_indicator_shape,
    )
    return outputs
| 4,485 | 39.781818 | 96 | py |
tvm | tvm-main/python/tvm/topi/tensor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,consider-using-enumerate,unused-argument,len-as-condition
"""Elementwise operators"""
from __future__ import absolute_import as _abs
from . import cpp
def elemwise_sum(xs):
    """Compute the element-wise sum of a list of tensors.

    Parameters
    ----------
    xs : list of tvm.te.Tensor
        The tensors to sum.

    Returns
    -------
    y : tvm.te.Tensor
        The element-wise sum.
    """
    summed = cpp.elemwise_sum(xs)
    return summed
def full(shape, dtype, fill_value):
    """Create a tensor of the given shape and dtype, filled with fill_value.

    Parameters
    ----------
    shape : tuple
        Shape of the tensor to create.
    dtype : str
        Data type of the result.
    fill_value : float
        Value every element is set to.

    Returns
    -------
    y : tvm.te.Tensor
        The filled tensor.
    """
    filled = cpp.full(shape, dtype, fill_value)
    return filled
def full_like(x, fill_value):
    """Create a tensor shaped like `x`, filled with fill_value.

    Parameters
    ----------
    x : tvm.te.Tensor
        Tensor whose shape the result copies.
    fill_value : float
        Value every element is set to.

    Returns
    -------
    y : tvm.te.Tensor
        The filled tensor.
    """
    filled = cpp.full_like(x, fill_value)
    return filled
| 1,956 | 24.75 | 88 | py |
tvm | tvm-main/python/tvm/topi/nn/pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM operator pooling compute."""
from __future__ import absolute_import
from .. import cpp
# Integer codes for pool types, passed through to the C++ topi implementations.
POOL_TYPE_CODE = {"avg": 0, "max": 1}
def global_pool(data, pool_type, layout="NCHW"):
    """Perform global pooling over the height and width dimensions of data.

    The 'H' and 'W' characters of `layout` select the pooled axes; neither
    may be a split dimension (NCHW and NCHW16c are valid, NCHW16h/NCHW16w
    are not).

    Parameters
    ----------
    data : tvm.te.Tensor
        n-D tensor laid out according to `layout`.
    pool_type : str
        Pool type, 'max' or 'avg'.
    layout : str
        Layout of the input data; upper-case letters name dimensions and a
        lower-case letter with a factor denotes a split dimension (e.g.
        NCHW16c splits the channel dimension by 16).

    Returns
    -------
    output : tvm.te.Tensor
        n-D tensor in the same layout with height and width reduced to 1,
        e.g. [batch, channel, 1, 1] for NCHW.
    """
    pool_code = POOL_TYPE_CODE[pool_type]
    return cpp.nn.global_pool(data, pool_code, layout)
def pool_grad(
    grads,
    data,
    kernel,
    stride,
    padding,
    pool_type,
    ceil_mode=False,
    layout="NCHW",
    count_include_pad=True,
):
    """Gradient of 2-D pooling over the height and width dimensions of data.

    The 'H' and 'W' characters of `layout` select the pooled axes; neither
    may be a split dimension (NCHW and NCHW16c are valid, NCHW16h/NCHW16w
    are not).

    Parameters
    ----------
    grads : tvm.te.Tensor
        n-D gradient tensor laid out according to `layout`.
    data : tvm.te.Tensor
        n-D input tensor laid out according to `layout`.
    kernel : list/tuple of two ints
        Kernel size, [kernel_height, kernel_width].
    stride : list/tuple of two ints
        Stride size, [stride_height, stride_width].
    padding : list/tuple of four ints
        Pad size, [pad_top, pad_left, pad_bottom, pad_right].
    pool_type : str
        Pool type, 'max' or 'avg'.
    ceil_mode : bool
        Whether to use ceil when calculating output size.
    layout : str
        Layout of the input data; upper-case letters name dimensions and a
        lower-case letter with a factor denotes a split dimension (e.g.
        NCHW16c splits the channel dimension by 16).
    count_include_pad : bool
        Whether padding is counted in the average when pool_type is 'avg'.

    Returns
    -------
    output : tvm.te.Tensor
        n-D gradient tensor in the same layout.
    """
    pool_code = POOL_TYPE_CODE[pool_type]
    return cpp.nn.pool_grad(
        grads, data, kernel, stride, padding, pool_code, ceil_mode, layout, count_include_pad
    )
def adaptive_pool(data, output_size, pool_type, layout="NCHW"):
    """Pool height and width down to a fixed output size.

    Kernel and stride sizes are chosen automatically from `output_size`.
    The 'H' and 'W' characters of `layout` select the pooled axes; neither
    may be a split dimension (NCHW and NCHW16c are valid, NCHW16h/NCHW16w
    are not).

    Parameters
    ----------
    data : tvm.te.Tensor
        n-D tensor laid out according to `layout`.
    output_size : tuple of int
        Desired output height and width.
    pool_type : str
        Pool type, 'max' or 'avg'.
    layout : str
        Layout of the input data; upper-case letters name dimensions and a
        lower-case letter with a factor denotes a split dimension (e.g.
        NCHW16c splits the channel dimension by 16).

    Returns
    -------
    output : tvm.te.Tensor
        n-D tensor in the same layout.
    """
    pool_code = POOL_TYPE_CODE[pool_type]
    return cpp.nn.adaptive_pool(data, output_size, pool_code, layout)
def adaptive_pool3d(data, output_size, pool_type, layout="NCDHW"):
    """Perform adaptive pooling over three spatial dimensions.

    See `adaptive_pool` (the two dimensional version) for details.
    """
    pool_code = POOL_TYPE_CODE[pool_type]
    return cpp.nn.adaptive_pool3d(data, output_size, pool_code, layout)
def pool1d(
    data,
    kernel,
    stride,
    dilation,
    padding,
    pool_type,
    ceil_mode=False,
    layout="NCW",
    count_include_pad=True,
):
    """Perform 1-D pooling over the width dimension of data.

    The 'W' character of `layout` selects the pooled axis; it may not be a
    split dimension (NCW and NCW16c are valid, NCW16w is not).

    Parameters
    ----------
    data : tvm.te.Tensor
        n-D tensor laid out according to `layout`.
    kernel : int or list/tuple of one int
        Kernel size, [kernel_width].
    stride : int or list/tuple of one int
        Stride size, [stride_width].
    dilation : list/tuple of ints
        Dilation size.
    padding : list/tuple of two ints
        Pad size, [pad_left, pad_right].
    pool_type : str
        Pool type, 'max' or 'avg'.
    ceil_mode : bool
        Whether to use ceil when calculating output size.
    layout : str
        Layout of the input data; upper-case letters name dimensions and a
        lower-case letter with a factor denotes a split dimension (e.g.
        NCW16c splits the channel dimension by 16).
    count_include_pad : bool
        Whether padding is counted in the average when pool_type is 'avg'.

    Returns
    -------
    output : tvm.te.Tensor
        n-D tensor in the same layout.
    """
    # Normalize scalar kernel/stride arguments to one-element lists.
    if isinstance(kernel, int):
        kernel = [kernel]
    if isinstance(stride, int):
        stride = [stride]
    pool_code = POOL_TYPE_CODE[pool_type]
    return cpp.nn.pool1d(
        data, kernel, stride, dilation, padding, pool_code, ceil_mode, layout, count_include_pad
    )
def pool2d(
    data,
    kernel,
    stride,
    dilation,
    padding,
    pool_type,
    ceil_mode=False,
    layout="NCHW",
    count_include_pad=True,
):
    """Perform 2-D pooling over the height and width dimensions of data.

    The 'H' and 'W' characters of `layout` select the pooled axes; neither
    may be a split dimension (NCHW and NCHW16c are valid, NCHW16h/NCHW16w
    are not).

    Parameters
    ----------
    data : tvm.te.Tensor
        n-D tensor laid out according to `layout`.
    kernel : list/tuple of two ints
        Kernel size, [kernel_height, kernel_width].
    stride : list/tuple of two ints
        Stride size, [stride_height, stride_width].
    dilation : list/tuple of two ints
        Dilation size, [dilation_height, dilation_width].
    padding : list/tuple of four ints
        Pad size, [pad_top, pad_left, pad_bottom, pad_right].
    pool_type : str
        Pool type, 'max' or 'avg'.
    ceil_mode : bool
        Whether to use ceil when calculating output size.
    layout : str
        Layout of the input data; upper-case letters name dimensions and a
        lower-case letter with a factor denotes a split dimension (e.g.
        NCHW16c splits the channel dimension by 16).
    count_include_pad : bool
        Whether padding is counted in the average when pool_type is 'avg'.

    Returns
    -------
    output : tvm.te.Tensor
        n-D tensor in the same layout.
    """
    pool_code = POOL_TYPE_CODE[pool_type]
    return cpp.nn.pool2d(
        data, kernel, stride, dilation, padding, pool_code, ceil_mode, layout, count_include_pad
    )
def pool3d(
    data,
    kernel,
    stride,
    dilation,
    padding,
    pool_type,
    ceil_mode=False,
    layout="NCDHW",
    count_include_pad=True,
):
    """Perform 3-D pooling over the depth, height and width dimensions.

    The 'D', 'H' and 'W' characters of `layout` select the pooled axes;
    none may be a split dimension (NCDHW and NCDHW16c are valid,
    NCDHW16d/NCDHW16h/NCDHW16w are not).

    Parameters
    ----------
    data : tvm.te.Tensor
        n-D tensor laid out according to `layout`.
    kernel : list/tuple of three ints
        Kernel size, [kernel_depth, kernel_height, kernel_width].
    stride : list/tuple of three ints
        Stride size, [stride_depth, stride_height, stride_width].
    dilation : list/tuple of ints
        Dilation size.
    padding : list/tuple of six ints
        Pad size, [pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right].
    pool_type : str
        Pool type, 'max' or 'avg'.
    ceil_mode : bool
        Whether to use ceil when calculating output size.
    layout : str
        Layout of the input data; upper-case letters name dimensions and a
        lower-case letter with a factor denotes a split dimension (e.g.
        NCDHW16c splits the channel dimension by 16).
    count_include_pad : bool
        Whether padding is counted in the average when pool_type is 'avg'.

    Returns
    -------
    output : tvm.te.Tensor
        n-D tensor in the same layout.
    """
    pool_code = POOL_TYPE_CODE[pool_type]
    return cpp.nn.pool3d(
        data, kernel, stride, dilation, padding, pool_code, ceil_mode, layout, count_include_pad
    )
| 12,396 | 29.9925 | 87 | py |
tvm | tvm-main/python/tvm/topi/nn/batch_norm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Batch normalization."""
import typing
from functools import reduce
from tvm import te
from tvm import topi
def batch_norm(
    data: te.Tensor,
    gamma: te.Tensor,
    beta: te.Tensor,
    moving_mean: te.Tensor,
    moving_var: te.Tensor,
    axis: typing.Optional[int] = None,
    epsilon: typing.Optional[float] = None,
    center: typing.Optional[bool] = None,
    scale: typing.Optional[bool] = None,
    training: typing.Optional[bool] = None,
    momentum: typing.Optional[float] = None,
) -> typing.List[te.Tensor]:
    """Batch normalization layer (Ioffe and Szegedy, 2014).
    Normalizes the input at each batch, i.e. applies a transformation
    that maintains the mean activation close to 0 and the activation
    standard deviation close to 1.
    Parameters
    ----------
    data : tvm.te.Tensor
        Input to be batch-normalized.
    gamma : tvm.te.Tensor
        Scale factor to be applied to the normalized tensor.
    beta : tvm.te.Tensor
        Offset to be applied to the normalized tensor.
    moving_mean : tvm.te.Tensor
        Running mean of input.
    moving_var : tvm.te.Tensor
        Running variance of input.
    axis : int, optional, default=1
        Specify along which shape axis the normalization should occur.
    epsilon : float, optional, default=1e-5
        Small float added to variance to avoid dividing by zero.
    center : bool, optional, default=True
        If True, add offset of beta to normalized tensor, If False,
        beta is ignored.
    scale : bool, optional, default=True
        If True, scale normalized tensor by gamma. If False, gamma
        is ignored.
    training : bool, optional, default=False
        Indicating whether it is in training mode. If True, update
        moving_mean and moving_var.
    momentum : float, optional, default=0.1
        The value used for the moving_mean and moving_var update.
    Returns
    -------
    output : list of tvm.te.Tensor
        Normalized data with same shape as input
    moving_mean : tvm.te.Tensor
        Running mean of input.
    moving_var : tvm.te.Tensor
        Running variance of input.
    """
    # Resolve defaults for all optional parameters.
    if axis is None:
        axis = 1
    if epsilon is None:
        epsilon = 1e-5
    if center is None:
        center = True
    if scale is None:
        scale = True
    if training is None:
        training = False
    if momentum is None:
        momentum = 0.1
    # Broadcast shape for the per-channel statistics: 1 everywhere except
    # along the normalization axis.
    shape = [1] * len(data.shape)
    shape[axis] = data.shape[axis]
    if training:
        # Validate momentum before building any compute.
        assert 0 <= momentum <= 1, "the valid momentum range is [0, 1]."
        # Use batch statistics: mean/variance over all axes except `axis`.
        reduce_axes = list(range(len(data.shape)))
        reduce_axes.remove(axis)
        shape_prod = reduce(lambda x, y: x * y, [data.shape[ax] for ax in reduce_axes], 1)
        data_mean = topi.sum(data, axis=reduce_axes) / shape_prod
        data_mean_rs = topi.reshape(data_mean, shape)
        data_var = (
            topi.sum((data - data_mean_rs) * (data - data_mean_rs), axis=reduce_axes) / shape_prod
        )
        data_var_rs = topi.reshape(data_var, shape)
        out = (data - data_mean_rs) / topi.math.sqrt(data_var_rs + epsilon)
    else:
        # Inference: normalize with the stored running statistics.
        moving_mean_rs = topi.reshape(moving_mean, shape)
        moving_var_rs = topi.reshape(moving_var, shape)
        out = (data - moving_mean_rs) / topi.math.sqrt(moving_var_rs + epsilon)
    if scale:
        out = out * topi.reshape(gamma, shape)
    if center:
        out = out + topi.reshape(beta, shape)
    if training:
        # Reuse data_mean/data_var computed above (previously data_var was
        # recomputed here, duplicating the reduction in the compute graph).
        return [
            out,
            (1 - momentum) * moving_mean + momentum * data_mean,
            (1 - momentum) * moving_var + momentum * data_var,
        ]
    # Moving mean and var aren't updated during test. To avoid
    # placeholder reuse, we multiply by 1 and return them.
    return [out, moving_mean * 1, moving_var * 1]
| 4,747 | 30.865772 | 98 | py |
tvm | tvm-main/python/tvm/topi/nn/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, too-many-locals
# pylint: disable=unused-argument, redefined-builtin
"""Conv2D operators"""
from __future__ import absolute_import as _abs
import re
from collections import namedtuple
from typing import Optional, Sequence, Union
import numpy as np
import tvm
from tvm import auto_scheduler, te
from ..utils import get_const_int, get_const_tuple, simplify, tag
from .pad import pad
from .utils import get_pad_tuple, get_pad_tuple_generic
from .winograd_util import winograd_transform_matrices
# Conv2d workload descriptor: a hashable record of every static parameter of
# a conv2d call, used as a dispatch/caching key (e.g. by autotvm).
Workload = namedtuple(
    "Workload",
    (
        "in_dtype out_dtype height width in_filter groups out_filter "
        "kernel_h kernel_w padt padl padb padr "
        "dilation_h dilation_w stride_h stride_w"
    ),
)
def conv2d(
    input, filter, strides, padding, dilation, data_layout="NCHW", kernel_layout="", out_dtype=None
):
    """Compute a 2-D convolution.
    Thin dispatcher over the generic :func:`conv` implementation with
    ``groups=1``; kept as the public entry point so platform-specific
    declarations can be searched first.
    Parameters
    ----------
    input : tvm.te.Tensor
        4-D input tensor laid out according to ``data_layout``.
    filter : tvm.te.Tensor
        4-D kernel tensor laid out according to ``kernel_layout``.
    strides : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width].
    padding : int or a list/tuple of 2 or 4 ints
        Padding size, or [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints.
    dilation : int or a list/tuple of two ints
        Dilation size, or [dilation_height, dilation_width].
    data_layout : str
        Layout of the input data.
    kernel_layout : Optional[str]
        Layout of the kernel. When empty, a default is inferred from
        ``data_layout``: "OIHW" for "NCHW" data, "HWIO" for "NHWC" data.
    out_dtype : Optional[str]
        Output element type; defaults to the input dtype.
    Returns
    -------
    output : tvm.te.Tensor
        4-D output tensor in ``data_layout``.
    """
    # Single-group convolution; all real work happens in the generic conv().
    return conv(
        input,
        filter,
        strides,
        padding,
        dilation,
        1,
        data_layout,
        kernel_layout,
        out_dtype,
    )
@tvm.target.generic_func
def conv2d_legalize(attrs, inputs, types):
    """Legalizes Conv2D op.
    Generic fallback: targets register their own specializations through
    ``tvm.target.generic_func``; this default performs no transformation.
    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of the convolution being legalized.
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized.
    types : list of types
        List of input and output types.
    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr, or None to keep the op unchanged.
    """
    # Returning None tells the caller to leave the op as-is.
    return None
@tvm.target.generic_func
def conv2d_alter_layout(attrs, inputs, tinfos, out_type):
    """Change Conv2D layout.
    Generic fallback: targets register layout-altering specializations via
    ``tvm.target.generic_func``; this default changes nothing.
    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : tvm.relay.Expr
        Grouped input symbols
    tinfos : list
        Input shape and dtype
    out_type: type
        The output type
    Note
    ----
    Unlike other TOPI functions, this function operates on both graph level
    and operator level.
    """
    # None signals "keep the original layout".
    return None
@tvm.target.generic_func
def conv2d_infer_layout(workload, cfg):
    """Infer input/output shapes and layouts from a workload and cfg.
    Parameters
    ----------
    workload : tuple
        conv2d workload
    cfg : tuple
        tvm.autotvm config
    Returns
    -------
    Output : [tuple of tuple and str, tuple of tuple and str]
        Input shapes and layouts, and output shapes and layouts
    Raises
    ------
    ValueError
        Always, unless a target-specific implementation has been registered.
    """
    raise ValueError("missing register for topi.nn.conv2d_infer_layout")
def _get_workload(data, kernel, stride, padding, dilation, out_dtype, data_layout="NCHW"):
    """Get the workload structure.
    Collects every static parameter of a conv2d call (shapes, padding,
    strides, dilation, inferred group count) into a hashable ``Workload``
    namedtuple, e.g. for use as a dispatch key.
    Parameters
    ----------
    data : tvm.te.Tensor
        4-D input tensor; axis order given by ``data_layout``.
    kernel : tvm.te.Tensor
        4-D kernel. Unpacked as [CO, CI_per_group, KH, KW] for "NCHW" data
        and [KH, KW, CI_per_group, CO] for the other layouts.
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width].
    padding : int or a list/tuple of 2 or 4 ints
        Padding in any form accepted by ``get_pad_tuple``.
    dilation : int or a list/tuple of two ints
        Dilation size, or [dilation_height, dilation_width].
    out_dtype : str
        Output data type, recorded verbatim in the workload.
    data_layout : str
        One of "NCHW", "NHWC" or "HWCN".
    Returns
    -------
    Workload
        The populated workload namedtuple.
    Raises
    ------
    ValueError
        If ``data_layout`` is not one of the three supported layouts.
    """
    if data_layout == "NCHW":
        _, CI, IH, IW = get_const_tuple(data.shape)
    elif data_layout == "NHWC":
        _, IH, IW, CI = get_const_tuple(data.shape)
    elif data_layout == "HWCN":
        IH, IW, CI, _ = get_const_tuple(data.shape)
    else:
        raise ValueError(f"not support this layout {data_layout} yet")
    # Kernel axis order follows the data layout: OIHW for NCHW data,
    # [KH, KW, CI_per_group, CO] otherwise (covers both NHWC and HWCN here).
    if data_layout == "NCHW":
        CO, CIG, KH, KW = get_const_tuple(kernel.shape)
    else:
        KH, KW, CIG, CO = get_const_tuple(kernel.shape)
    dilation_h, dilation_w = (
        dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation)
    )
    # Padding is derived from the *dilated* kernel extent.
    pt, pl, pb, pr = get_pad_tuple(
        padding,
        (get_const_int((KH - 1) * dilation_h + 1), get_const_int((KW - 1) * dilation_w + 1)),
    )
    # Group count inferred from total input channels vs. channels per group.
    GRPS = CI // CIG
    if isinstance(stride, (tuple, list)):
        HSTR, WSTR = stride
    else:
        HSTR, WSTR = stride, stride
    # Mixed dtypes are only allowed for the uint8-data / int8-kernel pairing.
    assert (data.dtype == kernel.dtype) or (
        data.dtype == "uint8" and kernel.dtype == "int8"
    ), f"Do not support inputs with different data types now. {data.dtype} vs. {kernel.dtype}"
    return Workload(
        data.dtype,
        out_dtype,
        IH,
        IW,
        CI,
        GRPS,
        CO,
        KH,
        KW,
        pt,
        pl,
        pb,
        pr,
        dilation_h,
        dilation_w,
        HSTR,
        WSTR,
    )
def conv2d_nchw(Input, Filter, stride, padding, dilation, out_dtype=None):
    """Convolution operator in NCHW layout.
    Delegates to the generic :func:`conv` with ``groups=1`` and the
    conventional OIHW kernel layout.
    Parameters
    ----------
    Input : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    Filter : tvm.te.Tensor
        4-D with shape [num_filter, in_channel, filter_height, filter_width]
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of 2 or 4 ints
        padding size, or [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    dilation : int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    out_dtype : Optional[str]
        Output element type; defaults to the input dtype.
    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    return conv(
        Input,
        Filter,
        stride,
        padding,
        dilation,
        1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        out_dtype=out_dtype,
    )
def conv2d_hwcn(Input, Filter, stride, padding, dilation, out_dtype=None):
    """Convolution operator in HWCN layout.
    Delegates to the generic :func:`conv` with ``groups=1`` and the
    HWIO kernel layout.
    Parameters
    ----------
    Input : tvm.te.Tensor
        4-D with shape [in_height, in_width, in_channel, batch]
    Filter : tvm.te.Tensor
        4-D with shape [filter_height, filter_width, in_channel, num_filter]
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of 2 or 4 ints
        padding size, or [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    dilation : int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    out_dtype : Optional[str]
        Output element type; defaults to the input dtype.
    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [out_height, out_width, out_channel, batch]
    """
    return conv(
        Input,
        Filter,
        stride,
        padding,
        dilation,
        1,
        data_layout="HWCN",
        kernel_layout="HWIO",
        out_dtype=out_dtype,
    )
def conv2d_nhwc(
    Input,
    Filter,
    stride,
    padding,
    dilation,
    out_dtype="float32",
    auto_scheduler_rewritten_layout="",
    meta_schedule_original_shape=None,
):
    """Convolution operator in NHWC layout.
    Delegates to the generic :func:`conv` with ``groups=1`` and HWIO
    kernels; the filter placeholder is additionally marked as layout-free
    so the auto-scheduler may rewrite its layout.
    Parameters
    ----------
    Input : tvm.te.Tensor
        4-D with shape [batch, in_height, in_width, in_channel]
    Filter : tvm.te.Tensor
        4-D with shape [filter_height, filter_width, in_channel, num_filter]
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of 2 or 4 ints
        padding size, or [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    dilation : int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    out_dtype : str = "float32"
        The type of output tensor
    auto_scheduler_rewritten_layout : str = ""
        The layout after auto-scheduler's layout rewrite pass.
    meta_schedule_original_shape : Optional[List[PrimExpr]] = None
        The original shape of the input tensor.
    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_height, out_width, out_channel]
    """
    return conv(
        Input,
        Filter,
        stride,
        padding,
        dilation,
        1,
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype=out_dtype,
        auto_scheduler_rewritten_layout=auto_scheduler_rewritten_layout,
        meta_schedule_original_shape=meta_schedule_original_shape,
        auto_scheduler_should_rewrite_layout=True,
    )
def conv2d_NCHWc(data, kernel, stride, padding, dilation, layout, out_layout, out_dtype="float32"):
    """Conv2D operator for nChw[x]c layout.
    Parameters
    ----------
    data : tvm.te.Tensor
        5-D with shape [batch, in_channel_chunk, in_height, in_width, in_channel_block]
    kernel : tvm.te.Tensor
        6-D with shape
        [num_filter_chunk, in_channel_chunk, filter_height, filter_width,
        in_channel_block, num_filter_block]
    stride : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of 2 or 4 ints
        padding size, or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    dilation: int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    layout : str
        Input data layout
    out_layout : str
        Output data layout
    out_dtype : str
        output data type
    Returns
    -------
    output : tvm.te.Tensor
        5-D with shape [batch, out_channel_chunk, out_height, out_width, out_channel_block]
    """
    # layout and out_layout are not used here,
    # we keep them for debug convenience when dumping autotvm workload
    HSTR, WSTR = stride if isinstance(stride, (tuple, list)) else (stride, stride)
    dilation_h, dilation_w = (
        dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation)
    )
    n, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape)
    in_channel = ic_chunk * ic_bn
    # NOTE(review): `target` is unused below; calling with allow_none=False
    # appears intended only to assert that a target context is active —
    # confirm before removing.
    target = tvm.target.Target.current(allow_none=False)
    oc_chunk, ic_chunk_group, kernel_height, kernel_width, kernel_ic_bn, oc_bn = get_const_tuple(
        kernel.shape
    )
    num_filter = oc_chunk * oc_bn
    # Group count inferred from total input channels vs. channels covered by
    # one kernel group (chunks-per-group * block size).
    groups = in_channel // (ic_chunk_group * kernel_ic_bn)
    dilated_kernel_h = (kernel_height - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_width - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    HPAD = pad_top + pad_down
    WPAD = pad_left + pad_right
    # output shape
    out_height = (ih + HPAD - dilated_kernel_h) // HSTR + 1
    out_width = (iw + WPAD - dilated_kernel_w) // WSTR + 1
    oshape = (n, oc_chunk, out_height, out_width, oc_bn)
    # Padding applies only to the two spatial axes of the 5-D layout.
    pad_before = (0, 0, pad_top, pad_left, 0)
    pad_after = (0, 0, pad_down, pad_right, 0)
    # DOPAD
    DOPAD = HPAD != 0 or WPAD != 0
    if DOPAD:
        data_pad = pad(data, pad_before, pad_after, name="data_pad")
    else:
        data_pad = data
    kh = te.reduce_axis((0, kernel_height), name="kh")
    kw = te.reduce_axis((0, kernel_width), name="kw")
    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod
    if groups == 1:
        # Reduce over the full (unblocked) input channel range; idxdiv/idxmod
        # split the flat channel index back into (chunk, block).
        ic = te.reduce_axis((0, in_channel), name="ic")
        return te.compute(
            oshape,
            lambda n, oc_chunk, oh, ow, oc_block: te.sum(
                data_pad[
                    n,
                    idxdiv(ic, ic_bn),
                    oh * HSTR + kh * dilation_h,
                    ow * WSTR + kw * dilation_w,
                    idxmod(ic, ic_bn),
                ].astype(out_dtype)
                * kernel[oc_chunk, idxdiv(ic, ic_bn), kh, kw, idxmod(ic, ic_bn), oc_block].astype(
                    out_dtype
                ),
                axis=[ic, kh, kw],
            ),
            name="conv2d_NCHWc",
            tag="conv2d_NCHWc",
        )
    # Grouped path: reduce over channels of one group only; the input chunk
    # index is offset by the group this output chunk (occ) belongs to.
    ic = te.reduce_axis((0, in_channel // groups), name="ic")
    return te.compute(
        oshape,
        lambda n, occ, oh, ow, oc_block: te.sum(
            data_pad[
                n,
                (occ // (oc_chunk // groups)) * (ic_chunk // groups) + idxdiv(ic, ic_bn),
                oh * HSTR + kh * dilation_h,
                ow * WSTR + kw * dilation_w,
                idxmod(ic, ic_bn),
            ].astype(out_dtype)
            * kernel[occ, idxdiv(ic, ic_bn), kh, kw, idxmod(ic, ic_bn), oc_block].astype(out_dtype),
            axis=[ic, kh, kw],
        ),
        name="conv2d_NCHWc",
        tag="conv2d_NCHWc",
    )
def conv2d_NCHWc_int8(
    data, kernel, stride, padding, dilation, layout, out_layout, out_dtype="int32", n_elems=4
):
    """Conv2D operator for nChw[x]c layout.
    Parameters
    ----------
    data : tvm.te.Tensor
        5-D with shape [batch, in_channel_chunk, in_height, in_width, in_channel_block]
    kernel : tvm.te.Tensor
        7-D with shape
        [num_filter_chunk, in_channel_chunk, filter_height, filter_width, in_channel_block/4,
        num_filter_block, 4]
    stride : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of 2 or 4 ints
        padding size, or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    dilation: int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    layout : str
        Input data layout
    out_layout : str
        Output data layout
    out_dtype : str
        output data type
    n_elems : int
        numer of int8 elements accumulated
    Returns
    -------
    output : tvm.te.Tensor
        5-D with shape [batch, out_channel_chunk, out_height, out_width, out_channel_block]
    """
    # layout and out_layout are not used here,
    # we keep them for debug convenience when dumping autotvm workload
    HSTR, WSTR = stride if isinstance(stride, (tuple, list)) else (stride, stride)
    dilation_h, dilation_w = (
        dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation)
    )
    n, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape)
    in_channel = ic_chunk * ic_bn
    # Only the first six kernel axes are needed; the trailing axis is the
    # n_elems-wide int8 accumulation vector.
    oc_chunk, ic_chunk_group, kernel_height, kernel_width, _, oc_bn = get_const_tuple(kernel.shape)[
        :6
    ]
    groups = ic_chunk // ic_chunk_group
    dilated_kernel_h = (kernel_height - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_width - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    HPAD = pad_top + pad_down
    WPAD = pad_left + pad_right
    # output shape
    out_height = (ih + HPAD - dilated_kernel_h) // HSTR + 1
    out_width = (iw + WPAD - dilated_kernel_w) // WSTR + 1
    oshape = (n, oc_chunk, out_height, out_width, oc_bn)
    # Padding applies only to the two spatial axes of the 5-D layout.
    pad_before = (0, 0, pad_top, pad_left, 0)
    pad_after = (0, 0, pad_down, pad_right, 0)
    # DOPAD
    DOPAD = HPAD != 0 or WPAD != 0
    if DOPAD:
        data_pad = pad(data, pad_before, pad_after, name="data_pad")
    else:
        data_pad = data
    # NOTE(review): `ic` is declared but only used via ic_outer/ic_f_inner/
    # ic_s_inner below — looks vestigial; confirm before removing.
    ic = te.reduce_axis((0, in_channel), name="ic")
    kh = te.reduce_axis((0, kernel_height), name="kh")
    kw = te.reduce_axis((0, kernel_width), name="kw")
    if groups == 1:
        # The channel reduction is split three ways so the innermost n_elems
        # lanes line up with the kernel's trailing int8 vector axis.
        ic_outer = te.reduce_axis((0, in_channel // ic_bn), name="ic_outer")
        ic_f_inner = te.reduce_axis((0, ic_bn // n_elems), name="ic_f_inner")
        ic_s_inner = te.reduce_axis((0, n_elems), name="ic_s_inner")
        return te.compute(
            oshape,
            lambda n, oc_chunk, oh, ow, oc_block: te.sum(
                data_pad[
                    n,
                    ic_outer,
                    oh * HSTR + kh * dilation_h,
                    ow * WSTR + kw * dilation_w,
                    ic_f_inner * n_elems + ic_s_inner,
                ].astype(out_dtype)
                * kernel[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner].astype(
                    out_dtype
                ),
                axis=[kh, kw, ic_outer, ic_f_inner, ic_s_inner],
            ),
            name="conv2d_NCHWc_int8",
            tag="conv2d_NCHWc_int8",
            attrs={"schedule_rule": "conv2d_NCHWc_int8"},
        )
    # for int8 group conv support
    ic_chunk = in_channel // ic_bn
    ic_outer = te.reduce_axis((0, ic_chunk // groups), name="ic_outer")
    ic_f_inner = te.reduce_axis((0, ic_bn // n_elems), name="ic_f_inner")
    ic_s_inner = te.reduce_axis((0, n_elems), name="ic_s_inner")
    oshape = (n, oc_chunk, out_height, out_width, oc_bn)
    # NOTE(review): unlike the groups == 1 path above, the spatial indices
    # here use `kh`/`kw` without the dilation factors — grouped int8 conv
    # appears to ignore dilation; confirm whether this is intentional.
    return te.compute(
        oshape,
        lambda n, occ, oh, ow, oc_block: te.sum(
            data_pad[
                n,
                (occ * oc_bn // (oc_chunk * oc_bn // groups)) * (ic_chunk // groups) + ic_outer,
                oh * HSTR + kh,
                ow * WSTR + kw,
                ic_f_inner * n_elems + ic_s_inner,
            ].astype(out_dtype)
            * kernel[occ, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner].astype(out_dtype),
            axis=[kh, kw, ic_outer, ic_f_inner, ic_s_inner],
        ),
        name="conv2d_NCHWc_int8",
        tag="conv2d_NCHWc_int8",
        attrs={"schedule_rule": "conv2d_NCHWc_int8"},
    )
def conv2d_gemm_weight_transform(kernel, tile_rows, tile_cols):
    """Weight transformation for the im2col/GEMM convolution path.
    Flattens an HWIO kernel into a (K, N) = (KH*KW*IC, OC) matrix, pads both
    axes up to tile boundaries, and reorders the result into interleaved
    (tile_rows x tile_cols) blocks so the GEMM micro-kernel can read weight
    panels contiguously.
    Parameters
    ----------
    kernel : Tensor
        The raw kernel tensor with layout "HWIO".
    tile_rows : int
        Tile rows of the weight transformation for ConvGemm.
    tile_cols : int
        Tile columns of the weight transformation for ConvGemm.
    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape
        [N_padded // tile_rows, K_padded // tile_cols, tile_rows, tile_cols],
        where K = KH*KW*IC and N = OC, each rounded up to its tile boundary.
    """
    KH, KW, IC, OC = get_const_tuple(kernel.shape)
    K = KH * KW * IC
    N = OC
    # Flatten to (K, N): row index x enumerates (kh, kw, ic) in that order.
    kernel_flat = te.compute(
        (K, N), lambda x, y: kernel[(x // IC) // KW, (x // IC) % KW, x % IC, y], "weight_flatten"
    )
    pad_K = 0
    pad_N = 0
    if N % tile_rows != 0:
        pad_N = tile_rows - (N % tile_rows)
    # Tensorize will later make use of 4 tiles at once across the columns so make sure we pad such
    # that the columns is multiple of 4
    column_multiplier = 4
    tile_cols_multiplied = tile_cols * column_multiplier
    K_misalignment = K % tile_cols_multiplied
    if K_misalignment != 0:
        pad_K = tile_cols_multiplied - K_misalignment
    N_padded = N + pad_N
    K_padded = K + pad_K
    if pad_K != 0 or pad_N != 0:
        kernel_flat = pad(
            kernel_flat, pad_before=(0, 0), pad_after=(pad_K, pad_N), name="weight_padding"
        )
    # Block (x, y) transposes a (tile_cols x tile_rows) patch of kernel_flat:
    # output[x, y, z, w] = kernel_flat[tile_cols*y + w, tile_rows*x + z].
    return te.compute(
        (N_padded // tile_rows, K_padded // tile_cols, tile_rows, tile_cols),
        lambda x, y, z, w: kernel_flat[w + tile_cols * y, z + tile_rows * x],
        name="weight_block_reshape",
    )
def conv2d_winograd_weight_transform(kernel, tile_size):
    """Transform a convolution kernel into the Winograd domain.
    Computes G * g * G^T for every (co, ci) kernel slice, where G is the
    Winograd weight-transform matrix for the given tile size.
    Parameters
    ----------
    kernel : Tensor
        The raw kernel tensor with layout "NCHW".
    tile_size : int
        Tile size of winograd transform. e.g. 2 for F(2x2, 3x3) and 4 for
        F(4x4, 3x3).
    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [alpha, alpha, CO, CI], where alpha = tile_size + K - 1.
    """
    co, ci, kh, kw = get_const_tuple(kernel.shape)
    assert kh == kw, "Only support NxN kernel"
    # alpha is the transformed tile extent for F(tile_size, K).
    alpha = tile_size + kw - 1
    _, _, G = winograd_transform_matrices(tile_size, kw, kernel.dtype)
    r_kh = te.reduce_axis((0, kw), name="r_kh")
    r_kw = te.reduce_axis((0, kw), name="r_kw")
    return te.compute(
        (alpha, alpha, co, ci),
        lambda eps, nu, co, ci: te.sum(
            kernel[co][ci][r_kh][r_kw] * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
        ),
        name="transform_weight",
    )
def conv2d_winograd_nnpack_weight_transform(kernel, convolution_algorithm, out_dtype):
    """Weight transformation for winograd via NNPACK.
    Parameters
    ----------
    kernel : Tensor
        The raw kernel tensor with layout "NCHW". Only 3x3 kernel is
        supported for now.
    convolution_algorithm : int
        The convolution algorithm for Winograd NNPACK.
    out_dtype : str
        Data type of the transformed weights.
    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [alpha, alpha, CO, CI]
    """
    # Imported lazily so importing this module never requires NNPACK.
    # pylint: disable=import-outside-toplevel
    from tvm.contrib import nnpack
    return nnpack.convolution_inference_weight_transform(
        kernel, algorithm=convolution_algorithm, dtype=out_dtype
    )
def group_conv2d_nchw(Input, Filter, stride, padding, dilation, groups, out_dtype=None):
    """Group convolution operator in NCHW layout.
    Delegates to the generic :func:`conv` with the caller-supplied group
    count and OIHW kernels.
    Parameters
    ----------
    Input : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    Filter : tvm.te.Tensor
        4-D with shape [num_filter, in_channel // groups, filter_height, filter_width]
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of 2 or 4 ints
        padding size, or [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    dilation : int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    groups : int
        number of groups
    out_dtype : str
        The output type. This is used for mixed precision.
    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    return conv(
        Input,
        Filter,
        stride,
        padding,
        dilation,
        groups,
        data_layout="NCHW",
        kernel_layout="OIHW",
        out_dtype=out_dtype,
    )
def conv(
    inp: te.Tensor,
    filt: te.Tensor,
    stride: Union[int, Sequence[int]],
    padding: Union[int, Sequence[int]],
    dilation: Union[int, Sequence[int]],
    groups: int,
    data_layout: str,
    kernel_layout: str = "",
    out_dtype: Union[str, None] = None,
    auto_scheduler_rewritten_layout: Optional[str] = None,
    meta_schedule_original_shape=None,
    auto_scheduler_should_rewrite_layout: bool = False,
):
    """Convolution operator in NCHW or NHWC layout.
    Supports 1D, 2D, 3D, ... and grouping.
    Parameters
    ----------
    inp : tvm.te.Tensor
        N-D with shape [batch, in_channel, in_height, in_width, ...] in `data_layout`
    filt : tvm.te.Tensor
        N-D with shape [num_filter, in_channel // groups, filter_height, filter_width, ...] in
        `kernel_layout`
    stride : int or a list/tuple of dim ints
        (where dim=2 for NCHW, dim=1 for NCH, etc.)
        Stride size, or [stride_height, stride_width, ...]
    padding : int or a list/tuple of dim or 2*dim ints
        (where dim=2 for NCHW, dim=1 for NCH, etc.)
        padding size, or
        [pad_height, pad_width, ...] for dim ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 2*dim ints
    dilation : int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    groups : int
        number of groups
    data_layout : str
        Layout of the input. N indicates batch dimension, C indicates
        channels, any other character indicates HW (or H or HWD for 1D and 3D).
    kernel_layout: Optional[str]
        Layout of the filter. I indicates input channels, O indicates output channels,
        any other character indicates HW dimension of the filter (or H or HWD for 1D and 3D).
        If kernel_layout is empty, use data_layout to infer the default kernel_layout. Default
        kernel_layout is OIHW for NCHW data layout, HWIO for NHWC data layout.
    out_dtype : str
        Elements are converted to this type before elementwise multiplication
        and summation.
    auto_scheduler_rewritten_layout: str
        Layout from autoscheduler's layout rewritting.
    meta_schedule_original_shape : Optional[List[PrimExpr]]
        The original shape of the input tensor.
    auto_scheduler_should_rewrite_layout : bool
        Should auto scheduler be allowed to rewrite the layout of the filter
        tensor. Defaults to false. This can cause errors if used with grouped
        convs.
    Returns
    -------
    Output : tvm.te.Tensor
        N-D with shape [batch, out_channel, out_height, out_width, ...] in `data_layout`
    """
    # Number of spatial dimensions (2 for NCHW/NHWC, 1 for NCW, 3 for NCDHW).
    dim = len(inp.shape) - 2
    if out_dtype is None:
        out_dtype = inp.dtype
    assert isinstance(stride, int) or len(stride) == dim
    assert isinstance(dilation, int) or len(dilation) == dim
    # Broadcast scalar stride/dilation to one value per spatial dim.
    if isinstance(stride, int):
        strides = [stride for _ in range(dim)]
    else:
        strides = stride
    if isinstance(dilation, int):
        dilations = [dilation for _ in range(dim)]
    else:
        dilations = list(dilation)
    # transform from data_layout to NCHW
    # (indexing an array of per-axis values with this list yields them in
    # [N, C, spatial...] order)
    data_permutation_to = [data_layout.find("N"), data_layout.find("C")] + [
        x.span()[0] for x in re.finditer("[^NC]", data_layout)
    ]
    # transform from NCHW to data_layout
    data_permutation_from = np.argsort(data_permutation_to)
    # transform from CHW to data_layout
    # (same permutation with the batch axis dropped and indices shifted down)
    data_permutation_from_reductions = data_permutation_from[1:].copy()
    data_permutation_from_reductions[
        data_permutation_from_reductions > data_permutation_from[0]
    ] -= 1
    if kernel_layout == "":
        # kernel permutation, if C appears before HW then num_filter is first, otherwise it is last
        # tkonolige: I don't really understand kernel ordering for NHWC, it seems
        # like num_filters should match the N dimension
        if data_layout.find("C") < re.search("[^NC]", data_layout).span()[0]:
            kernel_permutation_to = [0, 1] + list(range(2, dim + 2))
        else:
            kernel_permutation_to = [dim + 1, dim] + list(range(dim))
    else:
        # transform from kernel_layout to OIHW
        kernel_permutation_to = [kernel_layout.find("O"), kernel_layout.find("I")] + [
            x.span()[0] for x in re.finditer("[^OI]", kernel_layout)
        ]
    # transform from OIHW to kernel_layout
    kernel_permutation_from = np.argsort(kernel_permutation_to)
    if meta_schedule_original_shape:
        auto_scheduler.rewrite_tensor_shape(filt, meta_schedule_original_shape)
    # Read shapes in canonical [N, C, spatial...] / [O, I, spatial...] order.
    batch, in_channel, *dimensions = np.array(get_const_tuple(inp.shape))[
        data_permutation_to
    ].tolist()
    num_filter, _, *kernel_dimensions = np.array(get_const_tuple(filt.shape))[
        kernel_permutation_to
    ].tolist()
    # Autoscheduler may have messed with the input layout, so we extract the
    # dimensions that it gives us
    if auto_scheduler_rewritten_layout:
        num_filter, _, *kernel_dimensions = auto_scheduler.get_shape_from_rewritten_layout(
            auto_scheduler_rewritten_layout,
            ["ff", "rc"] + [f"r{i}" for i in ["y", "x", "z"][: len(kernel_dimensions)]],
        )
        auto_scheduler.remove_index_check(filt)
    assert in_channel % groups == 0, "input channels must divide group size"
    assert num_filter % groups == 0, "output channels must divide group size"
    dilated_kernel_dimensions = [(k - 1) * dil + 1 for k, dil in zip(kernel_dimensions, dilations)]
    pad_begin, pad_end = get_pad_tuple_generic(padding, dilated_kernel_dimensions)
    # compute the output shape
    out_channel = num_filter
    out_dimensions = [
        simplify(d - (k - 1) * dil - 1 + pb + pe) // stride + 1
        for d, k, dil, pb, pe, stride in zip(
            dimensions, kernel_dimensions, dilations, pad_begin, pad_end, strides
        )
    ]
    # compute graph
    # Pad amounts are built in NCHW order, then scattered back to data_layout.
    pad_before = list(np.array([0, 0] + pad_begin)[data_permutation_from])
    pad_after = list(np.array([0, 0] + pad_end)[data_permutation_from])
    temp = pad(inp, pad_before, pad_after, name="pad_temp")
    rc = te.reduce_axis((0, in_channel // groups), name="rc")
    rs = [te.reduce_axis((0, k), name=f"r{i}") for i, k in zip(["y", "x", "z"], kernel_dimensions)]
    def compute(*args):
        # args arrive in data_layout order; reorder to [n, c, spatial...].
        nn, ff, *dim_indices = list(np.array(args)[data_permutation_to])
        if groups == 1:
            simplified_channel_index = rc
        else:
            # Offset the reduction channel by the group this output filter
            # (ff) belongs to.
            simplified_channel_index = ff // (num_filter // groups) * (in_channel // groups) + rc
        return te.sum(
            temp.__getitem__(
                tuple(
                    np.array(
                        [nn, simplified_channel_index]
                        + [
                            di * stride + r * dil
                            for di, stride, r, dil in zip(dim_indices, strides, rs, dilations)
                        ]
                    )[data_permutation_from]
                )
            ).astype(out_dtype)
            * filt.__getitem__(tuple(np.array([ff, rc] + rs)[kernel_permutation_from])).astype(
                out_dtype
            ),
            # Schedules depend on reduction axes being in the same order as the
            # layout, so we reorder here.
            axis=np.array([rc, *rs])[data_permutation_from_reductions].tolist(),
        )
    out = te.compute(
        list(np.array([batch, out_channel] + out_dimensions)[data_permutation_from]),
        compute,
        # tag is expected to be lowercase
        tag=f"{'group_' if groups > 1 else ''}conv{dim}d_{data_layout.lower()}",
        name=f"{'group_' if groups > 1 else ''}conv{dim}d_{data_layout.lower()}",
        attrs={"layout_free_placeholders": [filt]} if auto_scheduler_should_rewrite_layout else {},
        varargs_names=list(np.array(["nn", "ff", "yy", "xx", "zz"])[data_permutation_from]),
    )
    # if we used autoscheduler's changed layout we need to rewrite the ordering
    # of the output dimensions
    if auto_scheduler_rewritten_layout:
        out = auto_scheduler.rewrite_compute_body(out, auto_scheduler_rewritten_layout)
    return out
def group_conv2d_nhwc(Input, Filter, stride, padding, dilation, groups, out_dtype=None):
    """Group convolution operator in NHWC layout.
    Delegates to the generic :func:`conv` with the caller-supplied group
    count and HWIO kernels.
    Parameters
    ----------
    Input : tvm.te.Tensor
        4-D with shape [batch, in_height, in_width, in_channel, ...]
    Filter : tvm.te.Tensor
        4-D with shape [filter_height, filter_width, in_channel // groups, num_filter]
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of 2 or 4 ints
        padding size, or [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    dilation : int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    groups : int
        number of groups
    out_dtype : str
        The output type. This is used for mixed precision.
    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_height, out_width, out_channel]
    """
    return conv(
        Input,
        Filter,
        stride,
        padding,
        dilation,
        groups,
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype=out_dtype,
    )
def unpack_NCHWc_to_nchw(packed_out, out_dtype):
    """Unpack conv2d_NCHWc output from layout NCHWc to NCHW.
    Folds the (channel_chunk, channel_block) pair of the 5-D blocked layout
    back into a single channel axis.
    Parameters
    ----------
    packed_out : tvm.te.Tensor
        The output tensor of conv2d_NCHWc.
    out_dtype : str
        The output dtype.
    Returns
    -------
    unpacked_out : tvm.te.Tensor
        The unpacked output tensor in NCHW layout.
    """
    batch, chunk, height, width, block = get_const_tuple(packed_out.shape)
    # Flat channel c maps to (chunk = c // block, lane = c % block).
    return te.compute(
        (batch, chunk * block, height, width),
        lambda n, c, h, w: packed_out[
            n, tvm.tir.indexdiv(c, block), h, w, tvm.tir.indexmod(c, block)
        ].astype(out_dtype),
        name="output_unpack",
        tag=tag.INJECTIVE + ",unpack_nchwc",
    )
@tvm.target.generic_func
def conv2d_winograd_nhwc(
    data,
    weight,
    strides,
    padding,
    dilation,
    out_dtype,
    pre_computed=False,
    auto_scheduler_rewritten_layout="",
    meta_schedule_original_shape=None,
):
    """Conv2D Winograd in NHWC layout.
    Generic dispatcher shared by the auto-scheduler CPU and GPU paths;
    targets may override it, and this default delegates to the common NHWC
    Winograd implementation.
    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_height, in_width, in_channel]
    weight : tvm.te.Tensor
        4-D with shape [filter_height, filter_width, in_channel, num_filter]
    strides : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of two ints
        padding size, or [pad_height, pad_width]
    dilation : int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    out_dtype : str, optional
        Specifies the output data type.
    pre_computed : bool
        Whether the kernel is precomputed
    auto_scheduler_rewritten_layout : str = ""
        The layout after auto-scheduler's layout rewrite pass.
    meta_schedule_original_shape : Optional[List[PrimExpr]] = None
        The original shape of the input tensor.
    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_height, out_width, out_channel]
    """
    # A fixed F(4x4, 3x3) tiling is used on every target.
    return _conv2d_winograd_nhwc_impl(
        data,
        weight,
        strides,
        padding,
        dilation,
        out_dtype,
        4,
        pre_computed=pre_computed,
        write_cache_level=2,
        auto_scheduler_rewritten_layout=auto_scheduler_rewritten_layout,
        meta_schedule_original_shape=meta_schedule_original_shape,
    )
@tvm.target.generic_func
def conv2d_winograd_nchw(
    data,
    weight,
    strides,
    padding,
    dilation,
    out_dtype,
    pre_computed=False,
    auto_scheduler_rewritten_layout="",
    meta_schedule_original_shape=None,
):
    """Conv2D Winograd in NCHW layout.
    Generic dispatcher shared by the auto-scheduler CPU and GPU paths;
    targets may override it, and this default delegates to the common NCHW
    Winograd implementation.
    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    weight : tvm.te.Tensor
        4-D with shape [filter_height, filter_width, in_channel, num_filter]
    strides : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of two ints
        padding size, or [pad_height, pad_width]
    dilation : int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    out_dtype : str, optional
        Specifies the output data type.
    pre_computed : bool
        Whether the kernel is precomputed
    auto_scheduler_rewritten_layout : str = ""
        The layout after auto-scheduler's layout rewrite pass.
    meta_schedule_original_shape : Optional[List[PrimExpr]] = None
        The original shape of the input tensor.
    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_height, out_width, out_channel]
    """
    # A fixed F(4x4, 3x3) tiling is used on every target.
    return _conv2d_winograd_nchw_impl(
        data,
        weight,
        strides,
        padding,
        dilation,
        out_dtype,
        4,
        pre_computed,
        auto_scheduler_rewritten_layout,
        meta_schedule_original_shape,
    )
def _conv2d_winograd_nhwc_impl(
    data,
    weight,
    strides,
    padding,
    dilation,
    out_dtype,
    tile_size,
    pre_computed=False,
    write_cache_level=None,
    auto_scheduler_rewritten_layout="",
    meta_schedule_original_shape=None,
):
    """Conv2D Winograd implementation in NHWC layout.
    This is a clean version to be used by the auto-scheduler for both CPU and GPU.
    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_height, in_width, in_channel]
    weight : tvm.te.Tensor
        4-D with shape [filter_height, filter_width, in_channel, num_filter] when
        `pre_computed` is False; when `pre_computed` is True the kernel is already
        in the Winograd domain with shape [alpha, alpha, num_filter, in_channel].
    strides : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of two ints
        padding size, or [pad_height, pad_width]
    dilation: int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    out_dtype : str, optional
        Specifies the output data type.
    tile_size : int
        The size of the tile to use for the Winograd filter
    pre_computed: bool = False
        Whether the kernel is precomputed
    write_cache_level: Optional[int] = None
        The cache level to write to in multi-level tiling rule in MetaSchedule.
    auto_scheduler_rewritten_layout: str = ""
        The layout after auto-scheduler's layout rewrite pass.
    meta_schedule_original_shape: Optional[List[PrimExpr]] = None
        The original shape of the input tensor.
    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_height, out_width, out_channel]
    """
    N, H, W, CI = get_const_tuple(data.shape)
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    if meta_schedule_original_shape:
        # MetaSchedule layout rewrite: restore the weight tensor's pre-rewrite shape.
        auto_scheduler.rewrite_tensor_shape(weight, meta_schedule_original_shape)
    assert (dilation_h, dilation_w) == (1, 1), "Does not support dilation"
    if not pre_computed:
        KH, KW, CI, CO = get_const_tuple(weight.shape)
    else:
        if auto_scheduler_rewritten_layout:
            # Infer the packed shape from the rewritten layout string instead of
            # the tensor, then drop the out-of-bound index checks the layout
            # rewrite would otherwise force on accesses to `weight`.
            H_CAT, W_CAT, CO, CI = get_const_tuple(
                auto_scheduler.get_shape_from_rewritten_layout(
                    auto_scheduler_rewritten_layout, ["eps", "nu", "co", "ci"]
                )
            )
            auto_scheduler.remove_index_check(weight)
        else:
            H_CAT, W_CAT, CO, CI = get_const_tuple(weight.shape)
        # Recover the spatial kernel extent from the transformed size:
        # alpha = KH + tile_size - 1, hence KH = H_CAT - tile_size + 1.
        KH, KW = H_CAT - tile_size + 1, W_CAT - tile_size + 1
    pad_t, pad_l, pad_b, pad_r = get_pad_tuple(padding, (KH, KW))
    HSTR, WSTR = (strides, strides) if isinstance(strides, int) else strides
    # Only unit-stride 3x3 kernels are supported by this F(4x4, 3x3) transform.
    assert HSTR == 1 and WSTR == 1 and KH == 3 and KW == 3
    r = KW
    m = tile_size
    alpha = m + r - 1
    # A: inverse transform, B: data transform, G: kernel transform matrices.
    A, B, G = winograd_transform_matrices(m, r, out_dtype)
    H = (H + pad_t + pad_b - KH) // HSTR + 1
    W = (W + pad_l + pad_r - KW) // WSTR + 1
    # Number of m x m output tiles along each spatial axis; P = total tiles.
    nH, nW = (H + m - 1) // m, (W + m - 1) // m
    P = N * nH * nW
    # Extra padding so every tile's alpha x alpha input window stays in bounds.
    # NOTE(review): the same `pad_extra` (derived from H/nW) is applied to both
    # height and width below — presumably relying on square inputs for typical
    # workloads; confirm against the upstream implementation before changing.
    pad_extra = (nW - 1) * m + alpha - (H + pad_t + pad_b)
    data_pad = pad(
        data,
        (0, pad_t, pad_l, 0),
        (0, pad_b + pad_extra, pad_r + pad_extra, 0),
        name="data_pad",
        attrs={"schedule_rule": "None"},
    )
    if not pre_computed:
        # Transform the raw kernel into the Winograd domain (G @ g @ G^T).
        r_kh = te.reduce_axis((0, KH), name="r_kh")
        r_kw = te.reduce_axis((0, KW), name="r_kw")
        kernel_pack = te.compute(
            (alpha, alpha, CO, CI),
            lambda eps, nu, co, ci: te.sum(
                weight[r_kh, r_kw, ci, co] * G[eps, r_kh] * G[nu, r_kw], axis=[r_kh, r_kw]
            ),
            name="kernel_pack",
        )
        bgemm_attrs = {}
    else:
        kernel_pack = weight
        # A pre-transformed kernel is layout-free: schedulers may repack it.
        bgemm_attrs = {"layout_free_placeholders": [kernel_pack]}
        if write_cache_level is not None:
            if not isinstance(write_cache_level, int):
                bgemm_attrs["meta_schedule.write_cache_level"] = write_cache_level
            else:
                bgemm_attrs["meta_schedule.write_cache_level"] = [write_cache_level]
    # pack data tile: gather each tile's alpha x alpha input window
    input_tile = te.compute(
        (alpha, alpha, P, CI),
        lambda eps, nu, p, ci: data_pad[
            p // (nH * nW), ((p // nW) % nH) * m + eps, (p % nW) * m + nu, ci
        ],
        name="input_tile",
        attrs={"schedule_rule": "None"},
    )
    # transform data: B^T @ d @ B
    r_a = te.reduce_axis((0, alpha), "r_a")
    r_b = te.reduce_axis((0, alpha), "r_b")
    data_pack = te.compute(
        (alpha, alpha, P, CI),
        lambda eps, nu, p, ci: te.sum(
            input_tile[r_a, r_b, p, ci] * B[r_a, eps] * B[r_b, nu], axis=[r_a, r_b]
        ),
        name="data_pack",
        attrs={
            "auto_scheduler_simplify_const_tensor_indices": ["eps", "nu", "r_a", "r_b"],
            "schedule_rule": "conv2d_nhwc_winograd_data_pack",
        },
    )
    # do batch gemm: one (P x CI) @ (CI x CO) matmul per (eps, nu) point
    ci = te.reduce_axis((0, CI), name="ci")
    bgemm = te.compute(
        (alpha, alpha, P, CO),
        lambda eps, nu, p, co: te.sum(
            data_pack[eps, nu, p, ci] * kernel_pack[eps, nu, co, ci], axis=[ci]
        ),
        name="bgemm",
        attrs=bgemm_attrs,
    )
    if auto_scheduler_rewritten_layout:
        bgemm = auto_scheduler.rewrite_compute_body(bgemm, auto_scheduler_rewritten_layout)
    # inverse transform: A^T @ y @ A brings tiles back to the spatial domain
    r_a = te.reduce_axis((0, alpha), "r_a")
    r_b = te.reduce_axis((0, alpha), "r_b")
    inverse = te.compute(
        (m, m, P, CO),
        lambda vh, vw, p, co: te.sum(
            bgemm[r_a, r_b, p, co] * A[r_a, vh] * A[r_b, vw], axis=[r_a, r_b]
        ),
        name="inverse",
        attrs={
            "auto_scheduler_simplify_const_tensor_indices": ["vh", "vw", "r_a", "r_b"],
            "schedule_rule": "conv2d_nhwc_winograd_inverse",
        },
    )
    # output: scatter the m x m tiles back into NHWC order
    output = te.compute(
        (N, H, W, CO),
        lambda n, h, w, co: inverse[h % m, w % m, n * nH * nW + (h // m) * nW + (w // m), co],
        name="conv2d_winograd",
    )
    return output
def _conv2d_winograd_nchw_impl(
    data,
    weight,
    strides,
    padding,
    dilation,
    out_dtype,
    tile_size,
    pre_computed=False,
    write_cache_level=None,
    auto_scheduler_rewritten_layout="",
    meta_schedule_original_shape=None,
):
    """Conv2D Winograd implementation in NCHW layout.
    This is a clean version to be used by the auto-scheduler for both CPU and GPU.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    weight : tvm.te.Tensor
        4-D with shape [num_filter, in_channel, filter_height, filter_width] when
        `pre_computed` is False, or the Winograd-transformed kernel with shape
        [alpha, alpha, in_channel, num_filter] when `pre_computed` is True.
    strides : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of two ints
        padding size, or [pad_height, pad_width]
    dilation: int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    out_dtype : str, optional
        Specifies the output data type.
    tile_size : int
        The size of the tile to use for the Winograd filter
    pre_computed: bool = False
        Whether the kernel is precomputed
    write_cache_level: Optional[int] = None
        The cache level to write to in multi-level tiling rule in MetaSchedule.
    auto_scheduler_rewritten_layout: str = ""
        Unused here; accepted only for signature parity with the NHWC impl.
    meta_schedule_original_shape: Optional[List[PrimExpr]] = None
        The original shape of the input tensor.

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    # Layout rewrite is not supported for NCHW; discard the argument explicitly.
    del auto_scheduler_rewritten_layout
    N, CI, H, W = get_const_tuple(data.shape)
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    if meta_schedule_original_shape:
        # MetaSchedule layout rewrite: restore the weight's pre-rewrite shape.
        auto_scheduler.rewrite_tensor_shape(weight, meta_schedule_original_shape)
    assert (dilation_h, dilation_w) == (1, 1), "Does not support dilation"
    HSTR, WSTR = (strides, strides) if isinstance(strides, int) else strides
    if not pre_computed:  # kernel tensor is raw tensor, do strict check
        CO, CI, KH, KW = get_const_tuple(weight.shape)
        alpha = KW + tile_size - 1
        assert HSTR == 1 and WSTR == 1 and KH == KW
    else:
        # Kernel is already in the Winograd domain: recover the spatial extent.
        alpha, _, CI, CO = get_const_tuple(weight.shape)
        KH = KW = alpha + 1 - tile_size
        assert HSTR == 1 and WSTR == 1 and dilation_h == 1 and dilation_w == 1
    # NOTE: a second, dead `get_pad_tuple` call (whose results were never used)
    # was removed here; pt/pl/pb/pr below are the padding values actually used.
    assert HSTR == 1 and WSTR == 1 and KH == 3 and KW == 3
    pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW))
    data_pad = pad(data, (0, 0, pt, pl), (0, 0, pb, pr), name="data_pad")
    r = KW
    m = tile_size
    # A: inverse transform, B: data transform, G: kernel transform matrices.
    A, B, G = winograd_transform_matrices(m, r, out_dtype)
    H = (H + pt + pb - KH) // HSTR + 1
    W = (W + pl + pr - KW) // WSTR + 1
    # Number of m x m output tiles along each spatial axis; P = total tiles.
    nH, nW = (H + m - 1) // m, (W + m - 1) // m
    # Keep the tile count free of the batch dim when N is symbolic (dynamic batch).
    P = N * nH * nW if isinstance(N, int) else nH * nW
    # transform kernel
    if not pre_computed:
        r_kh = te.reduce_axis((0, KH), name="r_kh")
        r_kw = te.reduce_axis((0, KW), name="r_kw")
        kernel_pack = te.compute(
            (alpha, alpha, CI, CO),
            lambda eps, nu, ci, co: te.sum(
                weight[co, ci, r_kh, r_kw] * G[eps, r_kh] * G[nu, r_kw], axis=[r_kh, r_kw]
            ),
            name="kernel_pack",
        )
        bgemm_attrs = {}
    else:
        kernel_pack = weight
        # A pre-transformed kernel is layout-free: schedulers may repack it.
        bgemm_attrs = {"layout_free_placeholders": [kernel_pack]}
        if write_cache_level is not None:
            if not isinstance(write_cache_level, int):
                bgemm_attrs["meta_schedule.write_cache_level"] = write_cache_level
            else:
                bgemm_attrs["meta_schedule.write_cache_level"] = [write_cache_level]
    # pack data tile
    input_tile = te.compute(
        (CI, P, alpha, alpha),
        lambda ci, p, eps, nu: data_pad[
            p // (nH * nW), ci, ((p // nW) % nH) * m + eps, (p % nW) * m + nu
        ],
        name="input_tile",
        attrs={"schedule_rule": "None"},
    )
    # transform data: B^T @ d @ B
    r_a = te.reduce_axis((0, alpha), "r_a")
    r_b = te.reduce_axis((0, alpha), "r_b")
    data_pack = te.compute(
        (alpha, alpha, CI, P),
        lambda eps, nu, ci, p: te.sum(
            input_tile[ci, p, r_a, r_b] * B[r_a, eps] * B[r_b, nu], axis=[r_a, r_b]
        ),
        name="data_pack",
        attrs={"schedule_rule": "conv2d_nchw_winograd_data_pack"},
    )
    # do batch gemm
    ci = te.reduce_axis((0, CI), name="ci")
    bgemm = te.compute(
        (alpha, alpha, CO, P),
        lambda eps, nu, co, p: te.sum(
            data_pack[eps, nu, ci, p] * kernel_pack[eps, nu, ci, co], axis=[ci]
        ),
        name="bgemm",
        attrs=bgemm_attrs,
    )
    # inverse transform: A^T @ y @ A
    r_a = te.reduce_axis((0, alpha), "r_a")
    r_b = te.reduce_axis((0, alpha), "r_b")
    inverse = te.compute(
        (CO, P, m, m),
        lambda co, p, vh, vw: te.sum(
            bgemm[r_a, r_b, co, p] * A[r_a, vh] * A[r_b, vw], axis=[r_a, r_b]
        ),
        name="inverse",
        attrs={"schedule_rule": "conv2d_nchw_winograd_inverse"},
    )
    # output: scatter the m x m tiles back into NCHW order
    output = te.compute(
        (N, CO, H, W),
        lambda n, co, h, w: inverse[co, n * nH * nW + (h // m) * nW + (w // m), h % m, w % m],
        name="conv2d_winograd",
    )
    return output
def conv2d_winograd_nhwc_without_weight_transform(
    data,
    weight,
    strides,
    padding,
    dilation,
    out_dtype,
    auto_scheduler_rewritten_layout="",
    meta_schedule_original_shape=None,
):
    """Conv2D Winograd without layout transform in NHWC layout.
    This is a clean version to be used by the auto-scheduler for both CPU and GPU.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_height, in_width, in_channel]
    weight : tvm.te.Tensor
        The kernel already transformed into the Winograd domain, 4-D with
        shape [alpha, alpha, num_filter, in_channel] (not the raw
        [filter_height, filter_width, in_channel, num_filter] kernel).
    strides : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of two ints
        padding size, or [pad_height, pad_width]
    dilation: int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    out_dtype : str, optional
        Specifies the output data type.
    auto_scheduler_rewritten_layout: str = ""
        The layout after auto-scheduler's layout rewrite pass.
    meta_schedule_original_shape: Optional[List[PrimExpr]] = None
        The original shape of the input tensor.

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_height, out_width, out_channel]
    """
    # Same as conv2d_winograd_nhwc but the kernel-transform stage is skipped.
    return conv2d_winograd_nhwc(
        data,
        weight,
        strides,
        padding,
        dilation,
        out_dtype,
        pre_computed=True,
        auto_scheduler_rewritten_layout=auto_scheduler_rewritten_layout,
        meta_schedule_original_shape=meta_schedule_original_shape,
    )
def conv2d_winograd_nchw_without_weight_transform(
    data,
    weight,
    strides,
    padding,
    dilation,
    out_dtype,
    auto_scheduler_rewritten_layout="",
    meta_schedule_original_shape=None,
):
    """Conv2D Winograd without layout transform in NCHW layout.
    This is a clean version to be used by meta-schedule for both CPU and GPU.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    weight : tvm.te.Tensor
        The kernel already transformed into the Winograd domain, 4-D with
        shape [alpha, alpha, in_channel, num_filter] (not the raw
        [num_filter, in_channel, filter_height, filter_width] kernel).
    strides : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of two ints
        padding size, or [pad_height, pad_width]
    dilation: int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    out_dtype : str, optional
        Specifies the output data type.
    auto_scheduler_rewritten_layout: str = ""
        The layout after auto-scheduler's layout rewrite pass.
    meta_schedule_original_shape: Optional[List[PrimExpr]] = None
        The original shape of the input tensor.

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    # Same as conv2d_winograd_nchw but the kernel-transform stage is skipped.
    return conv2d_winograd_nchw(
        data,
        weight,
        strides,
        padding,
        dilation,
        out_dtype,
        pre_computed=True,
        auto_scheduler_rewritten_layout=auto_scheduler_rewritten_layout,
        meta_schedule_original_shape=meta_schedule_original_shape,
    )
| 49,678 | 31.343099 | 100 | py |
tvm | tvm-main/python/tvm/topi/nn/softmax.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, pointless-exception-statement
"""TVM operator for softmax and log_softmax compute."""
from __future__ import absolute_import
import tvm
from tvm import te, topi
@tvm.te.tag_scope(tag="softmax_output")
def softmax(x, axis=-1):
    """Compute the softmax activation of the input along one axis.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor of arbitrary rank.
    axis : int
        The axis along which softmax is performed (defaults to the last axis).

    Returns
    -------
    output : tvm.te.Tensor
        Tensor of the same shape as the input.
    """
    # Exact exponent variant; see fast_softmax for the approximate one.
    return softmax_common(x, axis, use_fast_exp=False)
@tvm.te.tag_scope(tag="fast_softmax_output")
def fast_softmax(x, axis=-1):
    """Compute softmax using a fast approximate exponent.

    Identical to :func:`softmax` except that the exponent is computed with
    `topi.math.fast_exp`, trading a little accuracy for speed.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor of arbitrary rank.
    axis : int
        The axis along which softmax is performed (defaults to the last axis).

    Returns
    -------
    output : tvm.te.Tensor
        Tensor of the same shape as the input.
    """
    return softmax_common(x, axis, use_fast_exp=True)
def softmax_common(x, axis, use_fast_exp):
    """The common part of softmax and fast_softmax.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input tensor of any dimension.
    axis : int
        The axis along which softmax is computed; may be negative.
    use_fast_exp : bool
        Use the approximate `topi.math.fast_exp` instead of the exact `te.exp`.

    Returns
    -------
    output : tvm.te.Tensor
        Tensor with the same shape as the input.

    Raises
    ------
    ValueError
        If `axis` is out of range for the input's rank.
    """
    shape = x.shape
    if axis < 0:
        axis = len(shape) + axis
    if axis < 0 or axis >= len(shape):
        # BUGFIX: the exception was previously constructed but never raised,
        # silently accepting out-of-range axes; also reject axes below -rank.
        raise ValueError("axis parameter should be less than input dim")
    k1 = te.reduce_axis((0, shape[axis]), name="k")
    k2 = te.reduce_axis((0, shape[axis]), name="k")

    def insert_reduce_index(indices, reduce_index):
        # Re-insert the reduction axis at position `axis` of an output index.
        return indices[:axis] + (reduce_index,) + indices[axis:]

    def get_non_reduce_indices(indices):
        # Drop the softmax axis from a full input index.
        return tuple([var for (i, var) in enumerate(indices) if i != axis])

    def _compute_max(*indices):
        eval_range = insert_reduce_index(indices, k1)
        return tvm.te.max(x[eval_range], axis=k1)

    def _compute_delta(max_elem, *indices):
        non_reduce_indices = get_non_reduce_indices(indices)
        return x[indices] - max_elem[non_reduce_indices]

    def _compute_exp(max_elem, *indices):
        non_reduce_indices = get_non_reduce_indices(indices)
        return te.exp(x[indices] - max_elem[non_reduce_indices])

    def _compute_expsum(exp, *indices):
        eval_range = insert_reduce_index(indices, k2)
        return te.sum(exp[eval_range], axis=k2)

    def _normalize(exp, expsum, *indices):
        non_reduce_indices = get_non_reduce_indices(indices)
        return exp[indices] / expsum[non_reduce_indices]

    reduced_shape = tuple([dim for (i, dim) in enumerate(shape) if i != axis])
    # Subtract the per-slice max before exponentiating for numerical stability.
    max_elem = te.compute(reduced_shape, _compute_max, name="T_softmax_maxelem")
    if use_fast_exp:
        delta = te.compute(
            shape, lambda *indices: _compute_delta(max_elem, *indices), name="T_softmax_delta"
        )
        exp = topi.math.fast_exp(delta)
    else:
        exp = te.compute(
            shape, lambda *indices: _compute_exp(max_elem, *indices), name="T_softmax_exp"
        )
    expsum = te.compute(
        reduced_shape, lambda *indices: _compute_expsum(exp, *indices), name="T_softmax_expsum"
    )
    return te.compute(
        shape,
        lambda *indices: _normalize(exp, expsum, *indices),
        name="T_softmax_norm",
        attrs={"axis": axis},
    )
@tvm.te.tag_scope(tag="log_softmax_output")
def log_softmax(x, axis=-1):
    """Perform log softmax activation on the data.

    Parameters
    ----------
    x : tvm.te.Tensor
        N-D input data
    axis : int
        The axis along which log-softmax is computed; may be negative.

    Returns
    -------
    output : tvm.te.Tensor
        N-D output with same shape

    Raises
    ------
    ValueError
        If `axis` is out of range for the input's rank.
    """
    shape = x.shape
    if axis < 0:
        axis = len(shape) + axis
    if axis < 0 or axis >= len(shape):
        # BUGFIX: the exception was previously constructed but never raised,
        # silently accepting out-of-range axes; also reject axes below -rank.
        raise ValueError("axis parameter should be less than input dim")
    k1 = te.reduce_axis((0, shape[axis]), name="k")
    k2 = te.reduce_axis((0, shape[axis]), name="k")

    def insert_reduce_index(indices, reduce_index):
        # Re-insert the reduction axis at position `axis` of an output index.
        return indices[:axis] + (reduce_index,) + indices[axis:]

    def get_non_reduce_indices(indices):
        # Drop the softmax axis from a full input index.
        return tuple([var for (i, var) in enumerate(indices) if i != axis])

    def _compute_max(*indices):
        eval_range = insert_reduce_index(indices, k1)
        return tvm.te.max(x[eval_range], axis=k1)

    def _compute_expsum(max_elem, *indices):
        eval_range = insert_reduce_index(indices, k2)
        return te.sum(te.exp(x[eval_range] - max_elem[indices]), axis=k2)

    def _normalize(max_elem, expsum, *indices):
        # log(softmax) = x - max - log(sum(exp(x - max))), computed stably.
        non_reduce_indices = get_non_reduce_indices(indices)
        return x[indices] - max_elem[non_reduce_indices] - te.log(expsum[non_reduce_indices])

    reduced_shape = tuple([dim for (i, dim) in enumerate(shape) if i != axis])
    max_elem = te.compute(reduced_shape, _compute_max, name="T_softmax_maxelem")
    expsum = te.compute(reduced_shape, lambda *indices: _compute_expsum(max_elem, *indices))
    return te.compute(
        shape,
        lambda *indices: _normalize(max_elem, expsum, *indices),
        attrs={"axis": axis},
    )
| 5,682 | 31.66092 | 95 | py |
tvm | tvm-main/python/tvm/topi/nn/conv1d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Transposed 1D convolution operators (sometimes called Deconvolution)."""
from tvm import te
from .dilate import dilate
from .pad import pad
from ..utils import simplify
from .utils import get_pad_tuple1d
def conv1d_transpose_ncw(data, kernel, stride, padding, out_dtype, output_padding):
    """Transposed 1D convolution ncw forward operator.
    Parameters
    ----------
    data : tvm.te.Tensor
        3-D with shape [batch, in_channel, in_width]
    kernel : tvm.te.Tensor
        3-D with shape [in_channel, num_filter, filter_width]
    stride : ints
        The spatial stride along width
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    out_dtype : str
        The output data type. This is used for mixed precision.
    output_padding : ints
        Used to recover the actual output shape in case there are more
        than one possible shape. Must be smaller than stride.
    Returns
    -------
    output : tvm.te.Tensor
        3-D with shape [batch, out_channel, out_width]
    """
    # dilate and pad
    if isinstance(stride, (tuple, list)):
        stride = stride[0]
    if isinstance(output_padding, (tuple, list)):
        output_padding = output_padding[0]
    batch, channels_in, data_width = data.shape
    _, channels_out, kernel_width = kernel.shape
    assert output_padding < stride
    channels_out = simplify(channels_out)
    # Insert (stride - 1) zeros between input elements: a transposed conv with
    # stride s equals a unit-stride conv over the s-dilated input.
    data = dilate(data, [1, 1, stride], name="data_dilate")
    # Transposed-conv padding is the complement of the forward padding
    # ((kernel_width - 1) - pad on each side); output_padding widens the right
    # side to disambiguate the output length.
    pad_left, pad_right = get_pad_tuple1d(padding, (kernel_width,))
    pad_left = kernel_width - 1 - pad_left
    pad_right = kernel_width - 1 - pad_right + output_padding
    data = pad(data, [0, 0, pad_left], [0, 0, pad_right], name="data_pad")
    # transpose kernel, switch kernel layout to IOW; the spatial axis is also
    # flipped so that the cross-correlation below realizes the transposed conv
    kernel = te.compute(
        (channels_out, channels_in, kernel_width),
        lambda o, i, w: kernel[i][o][kernel_width - 1 - w],
        name="kernel",
    )
    # convolution: plain unit-stride conv over the dilated, padded input
    _, _, data_width = data.shape
    out_w = simplify(data_width - kernel_width + 1)
    dc = te.reduce_axis((0, channels_in), name="dc")
    dw = te.reduce_axis((0, kernel_width), name="dw")
    output = te.compute(
        (batch, channels_out, out_w),
        lambda b, c, w: te.sum(
            data[b, dc, w + dw].astype(out_dtype) * kernel[c, dc, dw].astype(out_dtype),
            axis=[dc, dw],
        ),
        tag="conv1d_transpose_ncw",
    )
    return output
| 3,265 | 33.744681 | 88 | py |
tvm | tvm-main/python/tvm/topi/nn/lstm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""General LSTM implementation using TE scan."""
from tvm import te, tir
from tvm.topi import tag
def lstm(
    Xs,
    Wi,
    Wh,
    Bi=None,
    Bh=None,
    h_init=None,
    c_init=None,
    proj=None,
    p_i=None,
    p_f=None,
    p_o=None,
    f_act=tir.sigmoid,
    g_act=tir.tanh,
    h_act=tir.tanh,
    reverse=False,
    weight_layout: str = "IFGO",
):
    """General LSTM implemented using TE scan.
    Parameters
    ----------
    Xs : te.Tensor
        Input sequence with shape `(seq_len, batch_size, in_dim)`
    Wi : te.Tensor
        Input weight matrix with shape `(4 * hidden_dim, in_dim)`. The weights are packed according
        to `weight_layout`.
    Wh : te.Tensor
        Hidden weight matrix with shape `(4 * hidden_dim, hidden_dim or proj_dim)`. Packed as `Wh`.
    Bi : te.Tensor, optional
        Input bias with shape `(4 * hidden_dim,)`, by default None. Packed as `Wh`.
    Bh : te.Tensor, optional
        Hidden bias with shape as `Bi`, by default None. Packed as `Wh`.
    h_init : te.Tensor, optional
        Initial hidden state with shape `(batch_size, hidden_dim or proj_dim)`, zero if None
    c_init : te.Tensor, optional
        Initial cell state with same shape as `h_init`, zero if None
    proj : te.Tensor, optional
        Projection matrix with shape `(proj_dim, hidden_dim)`, by default None
    p_i, p_f, p_o : te.Tensor, optional
        Peephole LSTM matrices with shape `(batch_size, hidden_dim)`, by default None
    f_act, g_act, h_act : F, optional
        Gate activation functions
    reverse : bool, optional
        Whether to process `Xs` in reverse, by default False
    weight_layout : str, optional
        The packed weight layout for gates, by default "IFGO". Note: I = input, F = forget,
        G = cell, O = output.
    Returns
    -------
    result : te.Tensor, te.Tensor
        Tuple of hidden states (with shape `(seq_len, batch_size, hidden_dim or proj_dim)`), and
        cell states (with shape `(seq_len, batch_size, hidden_dim)`).
    """
    assert len(weight_layout) == 4 and sorted(weight_layout) == sorted(
        "IFGO"
    ), f'given weight layout "{weight_layout}" is not a permutation of "IFGO"'
    # Row-block index of each gate within the packed 4 * hidden_dim dimension.
    i_gate_idx = weight_layout.find("I")
    f_gate_idx = weight_layout.find("F")
    g_gate_idx = weight_layout.find("G")
    o_gate_idx = weight_layout.find("O")
    seq_len, batch_size, in_dim = Xs.shape
    assert (
        Wi.shape[0] % 4 == 0
    ), f"dim 0 of input weight should be 4 * hidden_dim, but {Wi.shape[0]} is not divisible by 4"
    hidden_dim = Wi.shape[0] // 4
    proj_dim = hidden_dim
    if proj is not None:
        proj_dim = proj.shape[0]
    # te.scan uses up 1 element for the initial value
    scan_len = seq_len + 1
    # precompute input-hidden matmul outside the scan (it has no recurrence,
    # so it can be done for all timesteps at once)
    ki = te.reduce_axis((0, in_dim), name="ki2h")
    Xi2h = te.compute(
        (seq_len * batch_size, 4 * hidden_dim),
        lambda tb, ij: te.sum(Xs[(tb // batch_size), tb % batch_size, ki] * Wi[ij, ki], axis=ki),
        name="Xi2h",
    )
    if Bi is not None:
        Xi2h = te.compute(
            Xi2h.shape, lambda tb, ij: Xi2h[tb, ij] + Bi[ij], name="Xi2h_bias", tag=tag.INJECTIVE
        )
    # scan state placeholders; time index 0 carries the initial h/c below
    h_state = te.placeholder((scan_len, batch_size, proj_dim), name="h_state")
    c_state = te.placeholder((scan_len, batch_size, hidden_dim), name="c_state")
    h_init = te.compute(
        (1, batch_size, proj_dim),
        lambda _, b, i: h_init[b, i] if h_init is not None else 0.0,
        name="h_init",
    )
    c_init = te.compute(
        (1, batch_size, hidden_dim),
        lambda _, b, i: c_init[b, i] if c_init is not None else 0.0,
        name="c_init",
    )
    # begin scan computations, first the (batched) hidden-hidden dense
    kh = te.reduce_axis((0, proj_dim), name="kh2h")
    s_h2h = te.compute(
        (scan_len, batch_size, 4, hidden_dim),
        lambda t, b, i, j: te.sum(h_state[t - 1, b, kh] * Wh[i * hidden_dim + j, kh], axis=kh),
        name="s_h2h",
    )
    if Bh is not None:
        s_h2h = te.compute(
            s_h2h.shape,
            lambda t, b, i, j: s_h2h[t, b, i, j] + Bh[i * hidden_dim + j],
            name="s_h2h_bias",
            tag=tag.INJECTIVE,
        )
    # helper to reverse time if scanning backwards
    get_x_t = lambda t: seq_len - t if reverse else t - 1
    # pre-activation gates = input contribution + hidden contribution
    gates = te.compute(
        (scan_len, batch_size, 4, hidden_dim),
        lambda t, b, i, j: Xi2h[get_x_t(t) * batch_size + b, i * hidden_dim + j]
        + s_h2h[t, b, i, j],
        name="gates",
        tag=tag.INJECTIVE,
    )
    # helper to correctly read each gate dense from the batched output
    read_gate = lambda t, b, j, idx: gates[t, b, idx, j]
    gate_shape = (scan_len, batch_size, hidden_dim)
    # compute the activated gates (and do some extra stuff if peephole weights are present)
    if p_i is not None and p_f is not None:
        # peephole variant: input/forget gates also see the previous cell state
        i_gate = te.compute(
            gate_shape,
            lambda t, b, j: f_act(
                read_gate(t, b, j, i_gate_idx) + p_i[b, j] * c_state[t - 1, b, j]
            ),
            name="i_gate_p",
            tag=tag.INJECTIVE,
        )
        f_gate = te.compute(
            gate_shape,
            lambda t, b, j: f_act(
                read_gate(t, b, j, f_gate_idx) + p_f[b, j] * c_state[t - 1, b, j]
            ),
            name="f_gate_p",
            tag=tag.INJECTIVE,
        )
    else:
        i_gate = te.compute(
            gate_shape,
            lambda *i: f_act(read_gate(*i, i_gate_idx)),
            name="i_gate",
            tag=tag.INJECTIVE,
        )
        f_gate = te.compute(
            gate_shape,
            lambda *i: f_act(read_gate(*i, f_gate_idx)),
            name="f_gate",
            tag=tag.INJECTIVE,
        )
    g_gate = te.compute(
        gate_shape, lambda *i: g_act(read_gate(*i, g_gate_idx)), name="g_gate", tag=tag.INJECTIVE
    )
    # c_t = f_t * c_{t-1} + i_t * g_t
    next_c = te.compute(
        gate_shape,
        lambda t, b, j: f_gate[t, b, j] * c_state[t - 1, b, j] + i_gate[t, b, j] * g_gate[t, b, j],
        name="next_c",
    )
    if p_o is not None:
        # peephole output gate reads the freshly-computed cell state
        o_gate = te.compute(
            gate_shape,
            lambda t, b, j: f_act(read_gate(t, b, j, o_gate_idx) + p_o[b, j] * next_c[t, b, j]),
            name="o_gate_p",
            tag=tag.INJECTIVE,
        )
    else:
        o_gate = te.compute(
            gate_shape,
            lambda *i: f_act(read_gate(*i, o_gate_idx)),
            name="o_gate",
            tag=tag.INJECTIVE,
        )
    # h_t = o_t * h_act(c_t)
    next_h = te.compute(gate_shape, lambda *i: o_gate(*i) * h_act(next_c(*i)), name="next_h")
    # project hidden state back to proj_dim if projection matrix is present
    if proj is not None:
        kr = te.reduce_axis((0, hidden_dim), name="kh2p")
        next_h = te.compute(
            (scan_len, batch_size, proj_dim),
            lambda t, b, j: te.sum(next_h[t, b, kr] * proj[j, kr], axis=kr),
            name="next_h_proj",
        )
    # run the recurrence: te.scan threads (h, c) through the time axis
    scan_h, scan_c = te.scan(
        [h_init, c_init], [next_h, next_c], [h_state, c_state], name="lstm_scan"
    )
    # drop the initial values, TODO(@altanh): is there a better way?
    scan_h = te.compute(
        (seq_len, batch_size, proj_dim), lambda t, b, j: scan_h[t + 1, b, j], name="hidden_states"
    )
    scan_c = te.compute(
        (seq_len, batch_size, hidden_dim), lambda t, b, j: scan_c[t + 1, b, j], name="cell_states"
    )
    return scan_h, scan_c
| 8,244 | 33.936441 | 99 | py |
tvm | tvm-main/python/tvm/topi/nn/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""TVM operator fully connected compute."""
import tvm
from tvm import auto_scheduler, te
from .. import tag
def matmul(
    tensor_a,
    tensor_b,
    bias=None,
    out_dtype=None,
    transpose_a=False,
    transpose_b=False,
    auto_scheduler_rewritten_layout="",
    meta_schedule_original_shape=None,
):
    """The default implementation of matmul in topi.

    Parameters
    ----------
    tensor_a : tvm.te.Tensor
        2-D with shape [batch, in_dim]
    tensor_b : tvm.te.Tensor
        2-D with shape [out_dim, in_dim]
    bias : Optional[tvm.te.Tensor]
        1-D with shape [out_dim]
    out_dtype : Optional[str]
        The output type. This is used for mixed precision.
    transpose_a : Optional[bool] = False
        Whether the tensor_a is in transposed format.
    transpose_b : Optional[bool] = False
        Whether the tensor_b is in transposed format.
    auto_scheduler_rewritten_layout: Optional[str] = ""
        The layout after auto-scheduler's layout rewrite pass.
    meta_schedule_original_shape: Optional[List[PrimExpr]] = None
        The original shape of the input tensor.

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [batch, out_dim]
    """
    # TODO(jcf94): Add multi-dim support for tensor_a
    assert len(tensor_a.shape) == 2, "only support 2-dim matmul"
    if bias is not None:
        assert len(bias.shape) == 1
    if out_dtype is None:
        out_dtype = tensor_a.dtype
    if transpose_a:
        in_dim, batch = tensor_a.shape
    else:
        batch, in_dim = tensor_a.shape
    if auto_scheduler_rewritten_layout:
        # Infer shape for the rewritten layout
        out_dim, red_dim = auto_scheduler.get_shape_from_rewritten_layout(
            auto_scheduler_rewritten_layout, ["j", "k"]
        )
        auto_scheduler.remove_index_check(tensor_b)
    elif meta_schedule_original_shape:
        auto_scheduler.rewrite_tensor_shape(tensor_b, meta_schedule_original_shape)
        if transpose_b:
            out_dim, red_dim = tensor_b.shape
        else:
            red_dim, out_dim = tensor_b.shape
    elif transpose_b:
        out_dim, red_dim = tensor_b.shape
    else:
        red_dim, out_dim = tensor_b.shape
    # The reduction-dim check only makes sense for static shapes: skip it when
    # either side is a dynamic dimension (tir.SizeVar, i.e. "any_dim").
    condition = True
    if isinstance(in_dim, tvm.tir.SizeVar):  # "any_dim"
        condition = False
    elif isinstance(red_dim, tvm.tir.SizeVar):  # "any_dim"
        condition = False
    if condition:
        # BUGFIX: this message was missing the f-prefix, so "{in_dim}" and
        # "{red_dim}" were printed literally instead of the actual dimensions.
        assert int(in_dim) == int(
            red_dim
        ), f"Inner dimensions of dense do not match. {in_dim} vs {red_dim}."
    k = te.reduce_axis((0, in_dim), name="k")
    if (transpose_a, transpose_b) == (True, True):
        compute_lambda = lambda i, j: te.sum(
            tensor_a[k, i].astype(out_dtype) * tensor_b[j, k].astype(out_dtype), axis=k
        )
        compute_name = "T_matmul_TT"
        compute_tag = "matmul"
    elif (transpose_a, transpose_b) == (True, False):
        compute_lambda = lambda i, j: te.sum(
            tensor_a[k, i].astype(out_dtype) * tensor_b[k, j].astype(out_dtype), axis=k
        )
        compute_name = "T_matmul_TN"
        compute_tag = "matmul"
    elif (transpose_a, transpose_b) == (False, True):
        compute_lambda = lambda i, j: te.sum(
            tensor_a[i, k].astype(out_dtype) * tensor_b[j, k].astype(out_dtype), axis=k
        )
        compute_name = "T_matmul_NT"
        # TODO(jcf94): Remove `dense` when `matmul` is finally ready
        compute_tag = "dense"
    else:  # (transpose_a, transpose_b) == (False, False):
        compute_lambda = lambda i, j: te.sum(
            tensor_a[i, k].astype(out_dtype) * tensor_b[k, j].astype(out_dtype), axis=k
        )
        compute_name = "T_matmul_NN"
        compute_tag = "matmul"
    mat = te.compute(
        (batch, out_dim),
        compute_lambda,
        name=compute_name,
        tag=compute_tag,
        # The weight-side operand is layout-free: schedulers may repack it.
        attrs={"layout_free_placeholders": [tensor_b]},
    )
    if bias is not None:
        mat = te.compute(
            (batch, out_dim),
            lambda i, j: mat[i, j] + bias[j].astype(out_dtype),
            tag=tag.BROADCAST,
        )
    if auto_scheduler_rewritten_layout:
        mat = auto_scheduler.rewrite_compute_body(mat, auto_scheduler_rewritten_layout)
    return mat
@tvm.target.generic_func
def matmul_legalize(attrs, inputs, types):
    """Legalize the matmul op.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of the current matmul.
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized.
    types : list of types
        List of input and output types.

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr, or None to leave the op unchanged.
    """
    # Generic fallback: perform no legalization. Targets override this
    # generic_func to rewrite the op as needed.
    # pylint: disable=unused-argument
    return None
def dense(
    data,
    weight,
    bias=None,
    out_dtype=None,
    auto_scheduler_rewritten_layout="",
    meta_schedule_original_shape=None,
):
    """Fully-connected (dense) layer: the default implementation in topi.

    Alias of the matmul_nt operator — :func:`matmul` with the data tensor in
    non-transposed format and the weight tensor in transposed format.

    Parameters
    ----------
    data : tvm.te.Tensor
        2-D with shape [batch, in_dim]
    weight : tvm.te.Tensor
        2-D with shape [out_dim, in_dim]
    bias : Optional[tvm.te.Tensor]
        1-D with shape [out_dim]
    out_dtype : Optional[str]
        The output type. This is used for mixed precision.
    auto_scheduler_rewritten_layout: str = ""
        The layout after auto-scheduler's layout rewrite pass.
    meta_schedule_original_shape: Optional[List[PrimExpr]] = None
        The original shape of the input tensor.

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [batch, out_dim]
    """
    return matmul(
        tensor_a=data,
        tensor_b=weight,
        bias=bias,
        out_dtype=out_dtype,
        transpose_a=False,
        transpose_b=True,
        auto_scheduler_rewritten_layout=auto_scheduler_rewritten_layout,
        meta_schedule_original_shape=meta_schedule_original_shape,
    )
@tvm.target.generic_func
def dense_legalize(attrs, inputs, types):
    """Legalizes dense op.
    The default (fallback) implementation performs no rewrite: returning
    None keeps the original expression. Targets may register specialized
    overrides through the generic-func dispatch.
    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current dense
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types
    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    # not to change by default
    # pylint: disable=unused-argument
    return None
def dense_pack(data, weight, bias=None, out_dtype=None):
    """The default implementation of dense_pack in topi.
    The weight is pre-packed as [out_dim // pack_width, in_dim, pack_width]
    so that consecutive output columns read contiguous memory.
    Parameters
    ----------
    data : tvm.te.Tensor
        2-D with shape [batch, in_dim]
    weight : tvm.te.Tensor
        2-D with shape [out_dim, in_dim]
    bias : Optional[tvm.te.Tensor]
        1-D with shape [out_dim]
    out_dtype : Optional[str]
        The output type. This is used for mixed precision.
    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [batch, out_dim]
    """
    if out_dtype is None:
        out_dtype = data.dtype
    batch, in_dim = get_const_tuple(data.shape)
    out_chunks, _, pack_width = get_const_tuple(weight.shape)
    out_dim = out_chunks * pack_width
    div = tvm.tir.indexdiv
    mod = tvm.tir.indexmod
    red = te.reduce_axis((0, in_dim), name="k")
    packed = te.compute(
        (batch, out_dim),
        lambda i, j: te.sum(
            data[i, red].astype(out_dtype)
            * weight[div(j, pack_width), red, mod(j, pack_width)].astype(out_dtype),
            axis=red,
        ),
        name="T_dense_pack",
        tag="dense_pack",
    )
    if bias is not None:
        # Broadcast-add the bias along the output dimension.
        packed = te.compute(
            (batch, out_dim),
            lambda i, j: packed[i, j] + bias[j].astype(out_dtype),
            tag=tag.BROADCAST,
        )
    return packed
@tvm.target.generic_func
def dense_alter_layout(attrs, inputs, tinfos, out_type):
    """Change dense layout.
    The default (fallback) implementation returns None, which means the
    layout is left unchanged; targets register overrides via generic-func
    dispatch.
    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : tvm.relay.Expr
        Grouped input symbols
    tinfos : list
        Input shape and dtype
    out_type: type
        The output type
    Note
    ----
    Unlike other TOPI functions, this function operates on both graph level and operator level.
    """
    # not to change by default
    return None
@tvm.target.generic_func
def batch_matmul_legalize(attrs, inputs, types):
    """Legalizes batch_matmul op.
    The default (fallback) implementation performs no rewrite: returning
    None keeps the original expression. Targets may register specialized
    overrides through the generic-func dispatch.
    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current batch_matmul
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types
    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    # not to change by default
    # pylint: disable=unused-argument
    return None
| 9,585 | 27.529762 | 99 | py |
tvm | tvm-main/python/tvm/topi/nn/bitserial_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
# pylint: disable=unused-argument, redefined-builtin
"""Bitserial Conv2D operators"""
import tvm
from tvm import te
from .pad import pad
from .utils import get_pad_tuple
from .bitserial_util import bitpack
from ..utils import get_const_tuple
def bitserial_conv2d_nchw(
    data,
    kernel,
    stride,
    padding,
    activation_bits,
    weight_bits,
    pack_dtype="uint32",
    out_dtype="int16",
    unipolar=True,
):
    """Bitserial Conv2D operator.
    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    kernel : tvm.te.Tensor
        4-D with shape [num_filter, in_channel, filter_height, filter_width]
    stride : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of two or four ints
        padding size, [pad_height, pad_width], [pad_top, pad_left, pad_down, pad_right]
    activation_bits: int
        number of bits used for activations/input elements
    weight_bits: int
        number of bits used for weight elements
    out_dtype: str
        return type of convolution
    pack_dtype: str
        bit packing type
    unipolar: bool
        if binarization style is in unipolar 1/0 format, instead of bipolar -1/+1 format
    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    assert isinstance(stride, int) or len(stride) == 2
    # Pack activations along the channel axis; the bit axis is inserted at
    # position 2, giving a 5-D tensor [batch, channel//w, bits, h, w].
    Input_q = bitpack(data, activation_bits, pack_axis=1, bit_axis=2, pack_type=pack_dtype)
    # A 4-D kernel is still raw and must be packed; a 5-D kernel is assumed
    # to be pre-packed by the caller.
    if len(kernel.shape) == 4:
        Filter_q = bitpack(kernel, weight_bits, pack_axis=1, bit_axis=4, pack_type=pack_dtype)
    else:
        Filter_q = kernel
    # Re-read the (possibly symbolic) dims from the packed tensors; this also
    # rebinds activation_bits/weight_bits to the packed bit-axis extents.
    batch, in_channel, activation_bits, in_height, in_width = Input_q.shape
    num_filter, _, kernel_h, kernel_w, weight_bits = Filter_q.shape
    if isinstance(padding, int) or (isinstance(padding, (tuple, list)) and len(padding) == 2):
        # NOTE(review): get_pad_tuple is given the raw kernel tensor here, not
        # a (kh, kw) pair — confirm string paddings ("SAME"/"VALID") behave.
        TPAD, LPAD, DPAD, RPAD = get_pad_tuple(padding, kernel)
    else:
        TPAD, LPAD, DPAD, RPAD = padding
    # Only pad the two spatial axes of the 5-D packed input.
    pad_before = [0, 0, 0, TPAD, LPAD]
    pad_after = [0, 0, 0, DPAD, RPAD]
    PadInput_q = pad(Input_q, pad_before, pad_after, name="pad_temp")
    # compute the output shape
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    out_channel = num_filter
    out_height = (in_height - kernel_h + TPAD + DPAD) // stride_h + 1
    out_width = (in_width - kernel_w + LPAD + RPAD) // stride_w + 1
    # Reduction axes: packed channel, kernel window, and both bit planes.
    rc = te.reduce_axis((0, in_channel), name="rc")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    b1 = te.reduce_axis((0, activation_bits), name="b1")
    b2 = te.reduce_axis((0, weight_bits), name="b2")
    if unipolar:
        def _conv(nn, ff, yy, xx):
            # Each (b1, b2) bit-plane pair contributes with weight 2^(b1+b2).
            b1b2 = (b1 + b2).astype(out_dtype)
            return te.sum(
                (
                    (
                        # popcount(x & w) counts matching 1-bits; subtracting
                        # popcount(x & ~w) realizes the unipolar +1/-1 scheme.
                        tvm.tir.popcount(
                            PadInput_q[nn, rc, b1, yy * stride_h + ry, xx * stride_w + rx]
                            & Filter_q[ff, rc, ry, rx, b2]
                        )
                        - tvm.tir.popcount(
                            PadInput_q[nn, rc, b1, yy * stride_h + ry, xx * stride_w + rx]
                            & ~Filter_q[ff, rc, ry, rx, b2]
                        )
                    )
                    << (b1b2)
                ).astype(out_dtype),
                axis=[rc, ry, rx, b2, b1],
            ).astype(out_dtype)
    else:
        def _conv(nn, ff, yy, xx):
            # Bipolar variant: a plain AND-popcount per bit-plane pair.
            b1b2 = (b1 + b2).astype(out_dtype)
            return te.sum(
                (
                    tvm.tir.popcount(
                        PadInput_q[nn, rc, b1, yy * stride_h + ry, xx * stride_w + rx]
                        & Filter_q[ff, rc, ry, rx, b2]
                    )
                    << (b1b2)
                ).astype(out_dtype),
                axis=[rc, ry, rx, b2, b1],
            ).astype(out_dtype)
    return te.compute(
        (batch, out_channel, out_height, out_width),
        _conv,
        name="Conv2dOutput",
        tag="bitserial_conv2d_nchw",
    )
def bitserial_conv2d_nhwc(
    data,
    kernel,
    stride,
    padding,
    activation_bits,
    weight_bits,
    pack_dtype="uint32",
    out_dtype="int16",
    unipolar=True,
):
    """Bitserial Conv2D operator.
    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_height, in_width, in_channel]
    kernel : tvm.te.Tensor
        4-D with shape [filter_height, filter_width, in_channel, num_filter]
    stride : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of two or four ints
        padding size, [pad_height, pad_width], [pad_top, pad_left, pad_down, pad_right]
    activation_bits: int
        number of bits used for activations/input elements
    weight_bits: int
        number of bits used for weight elements
    out_dtype: str
        return type of convolution
    pack_dtype: str
        bit packing type
    unipolar: bool
        if binarization style is in unipolar 1/0 format, instead of bipolar -1/+1 format
    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_height, out_width, out_channel]
    """
    assert isinstance(stride, int) or len(stride) == 2
    # Pack activations along the trailing channel axis; bits become axis 4.
    Input_q = bitpack(data, activation_bits, pack_axis=3, bit_axis=4, pack_type=pack_dtype)
    # A 4-D kernel is raw and must be packed; a 5-D kernel is pre-packed.
    # Note the bit axis lands at a different position in the two cases.
    if len(kernel.shape) == 4:
        Filter_q = bitpack(kernel, weight_bits, pack_axis=2, bit_axis=4, pack_type=pack_dtype)
        kernel_h, kernel_w, _, num_filter, _ = get_const_tuple(Filter_q.shape)
    else:
        Filter_q = kernel
        kernel_h, kernel_w, _, _, num_filter = get_const_tuple(Filter_q.shape)
    batch, in_height, in_width, in_channel_q, _ = get_const_tuple(Input_q.shape)
    if isinstance(padding, int) or (isinstance(padding, (tuple, list)) and len(padding) == 2):
        # NOTE(review): get_pad_tuple is given the raw kernel tensor here, not
        # a (kh, kw) pair — confirm string paddings ("SAME"/"VALID") behave.
        TPAD, LPAD, DPAD, RPAD = get_pad_tuple(padding, kernel)
    else:
        TPAD, LPAD, DPAD, RPAD = padding
    # Only pad the two spatial axes of the 5-D packed input.
    pad_before = [0, TPAD, LPAD, 0, 0]
    pad_after = [0, DPAD, RPAD, 0, 0]
    # compute the output shape
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    out_channel = num_filter
    out_height = (in_height - kernel_h + TPAD + DPAD) // stride_h + 1
    out_width = (in_width - kernel_w + LPAD + RPAD) // stride_w + 1
    PadInput_q = pad(Input_q, pad_before, pad_after, name="PaddedInput")
    # Reduction axes: packed channel, kernel window, and both bit planes.
    rc = te.reduce_axis((0, in_channel_q), name="rc")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    b1 = te.reduce_axis((0, activation_bits), name="b1")
    b2 = te.reduce_axis((0, weight_bits), name="b2")
    if unipolar:
        def _conv(nn, yy, xx, ff):
            # popcount(x & w) - popcount(x & ~w) realizes the unipolar
            # +1/-1 scheme; each (b1, b2) pair is weighted by 2^(b1+b2).
            b1b2 = (b1 + b2).astype(out_dtype)
            return te.sum(
                (
                    (
                        tvm.tir.popcount(
                            PadInput_q[nn, yy * stride_h + ry, xx * stride_w + rx, rc, b1]
                            & Filter_q[ry, rx, rc, ff, b2]
                        )
                        - tvm.tir.popcount(
                            PadInput_q[nn, yy * stride_h + ry, xx * stride_w + rx, rc, b1]
                            & ~Filter_q[ry, rx, rc, ff, b2]
                        )
                    )
                    << b1b2
                ).astype(out_dtype),
                axis=[rc, ry, rx, b2, b1],
            )
    else:
        def _conv(nn, yy, xx, ff):
            # Bipolar variant: a plain AND-popcount per bit-plane pair.
            b1b2 = (b1 + b2).astype(out_dtype)
            return te.sum(
                (
                    tvm.tir.popcount(
                        PadInput_q[nn, yy * stride_h + ry, xx * stride_w + rx, rc, b1]
                        & Filter_q[ry, rx, rc, ff, b2]
                    )
                    << b1b2
                ).astype(out_dtype),
                axis=[rc, ry, rx, b2, b1],
            )
    conv = te.compute(
        (batch, out_height, out_width, out_channel),
        _conv,
        name="Conv2dOutput",
        tag="bitserial_conv2d_nhwc",
    )
    return conv
@tvm.target.generic_func
def bitserial_conv2d_legalize(attrs, inputs, types):
    """Legalizes Bitserial Conv2D op.
    The default (fallback) implementation performs no rewrite: returning
    None keeps the original expression. Targets may register specialized
    overrides through the generic-func dispatch.
    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types
    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    # not to change by default
    return None
| 9,643 | 31.47138 | 94 | py |
tvm | tvm-main/python/tvm/topi/nn/correlation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Correlation operators"""
from tvm import te
from .pad import pad
from ..utils import get_const_tuple
def correlation_nchw(
    data1, data2, kernel_size, max_displacement, stride1, stride2, padding, is_multiply
):
    """Correlation operator in NCHW layout.
    Parameters
    ----------
    data1 : tvm.te.Tensor
        4-D with shape [batch, channel, height, width]
    data2 : tvm.te.Tensor
        4-D with shape [batch, channel, height, width]
    kernel_size: int
        Kernel size for correlation, must be an odd number
    max_displacement: int
        Max displacement of Correlation
    stride1: int
        Stride for data1
    stride2: int
        Stride for data2 within the neightborhood centered around data1
    padding : int or a list/tuple of 2 or 4 ints
        Padding size, or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    is_multiply: bool
        operation type is either multiplication or substraction
    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    # pylint: disable=unnecessary-lambda, invalid-name
    data_shape = get_const_tuple(data1.shape)
    assert get_const_tuple(data2.shape) == data_shape, "data1 and data2 should have the same shape"
    assert kernel_size > 0 and kernel_size % 2, "kernel_size should be non-negative odd number"
    # Normalize padding into explicit top/left/bottom/right amounts.
    if isinstance(padding, (tuple, list)):
        if len(padding) == 2:
            pad_before_h = pad_after_h = padding[0]
            pad_before_w = pad_after_w = padding[1]
        elif len(padding) == 4:
            pad_before_h, pad_before_w, pad_after_h, pad_after_w = padding
        else:
            raise ValueError("invalid padding")
    elif isinstance(padding, int):
        pad_before_h = pad_after_h = pad_before_w = pad_after_w = padding
    else:
        raise ValueError("invalid padding")
    pad_before = [0, 0, pad_before_h, pad_before_w]
    pad_after = [0, 0, pad_after_h, pad_after_w]
    padded_data1 = pad(data1, pad_before, pad_after)
    padded_data2 = pad(data2, pad_before, pad_after)
    batch, channel, height, width = data_shape
    # border_size keeps the whole correlation window (kernel + max shift)
    # inside the padded image; one output channel per displacement offset.
    kernel_radius = (kernel_size - 1) // 2
    border_size = max_displacement + kernel_radius
    displacement_radius = max_displacement // stride2
    displacement_size = 2 * displacement_radius + 1
    padded_width = width + pad_before_w + pad_after_w
    padded_height = height + pad_before_h + pad_after_h
    out_channel = displacement_size * displacement_size
    out_height = (padded_height - 2 * border_size + stride1 - 1) // stride1
    out_width = (padded_width - 2 * border_size + stride1 - 1) // stride1
    rc = te.reduce_axis((0, channel), name="rc")
    ry = te.reduce_axis((0, kernel_size), name="ry")
    rx = te.reduce_axis((0, kernel_size), name="rx")
    # Elementwise combiner: product (correlation) or absolute difference.
    if is_multiply:
        corr_func = lambda x, y: x * y
    else:
        corr_func = lambda x, y: te.abs(x - y)
    def _compute_correlation(n, q, i, j):
        # location in data1
        y1 = i * stride1 + max_displacement
        x1 = j * stride1 + max_displacement
        # location in data2
        # Output channel q encodes the 2-D displacement (dy, dx) row-major.
        y2 = y1 + (te.indexdiv(q, displacement_size) - displacement_radius) * stride2
        x2 = x1 + (te.indexmod(q, displacement_size) - displacement_radius) * stride2
        return te.sum(
            corr_func(padded_data1[n, rc, y1 + ry, x1 + rx], padded_data2[n, rc, y2 + ry, x2 + rx]),
            axis=[rc, ry, rx],
        )
    correlation = te.compute(
        (batch, out_channel, out_height, out_width),
        lambda n, q, i, j: _compute_correlation(n, q, i, j),
        tag="correlation_nchw",
    )
    # Normalize by the number of accumulated elements per output value.
    return correlation / (kernel_size * kernel_size * channel)
| 4,544 | 35.95122 | 100 | py |
tvm | tvm-main/python/tvm/topi/nn/instance_norm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Instance normalization operator."""
from .. import cpp
def instance_norm(data, gamma, beta, axis, epsilon=1e-5):
    """Instance normalization operator.
    Thin wrapper that forwards directly to the C++ TOPI implementation.
    Parameters
    ----------
    data : tvm.te.Tensor
        N-D with shape (d_0, d_1, ..., d_{N-1})
    gamma: tvm.te.Tensor
        K-D with shape (r_0, r_1, ..., r_{K-1}) where K == len(axis) and d_{axis_k} == r_k
    beta: tvm.te.Tensor
        Optional, K-D with shape (r_0, r_1, ..., r_{K-1}) where K == len(axis) and d_{axis_k} == r_k
    axis : list of int
        Axis over the normalization applied (the axis along which the mean and variance are
        computed)
    epsilon : float
        The epsilon value to avoid division by zero.
    Returns
    -------
    result : tvm.te.Tensor
        N-D with shape (d_0, d_1, ..., d_{N-1})
    """
    return cpp.nn.instance_norm(data, gamma, beta, axis, epsilon)
| 1,672 | 33.854167 | 100 | py |
tvm | tvm-main/python/tvm/topi/nn/batch_to_space_nd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""TVM operator batch_to_space_nd compute."""
from __future__ import absolute_import
from . import cpp
def batch_to_space_nd(data, block_shape, crop_begin_list, crop_end_list):
    """Perform batch to space transformation on the data
    Thin wrapper that forwards directly to the C++ TOPI implementation.
    Parameters
    ----------
    data : tvm.te.Tensor
        N-D Tensor with shape [batch, spatial_shape, remaining_shapes],
        where spatial_shape has M dimensions.
    block_shape : list of ints
        list of size [M] where M is number of spatial dims, specifies block
        size for each spatial dimension.
    crop_begin_list : list of ints
        list of shape [M] where M is number of spatial dims, specifies
        begin crop size for each spatial dimension.
    crop_end_list : list of ints
        list of shape [M] where M is number of spatial dims, specifies
        end crop size for each spatial dimension.
    Returns
    -------
    output : tvm.te.Tensor
    """
    return cpp.nn.batch_to_space_nd(data, block_shape, crop_begin_list, crop_end_list)
| 1,836 | 35.74 | 86 | py |
tvm | tvm-main/python/tvm/topi/nn/bitserial_util.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
"""Utility functions for bitserial operators"""
import numpy as np
import tvm
from tvm import te
from tvm.topi.transform import concatenate
from ..utils import get_const_int
def bitpack(data, bits, pack_axis, bit_axis, pack_type, name="QuantizeInput"):
    """Packs data into format necessary for bitserial computation
    Each group of `data_width` consecutive elements along `pack_axis` is
    packed into one word of `pack_type`, one word per bit plane; a new axis
    of extent `bits` (the bit planes) is inserted at `bit_axis`.
    Parameters
    ----------
    data : tvm.te.Tensor
        The tensor to be packed.
    bits : int
        Number of bit planes to extract per element (at most 8, since only
        the 8 low-bit masks are defined below).
    pack_axis : int
        index of the axis to pack in data
    bit_axis : int
        index of axis to place bit axis in resulting packed data
    pack_type : str
        Packing word type; one of "uint8", "uint16", "uint32", "uint64".
    name : str
        Name for the generated compute stage.
    Returns
    -------
    output : tvm.te.Tensor
        The packed tensor; bit planes are concatenated along `bit_axis`
        when ``bits > 1``.
    """
    ishape = data.shape
    n = len(ishape)
    if pack_type == "uint8":
        data_width = 8
    elif pack_type == "uint16":
        data_width = 16
    elif pack_type == "uint32":
        data_width = 32
    elif pack_type == "uint64":
        data_width = 64
    else:
        # Previously an unsupported pack_type fell through and raised an
        # opaque NameError on data_width; fail fast with a clear message.
        raise ValueError(f"Unsupported pack_type {pack_type}, must be uint8/16/32/64")
    # Data must be in multiples of the data_width
    assert get_const_int(ishape[pack_axis]) % data_width == 0, "Not a multiple of word size"
    shape_vec = list(ishape)
    shape_vec[pack_axis] = shape_vec[pack_axis] // data_width
    shape_vec.insert(bit_axis, 1)
    bitserial_oshape = tuple(shape_vec)
    masks = np.array([0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80])
    # pack axis shifts if bit axis comes before
    if bit_axis <= pack_axis:
        pack_axis += 1
    def _bitpack(*indices):
        # One accumulator word per bit plane.
        packed_data = [tvm.tir.const(0, pack_type)] * bits
        for k in range(data_width):
            # Translate indices for packed data back to original
            idx = [0] * n
            j = 0
            for i in range(n + 1):
                if i == bit_axis:
                    continue
                if i == pack_axis:
                    idx[j] = indices[i] * data_width + k
                else:
                    idx[j] = indices[i]
                j += 1
            element = data(*idx)
            for b in range(bits):
                # Isolate bit b of the element and OR it into plane b.
                extracted_bit = ((element & tvm.tir.const(masks[b], "int32")) >> b).astype(
                    pack_type
                )
                packed_data[b] = packed_data[b] | extracted_bit
                # Shift left to make room, except after the final element.
                if k < data_width - 1:
                    packed_data[b] = packed_data[b] << 1
        return tuple(packed_data)
    output_tuple = te.compute(bitserial_oshape, _bitpack, name=name, tag="bitpack")
    if bits > 1:
        return concatenate(output_tuple, axis=bit_axis)
    return output_tuple
def binary_op_multiplier(pack_dtype):
    """Return the number of bits packed into one word of *pack_dtype*.
    Parameters
    ----------
    pack_dtype : str
        Packing type for the operator (must be a uint, e.g. "uint32").
    Returns
    -------
    int
        The bit width encoded in the dtype string.
    """
    # Strip the "uint" prefix and parse the remaining bit count.
    return int(pack_dtype[len("uint") :])
| 3,465 | 33.66 | 92 | py |
tvm | tvm-main/python/tvm/topi/nn/conv3d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, too-many-locals
# pylint: disable=unused-argument, redefined-builtin, no-else-return
"""Conv3D operators"""
import tvm
from tvm import te
from ..utils import get_const_tuple
from .conv2d import conv
from .winograd_util import winograd_transform_matrices
def conv3d_ncdhw(Input, Filter, stride, padding, dilation, groups, out_dtype=None):
    """Conv3D operator in NCDHW layout.
    Delegates to the generic ``conv`` implementation with the layout pair
    fixed to NCDHW data / OIDHW kernels.
    Parameters
    ----------
    Input : tvm.te.Tensor
        5-D with shape [batch, in_channel, in_depth, in_height, in_width]
    Filter : tvm.te.Tensor
        5-D with shape [num_filter, in_channel, filter_depth, filter_height, filter_width]
    stride : int or a list/tuple of three ints
        Stride size, or [strid_depth, stride_height, stride_width]
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    dilation: int or a list/tuple of three ints
        dilation size, or [dilation_depth, dilation_height, dilation_width]
    groups: int
        Number of groups.
    out_dtype: Optional[str]
        The output type; defaults to the input dtype when None.
    Returns
    -------
    Output : tvm.te.Tensor
        5-D with shape [batch, out_channel, out_depth, out_height, out_width]
    """
    return conv(
        Input,
        Filter,
        stride,
        padding,
        dilation,
        groups,
        "NCDHW",
        "OIDHW",
        out_dtype,
    )
def conv3d_ndhwc(
    Input,
    Filter,
    stride,
    padding,
    dilation,
    groups,
    out_dtype="float32",
    auto_scheduler_rewritten_layout="",
    meta_schedule_origin_shape=None,
):
    """Convolution operator in NDHWC layout.
    Delegates to the generic ``conv`` implementation with the layout pair
    fixed to NDHWC data / DHWIO kernels.
    Parameters
    ----------
    Input : tvm.te.Tensor
        5-D with shape [batch, in_depth, in_height, in_width, in_channel]
    Filter : tvm.te.Tensor
        5-D with shape [filter_depth, filter_height, filter_width, in_channel, num_filter]
    stride : int or a list/tuple of three ints
        Stride size, or [stride_depth, stride_height, stride_width]
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    dilation: int or a list/tuple of three ints
        dilation size, or [dilation_depth, dilation_height, dilation_width]
    groups: int
        Number of groups.
    out_dtype: str = "float32",
        The type of output tensor
    auto_scheduler_rewritten_layout: str = ""
        The layout after auto-scheduler's layout rewrite pass.
    meta_schedule_origin_shape: Optional[List[PrimExpr]] = None
        The original shape of the input tensor.
    Returns
    -------
    Output : tvm.te.Tensor
        5-D with shape [batch, out_depth, out_height, out_width, out_channel]
    """
    data_layout, kernel_layout = "NDHWC", "DHWIO"
    return conv(
        Input, Filter, stride, padding, dilation, groups,
        data_layout, kernel_layout, out_dtype,
        auto_scheduler_rewritten_layout, meta_schedule_origin_shape,
    )
def conv3d_winograd_weight_transform(kernel, tile_size):
    """Weight transformation for 3D winograd
    Parameters
    ----------
    kernel: Tensor
        The raw kernel tensor with layout "NCDHW".
    tile_size: int
        Tile size of winograd transform. e.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)
    Returns
    -------
    output : tvm.te.Tensor
        5-D with shape [alpha, alpha, alpha, CO, CI] when the depth axis is
        transformed, otherwise [alpha, alpha, KD, CO, CI]
    """
    CO, CI, KD, KH, KW = get_const_tuple(kernel.shape)
    # Only transform the depth axis when it is cubic with the spatial axes
    # and within the supported kernel-size range (3..7).
    depth_transform = 2 < KD < 8 and KD == KH
    if depth_transform:
        assert KD == KH == KW, "Only support NxNxN kernel"
    else:
        assert KH == KW, "Only supports DxNxN kernel"
    # alpha = tile_size + kernel_size - 1 is the transformed tile extent.
    r = tile_size + KH - 1
    r_kh = te.reduce_axis((0, KH), name="r_kh")
    r_kw = te.reduce_axis((0, KW), name="r_kw")
    # G is the Winograd weight-transform matrix: W' = G @ W @ G^T per axis.
    _, _, G = winograd_transform_matrices(tile_size, KH, kernel.dtype)
    if depth_transform:
        # Full 3-D transform: apply G along depth, height and width.
        shape = (r, r, r, CO, CI)
        r_kd = te.reduce_axis((0, KD), name="r_kd")
        return te.compute(
            shape,
            lambda omg, eps, nu, co, ci: te.sum(
                kernel[co][ci][r_kd][r_kh][r_kw] * G[omg][r_kd] * G[eps][r_kh] * G[nu][r_kw],
                axis=[r_kd, r_kh, r_kw],
            ),
            name="transform_weight",
        )
    else:
        # 2-D transform only: depth axis is kept untransformed.
        shape = (r, r, KD, CO, CI)
        return te.compute(
            shape,
            lambda eps, nu, d, co, ci: te.sum(
                kernel[co][ci][d][r_kh][r_kw] * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
            ),
            name="transform_weight",
        )
@tvm.target.generic_func
def conv3d_alter_layout(attrs, inputs, tinfos, out_type):
    """Change Conv3D layout.
    The default (fallback) implementation returns None, which means the
    layout is left unchanged; targets register overrides via generic-func
    dispatch.
    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : tvm.relay.Expr
        Grouped input symbols
    tinfos : list
        Input shape and dtype
    out_type: type
        The output type
    Note
    ----
    Unlike other TOPI functions, this function operates on both graph level and operator level.
    """
    # not to change by default
    return None
| 5,672 | 28.393782 | 95 | py |
tvm | tvm-main/python/tvm/topi/nn/upsampling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM operator upsampling compute."""
from tvm import topi
from tvm import te
from ..utils import simplify
def upsampling(
    data,
    scale_h,
    scale_w,
    layout="NCHW",
    method="nearest_neighbor",
    align_corners=False,
    output_shape=None,
):
    """Perform upsampling on the data.
    Nearest neighbor and bilinear upsampling are supported.
    Parameters
    ----------
    inputs : tvm.te.Tensor
        inputs is a 4-D tensor with shape
        [batch, channel, in_height, in_width]
        or [batch, in_height, in_width, channel]
    scale_h : float
        Scaling factor for height
    scale_w : float
        Scaling factor for width
    layout : string, optional
        either "NCHW" or "NHWC"
    method : {"bilinear", "nearest_neighbor", "bicubic"}
        Method to be used for upsampling.
    align_corners : bool, optional
        Selects the "align_corners" coordinate transformation when True,
        "asymmetric" otherwise.
    output_shape: tvm.tir.container.Array, optional
        Shape to return. If left None will be inferred
        (If shape is determined dynamically, pass out_dtype.shape as output_shape)
    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, channel, in_height*scale_h, in_width*scale_w]
        or [batch, in_height*scale, in_width*scale, channel]
    """
    # NOTE(review): the first branch matches on base_layout (so "NCHW4c"
    # style sub-layouts pass) while the second matches the full layout
    # string — confirm NHWC sub-layouts are intentionally unsupported.
    base_layout = layout[0:4]
    if base_layout == "NCHW":
        if not output_shape:  # static case
            scaled_h = data.shape[2] * scale_h
            scaled_w = data.shape[3] * scale_w
            reshape_size = (
                simplify(topi.cast(te.round(scaled_h), data.shape[2].dtype)),
                simplify(topi.cast(te.round(scaled_w), data.shape[3].dtype)),
            )
        else:  # dynamic case -- we don't need to scale; already done in shape func
            reshape_size = (
                simplify(topi.cast(te.round(output_shape[2]), output_shape[2].dtype)),
                simplify(topi.cast(te.round(output_shape[3]), output_shape[3].dtype)),
            )
    elif layout == "NHWC":
        if not output_shape:  # static case
            scaled_h = data.shape[1] * scale_h
            scaled_w = data.shape[2] * scale_w
            reshape_size = (
                simplify(topi.cast(te.round(scaled_h), data.shape[1].dtype)),
                simplify(topi.cast(te.round(scaled_w), data.shape[2].dtype)),
            )
        else:  # dynamic case
            reshape_size = (
                simplify(topi.cast(te.round(output_shape[1]), output_shape[1].dtype)),
                simplify(topi.cast(te.round(output_shape[2]), output_shape[2].dtype)),
            )
    else:
        raise ValueError(f"not support this layout {layout} yet")
    coord_trans = "align_corners" if align_corners else "asymmetric"
    # resize2d expects the bare method name, so strip the "bi" prefix
    # ("bilinear" -> "linear", "bicubic" -> "cubic").
    if method[0:2] == "bi":
        method = method[2:]
    return topi.image.resize2d(
        data,
        [0.0] * 4,
        reshape_size,
        layout=layout,
        method=method,
        coordinate_transformation_mode=coord_trans,
        output_shape=output_shape,
    )
def upsampling3d(
    data,
    scale_d,
    scale_h,
    scale_w,
    layout="NCDHW",
    method="nearest_neighbor",
    coordinate_transformation_mode="half_pixel",
    output_shape=None,
):
    """Perform upsampling on the data.
    Nearest neighbor and bilinear upsampling are supported.
    Parameters
    ----------
    inputs : tvm.te.Tensor
        inputs is a 5-D tensor with shape
        [batch, channel, in_depth, in_height, in_width]
        or [batch, in_depth, in_height, in_width, channel]
    scale_d : float
        Scaling factor for depth
    scale_h : float
        Scaling factor for height
    scale_w : float
        Scaling factor for width
    layout : string, optional
        either "NCDHW" or "NDHWC"
    method : {"trilinear", "nearest_neighbor"}
        Method to be used for upsampling.
    coordinate_transformation_mode: string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        Refer to the ONNX Resize operator specification for details.
        Available options are "half_pixel", "align_corners" and "asymmetric".
    output_shape: tvm.tir.container.Array, optional
        Shape to return. If left None will be inferred
        (If shape is determined dynamically, pass out_dtype.shape as output_shape)
    Returns
    -------
    output : tvm.te.Tensor
        5-D with shape [batch, channel, in_depth*scale, in_height*scale, in_width*scale]
        or [batch, in_depth*scale, in_height*scale, in_width*scale, channel]
    """
    # NOTE(review): the first branch matches on base_layout while the second
    # matches the full layout string; also the dynamic branches cast with
    # data.shape dtypes (unlike 2-D upsampling, which uses output_shape
    # dtypes) — confirm both asymmetries are intentional.
    base_layout = layout[0:5]
    if base_layout == "NCDHW":
        if not output_shape:  # static case
            scaled_d = data.shape[2] * scale_d
            scaled_h = data.shape[3] * scale_h
            scaled_w = data.shape[4] * scale_w
            resize_shape = (
                simplify(topi.cast(te.round(scaled_d), data.shape[2].dtype)),
                simplify(topi.cast(te.round(scaled_h), data.shape[3].dtype)),
                simplify(topi.cast(te.round(scaled_w), data.shape[4].dtype)),
            )
        else:  # dynamic case -- don't need to scale; already done in shape func
            resize_shape = (
                simplify(topi.cast(te.round(output_shape[2]), data.shape[2].dtype)),
                simplify(topi.cast(te.round(output_shape[3]), data.shape[3].dtype)),
                simplify(topi.cast(te.round(output_shape[4]), data.shape[4].dtype)),
            )
    elif layout == "NDHWC":
        if not output_shape:  # static case
            scaled_d = data.shape[1] * scale_d
            scaled_h = data.shape[2] * scale_h
            scaled_w = data.shape[3] * scale_w
            resize_shape = (
                simplify(topi.cast(te.round(scaled_d), data.shape[1].dtype)),
                simplify(topi.cast(te.round(scaled_h), data.shape[2].dtype)),
                simplify(topi.cast(te.round(scaled_w), data.shape[3].dtype)),
            )
        else:  # dynamic case
            resize_shape = (
                simplify(topi.cast(te.round(output_shape[1]), data.shape[1].dtype)),
                simplify(topi.cast(te.round(output_shape[2]), data.shape[2].dtype)),
                simplify(topi.cast(te.round(output_shape[3]), data.shape[3].dtype)),
            )
    else:
        raise ValueError(f"not support this layout {layout} yet")
    # resize3d expects the bare method name, so strip the "tri" prefix
    # ("trilinear" -> "linear").
    if method[0:3] == "tri":
        method = method[3:]
    return topi.image.resize3d(
        data,
        [0.0] * 6,
        resize_shape,
        layout=layout,
        method=method,
        coordinate_transformation_mode=coordinate_transformation_mode,
    )
| 7,378 | 35.171569 | 88 | py |
tvm | tvm-main/python/tvm/topi/nn/depth_to_space.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""TVM operator depth_to_space compute."""
from __future__ import absolute_import
import tvm
from tvm import te
from .. import tag
def depth_to_space(data, block_size, layout="NCHW", mode="DCR"):
    """Rearrange channel-depth data into spatial blocks.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D tensor in either NCHW or NHWC layout.

    block_size : int
        Size of blocks to compose from channel dimension.

    layout : string
        Either NCHW or NHWC, indicating data layout.

    mode : string
        Either DCR or CDR, indicates how channels should be accessed.
        In DCR, channels are interwoven in the Tensorflow style while
        in CDR channels are accessed sequentially as in Pytorch.

    Returns
    -------
    output : tvm.te.Tensor
        Output of shape [N, C / block_size**2, H * block_size, W * block_size]
    """
    if layout not in ("NCHW", "NHWC"):
        raise ValueError("Only NCHW and NHWC layouts are currently supported.")

    if layout == "NCHW":
        batch, in_channels, in_h, in_w = data.shape
    else:
        batch, in_h, in_w, in_channels = data.shape
    # Each output channel gathers a block_size x block_size spatial patch.
    out_channels = tvm.tir.truncdiv(in_channels, (block_size * block_size))
    if layout == "NCHW":
        output_shape = [batch, out_channels, in_h * block_size, in_w * block_size]
    else:
        output_shape = [batch, in_h * block_size, in_w * block_size, out_channels]

    def _map_output_pixel(*indices):
        # Unpack indices according to layout, locate the source pixel.
        if layout == "NCHW":
            n, c, y, x = indices
        else:
            n, y, x, c = indices
        block_x = tvm.tir.truncdiv(x, block_size)
        block_y = tvm.tir.truncdiv(y, block_size)
        idx_x = tvm.tir.truncmod(x, block_size)
        idx_y = tvm.tir.truncmod(y, block_size)
        if mode == "DCR":
            # Tensorflow-style interleaved channels.
            channel_idx = out_channels * ((block_size * idx_y) + idx_x) + c
        else:
            # Pytorch-style sequential channels.
            channel_idx = (c * block_size * block_size) + ((block_size * idx_y) + idx_x)
        if layout == "NCHW":
            return data(n, channel_idx, block_y, block_x)
        return data(n, block_y, block_x, channel_idx)

    return te.compute(output_shape, _map_output_pixel, name="depth_to_space", tag=tag.INJECTIVE)
| 3,249 | 35.931818 | 88 | py |
tvm | tvm-main/python/tvm/topi/nn/sparse.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Sparse operators"""
from __future__ import absolute_import
import tvm
from tvm import te, auto_scheduler
from ..utils import get_const_tuple
def sparse_dense_sp_rhs(data, weight_data, weight_indices, weight_indptr):
    """
    Computes sparse-dense matrix multiplication of `data` and
    `(weight_data, weight_indices, weight_indptr).T`

    Parameters
    ----------
    data : tvm.te.Tensor
        2-D with shape [M, K]

    weight_data : tvm.te.Tensor
        1-D with shape [nnz] (CSR) or
        3-D with shape [num_blocks, bs_r, bs_c] (BSR)

    weight_indices : tvm.te.Tensor
        1-D with shape [nnz] (CSR) or
        1-D with shape [num_blocks] (BSR)

    weight_indptr : tvm.te.Tensor
        1-D with shape [N + 1] (CSR) or
        1-D with shape [(N + 1) // bs_r] (BSR)

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [M, N]
    """
    # A 1-D data array means CSR storage, 3-D means BSR blocks.
    ndim = len(weight_data.shape)
    assert ndim in (1, 3)
    if ndim == 1:
        return _sparse_dense_sp_rhs_csrmm(data, weight_data, weight_indices, weight_indptr)
    return _sparse_dense_sp_rhs_bsrmm(data, weight_data, weight_indices, weight_indptr)
def sparse_dense_sp_lhs(data_data, data_indices, data_indptr, weight):
    """
    Computes sparse-dense matrix multiplication of
    `(data_data, data_indices, data_indptr)` and `weight.T`

    Parameters
    ----------
    data_data:
        1-D with shape [nnz] (CSR) or
        3-D with shape [num_blocks, bs_r, bs_c] (BSR)

    data_indices:
        1-D with shape [nnz] (CSR) or
        1-D with shape [num_blocks] (BSR)

    data_indptr:
        1-D with shape [M + 1] (CSR) or
        1-D with shape [(M + 1) // bs_r] (BSR)

    weight:
        2-D with shape [N, K]

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [M, N]
    """
    # A 1-D data array means CSR storage, 3-D means BSR blocks.
    ndim = len(data_data.shape)
    assert ndim in (1, 3)
    if ndim == 1:
        return _sparse_dense_sp_lhs_csrmm(data_data, data_indices, data_indptr, weight)
    return _sparse_dense_sp_lhs_bsrmm(data_data, data_indices, data_indptr, weight)
# pylint: disable=no-else-return,inconsistent-return-statements
def sparse_dense(dense_data, sparse_data, sparse_indices, sparse_indptr, sparse_lhs=False):
    """Dispatch sparse-dense matmul to the sparse-lhs or sparse-rhs kernel.

    Computes sparse-dense matrix multiplication of `dense_data` and
    `(sparse_data, sparse_indices, sparse_indptr).T` when ``sparse_lhs`` is
    False, or of `(sparse_data, sparse_indices, sparse_indptr)` and
    `dense_data.T` when ``sparse_lhs`` is True.

    Parameters
    ----------
    dense_data : tvm.te.Tensor
        2-D with shape [M, K]

    sparse_data : tvm.te.Tensor
        1-D with shape [nnz] (CSR) or
        3-D with shape [num_blocks, bs_r, bs_c] (BSR)

    sparse_indices : tvm.te.Tensor
        1-D with shape [nnz] (CSR) or
        1-D with shape [num_blocks] (BSR)

    sparse_indptr : tvm.te.Tensor
        1-D with shape [N + 1] (CSR) or
        1-D with shape [(N + 1) // bs_r] (BSR)

    sparse_lhs : bool, optional
        Indicates whether lhs or rhs matrix is sparse. Default value is False.

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [M, N]
    """
    if sparse_lhs:
        return sparse_dense_sp_lhs(sparse_data, sparse_indices, sparse_indptr, dense_data)
    return sparse_dense_sp_rhs(dense_data, sparse_data, sparse_indices, sparse_indptr)
def _sparse_dense_sp_lhs_csrmm(data_data, data_indices, data_indptr, weight):
    """CSR lhs x dense rhs transposed: out[row, i] = sum(csr[row, :] * weight[i, :])."""
    num_rows = get_const_tuple(data_indptr.shape)[0] - 1
    num_cols = get_const_tuple(weight.shape)[0]

    def _row_dot(row, i):
        # Reduce over the nonzeros stored for this CSR row.
        start = data_indptr[row]
        nnz_in_row = data_indptr[row + 1] - start
        elem_idx = te.reduce_axis((0, nnz_in_row), name="elem_idx")
        elem = start + elem_idx
        return te.sum(data_data[elem] * weight[i, data_indices[elem]], axis=elem_idx)

    return te.compute((num_rows, num_cols), _row_dot, tag="sparse_dense_sp_lhs_csrmm")
def _sparse_dense_sp_rhs_csrmm(data, weight_data, weight_indices, weight_indptr):
    """Dense lhs x CSR rhs transposed: out[i, row] = sum(data[i, :] * csr[row, :])."""
    num_rows = get_const_tuple(data.shape)[0]
    num_cols = get_const_tuple(weight_indptr.shape)[0] - 1

    def _row_dot(i, row):
        # Reduce over the nonzeros stored for this CSR row of the weight.
        start = weight_indptr[row]
        nnz_in_row = weight_indptr[row + 1] - start
        elem_idx = te.reduce_axis((0, nnz_in_row), name="elem_idx")
        elem = start + elem_idx
        return te.sum(weight_data[elem] * data[i, weight_indices[elem]], axis=elem_idx)

    return te.compute((num_rows, num_cols), _row_dot, tag="sparse_dense_sp_rhs_csrmm")
def _sparse_dense_sp_lhs_bsrmm(data_data, data_indices, data_indptr, weight):
    """BSR lhs times dense rhs transposed, computed in two stages:
    a blocked matmul followed by a flattening of the block axes."""
    # m: number of rows of the dense weight (i.e. output columns).
    (m, _) = get_const_tuple(weight.shape)
    # Each sparse block is bs_r x bs_c.
    (_, bs_r, bs_c) = get_const_tuple(data_data.shape)
    (num_blocks_plus_1,) = get_const_tuple(data_indptr.shape)
    num_blocks = num_blocks_plus_1 - 1
    def _compute_block(nb_j, j, i):
        # Reduce over every block stored in block-row nb_j.
        row_start = data_indptr[nb_j]
        row_end = data_indptr[nb_j + 1]
        row_elems = row_end - row_start
        elem_idx = te.reduce_axis((0, row_elems), name="elem_idx")
        block_offset = row_start + elem_idx
        # Inner reduction over the columns of one block.
        c = te.reduce_axis((0, bs_c), name="c")
        block_j = data_indices[block_offset]
        block_ij_val = data_data[block_offset][j][c]
        x_val = weight[i, bs_c * block_j + c]
        return te.sum(block_ij_val * x_val, axis=[elem_idx, c])
    idxd = tvm.tir.indexdiv
    idxm = tvm.tir.indexmod
    # Stage 1: partial results laid out as (block_row, row_in_block, i).
    bsrmm_block = te.compute(
        (num_blocks, bs_r, m), _compute_block, tag="sparse_dense_sp_lhs_bsrmm_block"
    )
    # Stage 2: flatten (block_row, row_in_block) into a single row axis.
    return te.compute(
        (num_blocks * bs_r, m),
        lambda m, n: bsrmm_block[idxd(m, bs_r), idxm(m, bs_r), n],
        tag="sparse_dense_sp_lhs_bsrmm",
    )
def _sparse_dense_sp_rhs_bsrmm(data, weight_data, weight_indices, weight_indptr):
    """Dense lhs times BSR rhs transposed, computed in two stages:
    a blocked matmul followed by a flattening of the block axes."""
    (m, k) = get_const_tuple(data.shape)
    # Each sparse block is bs_r x bs_c.
    (_, bs_r, bs_c) = get_const_tuple(weight_data.shape)
    (num_blocks_plus_1,) = get_const_tuple(weight_indptr.shape)
    num_blocks = num_blocks_plus_1 - 1
    def _compute_block(i, nb_j, j):
        # Reduce over every block stored in block-row nb_j of the weight.
        row_start = weight_indptr[nb_j]
        row_end = weight_indptr[nb_j + 1]
        row_elems = row_end - row_start
        elem_idx = te.reduce_axis((0, row_elems), name="elem_idx")
        block_offset = row_start + elem_idx
        # Inner reduction over the columns of one block.
        c = te.reduce_axis((0, bs_c), name="c")
        block_j = weight_indices[block_offset]
        block_ij_val = weight_data[block_offset][j][c]
        x_val = data[i, bs_c * block_j + c]
        return te.sum(block_ij_val * x_val, axis=[elem_idx, c])
    idxd = tvm.tir.indexdiv
    idxm = tvm.tir.indexmod
    # Stage 1: partial results laid out as (i, block_row, row_in_block).
    # The FLOP attr tells the auto-scheduler the true cost of the sparse op.
    bsrmm_block = te.compute(
        (m, num_blocks, bs_r),
        _compute_block,
        tag="sparse_dense_sp_rhs_bsrmm_block",
        attrs={"FLOP": 2 * m * num_blocks * bs_r * k},
    )
    # Stage 2: flatten (block_row, row_in_block) into a single output column axis.
    return te.compute(
        (m, num_blocks * bs_r),
        lambda m, n: bsrmm_block[m, idxd(n, bs_r), idxm(n, bs_r)],
        tag="sparse_dense_sp_rhs_bsrmm",
    )
def sparse_transpose(sparse_data, sparse_indices, sparse_indptr):
    """
    Transpose a square sparse matrix,
    `A` is an n-by-n sparse matrix in the CSR format.

    ** Currently only support Square Matrices **

    Parameters
    ----------
    sparse_data : tvm.te.Tensor
        1-D with shape [nonzeros]

    sparse_indices : tvm.te.Tensor
        1-D with shape [nonzeros], dtype of 'int32'

    sparse_indptr : tvm.te.Tensor
        1-D with shape [n+1], dtype of 'int32'

    Returns
    -------
    out_data : tvm.te.Tensor
        1-D with shape [nonzeros]

    out_indices : tvm.te.Tensor
        1-D with shape [nonzeros], dtype of 'int32'

    out_indptr : tvm.te.Tensor
        1-D with shape [n+1], dtype of 'int32'
    """
    assert len(sparse_data.shape) == 1, "error in data dimension"
    assert len(sparse_indices.shape) == 1, "error in indices dimension"
    assert len(sparse_indptr.shape) == 1, "error in indptr dimension"

    nnz = get_const_tuple(sparse_data.shape)[0]
    n = get_const_tuple(sparse_indptr.shape)[0] - 1

    # The transposed matrix keeps the same nnz and (square) dimension.
    # TODO: Add BSR transpose support
    out_data, out_indices, out_indptr = te.extern(
        shape=[(nnz,), (nnz,), (n + 1,)],
        inputs=[sparse_data, sparse_indices, sparse_indptr],
        fcompute=lambda ins, outs: _csr_transpose_ir(
            ins[0], ins[1], ins[2], outs[0], outs[1], outs[2]
        ),
        tag="sparse_transpose_csr",
        dtype=[sparse_data.dtype, "int32", "int32"],
        name="out",
    )
    return [out_data, out_indices, out_indptr]
def _csr_transpose_ir(data, indices, indptr, out_data, out_indices, out_indptr):
    """Build the TIR for an in-place CSR transpose (the classic csr->csc
    conversion: histogram of column counts, exclusive prefix sum, scatter,
    then shift the indptr back by one position)."""
    irb = tvm.tir.ir_builder.create()
    data_ptr = irb.buffer_ptr(data)
    indices_ptr = irb.buffer_ptr(indices)
    indptr_ptr = irb.buffer_ptr(indptr)
    out_data_ptr = irb.buffer_ptr(out_data)
    out_indices_ptr = irb.buffer_ptr(out_indices)
    out_indptr_ptr = irb.buffer_ptr(out_indptr)
    n = get_const_tuple(indptr.shape)[0] - 1
    nnz = get_const_tuple(data.shape)[0]
    # Phase 1: count the nonzeros per column into out_indptr.
    with irb.for_range(0, n, kind="parallel", name="col") as col:
        out_indptr_ptr[col] = 0
    with irb.for_range(0, nnz, kind="serial", name="nz_idx") as nz_idx:
        out_indptr_ptr[indices_ptr[nz_idx]] += 1
    # Phase 2: exclusive prefix sum turns counts into column start offsets.
    cumsum = irb.allocate("int32", (1,), name="cumsum", scope="local")
    temp = irb.allocate("int32", (1,), name="temp", scope="local")
    cumsum[0] = 0
    with irb.for_range(0, n, kind="serial", name="col") as col:
        temp[0] = out_indptr_ptr[col]
        out_indptr_ptr[col] = cumsum[0]
        cumsum[0] += temp[0]
    out_indptr_ptr[n] = nnz
    # Phase 3: scatter every element into its transposed position.
    # out_indptr is used as a running write cursor per column, so after this
    # loop each entry has been advanced by one column's worth of elements.
    with irb.for_range(0, n, kind="serial", name="row") as row:
        offset = indptr_ptr[row]
        diff = indptr_ptr[row + 1] - indptr_ptr[row]
        with irb.for_range(0, diff, kind="serial", name="idx") as idx:
            real_idx = offset + idx
            col = indices_ptr[real_idx]
            dest = out_indptr_ptr[col]
            out_indices_ptr[dest] = row
            out_data_ptr[dest] = data_ptr[real_idx]
            out_indptr_ptr[col] += 1
    # Phase 4: shift the advanced cursors back by one slot to restore a
    # valid indptr array.
    last = irb.allocate("int32", (1,), name="last", scope="local")
    temp2 = irb.allocate("int32", (1,), name="temp2", scope="local")
    last[0] = 0
    with irb.for_range(0, n, kind="serial", name="col") as col:
        temp2[0] = out_indptr_ptr[col]
        out_indptr_ptr[col] = last[0]
        last[0] = temp2[0]
    return irb.get()
@tvm.target.generic_func
def sparse_dense_alter_layout(_attrs, _inputs, _tinfos, _out_type):
    """Change Sparse Dense layout.

    This is the generic hook used for modifying the input weights so they
    are more amenable for the target; targets register their own
    specializations, and this default performs no transformation.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : tvm.relay.Expr
        Grouped input symbols
    tinfos : list
        Input shape and dtype
    out_type: type
        The output type

    Note
    ----
    Unlike other TOPI functions, this function operates on both graph level
    and operator level.
    """
    # Default implementation: leave the layout untouched.
    return None
@auto_scheduler.register_task_input_check_func
def try_get_sparse_input(args):
    """Analyze the input data from the given args.

    Walks the tensor expression DAG looking for a ``sparse_dense_sp_rhs_bsrmm``
    compute op and, if found, identifies its dense/sparse input tensors by
    their ranks and shapes.

    Parameters
    ----------
    args : List[Tensor]
        Input/output Tensor of a TVM subgraph.

    Returns
    -------
    Dict[Tensor, str] :
        Map from the input Tensor to its buffer name.

    Notes
    -----
    The buffer name is specially designed, and these buffer should be provided in
    `SearchTask(..., task_inputs={...})`.
    """
    # Results are published through these nonlocals by _process_inputs.
    sparse_prefix = sparse_data = sparse_indices = sparse_indptr = None
    def _process_inputs(input_tensors, m, n, prefix_init):
        nonlocal sparse_prefix
        nonlocal sparse_data
        nonlocal sparse_indices
        nonlocal sparse_indptr
        assert len(input_tensors) == 4
        unsure_tensors = list(input_tensors)
        # Get the Dense data: the only rank-2 input.
        dense_data = None
        for tensor in unsure_tensors:
            if len(tensor.shape) == 2:
                assert dense_data is None
                dense_data = tensor
                assert m == dense_data.shape[0]
                k = dense_data.shape[1]
        unsure_tensors.remove(dense_data)
        # Get the Sparse data: the only rank-3 input (BSR blocks).
        sparse_data = None
        for tensor in unsure_tensors:
            if len(tensor.shape) == 3:
                assert sparse_data is None
                sparse_data = tensor
                block_size, bs_r, bs_c = sparse_data.shape
        unsure_tensors.remove(sparse_data)
        # Get the Sparse indptr & indices: indices has one entry per block.
        sparse_indices = None
        for tensor in unsure_tensors:
            assert len(tensor.shape) == 1
            if tensor.shape[0] == block_size:
                assert sparse_indices is None
                sparse_indices = tensor
        unsure_tensors.remove(sparse_indices)
        assert len(unsure_tensors) == 1
        sparse_indptr = unsure_tensors[0]
        # Generate the sparse_prefix encoding the workload's sparse layout.
        density = 1.0
        for i in sparse_data.shape:
            density *= i
        density /= k * n
        density = density.value
        sparse_prefix = "%s_%d_%d_%d_%d_%d_%d_" % (
            prefix_init,
            n,
            k,
            bs_r,
            bs_c,
            sparse_indices.shape[0],
            sparse_indptr.shape[0],
        )
    visited = set()
    def _traverse(t):
        # We cannot directly add tensors to the set, because the comparison of
        # two tensors with ndim=0 is ambiguous.
        assert t.handle is not None
        if t.handle.value in visited:
            return
        if isinstance(t.op, te.ComputeOp):
            # TODO(jcf94): Currently only support to one sparse op, add more support here
            if t.op.tag == "sparse_dense_sp_rhs_bsrmm":
                m, n = t.shape
                assert len(t.op.input_tensors) == 1
                block_tensor = t.op.input_tensors[0]
                _process_inputs(block_tensor.op.input_tensors, m, n, "sparse_dense_bsr")
            if sparse_prefix is not None:
                # Early stop if we find a sparse_prefix
                # Notice: If any workload has more than one sparse input, this may get problem
                return
            for x in t.op.input_tensors:
                _traverse(x)
        visited.add(t.handle.value)
    try:
        for arg in args:
            _traverse(arg)
    # pylint: disable=broad-except
    except Exception:
        return {}
    if sparse_data is None or sparse_indices is None or sparse_indptr is None:
        return {}
    sparse_input_map = {}
    sparse_input_map[sparse_data] = sparse_prefix + "W_data"
    sparse_input_map[sparse_indices] = sparse_prefix + "W_indices"
    sparse_input_map[sparse_indptr] = sparse_prefix + "W_indptr"
    return sparse_input_map
def _sparse_conv2d_bsr_compute_nhwc(data, weight_data, weight_indices, weight_indptr):
    """1x1 sparse conv2d (NHWC) with BSR weights: a per-pixel sparse matmul
    over the channel axis, followed by flattening of the block axes.
    2-D weight_data means bs_c == 1 blocks; 3-D means full bs_r x bs_c blocks."""
    (m, h, w, k) = get_const_tuple(data.shape)  # pylint: disable=C0103
    if len(weight_data.shape) == 2:
        _, bs_r = get_const_tuple(weight_data.shape)
    elif len(weight_data.shape) == 3:
        _, bs_r, bs_c = get_const_tuple(weight_data.shape)
    (num_blocks_plus_1,) = get_const_tuple(weight_indptr.shape)
    num_blocks = num_blocks_plus_1 - 1
    def _compute_block(i, h, w, nb_j, j):  # pylint: disable=C0103
        # Reduce over all blocks stored in block-row nb_j.
        row_start = weight_indptr[nb_j]
        row_end = weight_indptr[nb_j + 1]
        row_elems = row_end - row_start
        elem_idx = te.reduce_axis((0, row_elems), name="elem_idx")
        block_offset = row_start + elem_idx
        block_j = weight_indices[block_offset]
        if len(weight_data.shape) == 3:
            # Full blocks: also reduce over the bs_c columns inside each block.
            c = te.reduce_axis((0, bs_c), name="c")
            block_ij_val = weight_data[block_offset][j][c]
            x_val = data[i, h, w, bs_c * block_j + c]
            return te.sum(block_ij_val * x_val, axis=[elem_idx, c])
        else:
            # bs_c == 1: each block entry multiplies a single input channel.
            block_ij_val = weight_data[block_offset][j]
            x_val = data[i, h, w, block_j]
            return te.sum(block_ij_val * x_val, axis=[elem_idx])
    idxd = tvm.tir.indexdiv
    idxm = tvm.tir.indexmod
    # Stage 1: blocked partial results; FLOP attr informs the auto-scheduler.
    bsrmm_block = te.compute(
        (m, h, w, num_blocks, bs_r),
        _compute_block,
        tag="sparse_conv2d_sp_bsrmm_block",
        attrs={"FLOP": 2 * m * num_blocks * bs_r * k * h * w},
    )
    # Stage 2: flatten (block_row, row_in_block) into the output channel axis.
    return te.compute(
        (m, h, w, num_blocks * bs_r),
        lambda m, h, w, n: bsrmm_block[m, h, w, idxd(n, bs_r), idxm(n, bs_r)],
        tag="sparse_conv2d_sp_bsrmm",
        name="sparse_conv2d",
        attrs={"layout": "NHWC"},
    )
def _sparse_conv2d_bsr_compute_nchw(data, weight_data, weight_indices, weight_indptr):
    """1x1 sparse conv2d (NCHW) with BSR weights: a per-pixel sparse matmul
    over the channel axis, followed by flattening of the block axes.
    2-D weight_data means bs_c == 1 blocks; 3-D means full bs_r x bs_c blocks."""
    (m, k, h, w) = get_const_tuple(data.shape)  # pylint: disable=C0103
    if len(weight_data.shape) == 2:
        _, bs_r = get_const_tuple(weight_data.shape)
    elif len(weight_data.shape) == 3:
        _, bs_r, bs_c = get_const_tuple(weight_data.shape)
    (num_blocks_plus_1,) = get_const_tuple(weight_indptr.shape)
    num_blocks = num_blocks_plus_1 - 1
    def _compute_block(i, nb_j, j, h, w):  # pylint: disable=C0103
        # Reduce over all blocks stored in block-row nb_j.
        row_start = weight_indptr[nb_j]
        row_end = weight_indptr[nb_j + 1]
        row_elems = row_end - row_start
        elem_idx = te.reduce_axis((0, row_elems), name="elem_idx")
        block_offset = row_start + elem_idx
        block_j = weight_indices[block_offset]
        if len(weight_data.shape) == 3:
            # Full blocks: also reduce over the bs_c columns inside each block.
            c = te.reduce_axis((0, bs_c), name="c")
            block_ij_val = weight_data[block_offset][j][c]
            x_val = data[i, bs_c * block_j + c, h, w]
            return te.sum(block_ij_val * x_val, axis=[elem_idx, c])
        else:
            # bs_c == 1: each block entry multiplies a single input channel.
            block_ij_val = weight_data[block_offset][j]
            x_val = data[i, block_j, h, w]
            return te.sum(block_ij_val * x_val, axis=[elem_idx])
    idxd = tvm.tir.indexdiv
    idxm = tvm.tir.indexmod
    # Stage 1: blocked partial results; FLOP attr informs the auto-scheduler.
    bsrmm_block = te.compute(
        (m, num_blocks, bs_r, h, w),
        _compute_block,
        tag="sparse_conv2d_sp_bsrmm_block",
        attrs={"FLOP": 2 * m * num_blocks * bs_r * k * h * w},
    )
    # Stage 2: flatten (block_row, row_in_block) into the output channel axis.
    return te.compute(
        (m, num_blocks * bs_r, h, w),
        lambda m, n, h, w: bsrmm_block[m, idxd(n, bs_r), idxm(n, bs_r), h, w],
        tag="sparse_conv2d_sp_bsrmm",
        name="sparse_conv2d",
        attrs={"layout": "NCHW"},
    )
def sparse_conv2d(
    dense_data, sparse_data, sparse_indices, sparse_indptr, layout="NHWC", kernel_size=1
):
    """
    Computes sparse-conv2d(1*1) of ``data`` and
    ``(weight_data, weight_indices, weight_indptr)``

    Parameters
    ----------
    dense_data : tvm.te.Tensor
        4-D with shape ``[M, H, W, K]`` (layout=NHWC)

        4-D with shape ``[M, K, H, W]`` (layout=NCHW)

    sparse_data : tvm.te.Tensor
        2-D with shape ``[num_blocks, bs_r]`` (BSR)

        3-D with shape ``[num_blocks, bs_r, bs_c]`` (BSR)

    sparse_indices : tvm.te.Tensor
        1-D with shape ``[num_blocks]`` (BSR)

    sparse_indptr : tvm.te.Tensor
        1-D with shape ``[(N + 1) // bs_r]`` (BSR)

    layout : str
        layout of data

    kernel_size : int
        Convolution kernel size; only 1 (pointwise convolution) is supported.

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [M, H, W, N] (layout=NHWC)
        4-D with shape [M, N, H ,W] (layout=NCHW)

    Raises
    ------
    ValueError
        If ``kernel_size`` is not 1 or ``layout`` is not NHWC/NCHW.
    """
    # Previously a kernel_size other than 1 silently fell through and
    # returned None; fail loudly instead so misuse is caught at trace time.
    if kernel_size != 1:
        raise ValueError(f"Unsupported kernel_size {kernel_size}, only 1x1 is implemented")
    if layout == "NHWC":
        return _sparse_conv2d_bsr_compute_nhwc(
            dense_data, sparse_data, sparse_indices, sparse_indptr
        )
    if layout == "NCHW":
        return _sparse_conv2d_bsr_compute_nchw(
            dense_data, sparse_data, sparse_indices, sparse_indptr
        )
    raise ValueError(f"Unsupport Layout {layout}")
@auto_scheduler.register_task_input_check_func
def try_get_conv2d_sparse_input(args):
    """Analyze the input data from the given args.

    Walks the tensor expression DAG looking for a ``sparse_conv2d_sp_bsrmm``
    compute op and, if found, identifies its dense/sparse input tensors by
    their ranks and shapes.

    Parameters
    ----------
    args : List[Tensor]
        Input/output Tensor of a TVM subgraph.

    Returns
    -------
    Dict[Tensor, str] :
        Map from the input Tensor to its buffer name.

    Notes
    -----
    The buffer name is specially designed, and these buffer should be provided in
    `SearchTask(..., task_inputs={...})`.
    """
    # Results are published through these nonlocals by _process_inputs.
    sparse_prefix = sparse_data = sparse_indices = sparse_indptr = None
    def _process_inputs(input_tensors, m, h, w, n, prefix_init, layout):  # pylint: disable=C0103
        nonlocal sparse_prefix
        nonlocal sparse_data
        nonlocal sparse_indices
        nonlocal sparse_indptr
        assert len(input_tensors) == 4
        unsure_tensors = list(input_tensors)
        # Get the Dense data: the only rank-4 input.
        dense_data = None
        for tensor in unsure_tensors:
            if len(tensor.shape) == 4:
                assert dense_data is None
                dense_data = tensor
                if layout == "NHWC":
                    assert m == dense_data.shape[0]
                    assert h == dense_data.shape[1]
                    assert w == dense_data.shape[2]
                    k = dense_data.shape[3]
                elif layout == "NCHW":
                    assert m == dense_data.shape[0]
                    assert h == dense_data.shape[2]
                    assert w == dense_data.shape[3]
                    k = dense_data.shape[1]
        unsure_tensors.remove(dense_data)
        # Get the Sparse data: rank-3 for full BSR blocks, rank-2 for bs_c == 1.
        sparse_data = None
        for tensor in unsure_tensors:
            if len(tensor.shape) == 3:
                assert sparse_data is None
                sparse_data = tensor
                block_size, bs_r, bs_c = sparse_data.shape
            if len(tensor.shape) == 2:
                assert sparse_data is None
                sparse_data = tensor
                block_size, bs_r = sparse_data.shape
                bs_c = 1
        unsure_tensors.remove(sparse_data)
        # Get the Sparse indptr & indices: indices has one entry per block.
        sparse_indices = None
        for tensor in unsure_tensors:
            assert len(tensor.shape) == 1
            if tensor.shape[0] == block_size:
                assert sparse_indices is None
                sparse_indices = tensor
        unsure_tensors.remove(sparse_indices)
        assert len(unsure_tensors) == 1
        sparse_indptr = unsure_tensors[0]
        # Generate the sparse_prefix encoding the workload's sparse layout.
        density = 1.0
        for i in sparse_data.shape:
            density *= i
        density /= k * n
        density = density.value
        sparse_prefix = "%s_%d_%d_%d_%d_%d_%d_" % (
            prefix_init,
            n,
            k,
            bs_r,
            bs_c,
            sparse_indices.shape[0],
            sparse_indptr.shape[0],
        )
    visited = set()
    def _traverse(t):
        # We cannot directly add tensors to the set, because the comparison of
        # two tensors with ndim=0 is ambiguous.
        assert t.handle is not None
        if t.handle.value in visited:
            return
        if isinstance(t.op, te.ComputeOp):
            if t.op.tag == "sparse_conv2d_sp_bsrmm":
                m, h, w, n = t.shape  # pylint: disable=C0103
                assert len(t.op.input_tensors) == 1
                block_tensor = t.op.input_tensors[0]
                _process_inputs(
                    block_tensor.op.input_tensors,
                    m,
                    h,
                    w,
                    n,
                    "sparse_conv2d_bsr",
                    t.op.attrs["layout"],
                )
            if sparse_prefix is not None:
                # Early stop if we find a sparse_prefix
                # Notice: If any workload has more than one sparse input, this may get problem
                return
            for x in t.op.input_tensors:
                _traverse(x)
        visited.add(t.handle.value)
    try:
        for arg in args:
            _traverse(arg)
    # pylint: disable=broad-except
    except Exception:
        return {}
    if sparse_data is None or sparse_indices is None or sparse_indptr is None:
        return {}
    sparse_input_map = {}
    sparse_input_map[sparse_data] = sparse_prefix + "W_data"
    sparse_input_map[sparse_indices] = sparse_prefix + "W_indices"
    sparse_input_map[sparse_indptr] = sparse_prefix + "W_indptr"
    return sparse_input_map
def sparse_add(dense_data, sparse_data, sparse_indices, sparse_indptr):
    """Element-wise addition of a dense matrix and a CSR sparse matrix.

    Parameters
    ----------
    dense_data : tvm.te.Tensor
        2-D with shape [M, N]

    sparse_data : tvm.te.Tensor
        1-D with shape [nnz] (CSR)

    sparse_indices : tvm.te.Tensor
        1-D with shape [nnz] (CSR)

    sparse_indptr : tvm.te.Tensor
        1-D with shape [M + 1] (CSR)

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [M, N]
    """
    # TODO(ANSHUMAN87): support BSR format too
    # A rank-1 data array is the CSR signature; BSR (rank-3) is unsupported.
    assert len(sparse_data.shape) == 1, "only CSR format is supported"
    return _sparse_add_csr(dense_data, sparse_data, sparse_indices, sparse_indptr)
def _sparse_add_csr(dense_data_inp, sparse_data_inp, sparse_indices_inp, sparse_indptr_inp):
    """Dense + CSR addition lowered through an explicit TIR extern op:
    copy the dense matrix to the output, then scatter-add the CSR nonzeros."""
    oshape = get_const_tuple(dense_data_inp.shape)
    def _csr_add_ir(dense_data, sparse_data, sparse_indices, sparse_indptr, out_data):
        irb = tvm.tir.ir_builder.create()
        dense_data_ptr = irb.buffer_ptr(dense_data)
        sparse_data_ptr = irb.buffer_ptr(sparse_data)
        sparse_indices_ptr = irb.buffer_ptr(sparse_indices)
        sparse_indptr_ptr = irb.buffer_ptr(sparse_indptr)
        out_data_ptr = irb.buffer_ptr(out_data)
        # Phase 1: initialize the output with the dense operand.
        with irb.for_range(0, oshape[0], kind="vectorize", name="row") as row:
            with irb.for_range(0, oshape[1], kind="parallel", name="col") as col:
                out_data_ptr[row, col] = dense_data_ptr[row, col]
        # Phase 2: add each row's stored CSR nonzeros at their column positions.
        with irb.for_range(0, oshape[0], kind="parallel", name="row") as row:
            offset = sparse_indptr_ptr[row]
            diff = sparse_indptr_ptr[row + 1] - sparse_indptr_ptr[row]
            with irb.for_range(0, diff, kind="serial", name="idx") as idx:
                real_idx = offset + idx
                col = sparse_indices_ptr[real_idx]
                out_data_ptr[row, col] = sparse_data_ptr[real_idx] + out_data_ptr[row, col]
        return irb.get()
    return te.extern(
        shape=oshape,
        inputs=[dense_data_inp, sparse_data_inp, sparse_indices_inp, sparse_indptr_inp],
        fcompute=lambda ins, outs: _csr_add_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
        tag="sparse_add_csr",
        dtype=[
            dense_data_inp.dtype,
            sparse_data_inp.dtype,
            sparse_indices_inp.dtype,
            sparse_indptr_inp.dtype,
        ],
        name="sparse_add_csr_output",
    )
| 27,503 | 32.664627 | 97 | py |
tvm | tvm-main/python/tvm/topi/nn/loss.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Loss functions definitions."""
from __future__ import absolute_import
from . import cpp
def nll_loss(predictions, targets, weights, reduction, ignore_index):
    """Negative log likelihood loss on the input data.

    For every element,

        output{n, i_1, ..., i_k} = -p * w

    where ``t = targets{n, i_1, ..., i_k}``, ``p = predictions{n, t, i_1, ..., i_k}``
    and ``w`` is the weight associated with ``t`` (zero when
    ``t == ignore_index``); ``reduction`` is then applied to ``output``.

    Parameters
    ----------
    predictions : tvm.te.Tensor
        (k+2)-D with shape (N, C, d_1, d_2, ..., d_k),
        where C is the number of target classes.

    targets : tvm.te.Tensor
        (k+1)-D with shape (N, d_1, d_2, ..., d_k)
        The target value of the input.

    weights : tvm.te.Tensor
        1-D with shape (C,)
        The weight of each target value.

    reduction : string
        The reduction method to apply to output.
        Can be "mean", "sum" or "none".

    ignore_index : int
        The target value to ignore.

    Returns
    -------
    output : tvm.te.Tensor
        a scalar if the reduction type is "mean" or "sum",
        otherwise the same shape as `target`.
    """
    # Delegate to the C++ TOPI implementation.
    return cpp.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)
| 2,100 | 33.442623 | 82 | py |
tvm | tvm-main/python/tvm/topi/nn/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable
"""NN operator common utilities"""
from __future__ import absolute_import
import tvm
from ..utils import get_const_int
def infer_pad(data, data_pad):
    """Infer the padding from stages in reverse.

    Parameters
    ----------
    data : Tensor
        data stage.

    data_pad : Tensor
        pad stage.

    Returns
    -------
    hpad : int
        padding size on height

    wpad : int
        padding size on width
    """
    if data_pad is None:
        return 0, 0
    # Symmetric padding is assumed: half the size difference on each side.
    _, _, in_h, in_w = data.shape
    _, _, padded_h, padded_w = data_pad.shape
    return get_const_int((padded_h - in_h) // 2), get_const_int((padded_w - in_w) // 2)
def infer_pad3d(data, data_pad, layout):
    """Infer the padding from stages in reverse.

    Parameters
    ----------
    data : Tensor
        data stage.

    data_pad : Tensor
        pad stage.

    layout : str
        Data layout, "NDHWC" or "NCDHW".

    Returns
    -------
    dpad : int
        padding depth
    hpad : int
        padding height
    wpad : int
        padding width
    """
    if data_pad is None:
        return 0, 0, 0

    if layout == "NDHWC":
        _, in_d, in_h, in_w, _ = data.shape
        _, padded_d, padded_h, padded_w, _ = data_pad.shape
    elif layout == "NCDHW":
        _, _, in_d, in_h, in_w = data.shape
        _, _, padded_d, padded_h, padded_w = data_pad.shape
    else:
        raise ValueError(f"Layout {layout} is not supported")
    # Total padding is the size difference per spatial dimension.
    return (
        get_const_int(padded_d - in_d),
        get_const_int(padded_h - in_h),
        get_const_int(padded_w - in_w),
    )
def infer_stride(data, kernel, out):
    """Infer the stride from stages in reverse.

    Parameters
    ----------
    data : Tensor
        data stage.

    kernel : Tensor
        kernel stage.

    out : Tensor
        output stage.

    Returns
    -------
    hstride : int
        stride size on height

    wstride : int
        stride size on width
    """
    _, _, data_h, data_w = data.shape
    _, _, k_h, k_w = kernel.shape
    _, _, out_h, out_w = out.shape
    # stride = (input - kernel) / (output - 1); the Select handles the
    # degenerate single-output case where the divisor would be meaningless.
    hstride = (data_h - k_h) // tvm.te.max(out_h - 1, 1) + tvm.tir.Select(out_h == 1, 1, 0)
    wstride = (data_w - k_w) // tvm.te.max(out_w - 1, 1) + tvm.tir.Select(out_w == 1, 1, 0)
    return get_const_int(hstride), get_const_int(wstride)
def get_pad_tuple(padding, kernel):
    """Common code to get the pad option

    Parameters
    ----------
    padding : int or str
        Padding size, or ['VALID', 'SAME']

    kernel : tuple of int
        Conv kernel size

    Returns
    -------
    pad_top : int
        Padding size on top

    pad_left : int
        Padding size on left

    pad_down : int
        Padding size on down.

    pad_right : int
        Padding size on right.
    """
    # First derive the TOTAL padding per dimension, then split begin/end.
    if isinstance(padding, (tuple, list)):
        if len(padding) == 4:
            # Fully explicit (top, left, down, right): pass through.
            return padding[0], padding[1], padding[2], padding[3]
        if len(padding) == 2:
            pad_h = padding[0] * 2
            pad_w = padding[1] * 2
        else:
            raise ValueError("Size of padding can only be 2 or 4")
    elif isinstance(padding, int):
        pad_h = pad_w = padding * 2
    elif padding == "VALID":
        pad_h = pad_w = 0
    elif padding == "SAME":
        pad_h = kernel[0] - 1
        pad_w = kernel[1] - 1
    else:
        raise ValueError(f"Unknown padding option {padding}")
    # Odd totals put the extra element at the beginning (top/left).
    pad_top = (pad_h + 1) // 2
    pad_left = (pad_w + 1) // 2
    return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left
def get_pad_tuple_generic(padding, kernel):
    """Common code to get the pad option for an arbitrary number of
    spatial dimensions.

    Parameters
    ----------
    padding : int, str, or sequence of int
        Symmetric padding size, one of ['VALID', 'SAME'], a per-dimension
        sequence of length ``len(kernel)``, or a fully explicit
        (begin..., end...) sequence of length ``2 * len(kernel)``.

    kernel : tuple of int
        Conv kernel size, one entry per spatial dimension.

    Returns
    -------
    pad_begin : list of int
        Padding size at the beginning of each spatial dimension.

    pad_end : list of int
        Padding size at the end of each spatial dimension.

    Note
    ----
    The explicit-padding path returns ``(pad_begin, pad_end)`` as a tuple
    while the other paths return ``[pad_begin, pad_end]`` as a list; both
    unpack identically, and the forms are kept for backward compatibility.
    """
    # First derive the TOTAL padding per dimension, then split begin/end.
    if isinstance(padding, (tuple, list)):
        if len(padding) == len(kernel):
            pad_dimensions = [p * 2 for p in padding]
        elif len(padding) == len(kernel) * 2:
            # Fully explicit (begin..., end...) padding: pass through.
            return (
                [padding[i] for i in range(len(kernel))],
                [padding[len(kernel) + i] for i in range(len(kernel))],
            )
        else:
            raise ValueError("Size of padding can only be len(kernel) or len(kernel) * 2")
    elif isinstance(padding, int):
        pad_dimensions = [padding * 2 for _ in range(len(kernel))]
    elif padding == "VALID":
        pad_dimensions = [0 for _ in range(len(kernel))]
    elif padding == "SAME":
        pad_dimensions = [k - 1 for k in kernel]
    else:
        raise ValueError(f"Unknown padding option {padding}")
    # Odd totals put the extra element at the beginning of the dimension.
    pad_begin = [(p + 1) // 2 for p in pad_dimensions]
    return [pad_begin, [pd - pb for pb, pd in zip(pad_begin, pad_dimensions)]]
def get_pad_tuple3d(padding, kernel):
    """Common code to get the pad option

    Parameters
    ----------
    padding : int or str
        Padding size, or ['VALID', 'SAME']

    kernel : tuple of int
        Conv kernel size

    Returns
    -------
    pad_front : int
        Padding size on front.

    pad_top : int
        Padding size on top

    pad_left : int
        Padding size on left

    pad_back : int
        Padding size on back.

    pad_down : int
        Padding size on down.

    pad_right : int
        Padding size on right.
    """
    # First derive the TOTAL padding per dimension, then split begin/end.
    if isinstance(padding, (tuple, list)):
        if len(padding) == 6:
            # Fully explicit (front, top, left, back, down, right): pass through.
            return padding[0], padding[1], padding[2], padding[3], padding[4], padding[5]
        if len(padding) == 3:
            pad_d = padding[0] * 2
            pad_h = padding[1] * 2
            pad_w = padding[2] * 2
        else:
            raise ValueError("Size of padding can only be 3 or 6")
    elif isinstance(padding, int):
        pad_d = pad_h = pad_w = padding * 2
    elif padding == "VALID":
        pad_d = pad_h = pad_w = 0
    elif padding == "SAME":
        pad_d = kernel[0] - 1
        pad_h = kernel[1] - 1
        pad_w = kernel[2] - 1
    else:
        raise ValueError(f"Unknown padding option {padding}")
    # Odd totals put the extra element at the beginning (front/top/left).
    pad_front = (pad_d + 1) // 2
    pad_top = (pad_h + 1) // 2
    pad_left = (pad_w + 1) // 2
    return pad_front, pad_top, pad_left, pad_d - pad_front, pad_h - pad_top, pad_w - pad_left
def get_pad_tuple1d(padding, kernel):
    """Common code to get the pad option

    Parameters
    ----------
    padding : int or str
        Padding size, or ['VALID', 'SAME']

    kernel : tuple of int
        Conv kernel size

    Returns
    -------
    pad_left : int
        Padding size on left

    pad_right : int
        Padding size on right.
    """
    # compute the padding size
    if isinstance(padding, (tuple, list)):
        if len(padding) == 1:
            pad_w = padding[0] * 2
        elif len(padding) == 2:
            # Explicit (left, right) padding: return as-is.
            return padding[0], padding[1]
        else:
            # Fix: the accepted sequence lengths are 1 or 2 (the old message
            # incorrectly said "2 or 4", copied from the 2-D variant).
            raise ValueError("Size of padding can only be 1 or 2")
    elif isinstance(padding, int):
        pad_w = padding * 2
    elif padding == "VALID":
        pad_w = 0
    elif padding == "SAME":
        pad_w = kernel[0] - 1
    else:
        raise ValueError(f"Unknown padding option {padding}")
    # Left side takes the larger half when the total padding is odd.
    pad_left = (pad_w + 1) // 2
    return pad_left, pad_w - pad_left
| 8,120 | 25.11254 | 93 | py |
tvm | tvm-main/python/tvm/topi/nn/conv1d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""1D convolution operators."""
from .conv2d import conv
def conv1d(
    data,
    kernel,
    strides=1,
    padding="VALID",
    dilation=1,
    data_layout="NCW",
    kernel_layout="",
    out_dtype=None,
):
    """1D convolution forward operator.

    Parameters
    ----------
    data : tvm.te.Tensor
        3-D input shape [batch, in_channel, in_width] for data_layout == 'NCW'
        and [batch, in_width, in_channel] for data_layout == 'NWC'

    kernel : tvm.te.Tensor
        3-D kernel with shape [num_filter, in_channel, filter_size] for kernel_layout == 'OIW'
        and [filter_size, in_channel, num_filter] for kernel_layout == 'WIO'

    strides : int or tuple
        The spatial stride along width

    padding : int or str
        Padding size, or ['VALID', 'SAME']

    dilation : int or tuple
        Dilation rate if convolution should be dilated.

    data_layout : str
        How input data is laid out, must be one of ['NCW', 'NWC']

    kernel_layout: Optional[str]
        The layout of the kernel. If unspecified, use default layout. "OIW" if data_layout == "NCW",
        "WIO" if data_layout == "NWC".

    out_dtype : str
        The output data type. If None then output is same type as input.

    Returns
    -------
    output : tvm.te.Tensor
        The result of :py:func:`conv` with ``groups=1``.
    """
    # Delegate to the generic N-D convolution with a single group.
    return conv(data, kernel, strides, padding, dilation, 1, data_layout, kernel_layout, out_dtype)
def conv1d_nwc(data, kernel, strides=1, padding="VALID", dilation=1, out_dtype=None):
    """1D convolution with NWC-layout data and WIO-layout kernel.

    Thin wrapper over :py:func:`conv`; see that function for parameter details.
    """
    data_layout, kernel_layout = "NWC", "WIO"
    return conv(
        data, kernel, strides, padding, dilation, 1, data_layout, kernel_layout, out_dtype=out_dtype
    )
def conv1d_ncw(data, kernel, strides=1, padding="VALID", dilation=1, out_dtype=None):
    """1D convolution with NCW-layout data and OIW-layout kernel.

    Thin wrapper over :py:func:`conv`; see that function for parameter details.
    """
    data_layout, kernel_layout = "NCW", "OIW"
    return conv(
        data, kernel, strides, padding, dilation, 1, data_layout, kernel_layout, out_dtype=out_dtype
    )
def group_conv1d_nwc(
    data, kernel, strides=1, padding="VALID", dilation=1, groups=1, out_dtype=None
):
    """Grouped 1D convolution forward operator, NWC data / WIO kernel layout.

    Parameters
    ----------
    data : tvm.te.Tensor
        3-D with shape [batch, in_width, in_channel]

    kernel : tvm.te.Tensor
        3-D with shape [filter_size, in_channel, num_filter]

    strides : int or tuple
        The spatial stride along width

    padding : int, tuple, or str
        Padding size can be an integer for equal padding,
        a tuple of (left, right) or a string in ['VALID', 'SAME'].

    dilation : int or tuple
        Dilation rate if convolution should be dilated.

    groups : int
        Number of groups

    out_dtype : str
        The output data type. If None then output is same type as input.
    """
    data_layout, kernel_layout = "NWC", "WIO"
    return conv(
        data,
        kernel,
        strides,
        padding,
        dilation,
        groups,
        data_layout,
        kernel_layout,
        out_dtype=out_dtype,
    )
def group_conv1d_ncw(
    data, kernel, strides=1, padding="VALID", dilation=1, groups=1, out_dtype=None
):
    """Grouped 1D convolution forward operator, NCW data / OIW kernel layout.

    Parameters
    ----------
    data : tvm.te.Tensor
        3-D with shape [batch, in_channel, in_width]

    kernel : tvm.te.Tensor
        3-D with shape [num_filter, in_channel, filter_size]

    strides : int or tuple
        The spatial stride along width

    padding : int, tuple, or str
        Padding size can be an integer for equal padding,
        a tuple of (left, right) or a string in ['VALID', 'SAME'].

    dilation : int or tuple
        Dilation rate if convolution should be dilated.

    groups : int
        Number of groups

    out_dtype : str
        The output data type. If None then output is same type as input.
    """
    data_layout, kernel_layout = "NCW", "OIW"
    return conv(
        data,
        kernel,
        strides,
        padding,
        dilation,
        groups,
        data_layout,
        kernel_layout,
        out_dtype=out_dtype,
    )
| 4,570 | 32.123188 | 100 | py |
tvm | tvm-main/python/tvm/topi/nn/qnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Quantized Neural Network (QNN) Operators"""
import tvm
from tvm import te, tir, topi
# Integer codes used at runtime to select which datatype the simulated QNN ops
# should mimic. "disable" means pass the input through unchanged.
SQNN_DISABLE = 0
SQNN_INT8 = 1
SQNN_UINT8 = 2
SQNN_INT32 = 3

# Map from dtype string to its runtime selector code.
SQNN_DTYPE_TO_CODE = {
    "disable": SQNN_DISABLE,
    "int8": SQNN_INT8,
    "uint8": SQNN_UINT8,
    "int32": SQNN_INT32,
}

# Inverse mapping: selector code back to dtype string.
SQNN_CODE_TO_DTYPE = {v: k for k, v in SQNN_DTYPE_TO_CODE.items()}
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def simulated_quantize(data, out_dtype, output_scale=None, output_zero_point=None, axis=-1):
    """Simulated QNN quantize operator that mimics QNN outputs without changing datatype.

    The benefit of this operator over true QNN quantize is that this operator allows dynamic
    datatype selection and can operate on both per-channel and scalar scales and zero points while
    QNN quantize requires both of these to be fixed at compile time.

    Parameters
    ----------
    data: tvm.te.Tensor
        An N-D input tensor to the operator.

    out_dtype: tvm.te.Tensor
        A scalar variable that indicates which datatype to simulate quantization with. Use
        SQNN_DTYPE_TO_CODE to convert a dtype string into the corresponding variable
        value.

    output_scale: tvm.te.Tensor, optional
        A scalar tensor representing the scale to use when quantizing to integer datatypes.
        When it contains more than a single value, N must match the number of channels in data.

    output_zero_point: tvm.te.Tensor, optional
        A 1-D tensor representing the zero point to use when quantizing to integer datatypes.
        When it contains more than a single value, N must match the number of channels in data.

    axis: int, optional
        The channel axis for quantization. Default value is -1 which corresponds to the last axis.

    Returns
    -------
    output : tvm.te.Tensor
        Tensor with the same shape as ``data`` containing the simulated-quantized values
        (or a pass-through copy when ``out_dtype`` holds the "disable" code).
    """
    # When disabled, just pass through the input values.
    def _compute_pass_through(value, *indices):
        return value[indices]

    # Simulate quantization for arbitrary integer datatypes. The computation for all datatypes is:
    #   Q_output = clip((round(input_tensor/output_scale) + output_zero_point),
    #                   out_dtype::min,
    #                   out_dtype::max)
    def _compute_intn(dtype, value, *indices):
        assert output_scale is not None and output_zero_point is not None
        const_min = tvm.tir.min_value(dtype)
        const_max = tvm.tir.max_value(dtype)
        # Use indexmod to handle both scalar and per-channel QNN parameters.
        scale_idx = tir.indexmod(indices[axis], topi.shape(output_scale)[0])
        zp_idx = tir.indexmod(indices[axis], topi.shape(output_zero_point)[0])
        # clip(round(x / scale) + zp, dtype_min, dtype_max), expressed as max(min(...)).
        return te.max(
            te.min(
                te.round(value[indices] / output_scale[scale_idx]) + output_zero_point[zp_idx],
                const_max,
            ),
            const_min,
        )

    # Use an if chain to dynamically return the proper quantization based on the input datatype.
    # This allows the op to compile once but apply different quantization approaches
    # using a variable datatype input.
    def _dispatch_sim_quantize(value):
        # Base case: out_dtype matched none of the integer codes -> pass through.
        pass_through_value = te.compute(
            data.shape, lambda *indices: _compute_pass_through(value, *indices)
        )
        # Each stage falls back to the previous stage's result when its code doesn't match.
        int8_value = te.compute(
            data.shape,
            lambda *indices: tir.if_then_else(
                out_dtype.equal(SQNN_DTYPE_TO_CODE["int8"]),
                _compute_intn("int8", value, *indices),
                pass_through_value[indices],
            ),
        )
        uint8_value = te.compute(
            data.shape,
            lambda *indices: tir.if_then_else(
                out_dtype.equal(SQNN_DTYPE_TO_CODE["uint8"]),
                _compute_intn("uint8", value, *indices),
                int8_value[indices],
            ),
        )
        int32_value = te.compute(
            data.shape,
            lambda *indices: tir.if_then_else(
                out_dtype.equal(SQNN_DTYPE_TO_CODE["int32"]),
                _compute_intn("int32", value, *indices),
                uint8_value[indices],
            ),
        )
        return int32_value

    return te.compute(data.shape, lambda *indices: _dispatch_sim_quantize(data)[indices])
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def simulated_dequantize(data, in_dtype, input_scale=None, input_zero_point=None, axis=-1):
    """Simulated QNN dequantize operator that mimics QNN outputs without changing datatype.

    The benefit of this operator over true QNN dequantize is that this operator allows dynamic
    datatype selection and can operate on both per-channel and scalar scales and zero points while
    QNN dequantize requires both of these to be fixed at compile time.

    Parameters
    ----------
    data: tvm.te.Tensor
        An N-D input tensor to the operator.

    in_dtype: tvm.te.Tensor
        A scalar variable that indicates which datatype to simulate dequantization with. Use
        SQNN_DTYPE_TO_CODE to convert a dtype string into the corresponding variable
        value.

    input_scale: tvm.te.Tensor, optional
        A scalar tensor representing the scale to use when dequantizing from integer datatypes.
        When it contains more than a single value, N must match the number of channels in data.

    input_zero_point: tvm.te.Tensor, optional
        A 1-D tensor representing the zero point to use when dequantizing from integer datatypes.
        When it contains more than a single value, N must match the number of channels in data.

    axis: int, optional
        The channel axis for quantization. Default value is -1 which corresponds to the last axis.

    Returns
    -------
    output : tvm.te.Tensor
        Tensor with the same shape as ``data`` containing the simulated-dequantized values
        (or a pass-through copy when ``in_dtype`` holds the "disable" code).
    """
    # When disabled simply return the input tensor.
    def _compute_pass_through(value, *indices):
        return value[indices]

    # Simulate dequantization for arbitrary integer datatypes. The computation for all datatypes is:
    #   DQ_output = (input - zero_point) * scale
    def _compute_intn(value, *indices):
        assert input_scale is not None and input_zero_point is not None
        # Use indexmod to handle both scalar and per-channel QNN parameters.
        scale_idx = tir.indexmod(indices[axis], topi.shape(input_scale)[0])
        zp_idx = tir.indexmod(indices[axis], topi.shape(input_zero_point)[0])
        return (value[indices] - input_zero_point[zp_idx]) * input_scale[scale_idx]

    # Use an if chain to dynamically return the proper dequantization based on the input datatype.
    # This allows the op to compile once but apply different quantization approaches
    # using a variable datatype input.
    def _dispatch_sim_dequantize(value):
        # Base case: in_dtype matched no integer code -> pass through.
        pass_through_value = te.compute(
            data.shape, lambda *indices: _compute_pass_through(value, *indices)
        )
        # All integer dtypes share the same dequantize formula, so one condition suffices.
        intn_condition = tvm.te.any(
            in_dtype.equal(SQNN_DTYPE_TO_CODE["int8"]),
            in_dtype.equal(SQNN_DTYPE_TO_CODE["uint8"]),
            in_dtype.equal(SQNN_DTYPE_TO_CODE["int32"]),
        )
        intn_value = te.compute(
            data.shape,
            lambda *indices: tir.if_then_else(
                intn_condition,
                _compute_intn(value, *indices),
                pass_through_value[indices],
            ),
        )
        return intn_value

    return te.compute(data.shape, lambda *indices: _dispatch_sim_dequantize(data)[indices])
@tvm.target.generic_func
def qnn_conv2d_alter_layout(_attrs, _inputs, _tinfos, _out_type):
    """Change qnn.conv2d layout.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : tvm.relay.Expr
        Grouped input symbols
    tinfos : list
        Input shape and dtype
    out_type: type
        The output type

    Returns
    -------
    None
        The generic (default) implementation makes no change; targets may
        register an override for this generic function.

    Note
    ----
    Unlike other TOPI functions, this function operates on both graph level and operator level.
    """
    return None
@tvm.target.generic_func
def bias_add_legalize(_attrs, _inputs, _tinfos):
    """Legalize bias_add layout.

    Bias add is not a QNN-specific function, but this generic exists so that empty channels can
    be excised from quantized conv2d operators and folded into bias adds.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : tvm.relay.Expr
        Grouped input symbols
    tinfos : list
        Input shape and dtype

    Returns
    -------
    None
        The generic (default) implementation makes no change; targets may
        register an override for this generic function.
    """
    return None
@tvm.target.generic_func
def add_alter_layout(_attrs, _inputs, _tinfos, _out_type):
    """Change add layout.

    Add is not a QNN-specific function, but this generic exists so that bias add operations can be
    fused with input zero point add optimizations, which only happens if the previous operator is
    quantized.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : tvm.relay.Expr
        Grouped input symbols
    tinfos : list
        Input shape and dtype
    out_type: type
        The output type

    Returns
    -------
    None
        The generic (default) implementation makes no change; targets may
        register an override for this generic function.

    Note
    ----
    Unlike other TOPI functions, this function operates on both graph level and operator level.
    """
    return None
@tvm.target.generic_func
def qnn_requantize_alter_layout(_attrs, _inputs, _tinfos, _out_type):
    """Change requantize layout.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : tvm.relay.Expr
        Grouped input symbols
    tinfos : list
        Input shape and dtype
    out_type: type
        The output type

    Returns
    -------
    None
        The generic (default) implementation makes no change; targets may
        register an override for this generic function.

    Note
    ----
    Unlike other TOPI functions, this function operates on both graph level and operator level.
    """
    return None
@tvm.target.generic_func
def qnn_dense_alter_layout(_attrs, _inputs, _tinfos, _out_type):
    """Change qnn.dense layout.

    Not to change by default

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current dense op
    inputs : tvm.relay.Expr
        Grouped input symbols
    tinfos : list
        Input shape and dtype
    out_type: type
        The output type

    Returns
    -------
    None
        The generic (default) implementation makes no change; targets may
        register an override for this generic function.
    """
    return None
| 10,747 | 34.826667 | 100 | py |
tvm | tvm-main/python/tvm/topi/nn/flatten.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM operator flatten compute."""
from __future__ import absolute_import
import tvm
from tvm import te
from .. import tag
@tvm.te.tag_scope(tag=tag.INJECTIVE)
def flatten(data):
    """Flattens the input array into a 2-D array by collapsing the higher dimensions.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input array.

    Returns
    -------
    output : tvm.te.Tensor
        2-D array with collapsed higher dimensions.
    """
    in_shape = data.shape
    # Product of all extents past the first axis becomes the second output dim.
    trailing = 1
    for extent in in_shape[1:]:
        trailing = trailing * extent
    out_shape = [in_shape[0], trailing]

    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod

    def unravel(flat, dims):
        # Convert a flattened index back into per-dimension coordinates.
        coords = []
        for extent in reversed(dims):
            coords.append(idxmod(flat, extent))
            flat = idxdiv(flat, extent)
        coords.reverse()
        return coords

    return te.compute(out_shape, lambda i, j: data(i, *unravel(j, in_shape[1:])))
| 1,704 | 30.574074 | 85 | py |
tvm | tvm-main/python/tvm/topi/nn/dilate.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Dilation operators"""
import tvm
from tvm import te
from .. import utils
from .. import tag
@te.tag_scope(tag=tag.INJECTIVE + ",dilate")
def dilate(data, strides, dilation_value=0.0, name="DilatedInput"):
    """Dilate data with given dilation value (0 by default).

    Parameters
    ----------
    data : tvm.te.Tensor
        n-D, can be any layout.

    strides : list / tuple of n ints
        Dilation stride on each dimension, 1 means no dilation.

    dilation_value : int/float, optional
        Value used to dilate the input.

    name : str, optional
        The name prefix operators generated

    Returns
    -------
    Output : tvm.te.Tensor
        n-D, the same layout as data.

    Raises
    ------
    ValueError
        If ``strides`` does not have one entry per dimension of ``data``.
    """
    n = len(data.shape)
    if len(strides) != n:
        # Fix: error message said "dismatch"; corrected to "mismatch".
        raise ValueError(f"data dimension and strides size mismatch : {n} vs {len(strides)}")
    ana = tvm.arith.Analyzer()
    # Output extent per axis is (in_extent - 1) * stride + 1.
    out_shape = tuple(ana.simplify((data.shape[i] - 1) * strides[i] + 1) for i in range(n))

    def _dilate(*indices):
        not_zero = []
        index_tuple = []
        idxdiv = tvm.tir.indexdiv
        idxmod = tvm.tir.indexmod
        for i in range(n):
            if not utils.equal_const_int(strides[i], 1):
                # An output point maps back to an input point only when the
                # index is an exact multiple of the stride.
                index_tuple.append(idxdiv(indices[i], strides[i]))
                not_zero.append(idxmod(indices[i], strides[i]).equal(0))
            else:
                index_tuple.append(indices[i])
        if not_zero:
            not_zero = tvm.tir.all(*not_zero)
            return tvm.tir.if_then_else(
                not_zero, data(*index_tuple), tvm.tir.const(dilation_value, data.dtype)
            )
        # All strides are 1: the op degenerates to an identity copy.
        return data(*index_tuple)

    return te.compute(out_shape, _dilate, name=name)
| 2,524 | 33.589041 | 93 | py |
tvm | tvm-main/python/tvm/topi/nn/local_response_norm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""TVM operator for local response norm compute."""
from __future__ import absolute_import
from .. import cpp
def lrn(data, size, axis=1, alpha=0.0001, beta=0.75, bias=2):
    """Perform the across channels local response normalisation
    on the input data.

    sum_sqr_up^i{x, y} = (bias+((alpha/size)* \
                         {sum_{j=max(0, i-size/2)}^{min(N-1,i+size/2)} \
                              (data^j{x,y})^2}))^beta
    output^i{x, y} = data^i{x, y}/sum_sqr_up^i{x, y}
    N is the number for input channels

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, channel, height, width]

    size : int
        normalisation window size

    axis : int
        input data layout channel axis
        default value is 1 for NCHW format

    bias : float
        offset to avoid dividing by 0

    alpha : float
        to be divided

    beta : float
        exponent

    Returns
    -------
    output : tvm.te.Tensor
        4-D output with same shape
    """
    # Delegate to the C++ TOPI implementation.
    return cpp.nn.lrn(data, size, axis, alpha, beta, bias)
| 1,904 | 30.75 | 79 | py |
tvm | tvm-main/python/tvm/topi/nn/batch_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Batch matrix multiplication"""
# pylint: disable=invalid-name
import logging
import tvm
from tvm import auto_scheduler, te
from ..utils import get_const_tuple
logger = logging.getLogger("topi")
def batch_matmul(
    tensor_a,
    tensor_b,
    oshape=None,
    out_dtype=None,
    transpose_a=False,
    transpose_b=True,
    auto_scheduler_rewritten_layout="",
    meta_schedule_original_shape=None,
):
    """Compute batch matrix multiplication of `tensor_a` and `tensor_b`.

    Both `tensor_a` and `tensor_b` can be transposed. For legacy reason, we use NT format
    (transpose_a=False, transpose_b=True) by default.

    Parameters
    ----------
    tensor_a : tvm.te.Tensor
        3-D with shape [batch, M, K] or [batch, K, M].

    tensor_b : tvm.te.Tensor
        3-D with shape [batch, K, N] or [batch, N, K].

    oshape : List[Optional]
        Explicit intended output shape of the computation. Can be useful in cases
        with dynamic input shapes.

    out_dtype : Optional[str]
        Specifies the output data type for mixed precision batch matmul.

    transpose_a : Optional[bool] = False
        Whether the first tensor is in transposed format.

    transpose_b : Optional[bool] = True
        Whether the second tensor is in transposed format.

    auto_scheduler_rewritten_layout: Optional[str] = ""
        The layout after auto-scheduler's layout rewrite pass.

    meta_schedule_original_shape: Optional[List[PrimExpr]] = None
        The original shape of the tensor

    Returns
    -------
    output : tvm.te.Tensor
        3-D with shape [batch, M, N]
    """
    assert len(tensor_a.shape) == 3, "tensor_a only support 3-dim"
    # Read (batch, M, K) off tensor_a, honoring its transpose flag.
    if transpose_a:
        XB, XK, XI = get_const_tuple(tensor_a.shape)
    else:
        XB, XI, XK = get_const_tuple(tensor_a.shape)
    if auto_scheduler_rewritten_layout:
        # Infer shape for the rewritten layout
        YB, YK, YJ = auto_scheduler.get_shape_from_rewritten_layout(
            auto_scheduler_rewritten_layout, ["b", "k", "j"]
        )
        auto_scheduler.remove_index_check(tensor_b)
    elif meta_schedule_original_shape:
        auto_scheduler.rewrite_tensor_shape(tensor_b, meta_schedule_original_shape)
        if transpose_b:
            YB, YJ, YK = get_const_tuple(tensor_b.shape)
        else:
            YB, YK, YJ = get_const_tuple(tensor_b.shape)
    else:
        assert len(tensor_b.shape) == 3, "tensor_b only support 3-dim"
        if transpose_b:
            YB, YJ, YK = get_const_tuple(tensor_b.shape)
        else:
            YB, YK, YJ = get_const_tuple(tensor_b.shape)
    assert XK == YK or isinstance(YK, tvm.tir.expr.Var), "shapes of x and y are inconsistent"
    k = te.reduce_axis((0, XK), name="k")
    if oshape is None:
        # Batch dimensions may broadcast (1 vs B); a dynamic batch becomes a SizeVar.
        assert XB == YB or XB == 1 or YB == 1, "batch dimension doesn't match"
        batch = (
            tvm.tir.expr.SizeVar("batch", "int32")
            if isinstance(XB, tvm.tir.expr.Var) or isinstance(YB, tvm.tir.expr.Var)
            else te.max(XB, YB)
        )
        oshape = (batch, XI, YJ)
    if out_dtype is None:
        out_dtype = tensor_a.dtype
        if tensor_a.dtype != tensor_b.dtype:
            logger.warning(
                "tensor_a has different data type with tensor_b: %s, %s",
                tensor_a.dtype,
                tensor_b.dtype,
            )
    # Select the compute expression matching the (transpose_a, transpose_b) combination.
    # The `b if XB != 1 else 0` indexing implements batch-dimension broadcasting.
    if (transpose_a, transpose_b) == (True, True):
        compute_lambda = lambda b, i, j: te.sum(
            tensor_a[b if XB != 1 else 0, k, i].astype(out_dtype)
            * tensor_b[b if YB != 1 else 0, j, k].astype(out_dtype),
            axis=k,
        )
        compute_name = "T_batch_matmul_TT"
    elif (transpose_a, transpose_b) == (True, False):
        compute_lambda = lambda b, i, j: te.sum(
            tensor_a[b if XB != 1 else 0, k, i].astype(out_dtype)
            * tensor_b[b if YB != 1 else 0, k, j].astype(out_dtype),
            axis=k,
        )
        compute_name = "T_batch_matmul_TN"
    elif (transpose_a, transpose_b) == (False, True):
        compute_lambda = lambda b, i, j: te.sum(
            tensor_a[b if XB != 1 else 0, i, k].astype(out_dtype)
            * tensor_b[b if YB != 1 else 0, j, k].astype(out_dtype),
            axis=k,
        )
        compute_name = "T_batch_matmul_NT"
    else:  # (transpose_a, transpose_b) == (False, False):
        compute_lambda = lambda b, i, j: te.sum(
            tensor_a[b if XB != 1 else 0, i, k].astype(out_dtype)
            * tensor_b[b if YB != 1 else 0, k, j].astype(out_dtype),
            axis=k,
        )
        compute_name = "T_batch_matmul_NN"
    output = te.compute(
        oshape,
        compute_lambda,
        name=compute_name,
        tag="batch_matmul",
        attrs={"layout_free_placeholders": [tensor_b]},
    )
    if auto_scheduler_rewritten_layout:
        output = auto_scheduler.rewrite_compute_body(output, auto_scheduler_rewritten_layout)
    return output
@tvm.target.generic_func
def batch_matmul_legalize(attrs, inputs, types):
    """Legalizes batch_matmul op.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current batch_matmul
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    # not to change by default; targets may register an override for this
    # generic function.
    # pylint: disable=unused-argument
    return None
| 6,250 | 33.346154 | 93 | py |
tvm | tvm-main/python/tvm/topi/nn/elemwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Elementwise operators"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from .. import tag
from ..utils import get_const_int
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def relu(x):
    """Take relu of input x.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input argument.

    Returns
    -------
    y : tvm.te.Tensor
        The result.
    """

    def _rectify(*indices):
        # Elementwise max(x, 0); the zero constant carries x's dtype.
        zero = tvm.tir.const(0, x.dtype)
        return tvm.te.max(x(*indices), zero)

    return te.compute(x.shape, _rectify)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def leaky_relu(x, alpha):
    """Take leaky relu of input x.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input argument.

    alpha : float
        The slope for the small gradient when x < 0

    Returns
    -------
    y : tvm.te.Tensor
        The result.
    """

    def _leaky(*indices):
        val = x(*indices)
        # Negative values are scaled by alpha; positive pass through unchanged.
        slope_const = tvm.tir.const(alpha, val.dtype)
        return tvm.tir.Select(val > 0, val, val * slope_const)

    return te.compute(x.shape, _leaky)
@tvm.te.tag_scope(tag=tag.BROADCAST)
def prelu(x, slope, axis=1):
    """PReLU.
    It accepts two arguments: an input ``x`` and a weight array ``W``
    and computes the output as :math:`PReLU(x) y = x > 0 ? x : W * x`,
    where :math:`*` is an elementwise multiplication for each sample in the
    batch.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input argument.

    slope : tvm.te.Tensor
        Channelised slope tensor for prelu

    axis : int
        The axis where the channel data needs to be applied

    Returns
    -------
    y : tvm.te.Tensor
        The result.

    Links
    -----
    [http://arxiv.org/pdf/1502.01852v1.pdf]
    """
    # The slope must be a 1-D tensor whose length matches the channel axis of x.
    assert len(slope.shape) == 1
    assert axis < len(x.shape)
    assert get_const_int(slope.shape[0]) == get_const_int(x.shape[axis])

    def _select_channelwise(*indices):
        val = x(*indices)
        # Negative inputs are scaled by the per-channel slope.
        return tvm.tir.Select(val > 0, val, val * slope(indices[axis]))

    return te.compute(x.shape, _select_channelwise)
| 2,807 | 25.490566 | 87 | py |
tvm | tvm-main/python/tvm/topi/nn/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Neural network operators"""
from __future__ import absolute_import as _abs
from .conv1d import *
from .conv2d import *
from .conv3d import *
from .correlation import *
from .deformable_conv2d import *
from .depthwise_conv2d import *
from .elemwise import *
from .dilate import *
from .flatten import *
from .dense import *
from .mapping import *
from .pooling import *
from .softmax import *
from .conv3d_transpose import *
from .conv2d_transpose import *
from .conv1d_transpose import *
from .bnn import *
from .qnn import *
from .upsampling import *
from .instance_norm import instance_norm
from .layer_norm import layer_norm
from .group_norm import group_norm
from .local_response_norm import *
from .bitserial_conv2d import *
from .bitserial_dense import *
from .batch_matmul import *
from .batch_norm import *
from .sparse import *
from .pad import *
from .fifo_buffer import *
from .depth_to_space import *
from .space_to_depth import *
from .space_to_batch_nd import *
from .batch_to_space_nd import *
from .loss import *
from .lstm import *
| 1,873 | 31.310345 | 62 | py |
tvm | tvm-main/python/tvm/topi/nn/conv3d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Transposed 3D convolution operators (sometimes called Deconvolution)."""
import tvm
from tvm import te
from tvm import relay
from .dilate import dilate
from .pad import pad
from .utils import get_pad_tuple3d
from ..utils import simplify
def conv3d_transpose_ncdhw(Input, Filter, strides, padding, out_dtype, output_padding):
    """Transposed 3D convolution ncdhw forward operator.

    Parameters
    ----------
    Input : tvm.te.Tensor
        5-D with shape [batch, in_channel, in_depth, in_height, in_width]

    Filter : tvm.te.Tensor
        5-D with shape [in_channel, num_filter, filter_depth, filter_height, filter_width]

    strides : int or a list/tuple of three ints
        The spatial stride along depth,height and width

    padding : int or str
        Padding size, or ['VALID', 'SAME']

    out_dtype : str
        The output data type. This is used for mixed precision.

    output_padding : tuple of ints
        Used to get the right output shape for gradients

    Returns
    -------
    Output : tvm.te.Tensor
        5-D with shape [batch, out_channel, out_depth, out_height, out_width]
    """
    # Delegate to the shared implementation (dilate + pad + ordinary conv3d pattern).
    return declaration_conv3d_transpose_impl(
        Input, Filter, strides, padding, out_dtype, output_padding
    )
def conv3d_transpose_ncdhw_preprocess(data, kernel, strides, padding, out_dtype, output_padding):
    """Rewrite the operands of conv3d_transpose so the remaining computation
    has the same access pattern as a regular conv3d.

    The data is dilated by the stride and padded so that a unit-stride
    convolution over it realizes the transposed convolution; the kernel is
    relaid out from IODHW to OIDHW and rotated by 180 degrees.
    """
    _, in_c, _, _, _ = data.shape
    _, out_c, filter_d, filter_h, filter_w = kernel.shape
    stride_d, stride_h, stride_w = strides
    opad_d, opad_h, opad_w = output_padding
    assert opad_d < stride_d and opad_h < stride_h and opad_w < stride_w
    # Insert (stride - 1) zeros between input elements along each spatial axis.
    data_dilate = dilate(data, [1, 1, stride_d, stride_h, stride_w], name="data_dilate")
    # Replace the forward padding with the complementary "backward" padding
    # (kernel - 1 - pad); output_padding widens the trailing side only.
    fpad_front, fpad_top, fpad_left, fpad_back, fpad_bottom, fpad_right = get_pad_tuple3d(
        padding, (filter_d, filter_h, filter_w)
    )
    pad_begin = [
        0,
        0,
        filter_d - 1 - fpad_front,
        filter_h - 1 - fpad_top,
        filter_w - 1 - fpad_left,
    ]
    pad_end = [
        0,
        0,
        filter_d - 1 - fpad_back + opad_d,
        filter_h - 1 - fpad_bottom + opad_h,
        filter_w - 1 - fpad_right + opad_w,
    ]
    data_pad = pad(data_dilate, pad_begin, pad_end, name="data_pad")
    # Swap the I/O axes of the kernel and flip each spatial dimension.
    kernel_transform = te.compute(
        (out_c, in_c, filter_d, filter_h, filter_w),
        lambda o, i, d, h, w: kernel[i][o][filter_d - 1 - d][filter_h - 1 - h][filter_w - 1 - w],
        name="kernel_transform",
    )
    return data_pad, kernel_transform
def declaration_conv3d_transpose_impl(data, kernel, strides, padding, out_dtype, output_padding):
    """Compute declaration for conv3d transpose: preprocess the operands and
    then run a unit-stride conv3d over the dilated/padded data."""
    data_pad, kernel_transform = conv3d_transpose_ncdhw_preprocess(
        data, kernel, strides, padding, out_dtype, output_padding
    )
    batch, in_c, in_d, in_h, in_w = data_pad.shape
    out_c, _, filter_d, filter_h, filter_w = kernel_transform.shape
    # Output extents of a stride-1 "VALID" convolution over the padded data.
    out_c = simplify(out_c)
    out_d = simplify(in_d - filter_d + 1)
    out_h = simplify(in_h - filter_h + 1)
    out_w = simplify(in_w - filter_w + 1)
    rc = te.reduce_axis((0, in_c), name="dc")
    rd = te.reduce_axis((0, filter_d), name="dd")
    rh = te.reduce_axis((0, filter_h), name="dh")
    rw = te.reduce_axis((0, filter_w), name="dw")
    return te.compute(
        (batch, out_c, out_d, out_h, out_w),
        lambda b, c, d, h, w: te.sum(
            data_pad[b, rc, d + rd, h + rh, w + rw].astype(out_dtype)
            * kernel_transform[c, rc, rd, rh, rw].astype(out_dtype),
            axis=[rc, rd, rh, rw],
        ),
        tag="conv3d_transpose_ncdhw",
    )
@tvm.target.generic_func
def conv3d_transpose_legalize(attrs, inputs, types):
    """Legalizes Transposed 3D convolution op.

    Rewrites an NDHWC conv3d_transpose into the NCDHW/IODHW form by
    transposing the data and kernel, then transposing the result back.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current Transposed 3D convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr, or None when no legalization is needed
    """
    if attrs["data_layout"] != "NDHWC":
        return None
    data, kernel = inputs
    # Transpose orders that bring each supported kernel layout to IODHW;
    # None means the kernel is already in the desired layout.
    to_iodhw = {
        "DHWIO": (3, 4, 0, 1, 2),
        "DHWOI": (4, 3, 0, 1, 2),
        "OIDHW": (1, 0, 2, 3, 4),
        "IODHW": None,
    }
    kernel_layout = attrs["kernel_layout"]
    if kernel_layout not in to_iodhw:
        # Unrecognized kernel layout: skip legalization and let the op
        # implementation handle the case.
        return None
    axes = to_iodhw[kernel_layout]
    if axes is not None:
        kernel = relay.transpose(kernel, axes=axes)
    # Rebuild the attrs for the NCDHW form of the op.
    new_attrs = {key: attrs[key] for key in attrs.keys()}
    new_attrs["data_layout"] = "NCDHW"
    # layout of kernel should be IODHW, but kernel_layout should be swapped - OIDHW
    new_attrs["kernel_layout"] = "IODHW"
    # NDHWC -> NCDHW for the data, run the op, then transpose back.
    data = relay.transpose(data, axes=(0, 4, 1, 2, 3))
    deconv = relay.nn.conv3d_transpose(data, kernel, **new_attrs)
    return relay.transpose(deconv, axes=(0, 2, 3, 4, 1))
| 6,938 | 36.711957 | 97 | py |
tvm | tvm-main/python/tvm/topi/nn/space_to_depth.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""TVM operator space_to_depth compute."""
from __future__ import absolute_import
import tvm
from tvm import te
from .. import tag
def space_to_depth(data, block_size, layout="NCHW"):
    """Perform space to depth transformation on the data

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D tensor in either NCHW or NHWC layout.
    block_size : int
        Size of blocks to decompose into channel dimension.
    layout : string
        Either NCHW or NHWC, indicating data layout.

    Returns
    -------
    output : tvm.te.Tensor
        Output of shape [N, C * block_size**2, H / block_size, W / block_size]
    """
    if layout == "NCHW":
        batch, channels, height, width = data.shape
    elif layout == "NHWC":
        batch, height, width, channels = data.shape
    else:
        raise ValueError("Only NCHW and NHWC layouts are currently supported.")

    new_h = tvm.tir.truncdiv(height, block_size)
    new_w = tvm.tir.truncdiv(width, block_size)
    new_c = channels * block_size * block_size
    if layout == "NCHW":
        output_shape = [batch, new_c, new_h, new_w]
    else:
        output_shape = [batch, new_h, new_w, new_c]

    def _as_nchw_index(*indices):
        # Normalize an output index to (n, c, y, x) regardless of layout.
        if layout == "NCHW":
            return indices
        n, y, x, c = indices
        return n, c, y, x

    def _source_pixel(n, c, y, x):
        # Decode which of the block_size x block_size spatial offsets this
        # output channel came from, plus the original channel index.
        block_offset = tvm.tir.truncdiv(c, channels)
        channel_idx = tvm.tir.truncmod(c, channels)
        x_idx = tvm.tir.truncmod(block_offset, block_size)
        y_idx = tvm.tir.truncdiv(block_offset, block_size)
        if layout == "NCHW":
            return data(n, channel_idx, y_idx + (y * block_size), x_idx + (x * block_size))
        return data(n, y_idx + (y * block_size), x_idx + (x * block_size), channel_idx)

    return te.compute(
        output_shape,
        lambda *indices: _source_pixel(*_as_nchw_index(*indices)),
        name="space_to_depth",
        tag=tag.INJECTIVE,
    )
| 2,964 | 32.693182 | 93 | py |
tvm | tvm-main/python/tvm/topi/nn/fifo_buffer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FIFO buffer op"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from .. import tag
from ..transform import concatenate, strided_slice
@tvm.te.tag_scope(tag=tag.INJECTIVE + ",fifo_buffer")
def fifo_buffer(data, buffer, axis):
    """
    FIFO buffer to enable computation reuse in CNNs with sliding window input

    Compute equivalent of

    .. code-block:: python

        concat(buffer, data, axis=axis)
        .slice_axis(axis=axis,
                    begin=data.shape[axis],
                    end=data.shape[axis]+buffer.shape[axis])

    Useful for

    * Encoding explicit re-use of computation in convolution ops operated on a sliding window input
    * Implementing a FIFO queue to cache intermediate results, e.g. as in Fast WaveNet.

    Parameters
    ----------
    data : tvm.te.Tensor
        The input data
    buffer : tvm.te.Tensor
        Previous value of the FIFO buffer
    axis : int
        Specify which axis should be used for buffering

    Returns
    -------
    result : tvm.te.Tensor
        Updated value for the buffer
    """
    assert len(data.shape) == len(buffer.shape), (
        f"buffer and data must have same number of dimensions, "
        f"buffer.shape = {buffer.shape}, data.shape = {data.shape}"
    )
    assert len(buffer.shape) >= 1, "Zero-dimension tensor not supported"
    assert 0 <= axis < len(buffer.shape), "buffer axis out of range"
    # Along `axis` the new data may be shorter than the buffer; all other
    # extents must match exactly.
    for i in range(len(data.shape)):
        if i == axis:
            assert int(str(data.shape[i])) <= int(str(buffer.shape[i]))
        else:
            assert int(str(data.shape[i])) == int(str(buffer.shape[i]))

    buflen = buffer.shape[axis]
    data_size = data.shape[axis]

    # Explicitly write out formula up to 4D, and then use concat+slice combo for 5D and higher
    if len(buffer.shape) == 1:
        return te.compute(
            buffer.shape,
            lambda i: tvm.tir.if_then_else(
                i < buflen - data_size, buffer[i + data_size], data[i - buflen + data_size]
            ),
            name="new_buffer",
        )
    if len(buffer.shape) == 2:
        if axis == 0:
            return te.compute(
                buffer.shape,
                lambda i, j: tvm.tir.if_then_else(
                    i < buflen - data_size,
                    buffer[i + data_size, j],
                    data[i - buflen + data_size, j],
                ),
                name="new_buffer",
            )
        if axis == 1:
            return te.compute(
                buffer.shape,
                lambda i, j: tvm.tir.if_then_else(
                    j < buflen - data_size,
                    buffer[i, j + data_size],
                    data[i, j - buflen + data_size],
                ),
                name="new_buffer",
            )
        assert False, f"Invalid value for axis; it should be less than {len(buffer.shape)}"
    elif len(buffer.shape) == 3:
        if axis == 0:
            return te.compute(
                buffer.shape,
                lambda i, j, k: tvm.tir.if_then_else(
                    i < buflen - data_size,
                    buffer[i + data_size, j, k],
                    data[i - buflen + data_size, j, k],
                ),
                name="new_buffer",
            )
        if axis == 1:
            return te.compute(
                buffer.shape,
                lambda i, j, k: tvm.tir.if_then_else(
                    j < buflen - data_size,
                    buffer[i, j + data_size, k],
                    data[i, j - buflen + data_size, k],
                ),
                name="new_buffer",
            )
        if axis == 2:
            return te.compute(
                buffer.shape,
                lambda i, j, k: tvm.tir.if_then_else(
                    k < buflen - data_size,
                    buffer[i, j, k + data_size],
                    data[i, j, k - buflen + data_size],
                ),
                name="new_buffer",
            )
        assert False, f"Invalid value for axis; it should be less than {len(buffer.shape)}"
    elif len(buffer.shape) == 4:
        if axis == 0:
            return te.compute(
                buffer.shape,
                lambda i, j, k, l: tvm.tir.if_then_else(
                    i < buflen - data_size,
                    buffer[i + data_size, j, k, l],
                    data[i - buflen + data_size, j, k, l],
                ),
                name="new_buffer",
            )
        if axis == 1:
            return te.compute(
                buffer.shape,
                lambda i, j, k, l: tvm.tir.if_then_else(
                    j < buflen - data_size,
                    buffer[i, j + data_size, k, l],
                    data[i, j - buflen + data_size, k, l],
                ),
                name="new_buffer",
            )
        if axis == 2:
            return te.compute(
                buffer.shape,
                lambda i, j, k, l: tvm.tir.if_then_else(
                    k < buflen - data_size,
                    buffer[i, j, k + data_size, l],
                    data[i, j, k - buflen + data_size, l],
                ),
                name="new_buffer",
            )
        if axis == 3:
            return te.compute(
                buffer.shape,
                lambda i, j, k, l: tvm.tir.if_then_else(
                    l < buflen - data_size,
                    buffer[i, j, k, l + data_size],
                    data[i, j, k, l - buflen + data_size],
                ),
                name="new_buffer",
            )
        assert False, f"Invalid value for axis; it should be less than {len(buffer.shape)}"
    else:
        # Implement FIFO buffer as combination of concat and slice
        begin = [0] * len(buffer.shape)
        begin[axis] = data.shape[axis]
        end = list(buffer.shape[:])
        end[axis] += data.shape[axis]
        return strided_slice(concatenate((buffer, data), axis=axis), begin=begin, end=end)
| 6,780 | 35.262032 | 99 | py |
tvm | tvm-main/python/tvm/topi/nn/layer_norm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Layer normalization operator."""
from .. import cpp
def layer_norm(data, gamma, beta, axis, epsilon=1e-5):
    """Layer normalization operator.

    Parameters
    ----------
    data : tvm.te.Tensor
        N-D with shape (d_0, d_1, ..., d_{N-1})
    gamma : tvm.te.Tensor
        K-D with shape (r_0, r_1, ..., r_{K-1}) where K == len(axis) and d_{axis_k} == r_k
    beta : tvm.te.Tensor
        Optional, K-D with shape (r_0, r_1, ..., r_{K-1}) where K == len(axis)
        and d_{axis_k} == r_k
    axis : list of int
        Axis over the normalization applied
    epsilon : float
        The epsilon value to avoid division by zero.

    Returns
    -------
    result : tvm.te.Tensor
        N-D with shape (d_0, d_1, ..., d_{N-1})
    """
    # Delegate to the C++ TOPI implementation.
    return cpp.nn.layer_norm(data, gamma, beta, axis, epsilon)
| 1,594 | 32.93617 | 100 | py |
tvm | tvm-main/python/tvm/topi/nn/space_to_batch_nd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""TVM operator space_to_batch_nd compute."""
from __future__ import absolute_import
from . import cpp
def space_to_batch_nd(data, block_shape, pad_before, pad_after, pad_value=0.0):
    """Perform space to batch transformation on the data

    Parameters
    ----------
    data : tvm.te.Tensor
        N-D Tensor with shape [batch, spatial_shape, remaining_shapes],
        where spatial_shape has M dimensions.
    block_shape : list of ints
        list of size [M] where M is number of spatial dims, specifies block
        size for each spatial dimension.
    pad_before : list of ints
        list of shape [M] where M is number of spatial dims, specifies
        zero-padding size before each spatial dimension.
    pad_after : list of ints
        list of shape [M] where M is number of spatial dims, specifies
        zero-padding size after each spatial dimension.
    pad_value : float, optional
        The value used for padding.

    Returns
    -------
    output : tvm.te.Tensor
        Result of the space-to-batch transformation (computed by the C++
        TOPI implementation).
    """
    # Delegate to the C++ TOPI implementation.
    return cpp.nn.space_to_batch_nd(data, block_shape, pad_before, pad_after, pad_value)
| 1,916 | 35.169811 | 88 | py |
tvm | tvm-main/python/tvm/topi/nn/conv2d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Transposed 2D convolution operators (sometimes called Deconvolution)."""
import collections
import tvm
from tvm import relay, te
from ..utils import simplify
from .dilate import dilate
from .pad import pad
from .utils import get_pad_tuple
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
assert len(x) == n, f"Input can only have {n} elements, but got {len(x)} instead: {x}."
return x
return tuple(repeat(x, n))
return parse
# Convenience parsers that normalize a scalar or an iterable to a fixed
# arity (1/2/3/4), e.g. _pair(3) -> (3, 3), _pair((1, 2)) -> (1, 2).
_single = _ntuple(1)
_pair = _ntuple(2)
_triple = _ntuple(3)
_quadruple = _ntuple(4)
def conv2d_transpose_nchw(Input, Filter, strides, padding, out_dtype, output_padding):
    """Transposed 2D convolution nchw forward operator.

    Parameters
    ----------
    Input : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    Filter : tvm.te.Tensor
        4-D with shape [in_channel, num_filter, filter_height, filter_width]
    strides : tuple of two ints
        The spatial stride along height and width
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    out_dtype : str
        The output data type. This is used for mixed precision.
    output_padding : tuple of ints
        Used to get the right output shape for gradients

    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    # Thin wrapper around the shared compute declaration.
    return declaration_conv2d_transpose_impl(
        Input, Filter, strides, padding, out_dtype, output_padding=output_padding
    )
def conv2d_transpose_nchw_preprocess(data, kernel, strides, padding, out_dtype, output_padding):
    """Rewrite the operands of conv2d_transpose so the remaining computation
    has the same access pattern as a regular conv2d.

    The data is dilated by the stride and padded so that a unit-stride
    convolution over it realizes the transposed convolution; the kernel is
    relaid out from IOHW to OIHW and rotated by 180 degrees.
    """
    _, in_c, _, _ = data.shape
    _, out_c, filter_h, filter_w = kernel.shape
    stride_h, stride_w = strides
    opad_h, opad_w = output_padding
    assert opad_h < stride_h and opad_w < stride_w
    # Insert (stride - 1) zeros between input elements along each spatial axis.
    data_dilate = dilate(data, [1, 1, stride_h, stride_w], name="data_dilate")
    # Replace the forward padding with the complementary "backward" padding
    # (kernel - 1 - pad); output_padding widens the bottom/right side only.
    fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
    pad_begin = [0, 0, filter_h - 1 - fpad_top, filter_w - 1 - fpad_left]
    pad_end = [
        0,
        0,
        filter_h - 1 - fpad_bottom + opad_h,
        filter_w - 1 - fpad_right + opad_w,
    ]
    data_pad = pad(data_dilate, pad_begin, pad_end, name="data_pad")
    # Swap the I/O axes of the kernel and rotate it by 180 degrees.
    kernel_transform = te.compute(
        (out_c, in_c, filter_h, filter_w),
        lambda o, i, h, w: kernel[i][o][filter_h - 1 - h][filter_w - 1 - w],
        name="kernel_transform",
    )
    return data_pad, kernel_transform
def declaration_conv2d_transpose_impl(data, kernel, strides, padding, out_dtype, output_padding):
    """Compute declaration for conv2d transpose: preprocess the operands and
    then run a unit-stride conv2d over the dilated/padded data."""
    data_pad, kernel_transform = conv2d_transpose_nchw_preprocess(
        data, kernel, strides, padding, out_dtype, output_padding
    )
    batch, in_c, in_h, in_w = data_pad.shape
    out_c, _, filter_h, filter_w = kernel_transform.shape
    # Output extents of a stride-1 "VALID" convolution over the padded data.
    out_c = simplify(out_c)
    out_h = simplify(in_h - filter_h + 1)
    out_w = simplify(in_w - filter_w + 1)
    rc = te.reduce_axis((0, in_c), name="dc")
    rh = te.reduce_axis((0, filter_h), name="dh")
    rw = te.reduce_axis((0, filter_w), name="dw")
    return te.compute(
        (batch, out_c, out_h, out_w),
        lambda b, c, h, w: te.sum(
            data_pad[b, rc, h + rh, w + rw].astype(out_dtype)
            * kernel_transform[c, rc, rh, rw].astype(out_dtype),
            axis=[rc, rh, rw],
        ),
        tag="conv2d_transpose_nchw",
    )
def group_conv2d_transpose_nchw(data, kernel, stride, padding, out_dtype, output_padding, groups):
    """Group convolution operator in NCHW layout.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    kernel : tvm.te.Tensor
        4-D with shape [in_channel, out_channel // groups, filter_height, filter_width]
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of 2 or 4 ints
        padding size, or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    out_dtype : str
        The output data type. This is used for mixed precision.
    output_padding : tuple of ints
        Used to get the right output shape for gradients
    groups : int
        number of groups
    out_dtype : str
        The output type. This is used for mixed precision.

    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    # groups == 1 degenerates to the plain transposed convolution.
    if groups == 1:
        return conv2d_transpose_nchw(data, kernel, stride, padding, out_dtype, output_padding)
    # some pre-processing and prelimnary checks
    if out_dtype is None:
        out_dtype = data.dtype
    batch, in_channels, in_h, in_w = data.shape
    _, out_c, filter_h, filter_w = kernel.shape
    assert (
        in_channels % groups == 0
    ), f"input channels {in_channels} must divide group size {groups}"
    # assert out_c % groups == 0, f"output channels {in_c} must divide group size {groups}"
    strides = _pair(stride)
    # padding = _pair(padding)
    # output_padding = _pair(output_padding)
    # dilation = _pair(dilation)
    stride_h, stride_w = strides
    opad_h, opad_w = output_padding
    assert (
        opad_h < stride_h and opad_w < stride_w
    ), f"[{output_padding}] opad_h:{opad_h} < stride_h:{stride_h} \
        and opad_w:{opad_w} < stride_w:{stride_w} does not satisfy."
    # dilate data: insert (stride - 1) zeros between input elements so a
    # unit-stride convolution realizes the transposed convolution.
    data_dilate = dilate(data, [1, 1, stride_h, stride_w], name="data_dilate")
    # pad data with the complementary "backward" padding (kernel - 1 - pad);
    # output_padding widens only the bottom/right side.
    fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
    bpad_top = filter_h - 1 - fpad_top
    bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
    bpad_left = filter_w - 1 - fpad_left
    bpad_right = filter_w - 1 - fpad_right + opad_w
    data_pad = pad(
        data_dilate, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right], name="data_pad"
    )
    # transform kernel layout from IOHW to OIHW, and rotate kernel by 180 degrees
    # NOTE(review): the compute axes are named (i, o, ...) but the first
    # (extent out_c) indexes kernel's second axis and vice versa — the
    # lambda parameter names are swapped relative to their roles.
    kernel_transform = te.compute(
        (out_c, in_channels, filter_h, filter_w),
        lambda i, o, h, w: kernel[o][i][filter_h - 1 - h][filter_w - 1 - w],
        name="kernel_transform",
    )
    batch, in_channels, in_h, in_w = data_pad.shape
    out_c, _, filter_h, filter_w = kernel_transform.shape

    # convolution stage
    out_channels = simplify(out_c * groups)

    out_h = simplify(in_h - filter_h + 1)
    out_w = simplify(in_w - filter_w + 1)
    dc = te.reduce_axis((0, in_channels // groups), name="dc")
    dh = te.reduce_axis((0, filter_h), name="dh")
    dw = te.reduce_axis((0, filter_w), name="dw")

    # data: batch, in_channels, out_h, out_w
    # weight: out_channels // G, in_channels, out_h, out_w
    # Each output channel c reads only the input-channel slice belonging to
    # its group: c // (out_channels // groups) selects the group, and the
    # reduction dc sweeps that group's (in_channels // groups) channels.
    return te.compute(
        (batch, out_channels, out_h, out_w),
        lambda b, c, h, w: te.sum(
            data_pad[
                b, c // (out_channels // groups) * (in_channels // groups) + dc, h + dh, w + dw
            ].astype(out_dtype)
            * kernel_transform[
                c % (out_channels // groups),
                c // (out_channels // groups) * (in_channels // groups) + dc,
                dh,
                dw,
            ].astype(out_dtype),
            axis=[dc, dh, dw],
        ),
        tag="group_conv2d_transpose_nchw",
    )
def layout_transform(tensor: "relay.Expr", current_layout: str, desired_layout: str):
    """Transform a tensor with the current layout to the desired layout.

    E.g. layout_transform(t, "NCHW", "CNHW") --> relay.transpose(t, [1, 0, 2, 3])

    Parameters
    ----------
    tensor: relay.Expr
        The Tensor to transpose
    current_layout: str
        The current layout e.g. NCHW or OIHW
    desired_layout: str
        The desired layout, must be compatible with current_layout

    Returns
    -------
    The layout_transformed tensor.
    """
    # The two layouts must be permutations of the same dimension letters.
    if sorted(current_layout) != sorted(desired_layout):
        raise ValueError(f"Incompatible layouts: {current_layout} vs {desired_layout}")
    if current_layout == desired_layout:
        return tensor
    # For each output position, look up where that dimension currently sits.
    source_position = {dim: idx for idx, dim in enumerate(current_layout)}
    axes = [source_position[dim] for dim in desired_layout]
    return relay.transpose(tensor, axes=axes)
@tvm.target.generic_func
def conv2d_transpose_legalize(attrs, inputs, types):
    """Legalizes Transposed 2D convolution op.

    Rewrites NHWC/NCHW conv2d_transpose calls into the NCHW/IOHW form,
    transposing operands (and the result, for NHWC) as needed.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current Transposed 2D convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr, or None when no legalization is needed
    """
    data, kernel = inputs
    kernel_layout = attrs["kernel_layout"]

    target = tvm.target.Target.current(allow_none=True)
    if target and "cudnn" in target.libs:
        # cuDNN backend can directly operate on NHWC layout.
        return None

    data_layout = attrs["data_layout"]
    if data_layout == "NHWC":
        kernel = layout_transform(kernel, kernel_layout, "IOHW")
        # Rebuild the attrs for the NCHW form of the op.
        new_attrs = {key: attrs[key] for key in attrs.keys()}
        new_attrs["data_layout"] = "NCHW"
        # layout of kernel should be IOHW, but kernel_layout will be swapped - OIHW
        new_attrs["kernel_layout"] = "IOHW"
        # NHWC -> NCHW for the data, run the op, then transpose back.
        data = relay.transpose(data, axes=(0, 3, 1, 2))
        deconv = relay.nn.conv2d_transpose(data, kernel, **new_attrs)
        return relay.transpose(deconv, axes=(0, 2, 3, 1))
    if data_layout == "NCHW":
        kernel = layout_transform(kernel, kernel_layout, "IOHW")
        new_attrs = {key: attrs[key] for key in attrs.keys()}
        # layout of kernel should be IOHW, but kernel_layout will be swapped - OIHW
        new_attrs["kernel_layout"] = "IOHW"
        return relay.nn.conv2d_transpose(data, kernel, **new_attrs)
    return None
| 11,441 | 33.257485 | 99 | py |
tvm | tvm-main/python/tvm/topi/nn/group_norm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Layer normalization operator."""
from .. import cpp
def group_norm(data, gamma, beta, num_groups, channel_axis, axes, epsilon=1e-5):
    """Group normalization operator.

    Parameters
    ----------
    data : tvm.te.Tensor
        N-D with shape (d_0, d_1, ..., d_{N-1})
    gamma : tvm.te.Tensor
        1-D with shape (r_0) where r_0 == d_{channel_axis}
    beta : tvm.te.Tensor
        Optional, 1-D with shape (r_0) where r_0 == d_{channel_axis}
    num_groups : int
        The number of groups
    channel_axis : int
        The channel axis
    axes : list of int
        Axis over the normalization applied, excluding the channel axis
    epsilon : float
        The epsilon value to avoid division by zero.

    Returns
    -------
    result : tvm.te.Tensor
        N-D with shape (d_0, d_1, ..., d_{N-1})
    """
    # Delegate to the C++ TOPI implementation.
    return cpp.nn.group_norm(data, gamma, beta, num_groups, channel_axis, axes, epsilon)
| 1,710 | 31.283019 | 88 | py |
tvm | tvm-main/python/tvm/topi/nn/depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, too-many-locals, unused-argument
"""Depthwise convolution operators"""
from __future__ import absolute_import as _abs
from collections import namedtuple
import numpy as np
import tvm
from tvm import te
from .dilate import dilate
from .pad import pad
from .utils import get_pad_tuple
from ..utils import simplify, get_const_tuple
# Workload description of depthwise-conv2d: the static dtype/shape/stride/
# padding/dilation parameters that fully characterize one depthwise conv2d
# invocation (built by _get_workload below).
Workload = namedtuple(
    "Workload",
    [
        "in_dtype",
        "out_dtype",
        "height",
        "width",
        "in_filter",
        "out_filter",
        "kernel_h",
        "kernel_w",
        "padt",
        "padl",
        "padb",
        "padr",
        "dilation_h",
        "dilation_w",
        "stride_h",
        "stride_w",
    ],
)
def _get_workload(data, kernel, stride, padding, dilation, out_dtype, data_layout="NCHW"):
    """Get the workload structure for a depthwise conv2d.

    Builds a ``Workload`` namedtuple from the tensor shapes and convolution
    parameters, normalizing scalar stride/dilation to pairs and expanding
    ``padding`` into explicit top/left/bottom/right amounts.
    Supported layouts: "NCHW", "NHWC", "NCHWc".
    """
    # Unpack shapes according to the layout; for the blocked NCHWc layout
    # the flat channel counts are recovered from chunk * block.
    if data_layout == "NCHW":
        _, in_channel, height, width = get_const_tuple(data.shape)
        filter_channel, channel_multiplier, kh, kw = get_const_tuple(kernel.shape)
    elif data_layout == "NHWC":
        _, height, width, in_channel = get_const_tuple(data.shape)
        kh, kw, filter_channel, channel_multiplier = get_const_tuple(kernel.shape)
    elif data_layout == "NCHWc":
        _, in_channel_chunk, height, width, in_channel_block = get_const_tuple(data.shape)
        in_channel = in_channel_chunk * in_channel_block
        (filter_channel_chunk, cm_chunk, kh, kw, cm_block, filter_channel_block) = get_const_tuple(
            kernel.shape
        )
        filter_channel = filter_channel_chunk * filter_channel_block
        channel_multiplier = cm_chunk * cm_block
        assert in_channel_block == filter_channel_block, (
            f"Incorrect dimensions, data has block size {in_channel_block}, but filter has "
            f"block size {filter_channel_block}"
        )
    else:
        raise ValueError(f"Data layout {data_layout} not supported")
    # Depthwise: the filter's channel count must equal the input's.
    assert in_channel == filter_channel, (
        f"Incorrect dimensions, data has {in_channel} channels but filter expects "
        f"{filter_channel} channels"
    )
    out_channel = filter_channel * channel_multiplier
    # Normalize scalar stride/dilation to (h, w) pairs.
    dilation_h, dilation_w = (
        dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation)
    )
    if isinstance(stride, (tuple, list)):
        HSTR, WSTR = stride
    else:
        HSTR, WSTR = stride, stride
    # Only identical dtypes or the quantized uint8-data/int8-kernel pair
    # are accepted.
    assert (data.dtype == kernel.dtype) or (
        data.dtype == "uint8" and kernel.dtype == "int8"
    ), f"Do not support inputs with different data types now. {data.dtype} vs. {kernel.dtype}"
    # Padding is computed against the dilated kernel extent.
    dilated_kernel_h = (kh - 1) * dilation_h + 1
    dilated_kernel_w = (kw - 1) * dilation_w + 1
    pt, pl, pb, pr = get_pad_tuple(padding, (dilated_kernel_h, dilated_kernel_w))
    return Workload(
        data.dtype,
        out_dtype,
        height,
        width,
        in_channel,
        out_channel,
        kh,
        kw,
        pt,
        pl,
        pb,
        pr,
        dilation_h,
        dilation_w,
        HSTR,
        WSTR,
    )
def depthwise_conv2d_nchw(Input, Filter, stride, padding, dilation, out_dtype=None):
    """Depthwise convolution nchw forward operator.

    Parameters
    ----------
    Input : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    Filter : tvm.te.Tensor
        4-D with shape [in_channel, channel_multiplier, filter_height, filter_width]
    stride : int or a list/tuple of two ints
        The spatial stride, or (stride_height, stride_width).
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    dilation: int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    out_dtype: str, optional
        Output data type

    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    out_dtype = Input.dtype if out_dtype is None else out_dtype
    # Normalize scalar stride/dilation to (h, w) pairs.
    stride_h, stride_w = (stride, stride) if isinstance(stride, int) else stride
    dilation_h, dilation_w = (dilation, dilation) if isinstance(dilation, int) else dilation

    batch, in_channel, in_height, in_width = Input.shape
    filter_channel, channel_multiplier, filter_height, filter_width = Filter.shape

    # Effective kernel extent once dilation is applied.
    dilated_kernel_h = (filter_height - 1) * dilation_h + 1
    dilated_kernel_w = (filter_width - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    out_channel = simplify(in_channel * channel_multiplier)
    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)

    # Padding stage.
    PaddedInput = pad(
        Input, [0, 0, pad_top, pad_left], [0, 0, pad_down, pad_right], name="PaddedInput"
    )

    # Depthwise convolution stage: output channel c maps to input channel
    # c // channel_multiplier and filter slice c % channel_multiplier.
    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod
    di = te.reduce_axis((0, filter_height), name="di")
    dj = te.reduce_axis((0, filter_width), name="dj")
    return te.compute(
        (batch, out_channel, out_height, out_width),
        lambda b, c, i, j: te.sum(
            (
                PaddedInput[
                    b,
                    idxdiv(c, channel_multiplier),
                    i * stride_h + di * dilation_h,
                    j * stride_w + dj * dilation_w,
                ].astype(out_dtype)
                * Filter[
                    idxdiv(c, channel_multiplier), idxmod(c, channel_multiplier), di, dj
                ].astype(out_dtype)
            ),
            axis=[di, dj],
        ),
        name="DepthwiseConv2d",
        tag="depthwise_conv2d_nchw",
    )
def depthwise_conv2d_nhwc(
    Input, Filter, stride, padding, dilation, kernel_layout="HWOI", out_dtype=None
):
    """Depthwise convolution nhwc forward operator.

    Parameters
    ----------
    Input : tvm.te.Tensor
        4-D with shape [batch, in_height, in_width, in_channel]
    Filter : tvm.te.Tensor
        4-D with shape [filter_height, filter_width, in_channel, channel_multiplier]
    stride : tuple of two ints
        The spatial stride along height and width
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    dilation: int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    kernel_layout : str
        Filter layout, "HWOI" (default) or "HWIO"; selects the permutation
        used when indexing Filter below.
    out_dtype: str, optional
        Output data type; defaults to the input dtype.

    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_height, out_width, out_channel]
    """
    out_dtype = Input.dtype if out_dtype is None else out_dtype
    # Normalize scalar stride / dilation into (height, width) pairs.
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    batch, in_height, in_width, in_channel = Input.shape
    # shape of dilated kernel
    if kernel_layout == "HWIO":
        filter_height, filter_width, channel_multiplier, filter_channel = Filter.shape
        # Permutation applied to the (h, w, c//m, c%m) index tuple when
        # reading Filter, so both layouts share one compute expression.
        kernel_permutation = [0, 1, 3, 2]
    else:
        filter_height, filter_width, filter_channel, channel_multiplier = Filter.shape
        kernel_permutation = [0, 1, 2, 3]
    dilated_kernel_h = (filter_height - 1) * dilation_h + 1
    dilated_kernel_w = (filter_width - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    out_channel = simplify(in_channel * channel_multiplier)
    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
    # padding stage (spatial axes only; batch and channel are untouched)
    pad_before = [0, pad_top, pad_left, 0]
    pad_after = [0, pad_down, pad_right, 0]
    PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
    # depthconv stage
    idxdiv = tvm.tir.indexdiv
    idxmod = tvm.tir.indexmod
    di = te.reduce_axis((0, filter_height), name="di")
    dj = te.reduce_axis((0, filter_width), name="dj")
    # Output channel c reads input channel c // channel_multiplier and filter
    # multiplier slot c % channel_multiplier.
    Output = te.compute(
        (batch, out_height, out_width, out_channel),
        lambda b, i, j, c: te.sum(
            (
                PaddedInput[
                    b,
                    i * stride_h + di * dilation_h,
                    j * stride_w + dj * dilation_w,
                    idxdiv(c, channel_multiplier),
                ].astype(out_dtype)
                * Filter[
                    tuple(
                        np.array(
                            [di, dj, idxdiv(c, channel_multiplier), idxmod(c, channel_multiplier)]
                        )[kernel_permutation]
                    )
                ].astype(out_dtype)
            ),
            axis=[di, dj],
        ),
        name="DepthwiseConv2d",
        tag="depthwise_conv2d_nhwc",
    )
    return Output
def depthwise_conv2d_backward_input_nhwc(Filter, Out_grad, oshape, ishape, stride, padding):
    """Depthwise convolution nhwc backward wrt input operator.

    The input gradient is computed by dilating the output gradient by the
    forward stride and correlating it with the spatially flipped filter.

    Parameters
    ----------
    Filter : tvm.te.Tensor
        4-D with shape [filter_height, filter_width, in_channel, channel_multiplier]
    Out_grad : tvm.te.Tensor
        4-D with shape [batch, out_height, out_width, out_channel]
    oshape : tuple of four ints
        Shape of the forward output. Kept for interface compatibility; the
        gradient shape is derived from ``ishape`` instead.
    ishape : tuple of four ints
        Shape of the forward input [batch, in_height, in_width, in_channel]
    stride : tuple of two ints
        The spatial stride along height and width
    padding : int or str
        Padding size, or ['VALID', 'SAME']

    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, in_height, in_width, in_channel]
    """
    batch, in_h, in_w, in_c = ishape
    filter_h, filter_w, _, channel_multiplier = Filter.shape
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    # Dilate the output gradient by the forward stride so the backward pass
    # becomes a unit-stride correlation.
    dilated_out_grad = dilate(Out_grad, [1, stride_h, stride_w, 1], name="dilated_out_grad")
    # padding params in forward propagation
    fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
    # padding params in backward propagation
    bpad_top = filter_h - 1 - fpad_top
    bpad_bottom = (filter_h - 1 - fpad_bottom) + (stride_h - 1)
    bpad_left = filter_w - 1 - fpad_left
    bpad_right = (filter_w - 1 - fpad_right) + (stride_w - 1)
    padded_out_grad = pad(
        dilated_out_grad,
        [0, bpad_top, bpad_left, 0],
        [0, bpad_bottom, bpad_right, 0],
        name="padded_out_grad",
    )
    dh = te.reduce_axis((0, filter_h), name="dh")
    dw = te.reduce_axis((0, filter_w), name="dw")
    dc = te.reduce_axis((0, channel_multiplier), name="dc")
    # Correlate with the flipped filter; reduce over the multiplier axis,
    # which fans one input channel out to channel_multiplier outputs.
    In_grad = te.compute(
        (batch, in_h, in_w, in_c),
        lambda b, h, w, c: te.sum(
            padded_out_grad[b, h + dh, w + dw, c * channel_multiplier + dc]
            * Filter[filter_h - 1 - dh, filter_w - 1 - dw, c, dc],
            axis=[dh, dw, dc],
        ),
        tag="depthwise_conv2d_backward_input_nhwc",
    )
    return In_grad
def depthwise_conv2d_backward_weight_nhwc(Input, Out_grad, oshape, fshape, stride, padding):
    """Depthwise convolution nhwc backward wrt weight operator.

    Parameters
    ----------
    Input : tvm.te.Tensor
        4-D with shape [batch, in_height, in_width, in_channel]
    Out_grad : tvm.te.Tensor
        4-D with shape [batch, out_height, out_width, out_channel]
    oshape : tuple of four ints
        Shape of the forward output [batch, out_height, out_width, out_channel]
    fshape : tuple of four ints
        Shape of the filter [filter_height, filter_width, in_channel, channel_multiplier]
    stride : tuple of two ints
        The spatial stride along height and width
    padding : int or str
        Padding size, or ['VALID', 'SAME']

    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [filter_height, filter_width, in_channel, channel_multiplier]
    """
    batch = oshape[0]
    filter_h, filter_w, _, channel_multiplier = fshape
    in_c = Input.shape[3].value
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (filter_h, filter_w))
    padded_in = pad(
        Input, [0, pad_top, pad_left, 0], [0, pad_bottom, pad_right, 0], name="padded_in"
    )
    # Reduce over the batch and every output spatial position.
    dh = te.reduce_axis((0, Out_grad.shape[1].value), name="dh")
    dw = te.reduce_axis((0, Out_grad.shape[2].value), name="dw")
    db = te.reduce_axis((0, batch), name="db")
    idxmod = tvm.tir.indexmod
    Weight_grad = te.compute(
        (filter_h, filter_w, in_c, channel_multiplier),
        lambda fh, fw, c, m: te.sum(
            Out_grad[db, dh, dw, c * channel_multiplier + idxmod(m, channel_multiplier)]
            * padded_in[db, fh + dh * stride_h, fw + dw * stride_w, c],
            axis=[db, dh, dw],
        ),
        tag="depthwise_conv2d_backward_weight_nhwc",
    )
    return Weight_grad
def depthwise_conv2d_NCHWc(
    Input, Filter, stride, padding, dilation, layout, out_layout, out_dtype=None
):
    """Depthwise convolution NCHW[x]c forward operator (generic stub).

    In the NCHWc depthwise convolution, the kernel's in_channel and
    channel_multiplier axes are grouped together before tiling.

    Parameters
    ----------
    Input : tvm.te.Tensor
        5-D with shape [batch, in_channel_chunk, in_height, in_width, in_channel_block]
    Filter : tvm.te.Tensor
        6-D with shape [out_channel_chunk, 1, filter_height, filter_width, 1, out_channel_block]
    stride : tuple of two ints
        The spatial stride along height and width
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    dilation : int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    layout : str
        Input data layout
    out_layout : str
        Output data layout
    out_dtype : str, optional
        Output data type

    Returns
    -------
    Output : tvm.te.Tensor
        5-D with shape [batch, out_channel_chunk, out_height, out_width, out_channel_block]

    Raises
    ------
    ValueError
        Always; a target-specific implementation must be registered.
    """
    # There is no generic implementation: each target registers its own
    # strategy for this operator.
    raise ValueError("missing register for topi.nn.depthwise_conv2d_NCHWc")
@tvm.target.generic_func
def depthwise_conv2d_infer_layout(workload, cfg):
    """Infer input/output shapes and layouts from a workload and cfg.

    Generic function: this default body only raises, and targets register
    their own implementation via ``tvm.target.generic_func``.

    Parameters
    ----------
    workload : tuple
        conv2d workload
    cfg : tuple
        tvm.autotvm config

    Returns
    -------
    Output : [tuple of tuple and str, tuple of tuple and str]
        Input shapes and layouts, and output shapes and layouts
    """
    raise ValueError("missing register for topi.nn.depthwise_conv2d_infer_layout")
| 15,625 | 31.554167 | 99 | py |
tvm | tvm-main/python/tvm/topi/nn/winograd_util.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
""" Utility functions for implementing Winograd convolutions
[*] Fast Algorithms for Convolutional Neural Networks
Andrew Lavin, Scott Gray
https://arxiv.org/abs/1509.09308
https://github.com/andravin/wincnn
"""
from operator import mul
from functools import reduce
import numpy as np
from tvm.contrib.pickle_memoize import memoize
from ..utils import const_matrix
# pylint: disable=invalid-name
def _cook_toom_convolution(a, n, r):
    """Compute Cook-Toom convolution A,B,G matrices.

    Builds the Winograd transform matrices for F(n, r) from the
    interpolation points ``a`` (needs at least n + r - 2 points).

    Parameters
    ----------
    a : numpy.ndarray
        Interpolation points.
    n : int
        Output tile size.
    r : int
        Filter size.

    Returns
    -------
    (A, B, G) : tuple of numpy.ndarray
        Inverse transform A, data transform B, and filter transform G.
    """
    def _F_m(a, n):
        # Diagonal matrix of pairwise point-difference products, extended by
        # an extra zero column and a final unit row.
        f = lambda j, i: reduce(mul, ((a[i] - a[k] if k != i else 1) for k in range(0, n - 1)), 1)
        F = np.fromfunction(np.vectorize(f), (1, n - 1), dtype=int)
        F = np.diagflat(F)
        F = np.append(F, np.zeros((n - 1, 1), dtype=int), axis=1)
        f = lambda i, j: (1 if j == (n - 1) else 0)
        z = np.fromfunction(np.vectorize(f), (1, n), dtype=int)
        return np.append(F, z, axis=0)
    def _A_m(a, m, n):
        # Vandermonde-style matrix a[i] ** j with a final unit row.
        f = lambda i, j: a[i] ** j
        A = np.fromfunction(np.vectorize(f), (m - 1, n), dtype=int)
        f = lambda i, j: (1 if j == (n - 1) else 0)
        z = np.fromfunction(np.vectorize(f), (1, n), dtype=int)
        return np.append(A, z, axis=0)
    def _B_m(a, n):
        f = lambda j, i: reduce(mul, ((a[i] - a[k] if k != i else 1) for k in range(0, n - 1)), 1)
        Ff = np.fromfunction(np.vectorize(f), (1, n - 1), dtype=int)
        # Polynomial coefficients built from np.poly1d products over the
        # points, scaled by the difference products Ff.
        f = (
            lambda i, nth: (
                reduce(mul, [(np.poly1d([1, -a[k]]) if k != i else 1) for k in range(0, n - 1)], 1)
            ).coef[n - 1 - nth - 1]
            / Ff[0, i]
        )
        F = np.fromfunction(np.vectorize(f), (n - 1, n - 1), dtype=int)
        f = lambda i, j: -a[i] ** (n - 1)
        t = np.fromfunction(np.vectorize(f), (n - 1, 1), dtype=int)
        T = np.append(np.eye(n - 1), t, axis=1)
        return np.append(F.T.dot(T), np.array([np.eye(n)[n - 1]]), axis=0)
    alpha = n + r - 1
    f = _F_m(a, alpha)
    # Flip the sign of the first row when the pivot is negative —
    # presumably for numerical consistency of the derived G below.
    if f[0, 0] < 0:
        f[0, :] *= -1
    A = _A_m(a, alpha, n)
    G = _A_m(a, alpha, r).T
    G = G.dot(np.linalg.inv(f)).T
    B = _B_m(a, alpha)
    B = B.dot(f.T)
    return (A, B, G)
def _interpolation_points(degree):
    """Return the interpolation points proposed for a given ``degree``.

    The tables come from [1] and were chosen to minimize the numerical error
    of the resulting Winograd transform.

    [1] Error Analysis and Improving the Accuracy of Winograd Convolution
        for Deep Neural Networks
        Barbara Barabasz, Andrew Anderson, Kirk M. Soodhalter, David Gregg
        https://arxiv.org/abs/1803.10986
    """
    assert 2 < degree < 18
    # Entry at index d-1 holds the points for degree d; the error figures in
    # the comments are taken from [1].
    # pylint: disable=bad-whitespace,line-too-long
    point_table = (
        (),  # {invalid}
        (),  # 01 {E=4.63E-08 on conv2d [1]}
        (0, -1, 1),  # 02 {E=7.65E-08 on F( 2,3) [1]}
        (0, -1, 1, 1 / 2),  # 03 {E=2.35E-07 on F( 3,3) [1]}
        (0, -1, 1, 1 / 2, -2),  # 04 {E=3.29E-07 on F( 4,3) [1]}
        (0, -1, 1, 1 / 2, -2, -1 / 2),  # 05 {E=6.81E-07 on F( 5,3) [1]}
        (0, -1, 1, 1 / 2, -1 / 2, 2, -2),  # 06 {E=8.79E-07 on F( 6,3) [1]}
        (0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4),  # 07 {E=3.71E-06 on F( 7,3) [1]}
        (0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 4),  # 08 {E=7.35E-06 on F( 8,3) [1]}
        (0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 3 / 4, -4 / 3),  # 09 {E=2.20E-05 on F( 9,3) [1]}
        (0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 4, 3 / 4, -4 / 3),  # 10 {E=3.22E-05 on F(10,3) [1]}
        (0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 4, 3 / 4, -4 / 3, 1 / 4),  # 11 {E=1.09E-04 on F(11,3) [1]}
        (0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 4, 1 / 4, -3 / 4, 4 / 3, -4),  # 12 {E=1.99E-04 on F(12,3) [1]}
        (0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 4, 1 / 4, -3 / 4, 4 / 3, 3 / 4, -4 / 3),  # 13 {E=5.54E-04 on F(13,3) [1]}
        (0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 4, 1 / 4, -3 / 4, 4 / 3, -4, 3 / 4, -4 / 3),  # 14 {E=8.80E-04 on F(14,3) [1]}
        (0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 4, 1 / 4, -3 / 4, 4 / 3, -4, 2 / 3, -3 / 2, 3 / 2),  # 15 {E=1.07E-02 on F(15,3) [1]}
        (0, -1, 1, 1 / 2, -1 / 2, 2, -2, -1 / 4, 4, 1 / 4, -3 / 4, 4 / 3, -4, 2 / 3, -3 / 2, -2 / 3, 3 / 2),  # 16 {E=1.93E-02 on F(16,3) [1]}
    )
    # pylint: enable=bad-whitespace,line-too-long
    return np.array(point_table[degree - 1], dtype=np.float64)
@memoize("topi.nn.winograd_matrices", save_at_exit=False)
def winograd_transform_matrices(tile_size, kernel_size, out_dtype):
    """Compute the A, B, and G transform matrices for `tile_size` as a `tvm.Expr`.

    Parameters
    ----------
    tile_size : int
        Output tile size m of the Winograd transform F(m, r); 1 < tile_size < 9.
    kernel_size : int
        Kernel size r; 2 < kernel_size < 8.
    out_dtype : str
        Element dtype of the returned constant matrices.

    Returns
    -------
    (A, B, G) : tuple
        Constant matrices for the inverse, data, and filter transforms.

    Raises
    ------
    ValueError
        If tile_size or kernel_size is outside the supported range.
    """
    if not 1 < tile_size < 9:
        raise ValueError(f"Unsupported tile size for Winograd: {tile_size}")
    if not 2 < kernel_size < 8:
        raise ValueError(f"Unsupported kernel size for Winograd: {kernel_size}")
    degree = tile_size + kernel_size - 2
    intp_pts = _interpolation_points(degree)
    A_data, B_data, G_data = _cook_toom_convolution(intp_pts, tile_size, kernel_size)
    # NOTE(review): bfloat16 is substituted by "uint16" before the astype —
    # confirm const_matrix handles the intended bfloat16 representation.
    out_dtype = "uint16" if out_dtype == "bfloat16" else out_dtype
    return (
        const_matrix(A_data.astype(out_dtype), "A"),
        const_matrix(B_data.astype(out_dtype), "B"),
        const_matrix(G_data.astype(out_dtype), "G"),
    )
| 6,243 | 34.078652 | 100 | py |
tvm | tvm-main/python/tvm/topi/nn/deformable_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
"""Deformable Conv2D operators"""
import tvm
from tvm import te
from .utils import get_pad_tuple
from ..utils import get_const_tuple
from ..cpp.utils import bilinear_sample_nchw, bilinear_sample_nhwc
def deformable_conv2d_nchw(
    data, offset, kernel, strides, padding, dilation, deformable_groups, groups, out_dtype
):
    """Deformable conv2D operator in NCHW layout.

    The deformable convolution operation is described in https://arxiv.org/abs/1703.06211

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    offset : tvm.te.Tensor
        4-D with shape [batch, deformable_groups * filter_height * filter_width * 2,
        out_height, out_width].
    kernel : tvm.te.Tensor
        4-D with shape [num_filter, in_channel, filter_height, filter_width]
    strides : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of two ints
        padding size, or [pad_height, pad_width]
    dilation : int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    deformable_groups : int
        number of deformable groups
    groups : int
        number of groups; only 1 is supported
    out_dtype : str or None
        Output dtype; defaults to the input dtype when None.

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    if out_dtype is None:
        out_dtype = data.dtype
    if isinstance(strides, int):
        stride_h = stride_w = strides
    else:
        stride_h, stride_w = strides
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    batch, in_channel, in_height, in_width = get_const_tuple(data.shape)
    out_channel, channel, kernel_h, kernel_w = get_const_tuple(kernel.shape)
    _, _, out_height, out_width = get_const_tuple(offset.shape)
    # Message typo fixed ("cahnnels" -> "channels").
    assert in_channel % deformable_groups == 0, "Input channels must divide deformable group size"
    assert groups == 1, "deformable_conv2d_nchw does not support groups > 1"
    ic_per_dgroup = channel // deformable_groups
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, _, _ = get_pad_tuple(padding, (dilated_kernel_h, dilated_kernel_w))
    rc = te.reduce_axis((0, in_channel), name="rc")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    zero = tvm.tir.const(0.0, data.dtype)
    def _bilinear(n, c, h, w):
        # Samples that fall outside the input plane contribute zero.
        outside = tvm.tir.any(h < 0, w < 0, h >= in_height, w >= in_width)
        val = bilinear_sample_nchw(data, (n, c, h, w), in_height - 1, in_width - 1)
        return tvm.tir.if_then_else(outside, zero, val)
    # Gather the deformed input: each kernel tap is displaced by a learned
    # (y, x) offset pair stored interleaved along offset's channel axis.
    data_deform = te.compute(
        (batch, in_channel, kernel_h, kernel_w, out_height, out_width),
        lambda n, c, kh, kw, y, x: _bilinear(
            n,
            c,
            y * stride_h
            - pad_top
            + kh * dilation_h
            + offset[
                n, c // ic_per_dgroup * (kernel_w * kernel_h * 2) + (kh * kernel_w + kw) * 2, y, x
            ],
            x * stride_w
            - pad_left
            + kw * dilation_w
            + offset[
                n,
                c // ic_per_dgroup * (kernel_w * kernel_h * 2) + (kh * kernel_w + kw) * 2 + 1,
                y,
                x,
            ],
        ),
        tag="data_deform",
    )
    # Ordinary convolution over the deformed gather.
    return te.compute(
        (batch, out_channel, out_height, out_width),
        lambda n, f, y, x: te.sum(
            data_deform[n, rc, ry, rx, y, x].astype(out_dtype)
            * kernel[f, rc, ry, rx].astype(out_dtype),
            axis=[rc, ry, rx],
        ),
        tag="deformable_conv2d_nchw",
    )
def deformable_conv2d_nhwc(
    data, offset, kernel, strides, padding, dilation, deformable_groups, groups, out_dtype
):
    """Deformable conv2D operator in NHWC layout.

    The deformable convolution operation is described in https://arxiv.org/abs/1703.06211

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_height, in_width, in_channel]
    offset : tvm.te.Tensor
        4-D with shape [batch, out_height, out_width,
        deformable_groups * filter_height * filter_width * 2].
    kernel : tvm.te.Tensor
        4-D with shape [filter_height, filter_width, in_channel, num_filter]
    strides : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of two ints
        padding size, or [pad_height, pad_width]
    dilation : int or a list/tuple of two ints
        dilation size, or [dilation_height, dilation_width]
    deformable_groups : int
        number of deformable groups
    groups : int
        number of groups; only 1 is supported
    out_dtype : str or None
        Output dtype; defaults to the input dtype when None.

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_height, out_width, out_channel]
    """
    if out_dtype is None:
        out_dtype = data.dtype
    if isinstance(strides, int):
        stride_h = stride_w = strides
    else:
        stride_h, stride_w = strides
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    batch, in_height, in_width, in_channel = get_const_tuple(data.shape)
    kernel_h, kernel_w, channel, out_channel = get_const_tuple(kernel.shape)
    _, out_height, out_width, _ = get_const_tuple(offset.shape)
    # Message typos fixed ("cahnnels", and wrong function name "nchw").
    assert in_channel % deformable_groups == 0, "Input channels must divide deformable group size"
    assert groups == 1, "deformable_conv2d_nhwc does not support groups > 1"
    ic_per_dgroup = channel // deformable_groups
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, _, _ = get_pad_tuple(padding, (dilated_kernel_h, dilated_kernel_w))
    rc = te.reduce_axis((0, in_channel), name="rc")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    zero = tvm.tir.const(0.0, data.dtype)
    def _bilinear(n, h, w, c):
        # Samples that fall outside the input plane contribute zero.
        outside = tvm.tir.any(h < 0, w < 0, h >= in_height, w >= in_width)
        val = bilinear_sample_nhwc(data, (n, h, w, c), in_height - 1, in_width - 1)
        return tvm.tir.if_then_else(outside, zero, val)
    # Gather the deformed input: each kernel tap is displaced by a learned
    # (y, x) offset pair stored interleaved along offset's last axis.
    data_deform = te.compute(
        (batch, kernel_h, kernel_w, in_channel, out_height, out_width),
        lambda n, kh, kw, c, y, x: _bilinear(
            n,
            y * stride_h
            - pad_top
            + kh * dilation_h
            + offset[
                n, y, x, c // ic_per_dgroup * (kernel_w * kernel_h * 2) + (kh * kernel_w + kw) * 2
            ],
            x * stride_w
            - pad_left
            + kw * dilation_w
            + offset[
                n,
                y,
                x,
                c // ic_per_dgroup * (kernel_w * kernel_h * 2) + (kh * kernel_w + kw) * 2 + 1,
            ],
            c,
        ),
        tag="data_deform",
    )
    # Ordinary convolution over the deformed gather.
    return te.compute(
        (batch, out_height, out_width, out_channel),
        lambda n, y, x, f: te.sum(
            data_deform[n, ry, rx, rc, y, x].astype(out_dtype)
            * kernel[ry, rx, rc, f].astype(out_dtype),
            axis=[ry, rx, rc],
        ),
        tag="deformable_conv2d_nhwc",
    )
| 8,238 | 33.186722 | 98 | py |
tvm | tvm-main/python/tvm/topi/nn/mapping.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long
"""Operators of one-to-one-mapping on the first input"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from .. import tag
@tvm.te.tag_scope(tag=tag.BROADCAST)
def scale_shift_nchw(Input, Scale, Shift):
    """Batch normalization operator in inference (NCHW layout).

    Applies ``Input * Scale + Shift`` element-wise, with the scale and shift
    broadcast along the channel axis.

    Parameters
    ----------
    Input : tvm.te.Tensor
        4-D input tensor, NCHW layout [batch, channel, height, width]
    Scale : tvm.te.Tensor
        Scale tensor, 1-D of size channel number
    Shift : tvm.te.Tensor
        Shift tensor, 1-D of size channel number

    Returns
    -------
    Output : tvm.te.Tensor
        Output tensor, layout is NCHW
    """
    def _affine(n, ch, y, x):
        # The channel index selects the per-channel affine parameters.
        return Input[n, ch, y, x] * Scale[ch] + Shift[ch]
    return te.compute(Input.shape, _affine, name="ScaleShift")
@tvm.te.tag_scope(tag=tag.BROADCAST)
def scale_shift_nhwc(Input, Scale, Shift):
    """Batch normalization operator in inference (NHWC layout).

    Applies ``Input * Scale + Shift`` element-wise, with the scale and shift
    broadcast along the channel axis.

    Parameters
    ----------
    Input : tvm.te.Tensor
        4-D input tensor, NHWC layout [batch, height, width, channel]
    Scale : tvm.te.Tensor
        Scale tensor, 1-D of size channel number
    Shift : tvm.te.Tensor
        Shift tensor, 1-D of size channel number

    Returns
    -------
    Output : tvm.te.Tensor
        Output tensor, layout is NHWC
    """
    def _affine(n, y, x, ch):
        # The channel index selects the per-channel affine parameters.
        return Input[n, y, x, ch] * Scale[ch] + Shift[ch]
    return te.compute(Input.shape, _affine, name="ScaleShift")
@tvm.te.tag_scope(tag=tag.BROADCAST)
def scale_shift_nchwc(Input, Scale, Shift):
    """Batch normalization operator in inference (NCHWc layout).

    Applies ``Input * Scale + Shift`` element-wise; the per-channel
    parameters are indexed by (channel_chunk, channel_block).

    Parameters
    ----------
    Input : tvm.te.Tensor
        5-D input tensor, NCHWc layout [batch, channel_chunk, height, width, channel_block]
    Scale : tvm.te.Tensor
        Scale tensor, 2-D of size [channel_chunk, channel_block]
    Shift : tvm.te.Tensor
        Shift tensor, 2-D of size [channel_chunk, channel_block]

    Returns
    -------
    Output : tvm.te.Tensor
        Output tensor, layout is NCHWc
    """
    def _affine(n, chunk, y, x, blk):
        # Chunk and block together address one logical channel.
        return Input[n, chunk, y, x, blk] * Scale[chunk, blk] + Shift[chunk, blk]
    return te.compute(Input.shape, _affine, name="ScaleShift")
| 2,987 | 28.88 | 98 | py |
tvm | tvm-main/python/tvm/topi/nn/pad.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Pad the data by constant value """
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from .. import tag
from ..utils import equal_const_int
@tvm.te.tag_scope(tag=tag.INJECTIVE + ",pad")
def pad(data, pad_before, pad_after=None, pad_value=0.0, name="PadInput", attrs=None):
    """Pad Input with a constant value.

    Parameters
    ----------
    data : tvm.te.Tensor
        n-D input, can be any layout.
    pad_before : list / tuple of n ints
        Pad width on each dimension to pad the before the axis begin.
    pad_after : list / tuple of n ints, optional
        Pad width each dimension to pad the after the axis end.
        Defaults to ``pad_before`` (symmetric padding).
    pad_value : float or tvm.tir.PrimExpr, optional
        The value to be padded.
    name : str, optional
        The name prefix operators generated
    attrs : dict, optional
        Extra attributes attached to the generated compute op.

    Returns
    -------
    Output : tvm.te.Tensor
        n-D, the same layout as Input.

    Raises
    ------
    ValueError
        If ``pad_before`` or ``pad_after`` does not match the input rank.
    """
    n = len(data.shape)
    pad_after = pad_after if pad_after else pad_before
    # Message typo fixed ("dismatch" -> "mismatch").
    if len(pad_before) != n:
        raise ValueError(f"Input dimension and pad_before mismatch : {n} vs {len(pad_before)}")
    if len(pad_after) != n:
        raise ValueError(f"Input dimension and pad_after mismatch : {n} vs {len(pad_after)}")
    ana = tvm.arith.Analyzer()
    dshape = []
    for dim in data.shape:
        if isinstance(dim, tvm.tir.Any):
            # NOTE(review): every dynamic extent is replaced by a size_var
            # named "dim" — confirm downstream passes tolerate duplicates.
            dshape.append(tvm.te.size_var("dim"))
        else:
            dshape.append(dim)
    out_shape = tuple(ana.simplify(dshape[i] + pad_before[i] + pad_after[i]) for i in range(n))
    # Promote a plain Python number to a typed constant.
    pad_value = (
        pad_value
        if isinstance(pad_value, tvm.tir.PrimExpr)
        else tvm.tir.const(pad_value, data.dtype)
    )
    def _pad(*indices):
        not_zero = []
        index_tuple = []
        for i in range(n):
            if equal_const_int(pad_before[i], 0) and equal_const_int(pad_after[i], 0):
                # Unpadded dimension: pass the index straight through.
                index_tuple.append(indices[i])
            else:
                index_tuple.append(indices[i] - pad_before[i])
                not_zero.append(indices[i] >= pad_before[i])
                not_zero.append(indices[i] < data.shape[i] + pad_before[i])
        if not_zero:
            not_zero = tvm.tir.all(*not_zero)
            return tvm.tir.if_then_else(not_zero, data(*index_tuple), pad_value)
        # No dimension is padded at all: identity read.
        return data(*index_tuple)
    return te.compute(out_shape, _pad, name=name, attrs=attrs)
@tvm.te.tag_scope(tag=tag.INJECTIVE + ",pad")
def mirror_pad(data, pad_before, pad_after=None, mode="SYMMETRIC", name="MirrorPadInput"):
    """Pad Input with mirroring either symmetric or reflected.

    Parameters
    ----------
    data : tvm.te.Tensor
        n-D input, can be any layout.
    pad_before : list / tuple of n ints
        Pad width on each dimension to pad the before the axis begin.
    pad_after : list / tuple of n ints, optional
        Pad width each dimension to pad the after the axis end.
        Defaults to ``pad_before`` (symmetric padding).
    mode: str, optional
        Type of mirror padding to apply. Must be SYMMETRIC or REFLECT.
        SYMMETRIC repeats the edge element; REFLECT does not.
    name : str, optional
        The name prefix operators generated

    Returns
    -------
    Output : tvm.te.Tensor
        n-D, the same layout as Input.

    Raises
    ------
    ValueError
        If ``pad_before`` or ``pad_after`` does not match the input rank.
    """
    n = len(data.shape)
    pad_after = pad_after if pad_after else pad_before
    # Message typo fixed ("dismatch" -> "mismatch").
    if len(pad_before) != n:
        raise ValueError(f"Input dimension and pad_before mismatch : {n} vs {len(pad_before)}")
    if len(pad_after) != n:
        raise ValueError(f"Input dimension and pad_after mismatch : {n} vs {len(pad_after)}")
    ana = tvm.arith.Analyzer()
    out_shape = tuple(ana.simplify(data.shape[i] + pad_before[i] + pad_after[i]) for i in range(n))
    assert mode in ("SYMMETRIC", "REFLECT")
    # Encode the mode as 1 (SYMMETRIC) or 0 (REFLECT) so the index
    # arithmetic below handles both with a single offset.
    mode = int(mode == "SYMMETRIC")
    def _pad(*indices):
        index_tuple = []
        above = []
        below = []
        for i in range(n):
            if equal_const_int(pad_before[i], 0) and equal_const_int(pad_after[i], 0):
                index_tuple.append(indices[i])
                above.append(False)
                below.append(False)
            else:
                index_tuple.append(indices[i] - pad_before[i])
                above.append(indices[i] >= data.shape[i] + pad_before[i])
                below.append(indices[i] < pad_before[i])
        mapped_tuple = []
        for i, axis in enumerate(index_tuple):
            # Reflect indices that fall below the start or above the end of
            # the original extent back into range.
            mapped_axis = tvm.tir.if_then_else(below[i], -axis - mode, axis)
            mapped_axis = tvm.tir.if_then_else(
                above[i], (2 * (data.shape[i] - 1)) - axis + mode, mapped_axis
            )
            mapped_tuple.append(mapped_axis)
        return data(*mapped_tuple)
    return te.compute(out_shape, _pad, name=name)
| 5,423 | 34.92053 | 99 | py |
tvm | tvm-main/python/tvm/topi/nn/bitserial_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
"""Bitserial Dense operator."""
from __future__ import absolute_import
import tvm
from tvm import te
from tvm.topi.utils import get_const_tuple
from .bitserial_util import bitpack
def bitserial_dense(
    data, weight, data_bits, weight_bits, pack_dtype="uint32", out_dtype="int16", unipolar=True
):
    """The default implementation of bitserial dense in topi.

    Parameters
    ----------
    data : tvm.te.Tensor
        2-D with shape [batch, in_dim]
    weight : tvm.te.Tensor
        2-D with shape [out_dim, in_dim] or
        3-D with shape [out_dim, weight_bits, in_dim] (already bit-packed)
    data_bits : int
        Number of bits used to quantize the activations.
    weight_bits : int
        Number of bits used to quantize the weights.
    pack_dtype : str, optional
        Integer dtype the bits are packed into.
    out_dtype : str, optional
        Accumulation / output dtype.
    unipolar : bool, optional
        If True, use the bipolar (+/-1) popcount formulation; otherwise a
        plain AND-popcount.

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [batch, out_dim]
    """
    data_packed = bitpack(data, data_bits, pack_axis=1, bit_axis=1, pack_type=pack_dtype)
    # A 2-D weight still needs bit-packing; a 3-D weight is already packed.
    if len(weight.shape) == 2:
        weight_packed = bitpack(weight, weight_bits, pack_axis=1, bit_axis=1, pack_type=pack_dtype)
    else:
        weight_packed = weight
    Y, DB, K = get_const_tuple(data_packed.shape)
    X, WB, _ = get_const_tuple(weight_packed.shape)
    oshape = (Y, X)
    k = te.reduce_axis((0, K), name="k")
    db = te.reduce_axis((0, DB), name="db")
    wb = te.reduce_axis((0, WB), name="wb")
    # Build only the compute stage that is actually returned (the original
    # constructed both variants unconditionally and discarded one).
    if unipolar:
        # popcount(w & d) - popcount(~w & d) realizes a +/-1 dot product;
        # each partial term is shifted by its combined bit plane (db + wb).
        return te.compute(
            oshape,
            lambda i, j: te.sum(
                (
                    tvm.tir.popcount(weight_packed[j, wb, k] & data_packed[i, db, k])
                    - tvm.tir.popcount(~weight_packed[j, wb, k] & data_packed[i, db, k])
                ).astype(out_dtype)
                << (db + wb).astype(out_dtype),
                axis=[wb, db, k],
            ),
            tag="bitserial_dense_unipolar",
        )
    return te.compute(
        oshape,
        lambda i, j: te.sum(
            tvm.tir.popcount(weight_packed[j, wb, k] & data_packed[i, db, k]).astype(out_dtype)
            << (db + wb).astype(out_dtype),
            axis=[wb, db, k],
        ),
        tag="bitserial_dense",
    )
| 2,830 | 33.52439 | 99 | py |
tvm | tvm-main/python/tvm/topi/nn/bnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Binary Neural Network (BNN) Operators"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from .. import tag
from ..utils import simplify, get_const_int
def binarize_pack(data, axis=None, name="PackedInput"):
    """Binarization and bit-packing along a certain axis.

    Each group of 32 consecutive elements along ``axis`` is mapped to one
    uint32 whose bits are the signs (>= 0) of the elements, most significant
    bit first.

    Parameters
    ----------
    data : tvm.te.Tensor
        n-D input, can be any layout.
    axis : None or int
        The axis along which to do binarization and bit-packing,
        default is the last axis. Its extent must be a multiple of 32.
    name : str, optional
        The name prefix operators generate.

    Returns
    -------
    output : tvm.te.Tensor
        n-D, the same layout as input, dtype is uint32.
    """
    ishape = data.shape
    if axis is None:
        axis = len(ishape) - 1
    assert get_const_int(ishape[axis]) % 32 == 0
    n = len(ishape)
    oshape = tuple(simplify(ishape[i] // 32) if i == axis else ishape[i] for i in range(n))
    def _binarize_pack(*indices):
        # Each output element covers 32 input elements along `axis`.
        start_idx = [indices[i] * 32 if i == axis else indices[i] for i in range(n)]
        packed = tvm.tir.const(0, "uint32")
        for j in range(32):
            idx = [start_idx[i] + j if i == axis else start_idx[i] for i in range(n)]
            sign = (data(*idx) >= 0).astype("uint32")
            packed = packed | sign
            if j == 31:
                return packed
            packed = packed << 1
        # Unreachable: the loop always returns at j == 31.
        # (Message typo fixed: was "not resach".)
        raise RuntimeError("not reached")
    return te.compute(oshape, _binarize_pack, name=name, tag="binarize_pack")
def binary_dense(data, weight):
    """Binary matrix multiplication using xor and bit-count.

    Both operands are bit-packed uint32 tensors (see ``binarize_pack``).
    The popcount of ``data ^ weight`` counts disagreeing bits; the final
    compute maps that back to a +/-1 dot product over the unpacked bits.

    Parameters
    ----------
    data : tvm.te.Tensor
        2-D with shape [batch, in_dim], dtype is uint32.

    weight : tvm.te.Tensor
        2-D with shape [out_dim, in_dim], dtype is uint32.

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [batch, out_dim], dtype is float32.
    """
    assert (
        data.dtype == "uint32" and weight.dtype == "uint32"
    ), "dtype of data and weight should be uint32"
    assert len(data.shape) == 2 and len(weight.shape) == 2, "only support 2-dim binary dense"
    batch, in_dim = data.shape
    out_dim, _ = weight.shape
    reduce_k = te.reduce_axis((0, in_dim), name="k")
    # Number of differing bits between each data row and weight row.
    mismatch_count = te.compute(
        (batch, out_dim),
        lambda i, j: te.sum(tvm.tir.popcount(data[i, reduce_k] ^ weight[j, reduce_k]), axis=reduce_k),
        tag="binary_dense",
    )
    # 32*in_dim total bits; each mismatch contributes -1 and each match +1,
    # hence matches - mismatches = total - 2*mismatches.
    return te.compute(
        (batch, out_dim),
        lambda i, j: 32 * in_dim - 2.0 * mismatch_count(i, j),
        tag=tag.ELEMWISE,
    )
| 3,325 | 32.59596 | 93 | py |
tvm | tvm-main/python/tvm/topi/sparse/csrmv.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM operator compute SpMV in CSR format."""
from __future__ import absolute_import
import tvm
from tvm import te
from .. import tag
from ...tir.generic import cast
def csrmv_default(data, indices, indptr, weight, bias=None):
    """The default implementation of csrmv in topi.

    Computes ``out = A * weight (+ bias)`` where ``A`` is an m-by-k sparse
    matrix described by the CSR triplet (``data``, ``indices``, ``indptr``).

    Parameters
    ----------
    data : tvm.te.Tensor
        1-D with shape [nonzeros]; the nonzero values of ``A``.

    indices : tvm.te.Tensor
        1-D with shape [nonzeros]; column index of each nonzero.

    indptr : tvm.te.Tensor
        1-D with shape [m+1]; CSR row-pointer array.

    weight : tvm.te.Tensor
        2-D with shape [k, 1]

    bias : tvm.te.Tensor, optional
        1-D with shape [1]
        NOTE(review): the bias-add below indexes ``bias[i]`` (one entry per
        output row), which suggests shape [m] rather than [1] — confirm
        against callers.

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [m, 1]
    """
    assert len(data.shape) == 1 and len(weight.shape) == 2, "only support 2-dim csrmv"
    assert isinstance(
        weight, te.tensor.Tensor
    ), f"weight matrix is assumed to be tvm.te.Tensor, but weight is `{type(weight)}`"
    assert (
        data.dtype == weight.dtype
    ), f"Data and weight must have the same dtype, but they have {data.dtype} and {weight.dtype}"
    if bias is not None:
        assert len(bias.shape) == 1
    # CSR indptr has m+1 entries, so the row count is len(indptr) - 1.
    batch = indptr.shape[0] - 1
    def csrmv_default_ir(data, indices, indptr, weight, out):
        """define ir for csrmv"""
        irb = tvm.tir.ir_builder.create()
        data_ptr = irb.buffer_ptr(data)
        indices_ptr = irb.buffer_ptr(indices)
        indptr_ptr = irb.buffer_ptr(indptr)
        weight_ptr = irb.buffer_ptr(weight)
        out_ptr = irb.buffer_ptr(out)
        num_rows = indptr.shape[0] - 1
        # One parallel iteration per sparse row of A.
        with irb.for_range(0, num_rows, kind="parallel", name="row") as row:
            dot = irb.allocate(data.dtype, (1,), name="dot", scope="local")
            out_ptr[row] = cast(0, data.dtype)
            dot[0] = cast(0, data.dtype)
            row_start = indptr_ptr[row]
            row_end = indptr_ptr[row + 1]
            row_elems = row_end - row_start
            # Accumulate value * weight[column] over this row's nonzeros.
            with irb.for_range(0, row_elems, name="elemidx") as elemidx:
                elem = row_start + elemidx
                dot[0] += data_ptr[elem] * weight_ptr[indices_ptr[elem]]
            out_ptr[row] += dot[0]
        return irb.get()
    oshape = (batch, 1)
    matmul = te.extern(
        oshape,
        [data, indices, indptr, weight],
        lambda ins, outs: csrmv_default_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
        tag="csrmv",
        dtype=data.dtype,
        name="csrmv",
    )
    if bias is not None:
        # Elementwise bias add per output row.
        matmul = te.compute((batch, 1), lambda i, j: matmul[i, 0] + bias[i], tag=tag.BROADCAST)
    return matmul
def csrmv(a, x, y=None):
    """Sparse matrix-vector multiply, :math:`y := A*x + y`.

    `x` and `y` are dense; `A` is an m-by-k sparse matrix in the CSR format.

    Parameters
    ----------
    a : tvm.contrib.sparse.CSRNDArray
        2-D sparse matrix with shape [m, k]

    x : tvm.te.Tensor
        2-D dense matrix with shape [k, 1]

    y : tvm.te.Tensor, optional
        1-D dense vector with shape [1]

    Returns
    -------
    output : tvm.te.Tensor
        2-D dense matrix with shape [m, 1]
    """
    # Unpack the CSR triplet and delegate to the generic implementation.
    values, col_indices, row_ptr = a.data, a.indices, a.indptr
    return csrmv_default(values, col_indices, row_ptr, x, y)
| 3,990 | 32.822034 | 97 | py |
tvm | tvm-main/python/tvm/topi/sparse/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM operator compute Dense in CSR format."""
from __future__ import absolute_import
import tvm
from tvm import te
from .. import tag
from ..utils import simplify
def dense_si(data, indices, indptr, weight, bias=None):
    # pylint: disable=invalid-name
    """The implementation of dense in topi, assuming sparse input.

    Computes a dense layer where the M-by-K input matrix is given in CSR
    form (``data``/``indices``/``indptr``) and the weight is dense.

    Parameters
    ----------
    data : tvm.te.Tensor
        1-D with shape [num_nonzeros]

    indices : tvm.te.Tensor
        1-D with shape [num_nonzeros]

    indptr : tvm.te.Tensor
        1-D with shape [m+1]

    weight : tvm.te.Tensor
        2-D with shape [k, n]
        NOTE(review): the IR below unpacks ``N, K = weight.shape`` and
        indexes ``weight_ptr[col + n * K]``, i.e. it reads weight as a
        row-major [out_dim, in_dim] matrix — confirm the [k, n] shape
        stated here against callers.

    bias : tvm.te.Tensor, optional
        1-D with shape [m]
        NOTE(review): the bias-add below uses ``bias[j]`` (one entry per
        output column), suggesting shape [n] — confirm.

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [m, n]
    """
    assert (
        len(data.shape) == 1
        and len(indices.shape) == 1
        and len(indptr.shape) == 1
        and len(weight.shape) == 2
    ), "only support 2-dim dense"
    assert isinstance(
        weight, te.tensor.Tensor
    ), f"weight matrix is assumed to be tvm.te.Tensor, but weight is `{type(weight)}`"
    if bias is not None:
        assert len(bias.shape) == 1
    dtype = data.dtype
    # CSR indptr has m+1 entries, so the sparse row count is len(indptr) - 1.
    M = simplify(indptr.shape[0] - 1)
    N, _ = weight.shape
    def dense_default_ir(data, indices, indptr, weight, out):
        """Define IR for Dense"""
        dtype = data.dtype
        irb = tvm.tir.ir_builder.create()
        data_ptr = irb.buffer_ptr(data)
        indices_ptr = irb.buffer_ptr(indices)
        indptr_ptr = irb.buffer_ptr(indptr)
        weight_ptr = irb.buffer_ptr(weight)
        out_ptr = irb.buffer_ptr(out)
        M = simplify(indptr.shape[0] - 1)
        N, K = weight.shape
        # Output columns are vectorized; sparse rows run in parallel.
        with irb.for_range(0, N, kind="vectorize", name="n") as n:
            with irb.for_range(0, M, kind="parallel", name="m") as m:
                dot = irb.allocate(dtype, (1,), name="dot", scope="local")
                out_ptr[m * N + n] = tvm.tir.const(0, dtype)
                dot[0] = tvm.tir.const(0, dtype)
                row_start = indptr_ptr[m]
                row_elems = indptr_ptr[m + 1] - row_start
                # Accumulate over the nonzeros of sparse row m.
                with irb.for_range(0, row_elems, name="k") as k:
                    elem = row_start + k
                    dot[0] += data_ptr[elem] * weight_ptr[indices_ptr[elem] + n * K]
                out_ptr[m * N + n] += dot[0]
        return irb.get()
    oshape = (M, N)
    matmul = te.extern(
        oshape,
        [data, indices, indptr, weight],
        lambda ins, outs: dense_default_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
        tag="dense",
        dtype=dtype,
        name="out",
    )
    if bias is not None:
        # Broadcast-add bias along the rows of the output.
        matmul = te.compute(oshape, lambda i, j: matmul[i, j] + bias[j], tag=tag.BROADCAST)
    return matmul
def dense_sw(data, w_data, w_indices, w_indptr, bias=None):
    # pylint: disable=invalid-name
    """The implementation of dense in topi, assuming sparse weight.

    Computes a dense layer where the weight matrix is given in CSR form
    (``w_data``/``w_indices``/``w_indptr``) and the input is dense.

    Parameters
    ----------
    data : tvm.te.Tensor
        2-D with shape [m, k]

    w_data : tvm.te.Tensor
        1-D with shape [nonzeros]

    w_indices : tvm.te.Tensor
        1-D with shape [nonzeros]

    w_indptr : tvm.te.Tensor
        1-D with shape [n+1]

    bias : tvm.te.Tensor, optional
        1-D with shape [n]

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [m, n]
    """
    assert (
        len(w_data.shape) == 1
        and len(w_indices.shape) == 1
        and len(w_indptr.shape) == 1
        and len(data.shape) == 2
    ), "only support 2-dim dense"
    assert isinstance(
        data, te.tensor.Tensor
    ), f"data matrix is assumed to be tvm.te.Tensor, but weight is `{type(data)}`"
    if bias is not None:
        assert len(bias.shape) == 1
    dtype = data.dtype
    M, _ = data.shape
    # CSR indptr has n+1 entries, so the sparse weight row count is len(indptr) - 1.
    N = simplify(w_indptr.shape[0] - 1)
    def dense_default_ir(data, w_data, w_indices, w_indptr, out):
        """Define IR for Dense"""
        dtype = data.dtype
        irb = tvm.tir.ir_builder.create()
        data_ptr = irb.buffer_ptr(data)
        w_data_ptr = irb.buffer_ptr(w_data)
        w_indices_ptr = irb.buffer_ptr(w_indices)
        w_indptr_ptr = irb.buffer_ptr(w_indptr)
        out_ptr = irb.buffer_ptr(out)
        M, K = data.shape
        N = simplify(w_indptr.shape[0] - 1)
        # Input rows are vectorized; sparse weight rows run in parallel.
        with irb.for_range(0, M, kind="vectorize", name="m") as m:
            with irb.for_range(0, N, kind="parallel", name="n") as n:
                dot = irb.allocate(dtype, (1,), name="dot", scope="local")
                out_ptr[m * N + n] = tvm.tir.const(0, dtype)
                dot[0] = tvm.tir.const(0, dtype)
                row_start = w_indptr_ptr[n]
                row_elems = w_indptr_ptr[n + 1] - row_start
                # Accumulate over the nonzeros of sparse weight row n,
                # gathering the matching dense input elements.
                with irb.for_range(0, row_elems, name="k") as k:
                    elem = row_start + k
                    dot[0] += w_data_ptr[elem] * data_ptr[w_indices_ptr[elem] + m * K]
                out_ptr[m * N + n] += dot[0]
        return irb.get()
    oshape = (M, N)
    matmul = te.extern(
        oshape,
        [data, w_data, w_indices, w_indptr],
        lambda ins, outs: dense_default_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
        tag="dense",
        dtype=dtype,
        name="out",
    )
    if bias is not None:
        # Broadcast-add bias along the rows of the output.
        matmul = te.compute(oshape, lambda i, j: matmul[i, j] + bias[j], tag=tag.BROADCAST)
    return matmul
def dense(data, weight, bias=None):
    """Applies a linear transformation: :math:`Y = XW^T + b`.

    Exactly one of ``data`` / ``weight`` must be a
    tvm.contrib.sparse.CSRNDArray placeholder; the other must be a dense
    ``te.Tensor``.

    Parameters
    ----------
    data : tvm.contrib.sparse.CSRNDArray or te.tensor.Tensor
        2-D with shape [batch, in_dim]

    weight : te.tensor.Tensor or tvm.contrib.sparse.CSRNDArray
        2-D with shape [out_dim, in_dim]

    bias : te.tensor.Tensor, optional
        1-D with shape [out_dim]

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [batch, out_dim]

    Raises
    ------
    NotImplementedError
        If the sparse/dense combination of ``data`` and ``weight`` is not
        one of the two supported cases.
    """
    if isinstance(data, tvm.contrib.sparse.CSRPlaceholderOp) and isinstance(
        weight, te.tensor.Tensor
    ):
        # Sparse input, dense weight.
        return dense_si(data.data, data.indices, data.indptr, weight, bias)
    if isinstance(data, te.tensor.Tensor) and isinstance(
        weight, tvm.contrib.sparse.CSRPlaceholderOp
    ):
        # Dense input, sparse weight.
        return dense_sw(data, weight.data, weight.indices, weight.indptr, bias)
    raise NotImplementedError(
        f"implementation for {type(data)} as data and {type(weight)} as weights, "
        "is not supported yet."
    )
| 7,312 | 32.545872 | 91 | py |
tvm | tvm-main/python/tvm/topi/sparse/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Some utils for Sparse operation."""
import tvm
from tvm import relay, auto_scheduler
from tvm.relay import data_dep_optimization as ddo
from tvm.auto_scheduler import _ffi_api
def random_bsr_matrix(m, n, bs_r, bs_c, density, dtype):
    """Generate a random sparse matrix in bsr format.

    Parameters
    ----------
    m, n : int
        Matrix shape; must be divisible by ``bs_r`` and ``bs_c`` respectively.

    bs_r, bs_c : int
        Block shape of the BSR matrix.

    density : float
        Approximate fraction of nonzero elements.  The realized density is
        rounded up to whole blocks and capped at 1.0.

    dtype : str
        Element dtype of the generated matrix.

    Returns
    -------
    scipy.sparse.bsr_matrix
        Matrix whose chosen blocks are filled with standard-normal values.
    """
    # pylint: disable=import-outside-toplevel
    import numpy as np
    import itertools
    import scipy.sparse as sp

    y = np.zeros((m, n), dtype=dtype)
    assert m % bs_r == 0
    assert n % bs_c == 0
    nnz = int(density * m * n)
    num_blocks = int(nnz / (bs_r * bs_c)) + 1
    candidate_blocks = np.asarray(list(itertools.product(range(0, m, bs_r), range(0, n, bs_c))))
    assert candidate_blocks.shape[0] == m // bs_r * n // bs_c
    # Cap at the total number of blocks: the +1 round-up above would
    # otherwise ask np.random.choice for more unique blocks than exist
    # (e.g. when density == 1.0), which raises a ValueError.
    num_blocks = min(num_blocks, candidate_blocks.shape[0])
    chosen_blocks = candidate_blocks[
        np.random.choice(candidate_blocks.shape[0], size=num_blocks, replace=False)
    ]
    # pylint: disable=invalid-name
    for (r, c) in chosen_blocks:
        y[r : r + bs_r, c : c + bs_c] = np.random.randn(bs_r, bs_c)
    s = sp.bsr_matrix(y, blocksize=(bs_r, bs_c))
    assert s.data.shape == (num_blocks, bs_r, bs_c)
    assert s.indices.shape == (num_blocks,)
    assert s.indptr.shape == (m // bs_r + 1,)
    return s
def random_sparse_dense_params(func, params, bs_r, bs_c, density):
    """Replace the dense parameters with random sparse parameters. Mainly used for testing.

    Parameters
    ----------
    func : tvm.relay.Expr
        Expr will be optimized to sparse operation.

    params : Dict[String, tvm.nd.array]
        Parameters of the Expr.

    bs_r : int
        The row of BSR matrix block.

    bs_c : int
        The column of BSR matrix block.

    density : float
        The density of the random sparse parameters.

    Returns
    -------
    Dict[String, tvm.nd.array]
        The generated random parameters.
    """
    # Deep-copy so the caller's parameter dict is left untouched.
    updated = {key: tvm.nd.array(val.numpy()) for key, val in params.items()}
    for weight_var in relay.analysis.sparse_dense._search_dense_op_weight(func):
        key = str(weight_var)
        shape = updated[key].shape
        if shape[0] % bs_r != 0 or shape[1] % bs_c != 0:
            # Shape not tileable by the requested block size; leave as-is.
            continue
        sparse_w = random_bsr_matrix(shape[0], shape[1], bs_r, bs_c, density, "float32").todense()
        updated[key] = tvm.nd.array(sparse_w)
    return updated
def random_sparse_conv2d_params(func, params, bs_r, bs_c, density, layout):
    """Replace the dense parameters with random sparse parameters. Mainly used for testing.

    Only weights with a 1x1 spatial kernel are converted; other weights
    are left untouched.

    Parameters
    ----------
    func : tvm.relay.Expr
        Expr will be optimized to sparse operation.

    params : Dict[String, tvm.nd.array]
        Parameters of the Expr.

    bs_r : int
        The row of BSR matrix block.

    bs_c : int
        The column of BSR matrix block.

    density : float
        The density of the random sparse parameters.

    layout : str
        layout of network; "NCHW" and "NHWC" are handled below.

    Returns
    -------
    Dict[String, tvm.nd.array]
        The generated random parameters.
    """
    # pylint: disable=import-outside-toplevel
    import numpy as np
    def deepcopy(param_dic):
        # Copy so the caller's parameter dict is not mutated.
        ret = {}
        for k, v in param_dic.items():
            ret[k] = tvm.nd.array(v.numpy())
        return ret
    new_params = deepcopy(params)
    conv2d_weight_names = relay.analysis.sparse_conv2d._search_conv2d_op_weight(func)
    for item in conv2d_weight_names:
        name = str(item)
        shape = new_params[name].shape
        # Only a 1x1 spatial kernel can be treated as a plain 2-D matrix
        # (channels x channels) and replaced by a random BSR matrix.
        if not ((shape[0] == 1 and shape[1] == 1) or (shape[2] == 1 and shape[3] == 1)):
            continue
        if layout == "NCHW" and shape[0] % bs_r == 0 and shape[1] % bs_c == 0:
            new_w = random_bsr_matrix(shape[0], shape[1], bs_r, bs_c, density, "float32").todense()
            new_params[name] = tvm.nd.array(np.array(new_w).reshape(shape))
        elif layout == "NHWC" and shape[3] % bs_r == 0 and shape[2] % bs_c == 0:
            new_w = random_bsr_matrix(shape[3], shape[2], bs_r, bs_c, density, "float32").todense()
            new_params[name] = tvm.nd.array(np.array(new_w).reshape(shape))
    return new_params
def convert_model_dense_to_sparse(
    mod, params, random_params=False, bs_r=1, bs_c=1, sparsity=0.85, layout="NHWC"
):
    """Convert a dense model to sparse model.

    Parameters
    ----------
    mod : tvm.Module
        The dense model.

    params : Dict[String, tvm.nd.array]
        Parameters of the dense model.

    random_params : Bool = False
        True to replace the parameters of the dense model with some random sparse tensors.
        This is mainly used for testing.

    bs_r : int
        The row of BSR matrix block.

    bs_c : int
        The column of BSR matrix block.

    sparsity : float
        The sparsity of the random sparse parameters.

    layout : str
        layout of network

    Returns
    -------
    tvm.Module
        The updated sparse model.

    Dict[String, tvm.nd.array]
        The updated parameters.
    """
    # Normalize fc+transpose patterns first so the sparse passes below can
    # recognize the weights.
    mod, params = ddo.simplify_fc_transpose.convert(mod["main"], params)
    if random_params:
        # Manually replace the parameters of dense to sparse tensors
        params = random_sparse_dense_params(mod, params, bs_r=bs_r, bs_c=bs_c, density=1 - sparsity)
        # Manually replace the parameters of conv2d to sparse tensors
        params = random_sparse_conv2d_params(
            mod, params, bs_r=bs_r, bs_c=bs_c, density=1 - sparsity, layout=layout
        )
    # convert dense matmul to sparse matmul
    mod, params = ddo.bsr_dense.convert(mod, params, (bs_r, bs_c), sparsity_threshold=0.8)
    # convert dense conv2d to sparse conv2d
    mod, params = ddo.bsr_conv2d.convert(
        mod, params, (bs_r, bs_c), sparsity_threshold=0.8, layout=layout
    )
    return tvm.IRModule.from_expr(mod), params
def sparse_sketch_rules():
    """Return the sketch rules for sparse op"""
    conv2d_rule = auto_scheduler.PreloadCustomSketchRule(
        sparse_conv2d_meet_condition_func, sparse_conv2d_apply_func, "SparseConv2D"
    )
    dense_rule = auto_scheduler.PreloadCustomSketchRule(
        sparse_dense_meet_condition_func, sparse_dense_apply_func, "SparseDense"
    )
    # Add more sketch rules for sparse
    return [conv2d_rule, dense_rule]
def sparse_conv2d_meet_condition_func(search_policy, state, stage_id):
    """Trigger the custom sparse conv2d sketch only on sparse-conv2d stages."""
    dag = search_policy.search_task.compute_dag
    loop_state = auto_scheduler.loop_state.State(state, dag)
    op_tag = loop_state.stages[stage_id].op.tag
    if op_tag in ("sparse_conv2d_sp_bsrmm", "sparse_conv2d_sp_bsrmm_block"):
        return auto_scheduler.PreloadCustomSketchRule.APPLY_AND_SKIP_REST
    return auto_scheduler.PreloadCustomSketchRule.PASS
def sparse_conv2d_apply_func(search_policy, state, stage_id):
    """Describe how to generate the initial sketch for sparse conv2d"""
    ret = []
    s_0 = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag)
    # If asked about the inner block stage, defer to its outer bsrmm stage.
    if s_0.stages[stage_id].op.tag == "sparse_conv2d_sp_bsrmm_block":
        return [s_0.state_object, stage_id - 1]
    sparse_conv2d = s_0.stages[stage_id].op
    sparse_conv2d_block = s_0.stages[stage_id - 1].op
    assert sparse_conv2d.tag == "sparse_conv2d_sp_bsrmm"
    assert sparse_conv2d_block.tag == "sparse_conv2d_sp_bsrmm_block"
    layout = sparse_conv2d.attrs["layout"]
    # Set the default consumer of compute block
    consumer = sparse_conv2d
    # If sparse conv2d has a single elementwise consumer
    # We can compute inline the sparse_conv2d output stage
    consumers = _ffi_api.SearchPolicyUtilsGetConsumers(
        search_policy.search_task, s_0.state_object, stage_id
    )
    if len(consumers) == 1:
        consumer_id = int(consumers.items()[0][0])
        if _ffi_api.SearchPolicyUtilsIsElementwiseMatch(
            search_policy.search_task, s_0.state_object, stage_id, consumer_id
        ):
            consumer = s_0.stages[consumer_id].op
            s_0.compute_inline(sparse_conv2d)
    # c is the optional inner block-column iterator; only present when the
    # compute block has 7 iterators (i.e. bs_c > 1).
    c = None
    if layout == "NHWC":
        if len(s_0[sparse_conv2d_block].iters) == 6:
            # bs_c = 1
            i, h, w, nb_j, j, row_offset = s_0[  # pylint: disable=invalid-name
                sparse_conv2d_block
            ].iters
        else:
            i, h, w, nb_j, j, row_offset, c = s_0[  # pylint: disable=invalid-name
                sparse_conv2d_block
            ].iters
        m, x, y, n = s_0[consumer].iters
    elif layout == "NCHW":
        if len(s_0[sparse_conv2d_block].iters) == 6:
            # bs_c = 1
            i, nb_j, j, h, w, row_offset = s_0[  # pylint: disable=invalid-name
                sparse_conv2d_block
            ].iters
        else:
            i, nb_j, j, h, w, row_offset, c = s_0[  # pylint: disable=invalid-name
                sparse_conv2d_block
            ].iters
        m, n, x, y = s_0[consumer].iters
    # Multi-level tiling: split batch/spatial/block iterators of the compute
    # block, and make the consumer's splits follow them so the two stages
    # can later be fused via compute_at.
    i_0, i_1, i_2 = s_0.split(sparse_conv2d_block, i, [None, None])
    m_0, m_1 = s_0.follow_split(consumer, m, len(s_0.transform_steps) - 1, 1)
    h_0, h_1, h_2 = s_0.split(sparse_conv2d_block, h, [None, None])
    x_0, x_1 = s_0.follow_split(consumer, x, len(s_0.transform_steps) - 1, 1)
    w_0, w_1, w_2 = s_0.split(sparse_conv2d_block, w, [None, None])  # pylint: disable=invalid-name
    y_0, y_1 = s_0.follow_split(consumer, y, len(s_0.transform_steps) - 1, 1)
    j_0, j_1 = s_0.split(sparse_conv2d_block, nb_j, [None])
    n_0, n_1 = s_0.follow_split(consumer, n, len(s_0.transform_steps) - 1, 1)
    # Reorder to the tiled loop structure matching the layout, keeping the
    # reduction (row_offset) inside the outer tiles.
    if layout == "NHWC":
        if c is None:
            s_0.reorder(
                sparse_conv2d_block,
                [i_0, h_0, w_0, j_0, i_1, h_1, w_1, j_1, row_offset, i_2, h_2, w_2, j],
            )
        else:
            s_0.reorder(
                sparse_conv2d_block,
                [i_0, h_0, w_0, j_0, i_1, h_1, w_1, j_1, row_offset, i_2, h_2, w_2, j, c],
            )
        s_0.reorder(consumer, [m_0, x_0, y_0, n_0, m_1, x_1, y_1, n_1])
    elif layout == "NCHW":
        if c is None:
            s_0.reorder(
                sparse_conv2d_block,
                [i_0, j_0, h_0, w_0, i_1, j_1, h_1, w_1, row_offset, i_2, j, h_2, w_2],
            )
        else:
            s_0.reorder(
                sparse_conv2d_block,
                [i_0, j_0, h_0, w_0, i_1, j_1, h_1, w_1, row_offset, i_2, j, c, h_2, w_2],
            )
        s_0.reorder(consumer, [m_0, n_0, x_0, y_0, m_1, n_1, x_1, y_1])
    # Fuse the compute block under the consumer's outer output tile.
    s_0.compute_at(sparse_conv2d_block, consumer, n_0)
    ret.append([s_0.state_object, stage_id - 2])
    return ret
def sparse_dense_meet_condition_func(search_policy, state, stage_id):
    """Trigger the custom sparse dense sketch only on sparse-dense stages."""
    dag = search_policy.search_task.compute_dag
    loop_state = auto_scheduler.loop_state.State(state, dag)
    op_tag = loop_state.stages[stage_id].op.tag
    if op_tag in ("sparse_dense_sp_rhs_bsrmm", "sparse_dense_sp_rhs_bsrmm_block"):
        return auto_scheduler.PreloadCustomSketchRule.APPLY_AND_SKIP_REST
    return auto_scheduler.PreloadCustomSketchRule.PASS
def sparse_dense_apply_func(search_policy, state, stage_id):
    """Describe how to generate the initial sketch for sparse dense"""
    ret = []
    s_0 = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag)
    # If asked about the inner block stage, defer to its outer bsrmm stage.
    if s_0.stages[stage_id].op.tag == "sparse_dense_sp_rhs_bsrmm_block":
        return [s_0.state_object, stage_id - 1]
    sparse_dense = s_0.stages[stage_id].op
    sparse_dense_block = s_0.stages[stage_id - 1].op
    assert sparse_dense.tag == "sparse_dense_sp_rhs_bsrmm"
    assert sparse_dense_block.tag == "sparse_dense_sp_rhs_bsrmm_block"
    # Set the default consumer of compute block
    consumer = sparse_dense
    # If sparse dense has a single elementwise consumer
    # We can compute inline the sparse_dense output stage
    consumers = _ffi_api.SearchPolicyUtilsGetConsumers(
        search_policy.search_task, s_0.state_object, stage_id
    )
    if len(consumers) == 1:
        consumer_id = int(consumers.items()[0][0])
        if _ffi_api.SearchPolicyUtilsIsElementwiseMatch(
            search_policy.search_task, s_0.state_object, stage_id, consumer_id
        ):
            consumer = s_0.stages[consumer_id].op
            s_0.compute_inline(sparse_dense)
    # Iterators of the compute block: batch i, weight block nb_j, in-block
    # row j, reduction row_offset, in-block column c.
    i, nb_j, j, row_offset, c = s_0[sparse_dense_block].iters
    m, n = s_0[consumer].iters
    # Tile the block and make the consumer's splits follow, so the two
    # stages can later be fused via compute_at.
    i_0, i_1, i_2 = s_0.split(sparse_dense_block, i, [None, None])
    m_0, m_1 = s_0.follow_split(consumer, m, len(s_0.transform_steps) - 1, 1)
    j_0, j_1 = s_0.split(sparse_dense_block, nb_j, [None])
    n_0, n_1 = s_0.follow_split(consumer, n, len(s_0.transform_steps) - 1, 1)
    s_0.reorder(sparse_dense_block, [i_0, j_0, i_1, j_1, row_offset, i_2, j, c])
    s_0.reorder(consumer, [m_0, n_0, m_1, n_1])
    # Fuse the compute block under the consumer's outer column tile.
    s_0.compute_at(sparse_dense_block, consumer, n_0)
    ret.append([s_0.state_object, stage_id - 2])
    return ret
| 13,690 | 37.13649 | 100 | py |
tvm | tvm-main/python/tvm/topi/sparse/csrmm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM operator compute SpMM in CSR format."""
from __future__ import absolute_import
import tvm
from tvm import te
from .. import tag
from ..utils import simplify
from ...tir.generic import cast
def csrmm_default(data, indices, indptr, weight, bias=None):
    # pylint: disable=invalid-name
    """The default implementation of csrmm in topi.

    Computes ``out = A * weight (+ bias)`` where ``A`` is an m-by-k sparse
    matrix described by the CSR triplet (``data``, ``indices``, ``indptr``).

    Parameters
    ----------
    data : tvm.te.Tensor
        1-D with shape [nonzeros]; the nonzero values of ``A``.

    indices : tvm.te.Tensor
        1-D with shape [nonzeros]; column index of each nonzero.

    indptr : tvm.te.Tensor
        1-D with shape [m+1]; CSR row-pointer array.

    weight : tvm.te.Tensor
        2-D with shape [k, n]

    bias : tvm.te.Tensor, optional
        1-D with shape [m]; added per output row.

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [m, n]
    """
    assert (
        len(data.shape) == 1
        and len(indices.shape) == 1
        and len(indptr.shape) == 1
        and len(weight.shape) == 2
    ), "only support 2-dim csrmm"
    assert isinstance(
        weight, te.tensor.Tensor
    ), f"weight matrix is assumed to be tvm.te.Tensor, but weight is `{type(weight)}`"
    assert (
        data.dtype == weight.dtype
    ), f"Data and weight must have the same dtype, but they have {data.dtype} and {weight.dtype}"
    if bias is not None:
        assert len(bias.shape) == 1
    # CSR indptr has m+1 entries, so the sparse row count is len(indptr) - 1.
    M = simplify(indptr.shape[0] - 1)
    _, N = weight.shape
    def csrmm_default_ir(data, indices, indptr, weight, out):
        """define ir for csrmm"""
        irb = tvm.tir.ir_builder.create()
        data_ptr = irb.buffer_ptr(data)
        indices_ptr = irb.buffer_ptr(indices)
        indptr_ptr = irb.buffer_ptr(indptr)
        weight_ptr = irb.buffer_ptr(weight)
        out_ptr = irb.buffer_ptr(out)
        M = simplify(indptr.shape[0] - 1)
        _, N = weight.shape
        # Output columns are vectorized; sparse rows run in parallel.
        with irb.for_range(0, N, kind="vectorize", name="n") as n:
            with irb.for_range(0, M, kind="parallel", name="row") as row:
                dot = irb.allocate(data.dtype, (1,), name="dot", scope="local")
                out_ptr[row * N + n] = cast(0, data.dtype)
                dot[0] = cast(0, data.dtype)
                row_start = indptr_ptr[row]
                row_end = indptr_ptr[row + 1]
                row_elems = row_end - row_start
                # Accumulate value * weight[column, n] over the row's nonzeros.
                with irb.for_range(0, row_elems, name="idx") as idx:
                    elem = row_start + idx
                    dot[0] += data_ptr[elem] * weight_ptr[indices_ptr[elem] * N + n]
                out_ptr[row * N + n] += dot[0]
        return irb.get()
    oshape = (M, N)
    matmul = te.extern(
        oshape,
        [data, indices, indptr, weight],
        lambda ins, outs: csrmm_default_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
        tag="csrmm",
        dtype=data.dtype,
        name="out",
    )
    if bias is not None:
        # Broadcast-add bias along the columns of each row.
        matmul = te.compute(oshape, lambda i, j: matmul[i, j] + bias[i], tag=tag.BROADCAST)
    return matmul
def csrmm(a, b, c=None):
    """Sparse matrix-matrix multiply, :math:`C := A*B + C`.

    `B` and `C` are dense matrices; `A` is an m-by-k sparse matrix in the
    CSR format.

    Parameters
    ----------
    a : tvm.contrib.sparse.CSRNDArray
        2-D sparse matrix with shape [m, k]

    b : tvm.te.Tensor
        2-D dense matrix with shape [k, n]

    c : tvm.te.Tensor, optional
        1-D dense vector added per output row (see ``csrmm_default``'s bias).

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [m, n]
    """
    # Unpack the CSR triplet and delegate to the generic implementation.
    values, col_indices, row_ptr = a.data, a.indices, a.indptr
    return csrmm_default(values, col_indices, row_ptr, b, c)
| 4,310 | 32.679688 | 97 | py |
tvm | tvm-main/python/tvm/topi/sparse/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Sparse operators"""
from __future__ import absolute_import as _abs
from .csrmv import csrmv
from .csrmm import csrmm
from .dense import dense
| 966 | 37.68 | 62 | py |
tvm | tvm-main/python/tvm/topi/random/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Pseudorandom generator kernels and operators."""
from __future__ import absolute_import
from .kernel import *
| 934 | 39.652174 | 62 | py |
tvm | tvm-main/python/tvm/topi/random/kernel.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Pseudorandom number kernels."""
import math
import numpy as np
import tvm
import tvm.topi
from ... import tir
from ...tir import ir_builder
# Threefry PRNG with splitting based on
# - J. K. Salmon, M. A. Moraes, R. O. Dror and D. E. Shaw, "Parallel random numbers: As easy as 1,
# 2, 3," SC '11: Proceedings of 2011 International Conference for High Performance Computing,
# Networking, Storage and Analysis, Seattle, WA, 2011, pp. 1-12, doi: 10.1145/2063384.2063405.
# - Claessen, K. ; Palka, M. (2013) "Splittable Pseudorandom Number Generators using Cryptographic
# Hashing". Proceedings of Haskell Symposium 2013 pp. 47-58. MLA
# - Ferguson, Niels, et al. "The Skein hash function family." Submission to NIST (round 3) 7.7.5
# (2010): 3.
# Threefry is a counter based PRNG: given a unique input, it generates a unique random number. As
# there is no state to maintain, we can apply it to a sequence of numbers (0..N) to generate a
# sequence of random numbers in parallel. In order to make the PRNG splittable (that is we can
# generate a sequence of random numbers in one place, and another sequence in another), we add a
# path and key in addition to the counter. The path allows us to encode a sequence of splits (a 0 in
# the path indicates the left result of a split, a 1 indicates the right). To avoid continuously
# growing the path, we can compress an existing path into the key portion of the generator by
# hashing the current key, path, and counter to create the new key (this same technique is used if
# we run out of room for the counter). The key is initialized with a unique initial state.
#
# Random numbers are generated by applying the Threefry hash to the current key, path, and counter.
# This module use encoding e4 from the appendix of "Splittable Pseudorandom Number Generators using
# Cryptographic Hashing" (confusingly, the definition in the paper uses e3 to define the encoding
# function). This encoding uses a 10 element uint64 tensor where each byte means the following:
# .. code-block:
# gen:
# words: 0 1 2 3 | 4 5 | 6 7 | 8 9
# usage: key | path | counter | position of next step in path encoded in binary
# ex: 0b00010 -> next path entry goes one from the right
# Right now, counter only uses the rightmost word.
# Threefry rotation constants from the Skein paper ("The Skein Hash Function Family"
# https://www.schneier.com/wp-content/uploads/2015/01/skein.pdf)
# Keys are the Threefry word counts (number of 64-bit words in the state);
# each inner list holds the per-round rotation amounts.
_ROTATIONS = {
    4: [[14, 16], [52, 57], [23, 40], [5, 37], [25, 33], [46, 12], [58, 22], [32, 32]],
    8: [
        [46, 36, 19, 37],
        [33, 27, 14, 42],
        [17, 49, 36, 39],
        [44, 9, 54, 56],
        [39, 30, 34, 24],
        [13, 50, 10, 17],
        [25, 29, 39, 43],
        [8, 35, 56, 22],
    ],
    16: [
        [24, 13, 8, 47, 8, 17, 22, 37],
        [38, 19, 10, 55, 49, 18, 23, 52],
        [33, 4, 51, 13, 34, 41, 59, 17],
        [5, 20, 48, 41, 47, 28, 16, 25],
        [41, 9, 37, 31, 12, 47, 44, 30],
        [16, 34, 56, 51, 4, 53, 42, 41],
        [31, 44, 47, 46, 19, 42, 44, 25],
        [9, 48, 35, 52, 23, 31, 37, 20],
    ],
}
# Threefry permutation constants from the Skein paper ("The Skein Hash Function Family"
# https://www.schneier.com/wp-content/uploads/2015/01/skein.pdf)
# Keyed by word count; each list is the word permutation applied between rounds.
_PERMUTATIONS = {
    4: [0, 3, 2, 1],
    8: [2, 1, 4, 7, 6, 5, 0, 3],
    16: [0, 9, 2, 13, 6, 11, 4, 15, 10, 7, 12, 3, 14, 5, 8, 1],
}
def _threefry(
    irb, key_buf, key_offset, counter_buf, counter_offset, out_buf, out_offset, out_shape
):
    """IRBuilder code for running Threefry

    Parameters
    ----------
    irb: IRBuilder
        IRBuilder that this code will be generated for.

    key_buf: BufferVar
        Buffer to read the key from.

    key_offset: number
        Threefry will read the key from :code:`key_buf[key_offset:key_offset+4]`

    counter_buf: BufferVar
        Buffer to read the counter from.

    counter_offset: number
        Threefry will read the counter from :code:`counter_buf[counter_offset:counter_offset+4]`

    out_buf: BufferVar
        Buffer to write the generated random words to.

    out_offset: number
        Threefry will write to :code:`out_buf[out_offset:out_offset+4*product(out_shape)]`

    out_shape: number
        Determines the number of output states to generate. :code:`state[i]` will correspond to
        counter+i.
    """
    nrounds = 20
    nwords = 4
    iwidth = 64
    assert nrounds % 4 == 0
    assert nwords in [4, 8, 16]
    # The paper has constants for 32 bit threefry, but we keep the implementation simple by only
    # using 64-bit words.
    assert key_buf.dtype == "uint64", "threefry only supports 64-bit keys"
    assert key_buf.dtype == counter_buf.dtype, "threefry key and counter must be the same dtype"

    def mix(a, b, rotation):
        # One Threefry MIX step: add, then xor the sum with the rotated second word.
        x = a + b  # wrapping
        y = x ^ ((b << rotation) | (b >> (iwidth - rotation)))
        return [x, y]

    # temporary buffer for holding the results of _PERMUTATIONS
    tmp = irb.allocate(out_buf.dtype, out_shape * nwords, name="tmp", scope="global")
    tmp_offset = 0

    # Initialize entire key. It is composed of the original key with one
    # element appended. The appended element is the xor of all key words plus a
    # constant.
    full_key = irb.allocate("uint64", nwords + 1, name="full_key", scope="global")
    for i in range(nwords):
        full_key[i] = key_buf[key_offset + i]
    # initial key constant, full_key[nwords] is equivalent to k_{N_W} in the Skein paper.
    full_key[nwords] = tvm.tir.const(0x1BD11BDAA9FC1A22, dtype="uint64")
    for i in range(nwords):
        full_key[nwords] ^= key_buf[key_offset + i]

    # Seed every output state with counter + i so states can be generated in parallel.
    with irb.for_range(0, out_shape, dtype="uint64", name="i") as i:
        for j in range(nwords):
            out_buf[out_offset + i * nwords + j] = counter_buf[counter_offset + j] + i

    def key_schedule(s, i):
        # Threefry uses no tweak, so the key schedule is simple
        if i == nwords - 1:
            return full_key[(s + i) % (nwords + 1)] + tvm.tir.const(s, dtype="uint64")
        return full_key[(s + i) % (nwords + 1)]

    with irb.for_range(0, out_shape, name="l") as l:  # pylint: disable=invalid-name
        for i in range(nrounds // 4):
            # Inject a subkey once per group of 4 rounds.
            for j in range(nwords):
                out_buf[out_offset + l * nwords + j] += key_schedule(i, j)  # wrapping
            for k in range(4):
                # Mix adjacent word pairs, then permute words into the scratch buffer.
                for j in range(nwords // 2):
                    (
                        out_buf[out_offset + l * nwords + j * 2 + 0],
                        out_buf[out_offset + l * nwords + j * 2 + 1],
                    ) = mix(
                        out_buf[out_offset + l * nwords + j * 2 + 0],
                        out_buf[out_offset + l * nwords + j * 2 + 1],
                        _ROTATIONS[nwords][(i * 4 + k) % 8][j],
                    )
                for j in range(nwords):
                    tmp[tmp_offset + l * nwords + j] = out_buf[
                        out_offset + l * nwords + _PERMUTATIONS[nwords][j]
                    ]
                # number of rounds is even, so out always contains the result
                (out_buf, tmp) = (tmp, out_buf)
                (out_offset, tmp_offset) = (tmp_offset, out_offset)
def threefry_generate(gen, out_shape):
    """Generate a series of random values

    Notes
    -----
    This function uses the counter portion of the generator state to generate a series of random
    numbers in parallel. Random number `i` is generated by applying Threefry to the current
    generator state with the counter portion incremented by `i`. This means that each random number
    is generated independently from each other random number, so we can compute them in parallel.
    If there is not enough room left in the counter to generate the desired shape of random values,
    then a new generator is created by applying Threefry to the current key, path, and counter.
    This new generator will have a reset counter.

    Warning
    -------
    Threefry requires that unsigned integer arithmetic wraps on overflow. Currently TVM has no
    guarantee of this, so threefry contains an internal assert to check wrapping behavior. This
    assert may or may not run depending on your platform, so it is recommended you run
    :py:func:`threefry_test_wrapping` to verify wrapping behavior.

    Parameters
    ----------
    gen : Tensor[10, uint64]
        Generator state. Can be create with :py:func:`tvm.relay.random.threefry_key`. This should
        not be reused in another function, otherwise random numbers will be repeated.

    out_shape : Sequence[int]
        Output shape of the random numbers.

    Returns
    -------
    new_gen : Tensor[10, uint64]
        The new generator state to be used in subsequent calls.

    rand : Tensor[out_shape, uint64]
        Tensor of random numbers with shape `out_shape`.
    """
    out_len = tir.const(1)
    for s in out_shape:
        out_len *= s
    assert (
        out_len.value <= 2**64 - 1
    ), f"Can only generate up to 2^64 random numbers, but {out_len} were requested."

    def gen_ir(gen_ptr, out_gen_ptr, out_array_ptr):
        irb = ir_builder.create()
        gen = irb.buffer_ptr(gen_ptr)
        out_gen = irb.buffer_ptr(out_gen_ptr)
        out_array = irb.buffer_ptr(out_array_ptr)

        # Check that unsigned arithmetic wraps, as it is required to implement threefry correctly.
        irb.emit(
            tvm.tir.AssertStmt(
                tvm.tir.const(0xFFFFFFFFFFFFFFFF, "uint64") + tvm.tir.const(1, "uint64")
                == tvm.tir.const(0, "uint64"),
                tvm.tir.StringImm(
                    "Unsigned integer arithmetic is not wrapping, but threefry requires wrapping."
                ),
                tvm.tir.Evaluate(0),
            )
        )

        # Create a temporary array to hold the generator state we will use to create the random
        # numbers. We cannot use gen because we may need to update the key + path if there is not
        # enough room in the counter.
        tmp = irb.allocate(gen.dtype, 10, name="tmp", scope="global")

        # TODO(tkonolige): for now we only use the last word of the counter for counting. It is too
        # much work to figure out how to do 128 bit addition.

        # Max value for counter should be 2**64-2 because we need to reserve a special value to
        # indicate the counter is used up.
        with irb.if_scope(gen[7] < tir.const(2**64 - 1, dtype=gen.dtype) - out_len):
            for i in range(10):
                tmp[i] = gen[i]
        with irb.else_scope():
            # no room left in the counter, we have to change the path or key
            # BUGFIX: this condition previously used the Python `and` operator on TIR
            # expressions. `and` forces bool() on its left operand, and TVM's deferred
            # EqualOp evaluates truthiness structurally (False here), so the condition
            # silently degenerated to `gen[8] == 0` alone, dropping the gen[9] test and
            # regenerating the key prematurely. `tir.all` builds the proper conjunction.
            with irb.if_scope(tir.all(gen[8] == 0, gen[9] == 0)):
                # out of room in the path, have to generate new key

                # The paper says the counter that we will be hashing should be a special value of
                # all ones. We need to allocate some space for it because we cannot overwrite gen.
                tmp_counter = irb.allocate(gen.dtype, 2, name="tmp_counter", scope="global")
                tmp_counter[0] = tir.const(0xFFFFFFFFFFFFFFFF, dtype=gen.dtype)
                tmp_counter[1] = tir.const(0xFFFFFFFFFFFFFFFF, dtype=gen.dtype)
                _threefry(irb, gen, 0, tmp_counter, 0, tmp, 0, 1)
                tmp[4] = tir.const(0, dtype=gen.dtype)  # zero path, i.e. no path
                tmp[5] = tir.const(0, dtype=gen.dtype)
                tmp[6] = tir.const(0, dtype=gen.dtype)  # zero counter
                tmp[7] = tir.const(0, dtype=gen.dtype)
                tmp[8] = tir.const(1 << 63, dtype=gen.dtype)  # one in the leftmost position
                tmp[9] = tir.const(0, dtype=gen.dtype)
            with irb.else_scope():
                tmp[0] = gen[0]
                tmp[1] = gen[1]
                tmp[2] = gen[2]
                tmp[3] = gen[3]
                tmp[4] = gen[4] | gen[8]  # add a 1 to the path
                tmp[5] = gen[5] | gen[9]
                tmp[6] = tir.const(0, dtype=gen.dtype)  # zero counter
                tmp[7] = tir.const(0, dtype=gen.dtype)
                _shift_right(irb, gen[8], gen[9], tmp, 8, tmp, 9)

        # Compute random values in blocks of 4 words; the tail (out_len % 4) is generated
        # into a scratch buffer and copied over.
        if out_len.value >= 4:
            _threefry(irb, tmp, 0, tmp, 4, out_array, 0, out_len // 4)
        if out_len.value % 4 != 0:
            remaining = irb.allocate(gen.dtype, 4, name="remaining", scope="global")
            tmp[7] = tmp[7] + tir.Cast(gen.dtype, out_len // 4 * 4)  # increment counter
            _threefry(irb, tmp, 0, tmp, 4, remaining, 0, 1)
            with irb.for_range(0, out_len % 4, dtype="uint64", name="i") as i:
                out_array[out_len // 4 * 4 + i] = remaining[i]

        # Update generator state
        out_gen[0] = tmp[0]  # key stays the same
        out_gen[1] = tmp[1]
        out_gen[2] = tmp[2]
        out_gen[3] = tmp[3]
        out_gen[4] = tmp[4]  # path stays the same
        out_gen[5] = tmp[5]
        out_gen[6] = tir.const(0, dtype=gen.dtype)  # unused, leave it as 0
        if out_len.value % 4 != 0:
            # increment counter for the remaining
            # as we will generate 4 random numbers for the remaining, increase 4 here.
            # the main increment was done before the second _threefry.
            out_gen[7] = tmp[7] + tir.Cast(gen.dtype, 4)
        else:
            out_gen[7] = tmp[7] + tir.Cast(gen.dtype, out_len)  # increment counter
        out_gen[8] = tmp[8]  # path unchanged, so no update here
        out_gen[9] = tmp[9]

        return irb.get()

    out_gen = tvm.tir.decl_buffer((10,), name="out_gen", dtype="uint64")
    out_array = tvm.tir.decl_buffer(out_shape, name="out_array", dtype="uint64")
    return tvm.te.extern(
        [out_gen.shape, out_array.shape],
        [gen],
        lambda ins, outs: gen_ir(ins[0], outs[0], outs[1]),
        out_buffers=[out_gen, out_array],
        name="threefry_generate",
        tag="threefry_generate",
    )
def _shift_right(irb, a, b, out_a, a_off, out_b, b_off):
    """Binary shift a 128bit number composed of two 64 bit words right by one.

    `a` is the high word and `b` the low word; results are written to
    ``out_a[a_off]`` / ``out_b[b_off]``.

    NOTE(review): this is only a full 128-bit shift for the values the callers
    actually pass — a one-hot path marker (at most one bit set across both
    words). In the general `a > 1` branch the low word `b` is discarded
    (``out_b`` is set to 0), which would lose bits for arbitrary inputs.
    """
    # High word is exactly 1: its only bit crosses into the top of the low word.
    with irb.if_scope(a == 1):
        out_a[a_off] = tir.const(0, dtype=a.dtype)
        out_b[b_off] = tir.const(0x8000000000000000, dtype=a.dtype)
    with irb.else_scope():
        # High word already empty: the marker lives in the low word; shift it there.
        with irb.if_scope(a == 0):
            out_a[a_off] = tir.const(0, dtype=a.dtype)
            out_b[b_off] = b >> 1
        with irb.else_scope():
            # Marker still in the high word; low word assumed empty (one-hot input).
            out_a[a_off] = a >> 1
            out_b[b_off] = tir.const(0, dtype=a.dtype)
def threefry_split(gen):
    """Split a single generator state into two new ones

    Notes
    -----
    The new generator is created by appending a one (for the right output) or a zero (for the left
    output) to the end of the path portion of the generator If there is no longer any room in the
    path, then we create a new key portion of the generator by applying Threefry to the old state,
    path, and counter. i.e. :code:`new_key = threefry(old_key, [old_path, old_counter])`. This
    resets the path portion of the new generator.

    Parameters
    ----------
    gen : Tensor[10, uint64]
        Generator state. Can be create with :py:func:`tvm.relay.random.threefry_key`. This should
        not be reused in another function, otherwise random numbers will be repeated.

    Returns
    -------
    out_gen_left : Tensor[10, uint64]
        New generator state that is distinct from `out_gen_right`.

    out_gen_right : Tensor[10, uint64]
        New generator state that is distinct from `out_gen_left`.
    """

    def gen_ir(gen_ptr, out_left_ptr, out_right_ptr):
        irb = ir_builder.create()
        gen = irb.buffer_ptr(gen_ptr)
        out_left = irb.buffer_ptr(out_left_ptr)
        out_right = irb.buffer_ptr(out_right_ptr)

        # BUGFIX: this condition previously used the Python `and` operator on TIR
        # expressions, which (via TVM's deferred EqualOp bool()) collapsed to just
        # `gen[8] == 0`, ignoring gen[9]. `tir.all` builds the intended conjunction:
        # the path is exhausted only when the marker bit is gone from BOTH words.
        with irb.if_scope(tir.all(gen[8] == 0, gen[9] == 0)):
            # Generate new key because we have run out of room to extend the path
            _threefry(irb, gen, 0, gen, 4, out_left, 0, 1)
            out_left[4] = tir.const(0, dtype=gen.dtype)
            out_left[5] = tir.const(0, dtype=gen.dtype)
            out_left[6] = tir.const(0, dtype=gen.dtype)  # counter gets zeroed
            out_left[7] = tir.const(0, dtype=gen.dtype)  # counter gets zeroed
            out_left[8] = tir.const(
                1 << 62, dtype=gen.dtype
            )  # one in the second from the leftmost position
            out_left[9] = tir.const(0, dtype=gen.dtype)

            out_right[0] = out_left[0]
            out_right[1] = out_left[1]
            out_right[2] = out_left[2]
            out_right[3] = out_left[3]
            out_right[4] = tir.const(1 << 63, dtype=gen.dtype)  # one in the leftmost position
            out_right[5] = tir.const(0, dtype=gen.dtype)
            out_right[6] = tir.const(0, dtype=gen.dtype)
            out_right[7] = tir.const(0, dtype=gen.dtype)
            out_right[8] = tir.const(
                1 << 62, dtype=gen.dtype
            )  # one in the second from the leftmost position
            out_right[9] = tir.const(0, dtype=gen.dtype)
        with irb.else_scope():
            out_left[0] = gen[0]
            out_left[1] = gen[1]
            out_left[2] = gen[2]
            out_left[3] = gen[3]
            out_left[4] = gen[4]  # adding a zero here, but its already zero padded
            out_left[5] = gen[5]
            out_left[6] = gen[6]
            out_left[7] = gen[7]
            # move path position over one bit
            _shift_right(irb, gen[8], gen[9], out_left, 8, out_left, 9)

            out_right[0] = gen[0]
            out_right[1] = gen[1]
            out_right[2] = gen[2]
            out_right[3] = gen[3]
            out_right[4] = gen[4] | gen[8]  # add a one to the path
            out_right[5] = gen[5] | gen[9]
            out_right[6] = gen[6]
            out_right[7] = gen[7]
            _shift_right(irb, gen[8], gen[9], out_right, 8, out_right, 9)

        return irb.get()

    out_left = tvm.tir.decl_buffer((10,), name="out_left", dtype="uint64")
    out_right = tvm.tir.decl_buffer((10,), name="out_right", dtype="uint64")
    return tvm.te.extern(
        [out_left.shape, out_right.shape],
        [gen],
        lambda ins, outs: gen_ir(ins[0], outs[0], outs[1]),
        out_buffers=[out_left, out_right],
        name="threefry_split",
        tag="threefry_split",
    )
def threefry_test_wrapping(target, device):
    """Check whether unsigned integer arithmetic wraps on overflow.

    Builds and runs a one-element kernel that computes ``2**64 - 1 + 1`` in
    uint64; wrapping arithmetic produces 0.

    Parameters
    ----------
    target : tvm.target.Target
        Target to run against

    device : tvm.runtime.Device
        Context to run the test on

    Returns
    -------
    is_wrapping : bool
        Whether or not unsigned integer arithmetic is wrapping for this target, context pair. True
        indicates that threefry will work on this platform.
    """
    if isinstance(target, str):
        target = tvm.target.Target(target)

    def _build_ir(out_ptr):
        builder = ir_builder.create()
        out = builder.buffer_ptr(out_ptr)
        # GPU targets need at least one bound thread axis for the store to be valid.
        if "gpu" in target.keys:
            thread_x = tvm.te.thread_axis("threadIdx.x")
            builder.scope_attr(thread_x, "thread_extent", 1)
        out[0] = tvm.tir.const(0xFFFFFFFFFFFFFFFF, "uint64") + tvm.tir.const(1, "uint64")
        return builder.get()

    out = tvm.tir.decl_buffer((1,), dtype="uint64")
    probe = tvm.te.extern(
        [out.shape], [], lambda ins, outs: _build_ir(outs[0]), dtype="uint64", out_buffers=[out]
    )
    sched = tvm.te.create_schedule([probe.op])
    # Seed the output with a nonzero value so a kernel that never ran cannot pass.
    result = tvm.nd.array(np.ones((1,), "uint64"), device)
    tvm.build(sched, [probe], target=target)(result)
    return result.numpy()[0] == 0
def uniform(gen, low, high, out_shape, out_dtype):
    """Draw samples from a uniform distribution.

    Samples are uniformly distributed over the half-open interval [low, high)
    (includes low, but excludes high). In other words, any value within the
    given interval is equally likely to be drawn by uniform.

    Parameters
    ----------
    gen : ThreefryKey
        Generator state. Can be create with :py:func:`tvm.relay.threefry_key`. This should not be
        reused in another function, otherwise random numbers will be repeated.

    low : Tensor[(), out_dtype]
        Lower boundary of the output interval. All values generated will be
        greater than or equal to low.

    high : Tensor[(), out_dtype]
        Upper boundary of the output interval. All values generated will be
        less than high.

    out_shape : Sequence[int]
        Output shape of the random numbers.

    out_dtype : str
        The output dtype.

    Returns
    -------
    new_gen : ThreefryKey
        New generator state that is distinct from `gen`.

    out : Tensor[out_shape, out_dtype]
        Tensor of random numbers with shape `out_shape` and type `out_dtype`.
    """
    new_gen, random_bits = threefry_generate(gen, out_shape)
    assert out_dtype in (
        "float32",
        "float64",
    ), f"Only support float32 or float64 for now, got {out_dtype}"
    # nbits/nfraction follow the IEEE-754 layout of the output type: `nfraction`
    # mantissa bits and `nexp` exponent bits.
    if out_dtype == "float32":
        random_dtype = "uint32"
        nbits = 32
        nfraction = 23
    elif out_dtype == "float64":
        random_dtype = "uint64"
        nbits = 64
        nfraction = 52
    nexp = nbits - nfraction - 1
    random_bits = random_bits.astype(random_dtype)

    # Keep the top `nfraction` random bits as the mantissa field.
    fraction = tvm.topi.right_shift(
        random_bits, tvm.tir.const(nbits - nfraction, dtype=random_dtype)
    )
    # Fixed exponent field equal to the bias (2^(nexp-1) - 1), i.e. the exponent of 1.0.
    exponent = tvm.topi.left_shift(
        tvm.topi.full(out_shape, random_dtype, (1 << (nexp - 1)) - 1),
        tvm.tir.const(nfraction, dtype=random_dtype),
    )
    # Bit pattern of a float in [1, 2); reinterpret and subtract 1 to get [0, 1).
    mantissa = tvm.topi.bitwise_or(fraction, exponent).astype(random_dtype)
    standard_uniform_values = tvm.topi.reinterpret(mantissa, out_dtype) - tvm.tir.const(
        1, dtype=out_dtype
    )

    # Affine map from [0, 1) onto [low, high).
    uniform_values = tvm.topi.add(tvm.topi.multiply(standard_uniform_values, high - low), low)

    return new_gen, uniform_values
def normal(gen, mean, scale, out_shape, out_dtype):
    """Draw samples from a normal distribution.

    Implemented via the Box-Muller transform: two independent uniform draws
    are combined into one standard-normal sample, which is then scaled and
    shifted.

    Parameters
    ----------
    gen : ThreefryKey
        Generator state. Can be create with :py:func:`tvm.relay.threefry_key`. This should not be
        reused in another function, otherwise random numbers will be repeated.

    mean : Tensor[(), out_dtype]
        The mean of the normal distribution.

    scale : Tensor[(), out_dtype]
        The standard deviation of the normal distribution.

    out_shape : Sequence[int]
        Output shape of the random numbers.

    out_dtype : str
        The output dtype.

    Returns
    -------
    new_gen : ThreefryKey
        New generator state that is distinct from `gen`.

    out : Tensor[out_shape, out_dtype]
        Tensor of random numbers with shape `out_shape` and type `out_dtype`.
    """
    # Box-Muller consumes two uniforms per output sample, so draw with a
    # leading axis of size 2 prepended to the requested shape.
    paired_shape = [2] + list(out_shape)
    new_gen, pairs = uniform(
        gen, tvm.tir.const(0.0, out_dtype), tvm.tir.const(1.0, out_dtype), paired_shape, out_dtype
    )
    u1 = tvm.topi.squeeze(tvm.topi.strided_slice(pairs, [0], [1], strides=[1], axes=[0]), axis=0)
    u2 = tvm.topi.squeeze(tvm.topi.strided_slice(pairs, [1], [2], strides=[1], axes=[0]), axis=0)
    # Map u1 from [0, 1) to (0, 1] so the logarithm below is always finite.
    u1 = tvm.topi.subtract(tvm.tir.const(1.0, out_dtype), u1)
    radius = tvm.topi.sqrt(
        tvm.topi.multiply(tvm.tir.const(-2.0, out_dtype), tvm.topi.log(u1))
    )
    angle = tvm.topi.sin(tvm.topi.multiply(tvm.tir.const(2.0 * math.pi, out_dtype), u2))
    standard_normal = tvm.topi.multiply(radius, angle)
    # Scale and shift the standard-normal samples to N(mean, scale^2).
    return new_gen, tvm.topi.add(tvm.topi.multiply(standard_normal, scale), mean)
def multinomial(gen, probs, num_samples):
    """Draw samples from a multinomial distribution defined by the input tensor.

    Parameters
    ----------
    gen : ThreefryKey
        Generator state. Can be created with :py:func:`tvm.relay.threefry_key`. This should not be
        reused in another function, otherwise random numbers will be repeated.

    probs: Tensor[(input_rows, indices), float]
        A tensor containing the probabilities to sample from. Each value represents the
        probability of choosing its corresponding index. If a tensor is provided, the last dimension
        is treated independently. Negative values in this tensor will be clipped to zero to
        represent they have no chance of being selected.

    num_samples: int
        Number of samples to draw from each row.

    Returns
    -------
    new_gen : ThreefryKey
        New generator state that is distinct from `gen`.

    out : Tensor[(input_rows, num_samples), int64]
        Tensor of sampled indices with shape `input_rows x num_samples` and type `out_dtype`.
    """
    # Convert to float for consistent behavior.
    probs = tvm.topi.cast(probs, "float32")
    # Clip negative values to 0.
    probs = tvm.topi.maximum(probs, 0)
    # Normalize input probabilities.
    probs = tvm.topi.divide(probs, tvm.topi.expand_dims(tvm.topi.sum(probs, axis=-1), -1))
    # Convert probability to cumulative sum (the CDF along the last axis).
    cumulative_probs = tvm.topi.cumsum(probs, axis=-1)
    # Sample a set of uniform values.
    new_gen, uniform_values = uniform(
        gen,
        tvm.tir.const(0.0, "float32"),
        tvm.tir.const(1.0, "float32"),
        [*probs.shape[:-1], num_samples],
        "float32",
    )
    # Find index corresponding to sampled values: invert the CDF by comparing each
    # uniform draw against every cumulative probability (broadcast via expand_dims).
    closest_prob = tvm.topi.subtract(
        tvm.topi.expand_dims(cumulative_probs, axis=-1),
        tvm.topi.expand_dims(uniform_values, axis=-2),
    )
    zeros = tvm.topi.full_like(closest_prob, 0)
    ones = tvm.topi.full_like(closest_prob, 1)
    # Find the smallest positive index for each sample: mask out non-positive
    # differences (CDF below the draw) with 1, then argmin picks the first bucket
    # whose cumulative probability exceeds the draw.
    cond = tvm.topi.greater(closest_prob, zeros)
    closest_non_neg = tvm.topi.where(cond, closest_prob, ones)
    sampled_indices = tvm.topi.argmin(closest_non_neg, axis=-2)
    return new_gen, sampled_indices
| 27,010 | 40.050152 | 100 | py |
tvm | tvm-main/python/tvm/topi/image/grid_sample.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""affine_grid and grid_sample operator"""
from tvm import te, tir
def affine_grid(data, target_shape):
    """affine_grid operator that generates 2D sampling grid.

    Described in https://arxiv.org/pdf/1506.02025.pdf. A uniform sampling grid
    over the target shape is generated, normalized to [-1, 1], and then mapped
    through the provided per-batch affine transformation.

    Parameters
    ----------
    data : tvm.Tensor
        3-D with shape [batch, 2, 3]. The affine matrix.

    target_shape: list/tuple of two int
        Specifies the output shape (H, W).

    Returns
    -------
    Output : tvm.Tensor
        4-D with shape [batch, 2, target_height, target_width]
    """
    assert target_shape is not None
    assert len(target_shape) == 2
    assert (
        target_shape[0] > 1 and target_shape[1] > 1
    ), "target height/width should be greater than 1"

    dtype = data.dtype
    height, width = target_shape
    # The small epsilon keeps the final coordinate strictly below 1.0.
    row_step = tir.const((2.0 - 1e-7) / (height - 1), dtype=dtype)
    col_step = tir.const((2.0 - 1e-7) / (width - 1), dtype=dtype)
    origin = tir.const(-1.0, dtype=dtype)

    def _transform(n, dim, i, j):
        # Normalized target coordinates, then the per-batch 2x3 affine map.
        norm_y = origin + i * row_step
        norm_x = origin + j * col_step
        return data[n, dim, 0] * norm_x + data[n, dim, 1] * norm_y + data[n, dim, 2]

    out_shape = (data.shape[0], len(target_shape), *target_shape)
    return te.compute(out_shape, _transform, tag="affine_grid")
def _grid_sample_2d(
    data, grid, method="bilinear", layout="NCHW", padding_mode="zeros", align_corners=True
):
    """Applies bilinear/nearest/bicubic sampling to input feature map.

    Given :math:`data` and :math:`grid` assuming NCHW layout, then the output is computed by

    .. math::

        x_{src} = grid[batch, 0, y_{dst}, x_{dst}] \\
        y_{src} = grid[batch, 1, y_{dst}, x_{dst}] \\
        output[batch, channel, y_{dst}, x_{dst}] = G(data[batch, channel, y_{src}, x_{src}])

    :math:`x_{dst}`, :math:`y_{dst}` enumerate all spatial locations in :math:`output`, and
    :math:`G()` denotes the interpolation method.

    The out-boundary points will be padded with zeros if padding_mode is "zeros", or
    border pixel value if padding_mode is "border", or
    inner pixel value if padding_mode is "reflection".

    The left-top corner (-1, -1) and right-bottom corner (1, 1) in grid will be map to
    (0, 0) and (h - 1, w - 1) of data if align_corners is "True", or
    (-0.5, -0.5) and (h - 0.5, w - 0.5) of data if align_corners is "False".

    The shape of the output will be (data.shape[0], data.shape[1], grid.shape[2], grid.shape[3]).

    The operator assumes that :math:`grid` has been normalized to [-1, 1].

    grid_sample often cooperates with affine_grid which generates sampling grids for grid_sample.

    Parameters
    ----------
    data : tvm.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]

    grid : tvm.Tensor
        4-D with shape [batch, 2, out_height, out_width]

    method : str
        The interpolation method "nearest", "bilinear", "bicubic" are supported.

    layout : str
        The layout of input data and the output.

    padding_mode : str
        The padding mode for outside grid values, "zeros", "border", "reflection" are supported.

    align_corners: bool
        Geometrically, we consider the pixels of the input as squares rather than points.
        If set to "True", the extrema ("-1" and "1") are considered as referring
        to the center points of the input corner pixels. If set to "False", they
        are instead considered as referring to the corner points of the input corner
        pixels, making the sampling more resolution agnostic.

    Returns
    -------
    Output : tvm.Tensor
        4-D with shape [batch, in_channel, out_height, out_width]
    """
    assert method in ("bilinear", "nearest", "bicubic"), f"{method} is not supported"
    assert padding_mode in ("zeros", "border", "reflection"), f"{padding_mode} is not supported"
    assert layout == "NCHW", f"{layout} is not supported"

    batch, in_channel, in_height, in_width = data.shape
    out_height, out_width = grid.shape[2:]

    def _get_pixel_value(n, c, h, w):
        # Out-of-bounds reads yield 0.0; together with unclipped coordinates this
        # realizes the "zeros" padding mode.
        return te.if_then_else(
            te.all(h >= 0, w >= 0, h < in_height, w < in_width),
            data[n, c, h, w],
            tir.const(0.0, dtype=data.dtype),
        )

    def _unnormalize(h, w):
        # Map normalized grid coordinates in [-1, 1] to pixel coordinates,
        # following the align_corners convention described in the docstring.
        if align_corners:
            y = (h + 1) * (in_height - 1) / 2
            x = (w + 1) * (in_width - 1) / 2
        else:
            y = -0.5 + (h + 1) * in_height / 2
            x = -0.5 + (w + 1) * in_width / 2
        return (y, x)

    def _clip_coordinates(x, size):
        # Clamp a coordinate into the valid range [0, size - 1].
        return te.min(te.max(x, 0), size - 1)

    def _compute_source_index(n, h, w):
        # Read the source location for output pixel (h, w) and apply the
        # padding-mode coordinate transform ("zeros" leaves coordinates as-is).
        y = grid[n, 1, h, w]
        x = grid[n, 0, h, w]
        y, x = _unnormalize(y, x)

        if padding_mode == "reflection":
            y = _reflect_coordinates(y, in_height)
            x = _reflect_coordinates(x, in_width)
            y = _clip_coordinates(y, in_height)
            x = _clip_coordinates(x, in_width)
        elif padding_mode == "border":
            y = _clip_coordinates(y, in_height)
            x = _clip_coordinates(x, in_width)

        return (y, x)

    def _reflect_coordinates(x, size):
        # Fold an out-of-range coordinate back into range by mirroring about the
        # borders (borders at pixel centers when align_corners, else pixel edges).
        def __refelection(x, size, corner_start):
            def __reflect(index, size, corner_start):
                index_align_corner = te.abs(corner_start - index)
                size_times = te.truncdiv(index_align_corner.astype("int32"), size).astype("int32")
                t = tir.Mod(size_times, 2)
                extra = index_align_corner - size_times * size
                return tir.if_then_else(
                    tir.EQ(t, 0), extra + corner_start, size - extra + corner_start
                )

            return tir.if_then_else(
                tir.all(x >= corner_start, x <= size + corner_start),
                x,
                __reflect(x, size, corner_start),
            )

        if align_corners:
            new_x = __refelection(x, size - 1, 0)
        else:
            new_x = __refelection(x, size, -0.5)
        return new_x

    def _bilinear_sample(n, c, h, w):
        # Weighted average of the 4 pixels surrounding the (fractional) source point.
        y, x = _compute_source_index(n, h, w)
        y0 = te.floor(y).astype("int32")
        x0 = te.floor(x).astype("int32")
        y1 = y0 + tir.const(1, "int32")
        x1 = x0 + tir.const(1, "int32")

        return (
            _get_pixel_value(n, c, y0, x0) * (1.0 - (y - y0)) * (1.0 - (x - x0))
            + _get_pixel_value(n, c, y0, x1) * (1.0 - (y - y0)) * (x - x0)
            + _get_pixel_value(n, c, y1, x0) * (y - y0) * (1.0 - (x - x0))
            + _get_pixel_value(n, c, y1, x1) * (y - y0) * (x - x0)
        )

    def _nearest_sample(n, c, h, w):
        # Round to the nearest integer pixel and read it.
        y, x = _compute_source_index(n, h, w)
        y_new = te.nearbyint(y).astype("int32")
        x_new = te.nearbyint(x).astype("int32")

        return _get_pixel_value(n, c, y_new, x_new)

    def _bicubic_sample(n, c, h, w):
        # Cubic convolution over a 4x4 neighborhood: interpolate each of the 4 rows
        # along x, then interpolate the row results along y.
        A = -0.75  # -0.75 is used in pytorch, it maybe different in other frameworks

        def cubic_weight_1(fraction):
            # Weight for the two taps nearest the sample point (|distance| < 1).
            return ((A + 2) * fraction - (A + 3)) * fraction * fraction + 1

        def cubic_weight_2(fraction):
            # Weight for the two outer taps (1 <= |distance| < 2).
            return ((A * fraction - 5 * A) * fraction + 8 * A) * fraction - 4 * A

        def cubic_interp_1d(pixel_0, pixel_1, pixel_2, pixel_3, fraction):
            weights = [0] * 4
            weights[0] = cubic_weight_2(fraction + 1)
            weights[1] = cubic_weight_1(fraction)
            weights[2] = cubic_weight_1(1 - fraction)
            weights[3] = cubic_weight_2(2 - fraction)
            return (
                pixel_0 * weights[0]
                + pixel_1 * weights[1]
                + pixel_2 * weights[2]
                + pixel_3 * weights[3]
            )

        y = grid[n, 1, h, w]
        x = grid[n, 0, h, w]
        y, x = _unnormalize(y, x)
        y_floor = te.floor(y).astype("int32")
        x_floor = te.floor(x).astype("int32")
        y_fraction = y - y_floor
        x_fraction = x - x_floor

        coefficients = [0] * 4

        for i in range(4):
            y_ = y_floor - 1 + i
            x_0 = x_floor - 1
            x_1 = x_floor + 0
            x_2 = x_floor + 1
            x_3 = x_floor + 2

            if padding_mode == "border":
                y_ = _clip_coordinates(y_, in_height).astype("int32")
                x_0 = _clip_coordinates(x_0, in_width).astype("int32")
                x_1 = _clip_coordinates(x_1, in_width).astype("int32")
                x_2 = _clip_coordinates(x_2, in_width).astype("int32")
                x_3 = _clip_coordinates(x_3, in_width).astype("int32")

            elif padding_mode == "reflection":
                y_ = _reflect_coordinates(y_, in_height)
                x_0 = _reflect_coordinates(x_0, in_width)
                x_1 = _reflect_coordinates(x_1, in_width)
                x_2 = _reflect_coordinates(x_2, in_width)
                x_3 = _reflect_coordinates(x_3, in_width)

                y_ = _clip_coordinates(y_, in_height).astype("int32")
                x_0 = _clip_coordinates(x_0, in_width).astype("int32")
                x_1 = _clip_coordinates(x_1, in_width).astype("int32")
                x_2 = _clip_coordinates(x_2, in_width).astype("int32")
                x_3 = _clip_coordinates(x_3, in_width).astype("int32")

            coefficients[i] = cubic_interp_1d(
                _get_pixel_value(n, c, y_, x_0),
                _get_pixel_value(n, c, y_, x_1),
                _get_pixel_value(n, c, y_, x_2),
                _get_pixel_value(n, c, y_, x_3),
                x_fraction,
            )

        return cubic_interp_1d(
            coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_fraction
        )

    if method == "bilinear":
        interpolation = _bilinear_sample
    elif method == "nearest":
        interpolation = _nearest_sample
    else:  # method == "bicubic"
        interpolation = _bicubic_sample

    return te.compute((batch, in_channel, out_height, out_width), interpolation, tag="grid_sample")
def _grid_sample_3d(
    data, grid, method="bilinear", layout="NCDHW", padding_mode="zeros", align_corners=True
):
    """Applies bilinear/nearest sampling to input feature map.

    Given :math:`data` and :math:`grid` assuming NCDHW layout, then the output is computed by

    .. math::

        x_{src} = grid[batch, 0, z_{dst}, y_{dst}, x_{dst}] \\
        y_{src} = grid[batch, 1, z_{dst}, y_{dst}, x_{dst}] \\
        z_{src} = grid[batch, 2, z_{dst}, y_{dst}, x_{dst}] \\
        output[batch, channel, z_{dst}, y_{dst}, x_{dst}]
        = G(data[batch, channel, z_{src}, y_{src}, x_{src}])

    :math:`x_{dst}`, :math:`y_{dst}`, :math:`z_{dst}` enumerate all spatial locations
    in :math:`output`, and :math:`G()` denotes the interpolation method.

    The out-boundary points will be padded with zeros if padding_mode is "zeros", or
    border pixel value if padding_mode is "border", or
    inner pixel value if padding_mode is "reflection".

    The left-top corner (-1, -1, -1) and right-bottom corner (1, 1, 1) in grid will be map to
    (0, 0, 0) and (d - 1, h - 1, w - 1) of data if align_corners is "True", or
    (-0.5, -0.5, -0.5) and (d - 0.5, h - 0.5, w - 0.5) of data if align_corners is "False".

    The shape of the output will be
    (data.shape[0], data.shape[1], grid.shape[2], grid.shape[3], grid.shape[4]).

    The operator assumes that :math:`grid` has been normalized to [-1, 1].

    grid_sample often cooperates with affine_grid which generates sampling grids for grid_sample.

    Parameters
    ----------
    data : tvm.Tensor
        5-D with shape [batch, in_channel, in_depth, in_height, in_width]

    grid : tvm.Tensor
        5-D with shape [batch, 3, out_depth, out_height, out_width]

    method : str
        The interpolation method "nearest", "bilinear"("trilinear") are supported.

    layout : str
        The layout of input data and the output.

    padding_mode : str
        The padding mode for outside grid values, "zeros", "border", "reflection" are supported.

    align_corners: bool
        Geometrically, we consider the pixels of the input as squares rather than points.
        If set to "True", the extrema ("-1" and "1") are considered as referring
        to the center points of the input corner pixels. If set to "False", they
        are instead considered as referring to the corner points of the input corner
        pixels, making the sampling more resolution agnostic.

    Returns
    -------
    Output : tvm.Tensor
        5-D with shape [batch, in_channel, out_depth, out_height, out_width]
    """
    assert method in ("bilinear", "nearest"), f"{method} is not supported"
    assert padding_mode in ("zeros", "border", "reflection"), f"{padding_mode} is not supported"
    assert layout == "NCDHW", f"{layout} is not supported"

    batch, in_channel, in_depth, in_height, in_width = data.shape
    out_depth, out_height, out_width = grid.shape[2:]

    def _get_pixel_value(n, c, d, h, w):
        # Out-of-bounds reads yield 0.0; together with unclipped coordinates this
        # realizes the "zeros" padding mode.
        return te.if_then_else(
            te.all(d >= 0, h >= 0, w >= 0, d < in_depth, h < in_height, w < in_width),
            data[n, c, d, h, w],
            tir.const(0.0, dtype=data.dtype),
        )

    def _compute_source_index(n, d, h, w):
        # Read the normalized source location for output voxel (d, h, w), map it to
        # pixel coordinates, and apply the padding-mode coordinate transform.
        z = grid[n, 2, d, h, w]
        y = grid[n, 1, d, h, w]
        x = grid[n, 0, d, h, w]

        if align_corners:
            z = (z + 1) * (in_depth - 1) / 2
            y = (y + 1) * (in_height - 1) / 2
            x = (x + 1) * (in_width - 1) / 2
        else:
            z = -0.5 + (z + 1) * in_depth / 2
            y = -0.5 + (y + 1) * in_height / 2
            x = -0.5 + (x + 1) * in_width / 2

        if padding_mode == "reflection":
            z = _reflect_coordinates(z, in_depth)
            y = _reflect_coordinates(y, in_height)
            x = _reflect_coordinates(x, in_width)
            z = _clip_coordinates(z, in_depth)
            y = _clip_coordinates(y, in_height)
            x = _clip_coordinates(x, in_width)
        elif padding_mode == "border":
            z = _clip_coordinates(z, in_depth)
            y = _clip_coordinates(y, in_height)
            x = _clip_coordinates(x, in_width)

        return (z, y, x)

    def _clip_coordinates(x, size):
        # Clamp a coordinate into the valid range [0, size - 1].
        return te.min(te.max(x, 0), size - 1)

    def _reflect_coordinates(x, size):
        # Fold an out-of-range coordinate back into range by mirroring about the
        # borders (borders at pixel centers when align_corners, else pixel edges).
        def __refelection(x, size, corner_start):
            def __reflect(index, size, corner_start):
                index_align_corner = te.abs(corner_start - index)
                size_times = te.truncdiv(index_align_corner.astype("int32"), size).astype("int32")
                t = tir.Mod(size_times, 2)
                extra = index_align_corner - size_times * size
                return tir.if_then_else(
                    tir.EQ(t, 0), extra + corner_start, size - extra + corner_start
                )

            return tir.if_then_else(
                tir.all(x >= corner_start, x <= size + corner_start),
                x,
                __reflect(x, size, corner_start),
            )

        if align_corners:
            return __refelection(x, size - 1, 0)
        return __refelection(x, size, -0.5)

    def _trilinear_sample(n, c, d, h, w):
        # Weighted average of the 8 voxels surrounding the (fractional) source point.
        z, y, x = _compute_source_index(n, d, h, w)
        z0 = te.floor(z).astype("int32")
        y0 = te.floor(y).astype("int32")
        x0 = te.floor(x).astype("int32")
        z1 = z0 + tir.const(1, "int32")
        y1 = y0 + tir.const(1, "int32")
        x1 = x0 + tir.const(1, "int32")

        return (
            _get_pixel_value(n, c, z0, y0, x0) * (1 - (x - x0)) * (1 - (y - y0)) * (1 - (z - z0))
            + _get_pixel_value(n, c, z0, y0, x1) * (x - x0) * (1 - (y - y0)) * (1 - (z - z0))
            + _get_pixel_value(n, c, z1, y1, x0) * (1 - (x - x0)) * (y - y0) * (z - z0)
            + _get_pixel_value(n, c, z1, y1, x1) * (x - x0) * (y - y0) * (z - z0)
            + _get_pixel_value(n, c, z0, y1, x0) * (1 - (x - x0)) * (y - y0) * (1 - (z - z0))
            + _get_pixel_value(n, c, z1, y0, x1) * (x - x0) * (1 - (y - y0)) * (z - z0)
            + _get_pixel_value(n, c, z1, y0, x0) * (1 - (x - x0)) * (1 - (y - y0)) * (z - z0)
            + _get_pixel_value(n, c, z0, y1, x1) * (x - x0) * (y - y0) * (1 - (z - z0))
        )

    def _nearest_sample(n, c, d, h, w):
        # Round to the nearest integer voxel and read it.
        z, y, x = _compute_source_index(n, d, h, w)
        z_new = te.nearbyint(z).astype("int32")
        y_new = te.nearbyint(y).astype("int32")
        x_new = te.nearbyint(x).astype("int32")
        return _get_pixel_value(n, c, z_new, y_new, x_new)

    if method == "bilinear":
        interpolation = _trilinear_sample
    else:  # method == "nearest"
        interpolation = _nearest_sample

    return te.compute(
        (batch, in_channel, out_depth, out_height, out_width), interpolation, tag="grid_sample"
    )
def grid_sample(
    data, grid, method="bilinear", layout="NCHW", padding_mode="zeros", align_corners=True
):
    """Sample ``data`` at the (normalized) coordinates given by ``grid``.

    Dispatches to the 2-D or 3-D implementation based on the rank implied by
    ``layout``. ``grid`` holds coordinates normalized to [-1, 1]; it typically
    comes from ``affine_grid``.

    Parameters
    ----------
    data : tvm.Tensor
        4-D [batch, in_channel, in_height, in_width] or
        5-D [batch, in_channel, in_depth, in_height, in_width] input.
    grid : tvm.Tensor
        4-D [batch, 2, out_height, out_width] or
        5-D [batch, 3, out_depth, out_height, out_width] sampling coordinates.
    method : str
        Interpolation method. 4-D supports "nearest", "bilinear", "bicubic";
        5-D supports "nearest" and "bilinear" (trilinear).
    layout : str
        Layout of ``data`` and the output ("NCHW" or "NCDHW").
    padding_mode : str
        How out-of-bounds grid values are handled: "zeros", "border",
        or "reflection".
    align_corners : bool
        When True, the extrema -1 and 1 refer to the centers of the corner
        pixels; when False, to their outer corners (resolution agnostic).

    Returns
    -------
    Output : tvm.Tensor
        4-D [batch, in_channel, out_height, out_width] or
        5-D [batch, in_channel, out_depth, out_height, out_width].

    Raises
    ------
    ValueError
        If ``layout`` does not describe a 4-D or 5-D tensor.
    """
    rank = len(layout)
    # Early-return dispatch: a 4-character layout means 2-D sampling,
    # a 5-character layout means 3-D (volumetric) sampling.
    if rank == 4:
        return _grid_sample_2d(data, grid, method, layout, padding_mode, align_corners)
    if rank == 5:
        return _grid_sample_3d(data, grid, method, layout, padding_mode, align_corners)
    msg = f"layout {layout} is not supported"
    raise ValueError(msg)
| 20,738 | 38.130189 | 99 | py |
tvm | tvm-main/python/tvm/topi/image/dilation2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, too-many-locals
# pylint: disable=unused-argument, redefined-builtin
"""Dilation2D operators"""
from __future__ import absolute_import as _abs
from tvm import te
from tvm.topi.utils import simplify
from ..nn.pad import pad
from ..nn.utils import get_pad_tuple
def dilation2d_nchw(input, filter, stride, padding, dilations, out_dtype=None):
    """Morphological dilation operator in NCHW layout.

    Each output element is the maximum, over the (dilated) kernel window, of
    input value plus the corresponding filter value.

    Parameters
    ----------
    input : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    filter : tvm.te.Tensor
        3-D with shape [in_channel, filter_height, filter_width]
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int or str
        Padding size
    dilations : int or a list/tuple of two ints
        Dilation size, or [dilation_height, dilation_width]
    out_dtype : Optional[str]
        Specifies the output data type.

    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, in_channel, out_height, out_width]
    """
    out_dtype = input.dtype if out_dtype is None else out_dtype
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilations, int) or len(dilations) == 2
    stride_h, stride_w = (stride, stride) if isinstance(stride, int) else stride
    dilation_h, dilation_w = (
        (dilations, dilations) if isinstance(dilations, int) else dilations
    )
    batch, channels, height, width = input.shape
    filter_channels, kernel_h, kernel_w = filter.shape
    assert (
        channels.value == filter_channels.value
    ), "For Dilation2D input and filter channels should be same."
    # Effective kernel extent once dilation gaps are accounted for.
    kernel_h_eff = (kernel_h - 1) * dilation_h + 1
    kernel_w_eff = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        padding, (kernel_h_eff, kernel_w_eff)
    )
    out_height = simplify((height - kernel_h_eff + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((width - kernel_w_eff + pad_left + pad_right) // stride_w + 1)
    # Zero-pad spatial dims only (N and C stay untouched).
    padded = pad(input, [0, 0, pad_top, pad_left], [0, 0, pad_down, pad_right], name="pad_temp")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    return te.compute(
        (batch, channels, out_height, out_width),
        lambda nn, ff, yy, xx: te.max(
            padded[nn, ff, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w].astype(
                out_dtype
            )
            + filter[ff, ry, rx].astype(out_dtype),
            axis=[ry, rx],
        ),
        tag="dilation2d_nchw",
    )
def dilation2d_nhwc(input, filter, stride, padding, dilations, out_dtype=None):
    """Morphological 2d dilation NHWC layout.

    Each output element is the maximum, over the (dilated) kernel window, of
    input value plus the corresponding filter value.

    Parameters
    ----------
    input : tvm.te.Tensor
        4-D with shape [batch, in_height, in_width, in_channel]
    filter : tvm.te.Tensor
        3-D with shape [filter_height, filter_width, in_channel]
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int
        Padding size
    dilations : int or a list/tuple of two ints
        Dilation size, or [dilation_height, dilation_width]
    out_dtype : Optional[str]
        Specifies the output data type.

    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_height, out_width, in_channel]
    """
    out_dtype = input.dtype if out_dtype is None else out_dtype
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilations, int) or len(dilations) == 2
    stride_h, stride_w = (stride, stride) if isinstance(stride, int) else stride
    dilation_h, dilation_w = (
        (dilations, dilations) if isinstance(dilations, int) else dilations
    )
    batch, height, width, channels = input.shape
    kernel_h, kernel_w, filter_channels = filter.shape
    assert (
        channels.value == filter_channels.value
    ), "For Dilation2D input and filter channels should be same."
    # Effective kernel extent once dilation gaps are accounted for.
    kernel_h_eff = (kernel_h - 1) * dilation_h + 1
    kernel_w_eff = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        padding, (kernel_h_eff, kernel_w_eff)
    )
    out_height = simplify((height - kernel_h_eff + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((width - kernel_w_eff + pad_left + pad_right) // stride_w + 1)
    # Zero-pad spatial dims only (N and C stay untouched).
    padded_input = pad(
        input, [0, pad_top, pad_left, 0], [0, pad_down, pad_right, 0], name="padded_input"
    )
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    # NOTE(review): the tag reads "dilation2d_nhcw" (apparent typo for "nhwc")
    # but is kept byte-identical since schedule code may match on this string.
    return te.compute(
        (batch, out_height, out_width, channels),
        lambda nn, yy, xx, ff: te.max(
            padded_input[
                nn, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, ff
            ].astype(out_dtype)
            + filter[ry, rx, ff].astype(out_dtype),
            axis=[ry, rx],
        ),
        tag="dilation2d_nhcw",
    )
| 6,187 | 33.764045 | 98 | py |
tvm | tvm-main/python/tvm/topi/image/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""IMAGE network operators"""
from __future__ import absolute_import as _abs
from .resize import *
from .dilation2d import *
from .grid_sample import *
| 973 | 37.96 | 62 | py |
tvm | tvm-main/python/tvm/topi/image/resize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""TVM operator input resize compute."""
from __future__ import absolute_import
import tvm
from tvm import te
from tvm.topi.utils import nchw_pack_layout, nchw_xc_layout
from .. import tag
def can_convert_multiply_to_intdiv(origin_size, scaled_size):
    """Return True when the resize scale multiplication may be replaced by an
    integer division, i.e. ``scaled_size`` is a compile-time constant that is
    an exact integer multiple of ``origin_size`` and small enough that the
    reciprocal trick stays precise."""
    # Only a constant (IntImm) target size can be analysed at compile time.
    if not isinstance(scaled_size, tvm.tir.expr.IntImm):
        return False
    ratio = scaled_size / origin_size.astype("float")
    # The scale factor must be an exact integer.
    if ratio.value % 1 != 0:
        return False
    # Reject ratios large enough to run into float precision trouble.
    epsilon = 1e-5
    upper_bound = 1 / (epsilon * origin_size + epsilon)
    if ratio > upper_bound:
        return False
    return True
def get_1d_indices(indices, layout="NCW"):
    """Unpack output *indices* for a 1-D resize into (n, c, x, cc, inum, ic).

    ``cc`` is the inner-channel index for NCWxc layouts (None for plain
    layouts); ``inum``/``ic`` are the packed-layout indices (0 otherwise).
    """
    if layout == "NCW":
        n, c, x = indices
        return n, c, x, None, 0, 0
    if layout == "NWC":
        n, x, c = indices
        return n, c, x, None, 0, 0
    # NOTE(review): ncw_pack_layout / ncw_xc_layout are not among the imports
    # visible in this file chunk — confirm they are in scope at module level.
    if ncw_pack_layout(layout):  # packed NCWinic layout
        n, c, x, inum, ic = indices
        return n, c, x, 0, inum, ic
    # The only remaining possibility is the vectorized NCWxc layout.
    assert ncw_xc_layout(layout)
    n, c, x, cc = indices
    return n, c, x, cc, 0, 0
def get_2d_indices(indices, layout="NCHW"):
    """Unpack output *indices* for a 2-D resize into (n, c, y, x, cc, inum, ic).

    ``cc`` is the inner-channel index for NCHWxc layouts (None for plain
    layouts); ``inum``/``ic`` are the packed-layout indices (0 otherwise).
    """
    if layout == "NCHW":
        n, c, y, x = indices
        return n, c, y, x, None, 0, 0
    if layout == "NHWC":
        n, y, x, c = indices
        return n, c, y, x, None, 0, 0
    if nchw_pack_layout(layout):  # packed NCHWinic layout
        n, c, y, x, inum, ic = indices
        return n, c, y, x, 0, inum, ic
    # The only remaining possibility is the vectorized NCHWxc layout.
    assert nchw_xc_layout(layout)
    n, c, y, x, cc = indices
    return n, c, y, x, cc, 0, 0
def get_3d_indices(indices, layout="NCDHW"):
    """Unpack output *indices* for a 3-D resize into (n, c, z, y, x, cc).

    ``cc`` is the inner-channel index for NCDHWxc layouts and None for the
    plain NDHWC / NCDHW layouts.
    """
    if layout == "NDHWC":
        n, z, y, x, c = indices
        return n, c, z, y, x, None
    if layout == "NCDHW":
        n, c, z, y, x = indices
        return n, c, z, y, x, None
    # Any other layout is treated as vectorized NCDHWxc.
    n, c, z, y, x, cc = indices
    return n, c, z, y, x, cc
def get_1d_pixel(data, layout, image_width, n, c, x, cc, ib, ic):
    """Read one input pixel for 1-D resize, clamping x into [0, image_width - 1],
    and return it cast to float."""
    x = tvm.te.max(tvm.te.min(x, image_width - 1), 0)
    # Build the access tuple for the layout, then index once.
    if layout == "NWC":
        coords = (n, x, c)
    elif layout == "NCW":
        coords = (n, c, x)
    elif ncw_pack_layout(layout):
        coords = (n, c, x, ib, ic)
    else:
        # The only remaining possibility is the vectorized NCWxc layout.
        assert ncw_xc_layout(layout)
        coords = (n, c, x, cc)
    return data(*coords).astype("float")
def get_2d_pixel(data, layout, image_height, image_width, n, c, y, x, cc, ib, ic):
    """Read one input pixel for 2-D resize, clamping y and x to the image
    bounds, and return it cast to float."""
    y = tvm.te.max(tvm.te.min(y, image_height - 1), 0)
    x = tvm.te.max(tvm.te.min(x, image_width - 1), 0)
    # Build the access tuple for the layout, then index once.
    if layout == "NHWC":
        coords = (n, y, x, c)
    elif layout == "NCHW":
        coords = (n, c, y, x)
    elif nchw_pack_layout(layout):
        coords = (n, c, y, x, ib, ic)
    else:
        # The only remaining possibility is the vectorized NCHWxc layout.
        assert nchw_xc_layout(layout)
        coords = (n, c, y, x, cc)
    return data(*coords).astype("float")
def get_3d_pixel(data, layout, image_depth, image_height, image_width, n, c, z, y, x, cc):
    """Read one input voxel for 3-D resize, clamping z, y and x to the volume
    bounds, and return it cast to float."""
    z = tvm.te.max(tvm.te.min(z, image_depth - 1), 0)
    y = tvm.te.max(tvm.te.min(y, image_height - 1), 0)
    x = tvm.te.max(tvm.te.min(x, image_width - 1), 0)
    # Build the access tuple for the layout, then index once.
    if layout == "NDHWC":
        coords = (n, z, y, x, c)
    elif layout == "NCDHW":
        coords = (n, c, z, y, x)
    else:
        # Any other layout is treated as vectorized NCDHWxc.
        coords = (n, c, z, y, x, cc)
    return data(*coords).astype("float")
def get_inx(
    x,
    image_width,
    target_width,
    coordinate_transformation_mode,
    start_x=0,
    end_x=-1,
    use_int_div=False,
):
    """Infer input x from output x with various coordinate transformation methods.

    Maps an output coordinate ``x`` back to a (generally fractional) source
    coordinate in the input image, following the conventions of the ONNX
    Resize operator.  ``start_x``/``end_x`` are the normalized ROI bounds,
    used only by the "tf_crop_and_resize" mode.  Raises ValueError for an
    unknown mode.
    """
    # Ratio of input to output width; used by the scale-based modes below.
    scale_x = te.div(image_width.astype("float"), target_width.astype("float"))
    if coordinate_transformation_mode == "half_pixel":
        in_x = (x + 0.5) * scale_x - 0.5
    elif coordinate_transformation_mode == "align_corners":
        # Endpoints map exactly onto endpoints: scale by (W_in-1)/(W_out-1).
        in_x = (image_width - 1).astype("float") / (target_width - 1) * x
    elif coordinate_transformation_mode == "asymmetric":
        if use_int_div:
            # Integer-division fast path (valid when the scale is an exact
            # integer; see can_convert_multiply_to_intdiv).
            in_x = te.div(x, te.div(target_width, image_width))
        else:
            in_x = scale_x * x
    elif coordinate_transformation_mode == "pytorch_half_pixel":
        # Same as half_pixel, except a length-1 output collapses to 0.
        in_x = te.if_then_else(target_width > 1, (x + 0.5) * scale_x - 0.5, 0.0)
    elif coordinate_transformation_mode == "tf_half_pixel_for_nn":
        in_x = (x + 0.5) * scale_x
    elif coordinate_transformation_mode == "tf_crop_and_resize":
        # Sample within the normalized ROI [start_x, end_x]; a length-1
        # output samples the ROI midpoint.
        in_x = te.if_then_else(
            target_width > 1,
            start_x * (image_width - 1)
            + x * (end_x - start_x) * (image_width - 1).astype("float") / (target_width - 1),
            0.5 * (start_x + end_x) * (image_width - 1),
        )
    else:
        raise ValueError(
            f"Unsupported coordinate_transformation_mode: {coordinate_transformation_mode}"
        )
    return in_x
def get_closest_index(in_x, rounding_method, boxes, use_int_div=False):
    """Round a fractional source coordinate to an int32 pixel index using the
    requested rounding method.

    When ``use_int_div`` is set, ``in_x`` already holds an integer-division
    result and is only cast.  A non-None ``boxes`` (crop-and-resize path)
    forces plain rounding.  Raises ValueError for an unknown method.
    """
    if use_int_div:
        return in_x.astype("int32")
    # Nudge used by floor/ceil to counteract GPU float rounding error.
    epsilon = 1e-5
    if rounding_method == "round" or boxes is not None:
        return te.round(in_x).astype("int32")
    if rounding_method == "round_prefer_floor":
        return te.ceil(in_x - 0.5).astype("int32")
    if rounding_method == "round_prefer_ceil":
        return te.floor(in_x + 0.5).astype("int32")
    if rounding_method == "floor":
        return te.floor(in_x + epsilon).astype("int32")
    if rounding_method == "ceil":
        return te.ceil(in_x - epsilon).astype("int32")
    raise ValueError(f"Unknown rounding method: {rounding_method}")
def _lerp(A, B, t):
"""Perform Linear interpolation in 1D"""
return A * (1.0 - t) + B * t
def _cubic_spline_weights(t, alpha):
"""create cubic spline weights in 1D"""
t2 = t * t
t3 = t * t * t
w1 = alpha * (t3 - 2 * t2 + t)
w2 = (alpha + 2) * t3 - (3 + alpha) * t2 + 1
w3 = -(alpha + 2) * t3 + (3 + 2 * alpha) * t2 - alpha * t
w4 = -alpha * t3 + alpha * t2
return [w1, w2, w3, w4]
def _cubic_kernel(inputs, w):
"""perform cubic interpolation in 1D"""
return sum([a_i * w_i for a_i, w_i in zip(inputs, w)])
def _resize_1d(
    indices,
    data,
    roi,
    image_width,
    target_width,
    boxes=None,
    box_indices=None,
    method=None,
    extrapolation_value=0.0,
    layout="NCW",
    coordinate_transformation_mode="align_corners",
    rounding_method="",
    alpha=-0.5,
    exclude_outside=0,
    out_dtype=None,
):
    """Perform resize operation on the data with selected method and options.
    Parameters
    ----------
    indices : tuple
        The indices of input data
    data : tvm.te.Tensor
        inputs is a 3-D tensor with shape
        [batch, channel, in_width]
        or [batch, in_width, channel]
    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 2, and format [start_w, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    image_width : integer
        Input image width
    target_width : integer
        The target resized image width
    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.
    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    layout: string, optional
        "NCW", "NWC", or "NCWc".
    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method: string, optional
        indicates how to find the "nearest" pixel in nearest_neighbor method
        [round, floor, ceil]
    alpha: float, optional
        Bicubic spline coefficient
    exclude_outside: bool, optional:
        Exclude values outside the image for bicubic interpolation
    out_dtype: string, optional
        Type to return. If left None will be same as input type.
    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """
    def _cast_output(value, data_dtype="float32", out_dtype=None):
        # Cast the interpolated (float) value back to the requested dtype,
        # defaulting to the input tensor's dtype.
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)
    n, c, x, cc, inum, ic = get_1d_indices(indices, layout)
    box_idx = box_indices(n) if box_indices is not None else n
    if boxes is not None:
        # TODO(mbrookhart): Find an example of this
        raise NotImplementedError("resize1d with image boxes not yet implemented")
    # Map the output coordinate back to a fractional source coordinate.
    in_x = get_inx(x, image_width, target_width, coordinate_transformation_mode, roi[0], roi[1])
    if method == "nearest_neighbor":
        # Default rounding depends on the coordinate transformation mode.
        if rounding_method == "":
            if coordinate_transformation_mode == "align_corners":
                rounding_method = "round"
            else:
                rounding_method = "floor"
        closest_x_index = get_closest_index(in_x, rounding_method, boxes)
        value = get_1d_pixel(data, layout, image_width, box_idx, c, closest_x_index, cc, inum, ic)
    elif method == "linear":
        x_int = te.floor(in_x).astype("int32")
        x_lerp = in_x - x_int
        # Interpolate between the two neighbors at x_int and x_int + 1.
        p = [0 for i in range(2)]
        for i in range(2):
            p[i] = get_1d_pixel(data, layout, image_width, box_idx, c, x_int + i, cc, inum, ic)
        value = _lerp(*p, x_lerp)
    elif method == "cubic":
        xint = te.floor(in_x).astype("int32")
        xfract = in_x - te.floor(in_x)
        # Get the surrounding values
        p = [0 for i in range(4)]
        for i in range(4):
            p[i] = get_1d_pixel(data, layout, image_width, box_idx, c, xint + i - 1, cc, inum, ic)
        wx = _cubic_spline_weights(xfract, alpha)
        if exclude_outside:
            # Zero out weights of out-of-image taps, then renormalize so the
            # remaining weights still sum to one.
            for i in range(4):
                wx[i] = te.if_then_else(
                    te.any(xint - 1 + i < 0, xint + i > image_width), 0.0, wx[i]
                )
            sum_wx = sum(wx)
            wx = [w / sum_wx for w in wx]
        value = _cubic_kernel(p, wx)
    else:
        raise ValueError("Unknown resize method:", method)
    if coordinate_transformation_mode == "tf_crop_and_resize":
        # use extrapolation_value if in_x is out of boundary
        value = tvm.tir.if_then_else(
            in_x < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value, value),
        )
    return _cast_output(value, data.dtype, out_dtype=out_dtype)
def resize1d(
    data,
    roi,
    size,
    layout="NCW",
    method="linear",
    coordinate_transformation_mode="half_pixel",
    rounding_method="",
    bicubic_alpha=-0.5,
    bicubic_exclude=0,
    extrapolation_value=0.0,
    out_dtype=None,
    output_shape=None,
):
    """Perform resize operation on the data.
    Parameters
    ----------
    data : tvm.te.Tensor
        inputs is a 3-D tensor with shape
        [batch, channel in_width]
        or [batch in_width, channel]
    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 2, and format [start_w, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    size: Tuple
        Output resolution scale to
    layout: string, optional
        "NCW", "NWC", or "NCWc".
    coordinate_transformation_mode: string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        Refer to the ONNX Resize operator specification for details.
        Available options are "half_pixel", "align_corners" and "asymmetric".
    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method:
        Method for rounding coordinate locations
    bicubic_alpha: float, optional
        Bicubic spline coefficient
    bicubic_exclude: bool, optional:
        Exclude values outside the image for bicubic interpolation
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    out_dtype: string, optional
        Type to return. If left None will be same as input type.
    output_shape: tvm.tir.container.Array, optional
        Shape to return. If left None will be inferred
        (If shape is determined dynamically, pass out_dtype.shape as output_shape)
    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, channel, in_width*scale]
        or [batch, in_width*scale, channel]
        or 5-D with shape [batch, channel-major, in_width*scale, channel-minor]
    """
    method = method.lower()
    # Derive the output shape from the layout unless the caller supplied one
    # (needed when the shape is determined dynamically).
    if layout == "NWC":
        in_n, in_w, in_c = data.shape
        if output_shape is None:
            output_shape = [in_n, size[0], in_c]
    elif layout == "NCW":
        in_n, in_c, in_w = data.shape
        if output_shape is None:
            output_shape = [in_n, in_c, size[0]]
    elif ncw_pack_layout(layout):  # for NCWinic
        in_n, in_c, in_w, in_inum, in_ic = data.shape
        if output_shape is None:
            output_shape = [in_n, in_c, size[0], in_inum, in_ic]
    elif ncw_xc_layout(layout):  # for NCWxc
        in_n, in_c, in_w, in_cc = data.shape
        if output_shape is None:
            output_shape = [in_n, in_c, size[0], in_cc]
    else:
        raise ValueError(f"{layout} layout is not supported.")
    if isinstance(size, tuple):
        size = list(size)
    # Normalize plain Python ints in `size` to int32 IntImm nodes.
    for i in range(1):
        if isinstance(size[i], int):
            size[i] = tvm.tir.IntImm("int32", size[i])
    def compute_func(*indices):
        # Per-element resize kernel evaluated lazily by te.compute.
        return _resize_1d(
            indices,
            data,
            roi,
            in_w,
            size[0],
            method=method,
            layout=layout,
            coordinate_transformation_mode=coordinate_transformation_mode,
            rounding_method=rounding_method,
            alpha=bicubic_alpha,
            exclude_outside=bicubic_exclude,
            extrapolation_value=extrapolation_value,
            out_dtype=out_dtype,
        )
    return te.compute(output_shape, compute_func, name="resize", tag=tag.INJECTIVE)
def _resize_2d(
    indices,
    data,
    roi,
    image_height,
    image_width,
    target_height,
    target_width,
    boxes=None,
    box_indices=None,
    method=None,
    extrapolation_value=0.0,
    layout="NCHW",
    coordinate_transformation_mode="align_corners",
    rounding_method="",
    alpha=-0.5,
    exclude_outside=0,
    out_dtype=None,
):
    """Perform resize operation on the data with selected method and options.
    Parameters
    ----------
    indices : tuple
        The indices of input data
    data : tvm.te.Tensor
        inputs is a 4-D tensor with shape
        [batch, channel, in_height, in_width]
        or [batch, in_height, in_width, channel]
    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 4, and format [start_h, start_w, end_h, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    image_height : integer
        Input image height
    image_width : integer
        Input image width
    target_height : integer
        The target resized image height
    target_width : integer
        The target resized image width
    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.
    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")
    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    layout: string, optional
        "NCHW", "NHWC", or "NCHWc".
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method: string, optional
        indicates how to find the "nearest" pixel in nearest_neighbor method
        [round, floor, ceil]
    alpha: float, optional
        Bicubic spline coefficient
    exclude_outside: bool, optional:
        Exclude values outside the image for bicubic interpolation
    out_dtype: string, optional
        Type to return. If left None will be same as input type.
    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """
    def _cast_output(value, data_dtype="float32", out_dtype=None):
        # Cast the interpolated (float) value back to the requested dtype,
        # defaulting to the input tensor's dtype.
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)
    # Integer-division fast path is only valid for asymmetric nearest-neighbor
    # with an exact integer scale factor.
    height_use_int_div = False
    width_use_int_div = False
    if method == "nearest_neighbor" and coordinate_transformation_mode == "asymmetric":
        height_use_int_div = can_convert_multiply_to_intdiv(image_height, target_height)
        width_use_int_div = can_convert_multiply_to_intdiv(image_width, target_width)
    n, c, y, x, cc, inum, ic = get_2d_indices(indices, layout)
    box_idx = box_indices(n) if box_indices is not None else n
    if boxes is not None:
        # Crop-and-resize: source coordinates come from the normalized box.
        y1, x1 = boxes(n, 0), boxes(n, 1)
        y2, x2 = boxes(n, 2), boxes(n, 3)
        in_h = (image_height - 1) * (y2 - y1)
        in_w = (image_width - 1) * (x2 - x1)
        h_scale = in_h.astype("float") / (target_height - 1)
        w_scale = in_w.astype("float") / (target_width - 1)
        in_y = y1 * (image_height - 1) + h_scale * y
        in_x = x1 * (image_width - 1) + w_scale * x
    else:
        # Map output coordinates back to fractional source coordinates.
        in_x = get_inx(
            x,
            image_width,
            target_width,
            coordinate_transformation_mode,
            roi[1],
            roi[3],
            width_use_int_div,
        )
        in_y = get_inx(
            y,
            image_height,
            target_height,
            coordinate_transformation_mode,
            roi[0],
            roi[2],
            height_use_int_div,
        )
    if method == "nearest_neighbor":
        # Default rounding depends on the coordinate transformation mode.
        if rounding_method == "":
            if coordinate_transformation_mode == "align_corners":
                rounding_method = "round"
            else:
                rounding_method = "floor"
        closest_x_index = get_closest_index(in_x, rounding_method, boxes, width_use_int_div)
        closest_y_index = get_closest_index(in_y, rounding_method, boxes, height_use_int_div)
        value = get_2d_pixel(
            data,
            layout,
            image_height,
            image_width,
            box_idx,
            c,
            closest_y_index,
            closest_x_index,
            cc,
            inum,
            ic,
        )
    elif method == "linear":
        y_int = te.floor(in_y).astype("int32")
        x_int = te.floor(in_x).astype("int32")
        y_lerp = in_y - y_int
        x_lerp = in_x - x_int
        # Bilinear: lerp along x on both rows, then lerp the rows along y.
        p = [[0 for i in range(2)] for j in range(2)]
        for j in range(2):
            for i in range(2):
                p[j][i] = get_2d_pixel(
                    data,
                    layout,
                    image_height,
                    image_width,
                    box_idx,
                    c,
                    y_int + j,
                    x_int + i,
                    cc,
                    inum,
                    ic,
                )
        top = _lerp(*p[0], x_lerp)
        bottom = _lerp(*p[1], x_lerp)
        value = _lerp(top, bottom, y_lerp)
    elif method == "cubic":
        xint = te.floor(in_x).astype("int32")
        xfract = in_x - te.floor(in_x)
        yint = te.floor(in_y).astype("int32")
        yfract = in_y - te.floor(in_y)
        # Get the surrounding values
        p = [[0 for i in range(4)] for j in range(4)]
        for j in range(4):
            for i in range(4):
                p[j][i] = get_2d_pixel(
                    data,
                    layout,
                    image_height,
                    image_width,
                    box_idx,
                    c,
                    yint + j - 1,
                    xint + i - 1,
                    cc,
                    inum,
                    ic,
                )
        wx = _cubic_spline_weights(xfract, alpha)
        wy = _cubic_spline_weights(yfract, alpha)
        if exclude_outside:
            # Zero out weights of out-of-image taps, then renormalize so the
            # remaining weights still sum to one along each axis.
            for i in range(4):
                wx[i] = te.if_then_else(
                    te.any(xint - 1 + i < 0, xint + i > image_width), 0.0, wx[i]
                )
                wy[i] = te.if_then_else(
                    te.any(yint - 1 + i < 0, yint + i > image_height), 0.0, wy[i]
                )
            sum_wx = sum(wx)
            sum_wy = sum(wy)
            wx = [w / sum_wx for w in wx]
            wy = [w / sum_wy for w in wy]
        # Separable bicubic: filter each row along x, then combine rows along y.
        col0 = _cubic_kernel(p[0], wx)
        col1 = _cubic_kernel(p[1], wx)
        col2 = _cubic_kernel(p[2], wx)
        col3 = _cubic_kernel(p[3], wx)
        value = _cubic_kernel([col0, col1, col2, col3], wy)
    else:
        raise ValueError("Unknown resize method:", method)
    if coordinate_transformation_mode == "tf_crop_and_resize":
        out = tvm.tir.if_then_else(
            in_y < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_y > image_height - 1, extrapolation_value, value),
        )
        # use extrapolation_value if in_x is out of boundary
        value = tvm.tir.if_then_else(
            in_x < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value, out),
        )
    return _cast_output(value, data.dtype, out_dtype=out_dtype)
def resize2d(
    data,
    roi,
    size,
    layout="NCHW",
    method="linear",
    coordinate_transformation_mode="half_pixel",
    rounding_method="",
    bicubic_alpha=-0.5,
    bicubic_exclude=0,
    extrapolation_value=0.0,
    out_dtype=None,
    output_shape=None,
):
    """Perform resize operation on the data.
    Parameters
    ----------
    data : tvm.te.Tensor
        inputs is a 4-D tensor with shape
        [batch, channel, in_height, in_width]
        or [batch, in_height, in_width, channel]
    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 4, and format [start_h, start_w, end_h, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    size: Tuple
        Output resolution scale to
    layout: string, optional
        "NCHW", "NHWC", or "NCHWc".
    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method:
        Method for rounding coordinate locations
    bicubic_alpha: float, optional
        Bicubic spline coefficient
    bicubic_exclude: bool, optional:
        Exclude values outside the image for bicubic interpolation
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    out_dtype: string, optional
        Type to return. If left None will be same as input type.
    output_shape: tvm.tir.container.Array, optional
        Shape to return. If left None will be inferred
        (If shape is determined dynamically, pass out_dtype.shape as output_shape)
    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, channel, in_height*scale, in_width*scale]
        or [batch, in_height*scale, in_width*scale, channel]
        or 5-D with shape [batch, channel-major, in_height*scale, in_width*scale, channel-minor]
    """
    method = method.lower()
    # Derive the output shape from the layout unless the caller supplied one
    # (needed when the shape is determined dynamically).
    if layout == "NHWC":
        in_n, in_h, in_w, in_c = data.shape
        if output_shape is None:
            output_shape = [in_n, size[0], size[1], in_c]
    elif layout == "NCHW":
        in_n, in_c, in_h, in_w = data.shape
        if output_shape is None:
            output_shape = [in_n, in_c, size[0], size[1]]
    elif nchw_pack_layout(layout):  # for NCHWinic
        in_n, in_c, in_h, in_w, in_inum, in_ic = data.shape
        if output_shape is None:
            output_shape = [in_n, in_c, size[0], size[1], in_inum, in_ic]
    elif nchw_xc_layout(layout):  # for NCHWxc
        in_n, in_c, in_h, in_w, in_cc = data.shape
        if output_shape is None:
            output_shape = [in_n, in_c, size[0], size[1], in_cc]
    else:
        raise ValueError(f"{layout} layout is not supported.")
    if isinstance(size, tuple):
        size = list(size)
    # Normalize plain Python ints in `size` to int32 IntImm nodes.
    for i in range(2):
        if isinstance(size[i], int):
            size[i] = tvm.tir.IntImm("int32", size[i])
    def compute_func(*indices):
        # Per-element resize kernel evaluated lazily by te.compute.
        return _resize_2d(
            indices,
            data,
            roi,
            in_h,
            in_w,
            size[0],
            size[1],
            method=method,
            layout=layout,
            coordinate_transformation_mode=coordinate_transformation_mode,
            rounding_method=rounding_method,
            alpha=bicubic_alpha,
            exclude_outside=bicubic_exclude,
            extrapolation_value=extrapolation_value,
            out_dtype=out_dtype,
        )
    return te.compute(output_shape, compute_func, name="resize", tag=tag.INJECTIVE)
def crop_and_resize(
    data,
    boxes,
    box_indices,
    crop_size,
    layout="NCHW",
    method="bilinear",
    extrapolation_value=None,
    out_dtype=None,
):
    """Crop boxes out of ``data`` and resize every crop to ``crop_size``.

    Parameters
    ----------
    data : tvm.te.Tensor
        inputs is a 4-D tensor with shape
        [batch, channel, in_height, in_width]
        or [batch, in_height, in_width, channel]
    boxes : tvm.te.Tensor
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.
    box_indices : tvm.te.Tensor
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.
    crop_size : Tuple
        The target size of each box.
    layout : string, optional
        "NCHW", "NHWC"
    method : {"bilinear", "nearest_neighbor"}
        Method to be used for resizing.
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    out_dtype : string, optional
        Type to return. If left None will be same as input type.

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [num_boxes, channel, crop_height, crop_width]
        or [num_boxes, crop_height, crop_width, channel]
    """
    method = method.lower()
    crop_h = crop_size[0]
    crop_w = crop_size[1]
    num_boxes = box_indices.shape[0]

    # Pick output layout and source extents according to the data layout.
    if layout == "NHWC":
        out_shape = [num_boxes, crop_h, crop_w, data.shape[3]]
        src_h, src_w = data.shape[1], data.shape[2]
    elif layout == "NCHW":
        out_shape = [num_boxes, data.shape[1], crop_h, crop_w]
        src_h, src_w = data.shape[2], data.shape[3]
    elif layout.startswith("NCHW"):  # packed NCHWxc layout
        out_shape = [
            num_boxes,
            data.shape[1],
            crop_h,
            crop_w,
            data.shape[4],
        ]
        src_h, src_w = data.shape[2], data.shape[3]
    else:
        raise ValueError(f"{layout} layout is not supported.")
    image_h = src_h.astype("int32")
    image_w = src_w.astype("int32")

    # _resize_2d only understands "linear"; "bilinear" is its 2-D alias.
    if method == "bilinear":
        method = "linear"

    def _compute(*indices):
        return _resize_2d(
            indices,
            data,
            [0.0] * 4,
            image_h,
            image_w,
            crop_h,
            crop_w,
            boxes,
            box_indices,
            method=method,
            extrapolation_value=extrapolation_value,
            layout=layout,
            coordinate_transformation_mode="tf_crop_and_resize",
            out_dtype=out_dtype,
        )

    return te.compute(out_shape, _compute, name="crop_and_resize", tag=tag.INJECTIVE)
def _resize_3d(
    indices,
    data,
    roi,
    image_depth,
    image_height,
    image_width,
    target_depth,
    target_height,
    target_width,
    boxes=None,
    box_indices=None,
    method=None,
    extrapolation_value=0.0,
    layout="NCHW",
    coordinate_transformation_mode="align_corners",
    rounding_method="",
    alpha=-0.5,
    exclude_outside=0,
    out_dtype=None,
):
    """Perform resize operation on the data with selected method and options.

    Parameters
    ----------
    indices : tuple
        The indices of input data
    data : tvm.te.Tensor
        inputs is a 5-D tensor with shape
        [batch, channel, in_depth, in_height, in_width]
        or [batch, in_depth, in_height, in_width, channel]
    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 6, and format [start_d, start_h, start_w, end_d, end_h, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    image_depth : integer
        Input image depth
    image_height : integer
        Input image height
    image_width : integer
        Input image width
    target_depth : integer
        The target resized image depth
    target_height : integer
        The target resized image height
    target_width : integer
        The target resized image width
    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.
    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.
    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    layout: string, optional
        "NCHW", "NHWC", or "NCHWc".
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method: string, optional
        indicates how to find the "nearest" pixel in nearest_neighbor method
        [round, floor, ceil]
    alpha: float, optional
        Bicubic spline coefficient
    exclude_outside: bool, optional
        Exclude values outside the image for bicubic interpolation
    out_dtype: string, optional
        Type to return. If left None will be same as input type.

    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """

    def _cast_output(value, data_dtype="float32", out_dtype=None):
        # Keep the input dtype unless an explicit output dtype was requested.
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)

    n, c, z, y, x, cc = get_3d_indices(indices, layout)
    box_idx = box_indices(n) if box_indices is not None else n
    if boxes is not None:
        # TODO(mbrookhart): Find an example of this
        # BUGFIX: error message previously said "resize1d" in this 3-D routine.
        raise NotImplementedError("resize3d with image boxes not yet implemented")
    # Map each output coordinate back into the source image space.
    in_z = get_inx(z, image_depth, target_depth, coordinate_transformation_mode, roi[2], roi[5])
    in_y = get_inx(y, image_height, target_height, coordinate_transformation_mode, roi[1], roi[4])
    in_x = get_inx(x, image_width, target_width, coordinate_transformation_mode, roi[0], roi[3])
    if method == "nearest_neighbor":
        if rounding_method == "":
            # Default rounding follows the coordinate transformation mode.
            if coordinate_transformation_mode == "align_corners":
                rounding_method = "round"
            else:
                rounding_method = "floor"
        closest_z_index = get_closest_index(in_z, rounding_method, boxes)
        closest_y_index = get_closest_index(in_y, rounding_method, boxes)
        closest_x_index = get_closest_index(in_x, rounding_method, boxes)
        value = get_3d_pixel(
            data,
            layout,
            image_depth,
            image_height,
            image_width,
            box_idx,
            c,
            closest_z_index,
            closest_y_index,
            closest_x_index,
            cc,
        )
    elif method == "linear":
        z_int = te.floor(in_z).astype("int32")
        y_int = te.floor(in_y).astype("int32")
        x_int = te.floor(in_x).astype("int32")
        z_lerp = in_z - z_int
        y_lerp = in_y - y_int
        x_lerp = in_x - x_int
        # Gather the 2x2x2 neighborhood, then interpolate along x, y, z in turn.
        p = [[[0 for i in range(2)] for j in range(2)] for k in range(2)]
        for k in range(2):
            for j in range(2):
                for i in range(2):
                    p[k][j][i] = get_3d_pixel(
                        data,
                        layout,
                        image_depth,
                        image_height,
                        image_width,
                        box_idx,
                        c,
                        z_int + k,
                        y_int + j,
                        x_int + i,
                        cc,
                    )
        l = [[0 for i in range(2)] for j in range(2)]
        for j in range(2):
            for i in range(2):
                l[j][i] = _lerp(*p[j][i], x_lerp)
        top = _lerp(*l[0], y_lerp)
        bottom = _lerp(*l[1], y_lerp)
        value = _lerp(top, bottom, z_lerp)
    elif method == "cubic":
        zint = te.floor(in_z).astype("int32")
        zfract = in_z - te.floor(in_z)
        yint = te.floor(in_y).astype("int32")
        yfract = in_y - te.floor(in_y)
        xint = te.floor(in_x).astype("int32")
        xfract = in_x - te.floor(in_x)
        # Gather the surrounding 4x4x4 neighborhood.
        p = [[[0 for i in range(4)] for j in range(4)] for k in range(4)]
        for k in range(4):
            for j in range(4):
                for i in range(4):
                    p[k][j][i] = get_3d_pixel(
                        data,
                        layout,
                        image_depth,
                        image_height,
                        image_width,
                        box_idx,
                        c,
                        zint + k - 1,
                        yint + j - 1,
                        xint + i - 1,
                        cc,
                    )
        wz = _cubic_spline_weights(zfract, alpha)
        wy = _cubic_spline_weights(yfract, alpha)
        wx = _cubic_spline_weights(xfract, alpha)
        if exclude_outside:
            # Zero the weights of taps that fall outside the image, then
            # renormalize so remaining weights sum to one.
            # BUGFIX: the depth weights previously tested the x coordinate
            # against image_height and overwrote wz with wx values.
            for i in range(4):
                wz[i] = te.if_then_else(
                    te.any(zint - 1 + i < 0, zint + i > image_depth), 0.0, wz[i]
                )
                wy[i] = te.if_then_else(
                    te.any(yint - 1 + i < 0, yint + i > image_height), 0.0, wy[i]
                )
                wx[i] = te.if_then_else(
                    te.any(xint - 1 + i < 0, xint + i > image_width), 0.0, wx[i]
                )
            sum_wz = sum(wz)
            sum_wy = sum(wy)
            sum_wx = sum(wx)
            wz = [w / sum_wz for w in wz]
            wy = [w / sum_wy for w in wy]
            wx = [w / sum_wx for w in wx]
        l = [[0 for i in range(4)] for j in range(4)]
        for j in range(4):
            for i in range(4):
                l[j][i] = _cubic_kernel(p[j][i], wx)
        col0 = _cubic_kernel(l[0], wy)
        col1 = _cubic_kernel(l[1], wy)
        col2 = _cubic_kernel(l[2], wy)
        col3 = _cubic_kernel(l[3], wy)
        value = _cubic_kernel([col0, col1, col2, col3], wz)
    else:
        raise ValueError("Unknown resize method:", method)
    if coordinate_transformation_mode == "tf_crop_and_resize":
        # Chain the three bounds checks so that an out-of-range coordinate on
        # ANY axis yields extrapolation_value.
        # BUGFIX: the y-axis check previously fell back to the raw `value`,
        # discarding the result of the z-axis check.
        out = tvm.tir.if_then_else(
            in_z < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_z > image_depth - 1, extrapolation_value, value),
        )
        out = tvm.tir.if_then_else(
            in_y < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_y > image_height - 1, extrapolation_value, out),
        )
        # use extrapolation_value if in_x is out of boundary
        value = tvm.tir.if_then_else(
            in_x < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value, out),
        )
    return _cast_output(value, data.dtype, out_dtype=out_dtype)
def resize3d(
    data,
    roi,
    size,
    layout="NCDHW",
    method="linear",
    coordinate_transformation_mode="half_pixel",
    rounding_method="",
    bicubic_alpha=-0.5,
    bicubic_exclude=0,
    extrapolation_value=0.0,
    out_dtype=None,
    output_shape=None,
):
    """Perform resize operation on the data.

    Parameters
    ----------
    data : tvm.te.Tensor
        inputs is a 5-D tensor with shape
        [batch, channel, in_depth, in_height, in_width]
        or [batch, in_depth, in_height, in_width, channel]
    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 6, and format [start_d, start_h, start_w, end_d, end_h, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    size: Tuple
        Output resolution scale to
    layout: string, optional
        "NCDHW", "NDHWC", or "NCDHWc".
    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method:
        Method for rounding coordinate locations
    bicubic_alpha: float, optional
        Bicubic spline coefficient
    bicubic_exclude: bool, optional:
        Exclude values outside the image for bicubic interpolation
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    out_dtype: string, optional
        Type to return. If left None will be same as input type.
    output_shape: tvm.tir.container.Array, optional
        Shape to return. If left None will be inferred
        (If shape is determined dynamically, pass out_dtype.shape as output_shape)

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, channel, in_depth*scale, in_height*scale, in_width*scale]
        or [batch, in_depth*scale, in_height*scale, in_width*scale, channel]
        or 5-D with shape
        [batch, channel-major, in_depth*scale, in_height*scale, in_width*scale, channel-minor]
    """
    method = method.lower()
    # BUGFIX: honor a caller-supplied output_shape instead of unconditionally
    # overwriting it, matching the behavior of resize2d in this module.
    if layout == "NDHWC":
        in_n, in_d, in_h, in_w, in_c = data.shape
        if output_shape is None:
            output_shape = [in_n, size[0], size[1], size[2], in_c]
    elif layout == "NCDHW":
        in_n, in_c, in_d, in_h, in_w = data.shape
        if output_shape is None:
            output_shape = [in_n, in_c, size[0], size[1], size[2]]
    # Otherwise layout must be NCDHWxc
    else:
        in_n, in_c, in_d, in_h, in_w, in_cc = data.shape
        if output_shape is None:
            output_shape = [in_n, in_c, size[0], size[1], size[2], in_cc]
    if isinstance(size, tuple):
        size = list(size)
    # Normalize python ints to IntImm so they are valid TE expressions.
    for i in range(3):
        if isinstance(size[i], int):
            size[i] = tvm.tir.IntImm("int32", size[i])

    def compute_func(*indices):
        return _resize_3d(
            indices,
            data,
            roi,
            in_d,
            in_h,
            in_w,
            size[0],
            size[1],
            size[2],
            method=method,
            layout=layout,
            coordinate_transformation_mode=coordinate_transformation_mode,
            rounding_method=rounding_method,
            alpha=bicubic_alpha,
            exclude_outside=bicubic_exclude,
            extrapolation_value=extrapolation_value,
            out_dtype=out_dtype,
        )

    return te.compute(output_shape, compute_func, name="resize", tag=tag.INJECTIVE)
| 42,595 | 31.099472 | 98 | py |
tvm | tvm-main/python/tvm/topi/hls/injective.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable,
"""Schedule for composition of injective operator"""
import tvm
from tvm import te
def schedule_injective_from_existing(sch, out):
    """Attach a pipelined HLS schedule for an injective op to ``sch``.

    Parameters
    ----------
    sch: Schedule
        The schedule to update.
    out: Tensor
        The tensor representing the injective op.

    Returns
    -------
    sch: Schedule
        The updated schedule.
    """
    stage = sch[out]
    # Collapse all axes into one, carve off a single-part outer axis and
    # bind it to the HLS "pipeline" thread axis.
    merged = stage.fuse(*stage.op.axis)
    outer, _inner = stage.split(merged, nparts=1)
    stage.bind(outer, te.thread_axis("pipeline"))
    return sch
def schedule_injective(outs):
    """Build the HLS schedule for a graph of injective ops.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of injective in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])
    tvm.te.schedule.AutoInlineInjective(sch)
    for tensor in outs:
        schedule_injective_from_existing(sch, tensor)
    return sch
# Elementwise and broadcast ops are injective, so they reuse the same schedule.
schedule_elemwise = schedule_injective
schedule_broadcast = schedule_injective
| 2,055 | 29.235294 | 70 | py |
tvm | tvm-main/python/tvm/topi/hls/nn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""HLS nn operators"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from .. import tag
def _schedule_conv2d(outs):
    """Shared HLS schedule builder for the conv2d-family operators.

    Inlines injective stages, computes the conv2d stage at the output's
    second axis, and binds a single-part outer axis to the "pipeline"
    thread axis.

    Parameters
    ----------
    outs: Tensor or Array of Tensor
        Output tensor(s) of the computation graph.

    Returns
    -------
    s: Schedule
        The computation schedule.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    tvm.te.schedule.AutoInlineInjective(s)
    def traverse(OP):
        """Internal traverse function"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_injective(OP.tag):
            if OP not in s.outputs:
                s[OP].compute_inline()
            # Recurse into compute-op producers only.
            for tensor in OP.input_tensors:
                if isinstance(tensor.op, tvm.te.ComputeOp):
                    traverse(tensor.op)
        # schedule conv2d
        elif OP.tag.find("conv2d") >= 0:
            Conv2d = OP.output(0)
            if not Conv2d.op in s.outputs:
                # Compute the conv stage inside axis 1 of the final output stage.
                Out = outs[0].op.output(0)
                s[Conv2d].compute_at(s[Out], s[Out].op.axis[1])
        else:
            raise RuntimeError(f"Unsupported operator: {OP.tag}")
    traverse(outs[0].op)
    # Split off a single-part outer axis and bind it to the HLS pipeline axis.
    px, x = s[outs[0]].split(outs[0].op.axis[0], nparts=1)
    s[outs[0]].bind(px, te.thread_axis("pipeline"))
    return s
def schedule_conv2d_nchw(outs):
    """Build the HLS schedule for conv2d_nchw.

    Parameters
    ----------
    outs: Array of Tensor
        Output tensors describing the conv2d_nchw computation graph.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # All conv2d variants share one generic scheduling routine.
    sch = _schedule_conv2d(outs)
    return sch
def schedule_conv2d_nhwc(outs):
    """Build the HLS schedule for conv2d_nhwc.

    Parameters
    ----------
    outs: Array of Tensor
        Output tensors describing the conv2d_nhwc computation graph.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # All conv2d variants share one generic scheduling routine.
    sch = _schedule_conv2d(outs)
    return sch
def schedule_conv2d_NCHWc(outs):
    """Build the HLS schedule for conv2d_NCHW[x]c.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors describing the conv2d_NCHWc computation graph.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    # All conv2d variants share one generic scheduling routine.
    sch = _schedule_conv2d(outs)
    return sch
def schedule_conv2d_transpose_nchw(outs):
    """Build the HLS schedule for conv2d_transpose_nchw.

    Parameters
    ----------
    outs: Array of Tensor
        Output tensors describing the conv2d_transpose_nchw computation graph.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    # All conv2d variants share one generic scheduling routine.
    sch = _schedule_conv2d(outs)
    return sch
def schedule_depthwise_conv2d_nchw(outs):
    """Build the HLS schedule for depthwise_conv2d_nchw.

    Parameters
    ----------
    outs: Array of Tensor
        Output tensors describing the depthwise_conv2d_nchw computation graph.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # All conv2d variants share one generic scheduling routine.
    sch = _schedule_conv2d(outs)
    return sch
def schedule_depthwise_conv2d_nhwc(outs):
    """Build the HLS schedule for depthwise_conv2d_nhwc.

    Parameters
    ----------
    outs: Array of Tensor
        Output tensors describing the depthwise_conv2d_nhwc computation graph.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # All conv2d variants share one generic scheduling routine.
    sch = _schedule_conv2d(outs)
    return sch
def schedule_bitserial_conv2d_nchw(outs):
    """Build the HLS schedule for bitserial_conv2d_nchw.

    Parameters
    ----------
    outs: Array of Tensor
        Output tensors describing the bitserial_conv2d_nchw computation graph.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # All conv2d variants share one generic scheduling routine.
    sch = _schedule_conv2d(outs)
    return sch
def schedule_bitserial_conv2d_nhwc(outs):
    """Build the HLS schedule for bitserial_conv2d_nhwc.

    Parameters
    ----------
    outs: Array of Tensor
        Output tensors describing the bitserial_conv2d_nhwc computation graph.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # All conv2d variants share one generic scheduling routine.
    sch = _schedule_conv2d(outs)
    return sch
def schedule_reduce(outs):
    """Schedule for reduction

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of reduce
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    tvm.te.schedule.AutoInlineInjective(s)
    def traverse(OP):
        """Internal traverse function"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(OP.tag):
            if OP not in s.outputs:
                s[OP].compute_inline()
            for tensor in OP.input_tensors:
                if isinstance(tensor.op, tvm.te.ComputeOp):
                    traverse(tensor.op)
        elif OP.tag in ["comm_reduce", "comm_reduce_idx"]:
            # comm_reduce produces the reduction directly; comm_reduce_idx's
            # reduction is its first input tensor.
            if OP.tag == "comm_reduce":
                Reduce = OP.output(0)
            else:
                Reduce = OP.input_tensors[0]
            if not Reduce.op in s.outputs:
                # Compute the reduction inside the first axis of the output stage.
                Out = outs[0].op.output(0)
                s[Reduce].compute_at(s[Out], s[Out].op.axis[0])
        else:
            raise RuntimeError(f"Unsupported operator: {OP.tag}")
    traverse(outs[0].op)
    # NOTE(review): fuse() is called with no axes here, unlike the injective
    # schedule which fuses all output axes — confirm this is intended.
    fused = s[outs[0]].fuse()
    px, x = s[outs[0]].split(fused, nparts=1)
    s[outs[0]].bind(px, te.thread_axis("pipeline"))
    return s
def schedule_softmax(outs):
    """Schedule for softmax

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of softmax
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.

    Raises
    ------
    ValueError
        If the output op is neither softmax_output nor log_softmax_output.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    tvm.te.schedule.AutoInlineInjective(s)
    softmax = outs[0]
    op_tag = softmax.op.tag
    # Pull out the intermediate stages; their positions among the input
    # tensors differ between the softmax and log_softmax compute definitions.
    if op_tag == "softmax_output":
        expsum = softmax.op.input_tensors[1]
        exp = softmax.op.input_tensors[0]
        max_elem = s[exp].op.input_tensors[1]
    elif op_tag == "log_softmax_output":
        exp = None
        max_elem = softmax.op.input_tensors[1]
        expsum = softmax.op.input_tensors[2]
    else:
        raise ValueError(
            f"Tag is expected to be softmax_output or log_softmax_output. Got {op_tag}"
        )
    # Compute every intermediate stage inside axis 1 of the softmax stage.
    if exp is not None:
        s[exp].compute_at(s[softmax], s[softmax].op.axis[1])
    s[expsum].compute_at(s[softmax], s[softmax].op.axis[1])
    s[max_elem].compute_at(s[softmax], s[softmax].op.axis[1])
    # Bind a single-part outer axis to the HLS pipeline axis.
    px, x = s[softmax].split(softmax.op.axis[0], nparts=1)
    s[softmax].bind(px, te.thread_axis("pipeline"))
    return s
def schedule_dense(outs):
    """Schedule for dense

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of dense
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    tvm.te.schedule.AutoInlineInjective(s)
    def traverse(OP):
        """Internal traverse function"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(OP.tag):
            if OP not in s.outputs:
                s[OP].compute_inline()
            for tensor in OP.input_tensors:
                if isinstance(tensor.op, tvm.te.ComputeOp):
                    traverse(tensor.op)
        # schedule dense
        elif OP.tag == "dense":
            Dense = OP.output(0)
            if not Dense.op in s.outputs:
                # Compute the dense stage inside axis 1 of the output stage.
                Out = outs[0].op.output(0)
                s[Dense].compute_at(s[Out], s[Out].op.axis[1])
        else:
            raise RuntimeError(f"Unsupported operator: {OP.tag}")
    traverse(outs[0].op)
    # Bind a single-part outer axis to the HLS pipeline axis.
    px, x = s[outs[0]].split(outs[0].op.axis[0], nparts=1)
    s[outs[0]].bind(px, te.thread_axis("pipeline"))
    return s
def schedule_pool(outs, layout):
    """Schedule for pool

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of pool
        in the format of an array of tensors.
    layout: str
        Data layout. NOTE(review): accepted for API compatibility but not
        used by this schedule.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    tvm.te.schedule.AutoInlineInjective(s)
    def traverse(OP):
        """Internal traverse function"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(OP.tag):
            if OP not in s.outputs:
                s[OP].compute_inline()
            for tensor in OP.input_tensors:
                if isinstance(tensor.op, tvm.te.ComputeOp):
                    traverse(tensor.op)
        # schedule pool
        elif OP.tag.startswith("pool"):
            Pool = OP.output(0)
            if not Pool.op in s.outputs:
                # Compute the pool stage inside axis 1 of the output stage.
                Out = outs[0].op.output(0)
                s[Pool].compute_at(s[Out], s[Out].op.axis[1])
        else:
            raise RuntimeError(f"Unsupported operator: {OP.tag}")
    traverse(outs[0].op)
    # Bind a single-part outer axis to the HLS pipeline axis.
    px, x = s[outs[0]].split(outs[0].op.axis[0], nparts=1)
    s[outs[0]].bind(px, te.thread_axis("pipeline"))
    return s
def schedule_adaptive_pool(outs):
    """Schedule for adaptive_pool

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of adaptive_pool
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    tvm.te.schedule.AutoInlineInjective(s)
    def traverse(OP):
        """Internal traverse function"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(OP.tag):
            if OP not in s.outputs:
                s[OP].compute_inline()
            for tensor in OP.input_tensors:
                if isinstance(tensor.op, tvm.te.ComputeOp):
                    traverse(tensor.op)
        # schedule global_pool
        elif OP.tag.startswith("adaptive_pool"):
            Pool = OP.output(0)
            if not Pool.op in s.outputs:
                # Compute the pool stage inside axis 1 of the output stage.
                Out = outs[0].op.output(0)
                s[Pool].compute_at(s[Out], s[Out].op.axis[1])
        else:
            raise RuntimeError(f"Unsupported operator: {OP.tag}")
    traverse(outs[0].op)
    # Bind a single-part outer axis to the HLS pipeline axis.
    px, x = s[outs[0]].split(outs[0].op.axis[0], nparts=1)
    s[outs[0]].bind(px, te.thread_axis("pipeline"))
    return s
| 11,909 | 28.191176 | 87 | py |
tvm | tvm-main/python/tvm/topi/hls/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""HLS specific declaration and schedules."""
from __future__ import absolute_import as _abs
from .injective import schedule_injective, schedule_elemwise, schedule_broadcast
from .nn import *
| 1,032 | 42.041667 | 80 | py |
tvm | tvm-main/python/tvm/topi/generic/injective.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""generic declaration and schedules."""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
def schedule_injective_from_existing(sch, out):
    """Fuse the axes of injective op ``out`` inside an existing schedule.

    Parameters
    ----------
    sch: Schedule
        The schedule to update.
    out: Tensor
        The tensor representing the injective op.

    Returns
    -------
    sch: Schedule
        The updated schedule.
    """
    axes = sch[out].op.axis
    # Generic fallback: simply collapse all axes into one.
    sch[out].fuse(*axes)
    return sch
def schedule_injective(outs):
    """Build the generic (llvm-only) schedule for an injective op.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of injective in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # The generic fallback is only valid for the llvm target.
    target = tvm.target.Target.current(allow_none=False)
    if target.kind.name != "llvm":
        raise RuntimeError(f"schedule_injective not registered for '{target}'")
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])
    te.schedule.AutoInlineInjective(sch)
    schedule_injective_from_existing(sch, outs[0])
    return sch
# Elementwise and broadcast ops are injective, so they reuse the same schedule.
schedule_elemwise = schedule_injective
schedule_broadcast = schedule_injective
| 2,128 | 28.985915 | 79 | py |
tvm | tvm-main/python/tvm/topi/generic/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, too-many-locals
# pylint: disable=unused-argument, redefined-builtin
"""Generic convolution schedules"""
from tvm import te
from tvm import autotvm
from tvm import relay
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from ..utils import get_const_tuple, traverse_inline
from ..nn.utils import get_pad_tuple
def fallback_schedule_cpu_common_int8(cfg, wkl, int32_lanes, num_int8_elements):
    """Fallback schedule for conv2d int8 on cpu.
    Normally the inner most pattern takes two int8/uint8 tensors
    data[num_int8_elements] and kernel[int32_lanes, num_int8_elements],
    produces a dot product int32/uint32 output[int32_lanes].

    Parameters
    ----------
    int32_lanes : int
        How many numbers of int32/uint32 will be produced using intrinsic.
        This is related to output channel.
    num_int8_elements : int
        How many numbers of input int32/uint32 will be multiplied and reduced.
        This is related to input channel.
    """
    pad_l, pad_r = wkl.padl, wkl.padr
    stride_w = wkl.stride_w
    dilated_kw = (wkl.kernel_w - 1) * wkl.dilation_w + 1
    out_width = (wkl.width + pad_l + pad_r - dilated_kw) // stride_w + 1
    assert (
        wkl.out_filter % int32_lanes == 0
    ), f"wkl.out_filter={wkl.out_filter}, int32_lanes={int32_lanes}"
    assert (
        wkl.in_filter % num_int8_elements == 0
    ), f"wkl.in_filter={wkl.in_filter}, num_int8_elements={num_int8_elements}"
    # Output-channel block: at least as wide as the intrinsic requires.
    oc_bn = max(int32_lanes, num_int8_elements)
    # Largest input-channel block (stepping down by 4) dividing in_filter;
    # fall back to 1 when nothing fits.
    ic_bn = next((bn for bn in range(oc_bn, 0, -4) if wkl.in_filter % bn == 0), 1)
    # Widest register tile (at most 31) that evenly divides the output width.
    reg_n = next((n for n in range(31, 0, -1) if out_width % n == 0), 1)
    cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
    cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
    cfg["tile_ow"] = SplitEntity([out_width // reg_n, reg_n])
    cfg["unroll_kw"] = OtherOptionEntity(False)
def fallback_schedule_cpu_1x1_int8(cfg, wkl, int32_lanes, num_int8_elements):
    """Fallback schedule for 1x1 conv2d int8 on cpu.
    Normally the inner most pattern takes two int8/uint8 tensors
    data[num_int8_elements] and kernel[int32_lanes, num_int8_elements],
    produces a dot product int32/uint32 output[int32_lanes].

    Parameters
    ----------
    int32_lanes : int
        How many numbers of int32/uint32 will be produced using intrinsic.
        This is related to output channel.
    num_int8_elements : int
        How many numbers of input int32/uint32 will be multiplied and reduced.
        This is related to input channel.
    """
    pad_t, pad_l, pad_b, pad_r = wkl.padt, wkl.padl, wkl.padb, wkl.padr
    stride_h, stride_w = wkl.stride_h, wkl.stride_w
    out_height = (wkl.height + pad_t + pad_b - wkl.kernel_h) // stride_h + 1
    out_width = (wkl.width + pad_l + pad_r - wkl.kernel_w) // stride_w + 1
    assert (
        wkl.out_filter % int32_lanes == 0
    ), f"wkl.out_filter={wkl.out_filter}, int32_lanes={int32_lanes}"
    assert (
        wkl.in_filter % num_int8_elements == 0
    ), f"wkl.in_filter={wkl.in_filter}, num_int8_elements={num_int8_elements}"
    # Output-channel block: at least as wide as the intrinsic requires.
    oc_bn = max(int32_lanes, num_int8_elements)
    # Largest input-channel block (stepping down by 4) dividing in_filter.
    ic_bn = next((bn for bn in range(oc_bn, 0, -4) if wkl.in_filter % bn == 0), 1)
    # First (largest-width-first) factor pair whose product stays below 32.
    factor_pair = next(
        (
            (ow_f, oh_f)
            for ow_f in range(out_width, 0, -1)
            if out_width % ow_f == 0
            for oh_f in range(out_height, 0, -1)
            if out_height % oh_f == 0 and ow_f * oh_f < 32
        ),
        None,
    )
    if factor_pair is None:
        raise ValueError(f"cannot decide default schedule for workload: {wkl}")
    ow_factor, oh_factor = factor_pair
    cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
    cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
    cfg["tile_oh"] = OtherOptionEntity(oh_factor)
    cfg["tile_ow"] = SplitEntity([out_width // ow_factor, ow_factor])
def schedule_conv_NCHWc_cpu_common_int8(
    s,
    cfg,
    data_vec,
    kernel_vec,
    conv_out,
    last,
    int32_lanes=16,
    int8_elems=4,
    intrin=None,
    inline_fused=True,
    mem_scope="global",
):
    """
    Defines the schedule for INT8 for Intel and ARM machines
    Uses the Intel/ARM intrinsics to use INT8 operations
    More details - https://software.intel.com/en-us/articles/
    lower-numerical-precision-deep-learning-inference-and-training

    Parameters
    ----------
    s : Schedule
        The schedule, mutated in place and also returned.
    cfg : config
        Tuning config; "tile_ow" / "unroll_kw" may be plain python values or
        autotvm entities (both forms handled below).
    data_vec : Tensor
        Packed 5-D input tensor (last axis is the input-channel block).
    kernel_vec : Tensor
        Packed kernel tensor (7-D in the int8 layout-transform branch).
    conv_out : Tensor
        5-D NCHW[x]c convolution output.
    last : Tensor
        Final output op; differs from conv_out when elementwise ops are fused.
    int32_lanes : int
        Number of int32 lanes produced by the dot-product intrinsic
        (output-channel direction).
    int8_elems : int
        Number of int8 elements reduced per int32 lane (input-channel
        direction).
    intrin : TensorIntrin, optional
        Dot-product intrinsic to tensorize with; skipped when None.
    inline_fused : bool
        When True and fused ops exist, compute conv at the inner ow block of
        the output; otherwise at the fused parallel axis.
    mem_scope : str
        Memory scope for the cache_write stage of the conv output.

    Returns
    -------
    s : Schedule
        The updated schedule.
    """
    # cfg entries can be raw values (e.g. from a fallback config) or
    # autotvm entities carrying .size / .val.
    if isinstance(cfg["tile_ow"], int):
        reg_n = cfg["tile_ow"]
    else:
        reg_n = cfg["tile_ow"].size[-1]

    if isinstance(cfg["unroll_kw"], (int, bool)):
        unroll_kw = cfg["unroll_kw"]
    else:
        unroll_kw = cfg["unroll_kw"].val

    # Channel block sizes are read off the packed tensor shapes (last axis).
    _, _, _, _, ic_bn = get_const_tuple(data_vec.shape)
    _, _, _, _, oc_bn = get_const_tuple(conv_out.shape)

    # schedule pad: parallelize the padding stage, then step to its input.
    if isinstance(s[data_vec].op, te.tensor.ComputeOp) and "pad" in data_vec.op.tag:
        batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis
        parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
        s[data_vec].parallel(parallel_axis)
        data_vec = data_vec.op.input_tensors[0]

    if autotvm.GLOBAL_SCOPE.in_tuning:
        # only in autotuning, input data of conv2d_NCHWc will be 4-D.
        # skip this part during tuning to make records accurate.
        # this part will be folded during Relay fold_constant pass.
        if isinstance(data_vec.op, te.tensor.ComputeOp):
            s[data_vec].pragma(s[data_vec].op.axis[0], "debug_skip_region")
        if isinstance(kernel_vec.op, te.tensor.ComputeOp):
            s[kernel_vec].pragma(s[kernel_vec].op.axis[0], "debug_skip_region")
    elif isinstance(kernel_vec.op, te.tensor.ComputeOp) and kernel_vec.name == "kernel_vec":
        # data and kernel are not pre-computed, schedule layout transform here.
        # this should only be used by x86 conv2d_nchw, which is for
        # testing purpose.
        batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis
        parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
        s[data_vec].parallel(parallel_axis)

        # conv2d_nchwc_int8 has 7D kernel
        oc_chunk, ic_chunk, oh, ow, ic_block, oc_block, _ = s[kernel_vec].op.axis
        s[kernel_vec].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block)
        oc_bn = cfg["tile_oc"].size[-1]
        if oc_bn > 1:
            s[kernel_vec].vectorize(oc_block)
        parallel_axis = s[kernel_vec].fuse(oc_chunk, oh)
        s[kernel_vec].parallel(parallel_axis)

    # schedule 5-D NCHW[x]c conv
    C, O = conv_out, last
    CC = s.cache_write(C, mem_scope)

    batch, oc_chunk, oh, ow, oc_block = s[C].op.axis
    ow_chunk, ow_block = s[C].split(ow, factor=reg_n)
    s[C].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
    parallel_axis = s[C].fuse(batch, oc_chunk, oh)
    s[C].vectorize(oc_block)
    if C == O:
        # No fused ops: conv output is the final output; parallelize it here.
        s[C].parallel(parallel_axis)

    s[CC].compute_at(s[C], parallel_axis)
    _, oc_chunk, oh, ow, oc_block = s[CC].op.axis
    kh, kw, ic_outer, ic_f_inner, ic_s_inner = s[CC].op.reduce_axis

    ow_chunk, ow_block = s[CC].split(ow, factor=reg_n)

    assert oc_bn % int32_lanes == 0, f"oc_bn={oc_bn} % int32_lanes={int32_lanes} != 0"
    assert (
        ic_bn % int8_elems == 0
    ), f"ic_bn={ic_bn} % int8_elems={int8_elems} != 0"  # (u)int8 elements in (u)int32

    # Innermost (oc_s_inner, ic_s_inner) pair matches the dot-product
    # intrinsic shape [int32_lanes, int8_elems].
    oc_f_inner, oc_s_inner = s[CC].split(oc_block, factor=int32_lanes)

    if unroll_kw:
        # Place kw next to the unrolled body so the kernel-width loop is
        # fully unrolled.
        s[CC].reorder(
            oc_chunk,
            oh,
            ow_chunk,
            ic_outer,
            kh,
            ic_f_inner,
            kw,
            ow_block,
            oc_f_inner,
            oc_s_inner,
            ic_s_inner,
        )
        s[CC].unroll(kw)
    else:
        s[CC].reorder(
            oc_chunk,
            oh,
            ow_chunk,
            ic_outer,
            kh,
            kw,
            ic_f_inner,
            ow_block,
            oc_f_inner,
            oc_s_inner,
            ic_s_inner,
        )

    if intrin is not None:
        s[CC].tensorize(oc_s_inner, intrin)
    s[CC].unroll(ow_block)
    s[CC].unroll(oc_f_inner)

    if C != O:
        # Schedule the fused tail (elementwise) op and attach conv to it.
        out_ndim = len(s[O].op.axis)
        if out_ndim == 5:
            batch, oc_chunk, oh, ow, oc_block = s[O].op.axis
            ow_chunk, ow_block = s[O].split(ow, factor=reg_n)
            s[O].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
        elif out_ndim == 4:
            batch, oc, oh, ow = s[O].op.axis
            ow_chunk, ow_block = s[O].split(ow, factor=reg_n)
            oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
            s[O].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
        else:
            raise ValueError(f"Unsupported output ndim: {out_ndim}")
        parallel_axis = s[O].fuse(batch, oc_chunk, oh)
        if inline_fused:
            s[C].compute_at(s[O], ow_block)
        else:
            s[C].compute_at(s[O], parallel_axis)
        s[O].vectorize(oc_block)
        s[O].parallel(parallel_axis)
    return s
def schedule_conv_NCHWc_cpu_1x1_int8(
    s,
    cfg,
    data_vec,
    kernel_vec,
    conv_out,
    last,
    int32_lanes=16,
    int8_elems=4,
    intrin=None,
    inline_fused=False,
    mem_scope="global",
):
    """
    Defines the 1x1 conv schedule for INT8 for Intel and ARM machines
    Uses the Intel/ARM intrinsics to use INT8 operations
    More details - https://software.intel.com/en-us/articles/
    lower-numerical-precision-deep-learning-inference-and-training

    Parameters
    ----------
    s : Schedule
        The schedule, mutated in place and also returned.
    cfg : config
        Tuning config; "tile_oh".val and "tile_ow".size[-1] supply the
        output tile factors.
    data_vec : Tensor
        Packed 5-D input tensor (last axis is the input-channel block).
    kernel_vec : Tensor
        Packed kernel tensor (7-D in the int8 layout-transform branch).
    conv_out : Tensor
        5-D NCHW[x]c convolution output.
    last : Tensor
        Final output op; differs from conv_out when elementwise ops are fused.
    int32_lanes : int
        Number of int32 lanes produced by the dot-product intrinsic.
    int8_elems : int
        Number of int8 elements reduced per int32 lane.
    intrin : TensorIntrin, optional
        Dot-product intrinsic to tensorize with; skipped when None.
    inline_fused : bool
        When True and fused ops exist, compute conv at the inner ow axis of
        the output; otherwise at the fused parallel axis.
    mem_scope : str
        Memory scope for the cache_write stage of the conv output.

    Returns
    -------
    s : Schedule
        The updated schedule.
    """
    oh_factor, ow_factor = cfg["tile_oh"].val, cfg["tile_ow"].size[-1]
    # Channel block sizes come from the packed tensor shapes (last axis).
    _, _, _, _, ic_bn = get_const_tuple(data_vec.shape)
    _, _, _, _, oc_bn = get_const_tuple(conv_out.shape)

    # schedule pad: parallelize the padding stage, then step to its input.
    if isinstance(s[data_vec].op, te.tensor.ComputeOp) and "pad" in data_vec.op.tag:
        batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis
        parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
        s[data_vec].parallel(parallel_axis)
        data_vec = data_vec.op.input_tensors[0]

    if autotvm.GLOBAL_SCOPE.in_tuning:
        # only in autotuning, input data of conv2d_NCHWc will be 4-D.
        # skip this part during tuning to make records accurate.
        # this part will be folded during Relay fold_constant pass.
        if isinstance(data_vec.op, te.tensor.ComputeOp):
            s[data_vec].pragma(s[data_vec].op.axis[0], "debug_skip_region")
        if isinstance(kernel_vec.op, te.tensor.ComputeOp):
            s[kernel_vec].pragma(s[kernel_vec].op.axis[0], "debug_skip_region")
    elif isinstance(kernel_vec.op, te.tensor.ComputeOp) and kernel_vec.name == "kernel_vec":
        # data and kernel are not pre-computed, schedule layout transform here.
        # this should only be used by x86 conv2d_nchw, which is for
        # testing purpose.
        batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis
        parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
        s[data_vec].parallel(parallel_axis)

        # Conv2d int8 schedule has 7D kernel
        oc_chunk, ic_chunk, oh, ow, ic_block, oc_block, _ = s[kernel_vec].op.axis
        s[kernel_vec].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block)
        oc_bn = cfg["tile_oc"].size[-1]
        if oc_bn > 1:
            s[kernel_vec].vectorize(oc_block)
        parallel_axis = s[kernel_vec].fuse(oc_chunk, oh)
        s[kernel_vec].parallel(parallel_axis)

    C, O = conv_out, last
    CC = s.cache_write(C, mem_scope)

    batch, oc_chunk, oh, ow, oc_block = s[C].op.axis
    # For 1x1 conv both oh and ow are tiled (no kernel spatial extent to walk).
    oh_outer, oh_inner = s[C].split(oh, factor=oh_factor)
    ow_outer, ow_inner = s[C].split(ow, factor=ow_factor)
    s[C].reorder(oc_chunk, oh_outer, ow_outer, oh_inner, ow_inner, oc_block)
    s[C].vectorize(oc_block)

    parallel_axis = s[C].fuse(batch, oc_chunk, oh_outer)
    if C == O:
        # No fused ops: conv output is the final output; parallelize it here.
        s[C].parallel(parallel_axis)
    s[CC].compute_at(s[C], parallel_axis)  # good perf on mobilenet, but not on individuals?

    _, oc_chunk, oh, ow, oc_block = s[CC].op.axis
    kh, kw, ic_outer, ic_f_inner, ic_s_inner = s[CC].op.reduce_axis

    assert oc_bn % int32_lanes == 0
    assert ic_bn % int8_elems == 0  # (u)int8 elements in (u)int32

    # Innermost (oc_s_inner, ic_s_inner) pair matches the dot-product
    # intrinsic shape [int32_lanes, int8_elems].
    oc_f_inner, oc_s_inner = s[CC].split(oc_block, factor=int32_lanes)

    oh_outer, oh_inner = s[CC].split(oh, factor=oh_factor)
    ow_outer, ow_inner = s[CC].split(ow, factor=ow_factor)

    s[CC].reorder(
        oc_chunk,
        oh_outer,
        ow_outer,
        kh,
        kw,
        ic_outer,
        ic_f_inner,
        oh_inner,
        ow_inner,
        oc_f_inner,
        oc_s_inner,
        ic_s_inner,
    )
    s[CC].fuse(oc_chunk, oh_outer)

    if intrin is not None:
        s[CC].tensorize(oc_s_inner, intrin)
    s[CC].unroll(ow_inner)
    s[CC].unroll(oh_inner)

    if C != O:
        # Schedule the fused tail (elementwise) op and attach conv to it.
        out_ndim = len(s[O].op.axis)
        if out_ndim == 5:
            batch, oc_chunk, oh, ow, oc_block = s[O].op.axis
            oh_outer, oh_inner = s[O].split(oh, factor=oh_factor)
            ow_outer, ow_inner = s[O].split(ow, factor=ow_factor)
        elif out_ndim == 4:
            batch, oc, oh, ow = s[O].op.axis
            oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
            oh_outer, oh_inner = s[O].split(oh, factor=oh_factor)
            ow_outer, ow_inner = s[O].split(ow, factor=ow_factor)
        else:
            raise ValueError(f"Unsupported output ndim: {out_ndim}")
        s[O].reorder(oc_chunk, oh_outer, ow_outer, oh_inner, ow_inner, oc_block)
        parallel_axis = s[O].fuse(batch, oc_chunk, oh_outer)
        if inline_fused:
            s[C].compute_at(s[O], ow_inner)
        else:
            s[C].compute_at(s[O], parallel_axis)
        s[O].vectorize(oc_block)
        s[O].parallel(parallel_axis)
    return s
def schedule_depthwise_conv2d_nhwc(outs):
    """Create schedule for depthwise conv2d in NHWC layout.

    Parameters
    ----------
    outs : list[te.tensor.Tensor]
        The output tensors.

    Returns
    -------
    s : tvm.te.schedule.Schedule
        The computation schedule for depthwise conv2d.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([t.op for t in outs])

    def _schedule_depthwise(op):
        """Handle the depthwise conv op found while traversing the graph."""
        if "depthwise_conv2d_nhwc" not in op.tag:
            return
        out = outs[0]
        conv = op.output(0)
        # Inline the padding stage into the conv computation.
        data_pad = conv.op.input_tensors[0]
        sch[data_pad].compute_inline()
        if conv != out:
            # Attach conv to the innermost axis of the fused output.
            sch[conv].compute_at(sch[out], sch[out].op.axis[3])
        sch[out].fuse(*sch[out].op.axis)

    traverse_inline(sch, outs[0].op, _schedule_depthwise)
    return sch
def conv2d_alter_int8_common(
    data,
    data_tensor,
    kernel,
    kernel_tensor,
    output_tensor,
    attrs,
    data_dtype: str,
    in_channel_vector_length: int,
    out_channel_vector_length: int,
):
    """
    Convert TE inputs/outputs so that they are suitable for fast Int8 instructions.

    Int8 instructions require input channels and output channels to be a
    multiple of the vector length. For input channels, we pad both the inputs
    and weights channels. For output channels, we pad the weight and
    stride_slice the output.

    Returns None whenever the workload cannot be legalized (dilation != 1,
    grouped conv, or an unsupported layout combination), signalling the
    caller to keep the original op.

    Arguments
    ---------
    data: Expr
        Data Expr
    data_tensor: Tensor
        Data tensor
    kernel: Expr
        Kernel Expr
    kernel_tensor: Tensor
        Kernel tensor
    output_tensor: Tensor
        Output tensor
    attrs: Conv2dAttrs
        Attributes of the computation
    data_dtype: "int8" or "uint8"
        Desired dtype of data. Data will be converted to this dtype before the main computation.
    in_channel_vector_length: int
        Length of vector units on target hardware. Input channels are padded to this length.
    out_channel_vector_length: int
        Output size of vector instruction. Output channels are padded to this length.

    Returns
    -------
    out : Tensor
        Conv2d computation with inputs in the correct order for tensorization,
        or None if the workload is not supported.
    """
    # Dilation not supported yet. Return None if dilation is not (1, 1)
    dilation = attrs.get_int_tuple("dilation")
    if not (dilation[0] == 1 and dilation[1] == 1):
        return None

    # No legalization for depthwise convolutions yet.
    groups = attrs.get_int("groups")
    if groups != 1:
        return None

    # Get the conv attrs
    new_attrs = {k: attrs[k] for k in attrs.keys()}

    padding = attrs.get_int_tuple("padding")
    kh, kw = attrs.get_int_tuple("kernel_size")
    pt, pl, pb, pr = get_pad_tuple(padding, (kh, kw))

    if data_tensor.dtype != data_dtype:
        # How to convert data to uint8
        # Original --> C = A (conv) B
        # A and B are int8
        #   C = (A + 128 - 128) (conv) B
        #   C = (A' conv B) - 128 (conv) B
        # where A' = A + 128
        # and 128 (conv) B is basically a reduce on CRS axis for weights.
        #
        # How to convert data to int8
        #   C = (A - 128 + 128) (conv) B
        #   C = (A' conv B) + 128 (conv) B
        # where A' = A - 128
        if data_dtype == "uint8":
            # shift data to uint8
            before_shift = relay.add
            after_shift = relay.subtract
            pad_value = 128
        else:
            # shift data to int8
            before_shift = relay.subtract
            after_shift = relay.add
            pad_value = -128

        # adjust_shift = sum of the kernel over its spatial + input-channel
        # axes, leaving one value per output channel; this is the
        # "128 (conv) B" correction term described above.
        if attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWIO":
            adjust_shift = relay.sum(relay.cast(kernel, dtype="int32"), axis=(0, 1, 2))
            pad_width = ((0, 0), (pt, pb), (pl, pr), (0, 0))
        elif attrs["data_layout"] == "NCHW" and attrs["kernel_layout"] == "OIHW":
            pad_width = ((0, 0), (0, 0), (pt, pb), (pl, pr))
            adjust_shift = relay.sum(relay.cast(kernel, dtype="int32"), axis=(1, 2, 3))
            # Reshape (O,) -> (O, 1, 1) so it broadcasts over NCHW output.
            adjust_shift = relay.expand_dims(adjust_shift, axis=1, num_newaxis=2)
        else:
            return None

        data = relay.cast(data, "int32")
        data = before_shift(data, relay.const(128, "int32"))
        data = relay.cast(data, data_dtype)

        # Do external padding as pad value has to be 128.
        if any(padding):
            data = relay.nn.pad(data, pad_width=pad_width, pad_value=pad_value)
            new_attrs["padding"] = (0, 0)

        # Multiply 128 to adjust shift.
        adjust_shift = relay.multiply(adjust_shift, relay.const(128, "int32"))

    # Flags to remember if the expr is modified
    ic_modified = False
    oc_modified = False

    # Find the value of input and output channel.
    in_channel = -1
    out_channel = -1
    if attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWIO":
        in_channel = data_tensor.shape[3].value
        out_channel = kernel_tensor.shape[3].value
    elif attrs["data_layout"] == "NCHW" and attrs["kernel_layout"] == "OIHW":
        in_channel = data_tensor.shape[1].value
        out_channel = kernel_tensor.shape[0].value
    else:
        return None

    if in_channel % in_channel_vector_length != 0:
        # Round input channels up to the next multiple of the vector length
        # and zero-pad both data and kernel along the input-channel axis.
        new_in_channel = (
            (in_channel + in_channel_vector_length) // in_channel_vector_length
        ) * in_channel_vector_length
        diff = new_in_channel - in_channel
        if attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWIO":
            data = relay.nn.pad(data, pad_width=((0, 0), (0, 0), (0, 0), (0, diff)))
            kernel = relay.nn.pad(kernel, pad_width=((0, 0), (0, 0), (0, diff), (0, 0)))
            ic_modified = True
        elif attrs["data_layout"] == "NCHW" and attrs["kernel_layout"] == "OIHW":
            pad_width = ((0, 0), (0, diff), (0, 0), (0, 0))
            data = relay.nn.pad(data, pad_width=pad_width)
            kernel = relay.nn.pad(kernel, pad_width=pad_width)
            ic_modified = True
        else:
            return None

    new_out_channel = out_channel
    if out_channel % out_channel_vector_length != 0:
        # Round output channels up and pad only the kernel; the extra output
        # channels are sliced away below.
        new_out_channel = (
            (out_channel + out_channel_vector_length) // out_channel_vector_length
        ) * out_channel_vector_length
        diff = new_out_channel - out_channel
        if attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWIO":
            kernel = relay.nn.pad(kernel, pad_width=((0, 0), (0, 0), (0, 0), (0, diff)))
            oc_modified = True
        elif attrs["data_layout"] == "NCHW" and attrs["kernel_layout"] == "OIHW":
            kernel = relay.nn.pad(kernel, pad_width=((0, diff), (0, 0), (0, 0), (0, 0)))
            oc_modified = True
        else:
            return None

    if oc_modified:
        new_attrs["channels"] = new_out_channel
        out = relay.nn.conv2d(data, kernel, **new_attrs)
        original_out_shape = [x.value for x in output_tensor.shape]
        out = relay.strided_slice(out, begin=[0, 0, 0, 0], end=original_out_shape)
    else:
        out = relay.nn.conv2d(data, kernel, **new_attrs)

    if data_tensor.dtype != data_dtype:
        # Apply the dtype-shift correction computed above.
        out = after_shift(out, adjust_shift)

    return out
| 21,998 | 36.097808 | 96 | py |
tvm | tvm-main/python/tvm/topi/generic/image.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Generic image operators"""
from .default import default_schedule as _default_schedule
def schedule_dilation2d_nchw(outs):
    """Default schedule for dilation2d in NCHW layout.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of the dilation2d computation.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_dilation2d_nhwc(outs):
    """Default schedule for dilation2d in NHWC layout.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of the dilation2d computation.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch
| 1,606 | 31.795918 | 62 | py |
tvm | tvm-main/python/tvm/topi/generic/math.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Generic math operators"""
from .default import default_schedule as _default_schedule
def schedule_einsum(outs):
    """Default schedule for the einsum operator.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of the einsum computation.

    Returns
    -------
    s : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch
| 1,181 | 32.771429 | 62 | py |
tvm | tvm-main/python/tvm/topi/generic/sort.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member
"""Generic sort operators"""
from __future__ import absolute_import as _abs
from .default import default_schedule as _default_schedule
def schedule_sort(outs):
    """Default schedule for the sort operator.

    Parameters
    ----------
    outs : Array of Tensor
        The indices that would sort an input array along the given axis.

    Returns
    -------
    s : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_argsort(outs):
    """Default schedule for the argsort operator.

    Parameters
    ----------
    outs : Array of Tensor
        The indices that would sort an input array along the given axis.

    Returns
    -------
    s : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_topk(outs):
    """Default schedule for the topk operator.

    Parameters
    ----------
    outs : Array of Tensor
        The indices that would sort an input array along the given axis.

    Returns
    -------
    s : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch
| 1,958 | 26.208333 | 62 | py |
tvm | tvm-main/python/tvm/topi/generic/nn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Generic nn operators"""
from tvm import te
from .default import default_schedule as _default_schedule
def schedule_conv1d_ncw(outs):
    """Default schedule for conv1d_ncw.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of conv1d_ncw.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_conv1d_nwc(outs):
    """Default schedule for conv1d_nwc.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of conv1d_nwc.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_group_conv1d_ncw(outs):
    """Default schedule for group_conv1d_ncw.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of group_conv1d_ncw.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_group_conv1d_nwc(outs):
    """Default schedule for group_conv1d_nwc.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of group_conv1d_nwc.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_conv2d_hwcn(outs):
    """Default schedule for conv2d_hwcn.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of conv2d_hwcn.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_conv2d_nchw(outs):
    """Default schedule for conv2d_nchw.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of conv2d_nchw.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_conv2d_nhwc_pack(outs):
    """Default schedule for conv2d_nhwc_pack.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of conv2d_nhwc_pack.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_conv2d_nhwc(outs):
    """Default schedule for conv2d_nhwc.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of conv2d_nhwc.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_conv2d_NCHWc(outs):
    """Default schedule for conv2d_NCHW[x]c.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of conv2d_NCHWc.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_conv2d_NCHWc_int8(outs):
    """Default schedule for conv2d_NCHW[x]c_int8.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of conv2d_NCHWc_int8.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch
def schedule_conv2d_winograd_weight_transform(outs):
    """Schedule the weight transform of winograd convolution.

    Typically the transform is folded away by the PreCompute pass, so this
    schedule exists for the cpu/llvm fallback path.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of the weight-transform computation.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sched = te.create_schedule([t.op for t in outs])
    out = outs[0]
    # The small constant transform matrix G is inlined into the output op.
    _, transform_matrix = sched[out].op.input_tensors
    sched[transform_matrix].compute_inline()

    eps, nu, co, ci = sched[out].op.axis
    r_kh, r_kw = sched[out].op.reduce_axis
    sched[out].reorder(co, ci, r_kh, r_kw, eps, nu)
    for ax in (r_kh, r_kw, eps, nu):
        sched[out].unroll(ax)
    sched[out].parallel(co)
    return sched
def schedule_conv2d_gemm_weight_transform(outs):
    """Schedule the weight transform of gemm-based convolution.

    Typically this transform is computed in the PreCompute pass, so only a
    plain schedule is created here.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of the weight-transform computation.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    return te.create_schedule([t.op for t in outs])
def schedule_conv3d_winograd_weight_transform(outs):
    """Schedule the weight transform of 3D winograd convolution.

    Typically the transform is folded away by the PreCompute pass, so this
    schedule exists for the cpu/llvm fallback path.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of the weight-transform computation.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sched = te.create_schedule([t.op for t in outs])
    out = outs[0]
    # The small constant transform matrix G is inlined into the output op.
    _, transform_matrix = sched[out].op.input_tensors
    sched[transform_matrix].compute_inline()

    if len(sched[out].op.reduce_axis) == 3:
        # Depth dimension is transformed too: reduce over (kd, kh, kw).
        omg, eps, nu, ci, co = sched[out].op.axis
        r_kd, r_kh, r_kw = sched[out].op.reduce_axis
        sched[out].reorder(co, ci, omg, eps, nu, r_kd, r_kh, r_kw)
        for ax in (r_kd, r_kh, r_kw):
            sched[out].unroll(ax)
    else:
        # 2D transform applied independently for each depth slice d.
        eps, nu, d, ci, co = sched[out].op.axis
        r_kh, r_kw = sched[out].op.reduce_axis
        sched[out].reorder(co, ci, d, eps, nu, r_kh, r_kw)
        for ax in (r_kh, r_kw):
            sched[out].unroll(ax)
    sched[out].parallel(co)
    return sched
def schedule_conv2d_winograd_without_weight_transform(outs):
    """Default schedule for winograd conv without the weight transform.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of the winograd computation.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_conv2d_winograd_nnpack_weight_transform(outs):
    """Schedule the nnpack winograd weight transform.

    Typically this transform is computed in the PreCompute pass, so only a
    plain schedule is created here.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of the weight-transform computation.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    return te.create_schedule([t.op for t in outs])
def schedule_conv3d_ncdhw(outs):
    """Default schedule for conv3d_ncdhw.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of conv3d_ncdhw.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_conv3d_ndhwc(outs):
    """Default schedule for conv3d_ndhwc.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of conv3d_ndhwc.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_conv3d_transpose_ncdhw(outs):
    """Default schedule for conv3d_transpose_ncdhw.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of conv3d_transpose_ncdhw.

    Returns
    -------
    s : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_conv2d_transpose_nchw(outs):
    """Default schedule for conv2d_transpose_nchw.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of conv2d_transpose_nchw.

    Returns
    -------
    s : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch


def schedule_conv1d_transpose_ncw(outs):
    """Default schedule for conv1d_transpose_ncw.

    Parameters
    ----------
    outs : Array of Tensor
        Output tensors of conv1d_transpose_ncw.

    Returns
    -------
    s : Schedule
        The computation schedule for the op.
    """
    sch = _default_schedule(outs, False)
    return sch
def schedule_depthwise_conv2d_nchw(outs):
"""Schedule for depthwise_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_depthwise_conv2d_nhwc(outs):
"""Schedule for depthwise_conv2d_nhwc
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_depthwise_conv2d_NCHWc(outs):
"""Schedule for depthwise_conv2d_NCHWc
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_group_conv2d_nchw(outs):
"""Schedule for group_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of group_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_group_conv2d_transpose_nchw(outs):
"""Schedule for group_conv2d_transpose_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of group_conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_group_conv2d_nhwc(outs):
"""Schedule for group_conv2d_nhwc
Parameters
----------
outs: Array of Tensor
The computation graph description of group_conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_deformable_conv2d_nchw(outs):
"""Schedule for deformable_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of deformable_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_deformable_conv2d_nhwc(outs):
"""Schedule for deformable_conv2d_nhwc.
We only use the default schedule here and rely on auto_scheduler.
Parameters
----------
outs: Array of Tensor
The computation graph description of deformable_conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_bitserial_conv2d_nchw(outs):
"""Schedule for bitserial_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of bitserial_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_bitserial_conv2d_nhwc(outs):
"""Schedule for bitserial_conv2d_nhwc
Parameters
----------
outs: Array of Tensor
The computation graph description of bitserial_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_bitserial_dense(outs):
"""Schedule for bitserial_dense
Parameters
----------
outs: Array of Tensor
The computation graph description of bitserial_dense
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_reduce(outs):
    """Schedule for reduction

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of reduce
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # auto_inline=True: reductions also auto-inline injective producers and
    # fuse the output axes (see default_schedule).
    return _default_schedule(outs, True)


def schedule_softmax(outs):
    """Schedule for softmax

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of softmax
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_fast_softmax(outs):
    """Schedule for fast_softmax

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of fast_softmax
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_matmul(outs):
    """Schedule for matmul

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of matmul
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_dense(outs):
    """Schedule for dense

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of dense
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)
def schedule_pool(outs, layout):
    """Schedule for pool

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of pool
        in the format of an array of tensors.
    layout: str
        Data layout.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_pool_grad(outs):
    """Schedule for pool_grad

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of pool_grad
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_adaptive_pool(outs):
    """Schedule for adaptive pool

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of adaptive pool
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)
def schedule_binarize_pack(outs):
    """Schedule for binarize_pack

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of binarize_pack
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_bitpack(outs):
    """Schedule for bitpack

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of bitpack
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_binary_dense(outs):
    """Schedule for binary_dense

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of binary_dense
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_lrn(outs):
    """Schedule for lrn

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of lrn
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)
def schedule_sparse_dense(outs):
    """Schedule for sparse_dense

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of sparse_dense
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_sparse_transpose(outs):
    """Schedule for sparse_transpose

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of sparse_transpose
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_sparse_conv2d(outs):
    """Schedule for sparse_conv2d

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of sparse_conv2d
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)
def schedule_batch_matmul(outs):
    """Schedule for batch_matmul

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of batch_matmul
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_batch_norm(outs):
    """Schedule for batch_norm

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of batch_norm
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)
def schedule_correlation_nchw(outs):
    """Schedule for correlation_nchw

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of correlation_nchw
        in the format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_lstm(outs):
    """Schedule for LSTM

    Parameters
    ----------
    outs : Array of Tensor
        The outputs of LSTM (hidden states and cell states).

    Returns
    -------
    sch: Schedule
        The default schedule for LSTM.
    """
    return _default_schedule(outs, False)
| 21,388 | 22.765556 | 69 | py |
tvm | tvm-main/python/tvm/topi/generic/extern.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""generic declaration and schedules."""
import tvm
from .. import cpp
def schedule_extern(outs):
    """Schedule an extern op followed by injective operations.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of extern plus injective ops in the
        format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # Resolve the target from the active `with target:` scope and delegate
    # the actual scheduling to the C++ implementation.
    return cpp.generic.schedule_extern(tvm.target.Target.current(), outs)
| 1,351 | 33.666667 | 86 | py |
tvm | tvm-main/python/tvm/topi/generic/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Generic declaration and schedules.
This is a recommended way of using TOPI API.
To use the generic schedule function, user must set
the current target scope using with block. See also :any:`tvm.target`
Example
-------
.. code-block:: python
# create schedule that dispatches to topi.cuda.schedule_injective
with tvm.target.Target("cuda"):
s = tvm.tir.generic.schedule_injective(outs)
"""
from __future__ import absolute_import as _abs
from .nn import *
from .injective import *
from .extern import *
from .vision import *
from .sort import *
from .search import *
from .image import *
from .math import *
| 1,440 | 32.511628 | 69 | py |
tvm | tvm-main/python/tvm/topi/generic/vision.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member
"""Generic vision operators"""
from __future__ import absolute_import as _abs
import tvm
from .. import cpp
from .default import default_schedule as _default_schedule
def schedule_reorg(outs):
    """Schedule for reorg

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of reorg
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    # A target must be in scope (allow_none=False); hand the scheduling off
    # to the generic C++ default scheduler without auto-inlining.
    current = tvm.target.Target.current(allow_none=False)
    return cpp.generic.default_schedule(
        cpp.TEST_create_target(current.kind.name), outs, False
    )
def schedule_get_valid_counts(outs):
    """Schedule for get_valid_counts

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of get_valid_counts
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_nms(outs):
    """Schedule for non-maximum suppression

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of nms
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_multibox_prior(outs):
    """Schedule for multibox_prior

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of multibox_prior
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_multibox_transform_loc(outs):
    """Schedule for multibox_transform_loc

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of
        multibox_transform_loc in the format
        of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_multibox_detection(outs):
    """Schedule for multibox_detection

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of multibox_detection
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_roi_align(outs):
    """Schedule for roi_align

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of roi_align
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_roi_pool(outs):
    """Schedule for roi_pool

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of roi_pool
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_proposal(outs):
    """Schedule for proposal operator.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of proposal
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)
| 4,372 | 23.430168 | 64 | py |
tvm | tvm-main/python/tvm/topi/generic/search.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member
"""Generic search operators"""
from __future__ import absolute_import as _abs
from .default import default_schedule as _default_schedule
def schedule_argwhere(outs):
    """Schedule for argwhere operator.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of argwhere.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_sparse_fill_empty_rows(outs):
    """Schedule for sparse_fill_empty_rows operator.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of sparse_fill_empty_rows.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)


def schedule_unique(outs):
    """Schedule for unique operator.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of unique.

    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)
| 1,673 | 28.368421 | 62 | py |
tvm | tvm-main/python/tvm/topi/generic/default.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""The default schedule used by various operators"""
import tvm
from tvm import te
def default_schedule(outs, auto_inline):
    """Fallback schedule used for llvm/c targets.

    Parameters
    ----------
    outs: Tensor or Array of Tensor
        Output tensor(s) whose ops are scheduled.
    auto_inline: bool
        When True, auto-inline injective ops and fuse the first output's axes.

    Returns
    -------
    sch: Schedule
        The created schedule.
    """
    target = tvm.target.Target.current(allow_none=False)
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    # Only the plain CPU backends fall through to this generic schedule.
    if target.kind.name not in ("llvm", "c"):
        raise RuntimeError(f"schedule not registered for '{target}'")
    sch = te.create_schedule([tensor.op for tensor in outs])
    if auto_inline:
        first = outs[0]
        te.schedule.AutoInlineInjective(sch)
        sch[first].fuse(sch[first].op.axis)
    return sch
| 1,411 | 39.342857 | 69 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/injective.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable
"""Schedule for pooling operators"""
import numpy as np
import tvm
from tvm import te
from ..utils import is_empty_shape
def schedule_injective_from_existing(sch, out):
    """Schedule for injective op from existing schedule.

    Parameters
    ----------
    sch: Schedule
        The schedule to update.
    out: Tensor
        The tensor representing the injective op.

    Returns
    -------
    sch: Schedule
        The updated schedule.
    """
    axes = sch[out].op.axis
    ndim = len(axes)
    # Fuse leading axes so the parallel loop is coarse enough: three axes for
    # >=4-D outputs, two for 3-D, and just the outer axis for 2-D; 0/1-D
    # outputs are left untouched.
    if ndim >= 4:
        sch[out].parallel(sch[out].fuse(axes[0], axes[1], axes[2]))
    elif ndim == 3:
        sch[out].parallel(sch[out].fuse(axes[0], axes[1]))
    elif ndim == 2:
        sch[out].parallel(axes[0])
    return sch
def schedule_injective(outs):
    """ARM CPU schedule for injective op.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of injective in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    sch = te.create_schedule([tensor.op for tensor in outs])
    out = outs[0]
    axes = list(sch[out].op.axis)
    if axes:
        # do not vectorize for broadcast (outputs without axes);
        # bfloat16 is stored as uint16, so size the lanes from that dtype.
        elem_dtype = "uint16" if out.dtype == "bfloat16" else out.dtype
        lanes = 16 // np.dtype(elem_dtype).itemsize
        _, inner = sch[out].split(axes[-1], lanes)
        sch[out].vectorize(inner)
    tvm.te.schedule.AutoInlineInjective(sch)
    if not is_empty_shape(out.shape):
        schedule_injective_from_existing(sch, out)
    return sch
def schedule_concatenate(outs):
    """Schedule for concatenate op.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of concatenate in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    x = outs[0]
    tvm.te.schedule.AutoInlineInjective(s)
    # The fuse/parallel strategy is identical to the generic injective case;
    # reuse the shared helper instead of duplicating its axis-count logic here.
    schedule_injective_from_existing(s, x)
    return s
| 3,390 | 30.398148 | 92 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable
"""Schedule for pooling operators"""
from .mprofile.dsp.pool import pool_dsp_schedule
def schedule_pool(outs, layout):
    """Create schedule for avgpool/maxpool with dsp

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of pool in the format
        of an array of tensors.
    layout: str
        Data layout.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return pool_dsp_schedule(outs, layout)
| 1,053 | 39.538462 | 62 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, no-else-return, unused-argument, import-outside-toplevel
"""Conv2D schedule for ARM CPU"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import autotvm
import tvm.contrib.nnpack
from ..utils import traverse_inline, get_const_tuple
from .. import nn
from ..nn.utils import get_const_int, get_pad_tuple
from ..nn.winograd_util import winograd_transform_matrices
from .conv2d_spatial_pack import (
conv2d_spatial_pack_nchw,
conv2d_spatial_pack_nhwc,
schedule_conv2d_spatial_pack_nchw,
schedule_conv2d_spatial_pack_nhwc,
)
from .mprofile.dsp.conv2d import conv2d_nhwc_dsp_compute, conv2d_nhwc_dsp_schedule
@autotvm.register_topi_compute("conv2d_nchw_spatial_pack.arm_cpu")
def conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute conv2d with NCHW layout using the spatial-pack template.

    `cfg` is the autotvm config entity; the remaining arguments follow the
    standard TOPI conv2d convention.
    """
    return conv2d_spatial_pack_nchw(
        cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile=2
    )


@autotvm.register_topi_schedule("conv2d_nchw_spatial_pack.arm_cpu")
def schedule_conv2d_nchw_spatial_pack(cfg, outs):
    """Create schedule for conv2d_nchw"""
    s = te.create_schedule([x.op for x in outs])

    def _callback(op):
        # schedule conv2d
        if "spatial_conv2d_output" in op.tag:
            output = op.output(0)
            conv = op.input_tensors[0]

            # Walk the compute graph back to the padded input so the padding
            # stage can be inlined into its consumer.
            data_vec = conv.op.input_tensors[0]
            data_pad = data_vec.op.input_tensors[0]
            s[data_pad].compute_inline()

            kernel_vec = conv.op.input_tensors[1]
            # When the kernel has been packed, step one tensor further back
            # to reach the raw kernel; otherwise the input *is* the kernel.
            if kernel_vec.op.name == "kernel_vec":
                kernel = kernel_vec.op.input_tensors[0]
            else:
                kernel = kernel_vec
            # Inline a dilation stage produced by nn.dilate, if present.
            if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
                s[kernel].compute_inline()

            schedule_conv2d_spatial_pack_nchw(cfg, s, data_vec, kernel_vec, conv, output, outs[0])

    traverse_inline(s, outs[0].op, _callback)
    return s


@autotvm.register_topi_compute("conv2d_nhwc_spatial_pack.arm_cpu")
def conv2d_nhwc_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute conv2d with NHWC layout using the spatial-pack template."""
    return conv2d_spatial_pack_nhwc(cfg, data, kernel, strides, padding, dilation, out_dtype)


@autotvm.register_topi_schedule("conv2d_nhwc_spatial_pack.arm_cpu")
def schedule_conv2d_nhwc_spatial_pack(cfg, outs):
    """Create schedule for conv2d_nhwc"""
    s = te.create_schedule([x.op for x in outs])

    def _callback(op):
        if "spatial_conv_output_NHWC" in op.tag:
            schedule_conv2d_spatial_pack_nhwc(cfg, s, op, outs[0])

    traverse_inline(s, outs[0].op, _callback)
    return s


@autotvm.register_topi_compute("conv2d_nchw_winograd.arm_cpu")
def conv2d_nchw_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute conv2d_nchw layout using Winograd with weight transform"""
    # F(4x4, 3x3) Winograd: output tile size m = 4 (3x3 kernels only).
    tile_size = 4
    return _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype, tile_size)


@autotvm.register_topi_schedule("conv2d_nchw_winograd.arm_cpu")
def schedule_conv2d_nchw_winograd(cfg, outs):
    """Create schedule for conv2d_nchw_winograd"""
    s = te.create_schedule([x.op for x in outs])

    def _callback(op):
        if "winograd_conv2d_output" in op.tag:
            output = op.output(0)
            _schedule_winograd(cfg, s, output, outs[0])

    traverse_inline(s, outs[0].op, _callback)
    return s
def _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype, tile_size):
    """Declare the Winograd conv2d_nchw compute.

    Builds the classic pipeline: pad -> pack input tiles -> kernel transform
    (U) -> input transform (V) -> batched GEMM (M) -> inverse transform (Y)
    -> unpack to NCHW output. `kernel` may be either the raw OIHW kernel or
    an already-transformed 5-D kernel (pre_computed case).
    """
    N, CI, IH, IW = get_const_tuple(data.shape)

    # Dynamic batch is supported via a size var; dynamic H/W is not.
    if isinstance(N, tvm.tir.Any):
        N = tvm.te.size_var("n")

    if not isinstance(IH, int) or not isinstance(IW, int):
        raise RuntimeError("ARM winograd conv2d doesn't support dynamic input height or width.")

    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation

    if len(kernel.shape) == 4:
        # Raw OIHW kernel: apply dilation here if requested.
        if dilation_h != 1 or dilation_w != 1:
            kernel = nn.dilate(kernel, (1, 1, dilation_h, dilation_w))
        pre_computed = False
        CO, _, KH, KW = get_const_tuple(kernel.shape)
    else:
        # Pre-transformed 5-D kernel (alpha, alpha, CO//VC, CI, VC):
        # recover CO and the original kernel size from the tile dims.
        assert (dilation_h, dilation_w) == (1, 1), "Does not support dilation"
        pre_computed = True
        H_CAT, W_CAT, CO, CI, VC = get_const_tuple(kernel.shape)
        CO *= VC
        KH, KW = H_CAT - tile_size + 1, W_CAT - tile_size + 1
    HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW))

    # This template only handles unit-stride 3x3 kernels.
    assert KH == 3 and KW == 3 and HSTR == 1 and WSTR == 1
    data_pad = nn.pad(data, (0, 0, pt, pl), (0, 0, pb, pr), name="data_pad")

    idxd = tvm.tir.indexdiv
    idxm = tvm.tir.indexmod

    r = KW
    m = tile_size
    alpha = m + r - 1  # transform tile size
    A, B, G = winograd_transform_matrices(m, r, out_dtype)

    K = CO
    C = CI
    H = (IH + pt + pb - 3) // HSTR + 1
    W = (IW + pl + pr - 3) // WSTR + 1
    nH, nW = (H + m - 1) // m, (W + m - 1) // m
    P = N * nH * nW  # total number of output tiles

    # TODO(@kevinthesun): Support tuning/optimization for dynamic shape.
    tile_p = P if isinstance(N, int) else nH * nW
    cfg.define_split("tile_p", cfg.axis(tile_p), num_outputs=2, filter=lambda x: x.size[-1] <= 16)
    cfg.define_split("tile_k", cfg.axis(K), num_outputs=2, filter=lambda x: x.size[-1] <= 16)
    VP = cfg["tile_p"].size[-1]
    VK = cfg["tile_k"].size[-1]

    # pack input tile
    input_tile = te.compute(
        (C, idxd(P, VP), alpha, alpha, VP),
        lambda c, b, eps, nu, bb: data_pad[
            idxd(b * VP + bb, nH * nW),
            c,
            idxm(idxd(b * VP + bb, nW), nH) * m + eps,
            idxm(b * VP + bb, nW) * m + nu,
        ],
        name="d",
    )

    if autotvm.GLOBAL_SCOPE.in_tuning:
        # During tuning, stand in a placeholder for the transformed kernel so
        # the (compile-time) kernel transform is not measured.
        VC = cfg["tile_k"].size[-1]
        kvshape = (KH + tile_size - 1, KW + tile_size - 1, idxd(CO, VC), CI, VC)
        U = tvm.te.placeholder(kvshape, kernel.dtype, name="U")
    else:
        # transform kernel
        if pre_computed:
            U = kernel
        else:
            r_kh = te.reduce_axis((0, KH), "r_kh")
            r_kw = te.reduce_axis((0, KW), "r_kw")
            U = te.compute(
                (alpha, alpha, idxd(K, VK), C, VK),
                lambda eps, nu, k, c, kk: te.sum(
                    kernel[k * VK + kk][c][r_kh][r_kw].astype(out_dtype)
                    * G[eps][r_kh]
                    * G[nu][r_kw],
                    axis=[r_kh, r_kw],
                ),
                name="U",
            )

    # transform image
    r_eps = te.reduce_axis((0, alpha), "r_eps")
    r_nu = te.reduce_axis((0, alpha), "r_nu")
    V = te.compute(
        (alpha, alpha, idxd(P, VP), C, VP),
        lambda eps, nu, b, c, bb: te.sum(
            input_tile[c][b][r_eps][r_nu][bb].astype(out_dtype) * B[r_eps][eps] * B[r_nu][nu],
            axis=[r_eps, r_nu],
        ),
        name="V",
    )

    # batch gemm
    c = te.reduce_axis((0, C), name="c")
    M = te.compute(
        (alpha, alpha, K, P),
        lambda eps, nu, k, b: te.sum(
            U[eps][nu][idxd(k, VK)][c][idxm(k, VK)] * V[eps][nu][idxd(b, VP)][c][idxm(b, VP)],
            axis=c,
        ),
        name="M",
    )

    # inverse transform
    r_eps = te.reduce_axis((0, alpha), "r_eps")
    r_nu = te.reduce_axis((0, alpha), "r_nu")
    Y = te.compute(
        (K, P, m, m),
        lambda k, b, vh, vw: te.sum(
            M[r_eps][r_nu][k][b] * A[r_eps][vh] * A[r_nu][vw], axis=[r_eps, r_nu]
        ),
        name="Y",
    )

    # unpack output
    output = te.compute(
        (N, K, H, W),
        lambda n, k, h, w: Y[k][n * nH * nW + idxd(h, m) * nW + idxd(w, m), idxm(h, m), idxm(w, m)],
        name="output",
        tag="winograd_conv2d_output",
    )

    # we have to manually assign effective GFLOP for winograd
    if isinstance(N, int):
        cfg.add_flop(2 * N * K * H * W * KH * KW * C)
    return output
def _schedule_winograd(cfg, s, output, last):
    """Schedule the Winograd conv2d pipeline declared by _decl_winograd.

    Recovers the intermediate tensors (Y, M, U, V, ...) by walking the
    compute graph backwards from `output`, then schedules each stage.
    `last` is the final output op of the whole graph (may differ from
    `output` when elementwise ops follow the convolution).
    """
    Y = output.op.input_tensors[0]
    M, A = Y.op.input_tensors
    U, V = M.op.input_tensors
    d, B = V.op.input_tensors
    data_pad = d.op.input_tensors[0]

    # padding
    s[data_pad].compute_inline()

    # pack input tiles
    s[d].compute_inline()

    # transform kernel
    # (skipped entirely when U is a placeholder, i.e. during tuning)
    if isinstance(U.op, tvm.te.ComputeOp):
        kernel, G = U.op.input_tensors
        s[G].compute_inline()
        (eps, nu, k, c, kk) = s[U].op.axis
        if autotvm.GLOBAL_SCOPE.in_tuning:
            # kernel transformation will be pre-computed during compilation, so we skip
            # this part to make tuning records correct
            s[U].pragma(eps, "debug_skip_region")
        else:
            r_kh, r_kw = s[U].op.reduce_axis
            s[U].reorder(k, c, eps, nu, r_kh, r_kw, kk)
            for axis in [eps, nu, r_kh, r_kw]:
                s[U].unroll(axis)
            s[U].vectorize(kk)
            s[U].parallel(k)
        if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
            s[kernel].compute_inline()

    # transform image
    DD = s.cache_read(d, "global", [V])
    s[B].compute_inline()
    eps, nu, b, c, bb = s[V].op.axis
    r_eps, r_nu = s[V].op.reduce_axis
    s[V].reorder(b, c, eps, nu, r_eps, r_nu, bb)
    for axis in [eps, nu, r_eps, r_nu]:
        s[V].unroll(axis)
    s[DD].compute_at(s[V], c)
    s[V].vectorize(bb)
    s[V].parallel(b)

    # batch gemm
    eps, nu, k, b = s[M].op.axis
    c = s[M].op.reduce_axis[0]
    cfg.define_split("tile_c", c, num_outputs=2, filter=lambda x: x.size[-1] <= 16)
    co, ci = cfg["tile_c"].apply(s, M, c)
    xo, xi = cfg["tile_p"].apply(s, M, b)
    s[M].reorder(eps, nu, xo, co, k, ci, xi)
    cfg.define_annotate("ann_reduce", [ci], policy="try_unroll")
    cfg.define_annotate("ann_spatial", [k, xi], policy="try_unroll_vec")
    cfg["ann_reduce"].apply(s, M, [ci], axis_lens=[cfg["tile_c"].size[-1]], max_unroll=16, cfg=cfg)
    cfg["ann_spatial"].apply(s, M, [k, xi])

    # inverse transform
    s[A].compute_inline()
    k, b, vh, vw = s[Y].op.axis
    r_eps, r_nu = s[Y].op.reduce_axis
    for axis in [vh, vw, r_eps, r_nu]:
        s[Y].unroll(axis)

    # output
    n, co, h, w = s[last].op.axis
    co, coi = cfg["tile_k"].apply(s, last, co)
    p = s[last].fuse(n, co)
    s[M].compute_at(s[last], p)
    s[last].parallel(p)

    MM = s.cache_read(M, "global", [Y])
    # Recover the output tile size m from V's first dim (alpha = m + r - 1, r=3).
    m = get_const_int(V.shape[0]) + 1 - 3
    ho, wo, hi, wi = s[last].tile(h, w, m, m)
    s[Y].compute_at(s[last], wo)
    s[MM].compute_at(s[last], wo)

    if output != last:
        s[output].compute_inline()
@autotvm.register_topi_compute("conv2d_nchw_winograd_nnpack.arm_cpu")
def conv2d_nchw_winograd_nnpack(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute conv2d_nchw using nnpack Winograd implementation"""
    # Pick the NNPACK Winograd variant matching the input dtype.
    algorithm_for_dtype = {
        "float32": tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8,
        "float16": tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8_FP16,
    }
    dtype = data.dtype
    if dtype not in algorithm_for_dtype:
        raise ValueError(f"Unsupported data type {dtype} for conv2d winograd nnpack")
    return _conv2d_arm_cpu_winograd_nnpack(
        cfg,
        data,
        kernel,
        strides,
        padding,
        dilation,
        out_dtype,
        algorithm_for_dtype[dtype],
    )
@autotvm.register_topi_schedule("conv2d_nchw_winograd_nnpack.arm_cpu")
def schedule_conv2d_nchw_winograd_nnpack(cfg, outs):
    """Create schedule for conv2d_nchw_winograd_nnpack"""
    s = te.create_schedule([x.op for x in outs])

    def _callback(op):
        if "winograd_nnpack_conv2d_output" in op.tag:
            output = op.output(0)
            _schedule_winograd_nnpack(cfg, s, output, outs[0])

    traverse_inline(s, outs[0].op, _callback)
    return s


def _conv2d_arm_cpu_winograd_nnpack(
    cfg, data, kernel, strides, padding, dilation, out_dtype, convolution_algorithm
):
    """TOPI compute callback. Use winograd NNPACK template

    Delegates weight transform and convolution to the NNPACK extern ops;
    only unit-stride, pad-1 3x3 kernels with batch size 1 are supported.
    """
    N, CI, IH, IW = get_const_tuple(data.shape)
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    assert (dilation_h, dilation_w) == (1, 1)
    assert len(kernel.shape) == 4
    CO, _, KH, KW = get_const_tuple(kernel.shape)
    HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW))

    # NNPACK WT_8x8 requires exactly this configuration.
    assert (
        KH == 3
        and KW == 3
        and pt == 1
        and pb == 1
        and pl == 1
        and pr == 1
        and HSTR == 1
        and WSTR == 1
    )
    H = (IH + pt + pb - 3) // HSTR + 1
    W = (IW + pl + pr - 3) // WSTR + 1

    cfg.define_knob("winograd_nnpack_algorithm", [convolution_algorithm])

    assert N == 1
    with tvm.te.tag_scope("winograd_nnpack_conv2d_weight_transform"):
        transformed_kernel = tvm.contrib.nnpack.convolution_inference_weight_transform(
            kernel, algorithm=cfg["winograd_nnpack_algorithm"].val
        )
        if autotvm.GLOBAL_SCOPE.in_tuning:
            # Replace the weight transform with zeros while tuning so its cost
            # is not measured.
            transformed_kernel = te.compute(transformed_kernel.shape, lambda *args: 0.0)

    with tvm.te.tag_scope("winograd_nnpack_conv2d_output"):
        output = tvm.contrib.nnpack.convolution_inference_without_weight_transform(
            data,
            transformed_kernel,
            bias=None,
            padding=[pt, pb, pl, pr],
            stride=[HSTR, WSTR],
            algorithm=cfg["winograd_nnpack_algorithm"].val,
        )

    # we have to manually assign effective GFLOP for winograd
    cfg.add_flop(2 * N * CI * H * W * KH * KW * CO)
    return output


def _schedule_winograd_nnpack(cfg, s, output, last):
    """Schedule the NNPACK Winograd conv2d (mostly extern, little to do)."""
    # Could have bias.
    (X, TK) = output.op.input_tensors[:2]

    # transform kernel
    assert isinstance(TK.op, (te.tensor.ComputeOp, te.tensor.ExternOp, te.tensor.PlaceholderOp))
    if autotvm.GLOBAL_SCOPE.in_tuning and isinstance(TK.op, te.tensor.ComputeOp):
        # kernel transformation will be pre-computed during compilation, so we skip
        # this part to make tuning records correct
        s[TK].pragma(s[TK].op.axis[0], "debug_skip_region")
@autotvm.register_topi_compute("conv2d_nchw_winograd_nnpack_without_weight_transform.arm_cpu")
def conv2d_nchw_winograd_nnpack_without_weight_transform(
    cfg, data, transformed_kernel, bias, strides, padding, dilation, out_dtype
):
    """Compute conv2d_nchw using NNPack winograd without weight transform.

    Only supports: batch 1, 3x3 kernel, stride 1, padding 1, dilation 1.
    The kernel must already be in NNPACK's winograd-transformed layout.
    """
    N, CI, IH, IW = get_const_tuple(data.shape)
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    # NNPACK's inference path does not support dilated convolution.
    assert (dilation_h, dilation_w) == (1, 1)
    assert len(transformed_kernel.shape) == 4
    CO, _, _, _ = get_const_tuple(transformed_kernel.shape)
    HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    # Kernel size is implied by the winograd transform; the original KH/KW are
    # not recoverable from the transformed tensor.
    KH, KW = 3, 3
    pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW))
    # The only supported configuration: 3x3 kernel, unit stride, pad 1.
    assert (
        KH == 3
        and KW == 3
        and pt == 1
        and pb == 1
        and pl == 1
        and pr == 1
        and HSTR == 1
        and WSTR == 1
    )
    H = (IH + pt + pb - 3) // HSTR + 1
    W = (IW + pl + pr - 3) // WSTR + 1
    assert N == 1
    # The algorithm knob is defined by the weight-transform variant of this
    # compute; here it is only read back from the config.
    with tvm.te.tag_scope("winograd_nnpack_conv2d_output"):
        output = tvm.contrib.nnpack.convolution_inference_without_weight_transform(
            data=data,
            transformed_kernel=transformed_kernel,
            bias=bias,
            padding=[pt, pb, pl, pr],
            stride=[HSTR, WSTR],
            algorithm=cfg["winograd_nnpack_algorithm"].val,
        )
    # we have to manually assign effective GFLOP for winograd
    cfg.add_flop(2 * N * CI * H * W * KH * KW * CO)
    return output
@autotvm.register_topi_schedule("conv2d_nchw_winograd_nnpack_without_weight_transform.arm_cpu")
def schedule_conv2d_nchw_winograd_nnpack_without_weight_transform(cfg, outs):
    """TOPI schedule callback for the NNPACK winograd conv2d (no weight transform)."""
    sch = te.create_schedule([t.op for t in outs])

    def _visit(op):
        if "winograd_nnpack_conv2d_output" not in op.tag:
            return
        _schedule_winograd_nnpack(cfg, sch, op.output(0), outs[0])

    traverse_inline(sch, outs[0].op, _visit)
    return sch
@autotvm.register_topi_compute("conv2d_nhwc_dsp.arm_cpu")
def conv2d_nhwc_dsp(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute conv2d in NHWC layout with Arm v7e-m DSP instructions."""
    return conv2d_nhwc_dsp_compute(
        cfg, data, kernel, strides, padding, dilation, out_dtype
    )
@autotvm.register_topi_schedule("conv2d_nhwc_dsp.arm_cpu")
def schedule_conv2d_nhwc_dsp(cfg, outs):
    """Create the schedule for conv2d_nhwc_dsp by delegating to the DSP profile."""
    return conv2d_nhwc_dsp_schedule(cfg, outs)
| 17,799 | 33.765625 | 105 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, no-else-return, unused-argument, import-outside-toplevel
"""Dense schedule for ARM CPU"""
from tvm import autotvm
from .mprofile.dsp.dense import dense_dsp_schedule, dense_dsp_compute
@autotvm.register_topi_compute("dense_dsp.arm_cpu")
def dense_dsp(cfg, data, weight, bias, out_dtype):
    """Compute a dense (fully-connected) layer with Arm v7e-m DSP instructions."""
    return dense_dsp_compute(cfg, data, weight, bias=bias, out_dtype=out_dtype)
@autotvm.register_topi_schedule("dense_dsp.arm_cpu")
def schedule_dense_dsp(cfg, outs):
    """Create the schedule for dense_dsp by delegating to the DSP profile."""
    return dense_dsp_schedule(cfg, outs)
| 1,434 | 41.205882 | 105 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/bitserial_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,invalid-name,unused-argument
"""Bitserial conv2d schedule on arm cpu"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import autotvm
from tvm import relay
from ..nn.pad import pad
from ..nn.bitserial_conv2d import bitserial_conv2d_legalize
from ..nn.bitserial_util import bitpack, binary_op_multiplier
from ..nn.utils import get_pad_tuple
from ..utils import get_const_int, get_const_tuple, traverse_inline
def _kernel_vec_spatial_pack_nhwc(kernel, kernel_bits, VC, use_bitpack=True):
    """Vectorize the (optionally bit-packed) kernel for spatial-pack NHWC conv.

    When `use_bitpack` is True the raw kernel is first packed into uint8
    bit-planes along the input-channel axis; the result is then re-laid out
    with the output channels tiled by `VC`.
    """
    if use_bitpack:
        packed = bitpack(kernel, kernel_bits, pack_axis=2, bit_axis=2, pack_type="uint8")
    else:
        packed = kernel
    KH, KW, KB, CI, CO = packed.shape
    vec_shape = (CO // VC, KH, KW, KB, VC, CI)

    def _gather(co, dh, dw, b, vc, ci):
        return packed[dh][dw][b][ci][co * VC + vc]

    return te.compute(vec_shape, _gather, name="kernel_vec")
@autotvm.register_topi_compute("bitserial_conv2d_nhwc.arm_cpu")
def bitserial_conv2d_nhwc(
    cfg,
    data,
    kernel,
    stride,
    padding,
    activation_bits,
    weight_bits,
    pack_dtype,
    out_dtype,
    unipolar,
):
    """Compute convolution with pack on spatial axes.

    Bit-packs the activations, vectorizes the kernel, tiles the output into
    (VH, VW, VC) blocks, and sums popcounts of ANDed bit-planes (unipolar uses
    a popcount difference). The result is unpacked back to plain NHWC.
    """
    assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1"
    assert pack_dtype == "uint8", "only support packing into uint8 bits"
    assert out_dtype == "int16", "only support output type of int16"
    N, H, W, CI = get_const_tuple(data.shape)
    # A 4-D kernel is still unpacked (H, W, I, O); a 5-D kernel has already
    # been bit-packed into (H, W, B, I_packed, O).
    if len(kernel.shape) == 4:
        KH, KW, _, CO = get_const_tuple(kernel.shape)
        CI_packed = CI // 8
    else:
        KH, KW, KB, CI_packed, CO = get_const_tuple(kernel.shape)
    if isinstance(padding, int) or (isinstance(padding, (tuple, list)) and len(padding) == 2):
        # NOTE(review): `kernel` (a tensor) is passed where get_pad_tuple expects a
        # kernel-size tuple; harmless on this int/2-tuple path since the size is
        # only consulted for string padding modes — confirm.
        TPAD, LPAD, DPAD, RPAD = get_pad_tuple(padding, kernel)
    else:
        TPAD, LPAD, DPAD, RPAD = padding
    if isinstance(stride, (tuple, list)):
        HSTR, WSTR = stride
    else:
        HSTR, WSTR = stride, stride
    # HCAT/WCAT: extra rows/cols each tile needs beyond its stride footprint.
    HCAT, WCAT = KH - 1, KW - 1
    PAD_H = H + (TPAD + DPAD)
    PAD_W = W + (LPAD + RPAD)
    OH = (PAD_H - KH) // HSTR + 1
    OW = (PAD_W - KW) // WSTR + 1
    oshape = (1, OH, OW, CO)
    idxd = tvm.tir.indexdiv
    idxm = tvm.tir.indexmod
    # Pad input channels of weights and data when it is not a multiple of 8
    # NOTE(review): CI_PAD is the remainder itself, so CI_packed + CI_PAD is not
    # necessarily a multiple of 8 (e.g. 5 -> 10) — confirm intent.
    if CI_packed % 8 != 0:
        CI_PAD = CI_packed % 8
        CI_packed += CI_PAD
    else:
        CI_PAD = 0
    # ==================== define configuration space ====================
    n, oh, ow, co = cfg.axis(N), cfg.axis(OH), cfg.axis(OW), cfg.axis(CO)
    ci, kh, kw = cfg.reduce_axis(CI_packed), cfg.reduce_axis(KH), cfg.reduce_axis(KW)
    ib, kb = cfg.reduce_axis(activation_bits), cfg.reduce_axis(weight_bits)
    # Tile sizes are constrained to what the NEON popcount microkernel handles.
    co, vc = cfg.define_split("tile_co", co, num_outputs=2, filter=lambda x: x.size[-1] == 8)
    oh, vh = cfg.define_split("tile_oh", oh, num_outputs=2, filter=lambda x: x.size[-1] >= 2)
    ow, vw = cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda x: x.size[-1] >= 2)
    ci_o, ci_i = cfg.define_split(
        "tile_ci", ci, num_outputs=2, filter=lambda x: x.size[-1] == 8 or x.size[-1] == 16
    )
    re_axes = cfg.define_reorder(
        "reorder_0",
        [n, oh, ow, co, vh, vw, kh, kw, ci_o, kb, ib, vc, ci_i],
        policy="candidate",
        candidate=[
            [n, oh, ow, co, vh, vw, kh, kw, ci_o, kb, ib, vc, ci_i],
            [n, oh, ow, co, vh, vw, kw, kh, ci_o, kb, ib, vc, ci_i],
        ],
    )
    # binary ops
    cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW * binary_op_multiplier(pack_dtype))
    # ====================
    VC = cfg["tile_co"].size[-1]
    VH = cfg["tile_oh"].size[-1]
    VW = cfg["tile_ow"].size[-1]
    # Bit-pack activations along the channel axis; vectorize the kernel with
    # output channels tiled by VC (bit-packing it too if still 4-D).
    data_q = bitpack(data, activation_bits, pack_axis=3, bit_axis=3, pack_type="uint8")
    kernel_vec = _kernel_vec_spatial_pack_nhwc(kernel, weight_bits, VC, len(kernel.shape) == 4)
    idxm = tvm.tir.indexmod
    if idxm(kernel_vec.shape[-1], 8) != 0 and CI_PAD != 0:
        kernel_vec = pad(kernel_vec, [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, CI_PAD])
    N, H, W, IB, CI = data_q.shape
    OCO, KH, KW, KB, VC, CI = kernel_vec.shape
    # Layout of the tiled data: one (VH*HSTR+HCAT, VW*WSTR+WCAT) patch per
    # output tile, with bit-plane and packed-channel axes innermost.
    dvshape = (
        N,
        PAD_H // (VH * HSTR),
        PAD_W // (VW * WSTR),
        VH * HSTR + HCAT,
        VW * WSTR + WCAT,
        IB,
        CI,
    )
    ovshape = (1, OH // VH, OW // VW, CO // VC, VH, VW, VC)
    # NOTE(review): pads only when BOTH top and right pads are non-zero; an
    # asymmetric case such as TPAD != 0, RPAD == 0 would skip spatial padding
    # entirely — confirm whether `or` was intended.
    if TPAD != 0 and RPAD != 0:
        data_pad = pad(data_q, (0, TPAD, LPAD, 0, 0), (0, DPAD, RPAD, 0, CI_PAD), name="data_pad")
    elif CI_PAD != 0:
        data_pad = pad(data_q, (0, 0, 0, 0, 0), (0, 0, 0, 0, CI_PAD), name="data_pad")
    else:
        data_pad = data_q
    # Re-layout padded data into per-tile patches for vectorized access.
    data_vec = te.compute(
        dvshape,
        lambda n, h, w, vh, vw, b, ci: data_pad[n][h * VH * HSTR + vh][w * VW * WSTR + vw][b][ci],
        name="data_vec",
    )
    ci = te.reduce_axis((0, CI), name="ci")
    dh = te.reduce_axis((0, KH), name="dh")
    dw = te.reduce_axis((0, KW), name="dw")
    ib = te.reduce_axis((0, IB), name="ib")
    kb = te.reduce_axis((0, KB), name="kb")
    def _bipolar_conv(n, h, w, co, vh, vw, vc):
        # Bipolar: popcount of ANDed bit-planes, weighted by bit significance.
        return te.sum(
            (
                tvm.tir.popcount(
                    kernel_vec[co, dh, dw, kb, vc, ci].astype("uint16")
                    & data_vec[n, h, w, vh * HSTR + dh, vw * WSTR + dw, ib, ci].astype("uint16")
                )
                << (kb + ib).astype("uint16")
            ),
            axis=[dh, dw, kb, ib, ci],
        )
    def _unipolar_conv(n, h, w, co, vh, vw, vc):
        # Unipolar: popcount difference implements the +1/-1 weight encoding.
        return te.sum(
            (
                (
                    tvm.tir.popcount(
                        kernel_vec[co, dh, dw, kb, vc, ci].astype("int16")
                        & data_vec[n, h, w, vh * HSTR + dh, vw * WSTR + dw, ib, ci].astype("int16")
                    )
                    - tvm.tir.popcount(
                        ~kernel_vec[co, dh, dw, kb, vc, ci].astype("int16")
                        & data_vec[n, h, w, vh * HSTR + dh, vw * WSTR + dw, ib, ci]
                    ).astype("int16")
                )
                << (kb + ib).astype("int16")
            ),
            axis=[dh, dw, kb, ib, ci],
        )
    if unipolar:
        conv_vec = te.compute(ovshape, _unipolar_conv, name="conv_vec", tag="unipolar")
    else:
        conv_vec = te.compute(ovshape, _bipolar_conv, name="conv_vec", tag="bipolar")
    # Unpack the tiled result back to the plain NHWC output layout.
    conv = te.compute(
        oshape,
        lambda n, h, w, co: conv_vec[
            n, idxd(h, VH), idxd(w, VW), idxd(co, VC), idxm(h, VH), idxm(w, VW), idxm(co, VC)
        ].astype(out_dtype),
        name="conv",
        tag="spatial_bitserial_conv_nhwc",
    )
    return conv
def _intrin_popcount(m, k_i, w_b, x_b, unipolar):
    """Declare an m-lane NEON popcount-accumulate tensor intrinsic.

    The microkernel ANDs weight and activation bit-planes, popcounts the
    result, shifts by the combined bit significance (bw + bx), and
    pairwise-accumulates (vpadd/vpadal) into an m-element int16 (unipolar) or
    uint16 (bipolar) vector.

    Parameters
    ----------
    m : int
        Number of output lanes produced per invocation.
    k_i : int
        Inner reduction length; the generated code handles 16 and 8.
    w_b : int
        Number of weight bit-planes.
    x_b : int
        Number of activation bit-planes.
    unipolar : bool
        Use signed popcount-difference arithmetic when True.
    """
    pack_dtype = "uint8"
    w = te.placeholder((w_b, m, k_i), dtype=pack_dtype, name="w")
    x = te.placeholder(
        (
            x_b,
            k_i,
        ),
        dtype=pack_dtype,
        name="x",
    )
    k = te.reduce_axis((0, k_i), name="k")
    bw = te.reduce_axis((0, w_b), name="bw")
    bx = te.reduce_axis((0, x_b), name="bx")
    # The declared computation (what the intrinsic replaces): a bit-serial
    # dot product over all bit-plane combinations.
    if unipolar:
        dtype = "int16"
        z = te.compute(
            (m,),
            lambda i: te.sum(
                (
                    tvm.tir.popcount(w[bw, i, k].astype(dtype) & x[bx, k].astype(dtype))
                    - tvm.tir.popcount(~w[bw, i, k].astype(dtype) & x[bx, k].astype(dtype))
                )
                << (bw + bx).astype(dtype),
                axis=[bw, bx, k],
            ),
            name="z",
        )
    else:
        dtype = "uint16"
        z = te.compute(
            (m,),
            lambda i: te.sum(
                tvm.tir.popcount(w[bw, i, k].astype(dtype) & x[bx, k].astype(dtype))
                << (bw + bx).astype(dtype),
                axis=[bw, bx, k],
            ),
            name="z",
        )
    Wb = tvm.tir.decl_buffer(
        w.shape, w.dtype, name="W", offset_factor=k_i, strides=[te.var("ldw"), te.var("ldw"), 1]
    ) # stride can be inferred
    Xb = tvm.tir.decl_buffer(
        x.shape, x.dtype, name="X", offset_factor=k_i, strides=[te.var("ldw"), 1]
    )
    Zb = tvm.tir.decl_buffer(z.shape, z.dtype, name="Z", offset_factor=1, strides=[1])
    def _intrin_func(ins, outs):
        ww, xx = ins
        zz = outs[0]
        args_2 = tvm.tir.const(2, "uint32")
        # Select signed or unsigned NEON pairwise-add intrinsics to match the
        # accumulation dtype.
        if unipolar:
            vpadd = "llvm.arm.neon.vpadd.v8i8"
            vpadalu = "llvm.arm.neon.vpadals.v16i8.v8i16"
            full_dtype = "int8x16"
            half_dtype = "int8x8"
            return_dtype = "int16x8"
        else:
            vpadd = "llvm.arm.neon.vpadd.v8u8"
            vpadalu = "llvm.arm.neon.vpadalu.v16u8.v8u16"
            full_dtype = "uint8x16"
            half_dtype = "uint8x8"
            return_dtype = "uint16x8"
        def _instr(index):
            # index: 0 = body, 1 = reduction reset, 2 = reduction update.
            irb = tvm.tir.ir_builder.create()
            if index == 1: # reduce reset
                irb.emit(zz.vstore(0, tvm.tir.const(0, return_dtype)))
                return irb.get()
            # body and reduce update
            cnts8 = [None] * 8
            cnts4 = [None] * 4
            cnts2 = [None] * 2
            for bw in range(w_b):
                for bx in range(x_b):
                    if k_i == 16:
                        # 16-wide reduction: popcount a full 16-byte vector per
                        # lane, then fold its two halves together.
                        for i in range(m):
                            w_ = ww.vload([bw, i, 0], "uint8x16").astype(full_dtype)
                            x_ = xx.vload([bx, 0], "uint8x16").astype(full_dtype)
                            if unipolar:
                                cnts = tvm.tir.popcount(w_ & x_) - tvm.tir.popcount(~w_ & x_)
                            else:
                                cnts = tvm.tir.popcount(w_ & x_)
                            upper_half = tvm.tir.call_intrin(half_dtype, "tir.vectorhigh", cnts)
                            lower_half = tvm.tir.call_intrin(half_dtype, "tir.vectorlow", cnts)
                            cnts8[i] = upper_half + lower_half
                        # Pairwise-reduce the m per-lane vectors down to one
                        # combined 16-byte vector.
                        for i in range(m // 2):
                            cnts4[i] = tvm.tir.call_llvm_pure_intrin(
                                half_dtype, vpadd, args_2, cnts8[i * 2], cnts8[i * 2 + 1]
                            )
                        for i in range(m // 4):
                            cnts2[i] = tvm.tir.call_llvm_pure_intrin(
                                half_dtype, vpadd, args_2, cnts4[i * 2], cnts4[i * 2 + 1]
                            )
                        cnts = tvm.tir.call_intrin(
                            full_dtype, "tir.vectorcombine", cnts2[0], cnts2[1]
                        )
                        # Weight the popcounts by the combined bit significance
                        # and accumulate into the output vector.
                        shifted_cnts = cnts << tvm.tir.const(bw + bx, pack_dtype)
                        out = tvm.tir.call_llvm_pure_intrin(
                            return_dtype, vpadalu, args_2, zz.vload(0, return_dtype), shifted_cnts
                        )
                    else: # ki == 8
                        # 8-wide reduction: work on half (8-byte) vectors.
                        for i in range(m):
                            w_ = ww.vload([bw, i, 0], "uint8x8").astype(half_dtype)
                            x_ = xx.vload([bx, 0], "uint8x8").astype(half_dtype)
                            if unipolar:
                                cnts8[i] = tvm.tir.popcount(w_ & x_) - tvm.tir.popcount(~w_ & x_)
                            else:
                                cnts8[i] = tvm.tir.popcount(w_ & x_)
                        for i in range(m // 2):
                            cnts4[i] = tvm.tir.call_llvm_pure_intrin(
                                half_dtype, vpadd, args_2, cnts8[i * 2], cnts8[i * 2 + 1]
                            )
                        for i in range(m // 4):
                            cnts2[i] = tvm.tir.call_llvm_pure_intrin(
                                half_dtype, vpadd, args_2, cnts4[i * 2], cnts4[i * 2 + 1]
                            )
                        cnts = tvm.tir.call_intrin(
                            full_dtype, "tir.vectorcombine", cnts2[0], cnts2[1]
                        )
                        shifted_cnts = cnts << tvm.tir.const(bw + bx, pack_dtype)
                        out = tvm.tir.call_llvm_pure_intrin(
                            return_dtype, vpadalu, args_2, zz.vload(0, return_dtype), shifted_cnts
                        )
                    irb.emit(zz.vstore(0, out))
            return irb.get()
        # body, reset, update
        return _instr(0), _instr(1), _instr(2)
    buffer_params = {"offset_factor": 1}
    return te.decl_tensor_intrin(
        z.op, _intrin_func, binds={w: Wb, x: Xb, z: Zb}, default_buffer_params=buffer_params
    )
# ARM specific schedule that using custom microkernel
def _schedule_spatial_conv2d_nhwc(
    cfg, s, data_pad, data_vec, kernel_vec, conv_out, output, last, unipolar
):
    """Schedule the spatial-pack bitserial conv: data/kernel packing, the
    tiled convolution, and the unpack stage.

    Tensorizes the inner reduction with the NEON popcount microkernel when the
    inner channel tile is a multiple of 8.
    """
    _, _, _, _, _, IB, CI = data_vec.shape
    _, KH, KW, KB, _, _ = kernel_vec.shape
    KB = get_const_int(KB)
    IB = get_const_int(IB)
    VC = cfg["tile_co"].size[-1]
    VH = cfg["tile_oh"].size[-1]
    VW = cfg["tile_ow"].size[-1]
    ##### Schedule data padding and packing
    if data_pad is not None:
        s[data_pad].compute_inline()
    _, h, _, _, _, _, _ = s[data_vec].op.axis
    cfg.define_split("tile_ah", cfg.axis(h), num_outputs=2, max_factor=32)
    oh, ih = cfg["tile_ah"].apply(s, data_vec, h)
    s[data_vec].parallel(oh)
    #### Schedule kernel packing
    co, _, _, _, _, _ = s[kernel_vec].op.axis
    cfg.define_split("tile_bco", cfg.axis(co), num_outputs=2, max_factor=32)
    oco, ico = cfg["tile_bco"].apply(s, kernel_vec, co)
    s[kernel_vec].parallel(oco)
    ##### Schedule Convolution
    n, oh, ow, co, vh, vw, vc = s[conv_out].op.axis
    kh, kw, kb, ib, ci = s[conv_out].op.reduce_axis
    # Apply the tuned split/reorder decisions from the compute definition.
    ci_o, ci_i = cfg["tile_ci"].apply(s, conv_out, ci)
    re_axes = cfg["reorder_0"].apply(
        s, conv_out, [n, oh, ow, co, vh, vw, kh, kw, ci_o, kb, ib, vc, ci_i]
    )
    # Use microkernel
    kfactor = cfg["tile_ci"].size[1]
    # The microkernel requires the inner reduction to fit NEON lanes (x8).
    if kfactor % 8 == 0:
        pc = _intrin_popcount(VC, kfactor, KB, IB, unipolar)
        s[conv_out].tensorize(kb, pc)
    n, h, w, co = s[last].op.axis
    co, vc = cfg["tile_co"].apply(s, last, co)
    oh, vh = cfg["tile_oh"].apply(s, last, h)
    ow, vw = cfg["tile_ow"].apply(s, last, w)
    s[last].reorder(n, oh, ow, co, vh, vw, vc)
    s[last].vectorize(vc)
    if last != output:
        s[output].compute_inline()
    # Compute each conv tile at the point its unpacked output is consumed.
    s[conv_out].compute_at(s[last], co)
    s[last].parallel(oh)
    return s
@autotvm.register_topi_schedule("bitserial_conv2d_nhwc.arm_cpu")
def schedule_bitserial_conv2d_nhwc(cfg, outs):
    """Arm cpu schedule for bitserial conv2d"""
    sch = te.create_schedule([t.op for t in outs])

    def _visit(op):
        if "spatial_bitserial_conv_nhwc" not in op.tag:
            return
        output = op.output(0)
        conv_out = op.input_tensors[0]
        kernel_vec, data_vec = conv_out.op.input_tensors[:2]
        data_q = data_vec.op.input_tensors[0]
        data = data_q.op.input_tensors[0]
        pad_stage = None
        # When spatial padding was applied, the stage we found as data_q is
        # actually the pad stage; descend one more level to reach the
        # bit-packed and raw tensors.
        if isinstance(data_q.op, te.tensor.ComputeOp) and "pad" in data_q.op.tag:
            pad_stage, data_q = data_q, data
            data = data.op.input_tensors[0]
        is_unipolar = "unipolar" in conv_out.op.tag
        _schedule_spatial_conv2d_nhwc(
            cfg, sch, pad_stage, data_vec, kernel_vec, conv_out, output, outs[0], is_unipolar
        )

    traverse_inline(sch, outs[0].op, _visit)
    return sch
@bitserial_conv2d_legalize.register("arm_cpu")
def _bitserial_conv2d_legalize(attrs, inputs, arg_types):
    """Legalizes Bitserial Conv2D op.

    Transposes an unpacked 4-D kernel into the HWIO layout expected for NHWC
    data; all other cases are left untouched.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    arg_types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr, or None when no rewrite applies.
    """
    if attrs["data_layout"] != "NHWC":
        return None
    data, kernel = inputs
    if len(kernel.data.shape) != 4:
        return None
    # HWIO layout is expected for NHWC input; fix up other kernel layouts.
    kernel_layout = attrs["kernel_layout"]
    if kernel_layout == "HWOI":
        # Common in TF depthwise conv2d graphs.
        kernel = relay.transpose(kernel, axes=(0, 1, 3, 2))
    elif kernel_layout == "OIHW":
        kernel = relay.transpose(kernel, axes=(2, 3, 1, 0))
    # Rebuild the op with the (possibly transposed) kernel and HWIO layout.
    new_attrs = {key: attrs[key] for key in attrs.keys()}
    new_attrs["kernel_layout"] = "HWIO"
    return relay.nn.bitserial_conv2d(data, kernel, **new_attrs)
| 17,600 | 36.528785 | 99 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/qnn_legalize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""QNN legalization transforms that help eliminate sparse channels.
Some models (like MobileNetV1 when fine-tuned) have output channels in their kernels which are
completely full of zeros. Sometimes these can be optimized away by the C compiler, but this does not
happen when complex schedules (like the ACLE tensordot convolutions) are used.
Instead, we will remove these channels by replacing blocks of operators with equivalent "denser"
ones during legalization. This is harder than it looks - while the outputs of channels with all-zero
kernels do not depend on the input data, they are usually not zero. We work around this by computing
how these constant values affect subsequent operators, and "folding" these effects into a bias_add.
It would eventually be nice to have a generalized, cross-target solution for removing zero channels,
as there is no downside. This may be possible with Relax, but I'm unsure.
"""
import numpy as np
from scipy.signal import convolve2d
from tvm.topi.utils import get_const_tuple
from tvm import nd, relay
from .qnn_alter_op import prev_ops_match, edit_attrs
from ..nn import bias_add_legalize
def _compute_fixed_conv2d_outputs(requantize_op):
    """Compute all conv2d output values that do not depend on the layer input.

    Scans the conv2d kernel for output channels that are entirely zero; for
    those channels the convolution contributes nothing, so the requantized
    output is a constant determined by bias, scales, and zero point alone.

    Parameters
    ----------
    requantize_op : relay.expr.Call
        A qnn.requantize Relay operator, which must be preceeded by a nn.bias_add op and a
        qnn.conv2d operator. The qnn.conv2d operator must have groups==1. All arguments to all three
        operators, besides the main tensor, must be constants.

    Returns
    -------
    fixed_outputs : Dict[int, int]
        Maps each constant ("fixed") output channel index of the
        conv2d -> bias_add -> requantize block to its int8-clipped value.
        Empty when no channel is constant.
    """
    bias_add_op = requantize_op.args[0]
    conv2d_op = bias_add_op.args[0]
    assert conv2d_op.attrs.kernel_layout.isalpha()
    assert conv2d_op.attrs.groups == 1

    kernel = conv2d_op.args[1].data.numpy()
    oc_axis = conv2d_op.attrs.kernel_layout.index("O")
    num_channels = kernel.shape[oc_axis]

    rq_input_scale = requantize_op.args[1].data.numpy()
    rq_output_scale = requantize_op.args[3].data.numpy().item()
    rq_output_zero_point = requantize_op.args[4].data.numpy().item()
    bias_data = bias_add_op.args[1].data.numpy()

    fixed_outputs = {}
    for channel in range(num_channels):
        kernel_slice = np.take(kernel, channel, axis=oc_axis)
        if np.any(kernel_slice):
            continue
        # All-zero kernel: only bias, scales, and zero point matter.
        ratio = rq_input_scale[channel] / rq_output_scale
        constant = round(bias_data[channel] * ratio + rq_output_zero_point)
        fixed_outputs[channel] = min(127, max(-128, constant))
    return fixed_outputs
def _compute_fixed_depthwise_outputs(requantize_op, fixed_channel_inputs):
    """Compute all depthwise conv2d output values that do not depend on the PREVIOUS layer input.
    We take as input a requantize operator, and a dictionary of which inputs to our depthwise
    operator are fixed and what values they are fixed to. However, a fixed input to one channel
    of our depthwise operator does NOT guarantee we can remove the output, because of padding.
    This function checks if the padding makes a difference in the outputs, and if not, removes
    the channels from the depthwise_conv2d.
    Parameters
    ----------
    requantize_op : relay.expr.Call
        A qnn.requantize Relay operator, which must be preceeded by a nn.bias_add op and a
        qnn.conv2d operator. The qnn.conv2d operator must be depthwise. All arguments to all three
        operators, besides the main tensor, must be constants.
    fixed_channel_inputs : Dict[int, int]
        A dictionary showing which input channels to the qnn.conv2d operator have fixed values, and
        what those values are fixed to. Can be empty. Usually, this will be generated by
        _compute_fixed_conv2d_outputs.
    Returns
    -------
    fixed_outputs : Dict[int, int]
        A dictionary showing which of the conv2d -> bias_add -> requantize output channels are
        "fixed" - i.e. those that do not depend on the input tensor. Each key in the dictionary is
        an output channel index, and each value is the value that all entries in that output channel
        will have. If the block has no fixed output channels, this dictionary will be empty.
    """
    bias_add_op = requantize_op.args[0]
    depthwise_op = bias_add_op.args[0]
    assert depthwise_op.attrs.kernel_layout.isalpha()
    # groups > 1 is how a depthwise conv2d is identified here.
    assert depthwise_op.attrs.groups > 1
    kernel = depthwise_op.args[1].data.numpy()
    oc_axis = depthwise_op.attrs.kernel_layout.index("O")
    conv_input_zero_point = depthwise_op.args[2].data.numpy().item()
    rq_input_scale = requantize_op.args[1].data.numpy()
    rq_output_scale = requantize_op.args[3].data.numpy().item()
    rq_output_zero_point = requantize_op.args[4].data.numpy().item()
    bias_data = bias_add_op.args[1].data.numpy()
    kernel_size = get_const_tuple(depthwise_op.attrs.kernel_size)
    fixed_outputs = {}
    for i, fixed_input in fixed_channel_inputs.items():
        # Simulate the depthwise convolution on a constant-valued, zero-padded
        # input patch (convolve2d "same" mode) to see how padding affects the
        # channel's output values.
        input_array = np.full(kernel_size, fixed_input, dtype="int32") - conv_input_zero_point
        kernel_channel = np.take(kernel, i, axis=oc_axis).reshape(kernel_size)
        scale = rq_input_scale[i] / rq_output_scale
        convolved = convolve2d(input_array, kernel_channel, mode="same")
        rounded = np.around((convolved + bias_data[i]) * scale).astype("int32")
        clipped = np.clip(rounded + rq_output_zero_point, -128, 127)
        # We require the ENTIRE padded convolution to all have the same clipped value before we do
        # a replacement. This is excessive - we only have to check for the padding that will
        # actually be performed on the depthwise convolution, which is often less. If we felt even
        # more ambitious, we could do the replacement for "close enough" looking convolution
        # outputs, which in theory could reduce accuracy but in practice does not. Doing this would
        # yield a ~0.5% speed gain on MobileNetV1, and nothing on other models.
        if np.all(clipped == clipped[0, 0]):
            fixed_outputs[i] = clipped[0, 0]
    # TODO @guberti look for all-zero entries in the depthwise kernel. I don't think these really
    # occur in practice, but it would be nice for theoretical completeness.
    return fixed_outputs
def _excise_conv2d_channels(empty_channels, input_op, requantize_op, is_depthwise=False):
    """Rebuild a conv2d -> bias_add -> requantize chain with the given output channels removed.

    Deletes `empty_channels` from the kernel, bias, and per-channel scales,
    and re-emits the three operators on top of `input_op`.
    """
    bias_add_op = requantize_op.args[0]
    conv2d_op = bias_add_op.args[0]
    axis = conv2d_op.attrs.kernel_layout.index("O")

    new_kernel = np.delete(conv2d_op.args[1].data.numpy(), empty_channels, axis=axis)
    new_bias = np.delete(bias_add_op.args[1].data.numpy(), empty_channels)
    new_in_scale = np.delete(conv2d_op.args[5].data.numpy(), empty_channels)
    new_out_scale = np.delete(requantize_op.args[1].data.numpy(), empty_channels)

    remaining = new_kernel.shape[axis]
    # A depthwise conv keeps groups == out channels; a regular conv has one group.
    groups = remaining if is_depthwise else 1

    new_conv = relay.qnn.op.conv2d(
        input_op,
        relay.Constant(nd.array(new_kernel)),
        *conv2d_op.args[2:5],
        relay.Constant(nd.array(new_in_scale)),
        **edit_attrs(conv2d_op.attrs, channels=remaining, groups=groups),
    )
    new_bias_add = relay.nn.bias_add(
        new_conv, relay.Constant(nd.array(new_bias)), **bias_add_op.attrs
    )
    return relay.qnn.op.requantize(
        new_bias_add,
        relay.Constant(nd.array(new_out_scale)),
        *requantize_op.args[2:],
        **requantize_op.attrs,
    )
def _excise_avg_pool_channels(empty_channels, input_op, first_reshape_op, axis=1):
    """Rebuild a cast -> avg_pool2d -> cast -> reshape chain with fewer channels.

    Shrinks the reshape's target shape along `axis` by the number of removed
    channels and re-emits the chain on top of `input_op`.
    """
    outer_cast = first_reshape_op.args[0].args[0]
    avg_pool = outer_cast.args[0]
    inner_cast = avg_pool.args[0]

    reduced_shape = list(get_const_tuple(first_reshape_op.attrs.newshape))
    reduced_shape[axis] -= len(empty_channels)

    pooled = relay.nn.avg_pool2d(relay.cast(input_op, **inner_cast.attrs), **avg_pool.attrs)
    recast = relay.cast(pooled, **outer_cast.attrs)
    return relay.reshape(recast, **edit_attrs(first_reshape_op.attrs, newshape=reduced_shape))
def _fold_into_conv_bias(fixed_inputs, conv2d_op, input_op):
    """Strip constant input channels from a qnn.conv2d and return the bias delta.

    Returns the rebuilt conv2d (with the given input channels removed from the
    kernel) plus an int32 per-output-channel correction accounting for the
    removed channels' constant contributions. Requires an unpadded conv2d.
    """
    assert not any(get_const_tuple(conv2d_op.attrs.padding))
    in_axis = conv2d_op.attrs.kernel_layout.index("I")
    out_axis = conv2d_op.attrs.kernel_layout.index("O")
    kernel = conv2d_op.args[1].data.numpy()
    zero_point = conv2d_op.args[2].data.numpy().item()

    extra_bias = np.zeros((kernel.shape[out_axis],), dtype="int32")
    for out_ch in range(kernel.shape[out_axis]):
        out_slice = np.expand_dims(np.take(kernel, out_ch, axis=out_axis), axis=out_axis)
        # Each removed input channel contributes a constant sum to this output.
        for in_ch, fixed_val in fixed_inputs.items():
            in_slice = np.take(out_slice, in_ch, axis=in_axis)
            extra_bias[out_ch] += np.sum(in_slice * (fixed_val - zero_point))

    pruned_kernel = np.delete(kernel, tuple(fixed_inputs.keys()), axis=in_axis)
    new_conv = relay.qnn.op.conv2d(
        input_op,
        relay.Constant(nd.array(pruned_kernel)),
        *conv2d_op.args[2:],
        **conv2d_op.attrs,
    )
    return new_conv, extra_bias
def _fold_into_dense_bias(fixed_inputs, dense_op, input_op, channel_axis=1):
    """Strip constant input channels from a qnn.dense and return the bias delta.

    Returns the rebuilt dense op (with the given input channels removed from
    the weights) plus an int32 per-output-channel correction accounting for
    the removed channels' constant contributions.
    """
    weights = dense_op.args[1].data.numpy()
    assert channel_axis < 2
    assert len(weights.shape) == 2
    zero_point = dense_op.args[2].data.numpy().item()

    out_axis = 1 - channel_axis
    extra_bias = np.zeros((weights.shape[out_axis],), dtype="int32")
    for out_ch in range(weights.shape[out_axis]):
        out_weights = np.take(weights, out_ch, axis=out_axis)
        # Each removed input channel contributes a constant to this output.
        for in_ch, fixed_val in fixed_inputs.items():
            extra_bias[out_ch] += (fixed_val - zero_point) * out_weights[in_ch]

    pruned_weights = np.delete(weights, tuple(fixed_inputs.keys()), axis=channel_axis)
    new_dense = relay.qnn.op.dense(
        input_op,
        relay.Constant(nd.array(pruned_weights)),
        *dense_op.args[2:],
        **dense_op.attrs,
    )
    return new_dense, extra_bias
def _densify_conv_depthwise_conv_pattern(attrs, inputs):
    """Rewrites a regular -> depthwise -> regular convolution pattern to excise empty out channels.
    Should be called as part of legalization (before dtypes and layouts are rewritten) and with the
    BIAS ADD OPERATOR'S (the one we'll use to "fold in" our constants) `attrs` and `inputs`. The
    last regular conv2d operator must be unpadded.

    Returns the rewritten nn.bias_add expression, or None when no depthwise
    output channel can be removed.
    """
    # Walk up the graph: current conv2d <- requantize(depthwise block)
    # <- bias_add <- depthwise conv2d <- requantize(top block) <- top conv2d.
    current_conv = inputs[0]
    depthwise_requantize = current_conv.args[0]
    top_requantize = depthwise_requantize.args[0].args[0].args[0]
    top_conv2d = top_requantize.args[0].args[0]
    fixed_conv2d_outputs = _compute_fixed_conv2d_outputs(top_requantize)
    fixed_dw_outputs = _compute_fixed_depthwise_outputs(depthwise_requantize, fixed_conv2d_outputs)
    # Ensure number of channels is divisible by two
    if len(fixed_dw_outputs) % 2 > 0:
        fixed_dw_outputs.popitem()
    if not fixed_dw_outputs:
        return None
    unneeded_channels = tuple(fixed_dw_outputs.keys())
    # Drop the dead channels from both convolutions, then fold the removed
    # channels' constant contributions into the current conv's bias.
    new_top_conv2d = _excise_conv2d_channels(unneeded_channels, top_conv2d.args[0], top_requantize)
    new_dw_conv2d = _excise_conv2d_channels(
        unneeded_channels, new_top_conv2d, depthwise_requantize, is_depthwise=True
    )
    new_conv, extra_bias = _fold_into_conv_bias(fixed_dw_outputs, current_conv, new_dw_conv2d)
    new_bias = inputs[1].data.numpy() + extra_bias
    new_op = relay.nn.bias_add(new_conv, relay.Constant(nd.array(new_bias)), **attrs)
    return new_op
def _densify_conv_pool_dense_pattern(attrs, inputs):
    """Rewrites a regular conv -> pool -> dense pattern to excise empty out channels from the conv.
    Should be called as part of legalization (before dtypes and layouts are rewritten) and with the
    BIAS ADD operator's `attrs` and `inputs` (the one we'll use to "fold in" our constants). The
    average pool operator must reduce the height and width dimensions to 1x1.

    Returns the rewritten nn.bias_add expression, or None when no conv2d
    output channel can be removed.
    """
    # Walk up the graph: dense <- reshape <- reshape <- cast <- avg_pool2d
    # <- cast <- requantize <- bias_add <- top conv2d.
    first_reshape = inputs[0].args[0]
    top_requantize = first_reshape.args[0].args[0].args[0].args[0].args[0]
    top_conv2d = top_requantize.args[0].args[0]
    fixed_conv2d_outputs = _compute_fixed_conv2d_outputs(top_requantize)
    # Ensure number of channels is divisible by two
    if len(fixed_conv2d_outputs) % 2 > 0:
        # Bug fix: this previously popped from `fixed_dw_outputs`, a name that
        # does not exist in this function (copied from the depthwise variant),
        # raising NameError whenever an odd number of fixed channels was found.
        fixed_conv2d_outputs.popitem()
    if not fixed_conv2d_outputs:
        return None
    unneeded_channels = tuple(fixed_conv2d_outputs.keys())
    # Drop the dead channels from the conv and the pool's reshape target, then
    # fold the removed channels' constant contributions into the dense bias.
    new_top_conv2d = _excise_conv2d_channels(unneeded_channels, top_conv2d.args[0], top_requantize)
    new_avg_pool = _excise_avg_pool_channels(unneeded_channels, new_top_conv2d, first_reshape)
    new_conv, extra_bias = _fold_into_dense_bias(fixed_conv2d_outputs, inputs[0], new_avg_pool)
    new_bias = inputs[1].data.numpy() + extra_bias
    new_op = relay.nn.bias_add(new_conv, relay.Constant(nd.array(new_bias)), **attrs)
    return new_op
@bias_add_legalize.register(["arm_cpu"])
def legalize_bias_add(attrs, inputs, _tinfos):
    """Remove empty convolution channels when possible, and "fold" them into the bias add.
    TODO @guberti: these rewrites are always beneficial and will improve performance cross-platform,
    should we enable them for all platforms, not just arm_cpu?
    """
    # Pattern 1: conv2d -> requantize -> bias_add -> conv2d (depthwise)
    # -> requantize -> bias_add -> conv2d, matched from the bias_add's input.
    if prev_ops_match(
        inputs[0],
        (
            "qnn.conv2d",
            "qnn.requantize",
            "nn.bias_add",
            "qnn.conv2d",
            "qnn.requantize",
            "nn.bias_add",
            "qnn.conv2d",
        ),
    ):
        current_conv = inputs[0]
        depthwise_conv2d = current_conv.args[0].args[0].args[0]
        top_conv2d = depthwise_conv2d.args[0].args[0].args[0]
        # The middle conv must be depthwise (groups > 1), the outer two
        # regular, and the current conv unpadded so folding is exact.
        if (
            not any(get_const_tuple(current_conv.attrs.padding))
            and current_conv.attrs.groups == 1
            and depthwise_conv2d.attrs.groups > 1
            and top_conv2d.attrs.groups == 1
        ):
            return _densify_conv_depthwise_conv_pattern(attrs, inputs)
    # Pattern 2: conv2d -> requantize -> cast -> avg_pool2d -> cast
    # -> reshape -> reshape -> dense, matched from the bias_add's input.
    if prev_ops_match(
        inputs[0],
        (
            "qnn.dense",
            "reshape",
            "reshape",
            "cast",
            "nn.avg_pool2d",
            "cast",
            "qnn.requantize",
            "nn.bias_add",
            "qnn.conv2d",
        ),
    ):
        avg_pool = inputs[0].args[0].args[0].args[0].args[0]
        top_requantize = avg_pool.args[0].args[0]
        top_conv2d = top_requantize.args[0].args[0]
        if top_conv2d.attrs.groups == 1:
            return _densify_conv_pool_dense_pattern(attrs, inputs)
    # No pattern matched: leave the op unchanged.
    return None
| 15,910 | 40.543081 | 100 | py |
tvm | tvm-main/python/tvm/topi/arm_cpu/conv2d_gemm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, too-many-locals
# pylint: disable=unused-argument, redefined-builtin
"""GEMM Convolution schedule on ARM"""
import tvm
from tvm.target import Target
from tvm import te
from tvm.topi import nn
from tvm.autotvm.task.space import AnnotateEntity, ReorderEntity, OtherOptionEntity
from ..utils import get_const_tuple, get_const_int
from ..nn.utils import get_pad_tuple
from .tensor_intrin import (
gemm_4x4_int8_int8_int32,
gemm_acc_4x4_int8_int8_int32,
gemm_acc_nx16_int8_int8_int32,
gemm_acc_2x2_int8_int8_int32,
)
def configure_knobs(cfg, M, K, target):
    """Configure auto-tuning knobs for the interleaved strategy.

    Registers the tuning axes for the GEMM loop reordering and for the
    unroll/vectorize annotation of the packed A tile, and seeds default
    entities when no tuning record is available (fallback mode).
    """
    # Tuning axes over the tiled GEMM iteration space: M is tiled by 4
    # rows and K by 16 columns (matching the interleaved compute).
    gemm_rows = cfg.axis(M // 4)
    gemm_cols = cfg.axis(K // 16)
    cfg.define_reorder(
        "reorder_gemm",
        [gemm_rows, gemm_cols],
        policy="candidate",
        candidate=[[gemm_rows, gemm_cols], [gemm_cols, gemm_rows]],
    )
    # Innermost 4x16 tile of A_interleaved: let the tuner try unrolling
    # and/or vectorizing it.
    tile_outer = cfg.axis(4)
    tile_inner = cfg.axis(16)
    cfg.define_annotate(
        "A_interleaved_unroll_vec", [tile_outer, tile_inner], policy="try_unroll_vec"
    )
    # Defaults used when no tuning log entry matches this workload.
    if cfg.is_fallback:
        cfg["reorder_gemm"] = ReorderEntity([0, 1])
        cfg["A_interleaved_unroll_vec"] = AnnotateEntity(["unroll", "vec"])
    # Without dot-product support the generic quantized GEMM intrinsic is
    # used, which exposes an additional unroll knob.
    if not target.features.has_dotprod:
        cfg.define_knob("gemm_quantized_unroll", [True, False])
        if cfg.is_fallback:
            cfg["gemm_quantized_unroll"] = OtherOptionEntity(False)
# Compute function
def compute_conv2d_gemm_without_weight_transform(
    cfg,
    data,
    B_interleaved_t,
    strides,
    padding,
    dilation,
    out_dtype,
    kernel_size,
    output_channels,
    interleave_A,
):
    """Compute conv2d by transforming the input,
    executing GEMM and transforming the output back.

    Pipeline: (optional) spatial pad -> im2col -> (optional) pad M/K up to
    tile multiples -> GEMM against the pre-transformed weights
    ``B_interleaved_t`` (either interleaving A first, or directly) ->
    reshape back to an NHWC convolution output.

    Parameters
    ----------
    cfg :
        autotvm configuration; only consulted for the interleaved strategy.
    data : te.Tensor
        Input activations in NHWC layout (batches, IH, IW, IC).
    B_interleaved_t : te.Tensor
        Weights already transformed into tiles, shape
        (N_transformed, K_tiles, tile_rows_B, tile_cols_B).
    strides : int or tuple of int
        Convolution strides.
    padding : int or tuple
        Padding specification accepted by ``get_pad_tuple``.
    dilation : int or tuple of int
        Kernel dilation.
    out_dtype : str
        Output dtype of the convolution.
    kernel_size : tuple of int
        (KH, KW) of the convolution kernel.
    output_channels : int
        Number of output channels (the GEMM N dimension).
    interleave_A : bool
        If True, pack A into tiles before the GEMM (interleaved strategy);
        otherwise run the GEMM directly on the im2col matrix (native).

    Returns
    -------
    te.Tensor
        Convolution output of shape (batches, OH, OW, OC), dtype out_dtype.
    """
    batches, IH, IW, IC = get_const_tuple(data.shape)
    KH, KW = get_const_tuple(kernel_size)
    OC = get_const_int(output_channels)
    kernel_area = KH * KW
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = get_const_tuple(dilation)
    # Effective kernel extent once dilation is applied.
    dilated_kernel_h = (KH - 1) * dilation_h + 1
    dilated_kernel_w = (KW - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    # Standard convolution output spatial dimensions.
    OH = (IH + pad_top + pad_down - dilated_kernel_h) // HSTR + 1
    OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1
    if pad_top or pad_left:
        data_pad = nn.pad(
            data, [0, pad_top, pad_left, 0], [0, pad_down, pad_right, 0], name="data_pad"
        )
    else:
        data_pad = data
    # Im2col: flatten every receptive field into one row of A, producing a
    # (batches, M, K) matrix with M = OH*OW output pixels and
    # K = IC*KH*KW input values per pixel.
    M = OH * OW
    K = IC * kernel_area
    N = OC
    A_shape = (batches, M, K)
    if kernel_area == 1:
        # 1x1 kernel: im2col degenerates to a plain reshape.
        A = tvm.topi.reshape(data_pad, A_shape)
    else:
        A = te.compute(
            A_shape,
            lambda n, x, y: data_pad[
                n,
                HSTR * (x // OW) + dilation_h * ((y // IC) // KW),
                WSTR * (x % OW) + dilation_w * ((y // IC) % KW),
                y % IC,
            ],
            name="data_im2col",
        )
    # Pad if necessary
    N_transformed = B_interleaved_t.shape[0]
    tile_rows_B = B_interleaved_t.shape[2]
    tile_cols_B = B_interleaved_t.shape[3]
    # Select the tiling strategy for A.
    # The tiling information is chosen to maximize register usage during
    # the tile computation.
    #
    # Please refer to:
    # - https://discuss.tvm.apache.org/t/rfc-improve-quantized-convolution-performance-for-armv8-architectures # pylint: disable=line-too-long
    # - https://discuss.tvm.apache.org/t/rfc-accelerate-quantized-convolution-through-dot-product
    # - https://discuss.tvm.apache.org/t/rfc-improve-quantized-convolution-through-mmla-instruction
    # - Conv2DGemmWeightTransformRel in src/relay/op/nn/convolution.h
    # In order to have more information
    #
    target = Target.current(allow_none=False)
    if target.features.has_matmul_i8:
        # If smmla/ummla is enabled, we are loading 8 rows from A. Each row
        # will contain 8 elements
        tile_rows_A = 8
        tile_cols_A = 8
    elif target.features.has_dotprod and interleave_A:
        # If dot product has been enabled, and we are interleaving A
        # tile size should be 8x4
        tile_rows_A = 8
        tile_cols_A = 4
    else:
        # If either there is no dot product or if we are using a native strategy
        # tile size should be 4x16
        tile_rows_A = 4
        tile_cols_A = 16
    # Round M and K up to multiples of the A tile so the tensorized inner
    # kernels never read out of bounds.
    pad_M = 0
    pad_K = 0
    if M % tile_rows_A != 0:
        pad_M = tile_rows_A - (M % tile_rows_A)
    if K % tile_cols_A != 0:
        pad_K = tile_cols_A - (K % tile_cols_A)
    M_padded = M + pad_M
    K_padded = K + pad_K
    N_padded = N_transformed * tile_rows_B
    pad_before = (0, 0, 0)
    pad_after = (0, pad_M, pad_K)
    if pad_M != 0 or pad_K != 0:
        A = nn.pad(A, pad_before=pad_before, pad_after=pad_after, name="A_padded")
    idxm = tvm.tir.indexmod
    # Shared reduction axis over the padded K dimension of the GEMM.
    k = te.reduce_axis((0, K_padded), "k")
    if interleave_A:
        # Configuration space
        configure_knobs(cfg, M_padded, K_padded, target)
        # Pack the input data: A is tiled into
        # (tile_rows_A x tile_cols_A) blocks to match the intrinsics.
        A_interleaved = te.compute(
            (batches, M_padded // tile_rows_A, K_padded // tile_cols_A, tile_rows_A, tile_cols_A),
            lambda b, x, y, z, w: A[b, z + tile_rows_A * x, w + tile_cols_A * y],
            name="A_interleaved",
        )
        target = Target.current(allow_none=False)
        if target.features.has_matmul_i8:
            # Execute GEMM. In the case of mmla, we need to enforce the tiling
            # from the compute. This is because mmla is doing a tiled computation
            # as well. So we have a big 8x12 tile, with small 2x2 sub-tiles
            # generated by mmla. In theory we could make the tile 2x2 and
            # fuse and split during scheduling, but this would not work
            # because of possible padding
            C_interleaved = te.compute(
                (
                    batches,
                    M_padded // tile_rows_A,
                    N_transformed,
                    tile_rows_A // 2,
                    tile_rows_B // 2,
                    2,
                    2,
                ),
                lambda b, x, y, w, z, s, t: te.sum(
                    A_interleaved[b, x, k // tile_cols_A, 2 * w + s, idxm(k, tile_cols_A)].astype(
                        "int32"
                    )
                    * B_interleaved_t[y, k // tile_cols_B, 2 * z + t, idxm(k, tile_cols_B)].astype(
                        "int32"
                    ),
                    axis=k,
                ),
                name="C_interleaved",
            )
            # Unpack the result back into a plain (batches, M, N) matrix,
            # undoing both the tile and the 2x2 sub-tile indexing.
            C = te.compute(
                (batches, M, N),
                lambda b, x, y: C_interleaved[
                    b,
                    x // tile_rows_A,
                    y // tile_rows_B,
                    idxm(x, tile_rows_A) // 2,
                    idxm(y, tile_rows_B) // 2,
                    idxm(idxm(x, tile_rows_A), 2),
                    idxm(idxm(y, tile_rows_B), 2),
                ].astype(out_dtype),
                name="C",
            )
        else:
            # Execute GEMM
            C_interleaved = te.compute(
                (batches, M_padded // tile_rows_A, N_transformed, tile_rows_A, tile_rows_B),
                lambda b, x, y, w, z: te.sum(
                    A_interleaved[b, x, k // tile_cols_A, w, idxm(k, tile_cols_A)].astype("int32")
                    * B_interleaved_t[y, k // tile_cols_B, z, idxm(k, tile_cols_B)].astype("int32"),
                    axis=k,
                ),
                name="C_interleaved",
            )
            # Unpack the result back into a plain (batches, M, N) matrix.
            C = te.compute(
                (batches, M, N),
                lambda b, x, y: C_interleaved[
                    b,
                    x // tile_rows_A,
                    y // tile_rows_B,
                    idxm(x, tile_rows_A),
                    idxm(y, tile_rows_B),
                ].astype(out_dtype),
                name="C",
            )
        # The unpack step above already restricts reads to the valid
        # region, so the dummy padding reference is just a plain zero here.
        zero = tvm.tir.const(0)
    else:
        # No need to pack/unpack, execute GEMM directly
        C = te.compute(
            (batches, M_padded, N_padded),
            lambda b, x, y: te.sum(
                A[b, x, k].astype("int32")
                * B_interleaved_t[
                    y // tile_rows_B, k // tile_cols_B, idxm(y, tile_rows_B), idxm(k, tile_cols_B)
                ].astype("int32"),
                axis=k,
            ),
            name="C",
        )
        # We need to ensure that infer bound pass does not remove the padding
        # which is necessary for the tensorizations to work. So we need to
        # add a dummy reference to the padding area of the result
        zero = (
            tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1]
            - tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1]
        )
    # Reshape the result into a convolution output (NHWC); "+ zero" keeps
    # the padded area of C alive without changing any value.
    out_shape = (batches, OH, OW, OC)
    out = te.compute(
        out_shape,
        lambda b, x, y, z: (C(b, y + OW * x, z) + zero).astype(out_dtype),
        name="conv2d_gemm_output",
    )
    return out
def schedule_conv2d_gemm_interleaved(cfg, s, out, final_out):
    """Schedule the conv2d_gemm interleaved strategy.

    Walks the compute graph backwards from ``out``
    (conv2d_gemm_output -> C -> C_interleaved -> A_interleaved -> im2col)
    and schedules each stage: vectorizes/inlines the input transform,
    parallelizes the GEMM and tensorizes it with the intrinsic matching
    the target's ISA features (mmla / dot-product / plain ASIMD), and
    vectorizes the output transform when ``out`` is not the final stage.
    """
    C = out.op.input_tensors[0]
    C_interleaved = C.op.input_tensors[0]
    A_interleaved = C_interleaved.op.input_tensors[0]
    # Input transform
    A_interleaved_input = A_interleaved.op.input_tensors[0]
    if A_interleaved_input.op.name == "A_padded":
        # Fold the M/K padding stage into the interleave stage.
        s[A_interleaved_input].compute_at(s[A_interleaved], A_interleaved.op.axis[3])
        s[A_interleaved_input].vectorize(A_interleaved_input.op.axis[2])
        s[A_interleaved_input].compute_inline()
        data_im2col = A_interleaved_input.op.input_tensors[0]
    else:
        data_im2col = A_interleaved_input
    b, m, n = data_im2col.op.axis
    if data_im2col.op.name == "data_im2col":
        # Real im2col stage: vectorize along K in chunks of 16 and
        # parallelize over the fused batch/row dimension.
        n_outer, n_inner = s[data_im2col].split(n, 16)
        s[data_im2col].unroll(n_outer)
        s[data_im2col].vectorize(n_inner)
        b_m_fused = s[data_im2col].fuse(b, m)
        s[data_im2col].parallel(b_m_fused)
    else:
        # 1x1 kernel: the "im2col" is just a reshape, fold it away.
        s[data_im2col].compute_inline()
    # Computation(through tensorize)
    b, xo, yo, xi, yi = C_interleaved.op.axis[0:5]
    # Apply the tuned loop order for the outer GEMM tiles.
    outer_gemm, inner_gemm = cfg["reorder_gemm"].apply(s, C_interleaved, [xo, yo])
    b_outer_gemm_fused = s[C_interleaved].fuse(b, outer_gemm)
    s[C_interleaved].parallel(b_outer_gemm_fused)
    # Interleave A inside the parallel loop, with the tuned
    # unroll/vectorize annotation on its innermost tile.
    s[A_interleaved].compute_at(s[C_interleaved], b_outer_gemm_fused)
    _, _, _, outer_A_interleaved, inner_A_interleaved = A_interleaved.op.axis
    cfg["A_interleaved_unroll_vec"].apply(
        s, A_interleaved, [outer_A_interleaved, inner_A_interleaved]
    )
    in_type = A_interleaved.dtype
    out_type = C.dtype
    k = C_interleaved.op.reduce_axis[0]
    _, M, N = C.shape
    if in_type in ["int8", "uint8"]:
        target = Target.current(allow_none=False)
        if target.features.has_matmul_i8:
            # 2x2 mmla accumulator tiles; the reduction is split by 8 to
            # match the intrinsic's K depth.
            gemm_acc = gemm_acc_2x2_int8_int8_int32(in_type)
            xi_inner, yi_inner = C_interleaved.op.axis[-2:]
            k_outer, k_inner = s[C_interleaved].split(k, 8)
            s[C_interleaved].reorder(
                b_outer_gemm_fused, inner_gemm, k_outer, xi, yi, xi_inner, yi_inner, k_inner
            )
            s[C_interleaved].tensorize(xi_inner, gemm_acc)
            s[C_interleaved].unroll(xi)
            s[C_interleaved].unroll(yi)
        elif target.features.has_dotprod:
            # 4x4 dot-product accumulator; tile the inner axes 8x4 and
            # split the reduction by 4 to match the intrinsic.
            gemm_acc = gemm_acc_4x4_int8_int8_int32(in_type)
            xi_outer, yi_outer, xi_inner, yi_inner = s[C_interleaved].tile(
                xi, yi, x_factor=8, y_factor=4
            )
            k_outer, k_inner = s[C_interleaved].split(k, 4)
            xi_inner_outer, xi_inner_inner = s[C_interleaved].split(xi_inner, 4)
            s[C_interleaved].reorder(
                b_outer_gemm_fused,
                inner_gemm,
                xi_outer,
                yi_outer,
                k_outer,
                xi_inner_outer,
                xi_inner_inner,
                yi_inner,
                k_inner,
            )
            s[C_interleaved].tensorize(xi_inner_inner, gemm_acc)
            s[C_interleaved].unroll(xi_inner_outer)
        elif target.features.has_asimd:
            # Generic ASIMD fallback: full 4x4 quantized GEMM intrinsic
            # with the tuned unroll knob.
            s[C_interleaved].reorder(yi, xi)
            K = A_interleaved_input.shape[2]
            assert in_type in ["int8", "uint8"], "Only int8 and uint8 gemm are supported"
            unroll = cfg["gemm_quantized_unroll"].val
            gemm = gemm_4x4_int8_int8_int32(M, N, K, unroll, in_type)
            s[C_interleaved].tensorize(yi, gemm)
    # Output transform
    if out != final_out:
        n, h, w, c = out.op.axis
        _, inner = s[out].split(c, 4)
        s[C].compute_at(s[out], inner)
        s[out].vectorize(inner)
    return s
def schedule_conv2d_gemm_native(cfg, s, out, final_out):
    """Schedule the conv2d_gemm hybrid strategy.

    Schedules the non-interleaved GEMM: tiles C by 4x16, tensorizes the
    inner 16-wide column loop with the 1-row dot-product accumulator,
    attaches/inlines the im2col (and optional padding) stage, and
    vectorizes the output transform when ``out`` is not the final stage.
    """
    C = out.op.input_tensors[0]
    A = C.op.input_tensors[0]
    in_type = A.dtype
    # Computation
    b, x, y = C.op.axis
    (k,) = C.op.reduce_axis
    # 4x16 output tile with the reduction split by 16, matching the
    # nx16 int8 accumulator intrinsic below.
    k_outer, k_inner = s[C].split(k, 16)
    x_outer, y_outer, x_inner, y_inner = s[C].tile(x, y, x_factor=4, y_factor=16)
    s[C].reorder(b, x_outer, y_outer, k_outer, x_inner, y_inner, k_inner)
    gemm_acc = gemm_acc_nx16_int8_int8_int32(in_type, rows=1)
    s[C].unroll(x_inner)
    s[C].tensorize(y_inner, gemm_acc)
    s[C].parallel(x_outer)
    # Input transform
    if A.op.name == "A_padded":
        padding_A = True
        data_im2col = A.op.input_tensors[0]
    else:
        padding_A = False
        data_im2col = A
    b, m, n = data_im2col.op.axis
    if data_im2col.op.name == "data_im2col":
        # Real im2col stage: vectorize along K in chunks of 16 and
        # parallelize over the row dimension.
        n_outer, n_inner = s[data_im2col].split(n, 16)
        s[data_im2col].unroll(n_outer)
        s[data_im2col].vectorize(n_inner)
        s[data_im2col].parallel(m)
    elif padding_A:
        # 1x1 kernel with padding: inline the reshape, compute the
        # padded A inside the GEMM tile.
        s[data_im2col].compute_inline()
        s[A].compute_at(s[C], x_inner)
    else:
        # 1x1 kernel without padding: compute the reshape in place.
        s[data_im2col].compute_at(s[C], x_inner)
    # Output transform
    if out != final_out:
        n, h, w, c = out.op.axis
        _, inner = s[out].split(c, 4)
        s[out].vectorize(inner)
    return s
| 15,228 | 35.259524 | 142 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.