repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tvm | tvm-main/python/tvm/auto_scheduler/feature.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""""
Python API for Feature extraction. The extracted features vector are used by cost models.
We extract one feature vector per BufferStoreNode statement in a TIR Stmt,
so we call this feature as "per-store" feature.
The cost model also does prediction for each BufferStoreNode statement and aggregates
the predicted score of each BufferStoreNode as the score of a TIR Stmt.
The feature specification is defined by `src/auto_scheduler/feature.cc::FeatureSet`
"""
from typing import List, Tuple, Union, Optional, Dict
import struct
import numpy as np
from .loop_state import State, StateObject
from .measure import MeasureInput, MeasureResult
from . import _ffi_api
from ..tir import PrimFunc
# The maximum number of extracted buffers for one statement
DEFAULT_MAX_N_BUFS = 5
# The length of the feature vector
DEFAULT_FEATURE_VEC_LEN = 164
# The size of int and float in bytes
SIZE_OF_INT32 = 4
SIZE_OF_FLOAT32 = 4


def unpack_feature(byte_arr: bytearray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Decode the packed feature buffer produced by the C++ side.

    Parameters
    ----------
    byte_arr: bytearray
        The two-dimensional feature vectors serialized into one byte array

    Returns
    -------
    features: np.ndarray
        Feature vectors, one object-array entry per record
    normalized_throughputs: np.ndarray
        Normalized throughputs
    task_ids: np.ndarray
        Task ids

    Note
    ----
    To minimize data copies between C++ and Python, the C++ side flattens
    everything into a single packed buffer:

        {
          int n;                          // number of records
          int sizes[n+2];                 // sizes of the arrays below
          float features_i[sizes[i]];     // features of record i, i in [0, n)
          float throughputs[sizes[n]];    // normalized throughputs
          int   task_ids[sizes[n+1]];     // task ids
        }

    Integer values inside the feature payload are stored as floats so the
    whole payload can live in one float array.
    """
    vec_len = DEFAULT_FEATURE_VEC_LEN

    # Header: record count followed by the size table.
    cursor = 0
    (num_records,) = struct.unpack_from("1i", byte_arr, offset=cursor)
    cursor += SIZE_OF_INT32
    sizes = struct.unpack_from(f"{num_records + 2}i", byte_arr, offset=cursor)
    cursor += SIZE_OF_INT32 * (num_records + 2)

    # Per-record payload:
    #   { float n_stmts; float feature_vecs[n_stmts][vec_len]; }
    # so vec_len must equal (size - 1) / n_stmts.
    features = []
    for size in sizes[:-2]:
        if size == 0:
            # Lowering failed for this record; substitute an all-zero row.
            features.append(np.zeros((1, vec_len)))
            continue
        raw_n_stmts = struct.unpack_from("f", byte_arr, offset=cursor)
        cursor += SIZE_OF_FLOAT32
        n_stmts = int(raw_n_stmts[0] + 0.5)  # round the float-encoded count
        stored_vec_len = (size - 1) // n_stmts
        assert (
            stored_vec_len == vec_len
        ), f"The length of feature vector is wrong. Expected {vec_len} but got {stored_vec_len}."
        assert stored_vec_len * n_stmts == size - 1
        rows = []
        for _ in range(n_stmts):
            rows.append(struct.unpack_from(f"{vec_len}f", byte_arr, offset=cursor))
            cursor += vec_len * SIZE_OF_FLOAT32
        features.append(np.array(rows))

    # Trailing arrays: throughputs (float32) then task ids (int32).
    count = sizes[-2]
    normalized_throughputs = struct.unpack_from(f"{count}f", byte_arr, offset=cursor)
    cursor += count * SIZE_OF_FLOAT32
    count = sizes[-1]
    task_ids = struct.unpack_from(f"{count}i", byte_arr, offset=cursor)
    cursor += count * SIZE_OF_INT32
    assert cursor == len(byte_arr), f"{cursor} vs {len(byte_arr)}"

    return np.array(features, dtype=object), np.array(normalized_throughputs), np.array(task_ids)
def get_per_store_features_from_file(
    filename: str, max_lines: int, max_n_bufs: Optional[int] = None
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Extract per-store features from a measurement log file.

    Parameters
    ----------
    filename: str
        The input filename
    max_lines: int
        Only extract the first n lines of the file
    max_n_bufs: Optional[int]
        The maximum number of extracted buffers for one statement

    Returns
    -------
    features: np.ndarray
        Feature vectors
    normalized_throughputs: np.ndarray
        Normalized throughputs
    task_ids: np.ndarray
        Task ids
    """
    # Fall back to the module default when no buffer cap is given.
    n_bufs = max_n_bufs or DEFAULT_MAX_N_BUFS
    packed = _ffi_api.GetPerStoreFeaturesFromFile(filename, max_lines, n_bufs)
    return unpack_feature(packed)
def get_per_store_features_from_measure_pairs(
    inputs: List[MeasureInput],
    results: List[MeasureResult],
    skip_first_n_feature_extraction: int = 0,
    max_n_bufs: Optional[int] = None,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Extract per-store features from measurement input/result pairs.

    Parameters
    ----------
    inputs: List[MeasureInput]
        The measure inputs
    results: List[MeasureResult]
        The measure results
    skip_first_n_feature_extraction: int
        Skip feature extraction for the first n states
    max_n_bufs: Optional[int]
        The maximum number of extracted buffers for one statement

    Returns
    -------
    features: np.ndarray
        Feature vectors
    normalized_throughputs: np.ndarray
        Normalized throughputs
    task_ids: np.ndarray
        Task ids
    """
    # Fall back to the module default when no buffer cap is given.
    n_bufs = max_n_bufs or DEFAULT_MAX_N_BUFS
    packed = _ffi_api.GetPerStoreFeaturesFromMeasurePairs(
        inputs,
        results,
        skip_first_n_feature_extraction,
        n_bufs,
    )
    return unpack_feature(packed)
def get_per_store_features_from_states(
    states: List[Union[State, StateObject]], task: "SearchTask", max_n_bufs: Optional[int] = None
) -> np.ndarray:
    """Get per-store features from states.

    Parameters
    ----------
    states: List[Union[State, StateObject]]
        The input states
    task: SearchTask
        The search task of the input states
    max_n_bufs: Optional[int]
        The maximum number of extracted buffers for one statement

    Returns
    -------
    features: np.ndarray
        Feature vectors

    Raises
    ------
    ValueError
        If the elements of `states` are neither State nor StateObject.
    """
    if isinstance(states[0], State):
        state_objects = [s.state_object for s in states]
    elif isinstance(states[0], StateObject):
        state_objects = states
    else:
        # Previously an unsupported element type fell through and raised a
        # confusing NameError on `state_objects`; fail fast with a clear error.
        raise ValueError(f"Expected State or StateObject, but got {type(states[0])}")
    byte_arr = _ffi_api.GetPerStoreFeaturesFromStates(
        state_objects, task, max_n_bufs or DEFAULT_MAX_N_BUFS
    )
    return unpack_feature(byte_arr)[0]
def get_per_store_feature_names(max_n_bufs: Optional[int] = None) -> List[str]:
    """Return the name of every element in the feature vector.

    Useful for debugging and inspecting extracted features.

    Parameters
    ----------
    max_n_bufs: Optional[int]
        The maximum number of extracted buffers for one statement

    Returns
    -------
    names: List[str]
        The names of elements in the flatten feature vector
    """
    n_bufs = max_n_bufs or DEFAULT_MAX_N_BUFS
    return _ffi_api.GetPerStoreFeatureNames(n_bufs)
def features_from_primfunc(
    func: PrimFunc,
    cache_line_bytes: int = 64,
    max_n_bufs: Optional[int] = None,
    log_scale: bool = False,
) -> Optional[np.ndarray]:
    """Extract performance features from a PrimFunc.

    Parameters
    ----------
    func: PrimFunc
        PrimFunc from which features will be extracted. Each store operation
        to a unique buffer in the function yields one row of features in the
        output.
    cache_line_bytes: int, optional
        Size of a cache line in bytes. Defaults to 64, which is the size for
        most x86 processors.
    max_n_bufs: int, optional
        Maximum number of buffers in generated features. This determines the
        length of the resulting feature vector.
    log_scale: bool
        Whether entries in the feature vector should be scaled by log2(x + 1).
        Defaults to False. Use True when feeding features to a cost model.

    Returns
    -------
    Optional[np.ndarray]
        Output features, one row per store into a unique buffer statement
        in `func`.
    """
    n_bufs = max_n_bufs or DEFAULT_MAX_N_BUFS
    result = _ffi_api.FeaturesFromPrimFunc(func, cache_line_bytes, n_bufs, log_scale)
    return result.numpy()
def named_features_from_primfunc(
    func: PrimFunc,
    cache_line_bytes: int = 64,
    max_n_bufs: Optional[int] = None,
    log_scale: bool = False,
) -> Optional[Dict[str, np.ndarray]]:
    """Extract performance features keyed by feature name from a PrimFunc.

    Parameters
    ----------
    func: PrimFunc
        PrimFunc from which features will be extracted. Each store operation
        to a unique buffer in the function yields one element per feature.
    cache_line_bytes: int, optional
        Size of a cache line in bytes. Defaults to 64, which is the size for
        most x86 processors.
    max_n_bufs: int, optional
        Maximum number of buffers in generated features. This determines the
        length of the resulting feature vector.
    log_scale: bool
        Whether entries in the feature vector should be scaled by log2(x + 1).
        Defaults to False. Use True when feeding features to a cost model.

    Returns
    -------
    Optional[Dict[str, np.ndarray]]
        Mapping from feature name to feature column, one element per store
        into a unique buffer statement in `func`; None when no store
        statements were found.
    """
    features = features_from_primfunc(func, cache_line_bytes, max_n_bufs, log_scale)
    names = get_per_store_feature_names(max_n_bufs)
    if features.shape[0] == 0:
        # No stores, hence no feature rows to name.
        return None
    columns = {}
    for index, name in enumerate(names):
        columns[name] = features[:, index]
    return columns
| 10,794 | 31.712121 | 98 | py |
tvm | tvm-main/python/tvm/auto_scheduler/testing/tune_onnx.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from distutils.util import strtobool
import argparse
import json
import os
import onnx # type: ignore
import tvm
from tvm import auto_scheduler
from tvm import meta_schedule as ms
from tvm import relay
from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc
from tvm.meta_schedule.testing.tune_utils import generate_input_data, create_timer
from tvm.meta_schedule.utils import cpu_count
from tvm.relay.frontend import from_onnx
from tvm.support import describe
def _parse_args():
    """Parse CLI options and derive the target/RPC configuration objects."""
    parser = argparse.ArgumentParser()

    def _required(flag, dtype, help_msg=None):
        # All mandatory options share the same shape; registration order is
        # preserved so `--help` output is unchanged.
        parser.add_argument(flag, type=dtype, required=True, help=help_msg)

    _required("--model-name", str)
    _required("--onnx-path", str)
    _required(
        "--input-shape",
        str,
        'example: `[{"name": "input1", "dtype": "int64", "shape": [1, 1, 8]}]',
    )
    _required("--target", str)
    _required("--num-trials", int)
    _required("--rpc-host", str)
    _required("--rpc-port", int)
    _required("--rpc-key", str)
    _required("--work-dir", str)
    parser.add_argument("--number", type=int, default=3)
    parser.add_argument("--repeat", type=int, default=1)
    parser.add_argument("--min-repeat-ms", type=int, default=100)
    parser.add_argument(
        "--adaptive-training",
        type=lambda x: bool(strtobool(x)),
        help="example: True / False",
        default=True,
    )
    parser.add_argument(
        "--cpu-flush",
        type=lambda x: bool(strtobool(x)),
        help="example: True / False",
        required=True,
    )
    parser.add_argument(
        "--backend",
        type=str,
        choices=["graph", "vm"],
        help="example: graph / vm",
        required=True,
    )
    namespace = parser.parse_args()
    # Post-process raw strings into structured objects.
    namespace.target = tvm.target.Target(namespace.target)
    namespace.input_shape = json.loads(namespace.input_shape)
    namespace.rpc_config = ms.runner.RPCConfig(
        tracker_host=namespace.rpc_host,
        tracker_port=namespace.rpc_port,
        tracker_key=namespace.rpc_key,
        session_timeout_sec=600,
    )
    return namespace
ARGS = _parse_args()  # parsed once at import time; used throughout this script
def main():
    """Tune an ONNX model with auto-scheduler over RPC, compile, and benchmark it."""
    # All tuning records for this model are appended to one JSON log.
    log_file = os.path.join(ARGS.work_dir, f"{ARGS.model_name}.json")
    # Measurements run on remote devices registered with the RPC tracker.
    runner = auto_scheduler.RPCRunner(
        key=ARGS.rpc_key,
        host=ARGS.rpc_host,
        port=ARGS.rpc_port,
        n_parallel=cpu_count(logical=True),
        number=ARGS.number,
        repeat=ARGS.repeat,
        min_repeat_ms=ARGS.min_repeat_ms,
        enable_cpu_cache_flush=ARGS.cpu_flush,
        timeout=ARGS.rpc_config.session_timeout_sec,
    )
    # Hardware parameters are derived from target attributes per backend kind.
    if ARGS.target.kind.name == "llvm":
        hardware_params = auto_scheduler.HardwareParams(
            num_cores=int(ARGS.target.attrs["num-cores"]),
            target=ARGS.target,
        )
    elif ARGS.target.kind.name == "cuda":
        hardware_params = auto_scheduler.HardwareParams(
            num_cores=-1,
            vector_unit_bytes=16,
            cache_line_bytes=64,
            max_shared_memory_per_block=int(ARGS.target.attrs["max_shared_memory_per_block"]),
            max_threads_per_block=int(ARGS.target.attrs["max_threads_per_block"]),
            # The value `max_local_memory_per_block` is not used in AutoScheduler,
            # but is required by the API.
            max_local_memory_per_block=12345678,
            max_vthread_extent=8,
            warp_size=32,
        )
    else:
        raise NotImplementedError(f"Unsupported target {ARGS.target}")
    describe()
    print(f"Workload: {ARGS.model_name}")
    # Import the ONNX graph into Relay, freezing weights as constants.
    onnx_model = onnx.load(ARGS.onnx_path)
    shape_dict = {}
    for item in ARGS.input_shape:
        print(f" input_name : {item['name']}")
        print(f" input_shape: {item['shape']}")
        print(f" input_dtype: {item['dtype']}")
        shape_dict[item["name"]] = item["shape"]
    mod, params = from_onnx(onnx_model, shape_dict, freeze_params=True)
    input_data = {
        item["name"]: generate_input_data(item["shape"], item["dtype"]) for item in ARGS.input_shape
    }
    with ms.Profiler() as profiler:
        tasks, task_weights = auto_scheduler.extract_tasks(
            mod["main"],
            params,
            target=ARGS.target,
            hardware_params=hardware_params,
        )
        for idx, (task, task_weight) in enumerate(zip(tasks, task_weights)):
            print(
                f"==== Task {idx}: {task.desc} "
                f"(weight {task_weight} key: {task.workload_key}) ====="
            )
            print(task.compute_dag)
        if ARGS.num_trials > 0:
            tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
            tuner.tune(
                auto_scheduler.TuningOptions(
                    num_measure_trials=ARGS.num_trials,
                    runner=runner,
                    measure_callbacks=[
                        auto_scheduler.RecordToFile(log_file),
                    ],
                ),
                adaptive_training=ARGS.adaptive_training,
            )
        # Compile with the best schedules found during tuning.
        relay_build = {"graph": relay.build, "vm": relay.vm.compile}[ARGS.backend]
        with auto_scheduler.ApplyHistoryBest(log_file):
            with tvm.transform.PassContext(
                opt_level=3,
                config={"relay.backend.use_auto_scheduler": True},
            ):
                lib = relay_build(
                    mod,
                    target=ARGS.target,
                    params=params,
                )
    print("Tuning Time:")
    print(profiler.table())
    # Benchmark the compiled module on the remote device.
    run_module_via_rpc(
        rpc_config=ARGS.rpc_config,
        lib=lib,
        dev_type=ARGS.target.kind.name,
        args=input_data,
        continuation=create_timer(ARGS.backend),
        backend=ARGS.backend,
    )
if __name__ == "__main__":
    main()
| 7,050 | 29.004255 | 100 | py |
tvm | tvm-main/python/tvm/auto_scheduler/testing/tune_te.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from distutils.util import strtobool
import argparse
import os
import tvm
from tvm import auto_scheduler
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.te_workload import CONFIGS
from tvm.meta_schedule.utils import cpu_count
from tvm.support import describe
def _parse_args():
    """Parse CLI options and derive the target/RPC configuration objects."""
    parser = argparse.ArgumentParser()
    # The mandatory options all share the same shape; registration order is
    # preserved so `--help` output is unchanged.
    for flag, dtype in (
        ("--workload", str),
        ("--target", str),
        ("--num-trials", int),
        ("--rpc-host", str),
        ("--rpc-port", int),
        ("--rpc-key", str),
        ("--work-dir", str),
    ):
        parser.add_argument(flag, type=dtype, required=True)
    parser.add_argument("--number", type=int, default=3)
    parser.add_argument("--repeat", type=int, default=1)
    parser.add_argument("--min-repeat-ms", type=int, default=100)
    parser.add_argument(
        "--adaptive-training",
        type=lambda x: bool(strtobool(x)),
        required=False,
        help="example: True / False",
        default=True,
    )
    parser.add_argument(
        "--cpu-flush",
        type=lambda x: bool(strtobool(x)),
        help="example: True / False",
        required=True,
    )
    namespace = parser.parse_args()
    # Post-process raw strings into structured objects.
    namespace.target = tvm.target.Target(namespace.target)
    namespace.rpc_config = ms.runner.RPCConfig(
        tracker_host=namespace.rpc_host,
        tracker_port=namespace.rpc_port,
        tracker_key=namespace.rpc_key,
        session_timeout_sec=60,
    )
    return namespace
ARGS = _parse_args()  # parsed once at import time; used throughout this script
def main():
    """Tune a single TE workload with auto-scheduler over RPC and show the best schedule."""
    # All tuning records for this workload are appended to one JSON log.
    log_file = os.path.join(ARGS.work_dir, f"{ARGS.workload}.json")
    # Measurements run on remote devices registered with the RPC tracker.
    runner = auto_scheduler.RPCRunner(
        key=ARGS.rpc_key,
        host=ARGS.rpc_host,
        port=ARGS.rpc_port,
        n_parallel=cpu_count(logical=True),
        number=ARGS.number,
        repeat=ARGS.repeat,
        min_repeat_ms=ARGS.min_repeat_ms,
        enable_cpu_cache_flush=ARGS.cpu_flush,
        timeout=ARGS.rpc_config.session_timeout_sec,
    )
    # Hardware parameters are derived from target attributes per backend kind.
    if ARGS.target.kind.name == "llvm":
        hardware_params = auto_scheduler.HardwareParams(
            num_cores=int(ARGS.target.attrs["num-cores"]),
            target=ARGS.target,
        )
    elif ARGS.target.kind.name == "cuda":
        hardware_params = auto_scheduler.HardwareParams(
            num_cores=-1,
            vector_unit_bytes=16,
            cache_line_bytes=64,
            max_shared_memory_per_block=int(ARGS.target.attrs["max_shared_memory_per_block"]),
            max_threads_per_block=int(ARGS.target.attrs["max_threads_per_block"]),
            # The value `max_local_memory_per_block` is not used in AutoScheduler,
            # but is required by the API.
            max_local_memory_per_block=12345678,
            max_vthread_extent=8,
            warp_size=32,
        )
    else:
        raise NotImplementedError(f"Unsupported target {ARGS.target}")
    describe()
    print(f"Workload: {ARGS.workload}")
    with ms.Profiler() as profiler:
        # Same as MetaSchedule Tune TE
        # Does not count ApplyHistoryBest time
        workload_func, params = CONFIGS[ARGS.workload]
        params = params[0]  # type: ignore
        workload_func = auto_scheduler.register_workload(workload_func)
        task = auto_scheduler.SearchTask(
            func=workload_func,
            args=params,
            target=ARGS.target,
            hardware_params=hardware_params,
        )
        # Inspect the computational graph
        print("Computational DAG:")
        print(task.compute_dag)
        tune_option = auto_scheduler.TuningOptions(
            num_measure_trials=ARGS.num_trials,
            measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
            verbose=2,
            runner=runner,
        )
        if ARGS.num_trials > 0:
            print("Running AutoTuning:")
            task.tune(tune_option, adaptive_training=ARGS.adaptive_training)
    print("Tuning Time:")
    print(profiler.table())
    # Replay the best record from the log and show the resulting schedule.
    print("History Best:")
    print(task.print_best(log_file))
    sch, args = task.apply_best(log_file)
    print("Lowered TIR:")
    print(tvm.lower(sch, args, simple_mode=True))
if __name__ == "__main__":
    main()
| 5,383 | 27.791444 | 94 | py |
tvm | tvm-main/python/tvm/auto_scheduler/testing/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import, redefined-builtin
"""Testing utilities in auto scheduler."""
# NOTE: Do not import any module here by default
| 929 | 43.285714 | 62 | py |
tvm | tvm-main/python/tvm/auto_scheduler/testing/tune_relay.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import json
import os
from distutils.util import strtobool
import tvm
from tvm import auto_scheduler
from tvm import meta_schedule as ms
from tvm import relay
from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc
from tvm.meta_schedule.testing.relay_workload import get_network
from tvm.meta_schedule.testing.tune_utils import create_timer, generate_input_data
from tvm.meta_schedule.utils import cpu_count
from tvm.support import describe
def _parse_args():
    """Parse CLI options and derive the target/RPC configuration objects."""
    parser = argparse.ArgumentParser()
    # The mandatory string/int options all share the same shape; registration
    # order is preserved so `--help` output is unchanged.
    for flag, dtype in (
        ("--workload", str),
        ("--input-shape", str),
        ("--target", str),
        ("--num-trials", int),
        ("--rpc-host", str),
        ("--rpc-port", int),
        ("--rpc-key", str),
        ("--work-dir", str),
    ):
        parser.add_argument(flag, type=dtype, required=True)
    parser.add_argument("--layout", type=str, default=None)
    parser.add_argument("--cache-dir", type=str, default=None)
    parser.add_argument("--number", type=int, default=3)
    parser.add_argument("--repeat", type=int, default=1)
    parser.add_argument("--min-repeat-ms", type=int, default=100)
    parser.add_argument(
        "--adaptive-training",
        type=lambda x: bool(strtobool(x)),
        help="example: True / False",
        default=True,
    )
    parser.add_argument(
        "--cpu-flush",
        type=lambda x: bool(strtobool(x)),
        help="example: True / False",
        required=True,
    )
    parser.add_argument(
        "--backend",
        type=str,
        choices=["graph", "vm"],
        help="example: graph / vm",
        required=True,
    )
    namespace = parser.parse_args()
    # Post-process raw strings into structured objects.
    namespace.target = tvm.target.Target(namespace.target)
    namespace.input_shape = json.loads(namespace.input_shape)
    namespace.rpc_config = ms.runner.RPCConfig(
        tracker_host=namespace.rpc_host,
        tracker_port=namespace.rpc_port,
        tracker_key=namespace.rpc_key,
        session_timeout_sec=600,
    )
    return namespace
ARGS = _parse_args()  # parsed once at import time; used throughout this script
def main():
    """Tune a Relay network with auto-scheduler over RPC, compile, and benchmark it."""
    # All tuning records for this workload are appended to one JSON log.
    log_file = os.path.join(ARGS.work_dir, f"{ARGS.workload}.json")
    # Measurements run on remote devices registered with the RPC tracker.
    runner = auto_scheduler.RPCRunner(
        key=ARGS.rpc_key,
        host=ARGS.rpc_host,
        port=ARGS.rpc_port,
        n_parallel=cpu_count(logical=True),
        number=ARGS.number,
        repeat=ARGS.repeat,
        min_repeat_ms=ARGS.min_repeat_ms,
        enable_cpu_cache_flush=ARGS.cpu_flush,
        timeout=ARGS.rpc_config.session_timeout_sec,
    )
    # Hardware parameters are derived from target attributes per backend kind.
    if ARGS.target.kind.name == "llvm":
        hardware_params = auto_scheduler.HardwareParams(
            num_cores=int(ARGS.target.attrs["num-cores"]),
            target=ARGS.target,
        )
    elif ARGS.target.kind.name == "cuda":
        hardware_params = auto_scheduler.HardwareParams(
            num_cores=-1,
            vector_unit_bytes=16,
            cache_line_bytes=64,
            max_shared_memory_per_block=int(ARGS.target.attrs["max_shared_memory_per_block"]),
            max_threads_per_block=int(ARGS.target.attrs["max_threads_per_block"]),
            # The value `max_local_memory_per_block` is not used in AutoScheduler,
            # but is required by the API.
            max_local_memory_per_block=12345678,
            max_vthread_extent=8,
            warp_size=32,
        )
    else:
        raise NotImplementedError(f"Unsupported target {ARGS.target}")
    describe()
    print(f"Workload: {ARGS.workload}")
    # Fetch (and possibly cache) the network plus its input specification.
    mod, params, (input_name, input_shape, input_dtype) = get_network(
        ARGS.workload,
        ARGS.input_shape,
        layout=ARGS.layout,
        cache_dir=ARGS.cache_dir,
    )
    input_info = [
        {
            "name": input_name,
            "shape": input_shape,
            "dtype": input_dtype,
        },
    ]
    input_data = {
        item["name"]: generate_input_data(item["shape"], item["dtype"]) for item in input_info
    }
    for item in input_info:
        print(f" input_name : {item['name']}")
        print(f" input_shape: {item['shape']}")
        print(f" input_dtype: {item['dtype']}")
    with ms.Profiler() as profiler:
        with ms.Profiler.timeit("TaskExtraction"):
            tasks, task_weights = auto_scheduler.extract_tasks(
                mod["main"],
                params,
                target=ARGS.target,
                hardware_params=hardware_params,
            )
            for idx, (task, task_weight) in enumerate(zip(tasks, task_weights)):
                print(
                    f"==== Task {idx}: {task.desc} "
                    f"(weight {task_weight} key: {task.workload_key}) ====="
                )
                print(task.compute_dag)
        with ms.Profiler.timeit("Tuning"):
            if ARGS.num_trials > 0:
                tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
                tuner.tune(
                    auto_scheduler.TuningOptions(
                        num_measure_trials=ARGS.num_trials,
                        runner=runner,
                        measure_callbacks=[
                            auto_scheduler.RecordToFile(log_file),
                        ],
                    ),
                    adaptive_training=ARGS.adaptive_training,
                )
        # Compile with the best schedules found during tuning.
        relay_build = {"graph": relay.build, "vm": relay.vm.compile}[ARGS.backend]
        with ms.Profiler.timeit("PostTuningCompilation"):
            with auto_scheduler.ApplyHistoryBest(log_file):
                with tvm.transform.PassContext(
                    opt_level=3,
                    config={"relay.backend.use_auto_scheduler": True},
                ):
                    lib = relay_build(
                        mod,
                        target=ARGS.target,
                        params=params,
                    )
    print("Tuning Time:")
    print(profiler.table())
    # Benchmark the compiled module on the remote device.
    run_module_via_rpc(
        rpc_config=ARGS.rpc_config,
        lib=lib,
        dev_type=ARGS.target.kind.name,
        args=input_data,
        continuation=create_timer(ARGS.backend),
        backend=ARGS.backend,
    )
if __name__ == "__main__":
    main()
| 7,469 | 28.88 | 94 | py |
tvm | tvm-main/python/tvm/auto_scheduler/cost_model/xgb_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Cost model based on xgboost"""
import multiprocessing
import logging
from typing import Dict
from collections import defaultdict
import numpy as np
from tvm.autotvm.tuner.metric import max_curve
from .cost_model import PythonBasedModel
from ..feature import get_per_store_features_from_measure_pairs, get_per_store_features_from_states
from ..measure_record import RecordReader
try:
    from xgboost.callback import TrainingCallback  # type: ignore
except ImportError:
    # xgboost is optional at import time; provide a stub base class so this
    # module still imports when it is absent (XGBModel.__init__ raises a real
    # ImportError when the model is actually used).
    class TrainingCallback:  # type: ignore
        pass
# Lazily imported by XGBModel.__init__ to avoid a hard dependency on xgboost.
xgb = None
logger = logging.getLogger("auto_scheduler")
class XGBDMatrixContext:
    """A global context to hold additional attributes of xgb.DMatrix"""

    def __init__(self):
        # attribute name -> {matrix handle value -> attribute value}
        self.context_dict = defaultdict(dict)

    def get(self, key, matrix, default=None):
        """Look up attribute `key` stored for a xgb.DMatrix.

        Parameters
        ----------
        key: str
            The name of the attribute
        matrix: xgb.DMatrix
            The matrix whose attribute is queried
        default: Optional[Any]
            The value to return when the attribute is absent
        """
        per_matrix = self.context_dict[key]
        return per_matrix.get(matrix.handle.value, default)

    def set(self, key, matrix, value):
        """Store attribute `key` for a xgb.DMatrix.

        Parameters
        ----------
        key: str
            The name of the attribute
        matrix: xgb.DMatrix
            The matrix whose attribute is set
        value: Optional[Any]
            The new value
        """
        per_matrix = self.context_dict[key]
        per_matrix[matrix.handle.value] = value
dmatrix_context = XGBDMatrixContext()
class XGBModel(PythonBasedModel):
"""Train a XGBoost model to predict the normalized throughputs of programs.
Let the normalized throughput be the score of a program (higher is better). We predict
the (approximate) score of a program = the sum of the scores of all stages in this program.
i.e. score(P) = score_s0 + score_s1 + ... + score_sn,
where score_si is the score of Stage i in Program P.
We extract feature for each stage and let the xgboost predict the score for each stage.
We then sum up the predictions as the score of the whole program.
We use RMSE as the loss function. i.e. loss(P, y) = 1/2 * (score(P) - y)^2,
where P is the program and y is the normalized throughput according to
the ground truth (measurement).
XGBoost does not support this loss function because `score(P)` is a sum of the prediction
of several samples, so we implemented a custom loss function and call it pack-sum-rmse.
It is called "pack-sum" because we combine several samples into a "pack" and sum up
their predictions.
Parameters
----------
verbose_eval: int = 25
Print training log every `verbose_eval` iterations.
num_warmup_sample: int = 100
The minimum number of samples to start to use the trained model.
If the number of samples is less than this number, the model outputs random predictions.
seed: Optional[int]
The random seed
model_file: Optional[str]
If is not None, save model to this file after every update.
adaptive_training: bool = False
Whether to use adaptive training, which reduces the training frequency when there are
too many logs.
"""
    def __init__(
        self,
        verbose_eval=25,
        num_warmup_sample=100,
        seed=None,
        model_file=None,
        adaptive_training=False,
    ):
        """Lazily import xgboost, then set up hyper-parameters and caches."""
        global xgb
        try:
            # Import only on first construction so the module can be imported
            # without xgboost installed.
            if xgb is None:
                xgb = __import__("xgboost")
        except ImportError:
            # add "from Node" to silence
            # "During handling of the above exception, another exception occurred"
            raise ImportError(
                "XGBoost is required for XGBModel. "
                "Please install its python package first. "
                "Help: (https://xgboost.readthedocs.io/en/latest/) "
            ) from None
        self.xgb_params = {
            "max_depth": 10,
            "gamma": 0.001,
            "min_child_weight": 0,
            "eta": 0.2,
            # todo(merrymercy): automatically decrease learning rate when the loss is too large
            "n_gpus": 0,
            "nthread": multiprocessing.cpu_count() // 2,
            "verbosity": 0,
            "seed": seed or 43,
            # A custom evaluation metric (pack-sum-rmse) is supplied at train time.
            "disable_default_eval_metric": 1,
        }
        self.bst = None
        self.plan_size = 32
        self.num_warmup_sample = num_warmup_sample
        self.verbose_eval = verbose_eval
        self.model_file = model_file
        self.adaptive_training = adaptive_training
        super().__init__()
        # cache measurement input/result pairs and extracted features
        self.inputs = []
        self.results = []
        self.last_train_length = 0
        self.inputs_feature_cache = []
    def update(self, inputs, results):
        """Update the cost model according to new measurement results (training data).
        XGBoost does not support incremental training, so we re-train a new model every time.
        Parameters
        ----------
        inputs : List[MeasureInput]
            The measurement inputs
        results : List[MeasureResult]
            The measurement results
        """
        if len(inputs) <= 0:
            return
        assert len(inputs) == len(results)
        self.inputs.extend(inputs)
        self.results.extend(results)
        if (
            self.adaptive_training
            and len(self.inputs) - self.last_train_length < self.last_train_length / 5
        ):
            # Set a training threshold related to `last_train_length` to reduce the training
            # overhead when there're too many logs
            return
        self.last_train_length = len(self.inputs)
        # extract feature
        n_cached = len(self.inputs_feature_cache)
        features, normalized_throughputs, task_ids = get_per_store_features_from_measure_pairs(
            self.inputs, self.results, skip_first_n_feature_extraction=n_cached
        )
        if n_cached > 0:
            # Reuse previously extracted features for the first `n_cached` inputs;
            # only the newly appended inputs were re-extracted above.
            features = list(features)
            features[:n_cached] = self.inputs_feature_cache
            # dtype=object because different samples have different numbers of feature rows
            features = np.array(features, dtype=object)
        self.inputs_feature_cache = features
        # NOTE: the normalized throughputs serve both as labels and as sample weights here.
        dtrain = pack_sum_xgbmatrix(
            features, normalized_throughputs, task_ids, normalized_throughputs
        )
        # train xgb model
        self.bst = xgb.train(
            self.xgb_params,
            dtrain,
            num_boost_round=10000,
            obj=pack_sum_square_error,
            callbacks=[
                CustomCallback(
                    stopping_rounds=50,
                    metric="tr-p-rmse",
                    fevals=[pack_sum_rmse, pack_sum_average_peak_score(self.plan_size)],
                    evals=[(dtrain, "tr")],
                    maximize=False,
                    verbose_eval=self.verbose_eval,
                )
            ],
        )
        # Update the model file if it has been set
        if self.model_file:
            self.save(self.model_file)
    def predict(self, task, states):
        """Predict the scores of states
        Parameters
        ----------
        task : SearchTask
            The search task of states
        states : List[State]
            The input states
        Returns
        -------
        scores: List[float]
            The predicted scores for all states.
            Invalid states (whose feature vectors are all zero) are scored -inf.
        """
        features = get_per_store_features_from_states(states, task)
        if self.bst is not None and len(self.inputs) > self.num_warmup_sample:
            dtest, pack_ids = feature_to_pack_sum_xgbmatrix(features)
            raw_preds = self.bst.predict(dtest)
            ret = predict_throughput_pack_sum(raw_preds, pack_ids)
        else:
            # Not enough training data yet: fall back to random scores.
            ret = np.random.uniform(0, 1, (len(states),))
        # Predict -inf for invalid states that failed to be lowered.
        for idx, feature in enumerate(features):
            if feature.min() == feature.max() == 0:
                ret[idx] = float("-inf")
        return ret
    def predict_stages(self, task, states):
        """Predict the scores of all stages in states. This is the breakdown version of `predict`.
        Parameters
        ----------
        task : SearchTask
            The search task of states
        states : List[State]
            The input states
        Returns
        -------
        scores: List[float]
            The predicted scores for all stages in all states in the packed format
        Note
        ----
        For faster data copy between c++ and python, the python part returns scores in a
        single flatten array using a packed format. The c++ part then unpacks the flatten array.
        The packed format is:
        {
          float  scores[N];                 // scores[i] is the score for states[i].
          int    n_stage_0;                 // the number of stages in states[0]
          float  stage_scores_0[n_stage_0]; // the scores for all stages in states[0]
          int    n_stage_1;                 // the number of stages in states[1]
          float  stage_scores_1[n_stage_1]; // the scores for all stages in states[1]
          ...
          int    n_stage_i;                 // the number of stages in states[i]
          float  stage_scores_i[n_stage_i]; // the scores for all stages in states[i]
          ...  // until i == N - 1
        }
        To implement this format, we also store int as float, so we can store all numbers
        into a single float array.
        """
        features = get_per_store_features_from_states(states, task)
        if self.bst is not None and len(self.inputs) > self.num_warmup_sample:
            dtest, pack_ids = feature_to_pack_sum_xgbmatrix(features)
            raw_preds = self.bst.predict(dtest)
            # Per-state scores first (sum over each pack) ...
            breakdown = predict_throughput_pack_sum(raw_preds, pack_ids)
            # ... then group the raw per-stage predictions back by state.
            stage_scores = [[] for _ in range(len(states))]
            for pred, pack_id in zip(raw_preds, pack_ids):
                stage_scores[pack_id].append(pred)
            for idx, stage_score in enumerate(stage_scores):
                # Append the stage count (stored as float) followed by the stage scores.
                breakdown = np.append(breakdown, len(stage_score))
                breakdown = np.concatenate((breakdown, np.array(stage_score)))
        else:
            # No trained model yet: random state scores and zero stage counts.
            breakdown = np.concatenate(
                (np.random.uniform(0, 1, (len(states),)), np.zeros(len(states)))
            )
        # Predict -inf for invalid states that failed to be lowered.
        for idx, feature in enumerate(features):
            if feature.min() == feature.max() == 0:
                breakdown[idx] = float("-inf")
        return breakdown
    def update_from_file(self, file_name, n_lines=None):
        """Load measure records from a log file to update the cost model.
        This function can be used to pre-train the cost model with history log files.
        Parameters
        ----------
        file_name: str
            The filename
        n_lines: Optional[int]
            Only load first n lines of the log file
        """
        inputs, results = RecordReader(file_name).read_lines(n_lines)
        logger.info("XGBModel: Loaded %s measurement records from %s", len(inputs), file_name)
        # Re-train on the union of cached records and the newly loaded ones.
        self.update(inputs, results)
    def save(self, file_name: str):
        """Save the model to a file
        Parameters
        ----------
        file_name: str
            The filename
        Note
        ----
        Requires a trained or loaded booster; `self.bst` must not be None.
        """
        self.bst.save_model(file_name)
    def load(self, file_name: str):
        """Load the model from a file
        Parameters
        ----------
        file_name: str
            The filename
        """
        if self.bst is None:
            self.bst = xgb.Booster(self.xgb_params)
        self.bst.load_model(file_name)
        # A loaded model is immediately usable: num_warmup_sample = -1 disables
        # the random-prediction warm-up phase in `predict` / `predict_stages`.
        self.num_warmup_sample = -1
def feature_to_pack_sum_xgbmatrix(xs):
    """Convert an extracted multi-stage feature vector to an xgb DMatrix in pack-sum format.
    Parameters
    ----------
    xs: np.ndarray
        The feature vectors; each element holds the per-stage feature rows of one sample.
    Returns
    -------
    dmatrix: xgb.DMatrix
        The DMatrix whose rows are all stage rows of all samples, concatenated.
    pack_ids: List[int]
        For every row of the DMatrix, the index of the sample ("pack") it came from.
    """
    # Flatten every sample's per-stage rows into a single matrix while
    # remembering, per row, which sample it belongs to so that predictions
    # can later be summed per pack.
    x_flatten = [row for sample in xs for row in sample]
    pack_ids = [sample_idx for sample_idx, sample in enumerate(xs) for _ in sample]
    return xgb.DMatrix(np.array(x_flatten)), pack_ids
def pack_sum_xgbmatrix(xs, ys, gids=None, weights=None):
    """Convert (feature, label) pairs into a xgb matrix with pack-sum format
    Parameters
    ----------
    xs: np.ndarray
        The feature vector
    ys: np.ndarray
        The normalized throughput
    gids: Optional[List[int]]
        Group id (task id)
    weights: Optional[np.ndarray]
        The weight of samples
    Returns
    -------
    dmatrix: xgb.DMatrix
        The DMatrix with pack-sum information
    """
    if gids is not None:
        # sort by group
        indices = gids.argsort()
        xs, ys = xs[indices], ys[indices]
        group_sizes = np.bincount(gids)
        if weights is not None:
            weights = weights[indices]
    else:
        # assume it has only one group
        group_sizes = [len(xs)]
    # Flatten per-stage feature rows; the sample's label (and weight, if any)
    # is replicated onto every one of its rows.
    x_flatten = []
    y_flatten = []
    weights_flatten = []
    pack_ids = []
    if weights is not None:
        for ct, (x, y, w) in enumerate(zip(xs, ys, weights)):
            for row in x:
                x_flatten.append(row)
                y_flatten.append(y)
                weights_flatten.append(w)
                pack_ids.append(ct)
    else:
        for ct, (x, y) in enumerate(zip(xs, ys)):
            for row in x:
                x_flatten.append(row)
                y_flatten.append(y)
                pack_ids.append(ct)
    ret = xgb.DMatrix(np.array(x_flatten), y_flatten)
    if weights is not None:
        ret.set_weight(weights_flatten)
    # Attach pack/group metadata to the DMatrix via the module-level context,
    # for use by the custom objective and evaluation functions.
    dmatrix_context.set("pack_ids", ret, np.array(pack_ids))
    dmatrix_context.set("group_sizes", ret, group_sizes)
    return ret
def predict_throughput_pack_sum(raw_preds, pack_ids):
    """Predict the throughputs for predictions in pack-sum format.
    Parameters
    ----------
    raw_preds: np.ndarray
        The raw per-row predictions
    pack_ids: List[int]
        The pack id for each prediction row
    Returns
    -------
    throughputs: np.ndarray
        The per-pack throughput (sum of the predictions of each pack's rows)
    """
    # bincount with weights performs the grouped sum in a single pass.
    return np.bincount(pack_ids, weights=raw_preds)
def pack_sum_square_error(preds, dtrain):
    """Implement square error loss on pack-sum format as
    a custom objective function for xgboost.
    Parameters
    ----------
    preds: np.ndarray
        The predictions
    dtrain: xgb.DMatrix
        The training set
    Returns
    -------
    gradient: np.ndarray
    hessian: np.ndarray
        gradient and hessian according to the xgboost format
    """
    pack_ids = dmatrix_context.get("pack_ids", dtrain)
    # A sample's score is the sum of its rows' predictions; broadcast the
    # pooled score back onto every row of the pack before differencing.
    pooled = np.bincount(pack_ids, weights=preds)[pack_ids]
    gradient = pooled - dtrain.get_label()
    hessian = np.ones_like(gradient)
    weight = dtrain.get_weight()
    if len(weight) > 0:
        gradient = gradient * weight
        hessian = hessian * weight
    return gradient, hessian
def pack_sum_rmse(raw_preds, labels):
    """Evaluate RMSE (rooted mean square error) in the pack-sum format.
    Parameters
    ----------
    raw_preds: np.ndarray
        The raw prediction
    labels: xgb.DMatrix
        The ground-truth label matrix
    Returns
    -------
    name: str
    score: float
        The name and score of this metric
    """
    pack_ids = dmatrix_context.get("pack_ids", labels)
    # Pool raw per-row predictions into per-pack scores and broadcast each
    # pack's score back onto its rows for a row-wise comparison with labels.
    row_preds = predict_throughput_pack_sum(raw_preds, pack_ids)[pack_ids]
    errors = row_preds - labels.get_label()
    return "p-rmse", np.sqrt(np.mean(np.square(errors)))
def pack_sum_average_peak_score(N):
    """Return the evaluation function for average-peak-score@N
    Parameters
    ----------
    N: int
        The "N" in "average-peak-score@N"
    Returns
    -------
    The evaluation function
    """
    def feval(preds, labels):
        """Evaluate average-peak-score@N in the pack-sum format
        Parameters
        ----------
        preds: np.ndarray
            The raw prediction
        labels: xgb.DMatrix
            The ground-truth label matrix
        Returns
        -------
        name: str
        score: float
            The name and score of this metric
        """
        group_sizes = dmatrix_context.get("group_sizes", labels, [len(preds)])
        pack_ids = dmatrix_context.get("pack_ids", labels)
        preds = predict_throughput_pack_sum(preds, pack_ids)
        # Labels were replicated onto every row of a pack; averaging the rows
        # of each pack recovers the original per-sample label.
        labels = (
            np.bincount(pack_ids, weights=labels.get_label())
            / np.unique(pack_ids, return_counts=True)[1]
        )
        scores = []
        offset = 0
        # Packs are ordered by group (see pack_sum_xgbmatrix), so each group
        # occupies a contiguous slice of `preds` / `labels`.
        for size in group_sizes:
            preds_group = preds[offset : offset + size]
            labels_group = labels[offset : offset + size]
            offset += size
            # Take the labels of the top-N samples ranked by predicted score,
            # normalized by the group's best label.
            # NOTE(review): max_curve is defined elsewhere — presumably a running
            # maximum over the trial scores; confirm in its definition.
            trials = np.argsort(preds_group)[::-1][:N]
            trial_scores = labels_group[trials]
            curve = max_curve(trial_scores) / np.max(labels_group)
            scores.append(np.mean(curve))
        return f"a-peak@{N}", np.mean(scores)
    return feval
class XGBoostCallback(TrainingCallback):
    """Base class for XGBoost callbacks."""
    def __call__(self, env: "xgb.core.CallbackEnv"):
        # Compatibility with xgboost < 1.3: old-style callbacks are invoked as
        # plain callables with a CallbackEnv; forward to the new-style hook.
        return self.after_iteration(env.model, env.iteration, env.evaluation_result_list)
    def after_iteration(self, model: "xgb.Booster", epoch: int, evals_log: Dict):
        """Hook run after each boosting iteration; subclasses must override."""
        raise NotImplementedError
class CustomCallback(XGBoostCallback):
    """
    Callback function for xgboost.
    Support custom evaluation function and early-stopping.
    Parameters
    ----------
    stopping_rounds: int
        Stop training if the target metric has not improved for this many epochs.
    metric: str
        The name of the metric used for early stopping (e.g. "tr-p-rmse").
    fevals: List[Callable]
        Custom evaluation functions passed to xgboost's eval_set.
    evals: Sequence
        The (DMatrix, name) pairs to evaluate.
    log_file: Optional[str]
        If set, evaluation logs are appended to this file.
    maximize: bool
        Whether a larger metric value is better.
    verbose_eval: Union[bool, int]
        Print the evaluation log every `verbose_eval` epochs (when an int).
    skip_every: int
        Skip evaluation on epochs where `epoch % skip_every == 1`.
    """
    def __init__(
        self,
        stopping_rounds,
        metric,
        fevals,
        evals=(),
        log_file=None,
        maximize=False,
        verbose_eval=True,
        skip_every=2,
    ):
        """Init function"""
        self.stopping_rounds = stopping_rounds
        self.metric = metric
        # e.g. "tr-p-rmse" -> "p" (used to prioritize matching keys when sorting)
        self.metric_shortname = metric.split("-")[1]
        self.fevals = fevals
        self.evals = evals
        self.log_file = log_file
        self.maximize = maximize
        self.verbose_eval = verbose_eval
        self.skip_every = skip_every
        # Early-stopping state (best score/iteration/message), lazily initialized.
        self.state = {}
    def after_iteration(self, model: "xgb.Booster", epoch: int, evals_log: Dict):
        """Run after each iteration. Return True when training should stop."""
        # pylint:disable = import-outside-toplevel
        try:
            from xgboost.callback import _fmt_metric  # type: ignore
        except ImportError:
            # Compatibility with xgboost >= 1.6
            def _fmt_metric(value, show_stdv=True):
                """format metric string"""
                if len(value) == 2:
                    return f"{value[0]}:{value[1]:.5f}"
                if len(value) == 3:
                    if show_stdv:
                        return f"{value[0]}:{value[1]:.5f}+{value[2]:.5f}"
                    return f"{value[0]}:{value[1]:.5f}"
                raise ValueError("wrong metric value", value)
        ##### init state #####
        if not self.state:
            self.state["maximize_score"] = self.maximize
            self.state["best_iteration"] = 0
            if self.maximize:
                self.state["best_score"] = float("-inf")
            else:
                self.state["best_score"] = float("inf")
            assert model is not None
            # Resume best-score bookkeeping from model attributes if present
            # (e.g. when continuing from a checkpoint).
            if model.attr("best_score") is not None:
                self.state["best_score"] = float(model.attr("best_score"))
                self.state["best_iteration"] = int(model.attr("best_iteration"))
                self.state["best_msg"] = model.attr("best_msg")
            else:
                model.set_attr(best_iteration=str(self.state["best_iteration"]))
                model.set_attr(best_score=str(self.state["best_score"]))
        res_dict = {}
        # Skip evaluation on some epochs to reduce overhead.
        if epoch % self.skip_every == 1:
            return False
        ##### evaluation #####
        for feval in self.fevals:
            bst_eval = model.eval_set(self.evals, epoch, feval)
            # eval_set returns a string like "[epoch]\tname:value\t..."; drop the
            # leading "[epoch]" token and parse "name:value" pairs.
            res = [x.split(":") for x in bst_eval.split()]
            for kv in res[1:]:
                res_dict[kv[0]] = [float(kv[1])]
        eval_res = []
        keys = list(res_dict.keys())
        # Prefixing "a" sorts the keys containing the target metric first.
        keys.sort(key=lambda x: x if self.metric_shortname not in x else "a" + x)
        for key in keys:
            v = res_dict[key]
            eval_res.append([key] + v)
        ##### print eval result #####
        if (
            not isinstance(self.verbose_eval, bool)
            and self.verbose_eval
            and epoch % self.verbose_eval == 0
        ):
            infos = [f"XGB iter: {epoch:3d}"]
            for item in eval_res:
                if "null" in item[0]:
                    continue
                infos.append(f"{item[0]}: {item[1]:.6f}")
            logger.debug("\t".join(infos))
            if self.log_file:
                with open(self.log_file, "a") as fout:
                    fout.write("\t".join(infos) + "\n")
        ##### choose score and do early stopping #####
        score = None
        for item in eval_res:
            if item[0] == self.metric:
                score = item[1]
                break
        assert score is not None
        best_score = self.state["best_score"]
        best_iteration = self.state["best_iteration"]
        maximize_score = self.state["maximize_score"]
        if (maximize_score and score > best_score) or (not maximize_score and score < best_score):
            msg = f"[{epoch}] " + "\t".join([_fmt_metric(x) for x in eval_res])
            self.state["best_msg"] = msg
            self.state["best_score"] = score
            self.state["best_iteration"] = epoch
            # save the property to attributes, so they will occur in checkpoint.
            if model is not None:
                model.set_attr(
                    best_score=str(self.state["best_score"]),
                    best_iteration=str(self.state["best_iteration"]),
                    best_msg=self.state["best_msg"],
                )
        elif epoch - best_iteration >= self.stopping_rounds:
            # No improvement for `stopping_rounds` epochs: stop training.
            best_msg = self.state["best_msg"]
            if self.verbose_eval:
                logger.debug("XGB stopped. Best iteration: %s ", best_msg)
            return True
        return False
| 23,300 | 33.065789 | 99 | py |
tvm | tvm-main/python/tvm/auto_scheduler/cost_model/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import, redefined-builtin
""" Cost model that estimates the performance of programs """
from .cost_model import RandomModel
from .xgb_model import XGBModel
| 967 | 43 | 62 | py |
tvm | tvm-main/python/tvm/auto_scheduler/cost_model/cost_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Cost models that estimate the performance of programs """
import ctypes
import numpy as np
import tvm._ffi
from tvm.runtime import Object
from .. import _ffi_api
@tvm._ffi.register_object("auto_scheduler.CostModel")
class CostModel(Object):
    """The base class for cost model.
    Concrete models are implemented either in C++ (e.g. RandomModel) or in
    Python (see PythonBasedModel below).
    """
@tvm._ffi.register_object("auto_scheduler.RandomModel")
class RandomModel(CostModel):
    """A model that returns random estimation for all inputs"""
    def __init__(self):
        self.__init_handle_by_constructor__(_ffi_api.RandomModel)
    def update(self, inputs, results):
        """Update the cost model according to new measurement results (training data).
        Parameters
        ----------
        inputs : List[auto_scheduler.measure.MeasureInput]
            The measurement inputs
        results : List[auto_scheduler.measure.MeasureResult]
            The measurement results
        """
        # The update is a no-op conceptually; it is forwarded to the C++ side.
        _ffi_api.CostModelUpdate(self, inputs, results)
    def predict(self, search_task, states):
        """Predict the scores of states
        Parameters
        ----------
        search_task : SearchTask
            The search task of states
        states : List[State]
            The input states
        Returns
        -------
        scores: List[float]
            The predicted scores for all states
        """
        # The FFI call returns boxed floats; unwrap them to plain Python floats.
        return [x.value for x in _ffi_api.CostModelPredict(self, search_task, states)]
@tvm._ffi.register_func("auto_scheduler.cost_model.random_fill_float")
def random_fill_float(size, return_ptr):
    """Fills a c++ float array with random numbers in [0, 1]
    Parameters
    ----------
    size: int
        The size of the array
    return_ptr:
        A pointer to a c++ float array
    """
    if size == 0:
        return
    # Wrap the raw C pointer as a numpy array view, then fill it in place.
    float_ptr = ctypes.cast(return_ptr, ctypes.POINTER(ctypes.c_float))
    out_view = np.ctypeslib.as_array(float_ptr, shape=(size,))
    out_view[:] = np.random.uniform(0, 1, (size,))
@tvm._ffi.register_object("auto_scheduler.PythonBasedModel")
class PythonBasedModel(CostModel):
    """Base class for cost models implemented in python"""
    def __init__(self):
        # The three closures below bridge the C++ side to the Python methods.
        def update_func(inputs, results):
            self.update(inputs, results)
        def predict_func(task, states, return_ptr):
            # Write the predicted scores directly into the C++ float buffer.
            return_ptr = ctypes.cast(return_ptr, ctypes.POINTER(ctypes.c_float))
            array_wrapper = np.ctypeslib.as_array(return_ptr, shape=(len(states),))
            array_wrapper[:] = self.predict(task, states)
        def predict_stage_func(task, states, return_ptr):
            # predict_stages returns a packed flat array (see its docstring);
            # copy it as-is into the C++ buffer.
            ret = self.predict_stages(task, states)
            return_ptr = ctypes.cast(return_ptr, ctypes.POINTER(ctypes.c_float))
            array_wrapper = np.ctypeslib.as_array(return_ptr, shape=ret.shape)
            array_wrapper[:] = ret
        self.__init_handle_by_constructor__(
            _ffi_api.PythonBasedModel, update_func, predict_func, predict_stage_func
        )
    def update(self, inputs, results):
        """Update the cost model according to new measurement results (training data).
        Parameters
        ----------
        inputs : List[auto_scheduler.measure.MeasureInput]
            The measurement inputs
        results : List[auto_scheduler.measure.MeasureResult]
            The measurement results
        """
        raise NotImplementedError
    def predict(self, task, states):
        """Predict the scores of states
        Parameters
        ----------
        task : SearchTask
            The search task of states
        states : List[State]
            The input states
        Returns
        -------
        scores: List[float]
            The predicted scores for all states
        """
        raise NotImplementedError
    def predict_stages(self, task, states):
        """Predict the scores of all stages in states. This is the breakdown version of `predict`.
        Parameters
        ----------
        task : SearchTask
            The search task of states
        states : List[State]
            The input states
        Returns
        -------
        scores: List[float]
            The predicted scores for all stages in all states in the packed format
        Note
        ----
        For faster data copy between c++ and python, the python part returns scores in a
        single flatten array using a packed format. The c++ part then unpacks the flatten array.
        The packed format is:
        {
          float  scores[N];                 // scores[i] is the score for states[i].
          int    n_stage_0;                 // the number of stages in states[0]
          float  stage_scores_0[n_stage_0]; // the scores for all stages in states[0]
          int    n_stage_1;                 // the number of stages in states[1]
          float  stage_scores_1[n_stage_1]; // the scores for all stages in states[1]
          ...
          int    n_stage_i;                 // the number of stages in states[i]
          float  stage_scores_i[n_stage_i]; // the scores for all stages in states[i]
          ...  // until i == N - 1
        }
        To implement this format, we also store int as float, so we can store all numbers
        into a single float array.
        """
        raise NotImplementedError
| 6,065 | 33.662857 | 98 | py |
tvm | tvm-main/python/tvm/ir/base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common base structures."""
import tvm._ffi
import tvm.error
from tvm._ffi import get_global_func, register_object
from tvm.runtime import Object, _ffi_node_api
from . import _ffi_api, json_compact
class Node(Object):
    """Base class of all IR Nodes.
    IR nodes are backed by TVM runtime Objects and therefore support the
    structural equality/hash utilities defined in this module.
    """
@register_object("SourceMap")
class SourceMap(Object):
    """A registered mapping of source names to source content.
    NOTE(review): semantics inferred from the `add` method below; the backing
    C++ node is "SourceMap" — confirm details there.
    """
    def add(self, name, content):
        """Register `content` under `name` via the global "SourceMapAdd" packed function."""
        return get_global_func("SourceMapAdd")(self, name, content)
@register_object("SourceName")
class SourceName(Object):
    """An identifier for a source location.
    Parameters
    ----------
    name : str
        The name of the source.
    """
    def __init__(self, name):
        # Construct the underlying C++ SourceName node through the FFI.
        self.__init_handle_by_constructor__(_ffi_api.SourceName, name)  # type: ignore # pylint: disable=no-member
@register_object("Span")
class Span(Object):
    """Specifies a location in a source program.
    Parameters
    ----------
    source_name : SourceName
        The source name.
    line : int
        The starting line number.
    end_line : int
        The ending line number.
    column : int
        The starting column offset.
    end_column : int
        The ending column offset.
    """
    def __init__(self, source_name, line, end_line, column, end_column):
        self.__init_handle_by_constructor__(
            _ffi_api.Span, source_name, line, end_line, column, end_column  # type: ignore # pylint: disable=no-member
        )
@register_object("SequentialSpan")
class SequentialSpan(Object):
    """A span composed of a sequence of source spans.
    This kind of span is attached to an expression that originates from
    multiple expressions after an IR transform.
    Parameters
    ----------
    spans : Array
        The array of spans.
    """
    def __init__(self, spans):
        self.__init_handle_by_constructor__(_ffi_api.SequentialSpan, spans)
@register_object
class EnvFunc(Object):
    """Environment function.
    This is a global function object that can be serialized by its name.
    """
    def __call__(self, *args):
        """Invoke the underlying function with the given arguments."""
        return _ffi_api.EnvFuncCall(self, *args)  # type: ignore # pylint: disable=no-member
    @property
    def func(self):
        """Return the underlying PackedFunc of this EnvFunc."""
        return _ffi_api.EnvFuncGetPackedFunc(self)  # type: ignore # pylint: disable=no-member
    @staticmethod
    def get(name):
        """Get a static env function
        Parameters
        ----------
        name : str
            The name of the function.
        """
        return _ffi_api.EnvFuncGet(name)  # type: ignore # pylint: disable=no-member
def load_json(json_str) -> Object:
    """Load tvm object from json_str.
    Parameters
    ----------
    json_str : str
        The json string
    Returns
    -------
    node : Object
        The loaded tvm node.
    """
    try:
        return _ffi_node_api.LoadJSON(json_str)
    except tvm.error.TVMError:
        # The json may have been produced by an older TVM version; upgrade it
        # to the current format and retry once.
        json_str = json_compact.upgrade_json(json_str)
        return _ffi_node_api.LoadJSON(json_str)
def save_json(node) -> str:
    """Save tvm object as json string.
    Parameters
    ----------
    node : Object
        A TVM object to be saved.
    Returns
    -------
    json_str : str
        Saved json string.
    """
    # Serialization is delegated entirely to the C++ node API.
    return _ffi_node_api.SaveJSON(node)
def structural_equal(lhs, rhs, map_free_vars=False):
    """Check structural equality of lhs and rhs.
    The structural equality is recursively defined in the DAG of IRNodes.
    There are two kinds of nodes:
    - Graph node: a graph node in lhs can only be mapped as equal to
      one and only one graph node in rhs.
    - Normal node: equality is recursively defined without the restriction
      of graph nodes.
    Vars(tir::Var, TypeVar) and non-constant relay expression nodes are graph nodes.
    For example, it means that `%1 = %x + %y; %1 + %1` is not structurally equal
    to `%1 = %x + %y; %2 = %x + %y; %1 + %2` in relay.
    A var-type node(e.g. tir::Var, TypeVar) can be mapped as equal to another var
    with the same type if one of the following condition holds:
    - They appear in a same definition point(e.g. function argument).
    - They points to the same VarNode via the same_as relation.
    - They appear in a same usage point, and map_free_vars is set to be True.
    The rules for var are used to remap variables occurs in function
    arguments and let-bindings.
    Parameters
    ----------
    lhs : Object
        The left operand.
    rhs : Object
        The right operand.
    map_free_vars : bool
        Whether free variables (i.e. variables without a definition site) should be mapped
        as equal to each other.
    Return
    ------
    result : bool
        The comparison result.
    See Also
    --------
    structural_hash
    assert_structural_equal
    """
    lhs = tvm.runtime.convert(lhs)
    rhs = tvm.runtime.convert(rhs)
    # Third argument (assert_mode) is False: return a bool instead of raising.
    return bool(_ffi_node_api.StructuralEqual(lhs, rhs, False, map_free_vars))  # type: ignore # pylint: disable=no-member
def get_first_structural_mismatch(lhs, rhs, map_free_vars=False):
    """Like structural_equal(), but returns the ObjectPaths of the first detected mismatch.
    Parameters
    ----------
    lhs : Object
        The left operand.
    rhs : Object
        The right operand.
    map_free_vars : bool
        Whether free variables (i.e. variables without a definition site) should be mapped
        as equal to each other.
    Returns
    -------
    mismatch: Optional[Tuple[ObjectPath, ObjectPath]]
        `None` if `lhs` and `rhs` are structurally equal.
        Otherwise, a tuple of two ObjectPath objects that point to the first detected mismatch.
    """
    lhs = tvm.runtime.convert(lhs)
    rhs = tvm.runtime.convert(rhs)
    mismatch = _ffi_node_api.GetFirstStructuralMismatch(lhs, rhs, map_free_vars)  # type: ignore # pylint: disable=no-member
    # Guard clause: the two objects are structurally equal.
    if mismatch is None:
        return None
    return mismatch.lhs_path, mismatch.rhs_path
def assert_structural_equal(lhs, rhs, map_free_vars=False):
    """Assert lhs and rhs are structurally equal to each other.
    Parameters
    ----------
    lhs : Object
        The left operand.
    rhs : Object
        The right operand.
    map_free_vars : bool
        Whether or not shall we map free vars that does
        not bound to any definitions as equal to each other.
    Raises
    ------
    ValueError : if assertion does not hold.
    See Also
    --------
    structural_equal
    """
    lhs = tvm.runtime.convert(lhs)
    rhs = tvm.runtime.convert(rhs)
    # Third argument (assert_mode) is True: the FFI call raises on mismatch
    # instead of returning a bool (compare with structural_equal above).
    _ffi_node_api.StructuralEqual(lhs, rhs, True, map_free_vars)  # type: ignore # pylint: disable=no-member
def structural_hash(node, map_free_vars=False):
    """Compute structural hash of node
    The structural hash value is recursively defined in the DAG of IRNodes.
    There are two kinds of nodes:
    - Normal node: the hash value is defined by its content and type only.
    - Graph node: each graph node will be assigned a unique index ordered by the
      first occurrence during the visit. The hash value of a graph node is
      combined from the hash values of its contents and the index.
    structural_hash is made to be consistent with structural_equal.
    If two nodes are structurally equal to each other,
    then their structural hash (with the same map_free_vars option)
    should be equal to each other as well.
    If the structural hash of two nodes equals to each other,
    then it is highly likely(except for rare hash value collision cases)
    that the two nodes are structurally equal to each other.
    Parameters
    ----------
    node : Object
        The input to be hashed.
    map_free_vars : bool
        If map_free_vars is set to true, we will hash free variables
        by the order of their occurrences. Otherwise, we will hash by
        their in-memory pointer address.
    Return
    ------
    result : int
        The hash result
    See Also
    --------
    structural_equal
    """
    return _ffi_node_api.StructuralHash(node, map_free_vars)  # type: ignore # pylint: disable=no-member
def deprecated(
    method_name: str,
    new_method_name: str,
):
    """A decorator to indicate that a method is deprecated
    Parameters
    ----------
    method_name : str
        The name of the method to deprecate
    new_method_name : str
        The name of the new method to use instead
    """
    import functools  # pylint: disable=import-outside-toplevel
    import warnings  # pylint: disable=import-outside-toplevel

    # Build the warning text once; it is identical for every call.
    message = f"{method_name} is deprecated, use {new_method_name} instead"

    def _decorator(original):
        @functools.wraps(original)
        def _deprecated_call(*args, **kwargs):
            # stacklevel=2 points the warning at the caller, not this wrapper.
            warnings.warn(message, DeprecationWarning, stacklevel=2)
            return original(*args, **kwargs)

        return _deprecated_call

    return _decorator
| 9,478 | 27.465465 | 124 | py |
tvm | tvm-main/python/tvm/ir/container.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Additional container data structures used across IR variants."""
import tvm._ffi
from tvm.runtime import Object
from tvm.runtime.container import getitem_helper
from tvm.runtime import _ffi_api
@tvm._ffi.register_object("Array")
class Array(Object):
    """Array container of TVM.
    You do not need to create Array explicitly.
    Normally python list and tuple will be converted automatically
    to Array during tvm function call.
    You may get Array in return values of TVM function call.
    """
    def __getitem__(self, idx):
        # Index handling (negative indices, slices, ...) is delegated to the
        # shared getitem_helper over the FFI element accessor.
        return getitem_helper(self, _ffi_api.ArrayGetItem, len(self), idx)
    def __len__(self):
        return _ffi_api.ArraySize(self)
    def __dir__(self):
        return sorted(dir(self.__class__) + ["type_key"])
    def __getattr__(self, name):
        # __getattr__ only runs after normal attribute lookup fails, so reaching
        # here with "handle" means the FFI handle was never initialized.
        if name == "handle":
            raise AttributeError("handle is not set")
        if name == "type_key":
            return super().__getattr__(name)
        raise AttributeError(f"{type(self)} has no attribute {name}")
@tvm._ffi.register_object
class Map(Object):
    """Map container of TVM.
    You do not need to create Map explicitly.
    Normally python dict will be converted automatically to Map during tvm function call.
    You can use convert to create a dict[Object-> Object] into a Map
    """
    def __getitem__(self, k):
        return _ffi_api.MapGetItem(self, k)
    def __contains__(self, k):
        return _ffi_api.MapCount(self, k) != 0
    def __iter__(self):
        # MapItems returns an interleaved [key0, value0, key1, value1, ...]
        # array; iterate over the even positions (keys).
        akvs = _ffi_api.MapItems(self)
        for i in range(len(self)):
            yield akvs[i * 2]
    def __dir__(self):
        return sorted(dir(self.__class__) + ["type_key"])
    def __getattr__(self, name):
        # __getattr__ only runs after normal attribute lookup fails, so reaching
        # here with "handle" means the FFI handle was never initialized.
        if name == "handle":
            raise AttributeError("handle is not set")
        if name == "type_key":
            return super().__getattr__(name)
        raise AttributeError(f"{type(self)} has no attribute {name}")
    def keys(self):
        """Iterate over the keys of the map."""
        return iter(self)
    def values(self):
        """Iterate over the values of the map (odd positions of MapItems)."""
        akvs = _ffi_api.MapItems(self)
        for i in range(len(self)):
            yield akvs[i * 2 + 1]
    def items(self):
        """Get the items from the map"""
        akvs = _ffi_api.MapItems(self)
        return [(akvs[i], akvs[i + 1]) for i in range(0, len(akvs), 2)]
    def __len__(self):
        return _ffi_api.MapSize(self)
    def get(self, key, default=None):
        """Get an element with a default value.
        Parameters
        ----------
        key : object
            The attribute key.
        default : object
            The default object.
        Returns
        -------
        value: object
            The result value.
        """
        return self[key] if key in self else default
| 3,531 | 29.713043 | 89 | py |
tvm | tvm-main/python/tvm/ir/tensor_type.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Type relation and function for type checking."""
import tvm._ffi
from . import _ffi_api
from .type import Type
@tvm._ffi.register_object("relay.TensorType")
class TensorType(Type):
    """A concrete TensorType in Relay.

    This is the type assigned to tensors that have a known dtype and
    shape, e.g. a `float32` tensor of shape `(5, 5)`.

    Parameters
    ----------
    shape : List[tvm.ir.PrimExpr]
        The shape of the Tensor

    dtype : Optional[str]
        The content data type.
    """

    def __init__(self, shape, dtype="float32"):
        self.__init_handle_by_constructor__(_ffi_api.TensorType, shape, dtype)

    @property
    def concrete_shape(self):
        """Get shape of the type as a concrete tuple of int.

        Returns
        -------
        shape : List[int]
            The concrete shape of the Type.

        Raises
        ------
        TypeError : If the shape is symbolic
        """
        return tuple(int(dim) for dim in self.shape)

    def __str__(self):
        # Imported lazily to avoid a circular dependency at module load.
        from tvm.relay import pretty_print  # pylint: disable=import-outside-toplevel

        return pretty_print(self)
| 1,916 | 29.919355 | 85 | py |
tvm | tvm-main/python/tvm/ir/op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Primitive operators in the TVM IR."""
import tvm._ffi
from . import _ffi_api
from .expr import RelayExpr
@tvm._ffi.register_object("Op")
class Op(RelayExpr):
    """Primitive operator in the IR.

    Op objects are interned global singletons managed by the C++ registry;
    obtain them via :py:meth:`Op.get` rather than by construction.
    """

    def __init__(self):
        # Direct construction is forbidden; ops only come from the registry.
        raise RuntimeError("Cannot create op, use get instead")

    def astext(self, show_meta_data=True, annotate=None):
        """Get the text format of the expression.

        Parameters
        ----------
        show_meta_data : bool
            Whether to include meta data section in the text
            if there is meta data.

        annotate: Optional[Object->str]
            Optionally annotate function to provide additional
            information in the comment block.

        Returns
        -------
        text : str
            The text format of the expression.

        Notes
        -----
        The meta data section is necessary to fully parse the text format.
        However, it can contain dumps that are big (e.g constant weights),
        so it can be helpful to skip printing the meta data section.
        """
        # Imported lazily to avoid a circular import at module load time.
        from tvm.relay import astext  # pylint: disable=import-outside-toplevel

        return astext(self, show_meta_data, annotate)

    @staticmethod
    def get(op_name):
        """Get the Op for a given name

        Parameters
        ----------
        op_name : str
            The operator name

        Returns
        -------
        op : Op
            The op of the corresponding name
        """
        return _ffi_api.GetOp(op_name)

    def get_attr(self, attr_name):
        """Get additional attribute about the operator.

        Parameters
        ----------
        attr_name : str
            The attribute name.

        Returns
        -------
        value : object
            The attribute value
        """
        return _ffi_api.OpGetAttr(self, attr_name)

    def has_attr(self, attr_name):
        """Check whether the operator has additional attribute.

        Parameters
        ----------
        attr_name : str
            The attribute name.

        Returns
        -------
        value : bool
            Whether the operator has additional attribute
        """
        return _ffi_api.OpHasAttr(self, attr_name)

    def set_attr(self, attr_name, value, plevel=10):
        """Set attribute about the operator.

        Parameters
        ----------
        attr_name : str
            The attribute name

        value : object
            The attribute value

        plevel : int
            The priority level
        """
        _ffi_api.OpSetAttr(self, attr_name, value, plevel)

    def reset_attr(self, attr_name):
        """Reset attribute about the operator.

        Parameters
        ----------
        attr_name : str
            The attribute name
        """
        _ffi_api.OpResetAttr(self, attr_name)

    def add_type_rel(self, rel_name, type_rel_func=None):
        """Attach the type function corresponding to the return type.

        Parameters
        ----------
        rel_name : str
            The type relation name to register.

        type_rel_func : Optional[function (args: List[Type], attrs: Attrs) -> Type]
            The backing relation function which can solve an arbitrary relation on variables.
            Differences with type_rel_func in C++:

            1) When type_rel_func is not None
               a) OpAddTypeRel on C++ side will adjust type_rel_func with TypeReporter to
                  calling convention of relay type system.
               b) type_rel_func returns output argument's type, return None means can't
                  infer output's type.
               c) only support single output operators for now, the last argument is output tensor.
            2) when type_rel_func is None, will call predefined type_rel_funcs in relay
               according to ``tvm.relay.type_relation.`` + rel_name.
        """
        _ffi_api.OpAddTypeRel(self, rel_name, type_rel_func)

    def add_argument(self, name, type, description):  # pylint: disable=redefined-builtin
        """Add arguments information to the function.

        Parameters
        ----------
        name : str
            The argument name.

        type : str
            The argument type.

        description : str
            The argument description.
        """
        _ffi_api.OpAddArgument(self, name, type, description)

    def set_support_level(self, level):
        """Set the support level of op.

        Parameters
        ----------
        level : int
            The support level.
        """
        _ffi_api.OpSetSupportLevel(self, level)

    def set_num_inputs(self, n):
        """Set the number of inputs of the op.

        Parameters
        ----------
        n : int
            The input number.
        """
        _ffi_api.OpSetNumInputs(self, n)

    def set_attrs_type_key(self, key):
        """Set the attribute type key of op.

        Parameters
        ----------
        key : str
            The type key.
        """
        _ffi_api.OpSetAttrsTypeKey(self, key)

    @staticmethod
    def list_op_names():
        """List all the op names in the op registry.

        Returns
        -------
        value : List[str]
            The registered op names
        """
        return _ffi_api.ListOpNames()
def register_op_attr(op_name, attr_key, value=None, level=10):
    """Register an operator property of an operator by name.

    Parameters
    ----------
    op_name : str
        The name of operator

    attr_key : str
        The attribute name.

    value : object, optional
        The value to set

    level : int, optional
        The priority level

    Returns
    -------
    fregister : function
        Register function if value is not specified.
    """

    def _do_register(attr_value):
        """Register *attr_value* under (op_name, attr_key) and return it."""
        _ffi_api.RegisterOpAttr(op_name, attr_key, attr_value, level)
        return attr_value

    # Immediate registration when a value is supplied; otherwise behave
    # as a decorator factory.
    if value is not None:
        return _do_register(value)
    return _do_register
def register_intrin_lowering(
    op_name,
    target,
    *,
    f=None,
    level=10,
):
    """Register Op lowering function

    Parameters
    ----------
    op_name : str
        The op name

    target : str
        The target string for given intrinsic lowering function

    f : function, optional
        The function to be registered.

    level : int
        The priority level

    Returns
    -------
    fregister : function
        Register op lowering function if f is not specified.
    """

    def _do_register(lower_func):
        """Register *lower_func* as the lowering rule and return it."""
        _ffi_api.RegisterOpLowerIntrinsic(op_name, lower_func, target, level)
        return lower_func

    # With an explicit function, register eagerly; otherwise act as a decorator.
    if f is not None:
        return _do_register(f)
    return _do_register
| 7,522 | 25.677305 | 99 | py |
tvm | tvm-main/python/tvm/ir/type.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unified type system in the project."""
from enum import IntEnum
import tvm
import tvm._ffi
from tvm.runtime import Scriptable
from . import _ffi_api
from .base import Node
class Type(Node, Scriptable):
    """The base class of all types."""

    def __eq__(self, other):
        """Compare two types for structural equivalence."""
        return bool(tvm.ir.structural_equal(self, other))

    def __ne__(self, other):
        """Structural inequality: the negation of ``__eq__``."""
        return not self.__eq__(other)

    def same_as(self, other):
        """Compares two Relay types by referential equality."""
        # Bypass the structural __eq__ above and use the base identity check.
        return super().__eq__(other)
class TypeKind(IntEnum):
    """Possible kinds of TypeVars."""

    Type = 0
    ShapeVar = 1
    BaseType = 2
    # NOTE(review): value 3 is skipped; presumably mirrors the C++ enum
    # values -- confirm before renumbering.
    Constraint = 4
    AdtHandle = 5
    TypeData = 6
@tvm._ffi.register_object("PrimType")
class PrimType(Type):
    """Primitive data type in the low level IR

    Parameters
    ----------
    dtype : str
        The runtime data type relates to the primtype.
    """

    def __init__(self, dtype):
        # Construction is delegated to the C++ side through the FFI.
        self.__init_handle_by_constructor__(_ffi_api.PrimType, dtype)
@tvm._ffi.register_object("PointerType")
class PointerType(Type):
    """PointerType used in the low-level TIR.

    Parameters
    ----------
    element_type : tvm.ir.Type
        The type of pointer's element.

    storage_scope : str
        The storage scope into which the pointer addresses.
    """

    def __init__(self, element_type, storage_scope=""):
        # Construction is delegated to the C++ side through the FFI.
        self.__init_handle_by_constructor__(_ffi_api.PointerType, element_type, storage_scope)
@tvm._ffi.register_object("TypeVar")
class TypeVar(Type):
    """Type parameter in functions.

    A type variable represents a type placeholder which will
    be filled in later on. This allows the user to write
    functions which are generic over types.

    Parameters
    ----------
    name_hint: str
        The name of the type variable. This name only acts as a hint, and
        is not used for equality.

    kind : Optional[TypeKind]
        The kind of the type parameter.
    """

    def __init__(self, name_hint, kind=TypeKind.Type):
        self.__init_handle_by_constructor__(_ffi_api.TypeVar, name_hint, kind)

    def __call__(self, *args):
        """Create a type call from this type.

        Parameters
        ----------
        args: List[Type]
            The arguments to the type call.

        Returns
        -------
        call: Type
            The result type call.
        """
        # Imported here to avoid a circular import at module load time.
        from .type_relation import TypeCall  # pylint: disable=import-outside-toplevel

        return TypeCall(self, args)
@tvm._ffi.register_object("GlobalTypeVar")
class GlobalTypeVar(Type):
    """A global type variable that is used for defining new types or type aliases.

    Parameters
    ----------
    name_hint: str
        The name of the type variable. This name only acts as a hint, and
        is not used for equality.

    kind : Optional[TypeKind]
        The kind of the type parameter.
    """

    def __init__(self, name_hint, kind=TypeKind.AdtHandle):
        self.__init_handle_by_constructor__(_ffi_api.GlobalTypeVar, name_hint, kind)

    def __call__(self, *args):
        """Create a type call from this type.

        Parameters
        ----------
        args: List[Type]
            The arguments to the type call.

        Returns
        -------
        call: Type
            The result type call.
        """
        # Imported here to avoid a circular import at module load time.
        from .type_relation import TypeCall  # pylint: disable=import-outside-toplevel

        return TypeCall(self, args)
@tvm._ffi.register_object("TupleType")
class TupleType(Type):
    """The type of tuple values.

    Parameters
    ----------
    fields : List[Type]
        The fields in the tuple
    """

    def __init__(self, fields):
        # Construction is delegated to the C++ side through the FFI.
        self.__init_handle_by_constructor__(_ffi_api.TupleType, fields)
@tvm._ffi.register_object("TypeConstraint")
class TypeConstraint(Type):
    """Abstract class representing a type constraint.

    Concrete constraints (e.g. type relations) derive from this class;
    it is not instantiated directly from Python.
    """
@tvm._ffi.register_object("FuncType")
class FuncType(Type):
    """Function type.

    A function type consists of a list of type parameters to enable
    the definition of generic functions,
    a set of type constraints which we omit for the time being,
    a sequence of argument types, and a return type.

    We can informally write them as:
    `forall (type_params), (arg_types) -> ret_type where type_constraints`

    Parameters
    ----------
    arg_types : List[tvm.relay.Type]
        The argument types

    ret_type : tvm.relay.Type
        The return type.

    type_params : Optional[List[tvm.relay.TypeVar]]
        The type parameters

    type_constraints : Optional[List[tvm.relay.TypeConstraint]]
        The type constraints.
    """

    def __init__(self, arg_types, ret_type, type_params=None, type_constraints=None):
        # Normalize the optional list arguments to empty lists.
        type_params = [] if type_params is None else type_params
        type_constraints = [] if type_constraints is None else type_constraints
        self.__init_handle_by_constructor__(
            _ffi_api.FuncType, arg_types, ret_type, type_params, type_constraints
        )
@tvm._ffi.register_object("IncompleteType")
class IncompleteType(Type):
    """Incomplete type during type inference.

    Parameters
    ----------
    kind : Optional[TypeKind]
        The kind of the incomplete type.
    """

    def __init__(self, kind=TypeKind.Type):
        self.__init_handle_by_constructor__(_ffi_api.IncompleteType, kind)
@tvm._ffi.register_object("relay.RefType")
class RelayRefType(Type):
    """Reference Type in relay.

    Parameters
    ----------
    value: Type
        The value type.
    """

    def __init__(self, value):
        # Construction is delegated to the C++ side through the FFI.
        self.__init_handle_by_constructor__(_ffi_api.RelayRefType, value)
| 6,479 | 25.887967 | 94 | py |
tvm | tvm-main/python/tvm/ir/instrument.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Common pass instrumentation across IR variants."""
import inspect
import functools
import tvm._ffi
import tvm.runtime
from . import _ffi_instrument_api
@tvm._ffi.register_object("instrument.PassInstrument")
class PassInstrument(tvm.runtime.Object):
    """A pass instrument implementation.

    To use, a user class can either subclass from PassInstrument
    directly, or can apply the :py:func:`pass_instrument` wrapper. In
    either case, the `enter_pass_ctx`, `exit_pass_ctx`, `should_run`,
    `run_before_pass`, and `run_after_pass` methods can be defined to
    adjust the instrument's behavior. See the no-op implementations
    in this class definition for more information on each.
    """

    def __init__(self):
        # initialize handle in case pi_cls creation failed.
        self.handle = None
        cls = type(self)

        # If the child class declared the method, then use it.
        # Otherwise, pass None to avoid a C++ -> Python round trip for
        # a no-op. The identity comparison against the base-class
        # attribute detects whether the subclass overrode the method.
        def get_child_method(name):
            if getattr(cls, name) is getattr(PassInstrument, name):
                return None
            return getattr(self, name)

        # Create runtime pass instrument object.
        # register instance's enter_pass_ctx,exit_pass_ctx, should_run, run_before_pass and
        # run_after_pass methods to it if present.
        self.__init_handle_by_constructor__(
            _ffi_instrument_api.PassInstrument,
            cls.__name__,
            get_child_method("enter_pass_ctx"),
            get_child_method("exit_pass_ctx"),
            get_child_method("should_run"),
            get_child_method("run_before_pass"),
            get_child_method("run_after_pass"),
        )

    # The methods below are deliberate no-op stubs: when a subclass does
    # not override them, None is registered on the C++ side instead.

    def enter_pass_ctx(self):
        """Called when entering the instrumented context.

        Returns
        -------
        None
        """

    def exit_pass_ctx(self):
        """Called when exiting the instrumented context.

        Returns
        -------
        None
        """

    def should_run(self, mod, info):
        """Determine whether to run the pass or not.

        Called once for each pass that is run while the instrumented
        context is active.

        Parameters
        ----------
        mod : tvm.ir.module.IRModule
            The module on which an optimization pass is being run.

        info : tvm.transform.PassInfo
            The pass information.

        Returns
        -------
        should_run : bool
            True to run the pass, or False to skip the pass.
        """

    def run_before_pass(self, mod, info):
        """Instrument before the pass runs.

        Called once for each pass that is run while the instrumented
        context is active.

        Parameters
        ----------
        mod : tvm.ir.module.IRModule
            The module on which an optimization pass is being run.

        info : tvm.transform.PassInfo
            The pass information.

        Returns
        -------
        None
        """

    def run_after_pass(self, mod, info):
        """Instrument after the pass runs.

        Called once for each pass that is run while the instrumented
        context is active.

        Parameters
        ----------
        mod : tvm.ir.module.IRModule
            The module on which an optimization pass is being run.

        info : tvm.transform.PassInfo
            The pass information.

        Returns
        -------
        None
        """
def _wrap_class_pass_instrument(pi_cls):
    """Wrap a python class as pass instrument"""
    # Classes that already derive from PassInstrument need no wrapping.
    if issubclass(pi_cls, PassInstrument):
        return pi_cls

    class PyPassInstrument(pi_cls, PassInstrument):
        """Internal wrapper class to create a class instance."""

        def __init__(self, *args, **kwargs):
            # Clear the handle first so teardown is safe even when the
            # user-supplied __init__ raises.
            self.handle = None
            pi_cls.__init__(self, *args, **kwargs)
            PassInstrument.__init__(self)

    # Make the wrapper indistinguishable from the user class.
    functools.update_wrapper(PyPassInstrument.__init__, pi_cls.__init__)
    PyPassInstrument.__module__ = pi_cls.__module__
    PyPassInstrument.__doc__ = pi_cls.__doc__
    PyPassInstrument.__name__ = pi_cls.__name__
    return PyPassInstrument
def pass_instrument(pi_cls=None):
    """Decorate a pass instrument.

    Parameters
    ----------
    pi_class : class
        Instrument class. See example below.

    Examples
    --------
    .. code-block:: python

        @tvm.instrument.pass_instrument
        class SkipPass:
            def __init__(self, skip_pass_name):
                self.skip_pass_name = skip_pass_name

            # Uncomment to customize
            # def enter_pass_ctx(self):
            #     pass

            # Uncomment to customize
            # def exit_pass_ctx(self):
            #     pass

            # If pass name contains keyword, skip it by return False. (return True: not skip)
            def should_run(self, mod, pass_info)
                if self.skip_pass_name in pass_info.name:
                    return False
                return True

            # Uncomment to customize
            # def run_before_pass(self, mod, pass_info):
            #     pass

            # Uncomment to customize
            # def run_after_pass(self, mod, pass_info):
            #     pass

        skip_annotate = SkipPass("AnnotateSpans")
        with tvm.transform.PassContext(instruments=[skip_annotate]):
            tvm.relay.build(mod, "llvm")
    """

    def create_pass_instrument(pi_cls):
        # Reject non-class arguments before attempting to wrap.
        if not inspect.isclass(pi_cls):
            raise TypeError("pi_cls must be a class")
        return _wrap_class_pass_instrument(pi_cls)

    # Support both @pass_instrument and @pass_instrument() usage.
    return create_pass_instrument(pi_cls) if pi_cls else create_pass_instrument
@tvm._ffi.register_object("instrument.PassInstrument")
class PassTimingInstrument(tvm.runtime.Object):
    """A wrapper to create a passes time instrument that implemented in C++"""

    def __init__(self):
        # The timing logic lives entirely on the C++ side.
        self.__init_handle_by_constructor__(_ffi_instrument_api.MakePassTimingInstrument)

    @staticmethod
    def render():
        """Retrieve rendered time profile result

        Returns
        -------
        string : string
            The rendered string result of time profiles

        Examples
        --------
        .. code-block:: python

            timing_inst = PassTimingInstrument()
            with tvm.transform.PassContext(instruments=[timing_inst]):
                relay_mod = relay.transform.InferType()(relay_mod)
                relay_mod = relay.transform.FoldScaleAxis()(relay_mod)
                # before exiting the context, get profile results.
                profiles = timing_inst.render()
        """
        return _ffi_instrument_api.RenderTimePassProfiles()
| 7,695 | 28.829457 | 93 | py |
tvm | tvm-main/python/tvm/ir/json_compact.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tool to upgrade json from historical versions."""
import json
import tvm.ir
import tvm.runtime
def create_updater(node_map, from_ver, to_ver):
    """Create an updater to update json loaded data.

    Parameters
    ----------
    node_map : Map[str, Function]
        Map from type_key to an updating function, or to a list of
        updating functions applied in order.

    from_ver : str
        Prefix of version that we can accept,

    to_ver : str
        The target version.

    Returns
    -------
    fupdater : function
        The updater function
    """

    def _updater(data):
        # Refuse to update data saved by an unexpected TVM version.
        assert data["attrs"]["tvm_version"].startswith(from_ver)
        nodes = data["nodes"]
        for idx, item in enumerate(nodes):
            updater = node_map.get(item["type_key"], None)
            if isinstance(updater, list):
                # A list means a pipeline of updates for this type key.
                for fpass in updater:
                    item = fpass(item, nodes)
            elif updater:
                item = updater(item, nodes)
            nodes[idx] = item
        data["attrs"]["tvm_version"] = to_ver
        return data

    return _updater
def create_updater_08_to_09():
    """
    Create an update to upgrade json from v0.8 to v0.9

    Returns
    -------
    fupdater : function
        The updater function
    """

    def _initialize_virtual_device(item, _):
        # Graphs saved before 0.9 lack virtual_device_; default it to node 0.
        if "virtual_device_" not in item["attrs"]:
            item["attrs"]["virtual_device_"] = "0"
        return item

    # Every one of these node kinds gained a virtual_device_ field in 0.9.
    affected_keys = (
        "GlobalVar",
        "relay.Var",
        "relay.Function",
        "relay.Tuple",
        "relay.Call",
        "relay.Let",
        "relay.If",
        "relay.TupleGetItem",
        "relay.RefCreate",
        "relay.RefRead",
        "relay.RefWrite",
        "relay.Match",
        "relay.Constant",
    )
    node_map = {key: _initialize_virtual_device for key in affected_keys}
    return create_updater(node_map, "0.8", "0.9")
def create_updater_07_to_08():
    """Create an update to upgrade json from v0.7 to v0.8"""

    def _initialize_module_attributes(item, _):
        assert item["type_key"] == "IRModule", "Only initialize the attributes for IRModules"
        # IRModule gained an "attrs" dictionary in v0.8; default it to node 0.
        if "attrs" not in item["attrs"]:
            item["attrs"]["attrs"] = "0"
        return item

    return create_updater({"IRModule": _initialize_module_attributes}, "0.7", "0.8")
def create_updater_06_to_07():
    """Create an update to upgrade json from v0.6 to v0.7

    Returns
    -------
    fupdater : function
        The updater function
    """

    def _ftype_var(item, nodes):
        # Inline the referenced name node into the type var and blank out
        # the now-unused node so it is dropped from the graph.
        vindex = int(item["attrs"]["var"])
        item["attrs"]["name_hint"] = nodes[vindex]["attrs"]["name"]
        # set vindex to null
        nodes[vindex]["type_key"] = ""
        del item["attrs"]["var"]
        assert item["type_key"].startswith("relay.")
        item["type_key"] = item["type_key"][len("relay.") :]
        return item

    def _rename(new_name):
        # Build an updater that just rewrites the node's type_key.
        def _convert(item, _):
            item["type_key"] = new_name
            return item

        return _convert

    def _update_tir_var(new_name):
        # Rename a TIR variable node and give it a null type annotation.
        def _convert(item, _):
            item["type_key"] = new_name
            item["attrs"]["type_annotation"] = "0"
            return item

        return _convert

    def _update_global_key(item, _):
        if "global_key" in item:
            item["repr_str"] = item["global_key"]
            del item["global_key"]
        return item

    def _update_from_std_str(key):
        # v0.7 stores strings as separate runtime.String nodes: serialize
        # the std::string value, append it as a new node, and point the
        # attribute at that node's index.
        def _convert(item, nodes):
            str_val = item["attrs"][key]
            jdata = json.loads(tvm.ir.save_json(tvm.runtime.String(str_val)))
            root_idx = jdata["root"]
            val = jdata["nodes"][root_idx]
            sidx = len(nodes)
            nodes.append(val)
            item["attrs"][key] = f"{sidx}"
            return item

        return _convert

    node_map = {
        # Base IR
        "SourceName": _update_global_key,
        "EnvFunc": _update_global_key,
        "relay.Op": [_update_global_key, _rename("Op")],
        "relay.TypeVar": [_ftype_var, _update_from_std_str("name_hint")],
        "TypeVar": _update_from_std_str("name_hint"),
        "relay.Id": [_update_from_std_str("name_hint")],
        "relay.GlobalTypeVar": [_ftype_var, _update_from_std_str("name_hint")],
        "GlobalTypeVar": _update_from_std_str("name_hint"),
        "relay.Type": _rename("Type"),
        "relay.TupleType": _rename("TupleType"),
        "relay.TypeConstraint": _rename("TypeConstraint"),
        "relay.FuncType": _rename("FuncType"),
        "relay.IncompleteType": _rename("IncompleteType"),
        "relay.TypeRelation": _rename("TypeRelation"),
        "relay.TypeCall": _rename("TypeCall"),
        "relay.Constructor": _update_from_std_str("name_hint"),
        "relay.Module": _rename("IRModule"),
        "relay.SourceName": _rename("SourceName"),
        "relay.Span": _rename("Span"),
        "relay.GlobalVar": [_rename("GlobalVar"), _update_from_std_str("name_hint")],
        "GlobalVar": _update_from_std_str("name_hint"),
        "relay.Pass": _rename("transform.Pass"),
        "relay.PassInfo": _rename("transform.PassInfo"),
        "relay.PassContext": _rename("transform.PassContext"),
        "relay.ModulePass": _rename("transform.ModulePass"),
        "relay.Sequential": _rename("transform.Sequential"),
        "StrMap": _rename("Map"),
        # TIR
        "Variable": [_update_tir_var("tir.Var"), _update_from_std_str("name")],
        "SizeVar": [_update_tir_var("tir.SizeVar"), _update_from_std_str("name")],
        "StringImm": [_rename("tir.StringImm"), _update_from_std_str("value")],
        "Cast": _rename("tir.Cast"),
        "Add": _rename("tir.Add"),
        "Sub": _rename("tir.Sub"),
        "Mul": _rename("tir.Mul"),
        "Div": _rename("tir.Div"),
        "Mod": _rename("tir.Mod"),
        "FloorDiv": _rename("tir.FloorDiv"),
        "FloorMod": _rename("tir.FloorMod"),
        "Min": _rename("tir.Min"),
        "Max": _rename("tir.Max"),
        "EQ": _rename("tir.EQ"),
        "NE": _rename("tir.NE"),
        "LT": _rename("tir.LT"),
        "LE": _rename("tir.LE"),
        "GT": _rename("tir.GT"),
        "GE": _rename("tir.GE"),
        "And": _rename("tir.And"),
        "Or": _rename("tir.Or"),
        "Not": _rename("tir.Not"),
        "Select": _rename("tir.Select"),
        "BufferLoad": _rename("tir.BufferLoad"),
        "Ramp": _rename("tir.Ramp"),
        "Broadcast": _rename("tir.Broadcast"),
        "Shuffle": _rename("tir.Shuffle"),
        "Call": [_rename("tir.Call"), _update_from_std_str("name")],
        "Let": _rename("tir.Let"),
        "Any": _rename("tir.Any"),
        "LetStmt": _rename("tir.LetStmt"),
        "AssertStmt": _rename("tir.AssertStmt"),
        "BufferStore": _rename("tir.BufferStore"),
        "BufferRealize": _rename("tir.BufferRealize"),
        "Allocate": _rename("tir.Allocate"),
        "IfThenElse": _rename("tir.IfThenElse"),
        "Evaluate": _rename("tir.Evaluate"),
        "Prefetch": _rename("tir.Prefetch"),
        "AttrStmt": [_rename("tir.AttrStmt"), _update_from_std_str("attr_key")],
        "Layout": [_rename("tir.Layout"), _update_from_std_str("name")],
        "Buffer": [
            _rename("tir.Buffer"),
            _update_from_std_str("name"),
            _update_from_std_str("scope"),
        ],
    }
    return create_updater(node_map, "0.6", "0.7")
def upgrade_json(json_str):
    """Update json from a historical version.

    Parameters
    ----------
    json_str : str
        A historical json file.

    Returns
    -------
    updated_json : str
        The updated version.
    """
    data = json.loads(json_str)
    from_version = data["attrs"]["tvm_version"]
    # Apply the per-version updaters in sequence, starting from the
    # detected version, until the data reaches the current format.
    if from_version.startswith("0.6"):
        data = create_updater_06_to_07()(data)
        data = create_updater_07_to_08()(data)
        data = create_updater_08_to_09()(data)
    elif from_version.startswith("0.7"):
        data = create_updater_07_to_08()(data)
        data = create_updater_08_to_09()(data)
    elif from_version.startswith("0.8"):
        data = create_updater_08_to_09()(data)
    else:
        raise ValueError(f"Cannot update from version {from_version}")
    return json.dumps(data, indent=2)
| 9,213 | 33.769811 | 100 | py |
tvm | tvm-main/python/tvm/ir/_ffi_instrument_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.instrument"""
import tvm._ffi
tvm._ffi._init_api("instrument", __name__)
| 879 | 40.904762 | 62 | py |
tvm | tvm-main/python/tvm/ir/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Common pass infrastructure across IR variants."""
import inspect
import functools
import tvm._ffi
import tvm.runtime
from . import _ffi_transform_api
@tvm._ffi.register_object("transform.PassInfo")
class PassInfo(tvm.runtime.Object):
    """The class contains the meta data required by a pass. It is the
    container of information needed by running an optimization or analysis.
    This class can be extended by adding new members when more meta data is
    needed.

    Parameters
    ----------
    opt_level : int
        The optimization level of this pass.

    name : str
        The pass name.

    required : List[str]
        The list of passes that are required by a certain pass.
    """

    def __init__(self, opt_level, name, required=None):
        # Construction is delegated to the C++ side through the FFI.
        self.__init_handle_by_constructor__(_ffi_transform_api.PassInfo, opt_level, name, required)
@tvm._ffi.register_object("transform.PassContext")
class PassContext(tvm.runtime.Object):
    """The basis where a Relay optimization/analysis runs on.
    Each pass context contains a number of auxiliary information that is used
    to help an optimization pass. Such information includes the error reporter
    to record the errors of during the optimization, etc.

    opt_level : Optional[int]
        The optimization level of this pass.

    required_pass : Optional[Union[List[str], Set[str], Tuple[str]]]
        The list of passes that are required by a certain pass.

    disabled_pass : Optional[Union[List[str], Set[str], Tuple[str]]]
        The list of passes that are disabled.

    instruments : Optional[Sequence[PassInstrument]]
        The list of pass instrument implementations.

    config : Optional[Dict[str, Object]]
        Additional configurations for specific passes.
    """

    def __init__(
        self,
        opt_level=2,
        required_pass=None,
        disabled_pass=None,
        instruments=None,
        config=None,
    ):
        required = list(required_pass) if required_pass else []
        # NOTE(review): list(...) above already raises TypeError for
        # non-iterables and always yields a list, so these isinstance
        # checks can never fail -- kept as-is for behavioral parity.
        if not isinstance(required, (list, tuple)):
            raise TypeError("required_pass is expected to be the type of " + "list/tuple/set.")
        disabled = list(disabled_pass) if disabled_pass else []
        if not isinstance(disabled, (list, tuple)):
            raise TypeError("disabled_pass is expected to be the type of " + "list/tuple/set.")
        instruments = list(instruments) if instruments else []
        if not isinstance(instruments, (list, tuple)):
            raise TypeError("instruments is expected to be the type of " + "list/tuple/set.")
        # Normalize falsy configs (e.g. {}) to None for the FFI call.
        config = config if config else None
        self.__init_handle_by_constructor__(
            _ffi_transform_api.PassContext, opt_level, required, disabled, instruments, config
        )

    def __enter__(self):
        _ffi_transform_api.EnterPassContext(self)
        return self

    def __exit__(self, ptype, value, trace):
        _ffi_transform_api.ExitPassContext(self)

    def override_instruments(self, instruments):
        """Override instruments within this PassContext.

        If there are existing instruments, their ``exit_pass_ctx`` callbacks are called.
        Then switching to new instruments and calling new ``enter_pass_ctx`` callbacks.

        instruments : Sequence[PassInstrument]
            The list of pass instrument implementations.
        """
        _ffi_transform_api.OverrideInstruments(self, instruments)

    @staticmethod
    def current():
        """Return the current pass context."""
        return _ffi_transform_api.GetCurrentPassContext()

    @staticmethod
    def list_configs():
        """List all registered `PassContext` configuration names and metadata.

        Returns
        -------
        configs : Dict[str, Dict[str, str]]
        """
        return _ffi_transform_api.ListConfigs()
@tvm._ffi.register_object("transform.Pass")
class Pass(tvm.runtime.Object):
    """The base class of all passes. All methods here are just simple wrappers
    that are implemented in the backend. They are defined for users to
    conveniently interact with the base class.
    """

    @property
    def info(self):
        """Get the pass meta."""
        return _ffi_transform_api.Info(self)

    def __call__(self, mod):
        """Execute the pass. Note that for sequential pass, the dependency among
        different passes will be resolved in the backend.

        Parameters
        ----------
        mod : tvm.IRModule
            The module that a certain optimization is performed on.

        Returns
        -------
        mod : tvm.IRModule
            The updated module after applying this pass.
        """
        # Execution is delegated entirely to the C++ backend.
        return _ffi_transform_api.RunPass(self, mod)
@tvm._ffi.register_object("transform.ModulePass")
class ModulePass(Pass):
    """A pass that operates on a whole tvm.IRModule.

    Users normally do not interact with this class directly. Instead, a
    module pass should be created through :func:`module_pass`, whose API is
    flexible enough to build a module pass in several different manners.
    All members of a module pass are accessible from this base class. The
    same rule applies to FunctionPass as well.
    """
@tvm._ffi.register_object("transform.Sequential")
class Sequential(Pass):
    """A pass that runs a sequence of pass objects one after another.

    Users may also supply a series of passes that should not be applied
    while running the sequence; pass dependencies are resolved in the
    backend as well.

    Parameters
    ----------
    passes : Optional[List[Pass]]
        A sequence of passes candidate for optimization.

    opt_level : Optional[int]
        The optimization level of this sequential pass; it defaults to 0.
        Note that some of the passes within the Sequential may still not be
        executed if their own opt_level is higher than the provided one.

    name : Optional[str]
        The name of the sequential pass.

    required : Optional[List[str]]
        The list of passes that the sequential pass depends on.
    """

    def __init__(self, passes=None, opt_level=0, name="sequential", required=None):
        passes = passes or []
        if not isinstance(passes, (list, tuple)):
            raise TypeError("passes must be a list of Pass objects.")
        required = required or []
        if not isinstance(required, (list, tuple)):
            raise TypeError("Required is expected to be the type of list/tuple.")
        self.__init_handle_by_constructor__(
            _ffi_transform_api.Sequential, passes, opt_level, name, required
        )
def _wrap_class_module_pass(pass_cls, pass_info):
    """Wrap a python class as a module pass."""

    class PyModulePass(ModulePass):
        """Internal wrapper class to create a class instance."""

        def __init__(self, *args, **kwargs):
            # Initialize the handle first, so cleanup stays safe even if the
            # construction of pass_cls below fails.
            self.handle = None
            inst = pass_cls(*args, **kwargs)

            # It is important not to capture `self` in this closure to
            # avoid a cyclic dependency.
            def _pass_func(mod, ctx):
                return inst.transform_module(mod, ctx)

            self.__init_handle_by_constructor__(
                _ffi_transform_api.MakeModulePass, _pass_func, pass_info
            )
            self._inst = inst

        def __getattr__(self, name):
            # Fall back to the wrapped instance's attributes when the
            # wrapper itself does not define one.
            return self._inst.__getattribute__(name)

    functools.update_wrapper(PyModulePass.__init__, pass_cls.__init__)
    PyModulePass.__name__ = pass_cls.__name__
    PyModulePass.__doc__ = pass_cls.__doc__
    PyModulePass.__module__ = pass_cls.__module__
    return PyModulePass
def module_pass(pass_func=None, opt_level=None, name=None, required=None):
    """Decorate a module pass.

    This function returns a callback when pass_func is provided.
    Otherwise, it serves as a decorator function.

    pass_func can also be a class type with a method ``transform_module``.
    This function will then create a decorated ModulePass using
    ``transform_module`` as the pass function.

    Parameters
    ----------
    pass_func : Optional[Callable[(Module, PassContext) -> Module]]
        The transformation function or class.

    opt_level : int
        The optimization level of this module pass.

    name : Optional[str]
        The name of the module pass. The name could be empty. In this case, the
        name of the optimization function will be used as the pass name.

    required : Optional[List[str]]
        The list of passes that the module pass is dependent on.

    Returns
    -------
    create_module_pass : Union[Callable, ModulePass]
        A decorator will be returned if pass_func is not provided,
        otherwise return the decorated result.
        The returned decorator has two behaviors depending on the input:
        A new ModulePass will be returned when we decorate a pass function.
        A new ModulePass class will be returned when we decorate a class type.

    Examples
    --------
    The following code block decorates a module pass class.

    .. code-block:: python

        @relay.transform.module_pass
        class CustomPipeline:
            def __init__(self, enable_fold):
                self.enable_fold = enable_fold
                self.cse = relay.transform.EliminateCommonSubexpr()
                self.const_fold = relay.transform.FoldConstant()

            def transform_module(self, mod, ctx):
                mod = self.cse(mod, ctx)
                if self.enable_fold:
                    mod = self.const_fold(mod, ctx)
                return mod

        # create an instance of customized pipeline
        pipeline = CustomPipeline(enable_fold=False)
        assert isinstance(pipeline, transform.ModulePass)
        # run the pipeline.
        output_module = pipeline(input_module)

    The following code creates a module pass by decorating
    a user defined transform function.

    .. code-block:: python

        @relay.transform.module_pass(opt_level=2)
        def transform(mod, ctx):
            tp = relay.TensorType((10,), "float32")
            x = relay.var("x", tp)
            gv = relay.GlobalVar("var")
            func = relay.Function([x], relay.abs(x))
            new_mod = tvm.IRModule({gv: func})
            new_mod.update(mod)
            return new_mod

        module_pass = transform
        assert isinstance(module_pass, transform.ModulePass)
        assert module_pass.info.opt_level == 2

        # Given a module m, the optimization could be invoked as the following:
        updated_mod = module_pass(m)
        # Now a function abs should be added to the module m.
    """
    if opt_level is None:
        raise ValueError("Please provide opt_level for the module pass.")

    required = required or []
    if not isinstance(required, (list, tuple)):
        raise TypeError("Required is expected to be the type of list/tuple.")

    def create_module_pass(pass_arg):
        """Internal function that creates a module pass"""
        fname = name or pass_arg.__name__
        info = PassInfo(opt_level, fname, required)
        if inspect.isclass(pass_arg):
            return _wrap_class_module_pass(pass_arg, info)
        if not callable(pass_arg):
            raise TypeError("pass_func must be a callable for Module pass")
        return _ffi_transform_api.MakeModulePass(pass_arg, info)

    if pass_func:
        return create_module_pass(pass_func)
    return create_module_pass
def PrintIR(header="", show_meta_data=False):
    """A special trace pass that prints the header and IR.

    Parameters
    ----------
    header : str
        The header to be displayed along with the dump.

    show_meta_data : bool
        A boolean flag to indicate if meta data should be printed.

    Returns
    -------
    pass : Pass
        The pass that performs the printing.
    """
    return _ffi_transform_api.PrintIR(header, show_meta_data)
| 13,000 | 34.425068 | 99 | py |
tvm | tvm-main/python/tvm/ir/module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""IRModule that holds the functions and type definitions."""
import tvm._ffi
from tvm._ffi.base import string_types
from tvm.runtime import Scriptable
from . import _ffi_api
from . import expr as _expr
from . import type as _ty
from .base import Node
@tvm._ffi.register_object("IRModule")
class IRModule(Node, Scriptable):
    """IRModule that holds functions and type definitions.

    IRModule is the basic unit for all IR transformations across the stack.

    Parameters
    ----------
    functions : Optional[dict]
        Map of global var (or str name) to BaseFunc.

    type_definitions : Optional[dict]
        Map of global type var (or str name) to the type definition.

    attrs : Optional[dict]
        Module-level attributes; converted into a "DictAttrs" node.
    """

    def __init__(self, functions=None, type_definitions=None, attrs=None):
        if functions is None:
            functions = {}
        elif isinstance(functions, dict):
            # Normalize str keys into GlobalVar keys.
            mapped_funcs = {}
            for k, v in functions.items():
                if isinstance(k, string_types):
                    k = _expr.GlobalVar(k)
                if not isinstance(k, _expr.GlobalVar):
                    raise TypeError("Expect functions to be Dict[GlobalVar, Function]")
                mapped_funcs[k] = v
            functions = mapped_funcs
        if type_definitions is None:
            type_definitions = {}
        elif isinstance(type_definitions, dict):
            # Normalize str keys into GlobalTypeVar keys.
            mapped_type_defs = {}
            for k, v in type_definitions.items():
                if isinstance(k, string_types):
                    k = _ty.GlobalTypeVar(k)
                if not isinstance(k, _ty.GlobalTypeVar):
                    raise TypeError("Expect type_definitions to be Dict[GlobalTypeVar, Type]")
                mapped_type_defs[k] = v
            type_definitions = mapped_type_defs
        attrs = None if not attrs else attrs
        if attrs is not None:
            # Bug fix: `ast` was referenced here without being imported
            # anywhere in this module, so passing a non-empty `attrs` raised
            # a NameError. Import it locally where it is needed.
            import ast  # pylint: disable=import-outside-toplevel

            # Round-trip through a literal so mapping-like objects (e.g. tvm
            # containers) become a plain python dict before node creation.
            attrs = ast.literal_eval(str(attrs))
            attrs = tvm.ir.make_node("DictAttrs", **attrs)
        self.__init_handle_by_constructor__(
            _ffi_api.IRModule,
            functions,
            type_definitions,
            attrs,
        )

    def __setitem__(self, var, val):
        """Add a mapping to the module.

        Parameters
        ----------
        var : GlobalVar
            The global variable.

        val : Union[Function, Type]
            The value.
        """
        return self._add(var, val, True)

    def _add(self, var, val, update=True):
        # Internal helper shared by __setitem__: dispatches on the value kind
        # (expression vs. type) and normalizes str keys into global vars.
        if isinstance(val, _expr.RelayExpr):
            if isinstance(var, string_types):
                if _ffi_api.Module_ContainGlobalVar(self, var):
                    var = _ffi_api.Module_GetGlobalVar(self, var)
                else:
                    var = _expr.GlobalVar(var)
            _ffi_api.Module_Add(self, var, val, update)
        else:
            assert isinstance(val, _ty.Type)
            if isinstance(var, string_types):
                var = _ty.GlobalTypeVar(var)
            _ffi_api.Module_AddDef(self, var, val, update)

    def __getitem__(self, var):
        """Lookup a global definition by name or by variable.

        Parameters
        ----------
        var : Union[String, GlobalVar, GlobalTypeVar]
            The name or global variable.

        Returns
        -------
        val : Union[Function, Type]
            The definition referenced by :code:`var` (either a function or type).
        """
        if isinstance(var, string_types):
            return _ffi_api.Module_Lookup_str(self, var)
        if isinstance(var, _expr.GlobalVar):
            return _ffi_api.Module_Lookup(self, var)
        return _ffi_api.Module_LookupDef(self, var)

    def update(self, other):
        """Insert functions in another Module to current one.

        Parameters
        ----------
        other : IRModule
            The module to merge into the current Module.
        """
        if isinstance(other, dict):
            other = IRModule(other)
        return _ffi_api.Module_Update(self, other)

    def update_func(self, var, func):
        """Update the function corresponding to a global variable in the
        module.

        Parameters
        ----------
        var : GlobalVar
            The global variable.

        func : tvm.relay.Function
            The function to be inserted.
        """
        return _ffi_api.Module_UpdateFunction(self, var, func)

    def get_global_var(self, name):
        """Get a global variable in the function by name.

        Parameters
        ----------
        name : str
            The name of the global variable.

        Returns
        -------
        global_var : GlobalVar
            The global variable mapped to :code:`name`.

        Raises
        ------
        tvm.error.TVMError if we cannot find corresponding global var.
        """
        return _ffi_api.Module_GetGlobalVar(self, name)

    def get_global_vars(self):
        """Collect all global vars defined in this module.

        Returns
        -------
        global_vars : Array[GlobalVar]
            An array of global vars.
        """
        return _ffi_api.Module_GetGlobalVars(self)

    def get_global_type_vars(self):
        """Collect all global type vars defined in this module.

        Returns
        -------
        global_type_vars : Array[GlobalTypeVar]
            An array of global type vars.
        """
        return _ffi_api.Module_GetGlobalTypeVars(self)

    def get_global_type_var(self, name):
        """Get a global type variable in the function by name.

        Parameters
        ----------
        name : str
            The name of the global type variable.

        Returns
        -------
        global_type_var : GlobalTypeVar
            The global variable mapped to :code:`name`.

        Raises
        ------
        tvm.error.TVMError if we cannot find corresponding global type var.
        """
        return _ffi_api.Module_GetGlobalTypeVar(self, name)

    def get_constructor(self, tag):
        """Look up an ADT constructor by tag.

        Parameters
        ----------
        tag : int
            The tag for a constructor.

        Returns
        -------
        constructor : Constructor
            The constructor associated with the given tag.

        Raises
        ------
        tvm.error.TVMError if the corresponding constructor cannot be found.
        """
        return _ffi_api.Module_LookupTag(self, tag)

    def get_type(self, name):
        """Get the ADT type var and its constructors by name.

        Parameters
        ----------
        name : str
            The name of the global type variable.

        Returns
        -------
        result : tuple
            The global type var followed by its constructors.
        """
        ty_var = self.get_global_type_var(name)
        ty_data = self.type_definitions[ty_var]
        return tuple([ty_var] + list(ty_data.constructors))

    @staticmethod
    def from_expr(expr, functions=None, type_defs=None):
        """Construct a module from a standalone expression.

        Parameters
        ----------
        expr : RelayExpr
            The starting expression

        functions : Optional[dict]
            Map of global vars to function definitions

        type_defs : Optional[dict]
            Map of global type vars to type definitions

        Returns
        -------
        mod : Module
            A module containing the passed definitions,
            where expr is set as the entry point
            (wrapped in a function if necessary)
        """
        funcs = functions if functions is not None else {}
        defs = type_defs if type_defs is not None else {}
        return _ffi_api.Module_FromExpr(expr, funcs, defs)

    def _import(self, file_to_import):
        # Internal helper: import definitions from another file into this
        # module via the backend.
        return _ffi_api.Module_Import(self, file_to_import)

    def import_from_std(self, file_to_import):
        """Import a module from the standard library and re-run type inference."""
        # TODO(@jroesch): clean up prelude
        _ffi_api.Module_ImportFromStd(self, file_to_import)
        return tvm.relay.transform.InferType()(self)

    def get_attr(self, attr_key):
        """Get the IRModule attribute.

        Parameters
        ----------
        attr_key : str
            The attribute key.

        Returns
        -------
        attr_value : Any
            Attribute value
        """
        return _ffi_api.Module_GetAttr(self, attr_key)

    def with_attr(self, attr_key, attr_value):
        """Copy the IRModule and add an attribute to it.

        Parameters
        ----------
        attr_key : str
            The attribute key.

        attr_value : Object
            The new attribute value.

        Returns
        -------
        mod : IRModule
            A new copy of the IRModule with the attribute
        """
        return _ffi_api.Module_WithAttr(self, attr_key, attr_value)

    def astext(self, show_meta_data=True, annotate=None):
        """Get the text format of the expression.

        Parameters
        ----------
        show_meta_data : bool
            Whether to include meta data section in the text
            if there is meta data.

        annotate : Optional[Object->str]
            Optionally annotate function to provide additional
            information in the comment block.

        Returns
        -------
        text : str
            The text format of the expression.

        Notes
        -----
        The meta data section is necessary to fully parse the text format.
        However, it can contain dumps that are big (e.g constant weights),
        so it can be helpful to skip printing the meta data section.
        """
        from tvm.relay import astext  # pylint: disable=import-outside-toplevel

        return astext(self, show_meta_data, annotate)
| 10,067 | 29.883436 | 94 | py |
tvm | tvm-main/python/tvm/ir/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.ir"""
import tvm._ffi
# Populate this module with all C++-registered functions under the "ir" prefix.
tvm._ffi._init_api("ir", __name__)
| 864 | 38.318182 | 62 | py |
tvm | tvm-main/python/tvm/ir/_ffi_transform_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.transform"""
import tvm._ffi
# Populate this module with all C++-registered functions under the "transform" prefix.
tvm._ffi._init_api("transform", __name__)
| 878 | 38.954545 | 62 | py |
tvm | tvm-main/python/tvm/ir/adt.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Algebraic data type definitions."""
import tvm._ffi
from .type import Type
from .expr import RelayExpr
from . import _ffi_api
@tvm._ffi.register_object("relay.Constructor")
class Constructor(RelayExpr):
    """Relay ADT constructor.

    Parameters
    ----------
    name_hint : str
        Name of constructor (only a hint).

    inputs : List[Type]
        Input types.

    belong_to : GlobalTypeVar
        Denotes which ADT the constructor belongs to.
    """

    def __init__(self, name_hint, inputs, belong_to):
        self.__init_handle_by_constructor__(_ffi_api.Constructor, name_hint, inputs, belong_to)

    def __call__(self, *args):
        """Call the constructor.

        Parameters
        ----------
        args : List[RelayExpr]
            The arguments to the constructor.

        Returns
        -------
        call : RelayExpr
            A call to the constructor.
        """
        # Imported lazily to avoid a circular dependency at module load time.
        # pylint: disable=import-outside-toplevel
        from tvm import relay

        return relay.Call(self, args)
@tvm._ffi.register_object("relay.TypeData")
class TypeData(Type):
    """Stores the definition for an Algebraic Data Type (ADT) in Relay.

    Note that ADT definitions are treated as type-level functions because
    the type parameters need to be given for an instance of the ADT. Thus,
    any global type var that is an ADT header needs to be wrapped in a
    type call that passes in the type params.

    Parameters
    ----------
    header : GlobalTypeVar
        The name of the ADT. ADTs with the same constructors but different
        names are treated as different types.

    type_vars : List[TypeVar]
        Type variables that appear in constructors.

    constructors : List[Constructor]
        The constructors for the ADT.
    """

    def __init__(self, header, type_vars, constructors):
        self.__init_handle_by_constructor__(_ffi_api.TypeData, header, type_vars, constructors)
| 2,758 | 30 | 95 | py |
tvm | tvm-main/python/tvm/ir/supply.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Suppliers that are used to guarantee uniqueness of names and GlobalVars."""
import tvm
from tvm import Object, IRModule
from . import _ffi_api
@tvm._ffi.register_object("NameSupply")
class NameSupply(Object):
    """NameSupply that can be used to generate unique names.

    Parameters
    ----------
    prefix : str
        The prefix to be added to the generated names.
    """

    def __init__(self, prefix=""):
        self.__init_handle_by_constructor__(_ffi_api.NameSupply, prefix)

    def fresh_name(self, name, add_prefix=True):
        """Generates a unique name derived from the given one.

        Parameters
        ----------
        name : str
            The name from which the generated name is derived.

        add_prefix : bool
            If True, the prefix of this NameSupply is prepended to the name.
        """
        return _ffi_api.NameSupply_FreshName(self, name, add_prefix)

    def reserve_name(self, name, add_prefix=True):
        """Reserves an existing name with this NameSupply.

        Parameters
        ----------
        name : str
            The name to be reserved.

        add_prefix : bool
            If True, the prefix of this NameSupply is prepended to the name
            before reserving it.
        """
        return _ffi_api.NameSupply_ReserveName(self, name, add_prefix)

    def contains_name(self, name, add_prefix=True):
        """Checks whether this NameSupply has already generated a name.

        Parameters
        ----------
        name : str
            The name to check.

        add_prefix : bool
            If True, the prefix of this NameSupply is prepended to the name
            before checking for it.
        """
        return _ffi_api.NameSupply_ContainsName(self, name, add_prefix)
@tvm._ffi.register_object("GlobalVarSupply")
class GlobalVarSupply(Object):
    """GlobalVarSupply that holds a mapping between names and GlobalVars.

    GlobalVarSupply can be used to generate new GlobalVars with a unique name.
    It also can be used to retrieve previously generated GlobalVars based on a name.

    Parameters
    ----------
    value : Union[List[IRModule], IRModule, NameSupply]
        The IRModules used to build this GlobalVarSupply or a NameSupply.

    Raises
    ------
    TypeError
        If `value` is of an unsupported type.
    """

    def __init__(self, value=None):
        # Default to an empty NameSupply when nothing is provided.
        if value is None:
            value = NameSupply("")
        if isinstance(value, NameSupply):
            self.__init_handle_by_constructor__(_ffi_api.GlobalVarSupply_NameSupply, value)
        elif isinstance(value, (list, tvm.container.Array)):
            self.__init_handle_by_constructor__(_ffi_api.GlobalVarSupply_IRModules, value)
        elif isinstance(value, IRModule):
            self.__init_handle_by_constructor__(_ffi_api.GlobalVarSupply_IRModule, value)
        else:
            # Bug fix: previously an unsupported value type fell through all
            # branches, silently leaving the object uninitialized. Fail fast.
            raise TypeError(f"Unsupported value type for GlobalVarSupply: {type(value)}")

    def fresh_global(self, name, add_prefix=True):
        """Generates a unique GlobalVar from this supply.

        Parameters
        ----------
        name : str
            The name from which the name of the GlobalVar is derived.

        add_prefix : bool
            If True, the prefix of the contained NameSupply is prepended
            to the name.
        """
        return _ffi_api.GlobalVarSupply_FreshGlobal(self, name, add_prefix)

    def unique_global_for(self, name, add_prefix=True):
        """Looks up a GlobalVar with the given name in this supply. If no entry
        is found, creates one, places it in the cache and returns it.

        Parameters
        ----------
        name : str
            The name of the GlobalVar to search for.

        add_prefix : bool
            If True, the prefix of the contained NameSupply is prepended to the
            name before performing the search.
        """
        return _ffi_api.GlobalVarSupply_UniqueGlobalFor(self, name, add_prefix)

    def reserve_global(self, global_var, allow_conflict=False):
        """Reserves an existing GlobalVar with this supply.

        Parameters
        ----------
        global_var : GlobalVar
            The GlobalVar to be registered.

        allow_conflict : bool
            Allow conflict with other GlobalVars that have the same name.
        """
        return _ffi_api.GlobalVarSupply_ReserveGlobalVar(self, global_var, allow_conflict)
| 5,189 | 35.549296 | 97 | py |
tvm | tvm-main/python/tvm/ir/function.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Function defintiions."""
from enum import IntEnum
import tvm.runtime
from .expr import RelayExpr
from . import _ffi_api
class CallingConv(IntEnum):
    """Possible kinds of calling conventions."""

    # Default calling convention.
    DEFAULT = 0
    # Packed-function calling convention in C.
    C_PACKED_FUNC = 1
    # Device kernel launch convention.
    DEVICE_KERNEL_LAUNCH = 2
class BaseFunc(RelayExpr):
    """Base class of all functions."""

    @property
    def attrs(self):
        """Return the attrs member of the function."""
        return _ffi_api.BaseFunc_Attrs(self)

    def with_attr(self, attr_key_or_dict, attr_value=None):
        """Create a new copy of the function and update the attribute.

        Parameters
        ----------
        attr_key_or_dict : Union[str, dict]
            The attribute key to use or a dict containing multiple key value pairs.

        attr_value : Object
            The new attribute value.

        Returns
        -------
        func : Function
            A new copy of the function
        """
        # Copy up front so the updates below are safe under copy-on-write.
        res = _ffi_api.BaseFuncCopy(self)
        if not isinstance(attr_key_or_dict, dict):
            return _ffi_api.BaseFuncWithAttr(
                res._move(), attr_key_or_dict, tvm.runtime.convert(attr_value)
            )
        for key, val in attr_key_or_dict.items():
            res = _ffi_api.BaseFuncWithAttr(res._move(), key, tvm.runtime.convert(val))
        return res
| 2,256 | 31.242857 | 91 | py |
tvm | tvm-main/python/tvm/ir/expr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common expressions data structures in the IR."""
from numbers import Number
import tvm._ffi
from ..runtime import Scriptable, const, convert
from . import _ffi_api
from .base import Node
class BaseExpr(Node):
    """Root class shared by every expression kind in the IR."""
class PrimExpr(BaseExpr):
    """Base class of all primitive expressions.

    PrimExpr is used in the low-level code optimizations and
    integer analysis.
    """
class RelayExpr(BaseExpr):
    """Base class of all non-primitive expressions."""

    @property
    def checked_type(self):
        """Get the checked type of tvm.relay.Expr.

        Returns
        -------
        checked_type : tvm.relay.Type
            The checked type.
        """
        checked = self._checked_type_
        if checked is None:
            raise ValueError("The type checker has not populated the checked_type for this node")
        return checked
@tvm._ffi.register_object("GlobalVar")
class GlobalVar(RelayExpr):
    """A global variable in the IR.

    GlobalVar is used to refer to the global functions
    stored in the IRModule.

    Parameters
    ----------
    name_hint : str
        The name of the variable.
    """

    def __init__(self, name_hint, type_annot=None):
        self.__init_handle_by_constructor__(_ffi_api.GlobalVar, name_hint, type_annot)

    def __call__(self, *args):
        """Call the global variable.

        Parameters
        ----------
        args : List[RelayExpr]
            The arguments to the call.

        Returns
        -------
        call : BaseExpr
            A call taking the variable as a function.
        """
        # pylint: disable=import-outside-toplevel
        if all(isinstance(arg, RelayExpr) for arg in args):
            from tvm import relay

            return relay.Call(self, args)
        if all(isinstance(arg, (Number, PrimExpr)) for arg in args):
            return tvm.tir.call_tir(self, *args)
        arg_types = [type(arg) for arg in args]
        raise RuntimeError(f"Do not know how to handle GlobalVar.__call__ for types {arg_types}")

    def astext(self, show_meta_data=True, annotate=None):
        """Get the text format of the expression.

        Parameters
        ----------
        show_meta_data : bool
            Whether to include meta data section in the text
            if there is meta data.

        annotate : Optional[Object->str]
            Optionally annotate function to provide additional
            information in the comment block.

        Returns
        -------
        text : str
            The text format of the expression.

        Notes
        -----
        The meta data section is necessary to fully parse the text format.
        However, it can contain dumps that are big (e.g constant weights),
        so it can be helpful to skip printing the meta data section.
        """
        from tvm.relay import astext  # pylint: disable=import-outside-toplevel

        return astext(self, show_meta_data, annotate)
@tvm._ffi.register_object
class Range(Node, Scriptable):
    """Represent a range in TVM.

    You do not need to create a Range explicitly.
    Python lists and tuples will be converted automatically to a Range in API functions.

    Parameters
    ----------
    begin : PrimExpr
        The begin value of the range when end is None.
        Otherwise it is the length of the range.

    end : Optional[PrimExpr]
        The end value of the range.

    span : Optional[Span]
        The location of this itervar in the source code.

    Note
    ----
    The constructor creates the range `[begin, end)`
    if the end argument is not None. Otherwise, it creates `[0, begin)`.
    """

    def __init__(self, begin, end=None, span=None):
        # Single-argument form: Range(n) means [0, n).
        if end is None:
            end = convert(begin)
            begin = const(0, dtype=end.dtype, span=span)
        self.__init_handle_by_constructor__(_ffi_api.Range, begin, end, span)

    @staticmethod
    def from_min_extent(min_value, extent, span=None):
        """Construct a Range by min and extent.

        This constructs a range in [min_value, min_value + extent)

        Parameters
        ----------
        min_value : PrimExpr
            The minimum value of the range.

        extent : PrimExpr
            The extent of the range.

        span : Optional[Span]
            The location of this itervar in the source code.

        Returns
        -------
        rng : Range
            The constructed range.
        """
        return _ffi_api.Range_from_min_extent(min_value, extent, span)
| 5,325 | 28.588889 | 97 | py |
tvm | tvm-main/python/tvm/ir/affine_type.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Types for quantized Tensors."""
import tvm._ffi
from . import _ffi_api
from .base import Node
class AffineType(Node):
    """The base class of Affine Types."""

    def __eq__(self, other):
        """Compare two types for structural equivalence."""
        # NOTE(review): defining __eq__ without __hash__ makes subclasses
        # unhashable under Python 3 semantics — confirm this is intended.
        return bool(tvm.ir.structural_equal(self, other))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        # pylint: disable=import-outside-toplevel
        from tvm.relay import pretty_print

        return pretty_print(self)
@tvm._ffi.register_object("TensorAffineType")
class TensorAffineType(AffineType):
    """The quantized type of a tensor, with scale, zero point, and datatype.

    The real space value is calculated as ``x = x_q * scale + zero_point``.

    Parameters
    ----------
    scale : Expr
        The scale.

    zero_point : Expr
        The zero point.

    dtype : str
        The content data type.

    axis : int
        The axis for per-channel quantization.
    """

    def __init__(self, scale, zero_point, dtype, axis=-1):
        self.__init_handle_by_constructor__(
            _ffi_api.TensorAffineType, scale, zero_point, dtype, axis
        )
@tvm._ffi.register_object("TupleAffineType")
class TupleAffineType(AffineType):
    """Affine types of a node with multiple outputs.

    Parameters
    ----------
    types : List[TensorAffineType]
        The affine types of each output field.
    """

    def __init__(self, types):
        self.__init_handle_by_constructor__(_ffi_api.TupleAffineType, types)
| 2,308 | 27.8625 | 85 | py |
tvm | tvm-main/python/tvm/ir/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import
"""Common data structures across all IR variants."""
from . import diagnostics, instrument, transform
from .adt import Constructor, TypeData
from .affine_type import TensorAffineType, TupleAffineType
from .attrs import Attrs, DictAttrs, make_node
from .base import (
EnvFunc,
Node,
SourceName,
Span,
SequentialSpan,
assert_structural_equal,
load_json,
save_json,
structural_equal,
structural_hash,
)
from .container import Array, Map
from .expr import BaseExpr, GlobalVar, PrimExpr, Range, RelayExpr
from .function import BaseFunc, CallingConv
from .memory_pools import (
ConstantMemoryPools,
ConstantPoolInfo,
PoolInfo,
PoolInfoProperties,
WorkspaceMemoryPools,
WorkspacePoolInfo,
)
from .module import IRModule
from .op import Op, register_intrin_lowering, register_op_attr
from .tensor_type import TensorType
from .type import (
FuncType,
GlobalTypeVar,
IncompleteType,
PointerType,
PrimType,
RelayRefType,
TupleType,
Type,
TypeConstraint,
TypeKind,
TypeVar,
)
from .type_relation import TypeCall, TypeRelation
| 1,939 | 29.793651 | 65 | py |
tvm | tvm-main/python/tvm/ir/memory_pools.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Objects for Memory Pools to be used within the compilation"""
from typing import Optional, List
from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.runtime import NDArray
from . import _ffi_api
@register_object("ir.PoolInfo")
class PoolInfo(Object):
    """Base description of a memory pool.

    A PoolInfo holds information about a memory pool into which statically
    sized allocate nodes will be pooled.  Concrete pools are described by
    the subclasses WorkspacePoolInfo and ConstantPoolInfo.
    """

    def __init__(self):
        # Intentionally empty: the underlying FFI handle is created by
        # the subclass constructors.
        pass
@register_object("ir.PoolInfoProperties")
class PoolInfoProperties(Object):
    """Properties attached to a memory pool description.

    Parameters
    ----------
    size_hint_bytes : Optional[int]
        Expected size hint for the allocator; -1 (the default) means the
        pool is not size restricted.
    clock_frequency_hz : Optional[int]
        Clock frequency of the pool in Hz; -1 when unknown.
    read_bandwidth_bytes_per_cycle : Optional[int]
        Read bandwidth in bytes/cycle; -1 when unknown.
    write_bandwidth_bytes_per_cycle : Optional[int]
        Write bandwidth in bytes/cycle; -1 when unknown.
    read_latency_cycles : Optional[int]
        Read latency in cycles; defaults to 0 when unknown.
    write_latency_cycles : Optional[int]
        Write latency in cycles; defaults to 0 when unknown.
    target_burst_bytes : Optional[Union[Dict[Target, int], None]]
        Burst length in bytes per target; a burst length of 1 byte is
        assumed for targets without an entry.
    """

    def __init__(
        self,
        size_hint_bytes: Optional[int] = -1,
        clock_frequency_hz: Optional[int] = -1,
        read_bandwidth_bytes_per_cycle: Optional[int] = -1,
        write_bandwidth_bytes_per_cycle: Optional[int] = -1,
        read_latency_cycles: Optional[int] = 0,
        write_latency_cycles: Optional[int] = 0,
        target_burst_bytes=None,
    ):
        # Normalise a missing/falsy burst mapping to an empty dict.
        burst_bytes = target_burst_bytes or {}
        self.__init_handle_by_constructor__(
            _ffi_api.PoolInfoProperties,  # type: ignore # pylint: disable=no-member
            size_hint_bytes,
            clock_frequency_hz,
            read_bandwidth_bytes_per_cycle,
            write_bandwidth_bytes_per_cycle,
            read_latency_cycles,
            write_latency_cycles,
            burst_bytes,
        )
@register_object("ir.ConstantInfo")
class ConstantInfo(Object):
    """Describes a single constant placed in a constant pool.

    Parameters
    ----------
    name_hint : str
        Name of the constant.
    byte_offset : int
        Byte offset of the constant within the pool.
    data : NDArray
        The constant's data.
    """

    def __init__(self, name_hint: str, byte_offset: int, data: NDArray):
        ctor_args = (name_hint, byte_offset, data)
        self.__init_handle_by_constructor__(
            _ffi_api.ConstantInfo,  # type: ignore # pylint: disable=no-member
            *ctor_args,
        )
@register_object("ir.WorkspacePoolInfo")
class WorkspacePoolInfo(PoolInfo):
    """Description of a read-write (workspace) memory pool.

    Statically sized allocate nodes are pooled into the described pool.

    Parameters
    ----------
    pool_name : str
        The name of the memory pool.
    targets : list[Target]
        The targets that can access this pool.
    pool_info_properties : PoolInfoProperties
        The properties of the pool.
    """

    def __init__(
        self,
        pool_name: str,
        targets,
        pool_info_properties=None,
    ):
        super().__init__()
        # Fall back to default properties when none are supplied.
        props = PoolInfoProperties() if pool_info_properties is None else pool_info_properties
        self.__init_handle_by_constructor__(
            _ffi_api.WorkspacePoolInfo,  # type: ignore # pylint: disable=no-member
            pool_name,
            targets,
            props,
        )
@register_object("ir.ConstantPoolInfo")
class ConstantPoolInfo(PoolInfo):
    """Description of a read-only (constant) memory pool.

    Statically sized allocate nodes are pooled into the described pool.

    Parameters
    ----------
    pool_name : str
        The name of the memory pool.
    targets : list[Target]
        The targets that can access this pool.
    constant_info_arr : list[ConstantInfo]
        The constants placed in this pool.
    pool_info_properties : PoolInfoProperties
        The properties of the pool.
    """

    def __init__(
        self,
        pool_name: str,
        targets,  # list[Target]
        constant_info_arr=None,  # list[ConstantInfo]
        pool_info_properties=None,
    ):
        super().__init__()
        # Replace unset defaults with fresh, empty/default values.
        constants = [] if constant_info_arr is None else constant_info_arr
        props = PoolInfoProperties() if pool_info_properties is None else pool_info_properties
        self.__init_handle_by_constructor__(
            _ffi_api.ConstantPoolInfo,  # type: ignore # pylint: disable=no-member
            pool_name,
            targets,
            constants,
            props,
        )
@register_object("ir.WorkspaceMemoryPools")
class WorkspaceMemoryPools(Object):
    """Collection of WorkspacePoolInfo objects used as workspace memory
    during compilation.

    Parameters
    ----------
    pools : List[WorkspacePoolInfo]
        The workspace pools to use during compilation.
    """

    def __init__(self, pools: List[WorkspacePoolInfo]):
        ctor = _ffi_api.WorkspaceMemoryPools  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor, pools)
@register_object("ir.ConstantMemoryPools")
class ConstantMemoryPools(Object):
    """Collection of ConstantPoolInfo objects used as read-only memory
    during compilation.

    Parameters
    ----------
    pools : List[ConstantPoolInfo]
        The constant pools to use during compilation.
    """

    def __init__(self, pools: List[ConstantPoolInfo]):
        ctor = _ffi_api.ConstantMemoryPools  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor, pools)
@register_object("ir.AllocatedPoolInfo")
class AllocatedPoolInfo(Object):
    """Records an allocation made inside a given pool.

    Parameters
    ----------
    pool : PoolInfo
        The pool in which the memory is allocated.
    allocated_size : int
        The number of bytes allocated.
    pool_var_idx : int
        Index of the pool variable (defaults to 0) — presumably the
        position of the pool parameter; confirm against the C++ side.
    """

    def __init__(
        self,
        pool: PoolInfo,
        allocated_size: int,
        pool_var_idx: int = 0,
    ):
        ctor_args = (pool, allocated_size, pool_var_idx)
        self.__init_handle_by_constructor__(
            _ffi_api.AllocatedPoolInfo,  # type: ignore # pylint: disable=no-member
            *ctor_args,
        )
| 8,194 | 29.464684 | 118 | py |
tvm | tvm-main/python/tvm/ir/type_relation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Type relation and function for type checking."""
import tvm._ffi
from .type import Type, TypeConstraint
from . import _ffi_api
@tvm._ffi.register_object("TypeCall")
class TypeCall(Type):
    """Type-level function application.

    Parameters
    ----------
    func: tvm.ir.Type
        The type-level function being applied.
    args: List[tvm.ir.Type]
        The type arguments.

    Returns
    -------
    type_call: TypeCall
        The type function application.
    """

    def __init__(self, func, args):
        ctor_args = (func, args)
        self.__init_handle_by_constructor__(_ffi_api.TypeCall, *ctor_args)
@tvm._ffi.register_object("TypeRelation")
class TypeRelation(TypeConstraint):
    """User-defined relation over types.

    More general than TypeCall: a relation constrains inputs and outputs
    together, so inference can flow in both directions.

    Parameters
    ----------
    func : EnvFunc
        User defined relation function.
    args : [tvm.ir.Type]
        List of types passed to the relation function.
    num_inputs : int
        How many of ``args`` are inputs (a hint for type inference).
    attrs : Attrs
        Attributes attached to the relation.

    Returns
    -------
    type_relation : tvm.ir.TypeRelation
        The type relation.
    """

    def __init__(self, func, args, num_inputs, attrs):
        ctor_args = (func, args, num_inputs, attrs)
        self.__init_handle_by_constructor__(_ffi_api.TypeRelation, *ctor_args)
| 2,206 | 28.039474 | 97 | py |
tvm | tvm-main/python/tvm/ir/attrs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" TVM Attribute module, which is mainly used for defining attributes of operators."""
import tvm._ffi
from tvm.runtime import Object
import tvm.runtime._ffi_node_api
from . import _ffi_api
@tvm._ffi.register_object
class Attrs(Object):
    """Attribute node, which is mainly use for defining attributes of relay operators.

    Used by function registered in python side, such as compute, schedule and
    alter_layout. Attrs is passed as the first argument to these functions.
    """

    def list_field_info(self):
        """Get fields information

        Returns
        -------
        infos: list of AttrFieldInfo
            List of field information
        """
        return _ffi_api.AttrsListFieldInfo(self)

    def keys(self):
        """Get list of names in the attribute.

        Returns
        -------
        keys : list of str
            List of keys
        """
        return [field.name for field in self.list_field_info()]

    def get_int_tuple(self, key):
        """Get a python int tuple of a key

        Parameters
        ----------
        key: str

        Returns
        -------
        value: Tuple of int
        """
        return tuple(x.value for x in self.__getattr__(key))

    def get_int(self, key):
        """Get a python int value of a key

        Parameters
        ----------
        key: str

        Returns
        -------
        value: int
        """
        return self.__getattr__(key)

    def get_str(self, key):
        """Get a python str value of a key

        Parameters
        ----------
        key: str

        Returns
        -------
        value: str
        """
        return self.__getattr__(key)

    def __getitem__(self, item):
        # Allow dict-style access: attrs["key"] delegates to attribute lookup.
        return self.__getattr__(item)
@tvm._ffi.register_object
class DictAttrs(Attrs):
    """Dictionary attributes."""

    def _dict(self):
        """Return the underlying map backing this attribute dict."""
        return _ffi_api.DictAttrsGetDict(self)

    def keys(self):
        """Get list of names in the attribute.

        Returns
        -------
        keys : list of str
            List of keys
        """
        return [name for name, _unused in self.items()]

    def __getitem__(self, k):
        return self._dict()[k]

    def __contains__(self, k):
        return k in self._dict()

    def items(self):
        """Get items from the map."""
        return self._dict().items()

    def __len__(self):
        return len(self._dict())
def make_node(type_key, **kwargs):
    """Make a new IR node by its type key and fields

    Parameters
    ----------
    type_key : str
        The type key of the node.

    **kwargs : dict
        The fields of the node.

    Returns
    -------
    node : Node
        The corresponding IR Node

    Note
    ----
    If the created node is instance of AttrsNode, then
    the creator function will also run bound checks and
    default value setup as supported by Attrs.

    Example
    -------
    The following code constructs a IntImm object

    .. code-block:: python

       x = tvm.ir.make_node("IntImm", dtype="int32", value=10)
       assert isinstance(x, tvm.tir.IntImm)
       assert x.value == 10
    """
    # MakeNode expects a flat argument list: type_key, key1, value1, key2, ...
    packed = [type_key]
    for field_name, field_value in kwargs.items():
        packed.append(field_name)
        packed.append(field_value)
    return tvm.runtime._ffi_node_api.MakeNode(*packed)
| 4,097 | 23.987805 | 91 | py |
tvm | tvm-main/python/tvm/ir/diagnostics/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for TVM diagnostics."""
import tvm._ffi

# Populate this module with the packed functions registered under the
# "diagnostics" prefix on the C++ side.
tvm._ffi._init_api("diagnostics", __name__)
| 878 | 38.954545 | 62 | py |
tvm | tvm-main/python/tvm/ir/diagnostics/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""
The diagnostic interface to TVM, used for reporting and rendering
diagnostic information by the compiler. This module exposes
three key abstractions: a Diagnostic, the DiagnosticContext,
and the DiagnosticRenderer.
"""
import enum
import tvm._ffi
from . import _ffi_api
from ... import get_global_func, register_func, Object
def get_renderer():
    """
    Get the diagnostic renderer.

    Returns
    -------
    renderer: DiagnosticRenderer
    """
    renderer = _ffi_api.GetRenderer()
    return renderer
@tvm.register_func("diagnostics.override_renderer")
def override_renderer(render_func):
    """
    Sets a custom renderer for diagnostics.

    Params
    ------
    render_func: Option[Callable[[DiagnosticContext], None]]
        If the render_func is None it will remove the current custom renderer
        and return to default behavior.
    """
    if not render_func:
        # No callable supplied: restore the default rendering behavior.
        _ffi_api.ClearRenderer()
        return

    def _render_factory():
        return DiagnosticRenderer(render_func)

    register_func("diagnostics.OverrideRenderer", _render_factory, override=True)
class DiagnosticLevel(enum.IntEnum):
    """Severity level of a diagnostic; see diagnostic.h for more details."""

    # Values are spaced by 10 so new levels can be inserted in between.
    BUG = 10
    ERROR = 20
    WARNING = 30
    NOTE = 40
    HELP = 50
@tvm._ffi.register_object("Diagnostic")
class Diagnostic(Object):
    """A single diagnostic object from TVM."""

    def __init__(self, level, span, message):
        ctor_args = (level, span, message)
        self.__init_handle_by_constructor__(_ffi_api.Diagnostic, *ctor_args)
@tvm._ffi.register_object("DiagnosticRenderer")
class DiagnosticRenderer(Object):
    """
    A diagnostic renderer, which given a diagnostic context produces a "rendered"
    form of the diagnostics for either human or computer consumption.
    """

    def __init__(self, render_func):
        self.__init_handle_by_constructor__(_ffi_api.DiagnosticRenderer, render_func)

    def render(self, ctx):
        """
        Render the provided context.

        Params
        ------
        ctx: DiagnosticContext
            The diagnostic context to render.
        """
        rendered = _ffi_api.DiagnosticRendererRender(self, ctx)
        return rendered
# Register the diagnostic context.
@tvm._ffi.register_object("DiagnosticContext")
class DiagnosticContext(Object):
    """
    A diagnostic context which records active errors
    and contains a renderer.
    """

    def __init__(self, module, renderer):
        ctor_args = (module, renderer)
        self.__init_handle_by_constructor__(_ffi_api.DiagnosticContext, *ctor_args)

    def emit(self, diagnostic):
        """Emit a diagnostic."""
        _ffi_api.Emit(self, diagnostic)

    def render(self):
        """Render the current context using its renderer member."""
        _ffi_api.DiagnosticContextRender(self)
| 3,547 | 28.566667 | 89 | py |
tvm | tvm-main/python/tvm/tir/op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, invalid-name
"""Operators used in TIR expression."""
import warnings
from typing import Any, Optional
import tvm._ffi
from tvm.ir import Array, Op, PrimExpr
from tvm.ir.base import Span
from tvm.runtime import const, convert
from . import _ffi_api
from .buffer import Buffer
from .expr import Call, CommReducer, IntImm, PrimExprWithOp, StringImm, Var
def _pack_buffer(buf, span=None):
    """Build the intrinsic call that packs ``buf`` into an on-stack array.

    Parameters
    ----------
    buf : Buffer
        The buffer to pack.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    call : PrimExpr
        The packing call expression.
    """
    shape_expr = Call("handle", "tir.tvm_stack_make_shape", buf.shape, span)
    if buf.strides:
        strides_expr = Call("handle", "tir.tvm_stack_make_shape", buf.strides, span)
    else:
        strides_expr = 0
    pack_args = [
        buf.data,
        shape_expr,
        strides_expr,
        len(buf.shape),
        const(0, dtype=buf.dtype),
        buf.elem_offset,
    ]
    return Call("handle", Op.get("tir.tvm_stack_make_array"), pack_args, span)
def call_packed_lowered(*args, span=None):
    """Lowered version of call packed.

    Buffer arguments are packed into on-stack DLTensors; other arguments
    are passed as their POD values.  A callee receives a TVMArrayHandle
    whose content is valid only during the callback period; a python
    callback sees it as an NDArray.

    Parameters
    ----------
    args : list of Expr or Buffer.
        Positional arguments.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    call : PrimExpr
        The call expression.

    See Also
    --------
    te.extern : Create tensor with extern function call.
    """
    lowered_args = []
    for arg in args:
        lowered_args.append(_pack_buffer(arg) if isinstance(arg, Buffer) else arg)
    return Call("int32", Op.get("tir.tvm_call_packed_lowered"), lowered_args, span)
def call_cpacked_lowered(*args, span=None):
    """Lowered version of call c-packed.

    Same as call_packed, except that the first argument is the function name
    (as in call_extern), and the last argument is the resource handle.

    Parameters
    ----------
    args : list of Expr or Buffer.
        Positional arguments.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    call : PrimExpr
        The call expression.

    See Also
    --------
    te.extern : Create tensor with extern function call.
    """
    lowered_args = []
    for arg in args:
        lowered_args.append(_pack_buffer(arg) if isinstance(arg, Buffer) else arg)
    return Call("int32", Op.get("tir.tvm_call_cpacked_lowered"), lowered_args, span)
def call_packed(*args, span=None):
    """Build expression by call an external packed function.

    Buffer arguments are packed into on-stack DLTensors; other arguments
    are passed as their POD values.  The callee receives a TVMArrayHandle
    whose content is valid only during the callback period; a python
    callback sees it as an NDArray.

    Parameters
    ----------
    args : list of Expr or Buffer.
        Positional arguments.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    call : PrimExpr
        The call expression.

    See Also
    --------
    te.extern : Create tensor with extern function call.
    """

    def _prepare(arg):
        # Buffers must be packed; everything else passes through unchanged.
        return _pack_buffer(arg) if isinstance(arg, Buffer) else arg

    return Call("int32", Op.get("tir.tvm_call_packed"), [_prepare(a) for a in args], span)
def call_cpacked(*args, span=None):
    """Build expression by call an external packed function.

    Same as call_packed, except that the first argument is the function name
    (as in call_extern), and the last argument is the resource handle.

    Parameters
    ----------
    args : list of Expr or Buffer.
        Positional arguments.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    call : PrimExpr
        The call expression.

    See Also
    --------
    te.extern : Create tensor with extern function call.
    """

    def _prepare(arg):
        # Buffers must be packed; everything else passes through unchanged.
        return _pack_buffer(arg) if isinstance(arg, Buffer) else arg

    return Call("int32", Op.get("tir.tvm_call_cpacked"), [_prepare(a) for a in args], span)
def call_intrin(dtype, func_name, *args, span=None):
    """Build expression by calling an intrinsic function.

    Intrinsics can be overloaded with multiple data types via
    the intrinsic translation rule.

    Parameters
    ----------
    dtype : str
        The data type of the result.
    func_name: str
        The intrinsic function name.
    args : list
        Positional arguments.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    converted_args = convert(args)
    return Call(dtype, func_name, converted_args, span)
def call_pure_extern(dtype, func_name, *args, span=None):
    """Build expression by calling a pure extern function.

    Parameters
    ----------
    dtype : str
        The data type of the result.
    func_name: str
        The extern function name.
    args : list
        Positional arguments.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    # The function name travels as the first call argument.
    full_args = convert((StringImm(func_name),) + args)
    return Call(dtype, Op.get("tir.call_pure_extern"), full_args, span)
def call_extern(dtype, func_name, *args, span=None):
    """Build expression by calling a extern function.

    Parameters
    ----------
    dtype : str
        The data type of the result.
    func_name: str
        The extern function name.
    args : list
        Positional arguments.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    # The function name travels as the first call argument.
    full_args = convert((StringImm(func_name),) + args)
    return Call(dtype, Op.get("tir.call_extern"), full_args, span=span)
def call_llvm_intrin(dtype, name, *args, span=None):
    """Build expression by calling a llvm intrinsic function

    Parameters
    ----------
    dtype : str
       The data type of the result.

    name : str
       The name of the llvm intrinsic function.

    args : list
       Poistional arguments.

    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    # pylint: disable=import-outside-toplevel
    from tvm.target import codegen

    # Accept either a name string, an IntImm id, or a raw id.
    if isinstance(name, IntImm):
        llvm_id = name.value
    elif isinstance(name, str):
        llvm_id = codegen.llvm_lookup_intrinsic_id(name)
    else:
        llvm_id = name
    if llvm_id == 0:
        warnings.warn(f"Unknown llvm intrinsic function {name}, falling back to 0")
    intrin_id = tvm.tir.const(llvm_id, "uint32")
    return call_intrin(dtype, Op.get("tir.call_llvm_intrin"), intrin_id, *args, span=span)
def call_llvm_pure_intrin(dtype, name, *args, span=None):
    """Build expression by calling a pure llvm intrinsic function

    Parameters
    ----------
    dtype : str
       The data type of the result.

    name : str
       The name of the llvm intrinsic function.

    args : list
       Poistional arguments.

    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    # pylint: disable=import-outside-toplevel
    from tvm.target import codegen

    # Accept either a name string, an IntImm id, or a raw id.
    if isinstance(name, IntImm):
        llvm_id = name.value
    elif isinstance(name, str):
        llvm_id = codegen.llvm_lookup_intrinsic_id(name)
    else:
        llvm_id = name
    if llvm_id == 0:
        warnings.warn(f"Unknown llvm intrinsic function {name}, falling back to 0")
    intrin_id = tvm.tir.const(llvm_id, "uint32")
    return call_intrin(dtype, Op.get("tir.call_llvm_pure_intrin"), intrin_id, *args, span=span)
def tvm_check_return(expected, return_unexpected, nested_call):
    """Check the return value of a nested call against an expected code.

    (The previous docstring summary, "Return new on stack dtype[num]", was
    copy-pasted from ``tvm_stack_alloca`` and did not describe this intrinsic.)

    Parameters
    ----------
    expected : int
        The expected return code.

    return_unexpected : int
        The return code used to signal the unexpected case.

    nested_call : PrimExpr
        The call expression whose return value is checked.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin("int32", "tir.tvm_check_return", expected, return_unexpected, nested_call)
def tvm_stack_alloca(dtype_str, num):
    """Allocate ``dtype_str[num]`` on the stack and return a handle to it.

    Parameters
    ----------
    dtype_str : str
        The data type of array.

    num : int
        The size of array.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    intrin_args = (dtype_str, num)
    return call_intrin("handle", "tir.tvm_stack_alloca", *intrin_args)
def tvm_stack_make_shape(*args):
    """Allocate a shape tuple on stack, return the handle

    Parameters
    ----------
    args : int
        The tuple shape.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    result = call_intrin("handle", "tir.tvm_stack_make_shape", *args)
    return result
def tvm_stack_make_array(data, shape, strides, ndim, arr_dtype, elem_offset):
    """Allocate a NDArray(DLTensor) on stack, return the handle

    Parameters
    ----------
    data : Expr
        The data of array.

    shape : Expr
        The shape of array.

    strides : Expr
        The strides of array.

    ndim : Expr
        The dimensions of array.

    arr_dtype : Expr
        The data type of array.

    elem_offset : Expr
        The element offset of array.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(
        "handle", "tir.tvm_stack_make_array", data, shape, strides, ndim, arr_dtype, elem_offset
    )
def assume(cond=None):
    """Provide a statement asserted to be true, usable by simplifications.

    Parameters
    ----------
    cond : Expr
        The constraint condition.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    assumption = call_intrin("bool", "tir.assume", cond)
    return assumption
def undef():
    """Return a placeholder "undef" value: initialized but arbitrary.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin("int32", "tir.undef")
def call_tir(global_var: tvm.ir.GlobalVar, *args):
    """Performs a call into another PrimFunc in the same IRModule

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    assert isinstance(global_var, tvm.ir.GlobalVar)

    # Derive the call's dtype from the callee's checked return type when
    # it carries one; otherwise fall back to "void".
    result_dtype = "void"
    func_type = global_var.checked_type
    if func_type is not None:
        result_dtype = getattr(func_type.ret_type, "dtype", "void")
    return Call(dtype=result_dtype, op=global_var, args=args)
def start_profile_intrinsic(id):
    """Start profile intrinsic.

    Parameters
    ----------
    id : int
        The intrinsic id.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    result = call_intrin("handle", "tir.start_profile_intrinsic", id)
    return result
def end_profile_intrinsic(id):
    """End profile intrinsic.

    Parameters
    ----------
    id : int
        The intrinsic id.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    result = call_intrin("handle", "tir.end_profile_intrinsic", id)
    return result
def tvm_tuple(*value):
    """Create a tuple structure in value field of AttrStmt

    Parameters
    ----------
    value : Expr
        The value in tuple.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    tuple_expr = call_intrin("handle", "tir.tvm_tuple", *value)
    return tuple_expr
def tvm_struct_get(arr, index, field, dtype):
    """Read a struct field from an array of structs.

    Parameters
    ----------
    dtype : str
        The data type of the result.

    arr : StructType*
        The array of struct.

    index : int
        The index of struct.

    field : int
        The field of struct.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    intrin_args = (arr, index, field)
    return call_intrin(dtype, "tir.tvm_struct_get", *intrin_args)
def tvm_struct_set(arr, index, field, value):
    """Write a struct field in an array of structs.

    Parameters
    ----------
    arr : StructType*
        The array of struct.

    index : int
        The index of struct.

    field : int
        The field of struct.

    value : Expr
        The value to be set in field.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    intrin_args = (arr, index, field, value)
    return call_intrin("int32", "tir.tvm_struct_set", *intrin_args)
def address_of(buffer_load, span=None):
    """Returns the address of an element in the buffer

    Parameters
    ----------
    buffer_load: BufferLoad
        The buffer load.

    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    addr = call_intrin("handle", "tir.address_of", buffer_load, span=span)
    return addr
def lookup_param(param_name, span=None):
    """Returns the param by name

    Parameters
    ----------
    param_name : str
        The name of param.

    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    param = call_intrin("handle", "tir.lookup_param", param_name, span=span)
    return param
def tvm_thread_allreduce(*freduce_args):
    """Perform allreduce inside threadblock.

    Parameters
    ----------
    freduce_args : Expr
        The args.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    result = call_intrin("handle", "tir.tvm_thread_allreduce", *freduce_args)
    return result
def tvm_storage_sync(storage_scope):
    """Perform synchronization in specified scope.

    Parameters
    ----------
    storage_scope : str
        The storage scope to perform synchronization.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    sync = call_intrin("int32", "tir.tvm_storage_sync", storage_scope)
    return sync
def tvm_warp_shuffle(mask, value, warp_id, width, warp_size):
    """Exchange value between threads inside a warp.

    The result dtype follows ``value.dtype``.

    Parameters
    ----------
    mask : PrimExpr
        The warp mask indicates active threads inside warp.
    value : PrimExpr
        The value to exchange.
    warp_id : PrimExpr
        The source lane index to fetch value.
    width : PrimExpr
        The width of sub-sections to perform warp shuffle.
    warp_size : PrimExpr
        The warp size.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(value.dtype, "tir.tvm_warp_shuffle", mask, value, warp_id, width, warp_size)
def tvm_warp_shuffle_up(mask, value, offset, width, warp_size):
    """Copy value from a lane with lower (by offset) index relative to caller.

    Parameters
    ----------
    mask : PrimExpr
        The warp mask indicates active threads inside warp.
    value : PrimExpr
        The value to exchange.
    offset : PrimExpr
        The difference between source lane index and destination lane index:
        `offset = dst_lane_idx - src_lane_idx`
    width : PrimExpr
        The width of sub-sections to perform warp shuffle.
    warp_size : PrimExpr
        The warp size.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(
        value.dtype, "tir.tvm_warp_shuffle_up", mask, value, offset, width, warp_size
    )
def tvm_warp_shuffle_down(mask, value, offset, width, warp_size):
    """Copy value from a lane with higher (by offset) index relative to caller.

    Parameters
    ----------
    mask : PrimExpr
        The warp mask indicates active threads inside warp.
    value : PrimExpr
        The value to exchange.
    offset : PrimExpr
        The difference between source lane index and destination lane index:
        `offset = src_lane_idx - dst_lane_idx`
    width : PrimExpr
        The width of sub-sections to perform warp shuffle.
    warp_size : PrimExpr
        The warp size.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(
        value.dtype, "tir.tvm_warp_shuffle_down", mask, value, offset, width, warp_size
    )
def tvm_warp_activemask():
    """Return a 32-bit mask indicates currently active threads in a calling warp.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin("uint32", "tir.tvm_warp_activemask")
def type_annotation(dtype):
    """Create a type annotation expression

    Parameters
    ----------
    dtype : Expr
        The data type.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(dtype, "tir.type_annotation")
def tvm_access_ptr(ptype, data, offset, extent, rw_mask):
    """Get head access address with memory access pattern info

    Parameters
    ----------
    ptype : Expr
        The data type of pointer.
    data : DType*
        The data of pointer.
    offset : int
        The offset of pointer.
    extent : int
        The extent of pointer.
    rw_mask : int
        The read write mask.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin("handle", "tir.tvm_access_ptr", ptype, data, offset, extent, rw_mask)
def tvm_throw_last_error():
    """Throw TVMGetLastError()

    Returns
    -------
    ret : PrimExpr
        The return expression
    """
    return call_intrin("handle", "tir.tvm_throw_last_error")
def tvm_load_matrix_sync(fragment, m, n, k, index, buffer_ptr, stride, layout):
    """TVM intrinsic for tensor core load operators

    Parameters
    ----------
    fragment : Var
        The wmma fragment.
    m : UIntImm
        The shape of wmma fragment.
    n : UIntImm
        The shape of wmma fragment.
    k : UIntImm
        The shape of wmma fragment.
    index : Expr
        The fragment index.
    buffer_ptr : Expr
        The fragment buffer pointer.
    stride : Expr
        The fragment stride.
    layout : Literal["row_major", "column_major"]
        The fragment layout.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(
        "handle",
        "tir.tvm_load_matrix_sync",
        fragment,
        m,
        n,
        k,
        index,
        buffer_ptr,
        stride,
        layout,
    )
def tvm_mma_sync(
    fragment_d, index_d, fragment_a, index_a, fragment_b, index_b, fragment_c, index_c
):
    """TVM intrinsic for tensor core mma_sync operators

    Parameters
    ----------
    fragment_d : Var
        The wmma fragment_d.
    index_d : Expr
        The fragment_d index.
    fragment_a : Var
        The wmma fragment_a.
    index_a : Expr
        The fragment_a index.
    fragment_b : Var
        The wmma fragment_b.
    index_b : Expr
        The fragment_b index.
    fragment_c : Var
        The wmma fragment_c.
    index_c : Expr
        The fragment_c index.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(
        "handle",
        "tir.tvm_mma_sync",
        fragment_d,
        index_d,
        fragment_a,
        index_a,
        fragment_b,
        index_b,
        fragment_c,
        index_c,
    )
def tvm_bmma_sync(
    fragment_d, index_d, fragment_a, index_a, fragment_b, index_b, fragment_c, index_c
):
    """TVM intrinsic for tensor core bmma_sync operators

    Parameters
    ----------
    fragment_d : Var
        The bwmma fragment_d.
    index_d : Expr
        The fragment_d index.
    fragment_a : Var
        The bwmma fragment_a.
    index_a : Expr
        The fragment_a index.
    fragment_b : Var
        The bwmma fragment_b.
    index_b : Expr
        The fragment_b index.
    fragment_c : Var
        The bwmma fragment_c.
    index_c : Expr
        The fragment_c index.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(
        "handle",
        "tir.tvm_bmma_sync",
        fragment_d,
        index_d,
        fragment_a,
        index_a,
        fragment_b,
        index_b,
        fragment_c,
        index_c,
    )
def tvm_fill_fragment(fragment, m, n, k, index, value):
    """TVM intrinsic for tensor core fill_fragment operators

    Parameters
    ----------
    fragment : Var
        The wmma fragment
    m : UIntImm
        The shape of wmma fragment.
    n : UIntImm
        The shape of wmma fragment.
    k : UIntImm
        The shape of wmma fragment.
    index : Expr
        The fragment index.
    value : Expr
        The value to be filled in fragment.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(
        "handle",
        "tir.tvm_fill_fragment",
        fragment,
        m,
        n,
        k,
        index,
        value,
    )
def tvm_store_matrix_sync(fragment, m, n, k, index, buffer_ptr, stride, layout):
    """TVM intrinsic for tensor core store operators

    Parameters
    ----------
    fragment : Var
        The wmma fragment.
    m : UIntImm
        The shape of wmma fragment.
    n : UIntImm
        The shape of wmma fragment.
    k : UIntImm
        The shape of wmma fragment.
    index : Expr
        The fragment index.
    buffer_ptr : Expr
        The fragment buffer pointer.
    stride : Expr
        The fragment stride.
    layout : Literal["row_major", "column_major"]
        The fragment layout.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(
        "handle",
        "tir.tvm_store_matrix_sync",
        fragment,
        m,
        n,
        k,
        index,
        buffer_ptr,
        stride,
        layout,
    )
def ptx_mma(
    dtype,
    shape,
    A_layout,
    B_layout,
    A_dtype,
    B_dtype,
    C_dtype,
    multiplicand_a,
    a_index,
    multiplicand_b,
    b_index,
    accumulator,
    c_index,
    saturate,
    operator=None,
):
    """TVM intrinsic for ptx tensor core mma instructions
    https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-for-mma

    Parameters
    ----------
    dtype : str
        The data type of the result.
    shape : str
        The shape of mma fragment.
    A_layout : Literal["row", "col"]
        The layout of multiplicand fragment A.
    B_layout : Literal["row", "col"]
        The layout of multiplicand fragment B.
    A_dtype : str
        The data type of multiplicand fragment A.
    B_dtype : str
        The data type of multiplicand fragment B.
    C_dtype : str
        The data type of accumulator fragment C.
    multiplicand_a : Var
        The multiplicand fragment A variable.
    a_index : Expr
        The index of multiplicand fragment A.
    multiplicand_b : Var
        The multiplicand fragment B variable.
    b_index : Expr
        The index of multiplicand fragment B.
    accumulator : Var
        The accumulator fragment C variable.
    c_index : Expr
        The index of accumulator fragment C.
    saturate : bool
        The optional saturation at the output.
    operator : Optional[Literal["xor", "and"]]
        The 1-bit operator.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    # Assemble the common argument list once; the optional 1-bit operator is
    # appended only when supplied, matching the intrinsic's variadic signature.
    intrin_args = [
        shape,
        A_layout,
        B_layout,
        A_dtype,
        B_dtype,
        C_dtype,
        multiplicand_a,
        a_index,
        multiplicand_b,
        b_index,
        accumulator,
        c_index,
        saturate,
    ]
    if operator is not None:
        intrin_args.append(operator)
    return call_intrin(dtype, "tir.ptx_mma", *intrin_args)
def ptx_mma_sp(
    dtype,
    shape,
    A_layout,
    B_layout,
    A_dtype,
    B_dtype,
    C_dtype,
    multiplicand_a,
    a_index,
    multiplicand_b,
    b_index,
    accumulator,
    c_index,
    metadata,
    meta_index,
    sparse_selector,
    saturate,
):
    """TVM intrinsic for sparse tensor core ptx instructions
    https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-for-sparse-mma

    Parameters
    ----------
    dtype : str
        The data type of the result.
    shape : str
        The shape of mma fragment.
    A_layout : Literal["row", "col"]
        The layout of multiplicand fragment A.
    B_layout : Literal["row", "col"]
        The layout of multiplicand fragment B.
    A_dtype : str
        The data type of multiplicand fragment A.
    B_dtype : str
        The data type of multiplicand fragment B.
    C_dtype : str
        The data type of accumulator fragment C.
    multiplicand_a : Var
        The multiplicand fragment A variable.
    a_index : Expr
        The index of multiplicand fragment A.
    multiplicand_b : Var
        The multiplicand fragment B variable.
    b_index : Expr
        The index of multiplicand fragment B.
    accumulator : Var
        The accumulator fragment C variable.
    c_index : Expr
        The index of accumulator fragment C.
    metadata : Expr
        The metadata of operand.
    meta_index : Expr
        The metadata index of operand.
    sparse_selector : Expr
        The sparse selector indicating the thread that stores the metadata.
    saturate : bool
        The optional saturation at the output.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(
        dtype,
        "tir.ptx_mma_sp",
        shape,
        A_layout,
        B_layout,
        A_dtype,
        B_dtype,
        C_dtype,
        multiplicand_a,
        a_index,
        multiplicand_b,
        b_index,
        accumulator,
        c_index,
        metadata,
        meta_index,
        sparse_selector,
        saturate,
    )
def mma_store(dtype, m, n, dst_ptr, src_ptr, src_offset, dst_stride):
    """TVM intrinsic for storing the result of PTX MMA into a destination pointer

    Parameters
    ----------
    dtype : str
        The data type of the result.
    m : IntImm
        The shape of mma fragment.
    n : IntImm
        The shape of mma fragment.
    dst_ptr : Var
        The destination pointer variable.
    src_ptr : Var
        The source pointer variable.
    src_offset : Expr
        The source offset.
    dst_stride : Var
        The destination stride.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(
        dtype,
        "tir.mma_store",
        m,
        n,
        dst_ptr,
        src_ptr,
        src_offset,
        dst_stride,
    )
def mma_fill(dtype, local_size, local_ptr, offset):
    """TVM intrinsic for zero-initializing an MMA accumulation register

    Parameters
    ----------
    dtype : str
        The data type of the result.
    local_size : IntImm
        The number of elements.
    local_ptr : Var
        The destination pointer variable.
    offset : Expr
        The destination offset.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(
        dtype,
        "tir.mma_fill",
        local_size,
        local_ptr,
        offset,
    )
def ptx_ldmatrix(dtype, trans, num, type, local_ptr, local_offset, smem_ptr, smem_offset):
    """TVM intrinsic for ptx load matrix from shared memory
    https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-ldmatrix

    Parameters
    ----------
    dtype : str
        The data type of the result.
    trans : bool
        The matrix is loaded in column-major format.
    num : IntImm
        The number of matrices.
    type : Literal[".b16"]
        The data type of the matrices.
    local_ptr : Var
        The local pointer variable.
    local_offset : Expr
        The offset of local pointer.
    smem_ptr : Var
        The shared memory pointer variable.
    smem_offset : Expr
        The offset of shared memory pointer.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(
        dtype,
        "tir.ptx_ldmatrix",
        trans,
        num,
        type,
        local_ptr,
        local_offset,
        smem_ptr,
        smem_offset,
    )
def ptx_cp_async(dtype, shared_ptr, shared_offset, global_ptr, global_offset, bytes):
    """TVM intrinsic for ptx async copy from global to shared memory
    https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async

    Parameters
    ----------
    dtype : str
        The data type of the result.
    shared_ptr : Var
        The shared memory pointer variable.
    shared_offset : Expr
        The offset of shared memory pointer.
    global_ptr : Var
        The global memory pointer variable.
    global_offset : Expr
        The offset of global memory pointer.
    bytes : int
        The data size to copy.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(
        dtype, "tir.ptx_cp_async", shared_ptr, shared_offset, global_ptr, global_offset, bytes
    )
def ptx_commit_group():
    """TVM intrinsic for ptx async copy commit
    https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-commit-group

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin("", "tir.ptx_commit_group")
def ptx_wait_group(num):
    """TVM intrinsic for ptx async copy wait
    https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-wait-group

    Parameters
    ----------
    num : int
        The number of the most recent uncommitted pending cp.async groups to wait.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin("", "tir.ptx_wait_group", num)
def vectorlow(dtype, vec):
    """Get the low level half of the vector

    Parameters
    ----------
    dtype : str
        The data type of the result.
    vec : list
        The input vector.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(dtype, "tir.vectorlow", vec)
def vectorhigh(dtype, vec):
    """Get the high level half of the vector

    Parameters
    ----------
    dtype : str
        The data type of the result.
    vec : list
        The input vector.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(dtype, "tir.vectorhigh", vec)
def vectorcombine(dtype, vec1, vec2):
    """Concat two vectors

    Parameters
    ----------
    dtype : str
        The data type of the result.
    vec1 : list
        The input vector.
    vec2 : list
        The input vector.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return call_intrin(dtype, "tir.vectorcombine", vec1, vec2)
def ret(val):
    """Create a tir return expression

    Parameters
    ----------
    val : Expr
        The returned tir expression, whose data type is int, float or void pointer.

    Returns
    -------
    ret : PrimExpr
        The return expression
    """
    # Normalize Python scalars into a PrimExpr so we can read its dtype.
    expr = convert(val)
    return call_intrin(expr.dtype, "tir.ret", expr)
def any(*args, span=None):
    """Create a new expression of the union of all conditions in the arguments

    Parameters
    ----------
    args : list
        List of symbolic boolean expressions

    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    expr: Expr
        Expression
    """
    if not args:
        raise ValueError("Any must take at least 1 argument")
    if len(args) == 1:
        return args[0]
    # Left-fold the remaining conditions into a chain of logical ORs.
    result = args[0]
    for cond in args[1:]:
        result = _ffi_api._OpOr(result, cond, span)  # type: ignore
    return result
def all(*args, span=None):
    """Create a new expression of the intersection of all conditions in the
    arguments

    Parameters
    ----------
    args : list
        List of symbolic boolean expressions

    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    expr: Expr
        Expression

    Raises
    ------
    ValueError
        If no argument is supplied.
    """
    if not args:
        # Fixed copy-paste bug: the message previously said "Any".
        raise ValueError("All must take at least 1 argument")
    if len(args) == 1:
        return args[0]
    # Left-fold the remaining conditions into a chain of logical ANDs.
    result = args[0]
    for cond in args[1:]:
        result = _ffi_api._OpAnd(result, cond, span)  # type: ignore
    return result
@tvm._ffi.register_func("tvm.default_trace_action")
def _tvm_default_trace_action(*args):
    # Default runtime callback for tir.trace: print the traced values to stdout.
    print(list(args))
def trace(args, trace_action="tvm.default_trace_action"):
    """Trace tensor data at the runtime.

    The trace function allows to trace specific tensor at the
    runtime. The tracing value should come as last argument.
    The trace action should be specified, by default
    tvm.default_trace_action is used.

    Parameters
    ----------
    args : list of Expr or Buffers.
        Positional arguments.
    trace_action : str.
        The name of the trace action.

    Returns
    -------
    call : PrimExpr
        The call expression.

    Raises
    ------
    TypeError
        If ``args`` is not a list.

    See Also
    --------
    tvm.tir.call_packed : Creates packed function.
    """
    if not isinstance(args, list):
        # Use TypeError (a subclass of Exception, so existing broad handlers
        # still catch it) instead of the previous bare Exception.
        raise TypeError("tvm.tir.trace consumes the args as list type")
    # Buffers must be packed into TVMValue form before entering the call.
    call_args = [_pack_buffer(x) if isinstance(x, Buffer) else x for x in args]
    call_args.insert(0, trace_action)
    # The dtype of the trace call follows the traced value (the last argument).
    return tvm.tir.Call(args[-1].dtype, Op.get("tir.tvm_call_trace_packed"), call_args)
def min_value(dtype, span=None):
    """minimum value of dtype

    Parameters
    ----------
    dtype : str
        The data type.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    value : tvm.Expr
        The minimum value of dtype.
    """
    return _ffi_api.min_value(dtype, span)  # type: ignore
def max_value(dtype: str, span: Optional[Span] = None) -> Any:
    """maximum value of dtype

    Parameters
    ----------
    dtype : str
        The data type.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    value : tvm.Expr
        The maximum value of dtype.
    """
    return _ffi_api.max_value(dtype, span)  # type: ignore
def infinity(dtype: str, span: Optional[Span] = None) -> Any:
    """infinity value of dtype

    Parameters
    ----------
    dtype : str
        The data type.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    value : tvm.Expr
        The infinity value of dtype.
    """
    return _ffi_api.infinity(dtype, span)  # type: ignore
def reinterpret(dtype, value, span=None) -> Any:
    """Reinterpret the bits of ``value`` as ``dtype`` (no numeric conversion).

    The previous docstring wrongly described this as "infinity value of dtype"
    and documented a ``span`` parameter the function did not accept; ``span``
    is now supported (backward-compatible keyword with a default).

    Parameters
    ----------
    dtype : str
        The target data type.
    value : PrimExpr
        The input value.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    value : tvm.Expr
        The reinterpret cast value of dtype.
    """
    return call_intrin(dtype, "tir.reinterpret", value, span=span)
def exp(x):
    """Take exponential of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.exp", operand)
def exp2(x):
    """Calculate 2**x

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.exp2", operand)
def exp10(x):
    """Calculate 10**x

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.exp10", operand)
def erf(x):
    """Take gauss error function of the input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.erf", operand)
def tanh(x):
    """Take hyperbolic tanh of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.tanh", operand)
def sigmoid(x):
    """Quick function to get sigmoid

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.sigmoid", operand)
def log(x):
    """Take log of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.log", operand)
def log2(x):
    """Take log2 of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.log2", operand)
def log10(x):
    """Take log10 of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.log10", operand)
def log1p(x):
    """Take log(x + 1) with respect to input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.log1p", operand)
def tan(x):
    """Take tan of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.tan", operand)
def cos(x):
    """Take cos of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.cos", operand)
def cosh(x):
    """Take cosh of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.cosh", operand)
def acos(x):
    """Take acos of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.acos", operand)
def acosh(x):
    """Take acosh of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.acosh", operand)
def sin(x):
    """Take sin of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.sin", operand)
def sinh(x):
    """Take sinh of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.sinh", operand)
def asin(x):
    """Take asin of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.asin", operand)
def asinh(x):
    """Take asinh of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.asinh", operand)
def atan(x):
    """Take atan of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.atan", operand)
def atanh(x):
    """Take atanh of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.atanh", operand)
def atan2(x1, x2):
    """Take arctan2(x1, x2).

    Parameters
    ----------
    x1 : PrimExpr
        Input argument.
    x2 : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    # The result dtype follows the first operand.
    lhs, rhs = convert(x1), convert(x2)
    return call_intrin(lhs.dtype, "tir.atan2", lhs, rhs)
def sqrt(x):
    """Take square root of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.sqrt", operand)
def rsqrt(x):
    """Take reciprocal of square root of input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.rsqrt", operand)
def clz(x):
    """Count leading zero bits of an integer x.

    Parameters
    ----------
    x : PrimExpr
        Input 32 or 64 bit integer.
        The result is undefined if the input is 0.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    return call_intrin("int32", "tir.clz", x)
def floor(x: PrimExprWithOp, span=None):
    """Take floor of float input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    return _ffi_api.floor(x, span)  # type: ignore
def ceil(x, span=None):
    """Take ceil of float input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    return _ffi_api.ceil(x, span)  # type: ignore
def trunc(x, span=None):
    """Get truncated value of the input.

    The truncated value of the scalar x is the
    nearest integer i which is closer to zero than x is.

    Parameters
    ----------
    x : PrimExpr
        Input argument.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    return _ffi_api.trunc(x, span)  # type: ignore
def abs(x, span=None):
    """Get absolute value of the input element-wise.

    Parameters
    ----------
    x : PrimExpr
        Input argument.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    return _ffi_api.abs(x, span)  # type: ignore
def bitwise_and(x, y, span=None):
    """Take bitwise and of two values

    Parameters
    ----------
    x : PrimExpr
        Left operand
    y : PrimExpr
        Right operand
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    res : PrimExpr
        The result.
    """
    return _ffi_api.bitwise_and(x, y, span)
def bitwise_not(x, span=None):
    """Take bitwise not of input value

    Parameters
    ----------
    x : PrimExpr
        Input operand
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    res : PrimExpr
        The result.
    """
    return _ffi_api.bitwise_not(x, span)
def bitwise_or(x, y, span=None):
    """Take bitwise or of two values

    Parameters
    ----------
    x : PrimExpr
        Left operand
    y : PrimExpr
        Right operand
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    res : PrimExpr
        The result.
    """
    return _ffi_api.bitwise_or(x, y, span)
def bitwise_xor(x, y, span=None):
    """Take bitwise xor of two values

    Parameters
    ----------
    x : PrimExpr
        Left operand
    y : PrimExpr
        Right operand
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    res : PrimExpr
        The result.
    """
    return _ffi_api.bitwise_xor(x, y, span)
def round(x, span=None):
    """Round elements of the array to the nearest integer.

    Parameters
    ----------
    x : PrimExpr
        Input argument.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    return _ffi_api.round(x, span)  # type: ignore
def nearbyint(x, span=None):
    """Round elements of the array to the nearest integer.

    This intrinsic uses llvm.nearbyint instead of llvm.round
    which is faster but will results different from te.round.
    Notably nearbyint rounds according to the rounding mode,
    whereas te.round (llvm.round) ignores that.
    For differences between the two see:
    https://en.cppreference.com/w/cpp/numeric/math/round
    https://en.cppreference.com/w/cpp/numeric/math/nearbyint

    Parameters
    ----------
    x : PrimExpr
        Input argument.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    return _ffi_api.nearbyint(x, span)  # type: ignore
def nextafter(x1, x2):
    """Return the next floating-point value after x1 towards x2.

    Parameters
    ----------
    x1 : PrimExpr
        Input argument.
    x2 : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    # The result dtype follows the first operand.
    lhs, rhs = convert(x1), convert(x2)
    return call_intrin(lhs.dtype, "tir.nextafter", lhs, rhs)
def hypot(x1, x2):
    """Equivalent to sqrt(x1**2 + x2**2), element-wise.

    Parameters
    ----------
    x1 : PrimExpr
        Input argument.
    x2 : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    # The result dtype follows the first operand.
    lhs, rhs = convert(x1), convert(x2)
    return call_intrin(lhs.dtype, "tir.hypot", lhs, rhs)
def copysign(x1, x2):
    """Change the sign of x1 to that of x2, element-wise.

    Parameters
    ----------
    x1 : PrimExpr
        Input argument.
    x2 : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    # The result dtype follows the first operand.
    lhs, rhs = convert(x1), convert(x2)
    return call_intrin(lhs.dtype, "tir.copysign", lhs, rhs)
def ldexp(x1, x2):
    """Returns x1 * (2 ** x2).

    Parameters
    ----------
    x1 : PrimExpr
        Input argument.
    x2 : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    # The result dtype follows the first operand.
    lhs, rhs = convert(x1), convert(x2)
    return call_intrin(lhs.dtype, "tir.ldexp", lhs, rhs)
def likely(cond, span=None):
    """Mark condition as likely.

    Parameters
    ----------
    cond : PrimExpr
        Input argument.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    y : PrimExpr
        The marked expression.
    """
    return _ffi_api.likely(cond, span)  # type: ignore
def isnan(x, span=None):
    """Check if input value is Nan.

    Parameters
    ----------
    x : PrimExpr
        Input argument.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    return _ffi_api.isnan(x, span)  # type: ignore
def isnullptr(x, span=None):
    """Check if input value is nullptr.

    Parameters
    ----------
    x : PrimExpr
        Input argument.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    return call_intrin("bool", "tir.isnullptr", x, span=span)  # type: ignore
def isfinite(x, span=None):
    """Check if input value is finite.

    Parameters
    ----------
    x : PrimExpr
        Input argument.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    return _ffi_api.isfinite(x, span)  # type: ignore
def isinf(x, span=None):
    """Check if input value is infinite.

    Parameters
    ----------
    x : PrimExpr
        Input argument.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    return _ffi_api.isinf(x, span)  # type: ignore
def power(x, y, span=None):
    """x power y

    Parameters
    ----------
    x : PrimExpr
        Input argument.
    y : PrimExpr
        The exponent
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    z : PrimExpr
        The result.
    """
    return _ffi_api._OpPow(convert(x), convert(y), span)  # type: ignore
def pow(x, y, span=None):
    """x power y

    Alias of :py:func:`power`.

    Parameters
    ----------
    x : PrimExpr
        Input argument.
    y : PrimExpr
        The exponent
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    z : PrimExpr
        The result.
    """
    return _ffi_api._OpPow(convert(x), convert(y), span)  # type: ignore
def popcount(x):
    """Count the number of set bits in input x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.

    Returns
    -------
    y : PrimExpr
        The result.
    """
    operand = convert(x)
    return call_intrin(operand.dtype, "tir.popcount", operand)
def q_multiply_shift(x, y, q, s):
    """Execute a multiplication between two Q-numbers x and y
    followed by a right shift s. The mathematical expression is:

        out = round(x*y*2^-s)

    More about Q-numbers here: https://en.wikipedia.org/wiki/Q_(number_format)
    The rounding rule is to the nearest value, rounding half up
    (i.e., round(x.1) = x and round (x.5) = x+1)

    Parameters
    ----------
    x : PrimExpr
        First Q-number
    y : PrimExpr
        Second Q-number
    q : PrimExpr
        Number of fractional bits in x and y. Needs to be > 0
    s : PrimExpr
        Integer shift

    Returns
    -------
    y : PrimExpr
        The result.
    """
    return call_intrin("int32", "tir.q_multiply_shift", x, y, q, s)
def q_multiply_shift_per_axis(
    x: PrimExpr,
    y: PrimExpr,
    ls: PrimExpr,
    rs: PrimExpr,
    q: IntImm,
    is_lshift_required: IntImm,
    is_rshift_required: IntImm,
):
    """Execute a multiplication between two Q-numbers x and y

    Parameters
    ----------
    x : PrimExpr
        First Q-number.
    y : PrimExpr
        Second Q-number.
    ls : PrimExpr
        Integer left shift.
    rs : PrimExpr
        Integer right shift.
    q : IntImm
        Number of fractional bits in x and y. Needs to be > 0.
    is_lshift_required : IntImm
        Whether we need to do left shift or not.
    is_rshift_required : IntImm
        Whether we need to do right shift or not.

    Returns
    -------
    z : PrimExpr
        The result.
    """
    return call_intrin(
        "int32",
        "tir.q_multiply_shift_per_axis",
        x,
        y,
        ls,
        rs,
        q,
        is_lshift_required,
        is_rshift_required,
    )
def shift_left(x, y, span=None):
    """Return the result of x left shifted by y bits.

    Parameters
    ----------
    x : PrimExpr
        Input argument.
    y : PrimExpr
        Input argument.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    z : PrimExpr
        The result.
    """
    return _ffi_api.left_shift(x, y, span)
def shift_right(x, y, span=None):
    """Return the result of x right shifted by y bits.

    Parameters
    ----------
    x : PrimExpr
        Input argument.
    y : PrimExpr
        Input argument.
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    z : PrimExpr
        The result.
    """
    return _ffi_api.right_shift(x, y, span)
def fmod(x, y):
    """Return the remainder of x divided by y with the same sign as x.

    Parameters
    ----------
    x : PrimExpr
        Input argument.
    y : PrimExpr
        Input argument.

    Returns
    -------
    z : PrimExpr
        The result.
    """
    # The result dtype follows the first operand.
    lhs, rhs = convert(x), convert(y)
    return call_intrin(lhs.dtype, "tir.fmod", lhs, rhs)
def if_then_else(cond, t, f, span=None):
    """Conditional selection expression.

    Parameters
    ----------
    cond : PrimExpr
        The condition
    t : PrimExpr
        The result expression if cond is true.
    f : PrimExpr
        The result expression if cond is false.
    span : Optional[Span]
        The location of this operator in the source.

    Returns
    -------
    result : Node
        The result of conditional expression.

    Note
    ----
    Unlike Select, if_then_else will not execute
    the branch that does not satisfy the condition.
    You can use it to guard against out of bound access.
    Unlike Select, if_then_else cannot be vectorized
    if some lanes in the vector have different conditions.
    """
    return _ffi_api._OpIfThenElse(convert(cond), convert(t), convert(f), span)  # type: ignore
def div(a, b, span=None):
    """Compute a / b as in C/C++ semantics.

    Parameters
    ----------
    a : PrimExpr
        The left hand operand, known to be non-negative.
    b : PrimExpr
        The right hand operand, known to be non-negative.
    span : Optional[Span]
        The location of this operator in the source.

    Returns
    -------
    res : PrimExpr
        The result expression.

    Note
    ----
    When operands are integers, returns truncdiv(a, b, span).
    """
    quotient = _ffi_api._OpDiv(a, b, span)  # type: ignore
    return quotient
def indexdiv(a, b, span=None):
    """Compute floor(a / b) where a and b are non-negative.

    Use this function to split non-negative indices; the implementation may
    exploit the operands' non-negativeness.

    Parameters
    ----------
    a : PrimExpr
        The left hand operand, known to be non-negative.
    b : PrimExpr
        The right hand operand, known to be non-negative.
    span : Optional[Span]
        The location of this operator in the source.

    Returns
    -------
    res : PrimExpr
        The result expression.
    """
    quotient = _ffi_api._OpIndexDiv(a, b, span)  # type: ignore
    return quotient
def indexmod(a, b, span=None):
    """Compute the remainder of indexdiv. a and b are non-negative.

    Use this function to split non-negative indices; the implementation may
    exploit the operands' non-negativeness.

    Parameters
    ----------
    a : PrimExpr
        The left hand operand, known to be non-negative.
    b : PrimExpr
        The right hand operand, known to be non-negative.
    span : Optional[Span]
        The location of this operator in the source.

    Returns
    -------
    res : PrimExpr
        The result expression.
    """
    remainder = _ffi_api._OpIndexMod(a, b, span)  # type: ignore
    return remainder
def truncdiv(a, b, span=None):
    """Compute the truncdiv of two expressions.

    This is the default integer division behavior in C: the quotient is
    rounded toward zero.

    Parameters
    ----------
    a : PrimExpr
        The left hand operand
    b : PrimExpr
        The right hand operand
    span : Optional[Span]
        The location of this operator in the source.

    Returns
    -------
    res : PrimExpr
        The result expression.
    """
    quotient = _ffi_api._OpTruncDiv(a, b, span)  # type: ignore
    return quotient
def truncmod(a, b, span=None):
    """Compute the truncmod of two expressions.

    This matches the remainder semantics of C's integer ``%`` operator
    (paired with truncated division).

    Parameters
    ----------
    a : PrimExpr
        The left hand operand
    b : PrimExpr
        The right hand operand
    span : Optional[Span]
        The location of this operator in the source.

    Returns
    -------
    res : PrimExpr
        The result expression.
    """
    remainder = _ffi_api._OpTruncMod(a, b, span)  # type: ignore
    return remainder
def floordiv(a, b, span=None):
    """Compute the floordiv of two expressions.

    Parameters
    ----------
    a : PrimExpr
        The left hand operand
    b : PrimExpr
        The right hand operand
    span : Optional[Span]
        The location of this operator in the source.

    Returns
    -------
    res : PrimExpr
        The result expression.
    """
    quotient = _ffi_api._OpFloorDiv(a, b, span)  # type: ignore
    return quotient
def floormod(a, b, span=None):
    """Compute the floormod of two expressions.

    Parameters
    ----------
    a : PrimExpr
        The left hand operand
    b : PrimExpr
        The right hand operand
    span : Optional[Span]
        The location of this operator in the source.

    Returns
    -------
    res : PrimExpr
        The result expression.
    """
    remainder = _ffi_api._OpFloorMod(a, b, span)  # type: ignore
    return remainder
def ceildiv(lhs, rhs, span=None):
    """Generic ceildiv operator.

    Parameters
    ----------
    lhs : object
        The left operand.
    rhs : object
        The right operand.
    span : Optional[Span]
        The location of this operator in the source.

    Returns
    -------
    op : tvm.Expr
        The result Expr of ceildiv operaton.
    """
    quotient = _ffi_api._OpCeilDiv(lhs, rhs, span)  # type: ignore
    return quotient
def comm_reducer(fcombine, fidentity, name="reduce"):
    """Create a commutative reducer for reduction.

    Parameters
    ----------
    fcombine : function(Expr -> Expr -> Expr)
        A binary function which takes two Expr as input to return a Expr.
    fidentity : function(str -> Expr)
        A function which takes a type string as input and returns a const Expr
        (the identity element of the reduction for that dtype).
    name : str, optional
        Name of the generated reducer; substituted into its docstring.

    Returns
    -------
    reducer : function
        A function which creates a reduce expression over axis.
        There are two ways to use it:

        1. accept (expr, axis, where) to produce a Reduce Expr on the
           specified axis;
        2. simply use it with multiple Exprs to fold them directly.

    Example
    -------
    .. code-block:: python

        n = te.var("n")
        m = te.var("m")
        mysum = te.comm_reducer(lambda x, y: x+y,
            lambda t: tvm.tir.const(0, dtype=t), name="mysum")
        A = te.placeholder((n, m), name="A")
        k = te.reduce_axis((0, m), name="k")
        B = te.compute((n,), lambda i: mysum(A[i, k], axis=k), name="B")
    """
    # Mode 2 helper: fold the given Exprs pairwise with fcombine.
    def _reduce_directly(*args):
        num = len(args)
        # process `where` is None
        if num == 3 and args[2] is None:
            num = 2
        res = args[0]
        for i in range(num - 1):
            res = fcombine(res, args[i + 1])
        return res
    # Mode 1 helper: build tvm.tir.Reduce node(s) over the given axis.
    def _make_reduce(expr, axis, where=None, init=None):
        code = fcombine.__code__
        assert fcombine.__code__.co_argcount == 2
        expr = convert(expr)
        if init is not None:
            init = convert(init)
        if isinstance(expr, Array):
            # Tuple-valued reduction: one combiner Var pair per component,
            # named after fcombine's own parameter names.
            size = len(expr)
            larr = []
            rarr = []
            dtypes = []
            for i in range(size):
                dtype = expr[i].dtype
                dtypes.append(dtype)
                lname = code.co_varnames[0] + "_" + str(i)
                larr.append(Var(lname, dtype))
                rname = code.co_varnames[1] + "_" + str(i)
                rarr.append(Var(rname, dtype))
            if init is not None:
                init = convert(init)
                assert isinstance(init, Array)
                assert len(init) == size
                for init_i in range(size):
                    # NOTE(review): this converts the loop *index*, not
                    # init[init_i]; convert(int) always yields an IntImm, so
                    # the assert below never inspects the init values.
                    # Presumably init[init_i] was intended — confirm upstream.
                    init_i = convert(init_i)
                    assert isinstance(
                        init_i, (tvm.tir.ProducerLoad, tvm.tir.IntImm, tvm.tir.FloatImm)
                    )
            else:
                init = convert([])
            lhs = convert(larr)
            rhs = convert(rarr)
            result = fcombine(lhs, rhs)
            id_elem = fidentity(*dtypes)
        else:
            # Scalar reduction: wrap everything in length-1 arrays so the
            # downstream code can treat both cases uniformly.
            assert isinstance(expr, tvm.ir.PrimExpr)
            size = 1
            dtype = expr.dtype
            lvar = Var(code.co_varnames[0], dtype)
            rvar = Var(code.co_varnames[1], dtype)
            result = [fcombine(lvar, rvar)]
            id_elem = [fidentity(dtype)]
            lhs = convert([lvar])
            rhs = convert([rvar])
            expr = convert([expr])
            if init is not None:
                assert isinstance(init, (tvm.tir.ProducerLoad, tvm.tir.IntImm, tvm.tir.FloatImm))
                init = convert([init])
        result = convert(result)
        id_elem = convert(id_elem)
        combiner = CommReducer(lhs, rhs, result, id_elem)
        axis = convert(axis if isinstance(axis, (list, tuple)) else [axis])
        if where is None:
            where = convert(True)
        # One Reduce node per output component; the integer i is the
        # value_index selecting which component this node yields.
        if init is None:
            outputs = tuple(
                tvm.tir.Reduce(combiner, expr, axis, where, i, convert([])) for i in range(size)
            )
        else:
            outputs = tuple(
                tvm.tir.Reduce(combiner, expr, axis, where, i, init) for i in range(size)
            )
        return outputs[0] if size == 1 else outputs
    # Dispatch between the two calling conventions documented above.
    # pylint: disable=keyword-arg-before-vararg
    def reducer(expr, axis, where=None, init=None, *args):
        if isinstance(axis, (tvm.tir.IterVar, list, tuple)):
            assert not args
            return _make_reduce(expr, axis, where, init)
        if where is None:
            assert not args
            return _reduce_directly(expr, axis)
        return _reduce_directly(expr, axis, where, *args)
    # Runtime template (not a docstring): formatted with the reducer name and
    # installed as the generated function's __doc__ below.
    doc_str = """Create a {0} expression over axis.
    Parameters
    ----------
    expr : PrimExpr
        The source expression.
    axis : IterVar
        The reduction IterVar axis
    where : optional, Expr
        Filtering predicate of the reduction.
    Returns
    -------
    value : PrimExpr
        The result value.
    Example
    -------
    .. code-block:: python
        m = te.var("m")
        n = te.var("n")
        A = te.placeholder((m, n), name="A")
        k = te.reduce_axis((0, n), name="k")
        # there are two way to use this {0} reducer:
        # mode 1, accept (expr, axis, where) to produce an Reduce Expr
        # tvm.{0} represents tvm.te.{0} or tvm.tir.{0}.
        B = te.compute((m,), lambda i: tvm.{0}(A[i, k], axis=k), name="B")
        # mode 2, simply use it with multiple Exprs:
        {0}_res = tvm.{0}(m, n)
    """
    reducer.__doc__ = doc_str.format(name)
    return reducer
def TVMBackendAllocWorkspace(device_type, device_id, nbytes, dtype_code_hint, dtype_bits_hint):
    """Backend function to allocate temporal workspace

    Parameters
    ----------
    device_type : int
        The device type which the space will be allocated.
    device_id : int
        The device id which the space will be allocated.
    nbytes : int
        The size of the space requested.
    dtype_code_hint : int
        The type code of the array elements. Only used in certain backends such as OpenGL.
    dtype_bits_hint : int
        The type bits of the array elements. Only used in certain backends such as OpenGL.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    args = (device_type, device_id, nbytes, dtype_code_hint, dtype_bits_hint)
    return call_intrin("handle", "tir.TVMBackendAllocWorkspace", *args)
def TVMBackendFreeWorkspace(device_type, device_id, ptr):
    """Backend function to free temporal workspace.

    Parameters
    ----------
    device_type : int
        The device type which the space will be allocated.
    device_id : int
        The device id which the space will be allocated.
    ptr : Var
        The result allocated space pointer.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    free_call = call_intrin("int32", "tir.TVMBackendFreeWorkspace", device_type, device_id, ptr)
    return free_call
# Predefined commutative reducers built on comm_reducer above.
# - sum: combiner is +, identity is const 0 of the element dtype.
# - min: combiner is _OpMin, identity is max_value (the neutral element of min).
# - max: combiner is _OpMax, identity is min_value (the neutral element of max).
# These intentionally shadow the Python builtins within this module's API.
# pylint: disable=unnecessary-lambda
sum = comm_reducer(lambda x, y: x + y, lambda t: const(0, dtype=t), name="sum")
min = comm_reducer(lambda x, y: _ffi_api._OpMin(x, y, None), max_value, name="min")  # type: ignore
max = comm_reducer(lambda x, y: _ffi_api._OpMax(x, y, None), min_value, name="max")  # type: ignore
| 66,641 | 20.490487 | 133 | py |
tvm | tvm-main/python/tvm/tir/data_layout.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Data layout."""
from typing import Union
import tvm._ffi
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object("tir.Layout")
class Layout(Object):
    """Symbolic tensor layout description.

    A layout string mixes upper-case letters (primal axes), lower-case
    letters and numbers (subordinate axes with their factor sizes).
    For example, NCHW16c describes a 5-D tensor of
    [batch_size, channel, height, width, channel_block], where the
    subordinate axis channel_block=16 factors the primal axis C.

    See Also
    --------
    layout : Declare a layout
    """

    def __len__(self):
        return _ffi_api.LayoutNdim(self)  # type: ignore

    def __contains__(self, axis):
        if len(axis) != 1:
            return False
        ch = axis[0]
        return ch.isalpha() and ch in self.name

    def __getitem__(self, index):
        if not index < len(self):
            raise IndexError("Layout index out of range")
        return _ffi_api.LayoutGetItem(self, index)  # type: ignore

    def index_of(self, axis):
        """Get the index of an axis

        Parameters
        ----------
        axis : str
            The axis name, need to be [a-z,A-Z]

        Returns
        -------
        index : int
            The index of the axis, -1 if not found.
        """
        idx = _ffi_api.LayoutIndexOf(self, axis)  # type: ignore
        return idx

    def factor_of(self, axis):
        """Get the factor size of the subordinate axis.

        Parameters
        ----------
        axis : str
            The axis name, need to be [a-z,A-Z]

        Returns
        -------
        factor : int
            the size of the subordinate-axis of axis (if axis is a primal-axis),
            or the size of axis itself (if axis is a subordinate-axis).
            Return -1 if axis is not in the layout.
        """
        factor = _ffi_api.LayoutFactorOf(self, axis)  # type: ignore
        return factor
@tvm._ffi.register_object("tir.BijectiveLayout")
class BijectiveLayout(Object):
    """Bijective mapping between two layouts (src-layout and dst-layout).

    Provides shape and index conversion in both directions.
    Do not construct directly; use :any:`bijective_layout` instead —
    see its documentation for details.

    Parameters
    ----------
    src_layout : str or Layout
        source layout.
    dst_layout : str or Layout
        destination layout.

    See Also
    --------
    bijective_layout : Declare a layout
    """

    def forward_index(self, index):
        """Map indices from src-layout to dst-layout.

        Parameters
        ----------
        index: Array of Expr
            The indices in src-layout.

        Returns
        -------
        dst_index: Array of Expr
            The inferred indices in dst-layout.
        """
        mapped = _ffi_api.BijectiveLayoutForwardIndex(self, index)  # type: ignore
        return mapped

    def backward_index(self, index):
        """Map indices from dst-layout back to src-layout.

        Parameters
        ----------
        index: Array of Expr
            The indices in dst-layout.

        Returns
        -------
        src_index: Array of Expr
            The inferred indices in src-layout.
        """
        mapped = _ffi_api.BijectiveLayoutBackwardIndex(self, index)  # type: ignore
        return mapped

    def forward_shape(self, shape):
        """Map a shape from src-layout to dst-layout.

        Parameters
        ----------
        shape: Array of Expr
            The shape in src-layout.

        Returns
        -------
        dst_shape: Array of Expr
            The inferred shape in dst-layout.
        """
        mapped = _ffi_api.BijectiveLayoutForwardShape(self, shape)  # type: ignore
        return mapped

    def backward_shape(self, shape):
        """Map a shape from dst-layout back to src-layout.

        Parameters
        ----------
        shape: Array of Expr
            The shape in dst-layout.

        Returns
        -------
        src_shape: Array of Expr
            The inferred shape in src-layout.
        """
        mapped = _ffi_api.BijectiveLayoutBackwardShape(self, shape)  # type: ignore
        return mapped
def layout(layout_str: str, dtype: str = "int32") -> Layout:
    """Create a layout node from a string.

    Parameters
    ----------
    layout_str : str
        A layout representation mixing upper-case letters (primal axes) with
        lower-case letters and factor sizes (subordinate axes). For example,
        NCHW16c describes a 5-D tensor of
        [batch_size, channel, height, width, channel_block], where the
        subordinate axis channel_block=16 factors the primal axis C (channel).
    dtype : str
        The dtype of generated axes vars in the returned layout.
        It is required to be integer type.

    Returns
    -------
    layout : Layout
        The created layout
    """
    created = _ffi_api.Layout(layout_str, dtype)  # type: ignore
    return created
def bijective_layout(
    src_layout: Union[str, Layout], dst_layout: Union[str, Layout]
) -> BijectiveLayout:
    """Create a bijective layout mapping.

    Parameters
    ----------
    src_layout : str or Layout
        source layout.
    dst_layout : str or Layout
        destination layout.

    Returns
    -------
    bijective_layout : BijectiveLayout
        The created bijective layout
    """
    # Accept either a Layout object or a layout string for each side.
    src = layout(src_layout) if isinstance(src_layout, str) else src_layout
    dst = layout(dst_layout) if isinstance(dst_layout, str) else dst_layout
    return _ffi_api.BijectiveLayout(src, dst)  # type: ignore
| 6,463 | 29.065116 | 93 | py |
tvm | tvm-main/python/tvm/tir/stmt_functor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Statement functor utilities for IR transformations"""
from .function import PrimFunc
from . import _ffi_api
def ir_transform(stmt, preorder, postorder, only_enable=None):
    """Recursively visit and transform ir nodes in post DFS order.

    Parameters
    ----------
    stmt : tvm.tir.Stmt
        The input to be transformed.
    preorder: function
        Called before recursing into a node. Returning None continues the
        recursion; returning a non-None tvm.tir.Stmt/Expr short-circuits it
        and that value is used directly.
    postorder : function
        The function called after recursive mutation.
    only_enable : Optional[List[str]]
        List of types that we only enable.

    Returns
    -------
    result : tvm.tir.Stmt
        The result.
    """
    transformed = _ffi_api.IRTransform(stmt, preorder, postorder, only_enable)  # type: ignore
    return transformed
def post_order_visit(stmt, fvisit):
    """Recursively visit the ir in post DFS order node, apply fvisit.

    Each node is guaranteed to be visited only once.

    Parameters
    ----------
    fvisit: function
        The visitor function.
    """
    result = _ffi_api.PostOrderVisit(stmt, fvisit)  # type: ignore
    return result
def pre_order_visit(stmt, fvisit):
    """Recursive pre-order visit on stmt AST, applying fvisit on each node.

    If fvisit returns False, the children of that node are not visited.

    Parameters
    ----------
    fvisit: function of the signature Object -> bool
        The visitor function.
    """
    result = _ffi_api.PreOrderVisit(stmt, fvisit)  # type: ignore
    return result
def substitute(node, vmap):
    """Substitute the var specified by vmap.

    Parameters
    ----------
    node: ObjectRef
        The input.
    vmap : Dict[Var, PrimExpr]
        The variable mapping.

    Returns
    -------
    result : tvm.tir.Stmt
        The result.
    """
    substituted = _ffi_api.Substitute(node, vmap)  # type: ignore
    return substituted
def renew_defs(func: PrimFunc):
    """Re-generate the definition nodes for a TIR, including VarDef, BufferDef.

    This pass works as a simple DeepCopy: it duplicates a function with
    different Vars and Buffers but the same behavior.

    Parameters
    ----------
    func: PrimFunc
        The input function

    Returns
    -------
    result : PrimFunc
        The new generated func.
    """
    renewed = _ffi_api.RenewDefs(func)  # type: ignore
    return renewed
| 3,239 | 28.724771 | 99 | py |
tvm | tvm-main/python/tvm/tir/block_scope.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of two pillar data structure for TensorIR scheduling: StmtSRef, BlockScope."""
from enum import IntEnum
from typing import List, Optional, Union
from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.tir import Block, For
from . import _ffi_api
@register_object("tir.StmtSRef")
class StmtSRef(Object):
    """An object that refers to schedulable elements in the TensorIR, aka "sref".

    Glossary
    - Block sref: An StmtSref that points to a TensorIR block.
    - Loop sref: An StmtSRef that points to a TensorIR for loop.
    - Parent sref: The parent sref of an sref is the block/loop sref that points to its closest
    schedulable statement of its ancestors on the TensorIR AST.
    - Root sref: Sref to the root block. Every sref has exactly one parent sref
    except for root sref.
    - Sref tree: The parent-children-relationship of srefs that forms a tree,
    uniquely determined by the TensorIR AST.
    """

    # Index of the referenced stmt within its parent's SeqStmt, if any.
    seq_index: int

    @property
    def stmt(self) -> Optional[Union[Block, For]]:
        """The block/for stmt the object refers to"""
        referenced = _ffi_api.StmtSRefStmt(self)  # type: ignore # pylint: disable=no-member
        return referenced

    @property
    def parent(self) -> Optional["StmtSRef"]:
        """The parent sref"""
        parent_sref = _ffi_api.StmtSRefParent(self)  # type: ignore # pylint: disable=no-member
        return parent_sref

    @staticmethod
    def inline_mark() -> "StmtSRef":
        """A special StmtSRef that points to no stmt in the AST; it is only a
        "mark" telling compute-at to do the work of compute-inline."""
        return _ffi_api.StmtSRefInlineMark()  # type: ignore # pylint: disable=no-member

    @staticmethod
    def root_mark() -> "StmtSRef":
        """A special StmtSRef that points to no stmt in the AST; it is only a
        "mark" telling compute-at to do nothing."""
        return _ffi_api.StmtSRefRootMark()  # type: ignore # pylint: disable=no-member
class DepKind(IntEnum):
    """Type of dependency.

    Attributes
    ----------
    RAW : int = 0
        Read-after-write dependency
    WAW : int = 1
        Write-after-write dependency
    WAR : int = 2
        Write-after-read dependency. Not supported in TensorIR for now.
    OPAQUE: int = 3
        Opaque dependency
    """

    # Numeric values mirror the C++ DepKind enum; do not reorder.
    RAW = 0  # read-after-write
    WAW = 1  # write-after-write
    WAR = 2  # write-after-read (unsupported in TensorIR for now)
    OPAQUE = 3  # opaque dependency
@register_object("tir.Dependency")
class Dependency(Object):
    """A tuple (src, dst, kind) representing certain types of dependency.

    For example, (A, B, kRAW) means block B depends on block A, and the dependency kind is
    read-after-write, which means block B reads the result written by block A.

    Parameters
    ----------
    src : StmtSRef
        The source of the dependency relation
    dst : StmtSRef
        The destination of the dependency relation
    kind : DepKind
        The dependency kind
    """

    # Fields are populated by the C++ side of this registered FFI object.
    src: StmtSRef
    dst: StmtSRef
    kind: DepKind
@register_object("tir.BlockScope")
class BlockScope(Object):
    """An object corresponds to each block sref in the sref tree, which
    tracks the producer-consumer dependency between blocks.

    Glossary:
    - Block scope: A contiguous subtree of the sref tree, rooted at
    each block sref, whose components are:
    - scope root: a block sref
    - internal srefs: loop srefs
    - scope leaves: block srefs
    - Child block: The scope leaf blocks under the scope root or a specific internal sref
    """

    def get_deps_by_src(self, block: StmtSRef) -> List[Dependency]:
        """Get all dependencies whose `src` is the target `block`.

        Parameters
        ----------
        block: StmtSRef
            The queried block

        Returns
        -------
        blocks: List[Dependency]
            The dependencies
        """
        deps = _ffi_api.BlockScopeGetDepsBySrc(self, block)  # type: ignore # pylint: disable=no-member
        return deps

    def get_deps_by_dst(self, block: StmtSRef) -> List[Dependency]:
        """Get all dependencies whose `dst` is the target `block`.

        Parameters
        ----------
        block: StmtSRef
            The queried block

        Returns
        -------
        blocks: List[Dependency]
            The dependencies
        """
        deps = _ffi_api.BlockScopeGetDepsByDst(self, block)  # type: ignore # pylint: disable=no-member
        return deps
| 5,125 | 31.858974 | 103 | py |
tvm | tvm-main/python/tvm/tir/buffer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Abstraction for array data structures."""
from numbers import Integral
import tvm._ffi
from tvm._ffi.base import string_types
from tvm.ir import PointerType, PrimExpr, PrimType, Range
from tvm.runtime import Object, Scriptable, convert
from . import _ffi_api
@tvm._ffi.register_object("tir.Buffer")
class Buffer(Object, Scriptable):
    """Symbolic data buffer in TVM.

    Buffer provides a way to represent data layout
    specialization of data structure in TVM.

    Do not construct directly, use :py:func:`~decl_buffer` instead.
    See the documentation of :py:func:`decl_buffer` for more details.

    See Also
    --------
    decl_buffer : Declare a buffer
    """

    # Access-mask bit flags for access_ptr; combinable via `|`.
    READ = 1
    WRITE = 2
    def access_ptr(self, access_mask, ptr_type="handle", content_lanes=1, offset=0, extent=None):
        """Get an access pointer to the head of buffer.

        This is the recommended method to get buffer data
        ptress when interacting with external functions.

        Parameters
        ----------
        access_mask : int or str
            The access pattern MASK. Indicates whether the access will read or
            write to the data content. May also be a string composed of the
            characters "r" and/or "w", which is translated to the bit flags.
        ptr_type : str, optional
            The data type of the result pointer. Do not specify
            unless we want to cast pointer to specific type.
        content_lanes: int, optional
            The number of lanes for the data type. This value
            is greater than one for vector types.
        offset: Expr, optional
            The offset of pointer. We can use it to offset by
            the number of elements from the address of ptr.
        extent: Expr, optional
            The extent of pointer.

        Examples
        --------
        .. code-block:: python

            # Get access ptr for read
            buffer.access_ptr("r")
            # Get access ptr for read/write with bitmask
            buffer.access_ptr(Buffer.READ | Buffer.WRITE)
            # Get access ptr for read/write with str flag
            buffer.access_ptr("rw")
            # Get access ptr for read with offset
            buffer.access_ptr("r", offset = 100)
            # Get access ptr for read with extent
            buffer.access_ptr("r", extent = 100)
        """
        if isinstance(access_mask, string_types):
            # Translate an "r"/"w" string into the READ/WRITE bit flags.
            mask = 0
            for value in access_mask:
                if value == "r":
                    mask = mask | Buffer.READ
                elif value == "w":
                    mask = mask | Buffer.WRITE
                else:
                    raise ValueError(f"Unknown access_mask {access_mask}")
            access_mask = mask
        offset = convert(offset)
        extent = convert(extent)
        return _ffi_api.BufferAccessPtr(
            self, access_mask, ptr_type, content_lanes, offset, extent  # type: ignore
        )
    def vload(self, begin, dtype=None):
        """Generate an Expr that loads dtype from begin index.

        Parameters
        ----------
        begin : Array of Expr
            The beginning index in unit of Buffer.dtype
        dtype : str
            The data type to be loaded,
            can be vector type which have lanes that is multiple of Buffer.dtype

        Returns
        -------
        load : Expr
            The corresponding load expression.
        """
        # A scalar index is promoted to a 1-element tuple.
        begin = (begin,) if isinstance(begin, (int, PrimExpr)) else begin
        dtype = dtype if dtype else self.dtype
        return _ffi_api.BufferVLoad(self, begin, dtype)  # type: ignore
    def vstore(self, begin, value):
        """Generate a Stmt that store value into begin index.

        Parameters
        ----------
        begin : Array of Expr
            The beginning index in unit of Buffer.dtype
        value : Expr
            The value to be stored.

        Returns
        -------
        store : Stmt
            The corresponding store stmt.
        """
        # A scalar index is promoted to a 1-element tuple.
        begin = (begin,) if isinstance(begin, (int, PrimExpr)) else begin
        return _ffi_api.BufferVStore(self, begin, value)  # type: ignore
    def scope(self):
        """Return the storage scope associated with this buffer.

        Returns
        -------
        scope : str
            The storage scope associated with this buffer.
        """
        return _ffi_api.BufferStorageScope(self)  # type: ignore
    def get_flattened_buffer(self):
        """Generate a Buffer that is a flattened version of this buffer.

        Returns
        -------
        flattened : Buffer
            The corresponding flat buffer.
        """
        return _ffi_api.BufferGetFlattenedBuffer(self)  # type: ignore
    def offset_of(self, indices):
        """Determine the offset of the provided indices in the flattened buffer.

        Parameters
        ----------
        indices : Union[PrimExpr, List[PrimExpr]]
            The indices of the element in the original buffer.

        Returns
        -------
        flattened_indices: List[PrimExpr]
            The offset indices of the element in the flattened buffer.
        """
        return _ffi_api.BufferOffsetOf(self, indices)  # type: ignore
    def __getitem__(self, indices):
        """Index the buffer.

        Step-free slices yield a BufferRegion covering the selected ranges;
        otherwise a BufferLoad is produced, with stepped slices lowered to
        Ramp vector indices.
        """
        from ..arith import Analyzer  # pylint: disable=import-outside-toplevel
        from .expr import BufferLoad, Ramp, const  # pylint: disable=import-outside-toplevel
        from .stmt import BufferRegion  # pylint: disable=import-outside-toplevel
        if not isinstance(indices, (tuple, list)):
            indices = [indices]
        has_slice = any(isinstance(i, slice) for i in indices)
        has_step = any(isinstance(i, slice) and i.step is not None for i in indices)
        analyzer = Analyzer()
        if has_slice and not has_step:
            # Region case: build one Range per dimension.
            region = []
            for i, index in enumerate(indices):
                if isinstance(index, slice):
                    start = 0 if index.start is None else index.start
                    stop = self.shape[i] if index.stop is None else index.stop
                    region.append(Range.from_min_extent(start, analyzer.simplify(stop - start)))
                else:
                    # A point index becomes a unit-extent range.
                    region.append(
                        Range.from_min_extent(
                            index, const(1, index.dtype) if isinstance(index, PrimExpr) else 1
                        )
                    )
            return BufferRegion(self, region)
        else:
            # Load case: point indices pass through; stepped slices become
            # Ramp(start, step, lanes) vector indices.
            expr_indices = []
            for i, index in enumerate(indices):
                if isinstance(index, slice):
                    start = 0 if index.start is None else index.start
                    stop = self.shape[i] if index.stop is None else index.stop
                    step = 1 if index.step is None else index.step
                    # We should ensure the dtype of start is the same with that of step.
                    if isinstance(start, tvm.tir.expr.PrimExpr) and isinstance(step, int):
                        step = tvm.tir.expr.IntImm(start.dtype, step)
                    # Ceil-divided lane count of the slice.
                    lanes = analyzer.simplify((stop - start + step - 1) // step)
                    if lanes == 1:
                        expr_indices.append(start)
                    else:
                        expr_indices.append(Ramp(start, step, int(lanes)))
                else:
                    expr_indices.append(index)
            return BufferLoad(self, expr_indices)
def decl_buffer(
    shape,
    dtype=None,
    name="buffer",
    data=None,
    strides=None,
    elem_offset=None,
    scope="",
    data_alignment=-1,
    offset_factor=0,
    buffer_type="",
    axis_separators=None,
    span=None,
):
    """Declare a new symbolic buffer.

    Normally buffer is created automatically during lower and build.
    This is only needed if user want to specify their own buffer layout.

    See the note below for detailed discussion on usage of buffer.

    Parameters
    ----------
    shape : tuple of Expr
        The shape of the buffer.
    dtype : str, optional
        The data type of the buffer.
    name : str, optional
        The name of the buffer.
    data : Var, optional
        The data pointer in the buffer.
    strides: array of Expr
        The stride of the buffer.
    elem_offset: Expr, optional
        The beginning offset of the array to data.
        In terms of number of elements of dtype.
    scope: str, optional
        The storage scope of the buffer, if not global.
        If scope equals empty string, it means it is global memory.
    data_alignment: int, optional
        The alignment of data pointer in bytes.
        If -1 is passed, the alignment will be set to TVM's internal default.
    offset_factor: int, optional
        The factor of elem_offset field, when set,
        elem_offset is required to be multiple of offset_factor.
        If 0 is passed, the alignment will be set to 1.
        If non-zero is passed, we will create a Var for elem_offset if elem_offset is None.
    buffer_type: str, optional, {"", "auto_broadcast"}
        auto_broadcast buffer allows one to implement broadcast computation
        without considering whether dimension size equals to one.
        TVM maps buffer[i][j][k] -> buffer[i][0][k] if dimension j's shape equals 1.
    axis_separators : list of int, optional
        If passed, a list of separators between groups of axes,
        each of which is flattened to an output axis. For flat
        memory spaces, should either be None, or an empty list.
    span: Optional[Span]
        The location of the decl_buffer creation in the source.

    Returns
    -------
    buffer : tvm.tir.Buffer
        The created buffer

    Example
    -------
    Here's an example of how broadcast buffer can be used to define a symbolic broadcast operation,

    .. code-block:: python

        m0, m1, m2 = te.var("m0"), te.var("m1"), te.var("m2")
        n0, n1, n2 = te.var("n0"), te.var("n1"), te.var("n2")
        o0, o1, o2 = te.var("o0"), te.var("o1"), te.var("o2")
        A = te.placeholder((m0, m1, m2), name='A')
        B = te.placeholder((n0, n1, n2), name='B')
        C = te.compute((o0, o1, o2), lambda i, j, k: A[i, j, k] + B[i, j, k], name='C')
        Ab = tvm.tir.decl_buffer(A.shape, A.dtype, name="Ab", buffer_type="auto_broadcast")
        Bb = tvm.tir.decl_buffer(B.shape, B.dtype, name="Bb", buffer_type="auto_broadcast")
        s = te.create_schedule(C.op)
        fadd = tvm.build(s, [A, B, C], target='llvm', name='bcast_add', binds={A:Ab, B:Bb})
        dev = tvm.cpu(0)
        a = tvm.nd.array(np.random.uniform(size=(2, 4, 3)).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=(2, 1, 3)).astype(B.dtype), dev)
        c = tvm.nd.array(np.zeros((2, 4, 3), dtype=C.dtype), dev)
        fadd(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())

    Note
    ----
    Buffer data structure reflects the DLTensor structure in dlpack.
    While DLTensor data structure is very general, it is usually helpful
    to create function that only handles specific case of data structure
    and make compiled function benefit from it.

    If user pass strides and elem_offset is passed as None
    when constructing the function, then the function will be specialized
    for the DLTensor that is compact and aligned.
    If user pass a fully generic symbolic array to the strides,
    then the resulting function becomes fully generic.
    """
    # pylint: disable=import-outside-toplevel
    from .expr import Var
    # Normalize a scalar shape to a 1-tuple and fill in defaults.
    shape = (shape,) if isinstance(shape, (PrimExpr, Integral)) else shape
    dtype = "float32" if dtype is None else dtype
    strides = () if strides is None else strides
    if axis_separators is None:
        axis_separators = []
    if offset_factor != 0 and elem_offset is None:
        # Synthesize a symbolic elem_offset var matching the shape's dtype.
        shape_dtype = shape[0].dtype if shape and hasattr(shape[0], "dtype") else "int32"
        elem_offset = Var(f"{name}_elem_offset", shape_dtype)
    if data is None:
        # Bool is represented as uint1 in the IR, but stored as int8
        storage_type = PrimType(dtype)
        storage_type = PrimType("int8") if storage_type.dtype == "bool" else storage_type
        data = Var(name, PointerType(storage_type, scope), span)
    return _ffi_api.Buffer(  # type: ignore
        data,
        dtype,
        shape,
        strides,
        elem_offset,
        name,
        data_alignment,
        offset_factor,
        buffer_type,
        axis_separators,
        span,
    )
@tvm._ffi.register_object("tir.DataProducer")
class DataProducer(Object):
    """Python handle for the C++ node type registered as ``tir.DataProducer``.

    All behavior lives on the C++ side; this class only provides the
    Python-side object wrapper.
    """

    pass
| 13,322 | 35.008108 | 99 | py |
tvm | tvm-main/python/tvm/tir/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.tir"""
import tvm._ffi
tvm._ffi._init_api("tir", __name__)
| 866 | 38.409091 | 62 | py |
tvm | tvm-main/python/tvm/tir/generic.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Generic opertors in TVM.
We follow the numpy naming convention for this interface
(e.g., tvm.tir.generic.multitply ~ numpy.multiply).
The default implementation is used by tvm.ExprOp.
"""
# pylint: disable=unused-argument
from . import _ffi_api
# Operator precedence used when overloading.
__op_priority__ = 0
def add(lhs, rhs, span=None):
    """Build a generic addition expression ``lhs + rhs``.

    Parameters
    ----------
    lhs : object
        Left operand.
    rhs : object
        Right operand.
    span : Optional[Span]
        Source location of this operator, if known.

    Returns
    -------
    op : tvm.Expr
        The resulting add expression.
    """
    # Dispatch to the C++ operator implementation via FFI.
    return _ffi_api._OpAdd(lhs, rhs, span)  # type: ignore
def subtract(lhs, rhs, span=None):
    """Build a generic subtraction expression ``lhs - rhs``.

    Parameters
    ----------
    lhs : object
        Left operand.
    rhs : object
        Right operand.
    span : Optional[Span]
        Source location of this operator, if known.

    Returns
    -------
    op : tvm.Expr
        The resulting subtract expression.
    """
    # Dispatch to the C++ operator implementation via FFI.
    return _ffi_api._OpSub(lhs, rhs, span)  # type: ignore
def multiply(lhs, rhs, span=None):
    """Build a generic multiplication expression ``lhs * rhs``.

    Parameters
    ----------
    lhs : object
        Left operand.
    rhs : object
        Right operand.
    span : Optional[Span]
        Source location of this operator, if known.

    Returns
    -------
    op : tvm.Expr
        The resulting multiply expression.
    """
    # Dispatch to the C++ operator implementation via FFI.
    return _ffi_api._OpMul(lhs, rhs, span)  # type: ignore
def divide(lhs, rhs, span=None):
    """Build a generic division expression ``lhs / rhs``.

    Parameters
    ----------
    lhs : object
        Left operand.
    rhs : object
        Right operand.
    span : Optional[Span]
        Source location of this operator, if known.

    Returns
    -------
    op : tvm.Expr
        The resulting divide expression.
    """
    # Dispatch to the C++ operator implementation via FFI.
    return _ffi_api._OpDiv(lhs, rhs, span)  # type: ignore
def floordiv(lhs, rhs, span=None):
    """Build a generic floor-division expression ``lhs // rhs``.

    Parameters
    ----------
    lhs : object
        Left operand.
    rhs : object
        Right operand.
    span : Optional[Span]
        Source location of this operator, if known.

    Returns
    -------
    op : tvm.Expr
        The resulting floordiv expression.
    """
    # Dispatch to the C++ operator implementation via FFI.
    return _ffi_api._OpFloorDiv(lhs, rhs, span)  # type: ignore
def cast(src, dtype, span=None):
    """Generic cast operator.

    Parameters
    ----------
    src : object
        The source operand.
    dtype : str
        The target data type to cast to.
    span : Optional[Span]
        The location of this operator in the source.

    Returns
    -------
    op : tvm.Expr
        The result Expr of the cast operation.
    """
    # Note: the FFI endpoint takes (dtype, value, span) — dtype first,
    # opposite to this function's (src, dtype) parameter order.
    return _ffi_api._cast(dtype, src, span)  # type: ignore
| 3,576 | 23.668966 | 63 | py |
tvm | tvm-main/python/tvm/tir/function.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unrecognized-inline-option
"""Function data types."""
import collections
import inspect
from typing import Callable, List, Mapping, Optional, Tuple, Union
import tvm
import tvm._ffi
import tvm.runtime
from tvm.ir import BaseFunc, Range
from tvm.runtime import Object, Scriptable
from ..runtime.ndarray import NDArray
from . import _ffi_api
from .buffer import Buffer
from .expr import PrimExpr, Var
@tvm._ffi.register_object("tir.PrimFunc")
class PrimFunc(BaseFunc, Scriptable):
    """A primitive TIR function declaration.

    Parameters
    ----------
    params: List[Union[tvm.tir.Var, tvm.tir.Buffer]]
        Input parameters of the function. A Buffer parameter is lowered
        to an opaque handle Var plus an entry in the buffer map.
    body: tvm.tir.Stmt
        The function body.
    ret_type: tvm.ir.Type
        The return type annotation of the function.
    buffer_map : Map[tvm.tir.Var, tvm.tir.Buffer]
        The buffer binding map from handle parameters to buffers.
    attrs: Optional[tvm.Attrs]
        Attributes of the function; may be None.
    span : Optional[Span]
        The location of this function in the source code.
    """

    def __init__(
        self,
        params,
        body,
        ret_type=None,
        buffer_map=None,
        attrs=None,
        span=None,
    ):
        if buffer_map is None:
            buffer_map = {}
        param_vars = []
        for param in params:
            if not isinstance(param, Object):
                param = tvm.runtime.convert(param)
            if isinstance(param, Buffer):
                # Buffers are passed as opaque handles; record the binding
                # from the fresh handle Var to the buffer itself.
                handle = Var(param.name, dtype="handle")
                param_vars.append(handle)
                buffer_map[handle] = param
            elif isinstance(param, Var):
                param_vars.append(param)
            else:
                raise TypeError("params can only contain Var or Buffer")
        self.__init_handle_by_constructor__(
            _ffi_api.PrimFunc,  # type: ignore
            param_vars,
            body,
            ret_type,
            buffer_map,
            attrs,
            span,
        )

    def with_body(self, new_body, span=None):
        """Create a new PrimFunc with the same signature but a new body.

        Parameters
        ----------
        new_body : Stmt
            The new body.
        span : Optional[Span]
            The location of the new function in the source code.

        Returns
        -------
        new_func : PrimFunc
            The newly created function.
        """
        return PrimFunc(
            params=self.params,
            body=new_body,
            ret_type=self.ret_type,
            buffer_map=self.buffer_map,
            attrs=self.attrs,
            span=span,
        )

    def specialize(self, param_map: Mapping[Var, Union[PrimExpr, Buffer]]):
        """Specialize parameters of this PrimFunc.

        Parameters
        ----------
        param_map : Mapping[Var, Union[PrimExpr, Buffer]]
            Mapping from function parameters to concrete instances
            (e.g. fixed integer extents, or fully-specified buffers).

        Examples
        --------
        Given a function with symbolic shape parameters ``m`` and ``n``,
        either binding yields a fully concrete function:

        .. code-block:: python

            a, _, m, n = mem_copy.params
            func = mem_copy.specialize({a: tir.decl_buffer((16, 16))})
            # or
            func = mem_copy.specialize({n: 16, m: 16})

        Returns
        -------
        func : PrimFunc
            The new function with the given parameters specialized.
        """
        return _ffi_api.Specialize(self, param_map)  # type: ignore
@tvm._ffi.register_object("tir.TensorIntrin")
class TensorIntrin(Object):
    """A tensor intrinsic: a computation description paired with an implementation.

    Parameters
    ----------
    desc : PrimFunc
        The function describing the computation.
    impl : PrimFunc
        The function implementing the execution.
    """

    def __init__(self, desc, impl):
        self.__init_handle_by_constructor__(_ffi_api.TensorIntrin, desc, impl)

    @staticmethod
    def register(name: str, desc: PrimFunc, impl: PrimFunc, override: bool = False):
        """Register a tensor intrinsic under the given name.

        Parameters
        ----------
        name : str
            The name to register the TensorIntrin under.
        desc : PrimFunc
            The function describing the computation.
        impl : PrimFunc
            The function implementing the execution.
        override: bool
            Whether to override an existing intrinsic with the same name.
        """
        intrin = TensorIntrin(desc, impl)
        return _ffi_api.TensorIntrinRegister(name, intrin, override)  # type: ignore

    @staticmethod
    def get(name: str, allow_missing: bool = False) -> Optional["TensorIntrin"]:
        """Look up a tensor intrinsic by name.

        Parameters
        ----------
        name : str
            The name of the TensorIntrin to look up.
        allow_missing : bool
            If False, raise an error when no intrinsic with this name
            exists; if True, return None instead.

        Returns
        -------
        result : Optional[TensorIntrin]
            The TensorIntrin with the given name, or None if not found.
        """
        return _ffi_api.TensorIntrinGet(name, allow_missing)  # type: ignore
@tvm._ffi.register_object("tir.IndexMap")
class IndexMap(Object):
    """A mapping from multi-dimensional indices to another set of multi-dimensional indices
    Parameters
    ----------
    initial_indices : List[Var]
        Variables representing the indices prior to remapping.
    final_indices : List[PrimExpr]
        Expressions defining the indices after remapping.
    inverse_index_map : Union[Callable, Optional[IndexMap]]
        The optional pre-defined inverse index map.
        When this is defined, IndexMap::Inverse will return the pre-defined inverse index map.
        Otherwise, the inverse index map will be computed on the fly.
        It is the user's responsibility to ensure the correctness of the pre-defined inverse
        index map.
    """
    initial_indices: List[Var]
    final_indices: List[PrimExpr]
    # Sentinel value used to indicate which groups of pre-flattening axes
    # should be mapped to post-flattening axes. See
    # Stage.transform_layout for more details.
    AXIS_SEPARATOR = "axis_separator"
    def __init__(self, initial_indices, final_indices, inverse_index_map):
        # A callable inverse is converted to an IndexMap eagerly, so the
        # FFI constructor only ever receives IndexMap objects (or None).
        if isinstance(inverse_index_map, Callable):
            inverse_index_map = IndexMap.from_func(inverse_index_map)
        self.__init_handle_by_constructor__(
            _ffi_api.IndexMap, initial_indices, final_indices, inverse_index_map
        )
    @staticmethod
    def from_func(
        mapping_function: Callable,
        ndim: Optional[int] = None,
        inverse_index_map: Union[Callable, Optional["IndexMap"]] = None,
        *,
        index_dtype: str = "int64",
    ):
        """Create an index map from a function
        Parameters
        ----------
        mapping_function : Callable
            The function to map from source indices to target indices.
            The function should accept `tir.Var` parameters and return
            a either a `tir.PrimExpr`, or a list of `tir.PrimExpr`.
            Returning a `tir.PrimExpr` is equivalent to returning a
            list of length 1 containing that `tir.PrimExpr`.
        ndim: Optional[int]
            The dimensionality of the buffer to which this
            transformation should be applied. If mapping_function uses
            variadic argument `*args`, `ndim` must be specified. If
            mapping_function does not use variadic arguments, ndim is
            optional.
        inverse_index_map : Union[Callable, Optional[IndexMap]]
            The optional pre-defined inverse index map.
            When this is defined, IndexMap::Inverse will return the pre-defined inverse index map.
            Otherwise, the inverse index map will be computed on the fly.
            It is the user's responsibility to ensure the correctness of the pre-defined inverse
            index map.
        Returns
        -------
        index_map: IndexMap
            Returns an IndexMap representing the `mapping_function`.
        """
        index_map, axis_separators = IndexMap.from_func_with_separators(
            mapping_function,
            ndim,
            inverse_index_map,
            index_dtype=index_dtype,
        )
        # from_func does not support axis separators; any produced by the
        # mapping function is a caller error.
        assert not axis_separators, (
            "The mapping_function provided to IndexMap.from_func "
            "may not return IndexMap.AXIS_SEPARATOR. "
            "If required, please use IndexMap.from_func_with_separators instead."
        )
        return index_map
    @staticmethod
    def from_func_with_separators(
        mapping_function: Callable,
        ndim: Optional[int] = None,
        inverse_index_map: Union[Callable, Optional["IndexMap"]] = None,
        *,
        index_dtype: str = "int64",
    ):
        """Create an index map from a function
        Parameters
        ----------
        mapping_function : Callable
            The function to map from source indices to target indices.
            The function should accept tir.Var parameters and return
            either a `tir.PrimExpr` or a list. Each element of the
            returned list should be either a `tir.PrimExpr` or the
            object `IndexMap.AXIS_SEPARATOR`. Returning a
            `tir.PrimExpr` is equivalent to returning a list of length
            1 containing that `tir.PrimExpr`.
        ndim: Optional[int]
            The dimensionality of the buffer to which this
            transformation should be applied. If mapping_function uses
            variadic argument `*args`, ndim must be specified. If
            mapping_function does not use variadic arguments, ndim is
            optional.
        inverse_index_map : Union[Callable, Optional[IndexMap]]
            The optional pre-defined inverse index map.
            When this is defined, IndexMap::Inverse will return the pre-defined inverse index map.
            Otherwise, the inverse index map will be computed on the fly.
            It is the user's responsibility to ensure the correctness of the pre-defined inverse
            index map.
        index_dtype : str
            The default index dtype to use for input iters in the mapping function.
        Returns
        -------
        ret: Tuple[IndexMap, List[int]]
            Returns a tuple whose first element is an IndexMap
            representing the `mapping_function`, and whose second index
            is a list of indices at which `IndexMap.AXIS_SEPARATOR`
            occurred.
        """
        # Introspect the mapping function's signature to build one fresh
        # tir.Var per named parameter.
        params = inspect.signature(mapping_function).parameters
        args = []
        var_arg_name = None
        kwargs = collections.OrderedDict()
        for name, param in params.items():
            if param.kind in [
                inspect.Parameter.POSITIONAL_ONLY,
                inspect.Parameter.POSITIONAL_OR_KEYWORD,
            ]:
                args.append(tvm.tir.Var(name, index_dtype))
            elif param.kind == inspect.Parameter.VAR_POSITIONAL:
                var_arg_name = name
            elif param.kind == inspect.Parameter.KEYWORD_ONLY:
                kwargs[name] = tvm.tir.Var(name, index_dtype)
            else:
                # NOTE(review): this branch is reached for VAR_KEYWORD
                # (**kwargs) parameters, yet the message mentions *args,
                # which is actually supported above — confirm wording.
                raise ValueError("transform_layout mapping may not have *args")
        # Now that all the named arguments have been collected,
        # everything that remains should go to the *args, if
        # specified.
        if var_arg_name is not None:
            assert ndim is not None, "ndim must be specified when *args is used"
            num_var_args = ndim - len(args) - len(kwargs)
            for i in range(num_var_args):
                args.append(tvm.tir.Var(f"{var_arg_name}_{i}", index_dtype))
        mapping = mapping_function(*args, **kwargs)
        initial_indices = args + list(kwargs.values())
        final_indices = []
        axis_separators = []
        # A mapping that returns a single PrimExpr is treated as a
        # one-element list; distinguish the two cases by probing
        # iterability rather than checking concrete types.
        try:
            iter(mapping)
            is_iterable = True
        except TypeError:
            is_iterable = False
        if is_iterable:
            for val in mapping:
                if isinstance(val, tvm.ir.PrimExpr):
                    final_indices.append(val)
                elif val is IndexMap.AXIS_SEPARATOR:
                    # Record the position of the separator relative to the
                    # expressions collected so far.
                    axis_separators.append(len(final_indices))
                else:
                    raise TypeError(
                        "Expected mapping function to return list of "
                        "either tvm.ir.PrimExpr or IndexMap.AXIS_SEPARATOR. "
                        f"Instead received {val} of type {type(val)}."
                    )
        else:
            final_indices.append(mapping)
        return IndexMap(initial_indices, final_indices, inverse_index_map), axis_separators
    def is_equivalent_to(self, other_map: "IndexMap") -> bool:
        """Return if the index maps are equivalent.
        Parameters
        ----------
        other_map: IndexMap
            The IndexMap to which the comparison should be made.
        Returns
        -------
        is_equivalent: bool
            True if the two mappings represent the same
            transformation, otherwise False
        """
        if len(self.initial_indices) != len(other_map.initial_indices):
            return False
        if len(self.final_indices) != len(other_map.final_indices):
            return False
        # Substitute our own input vars into the other map, then prove each
        # pair of output expressions equal symbolically.
        analyzer = tvm.arith.Analyzer()
        mapped_other_final_indices = other_map.map_indices(self.initial_indices)
        for self_index, other_index in zip(self.final_indices, mapped_other_final_indices):
            if not analyzer.can_prove_equal(self_index, other_index):
                return False
        return True
    def map_indices(self, indices: List[PrimExpr]) -> List[PrimExpr]:
        """Apply the index map to a set of indices
        Parameters
        ----------
        indices : List[PrimExpr]
            The indices to be mapped
        Returns
        -------
        result : List[PrimExpr]
            The mapped indices
        """
        return _ffi_api.IndexMapMapIndices(self, indices)
    def map_shape(self, shape: List[PrimExpr]) -> List[PrimExpr]:
        """Apply the index map to a buffer shape
        Parameters
        ----------
        shape : List[PrimExpr]
            The buffer shape to be mapped
        Returns
        -------
        result : List[PrimExpr]
            The mapped shape
        """
        return _ffi_api.IndexMapMapShape(self, shape)
    def map_ndarray(self, arr_src: NDArray) -> NDArray:
        """Apply this index map to transform the layout of the input NDArray
        Parameters
        ----------
        arr_src : runtime.NDArray
            The NDArray to be transformed
        Returns
        -------
        arr_dst : runtime.NDArray
            The transformed NDArray
        """
        return _ffi_api.IndexMapMapNDArray(self, arr_src)
    def inverse(self, shape: List[Union[Range, PrimExpr]]) -> "IndexMap":
        """Return the inverse of the map
        Throws an error if the function is not bijective.
        Parameters
        ----------
        shape: List[Union[Range,PrimExpr]]
            The region over which the inverse should be determined.
            Used for validating that the mapping is bijective over
            this range.
        Returns
        -------
        inverse : IndexMap
            The inverse
        """
        # Bare extents are normalized to zero-based Ranges before crossing FFI.
        shape = [dim if isinstance(dim, Range) else Range(0, dim) for dim in shape]
        return _ffi_api.IndexMapInverse(self, shape)
    def non_surjective_inverse(
        self, shape: List[Union[Range, PrimExpr]]
    ) -> Tuple["IndexMap", PrimExpr]:
        """Return the inverse of the map
        Can be applied to transformations that introduce padding.
        Parameters
        ----------
        shape: List[Union[Range,PrimExpr]]
            The region over which the inverse should be determined.
            Used for determining the predicate.
        Returns
        -------
        result : Tuple[IndexMap, PrimExpr]
            The inverse, and a predicate for which the inverse maps to
            a valid index in the input range.
        Examples
        --------
        .. code-block:: python
            index_map = IndexMap.from_func(lambda i: [i//4, i%4])
            inverse_map, predicate = index_map.non_surjective_inverse([14])
            assert inverse_map.is_equivalent_to(IndexMap.from_func(lambda j,k: [4*j + k])
            print(predicate) # Prints "(axis0==3) && (axis2 >= 2)"
        """
        # Bare extents are normalized to zero-based Ranges before crossing FFI.
        shape = [dim if isinstance(dim, Range) else Range(0, dim) for dim in shape]
        return _ffi_api.IndexMapNonSurjectiveInverse(self, shape)
| 18,423 | 31.841355 | 98 | py |
tvm | tvm-main/python/tvm/tir/expr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin
"""TIR expression nodes.
Each expression node have subfields that can be visited from python side.
For example, you can use addexp.a to get the left operand of an Add node.
.. code-block:: python
x = tvm.tir.Var("n", "int32")
y = x + 2
assert(isinstance(y, tvm.tir.Add))
assert(y.a == x)
"""
from typing import Optional, Union
import tvm._ffi
import tvm.ir._ffi_api
from tvm import ir
from tvm.ir import Op, PrimExpr
from tvm.ir.base import Span
from tvm.runtime import DataType, DataTypeCode, Object, ObjectGeneric, Scriptable, const
from . import _ffi_api
from . import generic as _generic
def div_ambiguity_error():
    """Return the error raised when ``/`` or ``%`` is applied to two integer exprs."""
    message = (
        "TVM supports multiple types of integer divisions, "
        "please call div, indexdiv/indexmod, floordiv/floormod "
        " or truncdiv/truncmod directly to avoid ambiguity in the code."
    )
    return RuntimeError(message)
def _dtype_is_int(value):
if isinstance(value, int):
return True
return (
isinstance(value, ExprOp) and DataType(value.dtype).type_code == DataTypeCode.INT
) # type: ignore
def _dtype_is_float(value):
if isinstance(value, float):
return True
return (
isinstance(value, ExprOp) and DataType(value.dtype).type_code == DataTypeCode.FLOAT
) # type: ignore
class ExprOp(object):
    """Operator overloading for Expr like expressions.

    Note: ``==`` and ``!=`` return deferred comparison objects (EqualOp /
    NotEqualOp) rather than booleans, so the same syntax can express either
    object identity or a TIR comparison expression.
    """
    # TODO(tkonolige): use inspect to add source information to these objects
    def __add__(self, other):
        return _generic.add(self, other)
    def __radd__(self, other):
        return _generic.add(other, self)
    def __sub__(self, other):
        return _generic.subtract(self, other)
    def __rsub__(self, other):
        return _generic.subtract(other, self)
    def __mul__(self, other):
        return _generic.multiply(self, other)
    def __rmul__(self, other):
        return _generic.multiply(other, self)
    # Division on two integer expressions is rejected: the caller must pick
    # floordiv/truncdiv explicitly to avoid ambiguity.
    def __div__(self, other):
        if _dtype_is_int(self) and _dtype_is_int(other):
            raise div_ambiguity_error()
        return _generic.divide(self, other)
    def __rdiv__(self, other):
        if _dtype_is_int(self) and _dtype_is_int(other):
            raise div_ambiguity_error()
        return _generic.divide(other, self)
    def __truediv__(self, other):
        if _dtype_is_int(self) and _dtype_is_int(other):
            raise div_ambiguity_error()
        return _generic.divide(self, other)
    def __rtruediv__(self, other):
        if _dtype_is_int(self) and _dtype_is_int(other):
            raise div_ambiguity_error()
        return _generic.divide(other, self)
    def __floordiv__(self, other):
        return _generic.floordiv(self, other)
    def __rfloordiv__(self, other):
        return _generic.floordiv(other, self, None)
    # Python's % follows floor semantics, hence FloorMod (not truncated mod).
    def __mod__(self, other):
        return _ffi_api._OpFloorMod(self, other, None)  # type: ignore
    def __rmod__(self, other):
        return _ffi_api._OpFloorMod(other, self, None)  # type: ignore
    def __neg__(self):
        # Negation is expressed as multiplication by -1 of the matching dtype.
        neg_one = const(-1, self.dtype)  # type: ignore
        return self.__mul__(neg_one)
    def __lshift__(self, other):
        return _ffi_api.left_shift(self, other, None)  # type: ignore
    def __rlshift__(self, other):
        return _ffi_api.left_shift(other, self, None)  # type: ignore
    def __rshift__(self, other):
        return _ffi_api.right_shift(self, other, None)  # type: ignore
    def __rrshift__(self, other):
        return _ffi_api.right_shift(other, self, None)  # type: ignore
    def __and__(self, other):
        return _ffi_api.bitwise_and(self, other, None)  # type: ignore
    def __rand__(self, other):
        return _ffi_api.bitwise_and(other, self, None)  # type: ignore
    def __or__(self, other):
        return _ffi_api.bitwise_or(self, other, None)  # type: ignore
    def __ror__(self, other):
        return _ffi_api.bitwise_or(other, self, None)  # type: ignore
    def __xor__(self, other):
        return _ffi_api.bitwise_xor(self, other, None)  # type: ignore
    def __rxor__(self, other):
        return _ffi_api.bitwise_xor(other, self, None)  # type: ignore
    def __invert__(self):
        # Bitwise NOT only makes sense for integer expressions.
        if _dtype_is_float(self):
            raise RuntimeError("Cannot use ~ operator on float type Expr.")
        return _ffi_api.bitwise_not(self, None)  # type: ignore
    def __lt__(self, other):
        return _ffi_api._OpLT(self, other, None)  # type: ignore
    def __le__(self, other):
        return _ffi_api._OpLE(self, other, None)  # type: ignore
    # Deferred comparisons: see class docstring.
    def __eq__(self, other):
        return EqualOp(self, other)
    def __ne__(self, other):
        return NotEqualOp(self, other)
    def __gt__(self, other):
        return _ffi_api._OpGT(self, other, None)  # type: ignore
    def __ge__(self, other):
        return _ffi_api._OpGE(self, other, None)  # type: ignore
    # Symbolic expressions have no defined truth value; force the user to
    # build an explicit reduction instead.
    def __nonzero__(self):
        raise ValueError(
            "Cannot use and / or / not operator to Expr, hint: "
            + "use tvm.tir.all / tvm.tir.any instead"
        )
    def __bool__(self):
        return self.__nonzero__()
    def equal(self, other, span=None):
        """Build an equal check expression with other expr.
        Parameters
        ----------
        other : PrimExpr
            The other expression
        span : Optional[Span]
            The location of the cast in the source.
        Returns
        -------
        ret : PrimExpr
            The equality expression.
        """
        return _ffi_api._OpEQ(self, other, span)  # type: ignore
    def astype(self, dtype: str, span: Optional[Span] = None):
        """Cast the expression to other type.
        Parameters
        ----------
        dtype : str
            The type of new expression
        span : Optional[Span]
            The location of the cast in the source.
        Returns
        -------
        expr : PrimExpr
            Expression with new type
        """
        return _generic.cast(self, dtype, span)
class EqualOp(ObjectGeneric, ExprOp):
    """Deferred equality operator.

    Lets ``a == b`` mean either ``Object.same_as`` (when evaluated in a
    boolean context) or a TIR equality expression (when converted to an
    object).

    Parameters
    ----------
    a : PrimExpr
        Left operand.
    b : PrimExpr
        Right operand.
    span : Optional[Span]
        The location of this comparison in the source.
    """

    # This class is never manipulated by C++, so Python identity comparison
    # is sufficient for same_as.
    same_as = object.__eq__

    def __init__(self, a, b, span=None):
        self.a = a
        self.b = b
        self.span = span

    def __bool__(self):
        return self.a.same_as(self.b)

    # Python 2 spelling of __bool__; kept for parity with the original API.
    __nonzero__ = __bool__

    def asobject(self):
        """Convert to a concrete TIR equality expression."""
        return _ffi_api._OpEQ(self.a, self.b, self.span)  # type: ignore
class NotEqualOp(ObjectGeneric, ExprOp):
    """Deferred not-equal operator.

    Lets ``a != b`` mean either ``not Object.same_as`` (when evaluated in a
    boolean context) or a TIR inequality expression (when converted to an
    object).

    Parameters
    ----------
    a : PrimExpr
        Left operand.
    b : PrimExpr
        Right operand.
    span : Optional[Span]
        The location of this comparison in the source.
    """

    # This class is never manipulated by C++, so Python identity comparison
    # is sufficient for same_as.
    same_as = object.__eq__

    def __init__(self, a, b, span=None):
        self.a = a
        self.b = b
        self.span = span

    def __bool__(self):
        return not self.a.same_as(self.b)

    # Python 2 spelling of __bool__; kept for parity with the original API.
    __nonzero__ = __bool__

    def asobject(self):
        """Convert to a concrete TIR inequality expression."""
        return _ffi_api._OpNE(self.a, self.b, self.span)  # type: ignore
class IntImmEnum(ObjectGeneric):
    """Lazily evaluate an IntImm in case
    the constructor is not available in runtime.
    Parameters
    ----------
    value : int
        The enum value
    span : Optional[Span]
        The location of the cast in the source.
    """
    def __init__(self, value, span=None):
        self.value = value
        self.span = span
    def asobject(self):
        """Convert object."""
        # Materialized lazily as a 32-bit integer immediate.
        return IntImm("int32", self.value, self.span)  # type: ignore
class PrimExprWithOp(ExprOp, PrimExpr, Scriptable):
    """Helper base class to inherit from PrimExpr."""
    # In Python3, We have to explicitly tell interpreter to retain __hash__ if we override __eq__
    # https://docs.python.org/3.1/reference/datamodel.html#object.__hash__
    __hash__ = PrimExpr.__hash__
class ConstExpr(PrimExprWithOp):
    """Base class for constant expressions."""
    pass
class BinaryOpExpr(PrimExprWithOp):
    """Base class for binary operator expressions."""
    pass
class CmpExpr(PrimExprWithOp):
    """Base class for comparison expressions."""
    pass
class LogicalExpr(PrimExprWithOp):
    """Base class for logical expressions."""
    pass
@tvm._ffi.register_object("tir.Var")
class Var(PrimExprWithOp):
    """Symbolic variable.
    Parameters
    ----------
    name : str
        The name
    dtype : Union[str, ir.Type]
        The data type
    span : Optional[Span]
        The location of this variable in the source code.
    """
    def __init__(self, name: str, dtype: Union[str, ir.Type], span: Optional[Span] = None):
        self.__init_handle_by_constructor__(_ffi_api.Var, name, dtype, span)  # type: ignore
@tvm._ffi.register_object("tir.SizeVar")
class SizeVar(Var):
    """Symbolic variable to represent a tensor index size
    which is greater or equal to zero.
    Parameters
    ----------
    name : str
        The name
    dtype : Union[str, ir.Type]
        The data type
    span : Optional[Span]
        The location of this itervar in the source code.
    """
    # pylint: disable=super-init-not-called
    def __init__(self, name, dtype, span=None):
        # Deliberately bypasses Var.__init__: the handle must be created by
        # the SizeVar FFI constructor, not the Var one.
        self.__init_handle_by_constructor__(_ffi_api.SizeVar, name, dtype, span)  # type: ignore
@tvm._ffi.register_object("tir.IterVar")
class IterVar(Object, ExprOp, Scriptable):
    """Represent iteration variable.

    IterVar represents axis iterations in the computation.

    Parameters
    ----------
    dom : Range
        The domain of the iteration.
    var : Union[Var, str]
        The internal variable that is used for iteration.
    iter_type : int
        The iteration type.
    thread_tag : str
        The thread type tag.
    span : Optional[Span]
        The location of this itervar in the source code.

    See Also
    --------
    te.thread_axis: Create thread axis IterVar.
    te.reduce_axis: Create reduce axis IterVar.
    """

    # Iteration-type codes understood by the C++ IterVar node.
    DataPar = 0
    ThreadIndex = 1
    CommReduce = 2
    Ordered = 3
    Opaque = 4
    Unrolled = 5
    Vectorized = 6
    Parallelized = 7
    Tensorized = 8

    def __init__(self, dom, var, iter_type, thread_tag="", span=None):
        # Normalize and validate the domain first.
        if dom is not None:
            if isinstance(dom, (list, tuple)):
                if len(dom) != 2:
                    raise TypeError("need to be list of ranges")
                dom = tvm.ir.Range(dom[0], dom[1])
            if not isinstance(dom, tvm.ir.Range):
                raise TypeError("dom need to be Range")
        # The loop variable's dtype follows the domain extent when present.
        extent_dtype = "int32" if dom is None else dom.extent.dtype
        if not isinstance(var, Var):
            var_name = var if var is not None else "iter"
            var = Var(var_name, dtype=extent_dtype, span=span)
        if dom is not None:
            assert (
                var.dtype == dom.extent.dtype
            ), "IterVar's Var dtype must match its domain's extent's dtype"
        self.__init_handle_by_constructor__(
            _ffi_api.IterVar, dom, var, iter_type, thread_tag, span  # type: ignore
        )
@tvm._ffi.register_object("tir.CommReducer")
class CommReducer(Object, Scriptable):
    """Commutative reduce operator
    Parameters
    ----------
    lhs : List[Var]
        The left arguments of the reducer.
    rhs : List[Var]
        The right arguments of the reducer.
    result : List[PrimExpr]
        The reduction results.
    identity_element : List[PrimExpr]
        The identity elements.
    span : Optional[Span]
        The location of this reducer in the source code.
    """
    def __init__(self, lhs, rhs, result, identity_element, span=None):
        self.__init_handle_by_constructor__(
            _ffi_api.CommReducer, lhs, rhs, result, identity_element, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Reduce")
class Reduce(PrimExprWithOp):
    """Reduce node.
    Parameters
    ----------
    combiner : CommReducer
        The combiner.
    src : list of Expr
        The source expression.
    rdom : list of IterVar
        The iteration domain
    condition : PrimExpr
        The reduce condition.
    value_index : int
        The value index.
    init : list of Expr
        The initial value for output. This can be an int, float or ProducerLoad
    span : Optional[Span]
        The location of this reduce node in the source code.
    """
    def __init__(self, combiner, src, rdom, condition, value_index, init=None, span=None):
        self.__init_handle_by_constructor__(
            _ffi_api.Reduce, combiner, src, rdom, condition, value_index, init, span  # type: ignore
        )
@tvm._ffi.register_object
class FloatImm(ConstExpr):
    """Float constant.
    Parameters
    ----------
    dtype : str
        The data type
    value : float
        The constant value.
    span : Optional[Span]
        The location of this constant in the source code.
    """
    def __init__(self, dtype, value, span=None):
        self.__init_handle_by_constructor__(
            tvm.ir._ffi_api.FloatImm, dtype, value, span  # type: ignore
        )
    def __float__(self):
        # Allows float(FloatImm) to yield the wrapped constant.
        return self.value
@tvm._ffi.register_object
class IntImm(ConstExpr):
    """Int constant.
    Parameters
    ----------
    dtype : str
        The data type
    value : int
        The constant value.
    span : Optional[Span]
        The location of this constant in the source code.
    """
    def __init__(self, dtype, value, span=None):
        self.__init_handle_by_constructor__(
            tvm.ir._ffi_api.IntImm, dtype, value, span  # type: ignore
        )
    def __hash__(self):
        # Hashes like the plain integer it wraps.
        return self.value
    def __int__(self):
        return self.value
    def __nonzero__(self):
        return self.value != 0
    # Unlike ExprOp, == / != on IntImm build TIR comparison expressions
    # immediately rather than deferred EqualOp/NotEqualOp objects.
    def __eq__(self, other):
        return _ffi_api._OpEQ(self, other, None)  # type: ignore
    def __ne__(self, other):
        return _ffi_api._OpNE(self, other, None)  # type: ignore
    def __bool__(self):
        return self.__nonzero__()
@tvm._ffi.register_object("tir.StringImm")  # type: ignore
class StringImm(ConstExpr):
    """String immediate (constant) expression.

    Parameters
    ----------
    value : str
        The value of the function.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, value, span=None):
        self.__init_handle_by_constructor__(_ffi_api.StringImm, value, span)  # type: ignore

    def __eq__(self, other):
        # Compare by underlying value; other ConstExprs are unwrapped first.
        rhs = other.value if isinstance(other, ConstExpr) else other
        return self.value == rhs

    def __ne__(self, other):
        rhs = other.value if isinstance(other, ConstExpr) else other
        return self.value != rhs

    def __hash__(self):
        # Defining __eq__ suppresses inherited hashing, so restore PrimExpr's.
        return PrimExpr.__hash__(self)
@tvm._ffi.register_object("tir.Cast")
class Cast(PrimExprWithOp):
    """Type-cast expression node.

    Parameters
    ----------
    dtype : str
        The target data type.
    value : PrimExpr
        The value being cast.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, dtype, value, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.Cast, dtype, value, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Add")
class Add(BinaryOpExpr):
    """Addition operator node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.Add, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Sub")
class Sub(BinaryOpExpr):
    """Subtraction operator node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.Sub, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Mul")
class Mul(BinaryOpExpr):
    """Multiplication operator node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.Mul, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Div")
class Div(BinaryOpExpr):
    """Division operator node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.Div, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Mod")
class Mod(BinaryOpExpr):
    """Modulo operator node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.Mod, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.FloorDiv")
class FloorDiv(BinaryOpExpr):
    """Floor-division operator node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.FloorDiv, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.FloorMod")
class FloorMod(BinaryOpExpr):
    """Floor-modulo operator node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.FloorMod, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Min")
class Min(BinaryOpExpr):
    """Minimum operator node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.Min, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Max")
class Max(BinaryOpExpr):
    """Maximum operator node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.Max, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.EQ")
class EQ(CmpExpr):
    """Equality comparison node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.EQ, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.NE")
class NE(CmpExpr):
    """Not-equal comparison node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.NE, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.LT")
class LT(CmpExpr):
    """Less-than comparison node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.LT, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.LE")
class LE(CmpExpr):
    """Less-than-or-equal comparison node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.LE, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.GT")
class GT(CmpExpr):
    """Greater-than comparison node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.GT, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.GE")
class GE(CmpExpr):
    """Greater-than-or-equal comparison node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.GE, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.And")
class And(LogicalExpr):
    """Logical AND node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.And, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Or")
class Or(LogicalExpr):
    """Logical OR node.

    Parameters
    ----------
    a : PrimExpr
        The left-hand operand.
    b : PrimExpr
        The right-hand operand.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, b, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.Or, a, b, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Not")
class Not(LogicalExpr):
    """Logical NOT node.

    Parameters
    ----------
    a : PrimExpr
        The operand being negated.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, a, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.Not, a, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Select")
class Select(PrimExprWithOp):
    """Select expression node.

    Note
    ----
    Select may compute both true_value and false_value.
    Use :py:class:`tvm.tir.if_then_else` instead if you want a
    conditional expression that only evaluates the taken branch.

    Parameters
    ----------
    condition : PrimExpr
        The condition expression.
    true_value : PrimExpr
        The value to take when condition is true.
    false_value : PrimExpr
        The value to take when condition is false.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, condition, true_value, false_value, span=None):
        # A plain Python bool is promoted to a boolean IntImm node first.
        if isinstance(condition, bool):
            condition = IntImm("bool", condition)
        self.__init_handle_by_constructor__(
            _ffi_api.Select, condition, true_value, false_value, span  # type: ignore
        )
@tvm._ffi.register_object("tir.BufferLoad")
class BufferLoad(PrimExprWithOp):
    """Buffer load expression node.

    Parameters
    ----------
    buffer : Buffer
        The buffer to be loaded from.
    indices : List[PrimExpr]
        The buffer indices.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, buffer, indices, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.BufferLoad, buffer, indices, span  # type: ignore
        )
@tvm._ffi.register_object("tir.ProducerLoad")
class ProducerLoad(PrimExprWithOp):
    """Producer load expression node.

    Parameters
    ----------
    producer : DataProducer
        The producer to be loaded from.
    indices : List[PrimExpr]
        The buffer indices.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, producer, indices, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.ProducerLoad, producer, indices, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Ramp")
class Ramp(PrimExprWithOp):
    """Ramp expression node.

    Parameters
    ----------
    base : PrimExpr
        The base expression.
    stride : PrimExpr
        The stride of the ramp.
    lanes : int
        The lanes of the expression.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, base, stride, lanes, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.Ramp, base, stride, lanes, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Broadcast")
class Broadcast(PrimExprWithOp):
    """Broadcast expression node.

    Parameters
    ----------
    value : PrimExpr
        The value of the expression.
    lanes : int
        The lanes of the expression.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, value, lanes, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.Broadcast, value, lanes, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Shuffle")
class Shuffle(PrimExprWithOp):
    """Shuffle expression node.

    Parameters
    ----------
    vectors : Array of Expr
        The vectors to shuffle.
    indices : Array of indices
        The indices of the shuffle.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, vectors, indices, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.Shuffle, vectors, indices, span  # type: ignore
        )
class CallEffectKind:
    """Possible kinds of Call effects."""

    # Only the kinds up to Opaque are exposed on the Python side.
    ExprAnnotation = IntImmEnum(0)
    Pure = IntImmEnum(1)
    ReadState = IntImmEnum(2)
    UpdateState = IntImmEnum(3)
    # Opaque is an alias of UpdateState here.
    Opaque = UpdateState
@tvm._ffi.register_object("tir.Call")
class Call(PrimExprWithOp):
    """Call expression node.

    Parameters
    ----------
    dtype : str
        The return data type.
    op : Union[RelayExpr, str]
        The function to be called, or the name of a global ``tvm.Op``.
        A string name must live in the ``tir.`` namespace.
    args : list of Expr
        The input arguments of the call.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, dtype, op, args, span=None):
        if isinstance(op, str):
            # Only tir.* operator names can be resolved here; anything else
            # must be passed in directly as an Op object.
            if not op.startswith("tir."):
                raise ValueError(
                    (
                        "Cannot handle str op argument %s. This function only handles str "
                        + "argument with the tir namespace. If you are "
                        + "certain about the intrinsic name, pass in Op.get(name) instead"
                    )
                    % op
                )
            op = Op.get(op)
        self.__init_handle_by_constructor__(_ffi_api.Call, dtype, op, args, span)  # type: ignore
@tvm._ffi.register_object("tir.Let")
class Let(PrimExprWithOp):
    """Let-binding expression node.

    Parameters
    ----------
    var : Var
        The variable being bound.
    value : PrimExpr
        The value bound to the variable.
    body : PrimExpr
        The body expression in which the binding is visible.
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, var, value, body, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.Let, var, value, body, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Any")
class Any(PrimExprWithOp):
    """Any expression node.

    Parameters
    ----------
    span : Optional[Span]
        The location of this expression in the source code.
    """

    def __init__(self, span=None):
        # Node construction happens in C++ via the FFI constructor.
        self.__init_handle_by_constructor__(_ffi_api.Any, span)  # type: ignore
| 29,744 | 23.664179 | 100 | py |
tvm | tvm-main/python/tvm/tir/block_dependence_info.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Define BlockDependenceInfoNode that uses the BlockScope and StmtSRef objects
to store the block level dependences"""
from typing import Union, Optional
from tvm._ffi import register_object
from tvm.ir.module import IRModule
from tvm.runtime import Object
from tvm.tir import Block, PrimFunc
from .block_scope import BlockScope, StmtSRef
from . import _ffi_api
@register_object("tir.BlockDependenceInfo")
class BlockDependenceInfo(Object):
    """An object that helps build and query block level dependences using the
    two core objects BlockScope and StmtSRef.

    The data structures exposed are:
    1) sref2scope: Mapping from the srefs to its corresponding BlockScope
    2) stmt2ref: Mapping from blocks to corresponding StmtSRefs

    Note that this object does not store SRefs to loops as the purpose is only
    to expose block level dependences. This provides the advantage that the
    scope block (parent block) for a given block sref can be directly accessed
    as sref->parent.
    """

    # The module whose block dependences are tracked.
    mod: IRModule

    def __init__(self, mod: Union[IRModule, PrimFunc]):
        # A bare PrimFunc is wrapped into a single-function module first.
        if isinstance(mod, PrimFunc):
            mod = IRModule({"main": mod})
        if not isinstance(mod, IRModule):
            raise TypeError(f"Expected `mod` to be PrimFunc or IRModule, but gets: {mod}")
        self.__init_handle_by_constructor__(
            _ffi_api.BlockDependenceInfo,  # type: ignore # pylint: disable=no-member
            mod,
        )

    def get_sref(self, block: Block) -> Optional[StmtSRef]:
        """Return the sref that points to the given block.

        Parameters
        ----------
        block : Block
            The block for which the sref is to be retrieved.

        Returns
        -------
        sref : Optional[StmtSRef]
            The corresponding sref.
        """
        return _ffi_api.BlockDependenceInfoGetSRef(self, block)  # type: ignore # pylint: disable=no-member

    def get_block_scope(self, block_sref: StmtSRef) -> BlockScope:
        """Get the BlockScope corresponding to the given block sref.

        Parameters
        ----------
        block_sref : StmtSRef
            The sref of the block whose scope is to be retrieved.

        Returns
        -------
        scope : BlockScope
            The corresponding BlockScope.
        """
        return _ffi_api.BlockDependenceInfoGetBlockScope(  # type: ignore # pylint: disable=no-member
            self, block_sref
        )
| 3,215 | 35.134831 | 107 | py |
tvm | tvm-main/python/tvm/tir/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import, redefined-builtin
"""Namespace for Tensor-level IR"""
from tvm.ir import PrimExpr
from tvm.runtime import const
from .buffer import Buffer, decl_buffer, DataProducer
from .data_layout import Layout, BijectiveLayout, bijective_layout, layout
from .expr import Var, SizeVar, Reduce, FloatImm, IntImm, StringImm, Cast
from .expr import Add, Sub, Mul, Div, Mod, FloorDiv, FloorMod
from .expr import Min, Max, EQ, NE, LT, LE, GT, GE, And, Or, Not
from .expr import Select, BufferLoad, ProducerLoad, Ramp, Broadcast, Shuffle
from .expr import Call, CallEffectKind, Let, IterVar, CommReducer, Any
from .stmt import Stmt, LetStmt, AssertStmt, ForKind, For, While
from .stmt import (
BufferStore,
BufferRealize,
ProducerStore,
Allocate,
AllocateConst,
AttrStmt,
DeclBuffer,
)
from .stmt import ProducerRealize, SeqStmt
from .stmt import IfThenElse, Evaluate, Prefetch, stmt_seq, stmt_list
from .stmt import BufferRegion, MatchBufferRegion, Block, BlockRealize
from .function import PrimFunc, TensorIntrin, IndexMap
from .op import call_packed_lowered, call_cpacked_lowered, call_tir
from .op import call_packed, call_cpacked, call_intrin, call_pure_extern, call_extern
from .op import call_llvm_intrin, call_llvm_pure_intrin, ret, all, any, min_value, max_value, trace
from .op import tvm_check_return
from .op import tvm_stack_alloca, tvm_stack_make_shape, tvm_stack_make_array
from .op import tvm_tuple, tvm_struct_get, tvm_struct_set
from .op import address_of, lookup_param, assume, undef
from .op import tvm_thread_allreduce, type_annotation, tvm_access_ptr, tvm_throw_last_error
from .op import (
tvm_load_matrix_sync,
tvm_store_matrix_sync,
tvm_mma_sync,
tvm_bmma_sync,
tvm_fill_fragment,
)
from .op import ptx_mma, ptx_mma_sp, mma_store, mma_fill
from .op import ptx_ldmatrix, ptx_cp_async, ptx_commit_group, ptx_wait_group
from .op import vectorlow, vectorhigh, vectorcombine
from .op import infinity, reinterpret
from .op import exp, exp2, exp10, log, log2, log10, log1p, ldexp, clz
from .op import sin, sinh, asin, asinh
from .op import cos, cosh, acos, acosh
from .op import tan, tanh, atan, atan2, atanh
from .op import bitwise_and, bitwise_not, bitwise_or, bitwise_xor
from .op import erf, sigmoid, sqrt, rsqrt, floor, ceil, hypot
from .op import trunc, abs, round, nextafter, nearbyint, power, pow, popcount, fmod, if_then_else
from .op import likely, isnan, isnullptr, isfinite, isinf, copysign
from .op import div, indexdiv, indexmod, truncdiv, truncmod, floordiv, floormod, ceildiv
from .op import comm_reducer, min, max, sum
from .op import q_multiply_shift, q_multiply_shift_per_axis, shift_left, shift_right
from .op import TVMBackendAllocWorkspace, TVMBackendFreeWorkspace
from .op import start_profile_intrinsic, end_profile_intrinsic
from .generic import add, subtract, multiply
from .schedule import StmtSRef, BlockScope, ScheduleState, Schedule, ScheduleError
from .block_dependence_info import BlockDependenceInfo
from . import schedule
from . import ir_builder
from . import transform
from . import analysis
from . import stmt_functor
from . import usmp
| 3,946 | 42.855556 | 99 | py |
tvm | tvm-main/python/tvm/tir/ir_builder.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Developer API of IR node builder make function."""
import tvm
from tvm._ffi.base import string_types
from tvm.runtime import ObjectGeneric, convert, const
from tvm.ir import container as _container
from . import stmt as _stmt
from . import expr as _expr
from . import buffer as _buffer
from . import op
class WithScope(object):
    """Auxiliary context manager: yields a stored value on entry and
    invokes a callback when the scope is exited."""

    def __init__(self, enter_value, exit_cb):
        self._value = enter_value
        self._on_exit = exit_cb

    def __enter__(self):
        return self._value

    def __exit__(self, ptype, value, trace):
        # Run the callback unconditionally; exceptions are not suppressed
        # (returning None lets them propagate).
        self._on_exit()
class BufferVar(ObjectGeneric):
    """Buffer variable with content type, makes load store easily.

    Do not create it directly, create use IRBuilder.

    Array access through a BufferVar must use the same number of
    indices as the underlying buffer was declared to have.

    Examples
    --------
    In the follow example, x is BufferVar.
    :code:`x[0] = ...` directly emit a BufferStore to the IRBuilder,
    :code:`x[10]` translates to BufferLoad.

    .. code-block:: python

        # The following code generate IR for x[0] = x[10] + 1
        ib = tvm.tir.ir_builder.create()
        x = ib.allocate("float32", 20)
        x[0] = x[10] + 1

        # Array access using a multidimensional index
        y = ib.allocate("float32", (32, 32))
        y[2, 31] = 0.

    See Also
    --------
    IRBuilder.pointer
    IRBuilder.allocate
    """

    def __init__(self, builder, buffer, content_type):
        # The owning IRBuilder; stores emitted via __setitem__ go through it.
        self._builder = builder
        # The underlying Buffer object being indexed.
        self._buffer = buffer
        # Content dtype string (e.g. "float32", possibly with lane suffix).
        self._content_type = content_type

    def asobject(self):
        """Convert to the underlying Buffer when treated as an object."""
        return self._buffer

    @property
    def dtype(self):
        """The content dtype string of the buffer."""
        return self._content_type

    def _normalize_index(self, index):
        """Coerce `index` into a list of plain expressions."""
        # EAFP: a non-iterable single index becomes a one-element list.
        try:
            index = [*index]
        except TypeError:
            index = [index]
        # IterVars are replaced by their underlying loop variable.
        index = [x.var if isinstance(x, _expr.IterVar) else x for x in index]
        # Workaround to support previous behavior of ir_builder
        # indexing by a single index, treating the buffer as if were
        # already flattened.
        if len(index) == 1 and len(self._buffer.shape) != 1:
            index = tvm.topi.utils.unravel_index(index[0], self._buffer.shape)
        return index

    def __getitem__(self, index):
        # Reads become BufferLoad expressions.
        index = self._normalize_index(index)
        return _expr.BufferLoad(self._buffer, index)

    def __setitem__(self, index, value):
        # Writes are emitted to the builder as BufferStore statements.
        index = self._normalize_index(index)
        value = convert(value)
        # Compare element types ignoring any vector lane suffix ("float32x4").
        value_element = value.dtype.split("x", maxsplit=1)[0]
        content_element = self._content_type.split("x", maxsplit=1)[0]
        if value_element != content_element:
            raise ValueError(
                f"data type does not match content type {value.dtype} vs {self._content_type}"
            )
        self._builder.emit(_stmt.BufferStore(self._buffer, value, index))
class IRBuilder(object):
    """Auxiliary builder to build IR for testing and dev.

    Examples
    --------
    .. code-block:: python

        ib = tvm.tir.ir_builder.create()
        n = te.var("n")
        A = ib.allocate("float32", n, name="A")
        with ib.for_range(0, n, name="i") as i:
            with ib.if_scope((i % 2) == 0):
                A[i] = A[i] + 1
        # The result stmt.
        stmt = ib.get()
    """

    def __init__(self):
        # Stack of statement sequences; a new list is pushed for every
        # nested scope and folded into a single Stmt when the scope closes.
        self._seq_stack = [[]]  # type: ignore
        # Counter used to auto-generate loop variable names (i, j, k, ...).
        self.nidx = 0

    def _pop_seq(self):
        """Pop sequence from stack and fold it into a single Stmt."""
        seq = self._seq_stack.pop()
        if not seq or callable(seq[-1]):
            seq.append(_stmt.Evaluate(0))
        seqwrap = lambda x: x[0] if len(x) == 1 else _stmt.SeqStmt(list(reversed(x)))
        ret_seq = [seq[-1]]
        for s in reversed(seq[:-1]):
            if callable(s):
                # Deferred statements (e.g. AttrStmt factories) wrap the
                # statements emitted after them as their body.
                ret_seq = [s(seqwrap(ret_seq))]
            else:
                assert isinstance(s, _stmt.Stmt)
                ret_seq.append(s)
        return seqwrap(ret_seq)

    def emit(self, stmt):
        """Emit a statement to the end of current scope.

        Parameters
        ----------
        stmt : Stmt or callable.
            The statement to be emitted or callable that build stmt given body.
        """
        if isinstance(stmt, _expr.Call):
            stmt = _stmt.Evaluate(stmt)
        assert isinstance(stmt, _stmt.Stmt) or callable(stmt)
        self._seq_stack[-1].append(stmt)

    def scope_attr(self, node, attr_key, value):
        """Create an AttrStmt at current scope.

        Parameters
        ----------
        attr_key : str
            The key of the attribute type.

        node : Node
            The attribute node to annottate on.

        value : Expr
            Attribute value.

        Examples
        --------
        .. code-block:: python

            ib = tvm.tir.ir_builder.create()
            i = te.var("i")
            x = ib.pointer("float32")
            ib.scope_attr(x, "storage_scope", "global")
            x[i] = x[i - 1] + 1
        """
        if isinstance(node, string_types):
            node = _expr.StringImm(node)
        if isinstance(value, string_types):
            value = _expr.StringImm(value)
        # thread_extent could be zero for dynamic workloads
        if attr_key == "thread_extent":
            value = op.max(1, value)
        self.emit(lambda x: _stmt.AttrStmt(node, attr_key, value, x))

    def for_range(self, begin, end, name="i", dtype=None, kind="serial"):
        """Create a for iteration scope.

        Parameters
        ----------
        begin : Expr
            The min iteration scope.

        end : Expr
            The end iteration scope

        name : str, optional
            The name of iteration variable, if no input names,
            using typical index names i, j, k, then i_nidx

        dtype : str, optional
            The data type of iteration variable.

        kind : str, optional
            The special tag on the for loop.

        Returns
        -------
        loop_scope : With.Scope of Var
            The for scope, when enters returns loop_var

        Examples
        --------
        .. code-block:: python

            ib = tvm.tir.ir_builder.create()
            x = ib.pointer("float32")
            with ib.for_range(1, 10, name="i") as i:
                x[i] = x[i - 1] + 1
        """
        if name == "i":
            name = chr(ord(name) + self.nidx) if self.nidx < 3 else name + "_" + str(self.nidx - 3)
            self.nidx += 1
        self._seq_stack.append([])

        # auto infer dtype when it's not specified
        def get_dtype(expr):
            if isinstance(expr, _expr.PrimExpr):
                if not expr.dtype.startswith("int"):
                    raise NotImplementedError(
                        f"Infer loop_var dtype failed:"
                        f" unsupported dtype in loop begin or end {expr.dtype}"
                    )
                return expr.dtype
            if isinstance(expr, int):
                return "int32"
            # Bug fix: a bound that is neither a PrimExpr nor an int has no
            # `.dtype`, so referencing `expr.dtype` here raised a confusing
            # AttributeError instead of this error. Report the type instead.
            raise NotImplementedError(
                f"Infer loop_var dtype failed:"
                f" unsupported type of loop begin or end {type(expr)}"
            )

        if dtype is None:
            dtype = "int64" if "int64" in [get_dtype(begin), get_dtype(end)] else "int32"
        loop_var = _expr.Var(name, dtype=dtype)
        extent = end if begin == 0 else (end - begin)

        def _exit_cb():
            if kind == "serial":
                kind_id = _stmt.ForKind.SERIAL
            elif kind == "parallel":
                kind_id = _stmt.ForKind.PARALLEL
            elif kind == "vectorize":
                kind_id = _stmt.ForKind.VECTORIZED
            elif kind == "unroll":
                kind_id = _stmt.ForKind.UNROLLED
            else:
                raise ValueError("Unknown kind")
            self.emit(_stmt.For(loop_var, begin, extent, kind_id, self._pop_seq()))

        return WithScope(loop_var, _exit_cb)

    def while_loop(self, condition):
        """Create a while loop scope.

        Parameters
        ----------
        condition : Expr
            The termination condition.

        Returns
        -------
        loop_scope : With.Scope of Var
            The while scope.

        Examples
        --------
        .. code-block:: python

            ib = tvm.tir.ir_builder.create()
            iterations = ib.allocate("int32", (1,), name="iterations", scope="local")
            with ib.while_loop(iterations[0] < 10):
                iterations[0] += 1
        """
        self._seq_stack.append([])

        def _exit_cb():
            self.emit(_stmt.While(condition, self._pop_seq()))

        return WithScope(None, _exit_cb)

    def if_scope(self, cond):
        """Create an if scope.

        Parameters
        ----------
        cond : Expr
            The condition.

        Returns
        -------
        if_scope : WithScope
            The result if scope.

        Examples
        --------
        .. code-block:: python

            ib = tvm.tir.ir_builder.create()
            i = te.var("i")
            x = ib.pointer("float32")
            with ib.if_scope((i % 2) == 0):
                x[i] = x[i - 1] + 1
        """
        self._seq_stack.append([])

        def _exit_cb():
            self.emit(_stmt.IfThenElse(cond, self._pop_seq(), None))

        return WithScope(None, _exit_cb)

    def else_scope(self):
        """Create an else scope.

        This can only be used right after an if scope.

        Returns
        -------
        else_scope : WithScope
            The result else scope.

        Examples
        --------
        .. code-block:: python

            ib = tvm.tir.ir_builder.create()
            i = te.var("i")
            x = ib.pointer("float32")
            with ib.if_scope((i % 2) == 0):
                x[i] = x[i - 1] + 1
            with ib.else_scope():
                x[i] = x[i - 1] + 2
        """
        if not self._seq_stack[-1]:
            raise RuntimeError("else_scope can only follow an if_scope")
        prev = self._seq_stack[-1][-1]
        if not isinstance(prev, _stmt.IfThenElse) or prev.else_case:
            raise RuntimeError("else_scope can only follow an if_scope")
        # Remove the just-emitted IfThenElse; it will be re-emitted with the
        # else branch filled in when this scope exits.
        self._seq_stack[-1].pop()
        self._seq_stack.append([])

        def _exit_cb():
            self.emit(_stmt.IfThenElse(prev.condition, prev.then_case, self._pop_seq()))

        return WithScope(None, _exit_cb)

    def new_scope(self):
        """Create new scope,

        this is useful to set boundary of attr and allocate.

        Returns
        -------
        new_scope : WithScope
            The result new scope.
        """
        self._seq_stack.append([])

        def _exit_cb():
            self.emit(self._pop_seq())

        return WithScope(None, _exit_cb)

    def let(self, var_name, value):
        """Create a new let stmt binding.

        Parameters
        ----------
        var_name : str
            The name of the variable

        value : PrimExpr
            The value to be bound

        Returns
        -------
        var : tvm.tir.Var
            The var that can be in for future emits.
        """
        var = _expr.Var(var_name, dtype=value.dtype)
        self.emit(lambda x: _stmt.LetStmt(var, value, x))
        return var

    def allocate(self, dtype, shape, name="buf", axis_separators=None, scope=""):
        """Create a allocate statement.

        Parameters
        ----------
        dtype : str
            The content data type.

        shape : tuple of Expr
            The shape of array to be allocated.

        name : str, optional
            The name of the buffer.

        axis_separators : list of int, optional
            If passed, a list of separators between groups of axes,
            each of which is flattened to an output axis. For flat
            memory spaces, should either be None, or an empty list.

        scope : str, optional
            The scope of the buffer.

        Returns
        -------
        buffer : BufferVar
            The buffer var representing the buffer.
        """
        if not isinstance(shape, (list, tuple, _container.Array)):
            shape = [shape]
        buffer = _buffer.decl_buffer(
            shape, dtype, name, scope=scope, axis_separators=axis_separators
        )
        buffer_var = buffer.data
        # The Allocate condition is the constant "true" (uint1 1).
        self.emit(lambda x: _stmt.Allocate(buffer_var, dtype, shape, const(1, dtype="uint1"), x))
        return BufferVar(self, buffer, dtype)

    def pointer(self, content_type, name="ptr", scope=""):
        """Create pointer variable with content type.

        Parameters
        ----------
        content_type : str
            The content data type.

        name : str, optional
            The name of the pointer.

        scope : str, optional
            The scope of the pointer.

        Returns
        -------
        ptr : BufferVar
            The buffer var representing the buffer.
        """
        buffer = _buffer.decl_buffer(shape=[1], dtype=content_type, name=name, scope=scope)
        return BufferVar(self, buffer, content_type)

    def buffer_ptr(self, buf):
        """Create pointer variable corresponds to buffer ptr.

        Parameters
        ----------
        buf : Buffer
            The buffer to be extracted.

        Returns
        -------
        ptr : BufferVar
            The buffer var representing the buffer.
        """
        return BufferVar(self, buf, buf.dtype)

    def likely(self, expr):
        """Add likely tag for expression.

        Parameters
        ----------
        expr : Expr
            The expression. Usually a condition expression.

        Returns
        -------
        expr : Expr
            The expression will likely tag.
        """
        return _expr.Call(expr.dtype, "tir.likely", [expr])

    def get(self):
        """Return the builded IR.

        Returns
        -------
        stmt : Stmt
            The result statement.
        """
        seq = self._pop_seq()
        if self._seq_stack:
            raise RuntimeError("cannot call get inside construction scope")
        return seq
def create():
    """Create a new :class:`IRBuilder`.

    Returns
    -------
    builder : IRBuilder
        A fresh builder with an empty scope stack.
    """
    return IRBuilder()
| 15,154 | 27.811787 | 99 | py |
tvm | tvm-main/python/tvm/tir/stmt.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Statement AST Node in TVM.
Each statement node have subfields that can be visited from python side.
.. code-block:: python
x = tvm.tir.Var("n", "int32")
buffer = tvm.tir.decl_buffer((16,), "float32")
st = tvm.tir.stmt.BufferStore(buffer, 1, (x,))
assert isinstance(st, tvm.tir.stmt.BufferStore)
assert(st.buffer == buffer)
"""
from enum import IntEnum
from typing import List, Mapping, Optional, Union
import tvm._ffi
from tvm.ir import PrimExpr, Range, Span
from tvm.runtime import Object, Scriptable, const
from . import _ffi_api
from .buffer import Buffer
from .expr import IterVar
class Stmt(Object, Scriptable):
    """Base class of all the statements.

    Concrete statement nodes (LetStmt, For, BufferStore, ...) subclass this
    and are registered against their C++ counterparts via ``tvm._ffi``.
    """
@tvm._ffi.register_object("tir.LetStmt")
class LetStmt(Stmt):
    """LetStmt node: binds `var` to `value` within the scope of `body`.

    Parameters
    ----------
    var : Var
        The variable in the binding.
    value : PrimExpr
        The value to be bound.
    body : Stmt
        The body statement.
    span : Optional[Span]
        The location of this statement in the source code.
    """

    def __init__(self, var, value, body, span=None):
        # The node is constructed on the C++ side through the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.LetStmt, var, value, body, span  # type: ignore
        )
@tvm._ffi.register_object("tir.AssertStmt")
class AssertStmt(Stmt):
    """AssertStmt node.

    Parameters
    ----------
    condition : PrimExpr
        The assert condition.
    message : PrimExpr
        The error message.
    body : tvm.tir.Stmt
        The body statement.
    span : Optional[Span]
        The location of this statement in the source code.
    """

    def __init__(self, condition, message, body, span=None):
        self.__init_handle_by_constructor__(
            _ffi_api.AssertStmt, condition, message, body, span  # type: ignore
        )
class ForKind(IntEnum):
    """The kind of the for loop.

    note
    ----
    ForKind can change the control flow semantics
    of the loop and need to be considered in all TIR passes.
    """

    # NOTE(review): these integer values are passed through the FFI and
    # presumably mirror the C++ tir::ForKind enum — keep the numbering stable.
    SERIAL = 0
    PARALLEL = 1
    VECTORIZED = 2
    UNROLLED = 3
    # Loop variable is bound to a thread axis (see For.thread_binding).
    THREAD_BINDING = 4
@tvm._ffi.register_object("tir.For")
class For(Stmt):
    """For node.

    Parameters
    ----------
    loop_var : Var
        The loop variable.
    min_val : PrimExpr
        The beginning value.
    extent : PrimExpr
        The length of the loop.
    kind : ForKind
        The kind of the for loop (see :class:`ForKind`).
    body : Stmt
        The body statement.
    thread_binding: Optional[tir.IterVar]
        The thread this loop binds to. Only valid
        if kind is ThreadBinding
    annotations: tvm.ir.Map
        Additional annotation hints.
    span : Optional[Span]
        The location of this statement in the source code.
    """

    def __init__(
        self,
        loop_var,
        min_val,
        extent,
        kind,
        body,
        thread_binding=None,
        annotations=None,
        span=None,
    ):
        self.__init_handle_by_constructor__(
            _ffi_api.For,  # type: ignore
            loop_var,
            min_val,
            extent,
            kind,
            body,
            thread_binding,
            annotations,
            span,
        )
@tvm._ffi.register_object("tir.While")
class While(Stmt):
    """While node.

    Parameters
    ----------
    condition : PrimExpr
        The termination condition.
    body : Stmt
        The body statement.
    span : Optional[Span]
        The location of this statement in the source code.
    """

    def __init__(self, condition, body, span=None):
        self.__init_handle_by_constructor__(
            _ffi_api.While,  # type: ignore
            condition,
            body,
            span,
        )
@tvm._ffi.register_object("tir.BufferStore")
class BufferStore(Stmt):
    """Buffer store node.

    Parameters
    ----------
    buffer : Buffer
        The buffer.
    value : PrimExpr
        The value to be stored.
    indices : List[PrimExpr]
        The indices location to be stored.
    span : Optional[Span]
        The location of this statement in the source code.
    """

    def __init__(self, buffer, value, indices, span=None):
        self.__init_handle_by_constructor__(
            _ffi_api.BufferStore, buffer, value, indices, span  # type: ignore
        )
@tvm._ffi.register_object("tir.BufferRealize")
class BufferRealize(Stmt):
    """Buffer realize node.

    Parameters
    ----------
    buffer : Buffer
        The buffer.
    bounds : List[Range]
        The bounds of the realized region.
    condition : PrimExpr
        The realize condition.
    body : Stmt
        The body of the statement.
    span : Optional[Span]
        The location of this statement in the source code.
    """

    def __init__(self, buffer, bounds, condition, body, span=None):
        self.__init_handle_by_constructor__(
            _ffi_api.BufferRealize, buffer, bounds, condition, body, span  # type: ignore
        )
@tvm._ffi.register_object("tir.ProducerStore")
class ProducerStore(Stmt):
    """ProducerStore node.

    Parameters
    ----------
    producer : DataProducer
        The data producer.
    value : PrimExpr
        The value to be stored.
    indices : list of Expr
        The index arguments of the store.
    span : Optional[Span]
        The location of this statement in the source code.
    """

    def __init__(self, producer, value, indices, span=None):
        self.__init_handle_by_constructor__(
            _ffi_api.ProducerStore, producer, value, indices, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Allocate")
class Allocate(Stmt):
    """Allocate node.

    Parameters
    ----------
    buffer_var : Var
        The buffer variable.
    dtype : str
        The data type of the buffer.
    extents : list of Expr
        The extents of the allocate
    condition : PrimExpr
        The condition.
    body : Stmt
        The body statement.
    annotations: Optional[Mapping[str, Object]]
        Additional annotation hints
    span : Optional[Span]
        The location of this statement in the source code.
    """

    def __init__(self, buffer_var, dtype, extents, condition, body, annotations=None, span=None):
        # Normalize so the FFI constructor always receives a map.
        if annotations is None:
            annotations = dict()
        self.__init_handle_by_constructor__(
            _ffi_api.Allocate,  # type: ignore
            buffer_var,
            dtype,
            extents,
            condition,
            body,
            annotations,
            span,
        )
@tvm._ffi.register_object("tir.AllocateConst")
class AllocateConst(Stmt):
    """Allocate constant node.

    Parameters
    ----------
    buffer_var : Var
        The buffer variable.
    dtype : str
        The data type of the buffer.
    extents : list of Expr
        The extents of the allocate
    data_or_idx : Union[NDArray, int]
        If an NDArray, this is the const data associated with the
        constant.  If an integer, this is the index into the
        "constants" attribute of the `IRModule` that contains the
        `AllocateConst`.
    body : Stmt
        The body statement.
    annotations : Optional[Map]
        Additional annotations about the allocation.
    span : Optional[Span]
        The location of this statement in the source code.
    """

    def __init__(self, buffer_var, dtype, extents, data_or_idx, body, annotations=None, span=None):
        self.__init_handle_by_constructor__(
            _ffi_api.AllocateConst, buffer_var, dtype, extents, data_or_idx, body, annotations, span
        )
@tvm._ffi.register_object("tir.DeclBuffer")
class DeclBuffer(Stmt):
    """DeclBuffer node: declares `buffer` for the scope of `body`.

    Parameters
    ----------
    buffer: Buffer
        The buffer being declared.
    body: Stmt
        The body statement to be executed.
    span: Optional[Span]
        The location of this DeclBuffer in the source code.
    """

    def __init__(self, buffer, body, span=None):
        self.__init_handle_by_constructor__(_ffi_api.DeclBuffer, buffer, body, span)
@tvm._ffi.register_object("tir.AttrStmt")
class AttrStmt(Stmt):
    """AttrStmt node.

    Parameters
    ----------
    node : Node
        The node to annotate the attribute
    attr_key : str
        Attribute type key.
    value : PrimExpr
        The value of the attribute
    body : Stmt
        The body statement.
    span : Optional[Span]
        The location of this statement in the source code.
    """

    def __init__(self, node, attr_key, value, body, span=None):
        self.__init_handle_by_constructor__(
            _ffi_api.AttrStmt, node, attr_key, value, body, span  # type: ignore
        )
@tvm._ffi.register_object("tir.ProducerRealize")
class ProducerRealize(Stmt):
    """ProducerRealize node.

    Parameters
    ----------
    producer : DataProducer
        The data producer.
    bounds : list of range
        The bound of realize
    condition : PrimExpr
        The realize condition.
    body : Stmt
        The realize body
    storage_scope : str
        The storage scope associated with this realization
    span : Optional[Span]
        The location of this statement in the source code.
    """

    def __init__(self, producer, bounds, condition, body, storage_scope="", span=None):
        self.__init_handle_by_constructor__(
            _ffi_api.ProducerRealize,
            producer,
            bounds,
            condition,
            body,
            storage_scope,
            span,  # type: ignore
        )
@tvm._ffi.register_object("tir.SeqStmt")
class SeqStmt(Stmt):
    """Sequence of statements.

    Supports indexing and ``len`` over the underlying `seq` field.

    Parameters
    ----------
    seq : List[Stmt]
        The statements
    span : Optional[Span]
        The location of this statement in the source code.
    """

    def __init__(self, seq, span=None):
        self.__init_handle_by_constructor__(_ffi_api.SeqStmt, seq, span)  # type: ignore

    def __getitem__(self, i):
        """Return the i-th statement in the sequence."""
        return self.seq[i]

    def __len__(self):
        """Return the number of statements in the sequence."""
        return len(self.seq)
@tvm._ffi.register_object("tir.IfThenElse")
class IfThenElse(Stmt):
    """IfThenElse node.

    Parameters
    ----------
    condition : PrimExpr
        The expression
    then_case : Stmt
        The statement to execute if condition is true.
    else_case : Stmt
        The statement to execute if condition is false.
    span : Optional[Span]
        The location of this statement in the source code.
    """

    def __init__(self, condition, then_case, else_case, span=None):
        self.__init_handle_by_constructor__(
            _ffi_api.IfThenElse, condition, then_case, else_case, span  # type: ignore
        )
@tvm._ffi.register_object("tir.Evaluate")
class Evaluate(Stmt):
    """Evaluate node: evaluates an expression for its side effects.

    Parameters
    ----------
    value : PrimExpr
        The expression to be evaluated.
    span : Optional[Span]
        The location of this statement in the source code.
    """

    def __init__(self, value, span=None):
        self.__init_handle_by_constructor__(_ffi_api.Evaluate, value, span)  # type: ignore
@tvm._ffi.register_object("tir.Prefetch")
class Prefetch(Stmt):
    """Prefetch node.

    Parameters
    ----------
    buffer : Buffer
        The buffer to be prefetched.
    bounds : list of Range
        The bounds to be prefetched.
    span : Optional[Span]
        The location of this statement in the source code.
    """

    def __init__(self, buffer, bounds, span=None):
        self.__init_handle_by_constructor__(_ffi_api.Prefetch, buffer, bounds, span)  # type: ignore
@tvm._ffi.register_object("tir.BufferRegion")
class BufferRegion(Object, Scriptable):
    """BufferRegion node: a multi-dimensional sub-region of a buffer.

    Parameters
    ----------
    buffer : Buffer
        The buffer of the buffer region
    region : List[Range]
        The region array of the buffer region
    """

    buffer: Buffer
    region: List[Range]

    def __init__(self, buffer: Buffer, region: List[Range]):
        self.__init_handle_by_constructor__(_ffi_api.BufferRegion, buffer, region)  # type: ignore
@tvm._ffi.register_object("tir.MatchBufferRegion")
class MatchBufferRegion(Object, Scriptable):
    """MatchBufferRegion node: binds `buffer` to the `source` sub-region.

    Parameters
    ----------
    buffer : Buffer
        The target buffer
    source : BufferRegion
        The region of source buffer
    """

    buffer: Buffer
    source: BufferRegion

    def __init__(self, buffer: Buffer, source: BufferRegion):
        self.__init_handle_by_constructor__(
            _ffi_api.MatchBufferRegion, buffer, source  # type: ignore
        )
@tvm._ffi.register_object("tir.Block")
class Block(Stmt):
    """Block node.

    Parameters
    ----------
    iter_vars : List[IterVar]
        The block Variable.
    reads : List[BufferRegion]
        The read buffer regions of the block.
    writes: List[BufferRegion]
        The write buffer regions of the block.
    name_hint: str
        the name_hint of the block.
    body: Stmt
        The body of the block.
    init: Optional[Stmt]
        The init block of the reduction block
    alloc_buffers: Optional[list[Buffer]]
        The buffer allocations
    match_buffers: Optional[List[MatchBufferRegion]]
        The subregion buffer match
    annotations: Optional[Mapping[str, Object]]
        Additional annotation hints.
    span : Optional[Span]
        The location of this block in the source code.
    """

    iter_vars: List[IterVar]
    reads: List[BufferRegion]
    writes: List[BufferRegion]
    name_hint: str
    body: Stmt
    init: Optional[Stmt]
    alloc_buffers: Optional[List[Buffer]]
    match_buffers: Optional[List[MatchBufferRegion]]
    annotations: Optional[Mapping[str, Object]]
    span: Optional[Span]

    def __init__(
        self,
        iter_vars: List[IterVar],
        reads: List[BufferRegion],
        writes: List[BufferRegion],
        name_hint: str,
        body: Stmt,
        init: Optional[Stmt] = None,
        alloc_buffers: Optional[List[Buffer]] = None,
        match_buffers: Optional[List[MatchBufferRegion]] = None,
        annotations: Optional[Mapping[str, Object]] = None,
        span: Optional[Span] = None,
    ):
        # Normalize optional containers so the FFI always receives
        # concrete (possibly empty) collections rather than None.
        if alloc_buffers is None:
            alloc_buffers = []
        if match_buffers is None:
            match_buffers = []
        if annotations is None:
            annotations = {}
        self.__init_handle_by_constructor__(
            _ffi_api.Block,  # type: ignore
            iter_vars,
            reads,
            writes,
            name_hint,
            body,
            init,
            alloc_buffers,
            match_buffers,
            annotations,
            span,
        )  # type: ignore
@tvm._ffi.register_object("tir.BlockRealize")
class BlockRealize(Stmt):
    """BlockRealize node.

    Parameters
    ----------
    iter_values : List[PrimExpr]
        The binding values of the block var.
    predicate : Union[PrimExpr, bool]
        The predicate of the block.
    block : Block
        The block to realize
    span : Optional[Span]
        The location of this block_realize in the source code.
    """

    iter_values: List[PrimExpr]
    predicate: PrimExpr
    block: Block
    span: Optional[Span]

    def __init__(
        self,
        iter_values: List[PrimExpr],
        predicate: Union[PrimExpr, bool],
        block: Block,
        span: Optional[Span] = None,
    ):
        # Accept a plain Python bool for convenience; convert it to a
        # boolean constant expression before crossing the FFI boundary.
        if isinstance(predicate, bool):
            predicate = const(predicate, "bool")
        self.__init_handle_by_constructor__(
            _ffi_api.BlockRealize,  # type: ignore
            iter_values,
            predicate,
            block,
            span,
        )  # type: ignore
def stmt_seq(*args):
    """Make sequence of statements

    Parameters
    ----------
    args : list of Expr or Var
        List of statements to be combined as sequence.

    Returns
    -------
    stmt : Stmt
        The combined statement.  A single statement is returned directly
        instead of being wrapped in a one-element SeqStmt.
    """
    # Wrap bare expressions in Evaluate so every element is a proper Stmt.
    stmts = [value if isinstance(value, Stmt) else Evaluate(value) for value in args]
    if len(stmts) == 1:
        return stmts[0]
    return SeqStmt(stmts)
def stmt_list(stmt):
    """Flatten a (possibly nested) SeqStmt into a list of statements.

    Parameters
    ----------
    stmt : A block statement

    Returns
    -------
    stmt_list : list of Stmt
        The unpacked list of statements
    """
    # A non-sequence statement is its own singleton list.
    if not isinstance(stmt, SeqStmt):
        return [stmt]
    flattened = []
    for child in stmt:
        flattened.extend(stmt_list(child))
    return flattened
| 17,300 | 22.76511 | 100 | py |
tvm | tvm-main/python/tvm/tir/tensor_intrin/rocm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,missing-function-docstring
"""Intrinsics for AMDGPU tensorization."""
from tvm.script import tir as T
from tvm.runtime import convert
from tvm.tir.expr import Cast, IntImm
from .dot_product_common import dp4a_desc
from .. import TensorIntrin
lift = convert
@T.prim_func
def sdot4(
    A: T.Buffer((4,), "int8", offset_factor=1, align=4, scope="shared"),
    B: T.Buffer((4,), "int8", offset_factor=1, align=4, scope="shared"),
    C: T.Buffer((1,), "int32", offset_factor=1, align=4, scope="local"),
) -> None:
    # 4-element int8 dot product accumulated into a single int32, lowered to
    # the AMDGPU llvm.amdgcn.sdot4 intrinsic on int32-reinterpreted vectors.
    with T.block("root"):
        T.reads(C[0], A[0:4], B[0:4])
        T.writes(C[0])
        C[0] += T.call_llvm_pure_intrin(
            T.llvm_lookup_intrinsic_id("llvm.amdgcn.sdot4"),
            T.uint32(4),
            T.reinterpret(A.vload([0], "int8x4"), dtype="int32"),
            T.reinterpret(B.vload([0], "int8x4"), dtype="int32"),
            T.int32(0),
            T.bool(1),
            dtype="int32",
        )
AMDGPU_SDOT4_INTRIN = "sdot4"
TensorIntrin.register(AMDGPU_SDOT4_INTRIN, dp4a_desc, sdot4)
# MFMA tiling constants: AMD wavefront size and the 16x16 tile dimensions.
WARP_SIZE = 64
M_DIM = 16
N_DIM = 16
def shared_16x4_to_local_64x1_layout_A(i, j):
    """Map a (16, 4) A-tile coordinate to a (thread_id, local_id) pair."""
    return j * 16 + i, convert(0)
def thread_id_shared_access_64x1_to_16x4_layout_A(thread_id, local_id):
    """Inverse map: (thread_id, local_id) -> (row, col) in the 16x4 A tile."""
    row = thread_id % 16
    col = thread_id // 16 + local_id
    return row, col
def shared_4x16_to_local_64x1_layout_B(i, j):
    """Map a (4, 16) B-tile coordinate to a (thread_id, local_id) pair."""
    return i * 16 + j, convert(0)
def thread_id_shared_access_64x1_to_4x16_layout_B(thread_id, local_id):
    """Inverse map: (thread_id, local_id) -> (row, col) in the 4x16 B tile."""
    row = thread_id // 16
    col = thread_id % 16 + local_id
    return row, col
def shared_16x16_to_local_64x4_layout_C(i, j):
    """Map a (16, 16) C-tile coordinate to a (thread_id, local_id) pair."""
    return j + (i // 4) * 16, i % 4
def thread_id_shared_access_64x4_to_16x16_layout_A(thread_id, local_id):
    """Inverse map: (thread_id, local_id) -> (row, col) in the 16x16 A tile."""
    return thread_id % 16, (thread_id // 16) * 4 + local_id
def shared_16x16_to_local_64x4_layout_A(i, j):
    """Map a (16, 16) A-tile coordinate to a (thread_id, local_id) pair."""
    lane = i + 16 * (j // 4)
    return lane, j % 4
def thread_id_shared_access_64x4_to_16x16_layout_B(thread_id, local_id):
    """Inverse map: (thread_id, local_id) -> (row, col) in the 16x16 B tile."""
    return local_id + (thread_id // 16) * 4, thread_id % 16
def shared_16x16_to_local_64x4_layout_B(i, j):
    """Map a (16, 16) B-tile coordinate to a (thread_id, local_id) pair."""
    lane = j + (i // 4) * 16
    return lane, i % 4
def thread_id_shared_access_64x4_to_16x16_layout_C(thread_id, local_id):
    """Inverse map: (thread_id, local_id) -> (row, col) in the 16x16 C tile."""
    return local_id + (thread_id // 16) * 4, thread_id % 16
def get_mma_fill_intrin(dtype, local_size):
    """Build a (desc, impl) intrinsic pair that zero-initializes a 16x16
    accumulator tile held in warp-scope registers.

    Parameters
    ----------
    dtype : str
        Element type of the accumulator tile.
    local_size : int
        Number of accumulator elements held per thread.
    """
    zero = IntImm("int32", 0).astype(dtype)
    # Assume M = N = 16
    index_map = shared_16x16_to_local_64x4_layout_C

    @T.prim_func
    def mma_fill_desc(a: T.handle) -> None:
        C_warp = T.match_buffer(a, [WARP_SIZE, local_size], dtype=dtype, scope="warp")
        with T.block("root"):
            T.reads()
            T.writes(C_warp[0:WARP_SIZE, 0:local_size])
            for i0, i1 in T.grid(M_DIM, N_DIM):
                with T.block("C_warp"):
                    i, j = T.axis.remap("SS", [i0, i1])
                    thread_id, local_id = T.meta_var(index_map(i, j))
                    T.reads()
                    T.writes(C_warp[thread_id, local_id])
                    C_warp[thread_id, local_id] = zero

    @T.prim_func
    def mma_fill_impl(a: T.handle) -> None:
        C_warp = T.match_buffer(
            a, [WARP_SIZE, local_size], dtype=dtype, scope="warp", offset_factor=1
        )
        with T.block("root"):
            T.reads()
            T.writes(C_warp[0:WARP_SIZE, 0:local_size])
            tx = T.env_thread("threadIdx.x")
            T.launch_thread(tx, WARP_SIZE)
            for local_id in T.serial(0, local_size):
                C_warp[tx, local_id] = zero

    return mma_fill_desc, mma_fill_impl
def get_mfma_load_intrin(
    k_dim=4,
    dtype="float32",
    scope="shared",
    is_b=False,
    transposed=False,
):
    """Build a (desc, impl) intrinsic pair that loads one MFMA operand tile
    from shared memory into per-thread warp registers.

    Parameters
    ----------
    k_dim : int
        Reduction dimension of the MFMA tile; only 4 and 16 are supported.
    dtype : str
        Element type of the tile.
    scope : str
        Memory scope the tile is loaded from.
    is_b : bool
        Whether the operand is the B (right-hand side) matrix.
    transposed : bool
        Whether the B matrix is stored transposed.

    Raises
    ------
    ValueError
        If k_dim is neither 4 nor 16.
    """
    # Number of tile elements each of the 64 lanes keeps in registers.
    local_size = (M_DIM * k_dim) // WARP_SIZE if not is_b else (N_DIM * k_dim) // WARP_SIZE
    memory_shape = (M_DIM, k_dim)
    if is_b:
        memory_shape = (N_DIM, k_dim) if transposed else (k_dim, N_DIM)
    row_dim, col_dim = memory_shape
    if k_dim == 4:
        index_map = shared_16x4_to_local_64x1_layout_A
        reverse_index_map = thread_id_shared_access_64x1_to_16x4_layout_A
        if is_b:
            index_map = (
                shared_16x4_to_local_64x1_layout_A
                if transposed
                else shared_4x16_to_local_64x1_layout_B
            )
            reverse_index_map = (
                thread_id_shared_access_64x1_to_16x4_layout_A
                if transposed
                else thread_id_shared_access_64x1_to_4x16_layout_B
            )
    elif k_dim == 16:
        index_map = shared_16x16_to_local_64x4_layout_A
        reverse_index_map = thread_id_shared_access_64x4_to_16x16_layout_A
        if is_b:
            index_map = (
                shared_16x16_to_local_64x4_layout_A
                if transposed
                else shared_16x16_to_local_64x4_layout_B
            )
            reverse_index_map = (
                thread_id_shared_access_64x4_to_16x16_layout_A
                if transposed
                else thread_id_shared_access_64x4_to_16x16_layout_B
            )
    else:
        raise ValueError("k_dim must be 4 or 16 currently")

    @T.prim_func
    def mfma_load_desc(reg_handle: T.handle, memory_handle: T.handle) -> None:
        memory = T.match_buffer(
            memory_handle,
            memory_shape,
            dtype,
            offset_factor=1,
            scope=scope,
        )
        reg = T.match_buffer(
            reg_handle, (WARP_SIZE, local_size), dtype, offset_factor=1, scope="warp"
        )
        with T.block("root"):
            T.reads(memory[0:row_dim, 0:col_dim])
            T.writes(reg[0:WARP_SIZE, 0:local_size])
            for ax0, ax1 in T.grid(row_dim, col_dim):
                with T.block("memory_reg"):
                    v0, v1 = T.axis.remap("SS", [ax0, ax1])
                    T.reads(memory[v0, v1])
                    thread_id, local_id = T.meta_var(index_map(v0, v1))
                    T.writes(reg[thread_id, local_id])
                    reg[thread_id, local_id] = memory[v0, v1]

    @T.prim_func
    def mfma_load_impl(reg_handle: T.handle, memory_handle: T.handle) -> None:
        s0 = T.int32()
        s1 = T.int32()
        memory = T.match_buffer(
            memory_handle,
            memory_shape,
            dtype,
            align=64,
            offset_factor=1,
            scope=scope,
            strides=[s0, s1],
        )
        reg = T.match_buffer(
            reg_handle, (WARP_SIZE, local_size), dtype, align=64, offset_factor=1, scope="warp"
        )
        with T.block("root"):
            T.reads(memory[0:row_dim, 0:col_dim])
            T.writes(reg[0:WARP_SIZE, 0:local_size])
            tx = T.env_thread("threadIdx.x")
            # Fix: launch the environment thread once, before the copy loop.
            # The original launched it inside the loop body, after tx was
            # already consumed by reverse_index_map; sibling impls
            # (mma_fill_impl, mfma_store_impl) bind the thread before looping.
            T.launch_thread(tx, WARP_SIZE)
            for local_id in T.serial(0, local_size):
                row, col = T.meta_var(reverse_index_map(tx, local_id))
                reg[tx, local_id] = memory[row, col]

    return mfma_load_desc, mfma_load_impl
def get_mfma_intrin(k_dim, in_dtype="float32", out_dtype="float32", b_transposed=False):
    """Build a (desc, impl) intrinsic pair computing C += A * B with the
    AMDGPU MFMA instruction family.

    Parameters
    ----------
    k_dim : int
        Reduction dimension of the MFMA tile (4 or 16).
    in_dtype : str
        Element type of the A/B operands.
    out_dtype : str
        Element type of the C accumulator.
    b_transposed : bool
        Whether B is stored transposed.

    Raises
    ------
    ValueError
        If k_dim is neither 4 nor 16.
    """
    local_size = (M_DIM * k_dim) // WARP_SIZE
    local_size_out = (M_DIM * N_DIM) // WARP_SIZE
    if k_dim == 4:
        index_map_A = shared_16x4_to_local_64x1_layout_A
        index_map_B = shared_4x16_to_local_64x1_layout_B
        index_map_C = shared_16x16_to_local_64x4_layout_C
    elif k_dim == 16:
        index_map_A = shared_16x16_to_local_64x4_layout_A
        index_map_B = shared_16x16_to_local_64x4_layout_B
        index_map_C = shared_16x16_to_local_64x4_layout_C
    else:
        raise ValueError("k_dim must be 4 or 16 currently")

    out_dtype_abbrv = {"float16": "f16", "float32": "f32", "int8": "i8", "int32": "i32"}[out_dtype]
    in_dtype_abbrv = {"float16": "f16", "float32": "f32", "int8": "i8", "int32": "i32"}[in_dtype]
    # e.g. llvm.amdgcn.mfma.f32.16x16x16f16
    mfma_intrin = f"llvm.amdgcn.mfma.{out_dtype_abbrv}.{M_DIM}x{N_DIM}x{k_dim}{in_dtype_abbrv}"

    def maybe_cast(v):
        # Promote operands to the accumulator dtype in the description body.
        if out_dtype != in_dtype:
            return Cast(out_dtype, v)
        return v

    def maybe_swap(i, j):
        # Swap B's indices when it is stored transposed.
        if b_transposed:
            return j, i
        return i, j

    @T.prim_func
    def mfma_sync_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (WARP_SIZE, local_size), in_dtype, offset_factor=1, scope="warp")
        B = T.match_buffer(b, (WARP_SIZE, local_size), in_dtype, offset_factor=1, scope="warp")
        C = T.match_buffer(c, (WARP_SIZE, local_size_out), out_dtype, offset_factor=1, scope="warp")
        with T.block("root"):
            T.reads(
                C[0:WARP_SIZE, 0:local_size_out],
                A[0:WARP_SIZE, 0:local_size],
                B[0:WARP_SIZE, 0:local_size],
            )
            T.writes(C[0:WARP_SIZE, 0:local_size_out])
            for i, j, k in T.grid(M_DIM, N_DIM, k_dim):
                with T.block("C"):
                    i, j, k = T.axis.remap("SSR", [i, j, k])
                    b_row_ind, b_col_ind = T.meta_var(maybe_swap(k, j))
                    thread_id_C, local_id_C = T.meta_var(index_map_C(i, j))
                    thread_id_A, local_id_A = T.meta_var(index_map_A(i, k))
                    thread_id_B, local_id_B = T.meta_var(index_map_B(b_row_ind, b_col_ind))
                    T.reads(
                        C[thread_id_C, local_id_C],
                        A[thread_id_A, local_id_A],
                        B[thread_id_B, local_id_B],
                    )
                    T.writes(C[thread_id_C, local_id_C])
                    C[thread_id_C, local_id_C] += maybe_cast(
                        A[thread_id_A, local_id_A]
                    ) * maybe_cast(B[thread_id_B, local_id_B])

    @T.prim_func
    def mfma_sync_impl_float(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (WARP_SIZE, local_size), in_dtype, offset_factor=1, scope="warp")
        B = T.match_buffer(b, (WARP_SIZE, local_size), in_dtype, offset_factor=1, scope="warp")
        C = T.match_buffer(c, (WARP_SIZE, local_size_out), out_dtype, offset_factor=1, scope="warp")
        with T.block("root"):
            T.reads(
                A[0:WARP_SIZE, 0:local_size],
                B[0:WARP_SIZE, 0:local_size],
                C[0:WARP_SIZE, 0:local_size_out],
            )
            T.writes(C[0:WARP_SIZE, 0:local_size_out])
            tx = T.env_thread("threadIdx.x")
            T.launch_thread(tx, WARP_SIZE)
            C[tx, 0:local_size_out] = T.call_llvm_pure_intrin(
                T.llvm_lookup_intrinsic_id(mfma_intrin),
                T.uint32(6),
                A[tx, 0:local_size],
                B[tx, 0:local_size],
                C[tx, 0:local_size_out],
                T.int32(0),
                T.int32(0),
                T.int32(0),
                dtype=f"{out_dtype}x4",
            )

    @T.prim_func
    def mfma_sync_impl_integer(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (WARP_SIZE, local_size), in_dtype, offset_factor=1, scope="warp")
        B = T.match_buffer(b, (WARP_SIZE, local_size), in_dtype, offset_factor=1, scope="warp")
        C = T.match_buffer(c, (WARP_SIZE, local_size_out), out_dtype, offset_factor=1, scope="warp")
        with T.block("root"):
            T.reads(
                A[0:WARP_SIZE, 0:local_size],
                B[0:WARP_SIZE, 0:local_size],
                C[0:WARP_SIZE, 0:local_size_out],
            )
            T.writes(C[0:WARP_SIZE, 0:local_size_out])
            tx = T.env_thread("threadIdx.x")
            T.launch_thread(tx, WARP_SIZE)
            C[tx, 0:local_size_out] = T.call_llvm_pure_intrin(
                T.llvm_lookup_intrinsic_id(mfma_intrin),
                T.uint32(6),
                T.call_intrin("int32", "tir.reinterpret", A[tx, 0:local_size]),
                # Fix: the second MFMA operand must be B. The original passed
                # A here twice, computing A*A instead of A*B (the desc above
                # multiplies A by B).
                T.call_intrin("int32", "tir.reinterpret", B[tx, 0:local_size]),
                C[tx, 0:local_size_out],
                T.int32(0),
                T.int32(0),
                T.int32(0),
                dtype=f"{out_dtype}x4",
            )

    # int8 inputs need the reinterpret-based integer variant.
    return (
        (mfma_sync_desc, mfma_sync_impl_integer)
        if in_dtype == "int8"
        else (mfma_sync_desc, mfma_sync_impl_float)
    )
def get_mfma_store_intrin(local_size=4, dtype="float32", scope="global"):
    """Build a (desc, impl) intrinsic pair that stores a warp-register
    accumulator tile back to a 16x16 buffer in `scope`.

    Parameters
    ----------
    local_size : int
        Number of accumulator elements held per thread.
    dtype : str
        Element type of the accumulator tile.
    scope : str
        Memory scope of the destination buffer.
    """
    index_map = shared_16x16_to_local_64x4_layout_C

    @T.prim_func
    def mfma_store_desc(a: T.handle, c: T.handle) -> None:
        C_warp = T.match_buffer(a, [WARP_SIZE, local_size], dtype=dtype, scope="warp")
        C = T.match_buffer(c, [M_DIM, N_DIM], dtype=dtype, scope=scope)
        with T.block("root"):
            T.reads(C_warp[0:WARP_SIZE, 0:local_size])
            T.writes(C[0:M_DIM, 0:N_DIM])
            for i0, i1 in T.grid(M_DIM, N_DIM):
                with T.block("C_warp"):
                    v0, v1 = T.axis.remap("SS", [i0, i1])
                    thread_id, local_id = T.meta_var(index_map(v0, v1))
                    T.reads(C_warp[thread_id, local_id])
                    T.writes(C[v0, v1])
                    C[v0, v1] = C_warp[thread_id, local_id]

    @T.prim_func
    def mfma_store_impl(a: T.handle, c: T.handle) -> None:
        s0 = T.int32()
        s1 = T.int32()
        C_warp = T.match_buffer(
            a, [WARP_SIZE, local_size], dtype=dtype, scope="warp", offset_factor=1
        )
        C = T.match_buffer(
            c, [M_DIM, N_DIM], dtype=dtype, scope=scope, offset_factor=1, strides=[s0, s1]
        )
        with T.block("root"):
            T.reads(C_warp[0:WARP_SIZE, 0:local_size])
            T.writes(C[0:M_DIM, 0:N_DIM])
            tx = T.env_thread("threadIdx.x")
            T.launch_thread(tx, WARP_SIZE)
            # Inlined form of thread_id_shared_access_64x4_to_16x16_layout_C.
            for i in range(local_size):
                C[((tx // 16) * 4) + i, (tx % 16)] = C_warp[tx, i]

    return mfma_store_desc, mfma_store_impl
# --- Accumulator zero-fill intrinsics ---
ROCM_MFMA_fill_16x16_f32_INTRIN = "ROCM_mfma_fill_16x16_f32"
TensorIntrin.register(ROCM_MFMA_fill_16x16_f32_INTRIN, *get_mma_fill_intrin("float32", 4))
# NOTE(review): "int" (which TVM's dtype parsing defaults to int32) vs the
# explicit "int32" used by the store intrinsic below — confirm intentional.
ROCM_MFMA_fill_16x16_i32_INTRIN = "ROCM_mfma_fill_16x16_i32"
TensorIntrin.register(ROCM_MFMA_fill_16x16_i32_INTRIN, *get_mma_fill_intrin("int", 4))
# --- Operand load intrinsics (shared memory -> warp registers) ---
ROCM_MFMA_LOAD_16x16_A_SHARED_s8_INTRIN = "rocm_mfma_load_16x16_a_shared_s8"
TensorIntrin.register(
    ROCM_MFMA_LOAD_16x16_A_SHARED_s8_INTRIN, *get_mfma_load_intrin(16, "int8", "shared")
)
ROCM_MFMA_LOAD_16x16_B_SHARED_s8_INTRIN = "rocm_mfma_load_b_16x16_shared_s8"
TensorIntrin.register(
    ROCM_MFMA_LOAD_16x16_B_SHARED_s8_INTRIN, *get_mfma_load_intrin(16, "int8", "shared", is_b=True)
)
ROCM_MFMA_LOAD_16x16_A_SHARED_f16_INTRIN = "rocm_mfma_load_16x16_a_shared_f16"
TensorIntrin.register(
    ROCM_MFMA_LOAD_16x16_A_SHARED_f16_INTRIN, *get_mfma_load_intrin(16, "float16", "shared")
)
ROCM_MFMA_LOAD_16x16_B_SHARED_f16_INTRIN = "rocm_mfma_load_b_16x16_shared_f16"
TensorIntrin.register(
    ROCM_MFMA_LOAD_16x16_B_SHARED_f16_INTRIN,
    *get_mfma_load_intrin(16, "float16", "shared", is_b=True),
)
ROCM_MFMA_LOAD_16x4_A_SHARED_f32_INTRIN = "rocm_mfma_load_16x4_a_shared_f32"
TensorIntrin.register(
    ROCM_MFMA_LOAD_16x4_A_SHARED_f32_INTRIN, *get_mfma_load_intrin(4, "float32", "shared")
)
ROCM_MFMA_LOAD_16x4_B_SHARED_f32_INTRIN = "rocm_mfma_load_b_16x4_shared_f32"
TensorIntrin.register(
    ROCM_MFMA_LOAD_16x4_B_SHARED_f32_INTRIN,
    *get_mfma_load_intrin(4, "float32", "shared", is_b=True),
)
# --- MFMA compute intrinsics ---
ROCM_MFMA_f32f32f32_INTRIN = "rocm_mfma_f32f32f32"
TensorIntrin.register(ROCM_MFMA_f32f32f32_INTRIN, *get_mfma_intrin(4, "float32", "float32"))
ROCM_MFMA_f16f16f32_INTRIN = "rocm_mfma_f16f16f32"
TensorIntrin.register(ROCM_MFMA_f16f16f32_INTRIN, *get_mfma_intrin(16, "float16", "float32"))
ROCM_MFMA_s8s8s32_INTRIN = "rocm_mfma_s8s8s32"
TensorIntrin.register(ROCM_MFMA_s8s8s32_INTRIN, *get_mfma_intrin(16, "int8", "int32"))
# --- Accumulator store intrinsics (warp registers -> global) ---
ROCM_MFMA_STORE_16x16_s32_INTRIN = "rocm_mfma_store_16x16_s32"
TensorIntrin.register(
    ROCM_MFMA_STORE_16x16_s32_INTRIN, *get_mfma_store_intrin(4, "int32", "global")
)
ROCM_MFMA_STORE_16x16_f32_INTRIN = "rocm_mfma_store_16x16_f32"
TensorIntrin.register(
    ROCM_MFMA_STORE_16x16_f32_INTRIN, *get_mfma_store_intrin(4, "float32", "global")
)
| 17,107 | 34.641667 | 100 | py |
tvm | tvm-main/python/tvm/tir/tensor_intrin/x86.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,missing-function-docstring
"""Intrinsics for x86 tensorization."""
from tvm.script import tir as T
from .. import TensorIntrin
# Tensorized intrinsic description and VNNI-specific implementation.
# Equivalent to the ones in topi/x86/tensor_intrin.py
@T.prim_func
def dot_product_16x4_u8i8i32_desc(
    A: T.Buffer((4,), "uint8", offset_factor=1),
    B: T.Buffer((16, 4), "int8", offset_factor=1),
    C: T.Buffer((16,), "int32", offset_factor=1),
) -> None:
    # Semantics: C[i] += sum_k u8(A[k]) * i8(B[i, k]) for i in 0..15, k in 0..3.
    with T.block("root"):
        T.reads(C[0:16], A[0:4], B[0:16, 0:4])
        T.writes(C[0:16])
        for i in T.serial(0, 16):
            for k in T.serial(0, 4):
                with T.block("update"):
                    vi, vk = T.axis.remap("SR", [i, k])
                    C[vi] = C[vi] + T.cast(A[vk], "int32") * T.cast(B[vi, vk], "int32")
@T.prim_func
def dot_product_16x4_u8i8i32_vnni(
    A: T.Buffer((4,), "uint8", offset_factor=1),
    B: T.Buffer((16, 4), "int8", offset_factor=1),
    C: T.Buffer((16,), "int32", offset_factor=1),
) -> None:
    # AVX512-VNNI implementation: one vpdpbusd.512 computes all 16 int32 dot
    # products, with the 4-byte A operand broadcast across the 512-bit vector.
    with T.block("root"):
        T.reads(C[0:16], A[0:4], B[0:16, 0:4])
        T.writes(C[0:16])
        A_u8x4 = A.vload([0], "uint8x4")
        A_i32 = T.reinterpret(A_u8x4, dtype="int32")
        B_i8x64 = B.vload([0, 0], dtype="int8x64")
        B_i32x16 = T.reinterpret(B_i8x64, dtype="int32x16")
        C_i32x16 = C.vload([0], dtype="int32x16")
        C[T.ramp(T.int32(0), 1, 16)] = T.call_llvm_pure_intrin(
            T.llvm_lookup_intrinsic_id("llvm.x86.avx512.vpdpbusd.512"),
            T.uint32(3),
            C_i32x16,
            T.broadcast(A_i32, 16),
            B_i32x16,
            dtype="int32x16",
        )
@T.prim_func
def dot_product_16x4_u8i8i32_avx512(
    A: T.Buffer((4,), "uint8", offset_factor=1),
    B: T.Buffer((16, 4), "int8", offset_factor=1),
    C: T.Buffer((16,), "int32", offset_factor=1),
) -> None:
    # Plain AVX512 fallback (no VNNI): pmaddubs.w forms 32 int16 partial
    # products, then pmaddw.d against a vector of ones pairs them up into 16
    # int32 sums that are accumulated into C.
    with T.block("root"):
        T.reads(C[0:16], A[0:4], B[0:16, 0:4])
        T.writes(C[0:16])
        A_u8x4 = A.vload([0], "uint8x4")
        A_i32 = T.reinterpret(A_u8x4, dtype="int32")
        A_brdcst = T.broadcast(A_i32, 16)
        A_u8x64 = T.reinterpret(A_brdcst, dtype="uint8x64")
        B_i8x64 = B.vload([0, 0], dtype="int8x64")
        Red = T.call_llvm_pure_intrin(
            T.llvm_lookup_intrinsic_id("llvm.x86.avx512.pmaddubs.w.512"),
            T.uint32(2),
            A_u8x64,
            B_i8x64,
            dtype="int16x32",
        )
        C[T.ramp(T.int32(0), 1, 16)] += T.call_llvm_pure_intrin(
            T.llvm_lookup_intrinsic_id("llvm.x86.avx512.pmaddw.d.512"),
            T.uint32(2),
            Red,
            T.int16x32(1),
            dtype="int32x16",
        )
# Both implementations share the same 16x4 u8*i8 -> i32 dot-product desc.
VNNI_DOT_16x4_INTRIN = "dot_16x4_vnni"
TensorIntrin.register(
    VNNI_DOT_16x4_INTRIN, dot_product_16x4_u8i8i32_desc, dot_product_16x4_u8i8i32_vnni
)
AVX512_DOT_16x4_INTRIN = "dot_16x4_avx512"
TensorIntrin.register(
    AVX512_DOT_16x4_INTRIN, dot_product_16x4_u8i8i32_desc, dot_product_16x4_u8i8i32_avx512
)
| 3,826 | 32.278261 | 90 | py |
tvm | tvm-main/python/tvm/tir/tensor_intrin/cuda.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,missing-function-docstring
"""Intrinsics for tensorization on NVIDIA GPU."""
import re
from typing import Dict, Tuple
from typing_extensions import Literal
from tvm.script import tir as T
from tvm.tir.function import PrimFunc
from ..._ffi import register_func
from ...runtime import convert
from .. import Cast, IntImm, TensorIntrin
def shared_16x16_to_ldmatrix_32x8_layout(i, j):
    """Map a (16, 16) tile coordinate to the ldmatrix (thread, local) 32x8 layout."""
    lane = 4 * (i % 8) + (j % 8) // 2
    elem = 4 * (j // 8) + (i // 8) * 2 + (j % 2)
    return lane, elem
def shared_16x32_to_ldmatrix_32x16_layout(i, j):
    """Map a (16, 32) tile coordinate to the ldmatrix (thread, local) 32x16 layout."""
    lane = 4 * (i % 8) + (j % 16) // 4
    elem = 8 * (j // 16) + (i // 8) * 4 + j % 4
    return lane, elem
def shared_32x16_to_ldmatrix_32x16_layout(i, j):
    """Map a (32, 16) tile coordinate to the ldmatrix (thread, local) 32x16 layout."""
    lane = (i % 16) // 4 + 4 * (j % 8)
    elem = 8 * (j // 8) + (i // 16) * 4 + i % 4
    return lane, elem
def get_tensor_core_load_offset_factor(dtype):
    """Return the buffer ``offset_factor`` used by tensor core load intrins.

    The factor is the element count of a 256-bit chunk for byte-or-wider
    types, and of a 128-bit chunk for sub-byte types.
    """
    bit_match = re.search(r"(\d+)", dtype)
    width = int(bit_match.group(1))
    # Sub-byte operations use a different offset factor.
    if width <= 4:
        return 128 // width
    return 256 // width
@register_func("tir.index_map.shared_16x16_to_ldmatrix_32x8_layout")
def index_map_shared_16x16_to_ldmatrix_32x8_layout(ind):
    """FFI-registered wrapper: apply the 16x16 -> ldmatrix 32x8 layout map to
    a 2-element index and return the converted [thread_id, local_id] pair."""
    thread_id, local_id = shared_16x16_to_ldmatrix_32x8_layout(ind[0], ind[1])
    return convert([thread_id, local_id])
# `lift` wraps a Python value into the corresponding TVM object/expression.
lift = convert
# Output-tile dimensions of one MMA fragment and CUDA warp geometry.
M_DIM = 16
N_DIM = 16
WARP_SIZE = 32
HALF_WARP = WARP_SIZE // 2
# HALF_WARP lifted to a TVM expression, for use inside TIR offset lambdas.
HALF_WARP_expr = lift(HALF_WARP)
def get_ldmatrix_intrin(k_dim, dtype, is_b, transposed, shared_scope="shared"):
    """Build the (desc, impl) PrimFunc pair for a warp-level ldmatrix load.

    The descriptor expresses the load as an elementwise copy from a shared
    memory tile into a "warp"-scope buffer addressed through the ldmatrix
    register layout map; the implementation lowers it to one T.ptx_ldmatrix.

    Parameters
    ----------
    k_dim : int
        Reduction dimension of the fragment; 16 (float16) or 32 (int8).
    dtype : str
        Element type, "float16" or "int8".
    is_b : bool
        Whether the B operand is loaded (otherwise the A operand).
    transposed : bool
        Whether the operand is stored transposed (only supported for B).
    shared_scope : str
        Scope of the source buffer, e.g. "shared" or "shared.dyn".

    Returns
    -------
    (PrimFunc, PrimFunc)
        Descriptor and implementation, for TensorIntrin.register.
    """
    # Number of elements each of the 32 threads holds after the load.
    local_size = (M_DIM * k_dim) // WARP_SIZE
    shared_offset = None
    index_map = None
    if transposed:
        assert is_b, "Transposed A matrix not supported"
    ldmatrix_col_major = is_b and not transposed
    if k_dim == 16:
        assert dtype == "float16"
        index_map = shared_16x16_to_ldmatrix_32x8_layout
        # shared_offset maps (lane id, row stride) to the per-thread element
        # offset passed to ldmatrix.
        if transposed:
            shared_offset = (
                lambda tx, stride: stride * 8 * (tx // HALF_WARP_expr)
                + stride * (tx % 8)
                + 8 * ((tx % HALF_WARP_expr) // 8)
            )
        else:
            shared_offset = lambda tx, stride: stride * (tx % HALF_WARP_expr) + 8 * (
                tx // HALF_WARP_expr
            )
    else:
        assert (
            k_dim == 32 and dtype == "int8"
        ), "Only k_dim == 16 (float16) or k_dim == 32 (int8) supported for now"
        if ldmatrix_col_major:
            index_map = shared_32x16_to_ldmatrix_32x16_layout
            # A dummy offset, ldmatrix cannot be used for int8 + trans case.
            # We still use the ldmatrix intrinsic, but lower it to a manual loop in the codegen.
            # Only the stride information is required.
            shared_offset = lambda _, stride: stride
        elif is_b and transposed:
            index_map = shared_16x32_to_ldmatrix_32x16_layout
            shared_offset = (
                lambda tx, stride: stride * 8 * (tx // HALF_WARP_expr)
                + (tx % 8) * stride
                + 16 * ((tx % HALF_WARP_expr) // 8)
            )
        else:
            index_map = shared_16x32_to_ldmatrix_32x16_layout
            shared_offset = lambda tx, stride: stride * (tx % 16) + 16 * (tx // 16)
    assert index_map and shared_offset
    # A non-transposed B tile is stored (k_dim, M_DIM); all others (M_DIM, k_dim).
    if is_b and not transposed:
        row_dim = k_dim
        col_dim = M_DIM
    else:
        row_dim = M_DIM
        col_dim = k_dim
    shmem_shape = (row_dim, col_dim)
    offset_factor = get_tensor_core_load_offset_factor(dtype)
    @T.prim_func
    def ldmatrix_desc(warp_handle: T.handle, shared_handle: T.handle) -> None:
        # Semantics: elementwise copy shared -> warp through the layout map.
        shared = T.match_buffer(
            shared_handle,
            shmem_shape,
            dtype,
            align=64,
            offset_factor=offset_factor,
            scope=shared_scope,
        )
        warp = T.match_buffer(
            warp_handle,
            (WARP_SIZE, local_size),
            dtype,
            align=64,
            offset_factor=offset_factor,
            scope="warp",
        )
        with T.block("root"):
            T.reads(shared[0:row_dim, 0:col_dim])
            T.writes(warp[0:WARP_SIZE, 0:local_size])
            for ax0, ax1 in T.grid(row_dim, col_dim):
                with T.block("shared_warp"):
                    v0, v1 = T.axis.remap("SS", [ax0, ax1])
                    T.reads(shared[v0, v1])
                    thread_id, local_id = T.meta_var(index_map(v0, v1))
                    T.writes(warp[thread_id, local_id])
                    warp[thread_id, local_id] = shared[v0, v1]
    @T.prim_func
    def ldmatrix_impl(warp_handle: T.handle, shared_handle: T.handle) -> None:
        # Symbolic strides of the shared tile (needed for the ldmatrix offset).
        s0 = T.int32()
        s1 = T.int32()
        shared = T.match_buffer(
            shared_handle,
            shmem_shape,
            dtype,
            align=64,
            offset_factor=offset_factor,
            scope=shared_scope,
            strides=[s0, s1],
        )
        warp = T.match_buffer(
            warp_handle,
            (WARP_SIZE, local_size),
            dtype,
            align=64,
            offset_factor=offset_factor,
            scope="warp",
        )
        with T.block("root"):
            T.reads(shared[0:row_dim, 0:col_dim])
            T.writes(warp[0:WARP_SIZE, 0:local_size])
            tx = T.env_thread("threadIdx.x")
            T.launch_thread(tx, WARP_SIZE)
            T.evaluate(
                T.ptx_ldmatrix(
                    ldmatrix_col_major,
                    4, # Always load 4 matrices
                    ".b16",
                    warp.data,
                    warp.elem_offset + lift(local_size) * tx,
                    shared.access_ptr("r"),
                    shared_offset(tx, s0),
                    dtype=dtype,
                )
            )
    return ldmatrix_desc, ldmatrix_impl
def get_mma_intrin(k_dim, out_dtype, b_transposed):
    """Build the (desc, impl) PrimFunc pair for a warp-level mma.sync compute.

    The descriptor is a plain GEMM update over one (M_DIM, N_DIM, k_dim)
    tile, addressed through the per-operand register layout maps; the
    implementation issues two T.ptx_mma calls — the 16-wide output is
    covered by two m16n8kX halves (second call offsets B and C by half
    the per-thread local size).

    Parameters
    ----------
    k_dim : int
        Reduction size; 16 (float16 inputs) or 32 (int8 inputs).
    out_dtype : str
        Accumulator type: "float16", "float32" or "int32".
    b_transposed : bool
        Whether the B operand is stored transposed.

    Returns
    -------
    (PrimFunc, PrimFunc)
        Descriptor and implementation, for TensorIntrin.register.
    """
    local_size = (M_DIM * k_dim) // WARP_SIZE
    local_size_out = (M_DIM * N_DIM) // 32
    index_map_C = shared_16x16_to_ldmatrix_32x8_layout
    if k_dim == 16:
        index_map_A = shared_16x16_to_ldmatrix_32x8_layout
        index_map_B = shared_16x16_to_ldmatrix_32x8_layout
        mma_prefix = "m16n8k16"
    elif k_dim == 32 and b_transposed:
        index_map_A = index_map_B = shared_16x32_to_ldmatrix_32x16_layout
        mma_prefix = "m16n8k32"
    elif k_dim == 32 and not b_transposed:
        index_map_A = shared_16x32_to_ldmatrix_32x16_layout
        index_map_B = shared_32x16_to_ldmatrix_32x16_layout
        mma_prefix = "m16n8k32"
    else:
        assert False
    out_dtype_abbrv = {"float16": "fp16", "float32": "fp32", "int32": "int32"}[out_dtype]
    # Input dtype is implied by the accumulator: fp16 in for float out, int8 for int32.
    if out_dtype in ["float16", "float32"]:
        in_dtype = "float16"
        in_dtype_abbrv = "fp16"
    else:
        in_dtype = "int8"
        in_dtype_abbrv = "int8"
    def maybe_cast(v):
        # Widen an operand to the accumulator type when they differ.
        if out_dtype in ["float32", "int32"]:
            return Cast(out_dtype, v)
        return v
    def maybe_swap(i, j):
        # Swap indices when B is stored transposed.
        if b_transposed:
            return j, i
        return i, j
    in_offset_factor = get_tensor_core_load_offset_factor(in_dtype)
    out_offset_factor = get_tensor_core_load_offset_factor(out_dtype)
    @T.prim_func
    def mma_sync_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(
            a,
            (WARP_SIZE, local_size),
            in_dtype,
            align=64,
            offset_factor=in_offset_factor,
            scope="warp",
        )
        B = T.match_buffer(
            b,
            (WARP_SIZE, local_size),
            in_dtype,
            align=64,
            offset_factor=in_offset_factor,
            scope="warp",
        )
        C = T.match_buffer(
            c,
            (WARP_SIZE, local_size_out),
            out_dtype,
            align=64,
            offset_factor=out_offset_factor,
            scope="warp",
        )
        with T.block("root"):
            T.reads(
                C[0:WARP_SIZE, 0:local_size_out],
                A[0:WARP_SIZE, 0:local_size],
                B[0:WARP_SIZE, 0:local_size],
            )
            T.writes(C[0:WARP_SIZE, 0:local_size_out])
            for i, j, k in T.grid(M_DIM, N_DIM, k_dim):
                with T.block("C"):
                    i, j, k = T.axis.remap("SSR", [i, j, k])
                    b_row_ind, b_col_ind = T.meta_var(maybe_swap(k, j))
                    thread_id_C, local_id_C = T.meta_var(index_map_C(i, j))
                    thread_id_A, local_id_A = T.meta_var(index_map_A(i, k))
                    thread_id_B, local_id_B = T.meta_var(index_map_B(b_row_ind, b_col_ind))
                    T.reads(
                        C[thread_id_C, local_id_C],
                        A[thread_id_A, local_id_A],
                        B[thread_id_B, local_id_B],
                    )
                    T.writes(C[thread_id_C, local_id_C])
                    C[thread_id_C, local_id_C] += maybe_cast(
                        A[thread_id_A, local_id_A]
                    ) * maybe_cast(B[thread_id_B, local_id_B])
    @T.prim_func
    def mma_sync_impl(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(
            a,
            (WARP_SIZE, local_size),
            in_dtype,
            align=64,
            offset_factor=in_offset_factor,
            scope="warp",
        )
        B = T.match_buffer(
            b,
            (WARP_SIZE, local_size),
            in_dtype,
            align=64,
            offset_factor=in_offset_factor,
            scope="warp",
        )
        C = T.match_buffer(
            c,
            (WARP_SIZE, local_size_out),
            out_dtype,
            align=64,
            offset_factor=out_offset_factor,
            scope="warp",
        )
        with T.block("root"):
            T.reads(
                C[0:WARP_SIZE, 0:local_size_out],
                A[0:WARP_SIZE, 0:local_size],
                B[0:WARP_SIZE, 0:local_size],
            )
            T.writes(C[0:WARP_SIZE, 0:local_size_out])
            tx = T.env_thread("threadIdx.x")
            T.launch_thread(tx, WARP_SIZE)
            # First m16n8kX half of the output tile.
            T.evaluate(
                T.ptx_mma(
                    mma_prefix,
                    "row",
                    "col",
                    in_dtype_abbrv,
                    in_dtype_abbrv,
                    out_dtype_abbrv,
                    A.data,
                    A.elem_offset + tx * lift(local_size),
                    B.data,
                    B.elem_offset + tx * lift(local_size),
                    C.data,
                    C.elem_offset + tx * lift(local_size_out),
                    False,
                    dtype=out_dtype,
                )
            )
            # Second half: B and C offset by half the per-thread local size.
            T.evaluate(
                T.ptx_mma(
                    mma_prefix,
                    "row",
                    "col",
                    in_dtype_abbrv,
                    in_dtype_abbrv,
                    out_dtype_abbrv,
                    A.data,
                    A.elem_offset + tx * lift(local_size),
                    B.data,
                    B.elem_offset + tx * lift(local_size) + lift(local_size) // 2,
                    C.data,
                    C.elem_offset + tx * lift(local_size_out) + lift(local_size_out) // 2,
                    False,
                    dtype=out_dtype,
                )
            )
    return mma_sync_desc, mma_sync_impl
def get_mma_fill_intrin(dtype, local_size):
    """Build the (desc, impl) PrimFunc pair that zero-fills a 16x16 mma
    accumulator held in "warp" scope (``local_size`` elements per thread).

    The descriptor writes zero through the C register layout map; the
    implementation lowers to a single T.mma_fill call.
    """
    zero = IntImm("int32", 0).astype(dtype)
    # Assume M = N = 16
    index_map = shared_16x16_to_ldmatrix_32x8_layout
    @T.prim_func
    def mma_fill_desc(a: T.handle) -> None:
        C_warp = T.match_buffer(a, [WARP_SIZE, local_size], dtype=dtype, scope="warp")
        with T.block("root"):
            T.reads()
            T.writes(C_warp[0:WARP_SIZE, 0:local_size])
            for i0, i1 in T.grid(M_DIM, N_DIM):
                with T.block("C_warp"):
                    i, j = T.axis.remap("SS", [i0, i1])
                    thread_id, local_id = T.meta_var(index_map(i, j))
                    T.reads()
                    T.writes(C_warp[thread_id, local_id])
                    C_warp[thread_id, local_id] = zero
    @T.prim_func
    def mma_fill_impl(a: T.handle) -> None:
        C_warp = T.match_buffer(
            a, [WARP_SIZE, local_size], dtype=dtype, scope="warp", offset_factor=1
        )
        with T.block("root"):
            T.reads()
            T.writes(C_warp[0:WARP_SIZE, 0:local_size])
            tx = T.env_thread("threadIdx.x")
            T.launch_thread(tx, WARP_SIZE)
            T.evaluate(T.mma_fill(local_size, C_warp.data, C_warp.elem_offset, dtype=dtype))
    return mma_fill_desc, mma_fill_impl
def get_mma_store_intrin(dtype, local_size, scope="global"):
    """Build the (desc, impl) PrimFunc pair that copies a 16x16 mma
    accumulator from "warp" scope out to a 2-D buffer in ``scope``.

    The descriptor reads through the C register layout map; the
    implementation lowers to a single T.mma_store call.
    """
    # Assume M = N = 16
    index_map = shared_16x16_to_ldmatrix_32x8_layout
    @T.prim_func
    def mma_store_desc(a: T.handle, c: T.handle) -> None:
        C_warp = T.match_buffer(a, [WARP_SIZE, local_size], dtype=dtype, scope="warp")
        C = T.match_buffer(c, [M_DIM, N_DIM], dtype=dtype, scope=scope)
        with T.block("root"):
            T.reads(C_warp[0:WARP_SIZE, 0:local_size])
            T.writes(C[0:M_DIM, 0:N_DIM])
            for i0, i1 in T.grid(M_DIM, N_DIM):
                with T.block("C_warp"):
                    v0, v1 = T.axis.remap("SS", [i0, i1])
                    thread_id, local_id = T.meta_var(index_map(v0, v1))
                    T.reads(C_warp[thread_id, local_id])
                    T.writes(C[v0, v1])
                    C[v0, v1] = C_warp[thread_id, local_id]
    @T.prim_func
    def mma_store_impl(a: T.handle, c: T.handle) -> None:
        # Symbolic strides of the destination buffer.
        s0 = T.int32()
        s1 = T.int32()
        C_warp = T.match_buffer(
            a, [WARP_SIZE, local_size], dtype=dtype, scope="warp", offset_factor=1
        )
        C = T.match_buffer(
            c, [M_DIM, N_DIM], dtype=dtype, scope=scope, offset_factor=1, strides=[s0, s1]
        )
        with T.block("root"):
            T.reads(C_warp[0:WARP_SIZE, 0:local_size])
            T.writes(C[0:M_DIM, 0:N_DIM])
            tx = T.env_thread("threadIdx.x")
            T.launch_thread(tx, WARP_SIZE)
            T.evaluate(
                T.mma_store(
                    M_DIM,
                    N_DIM,
                    C.access_ptr("w"),
                    C_warp.data,
                    C_warp.elem_offset,
                    s0,
                    dtype=dtype,
                )
            )
    return mma_store_desc, mma_store_impl
# --- ldmatrix load intrinsics (fp16 k=16 / int8 k=32; optional transpose and
# dynamic shared memory variants) ---
LDMATRIX_16x16_A_INTRIN = "mma.ldmatrix_16x16_a"
TensorIntrin.register(LDMATRIX_16x16_A_INTRIN, *get_ldmatrix_intrin(16, "float16", False, False))
LDMATRIX_16x16_B_INTRIN = "mma.ldmatrix_16x16_b"
TensorIntrin.register(LDMATRIX_16x16_B_INTRIN, *get_ldmatrix_intrin(16, "float16", True, False))
LDMATRIX_16x16_A_DYN_INTRIN = "mma.ldmatrix_16x16_a_dyn"
TensorIntrin.register(
    LDMATRIX_16x16_A_DYN_INTRIN, *get_ldmatrix_intrin(16, "float16", False, False, "shared.dyn")
)
LDMATRIX_16x16_B_DYN_INTRIN = "mma.ldmatrix_16x16_b_dyn"
TensorIntrin.register(
    LDMATRIX_16x16_B_DYN_INTRIN, *get_ldmatrix_intrin(16, "float16", True, False, "shared.dyn")
)
LDMATRIX_16x16_B_TRANS_INTRIN = "mma.ldmatrix_16x16_b_trans"
TensorIntrin.register(
    LDMATRIX_16x16_B_TRANS_INTRIN, *get_ldmatrix_intrin(16, "float16", True, True)
)
LDMATRIX_16x32_A_INTRIN = "mma.ldmatrix_16x32_a"
TensorIntrin.register(LDMATRIX_16x32_A_INTRIN, *get_ldmatrix_intrin(32, "int8", False, False))
LDMATRIX_32x16_B_INTRIN = "mma.ldmatrix_32x16_b"
TensorIntrin.register(LDMATRIX_32x16_B_INTRIN, *get_ldmatrix_intrin(32, "int8", True, False))
LDMATRIX_16x32_B_TRANS_INTRIN = "mma.ldmatrix_16x32_b_trans"
TensorIntrin.register(LDMATRIX_16x32_B_TRANS_INTRIN, *get_ldmatrix_intrin(32, "int8", True, True))
# --- mma.sync compute intrinsics ---
MMA_f16f16f32_INTRIN = "mma_f16f16f32"
TensorIntrin.register(MMA_f16f16f32_INTRIN, *get_mma_intrin(16, "float32", False))
MMA_f16f16f32_TRANS_INTRIN = "mma_f16f16f32_trans"
TensorIntrin.register(MMA_f16f16f32_TRANS_INTRIN, *get_mma_intrin(16, "float32", True))
MMA_f16f16f16_INTRIN = "mma_f16f16f16"
TensorIntrin.register(MMA_f16f16f16_INTRIN, *get_mma_intrin(16, "float16", False))
MMA_f16f16f16_TRANS_INTRIN = "mma_f16f16f16_trans"
TensorIntrin.register(MMA_f16f16f16_TRANS_INTRIN, *get_mma_intrin(16, "float16", True))
MMA_i8i8i32_INTRIN = "mma_i8i8i32"
TensorIntrin.register(MMA_i8i8i32_INTRIN, *get_mma_intrin(32, "int32", False))
MMA_i8i8i32_TRANS_INTRIN = "mma_i8i8i32_trans"
TensorIntrin.register(MMA_i8i8i32_TRANS_INTRIN, *get_mma_intrin(32, "int32", True))
# --- accumulator init and store intrinsics ---
MMA_fill_16x16_f32_INTRIN = "mma_fill_16x16_f32"
TensorIntrin.register(MMA_fill_16x16_f32_INTRIN, *get_mma_fill_intrin("float32", 8))
MMA_fill_16x16_f16_INTRIN = "mma_fill_16x16_f16"
TensorIntrin.register(MMA_fill_16x16_f16_INTRIN, *get_mma_fill_intrin("float16", 8))
MMA_fill_16x16_i32_INTRIN = "mma_fill_16x16_i32"
TensorIntrin.register(MMA_fill_16x16_i32_INTRIN, *get_mma_fill_intrin("int32", 8))
MMA_store_16x16_f32_global_INTRIN = "mma_store_16x16_f32_global_"
TensorIntrin.register(
    MMA_store_16x16_f32_global_INTRIN, *get_mma_store_intrin("float32", 8, "global")
)
MMA_store_16x16_f16_global_INTRIN = "mma_store_16x16_f16_global_"
TensorIntrin.register(
    MMA_store_16x16_f16_global_INTRIN, *get_mma_store_intrin("float16", 8, "global")
)
MMA_store_16x16_i32_global_INTRIN = "mma_store_16x16_i32_global_"
TensorIntrin.register(
    MMA_store_16x16_i32_global_INTRIN, *get_mma_store_intrin("int32", 8, "global")
)
######## WMMA intrinsics ########
def get_wmma_fragment_index(buffer, stride, m_dim, n_dim):
    """Compute the linear wmma fragment index from the buffer's elem_offset.

    Fragments of shape (m_dim, n_dim) tile a 2-D region whose row pitch is
    ``stride``; the result counts fragments in row-major order.
    """
    row = buffer.elem_offset // stride
    col = buffer.elem_offset % stride
    fragments_per_row = stride // n_dim
    return (row // m_dim) * fragments_per_row + col // n_dim
def get_wmma_load_intrin(
    m_dim: int,
    n_dim: int,
    k_dim: int,
    dtype: str,
    shared_scope: str,
    is_b: bool,
    is_col_major: bool,
) -> Tuple[PrimFunc, PrimFunc]:
    """Generator of wmma_load intrins.

    Returns the (desc, impl) pair that copies one fragment from
    ``shared_scope`` into a ``wmma.matrix_a``/``wmma.matrix_b`` fragment
    buffer; the implementation lowers to T.tvm_load_matrix_sync.
    """
    wmma_fragment_scope = f"wmma.matrix_{'b' if is_b else 'a'}"
    layout = "col_major" if is_col_major else "row_major"
    offset_factor = get_tensor_core_load_offset_factor(dtype)
    # B fragments are (k_dim, n_dim); A fragments (m_dim, k_dim); swapped for col-major.
    frag_m, frag_n = (k_dim, n_dim) if is_b else (m_dim, k_dim)
    if is_col_major:
        frag_m, frag_n = frag_n, frag_m
    @T.prim_func
    def wmma_load_desc(a: T.handle, c: T.handle) -> None:
        A = T.match_buffer(
            a, (frag_m, frag_n), dtype, align=64, offset_factor=offset_factor, scope=shared_scope
        )
        C = T.match_buffer(
            c,
            (frag_m, frag_n),
            dtype,
            align=64,
            offset_factor=offset_factor,
            scope=wmma_fragment_scope,
        )
        with T.block("root"):
            T.reads(A[0:frag_m, 0:frag_n])
            T.writes(C[0:frag_m, 0:frag_n])
            for i, j in T.grid(frag_m, frag_n):
                with T.block("load"):
                    vii, vjj = T.axis.remap("SS", [i, j])
                    C[vii, vjj] = A[vii, vjj]
    @T.prim_func
    def wmma_load_impl(a: T.handle, c: T.handle) -> None:
        # Symbolic strides: s* for the source, d* for the fragment buffer.
        s1 = T.int32()
        s0 = T.int32()
        d1 = T.int32()
        d0 = T.int32()
        A = T.match_buffer(
            a,
            (frag_m, frag_n),
            dtype,
            align=64,
            offset_factor=offset_factor,
            scope=shared_scope,
            strides=[s1, s0],
        )
        C = T.match_buffer(
            c,
            (frag_m, frag_n),
            dtype,
            align=64,
            offset_factor=offset_factor,
            scope=wmma_fragment_scope,
            strides=[d1, d0],
        )
        with T.block("root"):
            T.reads(A[0:frag_m, 0:frag_n])
            T.writes(C[0:frag_m, 0:frag_n])
            T.evaluate(
                T.tvm_load_matrix_sync(
                    C.data,
                    m_dim,
                    n_dim,
                    k_dim,
                    get_wmma_fragment_index(C, d1, frag_m, frag_n),
                    A.access_ptr("r"),
                    s1,
                    layout,
                    dtype="handle",
                )
            )
    return wmma_load_desc, wmma_load_impl
def get_wmma_fill_intrin(
    m_dim: int, n_dim: int, k_dim: int, dtype: str
) -> Tuple[PrimFunc, PrimFunc]:
    """Generator of wmma_fill intrins.

    Returns the (desc, impl) pair that zero-fills one (m_dim, n_dim)
    ``wmma.accumulator`` fragment; the implementation lowers to
    T.tvm_fill_fragment.
    """
    zero = IntImm("int32", 0).astype(dtype)
    offset_factor = get_tensor_core_load_offset_factor(dtype)
    @T.prim_func
    def wmma_fill_desc(c: T.handle) -> None:
        C = T.match_buffer(
            c,
            (m_dim, n_dim),
            dtype,
            align=64,
            offset_factor=offset_factor,
            scope="wmma.accumulator",
        )
        with T.block("root"):
            T.reads()
            T.writes(C[0:m_dim, 0:n_dim])
            for i, j in T.grid(m_dim, n_dim):
                with T.block("init"):
                    vii, vjj = T.axis.remap("SS", [i, j])
                    C[vii, vjj] = zero
    @T.prim_func
    def wmma_fill_impl(c: T.handle) -> None:
        # Symbolic strides of the accumulator fragment buffer.
        d1 = T.int32()
        d0 = T.int32()
        C = T.match_buffer(
            c,
            (m_dim, n_dim),
            dtype,
            align=64,
            offset_factor=offset_factor,
            scope="wmma.accumulator",
            strides=[d1, d0],
        )
        with T.block("root"):
            T.reads()
            T.writes(C[0:m_dim, 0:n_dim])
            T.evaluate(
                T.tvm_fill_fragment(
                    C.data,
                    m_dim,
                    n_dim,
                    k_dim,
                    get_wmma_fragment_index(C, d1, m_dim, n_dim),
                    T.float32(0),
                    dtype="handle",
                )
            )
    return wmma_fill_desc, wmma_fill_impl
def get_wmma_store_intrin(
    m_dim: int, n_dim: int, k_dim: int, dtype: str, scope: str
) -> Tuple[PrimFunc, PrimFunc]:
    """Generator of wmma_store intrins.

    Returns the (desc, impl) pair that copies one (m_dim, n_dim)
    ``wmma.accumulator`` fragment out to a buffer in ``scope``; the
    implementation lowers to T.tvm_store_matrix_sync (row-major).
    """
    offset_factor = get_tensor_core_load_offset_factor(dtype)
    @T.prim_func
    def wmma_store_desc(a: T.handle, c: T.handle) -> None:
        A = T.match_buffer(
            a,
            (m_dim, n_dim),
            dtype,
            align=64,
            offset_factor=offset_factor,
            scope="wmma.accumulator",
        )
        C = T.match_buffer(
            c, (m_dim, n_dim), dtype, align=64, offset_factor=offset_factor, scope=scope
        )
        with T.block("root"):
            T.reads(A[0:m_dim, 0:n_dim])
            T.writes(C[0:m_dim, 0:n_dim])
            for i, j in T.grid(m_dim, n_dim):
                with T.block("store"):
                    vii, vjj = T.axis.remap("SS", [i, j])
                    C[vii, vjj] = A[vii, vjj]
    @T.prim_func
    def wmma_store_impl(a: T.handle, c: T.handle) -> None:
        # Symbolic strides: s* for the destination, d* for the fragment buffer.
        s1 = T.int32()
        s0 = T.int32()
        d1 = T.int32()
        d0 = T.int32()
        A = T.match_buffer(
            a,
            (m_dim, n_dim),
            dtype,
            align=64,
            offset_factor=offset_factor,
            scope="wmma.accumulator",
            strides=[d1, d0],
        )
        C = T.match_buffer(
            c,
            (m_dim, n_dim),
            dtype,
            align=64,
            offset_factor=offset_factor,
            scope=scope,
            strides=[s1, s0],
        )
        with T.block("root"):
            T.reads(A[0:m_dim, 0:n_dim])
            T.writes(C[0:m_dim, 0:n_dim])
            T.evaluate(
                T.tvm_store_matrix_sync(
                    A.data,
                    m_dim,
                    n_dim,
                    k_dim,
                    get_wmma_fragment_index(A, d1, m_dim, n_dim),
                    C.access_ptr("w"),
                    s1,
                    "row_major",
                    dtype="handle",
                )
            )
    return wmma_store_desc, wmma_store_impl
def get_wmma_sync_intrin(
    m_dim: int, n_dim: int, k_dim: int, in_dtype: str, out_dtype: str, b_transposed: bool
) -> Tuple[PrimFunc, PrimFunc]:
    """Generator of wmma_sync intrins.

    Returns the (desc, impl) pair for a fragment-level GEMM update
    C += cast(A) * cast(B); the implementation lowers to T.tvm_mma_sync.
    """
    in_offset_factor = get_tensor_core_load_offset_factor(in_dtype)
    out_offset_factor = get_tensor_core_load_offset_factor(out_dtype)
    def maybe_cast(v):
        # Widen operands to the accumulator type when input/output types differ.
        if in_dtype != out_dtype:
            return Cast(out_dtype, v)
        return v
    def maybe_swap(i, j):
        # Swap indices when B is stored transposed.
        if b_transposed:
            return j, i
        return i, j
    b_shape_0, b_shape_1 = maybe_swap(k_dim, n_dim)
    @T.prim_func
    def wmma_sync_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(
            a,
            (m_dim, k_dim),
            in_dtype,
            align=64,
            offset_factor=in_offset_factor,
            scope="wmma.matrix_a",
        )
        B = T.match_buffer(
            b,
            maybe_swap(k_dim, n_dim),
            in_dtype,
            align=64,
            offset_factor=in_offset_factor,
            scope="wmma.matrix_b",
        )
        C = T.match_buffer(
            c,
            (m_dim, n_dim),
            out_dtype,
            align=64,
            offset_factor=out_offset_factor,
            scope="wmma.accumulator",
        )
        with T.block("root"):
            T.reads(C[0:m_dim, 0:n_dim], A[0:m_dim, 0:k_dim], B[0:b_shape_0, 0:b_shape_1])
            T.writes(C[0:m_dim, 0:n_dim])
            for i, j, k in T.grid(m_dim, n_dim, k_dim):
                with T.block(""):
                    vii, vjj, vkk = T.axis.remap("SSR", [i, j, k])
                    B_index_0, B_index_1 = T.meta_var(maybe_swap(vkk, vjj))
                    C[vii, vjj] = C[vii, vjj] + maybe_cast(A[vii, vkk]) * maybe_cast(
                        B[B_index_0, B_index_1]
                    )
    @T.prim_func
    def wmma_sync_impl(a: T.handle, b: T.handle, c: T.handle) -> None:
        # Symbolic strides of the three fragment buffers.
        a1 = T.int32()
        a0 = T.int32()
        b1 = T.int32()
        b0 = T.int32()
        c1 = T.int32()
        c0 = T.int32()
        A = T.match_buffer(
            a,
            (m_dim, k_dim),
            in_dtype,
            align=64,
            offset_factor=in_offset_factor,
            scope="wmma.matrix_a",
            strides=[a1, a0],
        )
        B = T.match_buffer(
            b,
            maybe_swap(k_dim, n_dim),
            in_dtype,
            align=64,
            offset_factor=in_offset_factor,
            scope="wmma.matrix_b",
            strides=[b1, b0],
        )
        C = T.match_buffer(
            c,
            (m_dim, n_dim),
            out_dtype,
            align=64,
            offset_factor=out_offset_factor,
            scope="wmma.accumulator",
            strides=[c1, c0],
        )
        with T.block("root"):
            T.reads(C[0:m_dim, 0:n_dim], A[0:m_dim, 0:k_dim], B[0:b_shape_0, 0:b_shape_1])
            T.writes(C[0:m_dim, 0:n_dim])
            T.evaluate(
                T.tvm_mma_sync(
                    C.data,
                    get_wmma_fragment_index(C, c1, m_dim, n_dim),
                    A.data,
                    get_wmma_fragment_index(A, a1, m_dim, k_dim),
                    B.data,
                    get_wmma_fragment_index(B, b1, b_shape_0, b_shape_1),
                    C.data,
                    get_wmma_fragment_index(C, c1, m_dim, n_dim),
                    dtype="handle",
                )
            )
    return wmma_sync_desc, wmma_sync_impl
# --- wmma_sync compute intrinsics ---
WMMA_SYNC_16x16x16_f16f16f32_INTRIN = "wmma_sync_16x16x16_f16f16f32"
TensorIntrin.register(
    WMMA_SYNC_16x16x16_f16f16f32_INTRIN,
    *get_wmma_sync_intrin(16, 16, 16, "float16", "float32", False),
)
WMMA_SYNC_16x16x16_f16f16f32_TRANS_INTRIN = "wmma_sync_16x16x16_f16f16f32_trans"
TensorIntrin.register(
    WMMA_SYNC_16x16x16_f16f16f32_TRANS_INTRIN,
    *get_wmma_sync_intrin(16, 16, 16, "float16", "float32", True),
)
WMMA_SYNC_16x16x16_f16f16f16_INTRIN = "wmma_sync_16x16x16_f16f16f16"
TensorIntrin.register(
    WMMA_SYNC_16x16x16_f16f16f16_INTRIN,
    *get_wmma_sync_intrin(16, 16, 16, "float16", "float16", False),
)
WMMA_SYNC_16x16x16_f16f16f16_TRANS_INTRIN = "wmma_sync_16x16x16_f16f16f16_trans"
TensorIntrin.register(
    WMMA_SYNC_16x16x16_f16f16f16_TRANS_INTRIN,
    *get_wmma_sync_intrin(16, 16, 16, "float16", "float16", True),
)
WMMA_SYNC_16x16x16_s8s8s32_INTRIN = "wmma_sync_16x16x16_s8s8s32"
TensorIntrin.register(
    WMMA_SYNC_16x16x16_s8s8s32_INTRIN, *get_wmma_sync_intrin(16, 16, 16, "int8", "int32", False)
)
WMMA_SYNC_16x16x16_s8s8s32_TRANS_INTRIN = "wmma_sync_16x16x16_s8s8s32_trans"
TensorIntrin.register(
    WMMA_SYNC_16x16x16_s8s8s32_TRANS_INTRIN,
    *get_wmma_sync_intrin(16, 16, 16, "int8", "int32", True),
)
WMMA_SYNC_8x8x32_s4s4s32_TRANS_INTRIN = "wmma_sync_8x8x32_s4s4s32_trans"
TensorIntrin.register(
    WMMA_SYNC_8x8x32_s4s4s32_TRANS_INTRIN, *get_wmma_sync_intrin(8, 8, 32, "int4", "int32", True)
)
# --- wmma_load intrinsics (static / dynamic shared, optional transpose) ---
WMMA_LOAD_16x16x16_F16_A_INTRIN = "wmma_load_16x16x16_f16_a_shared"
TensorIntrin.register(
    WMMA_LOAD_16x16x16_F16_A_INTRIN,
    *get_wmma_load_intrin(16, 16, 16, "float16", "shared", False, False),
)
WMMA_LOAD_16x16x16_F16_A_DYN_INTRIN = "wmma_load_16x16x16_f16_a_shared_dyn"
TensorIntrin.register(
    WMMA_LOAD_16x16x16_F16_A_DYN_INTRIN,
    *get_wmma_load_intrin(16, 16, 16, "float16", "shared.dyn", False, False),
)
WMMA_LOAD_16x16x16_F16_B_INTRIN = "wmma_load_16x16x16_f16_b_shared"
TensorIntrin.register(
    WMMA_LOAD_16x16x16_F16_B_INTRIN,
    *get_wmma_load_intrin(16, 16, 16, "float16", "shared", True, False),
)
WMMA_LOAD_16x16x16_F16_B_DYN_INTRIN = "wmma_load_16x16x16_f16_b_shared_dyn"
TensorIntrin.register(
    WMMA_LOAD_16x16x16_F16_B_DYN_INTRIN,
    *get_wmma_load_intrin(16, 16, 16, "float16", "shared.dyn", True, False),
)
WMMA_LOAD_16x16x16_F16_A_TRANS_INTRIN = "wmma_load_16x16x16_f16_a_trans_shared"
TensorIntrin.register(
    WMMA_LOAD_16x16x16_F16_A_TRANS_INTRIN,
    *get_wmma_load_intrin(16, 16, 16, "float16", "shared", False, True),
)
WMMA_LOAD_16x16x16_F16_A_TRANS_DYN_INTRIN = "wmma_load_16x16x16_f16_a_trans_shared_dyn"
TensorIntrin.register(
    WMMA_LOAD_16x16x16_F16_A_TRANS_DYN_INTRIN,
    *get_wmma_load_intrin(16, 16, 16, "float16", "shared.dyn", False, True),
)
WMMA_LOAD_16x16x16_F16_B_TRANS_INTRIN = "wmma_load_16x16x16_f16_b_trans_shared"
TensorIntrin.register(
    WMMA_LOAD_16x16x16_F16_B_TRANS_INTRIN,
    *get_wmma_load_intrin(16, 16, 16, "float16", "shared", True, True),
)
WMMA_LOAD_16x16x16_F16_B_TRANS_DYN_INTRIN = "wmma_load_16x16x16_f16_b_trans_shared_dyn"
TensorIntrin.register(
    WMMA_LOAD_16x16x16_F16_B_TRANS_DYN_INTRIN,
    *get_wmma_load_intrin(16, 16, 16, "float16", "shared.dyn", True, True),
)
WMMA_LOAD_16x16x16_S8_A_INTRIN = "wmma_load_16x16x16_s8_a_shared"
TensorIntrin.register(
    WMMA_LOAD_16x16x16_S8_A_INTRIN,
    *get_wmma_load_intrin(16, 16, 16, "int8", "shared", False, False),
)
WMMA_LOAD_16x16x16_S8_A_DYN_INTRIN = "wmma_load_16x16x16_s8_a_shared_dyn"
TensorIntrin.register(
    WMMA_LOAD_16x16x16_S8_A_DYN_INTRIN,
    *get_wmma_load_intrin(16, 16, 16, "int8", "shared.dyn", False, False),
)
WMMA_LOAD_16x16x16_S8_B_INTRIN = "wmma_load_16x16x16_s8_b_shared"
TensorIntrin.register(
    WMMA_LOAD_16x16x16_S8_B_INTRIN, *get_wmma_load_intrin(16, 16, 16, "int8", "shared", True, False)
)
WMMA_LOAD_16x16x16_S8_B_DYN_INTRIN = "wmma_load_16x16x16_s8_b_shared_dyn"
TensorIntrin.register(
    WMMA_LOAD_16x16x16_S8_B_DYN_INTRIN,
    *get_wmma_load_intrin(16, 16, 16, "int8", "shared.dyn", True, False),
)
WMMA_LOAD_16x16x16_S8_A_TRANS_INTRIN = "wmma_load_16x16x16_s8_a_trans_shared"
TensorIntrin.register(
    WMMA_LOAD_16x16x16_S8_A_TRANS_INTRIN,
    *get_wmma_load_intrin(16, 16, 16, "int8", "shared", False, True),
)
WMMA_LOAD_16x16x16_S8_A_TRANS_DYN_INTRIN = "wmma_load_16x16x16_s8_a_trans_shared_dyn"
TensorIntrin.register(
    WMMA_LOAD_16x16x16_S8_A_TRANS_DYN_INTRIN,
    *get_wmma_load_intrin(16, 16, 16, "int8", "shared.dyn", False, True),
)
WMMA_LOAD_16x16x16_S8_B_TRANS_INTRIN = "wmma_load_16x16x16_s8_b_trans_shared"
TensorIntrin.register(
    WMMA_LOAD_16x16x16_S8_B_TRANS_INTRIN,
    *get_wmma_load_intrin(16, 16, 16, "int8", "shared", True, True),
)
WMMA_LOAD_16x16x16_S8_B_TRANS_DYN_INTRIN = "wmma_load_16x16x16_s8_b_trans_shared_dyn"
TensorIntrin.register(
    WMMA_LOAD_16x16x16_S8_B_TRANS_DYN_INTRIN,
    *get_wmma_load_intrin(16, 16, 16, "int8", "shared.dyn", True, True),
)
WMMA_LOAD_8x8x32_S4_A_INTRIN = "wmma_load_8x8x32_s4_a_shared"
TensorIntrin.register(
    WMMA_LOAD_8x8x32_S4_A_INTRIN, *get_wmma_load_intrin(8, 8, 32, "int4", "shared", False, False)
)
WMMA_LOAD_8x8x32_S4_A_DYN_INTRIN = "wmma_load_8x8x32_s4_a_shared_dyn"
TensorIntrin.register(
    WMMA_LOAD_8x8x32_S4_A_DYN_INTRIN,
    *get_wmma_load_intrin(8, 8, 32, "int4", "shared.dyn", False, False),
)
WMMA_LOAD_8x8x32_S4_B_TRANS_INTRIN = "wmma_load_8x8x32_s4_b_trans_shared"
TensorIntrin.register(
    WMMA_LOAD_8x8x32_S4_B_TRANS_INTRIN,
    *get_wmma_load_intrin(8, 8, 32, "int4", "shared", True, True),
)
WMMA_LOAD_8x8x32_S4_B_TRANS_DYN_INTRIN = "wmma_load_8x8x32_s4_b_trans_shared_dyn"
TensorIntrin.register(
    WMMA_LOAD_8x8x32_S4_B_TRANS_DYN_INTRIN,
    *get_wmma_load_intrin(8, 8, 32, "int4", "shared.dyn", True, True),
)
# --- wmma_fill (accumulator init) intrinsics ---
WMMA_FILL_16x16x16_F32_INTRIN = "wmma_fill_16x16x16_f32"
TensorIntrin.register(WMMA_FILL_16x16x16_F32_INTRIN, *get_wmma_fill_intrin(16, 16, 16, "float32"))
WMMA_FILL_16x16x16_F16_INTRIN = "wmma_fill_16x16x16_f16"
TensorIntrin.register(WMMA_FILL_16x16x16_F16_INTRIN, *get_wmma_fill_intrin(16, 16, 16, "float16"))
WMMA_FILL_16x16x16_S32_INTRIN = "wmma_fill_16x16x16_s32"
TensorIntrin.register(WMMA_FILL_16x16x16_S32_INTRIN, *get_wmma_fill_intrin(16, 16, 16, "int32"))
WMMA_FILL_8x8x32_S32_INTRIN = "wmma_fill_8x8x32_s32"
TensorIntrin.register(WMMA_FILL_8x8x32_S32_INTRIN, *get_wmma_fill_intrin(8, 8, 32, "int32"))
# --- wmma_store intrinsics (to shared, dynamic shared, or global) ---
WMMA_STORE_16x16x16_F32_SHARED_INTRIN = "wmma_store_16x16x16_f32_shared"
TensorIntrin.register(
    WMMA_STORE_16x16x16_F32_SHARED_INTRIN, *get_wmma_store_intrin(16, 16, 16, "float32", "shared")
)
WMMA_STORE_16x16x16_F32_SHARED_DYN_INTRIN = "wmma_store_16x16x16_f32_shared_dyn"
TensorIntrin.register(
    WMMA_STORE_16x16x16_F32_SHARED_DYN_INTRIN,
    *get_wmma_store_intrin(16, 16, 16, "float32", "shared.dyn"),
)
WMMA_STORE_16x16x16_F16_SHARED_INTRIN = "wmma_store_16x16x16_f16_shared"
TensorIntrin.register(
    WMMA_STORE_16x16x16_F16_SHARED_INTRIN, *get_wmma_store_intrin(16, 16, 16, "float16", "shared")
)
WMMA_STORE_16x16x16_F16_SHARED_DYN_INTRIN = "wmma_store_16x16x16_f16_shared_dyn"
TensorIntrin.register(
    WMMA_STORE_16x16x16_F16_SHARED_DYN_INTRIN,
    *get_wmma_store_intrin(16, 16, 16, "float16", "shared.dyn"),
)
WMMA_STORE_16x16x16_S32_SHARED_INTRIN = "wmma_store_16x16x16_s32_shared"
TensorIntrin.register(
    WMMA_STORE_16x16x16_S32_SHARED_INTRIN, *get_wmma_store_intrin(16, 16, 16, "int32", "shared")
)
WMMA_STORE_16x16x16_S32_SHARED_DYN_INTRIN = "wmma_store_16x16x16_s32_shared_dyn"
TensorIntrin.register(
    WMMA_STORE_16x16x16_S32_SHARED_DYN_INTRIN,
    *get_wmma_store_intrin(16, 16, 16, "int32", "shared.dyn"),
)
WMMA_STORE_8x8x32_S32_SHARED_INTRIN = "wmma_store_8x8x32_s32_shared"
TensorIntrin.register(
    WMMA_STORE_8x8x32_S32_SHARED_INTRIN, *get_wmma_store_intrin(8, 8, 32, "int32", "shared")
)
WMMA_STORE_8x8x32_S32_SHARED_DYN_INTRIN = "wmma_store_8x8x32_s32_shared_dyn"
TensorIntrin.register(
    WMMA_STORE_8x8x32_S32_SHARED_DYN_INTRIN, *get_wmma_store_intrin(8, 8, 32, "int32", "shared.dyn")
)
WMMA_STORE_16x16x16_F32_GLOBAL_INTRIN = "wmma_store_16x16x16_f32_global"
TensorIntrin.register(
    WMMA_STORE_16x16x16_F32_GLOBAL_INTRIN, *get_wmma_store_intrin(16, 16, 16, "float32", "global")
)
WMMA_STORE_16x16x16_F16_GLOBAL_INTRIN = "wmma_store_16x16x16_f16_global"
TensorIntrin.register(
    WMMA_STORE_16x16x16_F16_GLOBAL_INTRIN, *get_wmma_store_intrin(16, 16, 16, "float16", "global")
)
WMMA_STORE_16x16x16_S32_GLOBAL_INTRIN = "wmma_store_16x16x16_s32_global"
TensorIntrin.register(
    WMMA_STORE_16x16x16_S32_GLOBAL_INTRIN, *get_wmma_store_intrin(16, 16, 16, "int32", "global")
)
WMMA_STORE_8x8x32_S32_GLOBAL_INTRIN = "wmma_store_8x8x32_s32_global"
TensorIntrin.register(
    WMMA_STORE_8x8x32_S32_GLOBAL_INTRIN, *get_wmma_store_intrin(8, 8, 32, "int32", "global")
)
def get_wmma_intrin_group(
    load_scope: Literal["shared", "shared.dyn"],
    store_scope: Literal["global", "shared", "shared.dyn"],
    in_dtype: str,
    out_dtype: str,
    trans_b: bool,
) -> Dict[str, str]:
    """Look up the registered wmma intrinsic names for one configuration.

    Parameters
    ----------
    load_scope : Literal["shared", "shared.dyn"]
        The memory scope of the input buffer.
    store_scope : Literal["global", "shared", "shared.dyn"]
        The memory scope of the result buffer.
    in_dtype : str
        The input data type.
    out_dtype : str
        The output data dtype.
    trans_b : bool
        Whether the input matrix B is transposed.

    Returns
    -------
    ret : Dict[str, str]
        A group of tensor intrinsics, keyed by "init", "load_a", "load_b",
        "compute" and "store".
    """
    assert load_scope in ["shared", "shared.dyn"]
    assert store_scope in ["global", "shared", "shared.dyn"]
    assert in_dtype in ["float16", "int8"]
    assert out_dtype in ["float16", "float32", "int32"]
    shape = "16x16x16"
    # Short dtype tags used in the registered intrinsic names.
    abbrv = {"float16": "f16", "float32": "f32", "int8": "s8", "int32": "s32"}
    in_tag = abbrv[in_dtype]
    out_tag = abbrv[out_dtype]
    # Registered names use "shared_dyn" rather than "shared.dyn".
    load_suffix = load_scope.replace(".", "_")
    store_suffix = store_scope.replace(".", "_")
    b_trans = "_trans" if trans_b else ""
    return {
        # e.g. wmma_fill_16x16x16_f16
        "init": f"wmma_fill_{shape}_{out_tag}",
        # e.g. wmma_load_16x16x16_f16_a_shared
        "load_a": f"wmma_load_{shape}_{in_tag}_a_{load_suffix}",
        # e.g. wmma_load_16x16x16_f16_b_trans_shared_dyn
        "load_b": f"wmma_load_{shape}_{in_tag}_b{b_trans}_{load_suffix}",
        # e.g. wmma_sync_16x16x16_f16f16f32_trans
        "compute": f"wmma_sync_{shape}_{in_tag}{in_tag}{out_tag}{b_trans}",
        # e.g. wmma_store_16x16x16_f16_shared_dyn
        "store": f"wmma_store_{shape}_{out_tag}_{store_suffix}",
    }
######## MMA intrinsics ########
def get_index_A(elem_offset, stride):
    """Register index for an m16n8k8 A fragment, derived from the flat
    ``elem_offset`` of a buffer whose row pitch is ``stride``.

    The region is tiled by 32x8 blocks counted row-major; each block spans 8
    register slots, with a +4 adjustment for the lower 16 rows of a block.
    """
    row = elem_offset // stride
    col = elem_offset % stride
    blocks_per_row = stride // 8
    block_no = (row // 32) * blocks_per_row + col // 8
    return block_no * 8 + (row % 32) // 16 * 4
def get_index_B(elem_offset, stride):
    """Register index for an m16n8k8 B fragment, derived from the flat
    ``elem_offset`` of a buffer whose row pitch is ``stride``.

    The region is tiled by 8x32 blocks counted row-major; each block spans 8
    register slots, with a +2-per-8-columns adjustment within a block.
    """
    row = elem_offset // stride
    col = elem_offset % stride
    blocks_per_row = stride // 32
    block_no = (row // 8) * blocks_per_row + col // 32
    return block_no * 8 + (col % 32) // 8 * 2
def get_index_C(elem_offset, stride):
    """Register index for an m16n8k8 C (accumulator) fragment, derived from
    the flat ``elem_offset`` of a buffer whose row pitch is ``stride``.

    The region is tiled by 8x8 blocks; pairs of block-rows are interleaved
    with the block-column contributing a factor of 2.
    """
    row = elem_offset // stride
    col = elem_offset % stride
    blocks_per_row = stride // 8
    block_i = row // 8
    block_j = col // 8
    return (block_i // 2) * 2 * blocks_per_row + block_i % 2 + block_j * 2
def get_mma_init_intrin(
    m_dim: int, n_dim: int, k_dim: int, dtype: str
) -> Tuple[PrimFunc, PrimFunc]:
    """Generator of mma init intrins.

    Returns the (desc, impl) pair that zero-fills an (m_dim, n_dim)
    "m16n8k8.matrixC" accumulator; the implementation distributes the
    writes across the 32 lanes with a vectorized inner loop.
    """
    del k_dim # unused
    zero = IntImm("int32", 0).astype(dtype)
    assert m_dim % 8 == 0 and n_dim % 4 == 0, "m_dim and n_dim must be multiple of 8 and 4"
    assert dtype in ["float16", "float32"]
    # Each lane writes n_dim // 4 contiguous elements; cap the vector width at 128 bits.
    assert n_dim // 4 * int(dtype[-2:]) <= 128, "n_dim vectorize failed"
    @T.prim_func
    def mma_init_desc(c: T.handle) -> None:
        dst = T.match_buffer(
            c, (m_dim, n_dim), dtype, align=64, offset_factor=1, scope="m16n8k8.matrixC"
        )
        with T.block("root"):
            T.reads()
            T.writes(dst[0:m_dim, 0:n_dim])
            for i, j in T.grid(m_dim, n_dim):
                with T.block("init"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    dst[vi, vj] = zero
    @T.prim_func
    def mma_init_impl(c: T.handle) -> None:
        dst = T.match_buffer(
            c, (m_dim, n_dim), dtype, align=64, offset_factor=1, scope="m16n8k8.matrixC"
        )
        with T.block("root"):
            T.reads()
            T.writes(dst[0:m_dim, 0:n_dim])
            tx = T.env_thread("threadIdx.x")
            T.launch_thread(tx, 32)
            for b in range(m_dim // 8):
                for v in T.vectorized(n_dim // 4):
                    dst[b * 8 + tx // 4, (tx % 4) * (n_dim // 4) + v] = zero
    return mma_init_desc, mma_init_impl
def get_mma_load_intrin(
    m_dim: int,
    n_dim: int,
    k_dim: int,
    dtype: str,
    shared_scope: str,
    is_b: bool,
    is_col_major: bool,
) -> Tuple[PrimFunc, PrimFunc]:
    """Generator of mma ldmatrix intrins.

    Returns the (desc, impl) pair that copies one fragment from
    ``shared_scope`` into an "m16n8k8.matrixA"/"m16n8k8.matrixB" buffer;
    the implementation lowers to a single T.ptx_ldmatrix call.
    """
    mma_fragment_scope = f"m16n8k8.matrix{'B' if is_b else 'A'}"
    # B fragments are (k_dim, n_dim); A fragments (m_dim, k_dim); swapped for col-major.
    frag_m, frag_n = (k_dim, n_dim) if is_b else (m_dim, k_dim)
    # B needs a transposed ldmatrix when stored row-major (and vice versa for A).
    trans = (not is_col_major) if is_b else is_col_major
    if is_col_major:
        frag_m, frag_n = frag_n, frag_m
    get_index = get_index_B if is_b else get_index_A
    # Per-thread source offset fed to ldmatrix, as a function of (lane, row stride).
    get_tx_index = (
        (lambda tx, s0: (tx % 8) * s0 + (tx // 8) * 8) if trans else (lambda tx, s0: tx * s0)
    )
    @T.prim_func
    def mma_load_desc(a: T.handle, c: T.handle) -> None:
        src = T.match_buffer(
            a, (frag_m, frag_n), dtype, align=64, offset_factor=1, scope=shared_scope
        )
        dst = T.match_buffer(
            c, (frag_m, frag_n), dtype, align=64, offset_factor=1, scope=mma_fragment_scope
        )
        with T.block("root"):
            T.reads(src[0:frag_m, 0:frag_n])
            T.writes(dst[0:frag_m, 0:frag_n])
            for i, j in T.grid(frag_m, frag_n):
                with T.block("root"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    dst[vi, vj] = src[vi, vj]
    @T.prim_func
    def mma_load_impl(a: T.handle, c: T.handle) -> None:
        # Symbolic strides: s* for the source, d* for the fragment buffer.
        s0 = T.int32()
        s1 = T.int32()
        src = T.match_buffer(
            a,
            (frag_m, frag_n),
            dtype,
            align=64,
            offset_factor=1,
            scope=shared_scope,
            strides=[s0, s1],
        )
        d0 = T.int32()
        d1 = T.int32()
        dst = T.match_buffer(
            c,
            (frag_m, frag_n),
            dtype,
            align=64,
            offset_factor=1,
            scope=mma_fragment_scope,
            strides=[d0, d1],
        )
        with T.block("root"):
            T.reads(src[0:frag_m, 0:frag_n])
            T.writes(dst[0:frag_m, 0:frag_n])
            tx = T.env_thread("threadIdx.x")
            T.launch_thread(tx, 32)
            T.evaluate(
                T.ptx_ldmatrix(
                    trans,
                    4, # Always load 4 matrices
                    ".b16",
                    dst.data,
                    get_index(dst.elem_offset, d0),
                    src.access_ptr("r"),
                    get_tx_index(tx, s0),
                    dtype=dtype,
                )
            )
    return mma_load_desc, mma_load_impl
def get_mma_sync_intrin(
    m_dim: int, n_dim: int, k_dim: int, in_dtype: str, out_dtype: str, b_transposed: bool
) -> Tuple[PrimFunc, PrimFunc]:
    """Generator of mma sync intrins.

    Returns a (desc, impl) pair computing C += A * B on m16n8k8 fragments
    (B optionally stored transposed), implemented with a single ptx_mma.
    """
    # Widen operands when accumulating in a larger dtype (e.g. f16 -> f32).
    def maybe_cast(v):
        if in_dtype != out_dtype:
            return Cast(out_dtype, v)
        return v
    # Swap the two B indices/extents when B is stored transposed, i.e. (n, k).
    def maybe_swap(i, j):
        if b_transposed:
            return j, i
        return i, j
    B_shape_0, B_shape_1 = maybe_swap(k_dim, n_dim)
    # Semantic description: a plain dense matmul accumulation over (i, j, k).
    @T.prim_func
    def mma_sync_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(
            a, (m_dim, k_dim), in_dtype, align=64, offset_factor=1, scope="m16n8k8.matrixA"
        )
        B = T.match_buffer(
            b, (B_shape_0, B_shape_1), in_dtype, align=64, offset_factor=1, scope="m16n8k8.matrixB"
        )
        C = T.match_buffer(
            c, (m_dim, n_dim), out_dtype, align=64, offset_factor=1, scope="m16n8k8.matrixC"
        )
        with T.block("root"):
            T.reads(C[0:m_dim, 0:n_dim], A[0:m_dim, 0:k_dim], B[0:B_shape_0, 0:B_shape_1])
            T.writes(C[0:m_dim, 0:n_dim])
            for i, j, k in T.grid(m_dim, n_dim, k_dim):
                with T.block("m16n8k8_sync"):
                    vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                    B_index_0, B_index_1 = T.meta_var(maybe_swap(vk, vj))
                    C[vi, vj] = C[vi, vj] + maybe_cast(A[vi, vk]) * maybe_cast(
                        B[B_index_0, B_index_1]
                    )
    # Implementation: a single warp-level ptx_mma; fragment offsets are
    # translated by the get_index_* helpers above.
    @T.prim_func
    def mma_sync_impl(a: T.handle, b: T.handle, c: T.handle) -> None:
        a0 = T.int32()
        a1 = T.int32()
        A = T.match_buffer(
            a,
            (m_dim, k_dim),
            in_dtype,
            align=64,
            offset_factor=1,
            scope="m16n8k8.matrixA",
            strides=[a0, a1],
        )
        b0 = T.int32()
        b1 = T.int32()
        B = T.match_buffer(
            b,
            (B_shape_0, B_shape_1),
            in_dtype,
            align=64,
            offset_factor=1,
            scope="m16n8k8.matrixB",
            strides=[b0, b1],
        )
        c0 = T.int32()
        c1 = T.int32()
        C = T.match_buffer(
            c,
            (m_dim, n_dim),
            out_dtype,
            align=64,
            offset_factor=1,
            scope="m16n8k8.matrixC",
            strides=[c0, c1],
        )
        with T.block("root"):
            T.reads(C[0:m_dim, 0:n_dim], A[0:m_dim, 0:k_dim], B[0:B_shape_0, 0:B_shape_1])
            T.writes(C[0:m_dim, 0:n_dim])
            T.evaluate(
                T.ptx_mma(
                    f"m{m_dim}n{n_dim}k{k_dim}",
                    "row",
                    "col",
                    in_dtype,
                    in_dtype,
                    out_dtype,
                    A.data,
                    get_index_A(A.elem_offset, a0),
                    B.data,
                    get_index_B(B.elem_offset, b0),
                    C.data,
                    get_index_C(C.elem_offset, c0),
                    False,
                    dtype=out_dtype,
                )
            )
    return mma_sync_desc, mma_sync_impl
def get_mma_store_dummy_intrin(
    m_dim: int, n_dim: int, k_dim: int, dtype: str
) -> Tuple[PrimFunc, PrimFunc]:
    """Disable mma store intrin for now.

    The desc is a plain fragment -> shared.dyn copy and is also returned as
    the impl, so tensorizing with this intrin keeps the naive copy lowering.
    """
    del k_dim  # unused
    @T.prim_func
    def mma_store_desc(a: T.handle, c: T.handle) -> None:
        src = T.match_buffer(
            a, (m_dim, n_dim), dtype, align=64, offset_factor=1, scope="m16n8k8.matrixC"
        )
        dst = T.match_buffer(
            c, (m_dim, n_dim), dtype, align=64, offset_factor=1, scope="shared.dyn"
        )
        with T.block("root"):
            T.reads(src[0:m_dim, 0:n_dim])
            T.writes(dst[0:m_dim, 0:n_dim])
            for i, j in T.grid(m_dim, n_dim):
                with T.block("m16n8k8_store"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    dst[vi, vj] = src[vi, vj]
    # Intentionally return the desc twice: it doubles as the implementation.
    return mma_store_desc, mma_store_desc
# Accumulator (matrixC) zero-initialization intrinsics.
TensorIntrin.register("mma_init_m16n8k8_f16", *get_mma_init_intrin(16, 8, 8, "float16"))
TensorIntrin.register("mma_init_m16n8k8_f32", *get_mma_init_intrin(16, 8, 8, "float32"))
# ldmatrix loads from dynamic shared memory into the A / B fragment scopes.
TensorIntrin.register(
    "mma_load_m16n8k8_f16_A_shared_dyn",
    *get_mma_load_intrin(32, 32, 8, "float16", "shared.dyn", False, False),
)
TensorIntrin.register(
    "mma_load_m16n8k8_f16_B_shared_dyn",
    *get_mma_load_intrin(32, 32, 8, "float16", "shared.dyn", True, False),
)
# m16n8k8 mma.sync variants: f16 inputs with f16 or f32 accumulation.
TensorIntrin.register(
    "mma_sync_m16n8k8_f16f16f16", *get_mma_sync_intrin(16, 8, 8, "float16", "float16", False)
)
TensorIntrin.register(
    "mma_sync_m16n8k8_f16f16f32", *get_mma_sync_intrin(16, 8, 8, "float16", "float32", False)
)
# Dummy store intrinsics (naive copy lowering; see get_mma_store_dummy_intrin).
TensorIntrin.register(
    "mma_store_m16n8k8_f16_global", *get_mma_store_dummy_intrin(16, 8, 8, "float16")
)
TensorIntrin.register(
    "mma_store_m16n8k8_f32_global", *get_mma_store_dummy_intrin(16, 8, 8, "float32")
)
@register_func("tir.index_map_m16n8k8.matrixC")
def index_map_m16n8k8_matrixC(ind):
    """Map a 2-D matrixC coordinate onto the 4-D m16n8k8 fragment layout."""
    row, col = ind[0], ind[1]
    # 8x8 block coordinate of the element; pairs of block-rows interleave.
    row_block = row // 8
    return convert([row_block // 2, col // 8, row_block % 2, (col % 8) % 2])
| 49,265 | 31.88785 | 100 | py |
tvm | tvm-main/python/tvm/tir/tensor_intrin/hexagon.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,missing-function-docstring
"""Intrinsics for Hexagon tensorization."""
from tvm.script import tir as T
from .. import TensorIntrin
def generate_dma_load_intrin(
    size: int,
    dtype: str,
):
    """Generator of dma_load intrins.

    Returns a (desc, impl) pair copying ``size`` elements of ``dtype`` from
    "global" memory into "global.vtcm" using a synchronous Hexagon DMA copy.
    """
    # Semantic description: a plain 1-D elementwise copy.
    @T.prim_func
    def sync_dma_load_desc(a: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (size), dtype, offset_factor=1, scope="global")
        C = T.match_buffer(c, (size), dtype, offset_factor=1, scope="global.vtcm")
        with T.block("root"):
            T.reads(A[0:size])
            T.writes(C[0:size])
            for i in T.serial(size):
                with T.block("load"):
                    vii = T.axis.remap("S", [i])
                    C[vii] = A[vii]
    # Implementation: one packed call into the Hexagon device API. The two
    # tvm_stack_make_array calls build 1-D DLTensor views of dst (C) and
    # src (A) on the stack for the runtime to consume.
    @T.prim_func
    def sync_dma_load_impl(a: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (size), dtype, offset_factor=1, scope="global")
        C = T.match_buffer(c, (size), dtype, offset_factor=1, scope="global.vtcm")
        with T.block("root"):
            T.reads(A[0:size])
            T.writes(C[0:size])
            T.evaluate(
                T.tvm_call_packed(
                    "device_api.hexagon.dma_copy_dltensor",
                    T.tvm_stack_make_array(
                        T.address_of(C[0], dtype="handle"),
                        T.tvm_stack_make_shape(size, dtype="handle"),
                        0,
                        1,
                        C.dtype,
                        0,
                        dtype="handle",
                    ),
                    T.tvm_stack_make_array(
                        T.address_of(A[0], dtype="handle"),
                        T.tvm_stack_make_shape(size, dtype="handle"),
                        0,
                        1,
                        A.dtype,
                        0,
                        dtype="handle",
                    ),
                    T.cast(size, dtype="int"),
                    False,  # Do not use experimental bypass mode.
                    dtype="int32",
                )
            )
    return sync_dma_load_desc, sync_dma_load_impl
def generate_dot_product_32x4_u8u8i32(mem_scope="global"):
    """Generate (desc, impl) for a 32x4 u8*u8 -> i32 dot product.

    desc: C[i] += sum_k A[k] * B[i, k] for i in [0, 32), k in [0, 4).
    impl: one HVX ``vrmpyub.acc`` instruction; the reinterprets pack four
    uint8 values into each int32 lane, as the intrinsic expects.
    """
    @T.prim_func
    def dot_product_32x4_u8u8i32_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (4,), "uint8", offset_factor=1, scope=mem_scope)
        B = T.match_buffer(b, (32, 4), "uint8", offset_factor=1, scope=mem_scope)
        C = T.match_buffer(c, (32,), "int32", offset_factor=1, scope=mem_scope)
        with T.block("root"):
            T.reads(C[0:32], A[0:4], B[0:32, 0:4])
            T.writes(C[0:32])
            for i in T.serial(0, 32):
                for k in T.serial(0, 4):
                    with T.block("update"):
                        vi, vk = T.axis.remap("SR", [i, k])
                        C[vi] = C[vi] + T.cast(A[vk], "int32") * T.cast(B[vi, vk], "int32")
    @T.prim_func
    def dot_product_32x4_u8u8i32_vrmpy(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (4,), "uint8", offset_factor=1, scope=mem_scope)
        B = T.match_buffer(b, (32, 4), "uint8", offset_factor=1, scope=mem_scope)
        C = T.match_buffer(c, (32,), "int32", offset_factor=1, scope=mem_scope)
        with T.block("root"):
            T.reads(C[0:32], A[0:4], B[0:32, 0:4])
            T.writes(C[0:32])
            # Pack the 4 uint8 values of A into a single int32 scalar and the
            # 32x4 uint8 matrix B into a 32-lane int32 vector.
            A_u8x4 = A.vload([0], "uint8x4")
            A_i32 = T.reinterpret(A_u8x4, dtype="int32")
            B_i8x128 = B.vload([0, 0], dtype="uint8x128")
            B_i32x32 = T.reinterpret(B_i8x128, dtype="int32x32")
            # Accumulating reduce-multiply: one dot product per int32 lane.
            C[T.ramp(T.int32(0), 1, 32)] = T.call_llvm_pure_intrin(
                T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vrmpyub.acc.128B"),
                T.uint32(3),
                C[T.ramp(T.int32(0), 1, 32)],
                B_i32x32,
                A_i32,
                dtype="int32x32",
            )
    return dot_product_32x4_u8u8i32_desc, dot_product_32x4_u8u8i32_vrmpy
def generate_dot_product_32x4_u8i8i32(mem_scope="global"):
    """Generate (desc, impl) for a 32x4 u8*i8 -> i32 dot product.

    desc: C[i] += sum_k A[k] * B[i, k] for i in [0, 32), k in [0, 4).
    impl: one HVX ``vrmpybusv.acc`` instruction, which takes the uint8
    operand broadcast across all 32 lanes.
    """
    @T.prim_func
    def dot_product_32x4_u8i8i32_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (4,), "uint8", offset_factor=1, scope=mem_scope)
        B = T.match_buffer(b, (32, 4), "int8", offset_factor=1, scope=mem_scope)
        C = T.match_buffer(c, (32,), "int32", offset_factor=1, scope=mem_scope)
        with T.block("root"):
            T.reads(C[0:32], A[0:4], B[0:32, 0:4])
            T.writes(C[0:32])
            for i in T.serial(0, 32):
                for k in T.serial(0, 4):
                    with T.block("update"):
                        vi, vk = T.axis.remap("SR", [i, k])
                        C[vi] = C[vi] + T.cast(A[vk], "int32") * T.cast(B[vi, vk], "int32")
    @T.prim_func
    def dot_product_32x4_u8i8i32_vrmpy(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (4,), "uint8", offset_factor=1, scope=mem_scope)
        B = T.match_buffer(b, (32, 4), "int8", offset_factor=1, scope=mem_scope)
        C = T.match_buffer(c, (32,), "int32", offset_factor=1, scope=mem_scope)
        with T.block("root"):
            T.reads(C[0:32], A[0:4], B[0:32, 0:4])
            T.writes(C[0:32])
            # Pack the 4 uint8 values of A into one int32 and the 32x4 int8
            # matrix B into a 32-lane int32 vector.
            A_u8x4 = A.vload([0], "uint8x4")
            A_i32 = T.reinterpret(A_u8x4, dtype="int32")
            B_i8x128 = B.vload([0, 0], dtype="int8x128")
            B_i32x32 = T.reinterpret(B_i8x128, dtype="int32x32")
            # Unlike vrmpyub, vrmpybusv takes a vector A operand, hence the
            # broadcast of the packed scalar across 32 lanes.
            C[T.ramp(T.int32(0), 1, 32)] = T.call_llvm_pure_intrin(
                T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vrmpybusv.acc.128B"),
                T.uint32(3),
                C[T.ramp(T.int32(0), 1, 32)],
                T.broadcast(A_i32, 32),
                B_i32x32,
                dtype="int32x32",
            )
    return dot_product_32x4_u8i8i32_desc, dot_product_32x4_u8i8i32_vrmpy
def generate_dot_product_32x2_i16i16i32(mem_scope="global"):
    """Generate (desc, impl) for a 32x2 i16*i16 -> i32 dot product.

    desc: C[i] += sum_k A[k] * B[i, k] for i in [0, 32), k in [0, 2).
    impl: one HVX ``vdmpyhvsat.acc`` instruction; the reinterprets pack two
    int16 values into each int32 lane, as the intrinsic expects.
    """
    @T.prim_func
    def dot_product_32x2_i16i16i32_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (2,), "int16", offset_factor=1, scope=mem_scope)
        B = T.match_buffer(b, (32, 2), "int16", offset_factor=1, scope=mem_scope)
        C = T.match_buffer(c, (32,), "int32", offset_factor=1, scope=mem_scope)
        with T.block("root"):
            T.reads(C[0:32], A[0:2], B[0:32, 0:2])
            T.writes(C[0:32])
            for i in T.serial(0, 32):
                for k in T.serial(0, 2):
                    with T.block("update"):
                        vi, vk = T.axis.remap("SR", [i, k])
                        C[vi] = C[vi] + T.cast(A[vk], "int32") * T.cast(B[vi, vk], "int32")
    @T.prim_func
    def dot_product_32x2_i16i16i32_vdmpy(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (2,), "int16", offset_factor=1, scope=mem_scope)
        B = T.match_buffer(b, (32, 2), "int16", offset_factor=1, scope=mem_scope)
        C = T.match_buffer(c, (32,), "int32", offset_factor=1, scope=mem_scope)
        with T.block("root"):
            T.reads(C[0:32], A[0:2], B[0:32, 0:2])
            T.writes(C[0:32])
            # Pack the 2 int16 values of A into one int32 and the 32x2 int16
            # matrix B into a 32-lane int32 vector.
            A_i16x2 = A.vload([0], "int16x2")
            A_i32 = T.reinterpret(A_i16x2, dtype="int32")
            B_i16x64 = B.vload([0, 0], dtype="int16x64")
            B_i32x32 = T.reinterpret(B_i16x64, dtype="int32x32")
            C[T.ramp(T.int32(0), 1, 32)] = T.call_llvm_pure_intrin(
                T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vdmpyhvsat.acc.128B"),
                T.uint32(3),
                C[T.ramp(T.int32(0), 1, 32)],
                # Use the T.broadcast op builder (was T.Broadcast, the raw node
                # constructor) for consistency with the vrmpy intrins above;
                # both produce the same broadcast expression.
                T.broadcast(A_i32, 32),
                B_i32x32,
                dtype="int32x32",
            )
    return dot_product_32x2_i16i16i32_desc, dot_product_32x2_i16i16i32_vdmpy
# vrmpy / vdmpy dot-product intrinsics operating on "global" buffers.
VRMPY_u8u8i32_INTRIN = "dot_32x4_u8u8i32_vrmpy"
TensorIntrin.register(VRMPY_u8u8i32_INTRIN, *generate_dot_product_32x4_u8u8i32())
VRMPY_u8i8i32_INTRIN = "dot_32x4_u8i8i32_vrmpy"
TensorIntrin.register(VRMPY_u8i8i32_INTRIN, *generate_dot_product_32x4_u8i8i32())
VDMPY_i16i16i32_INTRIN = "dot_product_32x2_i16i16i32_vdmpy"
TensorIntrin.register(VDMPY_i16i16i32_INTRIN, *generate_dot_product_32x2_i16i16i32())
# Variants of the vrmpy intrinsics reading from VTCM instead of global memory.
VRMPY_u8u8i32_VTCM_INTRIN = "dot_32x4_u8u8i32_vtcm_vrmpy"
TensorIntrin.register(VRMPY_u8u8i32_VTCM_INTRIN, *generate_dot_product_32x4_u8u8i32("global.vtcm"))
VRMPY_u8i8i32_VTCM_INTRIN = "dot_32x4_u8i8i32_vtcm_vrmpy"
TensorIntrin.register(VRMPY_u8i8i32_VTCM_INTRIN, *generate_dot_product_32x4_u8i8i32("global.vtcm"))
# Synchronous 128-element DMA loads into VTCM.
DMA_READ_128_u8 = "dma_read_128_u8"
TensorIntrin.register(DMA_READ_128_u8, *generate_dma_load_intrin(128, "uint8"))
DMA_READ_128_i8 = "dma_read_128_i8"
TensorIntrin.register(DMA_READ_128_i8, *generate_dma_load_intrin(128, "int8"))
| 9,555 | 41.096916 | 99 | py |
tvm | tvm-main/python/tvm/tir/tensor_intrin/arm_cpu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,missing-function-docstring
"""Intrinsics for ARM tensorization."""
from tvm.script import tir as T
from .. import TensorIntrin
from .dot_product_common import DP4A_INTRIN # pylint: disable=unused-import
# TODO(masahi): Parametrize the TVMScript description of dot product by
# shape and dtype, and share the common description with x86.
# Semantic description of the 4x4 int8 dot product:
# C[i] += sum_k A[k] * B[i, k] for i in [0, 4), k in [0, 4).
@T.prim_func
def neon_4x4_i8i8i32_desc(
    A: T.Buffer((4,), "int8", offset_factor=1),
    B: T.Buffer((4, 4), "int8", offset_factor=1),
    C: T.Buffer((4,), "int32", offset_factor=1),
) -> None:
    with T.block("root"):
        T.reads(C[0:4], A[0:4], B[0:4, 0:4])
        T.writes(C[0:4])
        for i in T.serial(0, 4):
            for k in T.serial(0, 4):
                with T.block("update"):
                    vi, vk = T.axis.remap("SR", [i, k])
                    C[vi] = C[vi] + T.cast(A[vk], "int32") * T.cast(B[vi, vk], "int32")
# NEON implementation of the 4x4 dot product using smull / saddlp / addp.
@T.prim_func
def neon_4x4_i8i8i32_impl(
    A: T.Buffer((4,), "int8", offset_factor=1),
    B: T.Buffer((4, 4), "int8", offset_factor=1),
    C: T.Buffer((4,), "int32", offset_factor=1),
) -> None:
    with T.block("root"):
        T.reads(C[0:4], A[0:4], B[0:4, 0:4])
        T.writes(C[0:4])
        # Duplicate the 4 int8 values of A into both halves of an int8x8
        # vector (via an int32 broadcast), so one smull multiplies A against
        # two rows of B at once.
        A_int8 = A.vload([0], "int8x4")
        re_int32 = T.reinterpret(A_int8, dtype="int32")
        vec_ai32 = T.broadcast(re_int32, 2)
        vec_a = T.reinterpret(vec_ai32, dtype="int8x8")
        vec_b = B.vload([0, 0], dtype="int8x16")
        # TODO(masahi): Remove duplication when inlined function call is supported
        # Rows 0-1 of B: widening multiply (smull, int8x8 -> int16x8), then
        # signed pairwise add-long (saddlp) reducing to int32x4.
        vec_b_low = T.vectorlow(vec_b, dtype="int8x8")
        multiply_low = T.call_llvm_pure_intrin(
            T.llvm_lookup_intrinsic_id("llvm.aarch64.neon.smull.v8i16"),
            T.uint32(2),
            vec_a,
            vec_b_low,
            dtype="int16x8",
        )
        pairwise_reduction_low = T.call_llvm_pure_intrin(
            T.llvm_lookup_intrinsic_id("llvm.aarch64.neon.saddlp.v4i32.v8i16"),
            T.uint32(1),
            multiply_low,
            dtype="int32x4",
        )
        # Rows 2-3 of B: same smull + saddlp pipeline on the high half.
        vec_b_high = T.vectorhigh(vec_b, dtype="int8x8")
        multiply_high = T.call_llvm_pure_intrin(
            T.llvm_lookup_intrinsic_id("llvm.aarch64.neon.smull.v8i16"),
            T.uint32(2),
            vec_a,
            vec_b_high,
            dtype="int16x8",
        )
        pairwise_reduction_high = T.call_llvm_pure_intrin(
            T.llvm_lookup_intrinsic_id("llvm.aarch64.neon.saddlp.v4i32.v8i16"),
            T.uint32(1),
            multiply_high,
            dtype="int32x4",
        )
        # A final pairwise add (addp) combines adjacent partial sums into the
        # four complete dot products, accumulated into C.
        C[T.ramp(T.int32(0), 1, 4)] += T.call_llvm_pure_intrin(
            T.llvm_lookup_intrinsic_id("llvm.aarch64.neon.addp.v4i32"),
            T.uint32(2),
            pairwise_reduction_low,
            pairwise_reduction_high,
            dtype="int32x4",
        )
def get_dotprod_intrin(in_dtype, out_dtype):
    """Generate (desc, impl) for a 4x4 dot product C[i] += sum_k A[k] * B[i, k]
    using the AArch64 dot-product instructions (sdot/udot)."""
    if in_dtype == "uint8":
        instr = "udot.v4u32.v16u8"
    else:  # if in_dtype == "int8"
        instr = "sdot.v4i32.v16i8"
    in_dtype_x4 = f"{in_dtype}x4"
    out_dtype_x4 = f"{out_dtype}x4"
    in_dtype_x16 = f"{in_dtype}x16"
    # Semantic description: the plain 4x4 reduction with widening casts.
    @T.prim_func
    def dot_prod_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (4,), dtype=in_dtype, offset_factor=1)
        B = T.match_buffer(b, (4, 4), dtype=in_dtype, offset_factor=1)
        C = T.match_buffer(c, (4,), dtype=out_dtype, offset_factor=1)
        with T.block("root"):
            T.reads(C[0:4], A[0:4], B[0:4, 0:4])
            T.writes(C[0:4])
            for i in T.serial(0, 4):
                for k in T.serial(0, 4):
                    with T.block("update"):
                        vi, vk = T.axis.remap("SR", [i, k])
                        C[vi] = C[vi] + T.cast(A[vk], dtype=out_dtype) * T.cast(
                            B[vi, vk], dtype=out_dtype
                        )
    # Implementation: broadcast A across all 16 input lanes and issue a
    # single sdot/udot accumulating into the 4-lane output vector.
    @T.prim_func
    def dot_prod_impl(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (4,), dtype=in_dtype, offset_factor=1)
        B = T.match_buffer(b, (4, 4), dtype=in_dtype, offset_factor=1)
        C = T.match_buffer(c, (4,), dtype=out_dtype, offset_factor=1)
        with T.block("root"):
            T.reads(C[0:4], A[0:4], B[0:4, 0:4])
            T.writes(C[0:4])
            A_i8x4 = A.vload([0], in_dtype_x4)
            A_i32 = T.reinterpret(A_i8x4, dtype=out_dtype)
            vec_ai32 = T.broadcast(A_i32, 4)
            vec_a = T.reinterpret(vec_ai32, dtype=in_dtype_x16)
            vec_b = B.vload([0, 0], dtype=in_dtype_x16)
            vec_c = C.vload([0], dtype=out_dtype_x4)
            C[T.ramp(T.int32(0), 1, 4)] = T.call_llvm_pure_intrin(
                T.llvm_lookup_intrinsic_id(f"llvm.aarch64.neon.{instr}"),
                T.uint32(3),
                vec_c,
                vec_a,
                vec_b,
                dtype=out_dtype_x4,
            )
    return dot_prod_desc, dot_prod_impl
# Public intrin names: a plain-NEON fallback and the sdot/udot variants.
ARM_DOT_4x4_i8_NEON_INTRIN = "dot_4x4_i8i8s32_neon"
ARM_DOT_4x4_i8_SDOT_INTRIN = "dot_4x4_i8i8s32_sdot"
ARM_DOT_4x4_u8_UDOT_INTRIN = "dot_4x4_u8u8u32_udot"
ARM_DOT_4x4_u8_HDOT_INTRIN = "dot_4x4_u8u8i32_hdot"
TensorIntrin.register(ARM_DOT_4x4_i8_NEON_INTRIN, neon_4x4_i8i8i32_desc, neon_4x4_i8i8i32_impl)
TensorIntrin.register(ARM_DOT_4x4_i8_SDOT_INTRIN, *get_dotprod_intrin("int8", "int32"))
TensorIntrin.register(ARM_DOT_4x4_u8_UDOT_INTRIN, *get_dotprod_intrin("uint8", "uint32"))
TensorIntrin.register(ARM_DOT_4x4_u8_HDOT_INTRIN, *get_dotprod_intrin("uint8", "int32"))
| 6,316 | 35.514451 | 95 | py |
tvm | tvm-main/python/tvm/tir/tensor_intrin/dot_product_common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,missing-function-docstring
"""Dot product related intrinsics."""
from tvm.script import tir as T
from .. import TensorIntrin
# Semantic description of dp4a: C[0] += sum_i A[i] * B[i] over four int8 lanes.
@T.prim_func
def dp4a_desc(
    A: T.Buffer((4,), "int8", offset_factor=1, align=4, scope="shared"),
    B: T.Buffer((4,), "int8", offset_factor=1, align=4, scope="shared"),
    C: T.Buffer((1,), "int32", offset_factor=1, align=4, scope="local"),
) -> None:
    with T.block("root"):
        T.reads(C[0], A[0:4], B[0:4])
        T.writes(C[0])
        for i in range(0, 4):
            with T.block("update"):
                vi = T.axis.remap("R", [i])
                C[0] = C[0] + T.cast(A[vi], "int32") * T.cast(B[vi], "int32")
# Implementation mapping the reduction onto the CUDA __dp4a intrinsic:
# a 4-way int8 dot product accumulated into an int32.
@T.prim_func
def dp4a_impl(
    A: T.Buffer((4,), "int8", offset_factor=1, align=4, scope="shared"),
    B: T.Buffer((4,), "int8", offset_factor=1, align=4, scope="shared"),
    C: T.Buffer((1,), "int32", offset_factor=1, align=4, scope="local"),
) -> None:
    with T.block("root"):
        T.reads(C[0], A[0:4], B[0:4])
        T.writes(C[0])
        # Both operands are loaded as packed int8x4; the third argument is
        # the accumulator seed (0), with the running sum kept in C[0].
        C[0] += T.call_pure_extern(
            "__dp4a", A.vload([0], "int8x4"), B.vload([0], "int8x4"), T.int32(0), dtype="int32"
        )
# Register the dp4a tensor intrinsic under its public name.
DP4A_INTRIN = "dp4a"
TensorIntrin.register(DP4A_INTRIN, dp4a_desc, dp4a_impl)
| 2,047 | 35.571429 | 95 | py |
tvm | tvm-main/python/tvm/tir/tensor_intrin/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import
"""Intrinsics for tensorization."""
from . import arm_cpu, cuda, rocm, x86, hexagon
| 901 | 44.1 | 62 | py |
tvm | tvm-main/python/tvm/tir/usmp/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""USMP Utilities and Data Structures"""
# pylint: disable=invalid-name
from typing import Optional, List
import tvm
from tvm._ffi import register_object
from tvm.runtime import Object
from . import _ffi_api
from ...ir.memory_pools import PoolInfo
# The allocate node attribute to indicate candidate memory pools.
# This needs to be kept in sync with CANDIDATE_MEMORY_POOL_ATTR in
# include/tvm/tir/usmp/utils.h
CANDIDATE_MEMORY_POOL_ATTR = "candidate_memory_pools"
def use_workspace_io_is_enabled() -> bool:
    """Return True when the "tir.usmp.use_workspace_io" pass-context option
    (placing I/O tensors in the workspace) is enabled."""
    current_ctx = tvm.transform.PassContext.current()
    flag = current_ctx.config.get("tir.usmp.use_workspace_io", False)
    return bool(flag)
@register_object("tir.usmp.BufferInfo")
class BufferInfo(Object):
    """BufferInfo object holds information related to buffers
    that are associated with tir.allocates and tir.allocate_consts
    that will be used with USMP
    Parameters
    ----------
    name_hint : str
        The name associated with the buffer (derived from TIR)
    size_bytes : int
        The size in bytes
    pool_candidates : List[PoolInfo]
        The list of candidates pools this buffer could be placed
    alignment : Optional[int]
        The byte alignment required in the workspace memory
    """
    def __init__(
        self,
        name_hint: str,
        size_bytes: int,
        pool_candidates: List[PoolInfo],
        alignment: Optional[int] = None,
    ):
        # Thin FFI wrapper: construction is delegated to the C++ BufferInfo
        # constructor; a None alignment is forwarded as-is.
        self.__init_handle_by_constructor__(
            _ffi_api.BufferInfo,  # type: ignore # pylint: disable=no-member
            name_hint,
            size_bytes,
            pool_candidates,
            alignment,
        )
    def set_conflicts(self, conflicts: list):
        """Sets the conflicting array of buffer info objects

        Parameters
        ----------
        conflicts : list
            BufferInfo objects that conflict with this buffer
        """
        _ffi_api.BufferInfoSetConflicts(self, conflicts)
@register_object("tir.usmp.PoolAllocation")
class PoolAllocation(Object):
    """PoolAllocation object holds information related to an allocation
    that indicates an offset in a pool
    Parameters
    ----------
    pool_info : PoolInfo
        The PoolInfo to which this allocation corresponds to
    byte_offset : int
        The offset in the pool where the allocate node should be placed
    """
    def __init__(self, pool_info: PoolInfo, byte_offset: int):
        # Thin FFI wrapper around the C++ PoolAllocation constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.PoolAllocation,  # type: ignore # pylint: disable=no-member
            pool_info,
            byte_offset,
        )
| 3,317 | 30.301887 | 80 | py |
tvm | tvm-main/python/tvm/tir/usmp/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.tir.usmp"""
import tvm._ffi
# Expose the C++-registered "tir.usmp.*" global functions on this module.
tvm._ffi._init_api("tir.usmp", __name__)
| 876 | 38.863636 | 62 | py |
tvm | tvm-main/python/tvm/tir/usmp/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import, redefined-builtin
"""Namespace for Unified Static Memory Planner"""
from . import analysis
from . import transform
from .utils import BufferInfo
| 964 | 40.956522 | 62 | py |
tvm | tvm-main/python/tvm/tir/usmp/analysis/analysis.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""USMP Analysis Python API for passes"""
# pylint: disable=invalid-name
from . import _ffi_api
from ...function import PrimFunc
from ....ir.module import IRModule
def extract_buffer_info(main_func: PrimFunc, mod: IRModule):
    """Extract BufferInfo objects for USMP memory planning.

    (The previous summary, "Convert Parallel For Loop to Serial.", was a
    copy-paste error and described a different pass.)
    Analyzes the main function and collects per-allocation buffer
    information to be consumed by USMP planning algorithms.

    Parameters
    ----------
    main_func: tvm.tir.PrimFunc
        The main function containing calls to operator PrimFuncs.
    mod : tvm.ir.IRModule
        The full IRModule containing all PrimFuncs
    Returns
    -------
    Map<tir::Stmt, BufferInfo>
        extracted buffer info objects
    """
    return _ffi_api.extract_buffer_info(main_func, mod)
| 1,420 | 34.525 | 65 | py |
tvm | tvm-main/python/tvm/tir/usmp/analysis/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.tir.usmp.analysis"""
import tvm._ffi
# Expose the C++-registered "tir.usmp.analysis.*" functions on this module.
tvm._ffi._init_api("tir.usmp.analysis", __name__)
| 894 | 39.681818 | 62 | py |
tvm | tvm-main/python/tvm/tir/usmp/analysis/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import, redefined-builtin
"""Namespace for Unified Static Memory Planner"""
from .analysis import extract_buffer_info
| 929 | 43.285714 | 62 | py |
tvm | tvm-main/python/tvm/tir/usmp/transform/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""USMP Transform Python API for passes"""
# pylint: disable=invalid-name
from typing import Dict
import tvm
from tvm.tir import Stmt
from tvm.tir.usmp.utils import PoolAllocation
from . import _ffi_api
def convert_pool_allocations_to_offsets(
    pool_allocations: Dict[Stmt, PoolAllocation], emit_tvmscript_printable: bool = False
) -> tvm.transform.Pass:
    """Convert pool allocations to Load nodes with offsets from pools.

    Thin wrapper constructing the C++-registered pass with the given
    per-allocation placement decisions.
    Parameters
    ----------
    pool_allocations : Dict[Stmt, PoolAllocation]
        Allocate or AllocateConst node to pool allocation mapping
    emit_tvmscript_printable : bool
        A toggle to emit TVMScript printable IRModule for unit tests
        removing all attributes that should be attached for integration
    Returns
    -------
    ret: tvm.transform.Pass
        The registered pass that converts the allocations to offsets.
    """
    return _ffi_api.ConvertPoolAllocationsToOffsets(pool_allocations, emit_tvmscript_printable)
| 1,769 | 36.659574 | 95 | py |
tvm | tvm-main/python/tvm/tir/usmp/transform/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.tir.usmp.transform"""
import tvm._ffi
# Expose the C++-registered "tir.usmp.transform.*" functions on this module.
# (Docstring above previously said ".analysis" — a copy-paste error; this
# module registers the transform namespace, as the call below shows.)
tvm._ffi._init_api("tir.usmp.transform", __name__)
| 895 | 39.727273 | 62 | py |
tvm | tvm-main/python/tvm/tir/usmp/transform/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import, redefined-builtin
"""Namespace for Unified Static Memory Planner"""
from .transform import convert_pool_allocations_to_offsets
| 946 | 44.095238 | 62 | py |
tvm | tvm-main/python/tvm/tir/schedule/instruction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Schedule instructions each corresponds to a schedule primitive"""
from typing import TYPE_CHECKING, Any, List, Union
from tvm._ffi import register_object as _register_object
from tvm.runtime import Object
from . import _ffi_api
if TYPE_CHECKING:
from .schedule import RAND_VAR_TYPE
INPUT_RV_TYPE = Union[RAND_VAR_TYPE, float, int, str, None] # pylint: disable=invalid-name
OUTPUT_RV_TYPE = Union[RAND_VAR_TYPE] # pylint: disable=invalid-name
ATTR_TYPE = Any
else:
INPUT_RV_TYPE = OUTPUT_RV_TYPE = ATTR_TYPE = Any
@_register_object("tir.InstructionKind")
class InstructionKind(Object):
    """Kind of an instruction, e.g. Split, Reorder, etc.
    Besides the name, every kind of instruction has its own properties, including:
    1) A boolean indicating if the instruction is pure, i.e. change nothing in the schedule state
    2) A functor that applies the instruction to a TensorIR schedule
    3) A functor that converts the instruction to a statement in python syntax
    4) A functor that serialize its attributes to JSON
    5) A functor that deserialize its attributes from JSON
    Unlike `tvm.ir.op`, `InstructionKind` doesn't support unstructured properties,
    mainly because there is no such usecase yet to add any other property.
    Attributes
    ----------
    name : str
        The name of a kind of instructions
    Note
    ----
    The functor properties are not exposed on python side at the moment
    """
    # Registered name of this instruction kind, mirrored from the C++ object.
    name: str
    @property
    def is_pure(self) -> bool:
        """Indicates if the instruction is pure, i.e. removing it alone doesn't mutate the schedule
        state. For example, the instruction `GetBlock` is pure because it changes
        nothing, while `ComputeInline` is not because removing it leads to a different resulting
        schedule.
        Returns
        -------
        pure : bool
            The boolean flag indicating if the instruction is pure
        """
        # `_is_pure` is a field of the underlying C++ object; coerce to a Python bool.
        return bool(self._is_pure)
    @staticmethod
    def get(name: str) -> "InstructionKind":
        """Retrieve an InstructionKind using its name
        Parameters
        ----------
        name : str
            The registered name of the InstructionKind
        Returns
        -------
        kind : InstructionKind
            The InstructionKind retrieved
        """
        # Look up the kind in the global C++ registry via FFI.
        return _ffi_api.InstructionKindGet(name) # type: ignore # pylint: disable=no-member
@_register_object("tir.Instruction")
class Instruction(Object):
    """Schedule instructions each corresponds to a schedule primitive
    Attributes
    ----------
    kind : InstructionKind
        The kind of the instruction
    inputs : List[INPUT_RV_TYPE]
        The input random variables of the instruction,
        and the type of each element can be one of the following:
        - BlockRV
        - LoopRV
        - ExprRV
        - float
        - int
        - str
        - None
    attrs : List[ATTR_TYPE]
        The attributes of the instruction. Similar to attributes of an operator,
        attributes of an instruction are arbitrary constant metadata required by the instructions.
        For example, the name of the block to be retrieved in `GetBlock`.
    outputs : List[OUTPUT_RV_TYPE]
        The output random variables of the instruction,
        and the type of each element can be one of the following:
        - BlockRV
        - LoopRV
        - ExprRV, atomic variables only, won't be constants or composite PrimExpr
    """
    # All four attributes are mirrored from the underlying C++ Instruction object.
    kind: InstructionKind
    inputs: List[INPUT_RV_TYPE]
    attrs: List[ATTR_TYPE]
    outputs: List[OUTPUT_RV_TYPE]
    def __init__(
        self,
        kind: InstructionKind,
        inputs: List[INPUT_RV_TYPE],
        attrs: List[ATTR_TYPE],
        outputs: List[OUTPUT_RV_TYPE],
    ) -> None:
        """Constructor
        Parameters
        ----------
        kind : InstructionKind
            The kind of the instruction
        inputs : List[INPUT_RV_TYPE]
            The input random variables of the instruction,
            and the type of each element can be one of the following:
            - BlockRV
            - LoopRV
            - ExprRV
            - float
            - int
            - str
            - None
        attrs : List[ATTR_TYPE]
            The attributes of the instruction. Similar to attributes of an operator,
            attributes of an instruction are arbitrary constant metadata required by the
            instructions. For example, the name of the block to be retrieved in `GetBlock`.
        outputs : List[OUTPUT_RV_TYPE]
            The output random variables of the instruction,
            and the type of each element can be one of the following:
            - BlockRV
            - LoopRV
            - ExprRV, atomic variables only, won't be constants or composite PrimExpr
        """
        # Arguments are forwarded positionally to the C++ constructor; order matters.
        self.__init_handle_by_constructor__(
            _ffi_api.Instruction,  # type: ignore # pylint: disable=no-member
            kind,
            inputs,
            attrs,
            outputs,
        )
| 5,838 | 33.964072 | 99 | py |
tvm | tvm-main/python/tvm/tir/schedule/_type_checker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Type checking functionality"""
import collections
import collections.abc
import functools
import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union
import typing
def _is_none_type(type_: Any) -> bool:
return type_ is None or type_ is type(None)
def _get_subtypes(type_: Any) -> Any:
# TODO(@tvm-team): This is hot fix to support subtle difference between python versions
# Would be nice to find a better way if possible
if hasattr(typing, "_SpecialGenericAlias"):
if hasattr(typing, "get_args"):
subtypes = typing.get_args(type_) # type: ignore
else:
subtypes = type_.__args__
else:
subtypes = type_.__args__
return subtypes
# `_Subtype` provides extractors that peel one level off a typing annotation.
# Each extractor returns the list of subtypes when `type_` matches its category,
# or None otherwise. Two implementations exist because typing's internal
# representation changed between Python versions.
if hasattr(typing, "_GenericAlias"):
    # For python versions 3.7 onward, check the __origin__ attribute.
    class _Subtype:
        @staticmethod
        def _origin(type_: Any) -> Any:
            # Resolve the runtime origin (list, dict, tuple, Union, ...) of a
            # generic alias, handling both alias flavors used by newer typing.
            if hasattr(typing, "_SpecialGenericAlias"):
                if isinstance(type_, typing._SpecialGenericAlias): # type: ignore # pylint: disable=protected-access
                    return type_.__origin__
            if isinstance(type_, typing._GenericAlias): # type: ignore # pylint: disable=protected-access
                return type_.__origin__
            return None
        @staticmethod
        def list_(type_: Any) -> Any:
            if _Subtype._origin(type_) is list:
                if hasattr(typing, "get_args"):
                    (subtype,) = typing.get_args(type_) # type: ignore
                else:
                    (subtype,) = type_.__args__
                return [subtype]
            return None
        @staticmethod
        def dict_(type_: Any) -> Any:
            if _Subtype._origin(type_) is dict:
                if hasattr(typing, "get_args"):
                    (ktype, vtype) = typing.get_args(type_) # type: ignore
                else:
                    (ktype, vtype) = type_.__args__
                return [ktype, vtype]
            return None
        @staticmethod
        def tuple_(type_: Any) -> Optional[List[type]]:
            if _Subtype._origin(type_) is tuple:
                subtypes = _get_subtypes(type_)
                return subtypes
            return None
        # Optional[T] is represented as Union[T, None]; match only that 2-arm form.
        @staticmethod
        def optional( # pylint: disable=missing-function-docstring
            type_: Any,
        ) -> Optional[List[type]]:
            if _Subtype._origin(type_) is Union:
                subtypes = _get_subtypes(type_)
                if len(subtypes) == 2 and _is_none_type(subtypes[1]):
                    return [subtypes[0]]
            return None
        # Any Union that is NOT an Optional (see above) is treated as a union.
        @staticmethod
        def union(type_: Any) -> Optional[List[type]]: # pylint: disable=missing-function-docstring
            if _Subtype._origin(type_) is Union:
                subtypes = _get_subtypes(type_)
                if len(subtypes) != 2 or not _is_none_type(subtypes[1]):
                    return list(subtypes)
            return None
        @staticmethod
        def callable(type_: Any) -> Optional[List[type]]:
            if _Subtype._origin(type_) is collections.abc.Callable:
                subtypes = _get_subtypes(type_)
                return subtypes
            return None
elif hasattr(typing, "_Union"):
    # For python 3.6 and below, check the __name__ attribute, or CallableMeta.
    class _Subtype: # type: ignore
        @staticmethod
        def list_(type_: Any) -> Optional[List[type]]:
            if isinstance(type_, typing.GenericMeta): # type: ignore # pylint: disable=no-member
                if type_.__name__ == "List":
                    (subtype,) = type_.__args__ # type: ignore # pylint: disable=no-member
                    return [subtype]
            return None
        @staticmethod
        def dict_(type_: Any) -> Optional[List[type]]:
            if isinstance(type_, typing.GenericMeta): # type: ignore # pylint: disable=no-member
                if type_.__name__ == "Dict":
                    (ktype, vtype) = type_.__args__ # type: ignore # pylint: disable=no-member
                    return [ktype, vtype]
            return None
        @staticmethod
        def tuple_(type_: Any) -> Optional[List[type]]:
            if isinstance(type_, typing.GenericMeta): # type: ignore # pylint: disable=no-member
                if type_.__name__ == "Tuple":
                    subtypes = type_.__args__ # type: ignore # pylint: disable=no-member
                    return subtypes
            return None
        @staticmethod
        def optional(type_: Any) -> Optional[List[type]]:
            if isinstance(type_, typing._Union): # type: ignore # pylint: disable=no-member,protected-access
                subtypes = type_.__args__
                if len(subtypes) == 2 and _is_none_type(subtypes[1]):
                    return [subtypes[0]]
            return None
        @staticmethod
        def union(type_: Any) -> Optional[List[type]]:
            if isinstance(type_, typing._Union): # type: ignore # pylint: disable=no-member,protected-access
                subtypes = type_.__args__
                if len(subtypes) != 2 or not _is_none_type(subtypes[1]):
                    return list(subtypes)
            return None
        @staticmethod
        def callable(type_: Any) -> Optional[List[type]]:
            if isinstance(type_, typing.CallableMeta): # type: ignore # pylint: disable=no-member,protected-access
                subtypes = type_.__args__
                return subtypes
            return None
def _dispatcher(type_: Any) -> Tuple[str, List[type]]:
    """Classify a typing annotation into a category tag plus its subtypes.

    Returns a pair `(tag, subtypes)` where `tag` is one of "none", "list",
    "dict", "tuple", "optional", "union", "callable" or "atomic".
    """
    if _is_none_type(type_):
        return "none", []
    # Probe the extractors in a fixed order; "optional" must precede "union"
    # because Optional[T] is itself a Union.
    probes = (
        ("list", _Subtype.list_),
        ("dict", _Subtype.dict_),
        ("tuple", _Subtype.tuple_),
        ("optional", _Subtype.optional),
        ("union", _Subtype.union),
        ("callable", _Subtype.callable),
    )
    for tag, extract in probes:
        subtypes = extract(type_)
        if subtypes is not None:
            return tag, subtypes
    # Anything unmatched is treated as a plain (atomic) type.
    return "atomic", [type_]
def callable_str(*subtypes):
    """Render a Callable annotation (arg types + return type) as source text."""
    if not subtypes:
        return "Callable"
    # The last subtype is the return type; everything before it is an argument.
    arg_types = subtypes[:-1]
    return_type = subtypes[-1]
    rendered_args = ", ".join(map(_type2str, arg_types))
    return f"Callable[[{rendered_args}], {_type2str(return_type)}]"
# Dispatch table: maps a category tag (as produced by `_dispatcher`) to a
# formatter that renders the type and its subtypes as annotation source text.
_TYPE2STR: Dict[Any, Callable] = {
    "none": lambda: "None",
    "atomic": lambda t: str(t.__name__),
    "callable": callable_str,
    "list": lambda t: f"List[{_type2str(t)}]",
    "dict": lambda k, v: f"Dict[{_type2str(k)}, {_type2str(v)}]",
    "tuple": lambda *t: f"Tuple[{', '.join([_type2str(x) for x in t])}]",
    "optional": lambda t: f"Optional[{_type2str(t)}]",
    "union": lambda *t: f"Union[{', '.join([_type2str(x) for x in t])}]",
}
def _type2str(type_: Any) -> str:
    """Render a typing annotation as human-readable source text."""
    category, subtypes = _dispatcher(type_)
    formatter = _TYPE2STR[category]
    return formatter(*subtypes)
def _val2type(value: Any):
if isinstance(value, list):
types = set(_val2type(x) for x in value)
if len(types) == 1:
return List[types.pop()] # type: ignore
return List[Union[tuple(types)]] # type: ignore
if isinstance(value, tuple):
types = tuple(_val2type(x) for x in value) # type: ignore
return Tuple[types]
return type(value)
def _type_check_err(x: Any, name: str, expected: Any) -> str:
    """Format the type-mismatch error message for argument *name*."""
    wanted = _type2str(expected)
    actual = _type2str(_val2type(x))
    return f'"{name}" has wrong type. Expected "{wanted}", but gets: "{actual}"'
def _type_check_vtable() -> Dict[str, Callable]:
def _type_check_none(v: Any, name: str) -> Optional[str]:
return None if v is None else _type_check_err(v, name, None)
def _type_check_atomic(v: Any, name: str, type_: Any) -> Optional[str]:
return None if isinstance(v, type_) else _type_check_err(v, name, type_)
def _type_check_callable(v: Any, name: str, *_subtypes: Any) -> Optional[str]:
# Current implementation only validates that the argument is
# callable, and doesn't validate the arguments accepted by the
# callable, if any.
return None if callable(v) else _type_check_err(v, name, Callable)
def _type_check_list(v: List[Any], name: str, type_: Any) -> Optional[str]:
if not isinstance(v, (list, tuple)):
return _type_check_err(v, name, list)
for i, x in enumerate(v):
error_msg = _type_check(x, f"{name}[{i}]", type_)
if error_msg is not None:
return error_msg
return None
def _type_check_dict(dict_obj: Dict[Any, Any], name: str, *types: Any) -> Optional[str]:
ktype_, vtype_ = types
if not isinstance(dict_obj, dict):
return _type_check_err(dict_obj, name, dict)
for k, v in dict_obj.items():
error_msg = _type_check(k, f"{name}[{k}]", ktype_)
if error_msg is not None:
return error_msg
error_msg = _type_check(v, f"{name}[{k}]", vtype_)
if error_msg is not None:
return error_msg
return None
def _type_check_tuple(v: Any, name: str, *types: Any) -> Optional[str]:
if not isinstance(v, tuple):
return _type_check_err(v, name, Tuple[types])
if len(types) != len(v):
return _type_check_err(v, name, Tuple[types])
for i, (x, type_) in enumerate(zip(v, types)):
error_msg = _type_check(x, f"{name}[{i}]", type_)
if error_msg is not None:
return error_msg
return None
def _type_check_optional(v: Any, name: str, type_: Any) -> Optional[str]:
return None if v is None else _type_check(v, name, type_)
def _type_check_union(v: Any, name: str, *types: Any) -> Optional[str]:
for type_ in types:
error_msg = _type_check(v, name, type_)
if error_msg is None:
return None
return _type_check_err(v, name, Union[types])
return {
"none": _type_check_none,
"atomic": _type_check_atomic,
"callable": _type_check_callable,
"list": _type_check_list,
"dict": _type_check_dict,
"tuple": _type_check_tuple,
"optional": _type_check_optional,
"union": _type_check_union,
}
# Module-level singleton of the checker dispatch table; built once at import time.
_TYPE_CHECK: Dict[Any, Callable] = _type_check_vtable()
def _type_check(v: Any, name: str, type_: Any) -> Optional[str]:
    """Check value `v` against annotation `type_`; return an error message or None."""
    key, subtypes = _dispatcher(type_)
    return _TYPE_CHECK[key](v, name, *subtypes)
FType = TypeVar("FType", bound=Callable[..., Any])
def type_checked(func: FType) -> FType:
    """Decorator that validates annotated arguments of *func* at call time.

    Parameters without an annotation are not checked. On a mismatch a
    TypeError is raised naming the function and the offending argument.
    """
    signature = inspect.signature(func)

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        bound = signature.bind(*args, **kwargs)
        bound.apply_defaults()
        for param in signature.parameters.values():
            if param.annotation == inspect.Signature.empty:
                continue  # unannotated parameters are accepted as-is
            error = _type_check(
                bound.arguments[param.name],
                param.name,
                param.annotation,
            )
            if error is not None:
                raise TypeError(f'In "{func.__qualname__}", {error}')
        return func(*args, **kwargs)

    return wrapper  # type: ignore
| 12,491 | 35.419825 | 117 | py |
tvm | tvm-main/python/tvm/tir/schedule/state.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""This file defines ScheduleState, the core data structure of TensorIR scheduling."""
from collections import namedtuple
from enum import IntEnum
from typing import Dict, Optional, Union
from tvm._ffi import register_object
from tvm.ir import IRModule
from tvm.runtime import Object
from tvm.tir import Block, BlockRealize, For, PrimFunc
from . import _ffi_api
from ..block_scope import BlockScope, StmtSRef
CachedFlags = namedtuple("CachedFlags", ["affine_binding", "region_cover", "stage_pipeline"])
class ScheduleDebugMask(IntEnum):
    """Bitmask values for the `debug_mask` flag of `ScheduleState`.

    Turning a bit on enables the corresponding verification pass: for example,
    when `(debug_mask & VERIFY_SREF_TREE) != 0`, the correctness of the sref
    tree is verified after each schedule instruction.

    Attributes
    ----------
    VERIFY_SREF_TREE : int = 1
        Verify the correctness of the sref tree
    VERIFY_CACHED_FLAGS : int = 2
        Verify the correctness of affine_binding, region_cover and stage_pipeline
    """

    VERIFY_SREF_TREE = 1
    VERIFY_CACHED_FLAGS = 2
def _parse_mod(mod: Union[PrimFunc, IRModule]) -> IRModule:
    """Coerce a PrimFunc into an IRModule with a single "main" entry; reject other types."""
    if isinstance(mod, PrimFunc):
        # Wrap a bare PrimFunc as the "main" function of a fresh module.
        return IRModule({"main": mod})
    if isinstance(mod, IRModule):
        return mod
    raise TypeError(f"Expected `mod` to be PrimFunc or IRModule, but gets: {mod}")
def _parse_debug_mask(debug_mask: Union[str, int]) -> int:
if isinstance(debug_mask, str):
if debug_mask == "all":
debug_mask = ScheduleDebugMask.VERIFY_SREF_TREE | ScheduleDebugMask.VERIFY_CACHED_FLAGS
elif debug_mask == "none":
debug_mask = 0
else:
raise ValueError(f"Unrecognizable `debug_mask`: {debug_mask}")
if not isinstance(debug_mask, bool) and not isinstance(debug_mask, int):
raise TypeError(f"`debug_mask` should be integer or boolean, but gets: {debug_mask}")
return debug_mask
def _parse_enable_checks(enable_checks: bool) -> bool:
if not isinstance(enable_checks, bool):
raise TypeError(f"enable_checks only accepts bool value, got {type(enable_checks)} instead")
return enable_checks
@register_object("tir.ScheduleState")
class ScheduleState(Object):
"""The state of scheduling, which exposes a `Replace` method as
the primary resort for all the scheduling primitives to manipulate the TensorIR.
The data structure contains the following information
1) The AST being scheduled (mod)
2) The sref tree of schedulable statements (indicated by the srefs)
3) The dependency information of each block scope (block_info)
4) A reverse mapping from the AST nodes to that in the sref tree (get_sref)
5) A debug flag, if set, extra checking is enabled (debug_mask)
6) A enable check flag, if False, some prerequisite checks are disabled.
Parameters
----------
mod : IRModule
The AST of the module being scheduled
debug_mask : int
Do extra correctness checking after the object construction
and each time after calling the Replace method.
enable_check : bool
Indicates whether we enable prerequisite checks for some schedule primitives or not,
defaults to `True`.
"""
mod: IRModule
debug_mask: int
def __init__(
self,
mod: Union[PrimFunc, IRModule],
*,
debug_mask: Union[str, int] = "none",
enable_check: bool = True,
) -> None:
"""Construct a schedule state from an IRModule or a PrimFunc
Parameters
----------
mod : Union[PrimFunc, IRModule]
The IRModule or PrimFunc to be scheduled
debug_mask : Union[str, int]
Do extra correctness checking after the class creation and each time
after calling the Replace method.
Possible choices of `debug_mask`:
1) "all" - Turn on all the checks
2) "none" - Turn off all the checks
3) An integer - Turn on checks according to the bitmasks provided in ScheduleDebugMask
"""
self.__init_handle_by_constructor__(
_ffi_api.ScheduleState, # type: ignore # pylint: disable=no-member
_parse_mod(mod),
_parse_debug_mask(debug_mask),
_parse_enable_checks(enable_check),
)
def get_sref(self, stmt: Union[Block, For]) -> Optional[StmtSRef]:
"""Return the corresponding sref that points to the stmt
Parameters
----------
stmt : Union[Block, For]
The schedulable statement in the TensorIR to be retrieved for its sref
Returns
-------
sref : StmtSRef
The corresponding sref
"""
return _ffi_api.ScheduleStateGetSRef(self, stmt) # type: ignore # pylint: disable=no-member
def get_block_scope(self, block_sref: StmtSRef) -> BlockScope:
"""Get the BlockScope correpsonding to the block sref
Parameters
----------
block_sref : StmtSRef
The block sref to be retrieved
Returns
-------
sref : StmtSRef
The corresponding sref
"""
return _ffi_api.ScheduleStateGetBlockScope( # type: ignore # pylint: disable=no-member
self, block_sref
)
def _get_cached_flags(self, block_sref: StmtSRef) -> CachedFlags:
"""Get the cached flags of the corresponding block
Parameters
----------
block_sref : StmtSRef
The block sref to be retrieved
Returns
-------
flags : CachedFlags
Three flags: affine_binding, region_cover, stage_pipeline
Note
----
It is an API intended for internal testing use.
"""
(
affine_binding,
region_cover,
stage_pipeline,
) = _ffi_api.ScheduleStateGetCachedFlags( # type: ignore # pylint: disable=no-member
self, block_sref
)
return CachedFlags(
affine_binding=bool(affine_binding.value),
region_cover=bool(region_cover.value),
stage_pipeline=bool(stage_pipeline.value),
)
def replace(
self,
src_sref: StmtSRef,
tgt_stmt: Union[Block, For, BlockRealize],
block_sref_reuse: Optional[Dict[Block, Block]] = None,
) -> None:
"""
Replace the part of the AST, as being pointed to by `src_sref`,
with a specific statement `tgt_stmt`, and maintain the sref tree accordingly.
Replace will try to perform copy on write as much as possible when the ScheduleState holds
the only copy to the IRModule and IR nodes.
Only 3 types of replacements are allowed: from `src_sref->stmt` to `tgt_stmt`.
1) Block -> Block
2) Loop -> Loop
3) Loop -> BlockRealize
Parameters
----------
src_sref : StmtSRef
The sref to the statement to be replaced in the TensorIR AST
tgt_stmt : Union[Block, For, BlockRealize]
The statement to be replaced to
block_sref_reuse : Optional[Dict[Block, Block]] = None
Maps an old block (to be replaced in the subtree under `src_sref->stmt`)
to a new block (replaced to, in the subtree under `tgt_stmt`), and enforces
reuse of srefs between them (rather than create new srefs) i.e. after being replaced,
the sref that points to the old block will point to the new one
Note
----
The reuse of loop srefs are detected automatically according to the reuse of loop vars.
"""
if block_sref_reuse is None:
block_sref_reuse = {}
_ffi_api.ScheduleStateReplace( # type: ignore # pylint: disable=no-member
self, src_sref, tgt_stmt, block_sref_reuse
)
| 8,773 | 35.865546 | 100 | py |
tvm | tvm-main/python/tvm/tir/schedule/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Transformation on TIR schedule."""
from typing import Optional
from tvm.tir.schedule import Schedule, BlockRV, LoopRV
from . import _ffi_api
def tile_with_tensor_intrin(
    sch: Schedule, block: BlockRV, intrin_name: str, allow_padding: bool = False
) -> Optional[LoopRV]:
    """Tile a subset of the block's loops so they match a tensor intrinsic.

    Parameters
    ----------
    sch : Schedule
        The schedule to which tiling is applied
    block : BlockRV
        The block whose subset of loops will be tiled
    intrin_name : str
        The name of a tensor intrinsic; it must be registered via
        TensorIntrin.register(...) beforehand
    allow_padding : bool
        Whether to allow padding when tiling

    Returns
    -------
    tiled_loop_rv : Optional[LoopRV]
        LoopRV corresponding to the outermost loop of the block tiled according
        to the given intrin, or None if no valid loop mapping is found
    """
    # Mapping and tiling are implemented in C++; this is a thin FFI wrapper.
    tiled_loop = _ffi_api.TileWithTensorIntrin(sch, block, intrin_name, allow_padding)  # type: ignore
    return tiled_loop
| 1,828 | 37.914894 | 99 | py |
tvm | tvm-main/python/tvm/tir/schedule/testing.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=dangerous-default-value
"""Testing utilities for the TensorIR schedule API"""
from typing import Sequence, Union
import tvm
from tvm.ir import IRModule, assert_structural_equal
from tvm.tir import PrimFunc
from tvm.tir.schedule import Schedule, Trace
def verify_trace_roundtrip(
    sch: Schedule,
    mod: Union[PrimFunc, IRModule],
    *,
    debug_mask: Union[str, int] = "all",
    text_format: Union[str, Sequence[str]] = ["python", "json"],
) -> Schedule:
    """Serialize a traced schedule to JSON, then replay the JSON trace by applying to
    a fresh new schedule, verifying the reproducibility of scheduling.
    Parameters
    ----------
    sch : tir.Schedule
        The traced TensorIR schedule to be verified
    mod : Union[PrimFunc, IRModule]
        The IRModule or PrimFunc to construct the fresh new schedule
    debug_mask : Union[str, int]
        Do extra correctness checking after the class creation and each time
        after calling the Replace method.
        Possible choices of `debug_mask`:
        1) "all" - Turn on all the checks
        2) "none" - Turn off all the checks
        3) An integer - Turn on checks according to the bitmasks provided in ScheduleDebugMask
    text_format: Union[str, Sequence[str]]
        The text format or formats whose round-trip behavior should be
        validated. If a single string, validate round-trips through that format
        only; if a sequence, validate each format in turn.
    """
    from tvm.script import tir as T  # pylint: disable=import-outside-toplevel
    if not isinstance(text_format, str):
        # Validate each requested format independently; the schedule produced
        # for the last format is the one returned.
        for opt in text_format:
            new_sch = verify_trace_roundtrip(sch, mod, debug_mask=debug_mask, text_format=opt)
        return new_sch
    trace = sch.trace
    assert trace is not None
    # Step 1. Perform a round-trip through the text-format
    new_sch = Schedule(mod=mod, debug_mask=debug_mask)
    if text_format == "json":
        json_obj = trace.as_json()
        Trace.apply_json_to_schedule(json_obj=json_obj, sch=new_sch)
    elif text_format == "python":
        py_trace = "\n".join(trace.as_python())
        # Execute the printed trace against the fresh schedule; `T` and the
        # tvm.tir namespace must be in scope for the generated statements.
        vars_dict = {"T": T}
        vars_dict.update(tvm.tir.__dict__)
        exec(py_trace, vars_dict, {"sch": new_sch})  # pylint: disable=exec-used
    else:
        assert text_format in ("json", "python"), f"Unknown text format: {text_format}"
    # Step 2. Verify that the round-trip produced the same scheduling
    assert_structural_equal(new_sch.mod, sch.mod)
    # Step 3. Check the consistency of the text format between the old and new traces
    py_repr = "\n".join(trace.as_python())
    new_py_repr = "\n".join(new_sch.trace.as_python())
    assert py_repr == new_py_repr
    # Step 4. Return the new schedule in case it could be useful
    return new_sch
| 3,532 | 39.609195 | 94 | py |
tvm | tvm-main/python/tvm/tir/schedule/analysis.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Analysis used in TensorIR scheduling"""
from typing import List, Optional
import tvm._ffi
from tvm.runtime import Object
from ..buffer import Buffer
from ..stmt import For
from ..expr import PrimExpr
from ..function import IndexMap, PrimFunc
from . import _ffi_api
from .schedule import Schedule, BlockRV
def suggest_index_map(
    buffer: Buffer,
    indices: List[PrimExpr],
    loops: List[For],
    predicate: PrimExpr,
) -> Optional[IndexMap]:
    """Given how a buffer is accessed, suggest a layout transformation that
    improves the locality of that access pattern.

    Parameters
    ----------
    buffer : Buffer
        The buffer to be transformed.
    indices : List[PrimExpr]
        The access pattern to the buffer.
    loops : List[For]
        The loops above the buffer.
    predicate : PrimExpr
        The predicate of the access.

    Returns
    -------
    index_map : Optional[IndexMap]
        The suggested index map. None if no transformation is suggested.
    """
    # Delegate the analysis to the C++ implementation via FFI.
    return _ffi_api.SuggestIndexMap(buffer, indices, loops, predicate)  # type: ignore # pylint: disable=no-member
# Opaque handle to a C++-side structure; it exposes no Python-level fields.
@tvm._ffi.register_object("tir.schedule.TensorizeInfo")
class TensorizeInfo(Object):
    """Necessary information used for tensorization."""
def get_tensorize_loop_mapping(
    sch: Schedule, block: BlockRV, desc_func: PrimFunc, allow_padding: bool = False
) -> Optional[TensorizeInfo]:
    """Try to establish a mapping between the loops of a target block and an
    intrinsic description.

    Parameters
    ----------
    sch : Schedule
        The schedule to be tensorized
    block : BlockRV
        The target block to match against
    desc_func : PrimFunc
        The prim func describing the computation to be tensorized
    allow_padding : bool
        Whether to allow padding the block iters to match the intrinsic description

    Returns
    -------
    tensorize_info : Optional[TensorizeInfo]
        TensorizeInfo structure if a valid mapping is found, None otherwise
    """
    # The matching logic is implemented in C++; this is a thin FFI wrapper.
    mapping = _ffi_api.GetTensorizeLoopMapping(  # type: ignore
        sch, block, desc_func, allow_padding
    )
    return mapping
# Opaque handle to a C++-side structure; it exposes no Python-level fields.
@tvm._ffi.register_object("tir.schedule.AutoTensorizeMappingInfo")
class AutoTensorizeMappingInfo(Object):
    """Necessary information used to perform transformations for tensorization."""
def get_auto_tensorize_mapping_info(
    sch: Schedule, block: BlockRV, desc_func: PrimFunc
) -> Optional[AutoTensorizeMappingInfo]:
    """Compute mapping info between a target block and an intrinsic description,
    including the layout transformations that would have to be applied.

    Parameters
    ----------
    sch : Schedule
        The schedule to be tensorized.
    block : BlockRV
        The compute block for auto tensorization.
    desc_func : PrimFunc
        The prim func describing the computation to be tensorized.

    Returns
    -------
    auto_tensorize_mapping_info : Optional[AutoTensorizeMappingInfo]
        The mapping information when potential mappings are found, None otherwise.

    Note
    ----
    A non-None result does not guarantee the block can be tensorized: the
    suggested layout transformations still have to be applied and the result
    matched against the tensor intrinsics.
    """
    # pylint: disable-next=no-member
    return _ffi_api.GetAutoTensorizeMappingInfo(sch, block, desc_func)  # type: ignore
def has_block(sch: Schedule, block_name: str) -> bool:
    """Whether a block with the given name exists in the module that the
    provided schedule is associated with.

    Parameters
    ----------
    sch : Schedule
        The schedule.
    block_name : str
        The name of the block to query.

    Returns
    -------
    yes/no : bool
        True if the given block exists in the schedule.
    """
    # pylint: disable-next=no-member
    return _ffi_api.HasBlock(sch, block_name)  # type: ignore
def is_output_block(sch: Schedule, block: BlockRV) -> bool:
    """Whether the given block is an output block.

    Parameters
    ----------
    sch : Schedule
        The schedule that owns the block.
    block : BlockRV
        The block to be checked.

    Returns
    -------
    yes/no : bool
        True if the given block is an output block.
    """
    # pylint: disable-next=no-member
    return _ffi_api.IsOutputBlock(sch, block)  # type: ignore
| 5,029 | 30.049383 | 97 | py |
tvm | tvm-main/python/tvm/tir/schedule/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.tir.schedule"""
import tvm._ffi
tvm._ffi._init_api("tir.schedule", __name__) # pylint: disable=protected-access
| 919 | 42.809524 | 80 | py |
tvm | tvm-main/python/tvm/tir/schedule/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import
"""Namespace for the TensorIR schedule API."""
from ..block_scope import BlockScope, Dependency, DepKind, StmtSRef
from .instruction import Instruction, InstructionKind
from .schedule import BlockRV, ExprRV, LoopRV, Schedule, ScheduleError
from .state import ScheduleDebugMask, ScheduleState
from .trace import Trace
from . import analysis
from . import transform
| 1,183 | 41.285714 | 70 | py |
tvm | tvm-main/python/tvm/tir/schedule/schedule.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The TensorIR schedule class"""
from typing import Callable, Dict, List, Optional, Tuple, Union
from tvm._ffi import register_object as _register_object
from tvm.error import TVMError, register_error
from tvm.ir import GlobalVar, IRModule, PrimExpr
from tvm.runtime import Object, String
from tvm.tir import Block, Buffer, FloatImm, For, IntImm, PrimFunc
from ..function import IndexMap
from . import _ffi_api
from ._type_checker import type_checked
from .state import ScheduleState, StmtSRef, _parse_debug_mask, _parse_mod
from .trace import Trace
@register_error
class ScheduleError(TVMError):
    """Error that happens during TensorIR scheduling.

    Registered with TVM's error machinery so errors raised on the C++ side
    can be re-thrown in Python as this exception type.
    """
@_register_object("tir.LoopRV")
class LoopRV(Object):
"""A random variable that refers to a loop"""
def __init__(self) -> None:
"""Construct a new LoopRV."""
self.__init_handle_by_constructor__(
_ffi_api.LoopRV # type: ignore # pylint: disable=no-member
)
@_register_object("tir.BlockRV")
class BlockRV(Object):
"""A random variable that refers to a block"""
def __init__(self) -> None:
"""Construct a new BlockRV."""
self.__init_handle_by_constructor__(
_ffi_api.BlockRV # type: ignore # pylint: disable=no-member
)
# It is a workaround for mypy: https://github.com/python/mypy/issues/7866#issuecomment-549454370
# This feature is not supported until python 3.10:
# https://docs.python.org/3.10/whatsnew/3.10.html#pep-613-typealias
ExprRV = Union[PrimExpr]  # A random variable that evaluates to an integer
# Union of every random-variable kind a schedule primitive may accept.
RAND_VAR_TYPE = Union[ExprRV, BlockRV, LoopRV]  # pylint: disable=invalid-name
# Maps user-facing error_render_level names to the integer codes expected by
# the C++ side. Update to `Literal["detail", "fast", "none"]` once upgraded
# to python3.8
_ERROR_RENDER_LEVEL: Dict[str, int] = {"detail": 0, "fast": 1, "none": 2}
def _parse_error_render_level(error_render_level: str) -> int:
    """Convert an error render level name into its internal integer code.

    Parameters
    ----------
    error_render_level : str
        One of "detail", "fast" or "none".

    Returns
    -------
    level : int
        The integer code understood by the C++ side.

    Raises
    ------
    ValueError
        If `error_render_level` is not one of the recognized names.
    """
    try:
        # Index directly instead of a membership test followed by `.get`:
        # a single lookup, and the return type is guaranteed to be `int`
        # (matching the annotation) rather than `Optional[int]`.
        return _ERROR_RENDER_LEVEL[error_render_level]
    except KeyError:
        raise ValueError(
            'error_render_level can be "detail", "fast", or "none", but got: '
            + f"{error_render_level}"
        ) from None
def _parse_enable_checks(enable_checks: bool) -> bool:
if not isinstance(enable_checks, bool):
raise TypeError(f"enable_checks only accepts bool value, got {type(enable_checks)} instead")
return enable_checks
def _parse_seed(seed: Optional[int]) -> int:
if seed is None:
return -1
if not isinstance(seed, int):
raise TypeError(f"Expected `seed` to be int or None, but gets: {seed}")
if seed < 1 or seed > 2147483647:
raise ValueError(f"seed must be in the range [1, 2147483647], but gets: {seed}")
return seed
def _get_block_default_dtype(block: Block) -> str:
    """Pick a default index dtype for a block: the dtype of its first iter
    var if it has one, else the dtype of the first region bound among its
    reads/writes, else "int64"."""
    for iter_var in block.iter_vars:
        return iter_var.var.dtype
    regions = list(block.reads) + list(block.writes)
    for buffer_region in regions:
        for rng in buffer_region.region:
            return rng.min.dtype
    return "int64"
@_register_object("tir.Schedule")
class Schedule(Object):
"""The user-facing schedule class
A schedule is a set of transformations that change the order of computation but
preserve the semantics of computation. Some example of schedules:
1) Split a loop into two;
2) Reorder two loops;
3) Inline the computation of a specific buffer into its consumer
The schedule class stores auxiliary information to schedule correctly and efficiently.
Link to tutorial: https://tvm.apache.org/docs/tutorials/language/schedule_primitives.html
"""
@type_checked
def __init__(
self,
mod: Union[PrimFunc, IRModule],
*,
seed: Optional[int] = None,
debug_mask: Union[str, int] = "none",
error_render_level: str = "detail",
enable_check: bool = True,
) -> None:
"""Construct a TensorIR schedule class from an IRModule
Parameters
----------
mod : Union[PrimFunc, IRModule]
The IRModule or PrimFunc to be scheduled
seed: Optional[int]
The seed value for schedule's random state
Note that None and -1 means use device random, otherwise only integer between 1 and
2147483647 is allowed.
debug_mask : Union[str, int]
Do extra correctness checking after the class creation and each time
after calling the Replace method.
Possible choices of `debug_mask`:
1) "all" - Turn on all the checks
2) "none" - Turn off all the checks
3) An integer - Turn on checks according to the bitmasks provided in ScheduleDebugMask
error_render_level : str = "detail"
The level of error rendering. Choices: "detail", "fast", "none".
- "detail": Render a detailed error message, with the TIR and error locations printed
- "fast: Show a simple error message without rendering or string manipulation
- "none": Do not show any error message.
enable_check : bool = True
The default schedule checks are too strict and might prevent us performing some valid
schedules. `enable_check` is an argument to control whether we enable prerequisite
checks for some schedule primitives or not:
- true: perform prerequisite check before applying some schedules.
- false: do not perform some check before applying schedules, but still raise error
if schedule fails.
It's user duty to guarantee schedule correctness if `enable_check` is set to `False`.
Note
----
The checks performed includes:
1) VerifySRefTree
2) VerifyCachedFlags
"""
# call the constructor
self.__init_handle_by_constructor__(
_ffi_api.TracedSchedule, # type: ignore # pylint: disable=no-member
_parse_mod(mod),
_parse_seed(seed),
_parse_debug_mask(debug_mask),
_parse_error_render_level(error_render_level),
_parse_enable_checks(enable_check),
)
@staticmethod
def _create_non_traced(
mod: Union[PrimFunc, IRModule],
*,
seed: Optional[int] = None,
debug_mask: Union[str, int] = "none",
error_render_level: str = "detail",
enable_check: bool = True,
) -> "Schedule":
"""Construct a non-traced TensorIR schedule class from an IRModule."""
return _ffi_api.ConcreteSchedule( # type: ignore # pylint: disable=no-member
_parse_mod(mod),
_parse_seed(seed),
_parse_debug_mask(debug_mask),
_parse_error_render_level(error_render_level),
_parse_enable_checks(enable_check),
)
########## Utilities ##########
@property
def mod(self) -> IRModule:
"""Returns the AST of the module being scheduled"""
return _ffi_api.ScheduleGetMod(self) # type: ignore # pylint: disable=no-member
@property
def state(self) -> ScheduleState:
"""Returns the ScheduleState in the current schedule class"""
return _ffi_api.ScheduleGetState(self) # type: ignore # pylint: disable=no-member
@property
def trace(self) -> Optional[Trace]:
"""Returns the internally maintained trace of scheduling program execution"""
return _ffi_api.ScheduleGetTrace(self) # type: ignore # pylint: disable=no-member
@property
def func_working_on(self) -> Optional[GlobalVar]:
"""Returns the GlobalVar of the func that the schedule is currently working on"""
return _ffi_api.ScheduleGetFuncWorkingOn(self) # type: ignore # pylint: disable=no-member
def work_on(self, func_name: str) -> None:
"""Instruct the schedule to work on a function in the IRModule.
By default, the schedule works on the function with the name "main", or the only function in
the IRModule if there is only one. If there is multiple functions in the IRModule, and none
of their names are "main", users will have to call this method to explicitly specify which
function to work on.
This sugar function will guide the `GetBlock` method if its `func_name` is not specified.
Parameters
----------
func_name : str
The name of the function to work on.
"""
_ffi_api.ScheduleWorkOn(self, func_name) # type: ignore # pylint: disable=no-member
def copy(self) -> "Schedule":
"""Returns a copy of the schedule, including both the state and the symbol table,
* guaranteeing that
* 1) SRef tree is completely reconstructed;
* 2) The IRModule being scheduled is untouched;
* 3) All the random variables are valid in the copy, pointing to the corresponding sref
* reconstructed
Returns
-------
copy : Schedule
A new copy of the schedule
"""
return _ffi_api.ScheduleCopy(self) # type: ignore # pylint: disable=no-member
@type_checked
def seed(self, seed: int) -> None:
"""Seed the randomness
Parameters
----------
seed : int
The new random seed, -1 if use device random, otherwise non-negative
"""
return _ffi_api.ScheduleSeed(self, seed) # type: ignore # pylint: disable=no-member
def fork_seed(self) -> int:
"""Returns a forked random state as seed for new schedules
Returns
-------
seed : int
The forked random state, not the same as the current random state
"""
return _ffi_api.ScheduleForkSeed(self) # type: ignore # pylint: disable=no-member
def show(self, style: Optional[str] = None, black_format: bool = True) -> None:
"""A sugar for print highlighted TVM script.
Parameters
----------
style : str, optional
Pygmentize printing style, auto-detected if None. See
`tvm.script.highlight.cprint` for more details.
black_format: bool
If true (default), use the formatter Black to format the TVMScript
"""
mod = self.mod
if mod is not None:
mod.show(style=style, black_format=black_format)
trace = self.trace
if trace is not None:
trace.show(style=style, black_format=black_format)
########## Lookup ##########
@type_checked
def get(
self, rand_var_or_sref: Union[RAND_VAR_TYPE, StmtSRef]
) -> Optional[Union[int, Block, For]]:
"""Returns:
- the corresponding Block that a BlockRV evaluates to;
- the corresponding For that a LoopRV evaluates to;
- the corresponding integer that a ExprRV evaluates to;
- the corresponding Block that a block sref points to;
- the corresponding For that a loop sref points to;
Parameters
----------
rand_var_or_sref : Union[ExprRV, BlockRV, LoopRV, StmtSRef]
The random variable / sref to be evaluated
Returns
-------
result : Optional[Union[int, Block, For]]
The corresponding result
"""
if isinstance(rand_var_or_sref, StmtSRef):
return rand_var_or_sref.stmt
# pylint: disable-next=no-member
result = _ffi_api.ScheduleGet(self, rand_var_or_sref) # type: ignore
if isinstance(result, IntImm):
result = result.value
return result
@type_checked
def get_sref(self, rand_var_or_stmt: Union[BlockRV, LoopRV, Block, For]) -> Optional[StmtSRef]:
"""Returns the corresponding sref to the given
1) LoopRV
2) BlockRV
3) Block
4) For
Parameters
----------
rand_var_or_stmt : Union[BlockRV, LoopRV, Block, For]
The random variable / sref to be evaluated
Returns
-------
result : Optional[StmtSRef]
The corresponding result
"""
return _ffi_api.ScheduleGetSRef( # type: ignore # pylint: disable=no-member
self, rand_var_or_stmt
)
@type_checked
def remove_rv(self, rand_var: RAND_VAR_TYPE) -> None:
"""Remove a random variable from the symbol table
Parameters
----------
rand_var : Union[BlockRV, LoopRV, ExprRV]
The random variable to be removed
"""
return _ffi_api.ScheduleRemoveRV(self, rand_var) # type: ignore # pylint: disable=no-member
########## Schedule: Sampling ##########
@type_checked
def sample_categorical(
self, candidates: List[int], probs: List[float], decision: Optional[int] = None
) -> ExprRV:
"""Sample an integer given the probability distribution
Parameters
----------
candidates : List[int]
The candidates to be sampled from
probs : List[float]
The probability of each candidate
decision : Optional[int]
The sampling decision, if any
Returns
-------
result : ExprRV
The random variable sampled from candidates
"""
return _ffi_api.ScheduleSampleCategorical( # type: ignore # pylint: disable=no-member
self, candidates, probs, decision
)
@type_checked
def sample_perfect_tile(
self,
loop: LoopRV,
n: int,
max_innermost_factor: int = 16,
decision: Optional[List[int]] = None,
) -> List[ExprRV]:
"""Sample the factors to perfect tile a specific loop
Parameters
----------
loop : LoopRV
The loop to be tiled
n : int
The number of tiles to be sampled
max_innermost_factor : int
The maximum tile size allowed to be sampled in the innermost loop
decision: Optional[List[int]]
The sampling decision, if any
Returns
-------
result : List[ExprRV]
A list of length `n`, the random perfect tile sizes sampled
"""
return list(
_ffi_api.ScheduleSamplePerfectTile( # type: ignore # pylint: disable=no-member
self, loop, n, max_innermost_factor, decision
)
)
@type_checked
def sample_partitioned_tile(
self,
loop: LoopRV,
n: int,
partition_pos: int = 0,
innerpart_factor: int = 1,
decision: Optional[List[int]] = None,
) -> List[ExprRV]:
"""Sample the factors to a partitioned tile for a specific loop
Parameters
----------
loop : LoopRV
The loop to be tiled
n : int
The number of tiles to be sampled
partition_pos : int
The position to partition tiles to two parts
innerpart_factor : int
The factor of the second part
decision: Optional[List[int]]
The sampling decision, if any
Returns
-------
result : List[ExprRV]
A list of length `n`, the random partitioned tile sizes sampled
"""
return list(
_ffi_api.ScheduleSamplePartitionedTile( # type: ignore # pylint: disable=no-member
self,
loop,
n,
partition_pos,
innerpart_factor,
decision,
)
)
@type_checked
def sample_compute_location(
self, block: Union[BlockRV, str], decision: Optional[int] = None
) -> LoopRV:
"""Sample a compute-at location of the given block
Parameters
----------
block : Union[BlockRV, str]
The block whose compute-at location is to be sampled
decision : Optional[int]
The sampling decision
Returns
-------
result : LoopRV
The sampled loop where the input block is to be computed at
"""
block = self._normalize_block_arg(block)
return _ffi_api.ScheduleSampleComputeLocation( # type: ignore # pylint: disable=no-member
self, block, decision
)
########## Schedule: Get blocks & loops ##########
@type_checked
def get_block(self, name: str, func_name: Optional[str] = None) -> BlockRV:
"""Retrieve a block in a specific function with its name
By default, if `func_name` is not specified, the schedule will search for the block in the
function that is currently being "worked on". To switch the function to be worked on, use
`work_on` before calling this method.
Parameters
----------
name : str
The name of the block
func_name : Optional[str] = None
The name of the function
Returns
-------
block : BlockRV
The block retrieved
IndexError is raised if 0 or multiple blocks exist with the specific name.
"""
return _ffi_api.ScheduleGetBlock( # type: ignore # pylint: disable=no-member
self, name, func_name
)
@type_checked
def get_loops(self, block: Union[BlockRV, str]) -> List[LoopRV]:
"""Get the parent loops of the block in its scope, from outer to inner
Parameters
----------
block : Union[BlockRV, str]
The query block
Returns
-------
loops : List[LoopRV]
A list of loops above the given block in its scope, from outer to inner
"""
block = self._normalize_block_arg(block)
# pylint: disable-next=no-member
return list(_ffi_api.ScheduleGetLoops(self, block)) # type: ignore
@type_checked
def get_child_blocks(self, block_or_loop: Union[BlockRV, LoopRV]) -> List[BlockRV]:
"""Get the leaf blocks of a specific block/loop
Parameters
----------
block_or_loop : Union[BlockRV, LoopRV]
The query block/loop
Returns
-------
blocks : List[LoopRV]
A list of leaf blocks inside a specific block/loop
"""
# pylint: disable-next=no-member
return list(_ffi_api.ScheduleGetChildBlocks(self, block_or_loop)) # type: ignore
@type_checked
def get_producers(self, block: Union[BlockRV, str]) -> List[BlockRV]:
"""Get the producers of a specific block
Parameters
----------
block : Union[BlockRV, str]
The block in the query
Returns
-------
producers : List[BlockRV]
A list of producers of the given block
"""
block = self._normalize_block_arg(block)
# pylint: disable-next=no-member
return list(_ffi_api.ScheduleGetProducers(self, block)) # type: ignore
@type_checked
def get_consumers(self, block: Union[BlockRV, str]) -> List[BlockRV]:
"""Get the consumers of a specific block
Parameters
----------
block : Union[BlockRV, str]
The block in the query
Returns
-------
consumers : List[BlockRV]
A list of consumers of the given block
"""
block = self._normalize_block_arg(block)
# pylint: disable-next=no-member
return list(_ffi_api.ScheduleGetConsumers(self, block)) # type: ignore
@type_checked
def get_output_blocks(self, scope_block: Union[BlockRV, str]) -> List[BlockRV]:
"""Get the list of output blocks within the given scope
An output block is a block which has atleast one buffer being written
to, but is not allocated within the PrimFunc
Parameters
----------
scope_block : Union[BlockRV, str],
The scope block from which output blocks are collected
Returns
-------
output_blocks : List[BlockRV]
A list of all blocks that write to some output buffer
"""
scope_block = self._normalize_block_arg(scope_block)
# pylint: disable-next=no-member
return list(_ffi_api.ScheduleGetOutputBlocks(self, scope_block)) # type: ignore
########## Schedule: Transform loops ##########
@type_checked
def merge(self, *loops: List[LoopRV]) -> LoopRV:
"""Merge a list of loops into one. The loops under their LCA requires:
1) Under the same scope.
2) Can't have annotations or thread bindings.
3) Start with 0 and have same extent and same nesting depth.
4) From target loop to their LCA, The inner loop must be the only child of the outer loop.
Parameters
----------
*loops : List[LoopRV]
The loops to be merged
Returns
-------
fused_loop : LoopRV
The new loop after merge
Examples
--------
Before applying merge, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_merge(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0
Create the schedule and do fuse:
.. code-block:: python
sch = tir.Schedule(before_fuse)
i1, _ = sch.get_loops(sch.get_block("B"))
i2, _ = sch.get_loops(sch.get_block("C"))
sch.merge(i1, i2)
print(sch.mod["main"].script())
After applying fuse, the IR becomes:
.. code-block:: python
@T.prim_func
def after_fuse(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
# the 2 loops are merged into 1
for i_m in range(128):
for j in range(128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i_m, j])
T.reads(A[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = A[vi, vj] * T.float32(2)
for j in range(128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i_m, j])
T.reads(A[vi, vj])
T.writes(C[vi, vj])
C[vi, vj] = A[vi, vj] * T.float32(2)
"""
return _ffi_api.ScheduleMerge(self, loops) # type: ignore # pylint: disable=no-member
@type_checked
def fuse(self, *loops: List[LoopRV], preserve_unit_iters: bool = True) -> LoopRV:
"""Fuse a list of consecutive loops into one. It requires:
1) The loops can't have annotations or thread bindings.
2) The (i+1)-th loop must be the only child of the i-th loop.
3) All loops must start with 0.
4) The domain of a loop to be fused cannot depend on another loop to be fused.
Parameters
----------
*loops : List[LoopRV]
The loops to be fused
Returns
-------
fused_loop : LoopRV
The new loop after fusion
Examples
--------
Before applying fuse, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_fuse(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
Create the schedule and do fuse:
.. code-block:: python
sch = tir.Schedule(before_fuse)
i, j = sch.get_loops(sch.get_block("B"))
sch.fuse(i, j)
print(sch.mod["main"].script())
After applying fuse, the IR becomes:
.. code-block:: python
@T.prim_func
def after_fuse(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
# the 2 loops are fused into 1
for i_j_fused in T.serial(0, 16384):
with T.block("B"):
vi = T.axis.S(128, T.floordiv(i_j_fused, 128))
vj = T.axis.S(128, T.floormod(i_j_fused, 128))
B[vi, vj] = A[vi, vj] * 2.0
"""
# pylint: disable-next=no-member
return _ffi_api.ScheduleFuse(self, loops, preserve_unit_iters) # type: ignore
@type_checked
def split(
self,
loop: LoopRV,
factors: List[Union[int, ExprRV, None]],
preserve_unit_iters: bool = True,
) -> List[LoopRV]:
"""Split a loop into a list of consecutive loops. It requires:
1) The loop can't have annotation or thread binding.
2) The loop must start with 0.
Predicates may be added to ensure the total loop numbers keeps unchanged.
In `factors`, at most one of the factors can be None,
which will be automatically inferred.
Parameters
----------
loop : LoopRV
The loop to be split
factors: List[Union[int, ExprRV, None]]
The splitting factors
Potential inputs are:
- None
- ExprRV
- Positive constant integers
preserve_unit_iters : bool
Whether or not to preserve unit iterators in block bindings
Returns
-------
split_loops : List[LoopRV]
The new loops after split
Examples
--------
Before split, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_split(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
Create the schedule and do split:
.. code-block:: python
sch = tir.Schedule(before_split)
i, j = sch.get_loops(sch.get_block("B"))
sch.split(i, factors=[2, 64])
print(sch.mod["main"].script())
After applying split, the IR becomes:
.. code-block:: python
@T.prim_func
def after_split(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
# the original loop is split into 2 loops
for i0, i1, j in T.grid(2, 64, 128):
with T.block("B"):
vi = T.axis.S(128, i0 * 64 + i1)
vj = T.axis.S(128, j)
B[vi, vj] = A[vi, vj] * 2.0
"""
# it will be checked later in C++ implementation
# that there is at most one None in `factors`
return list(
_ffi_api.ScheduleSplit( # type: ignore # pylint: disable=no-member
self, loop, factors, preserve_unit_iters
)
)
@type_checked
def reorder(self, *ordered_loops: List[LoopRV]) -> None:
"""
Reorder a list of loops. It doesn't require the loops to be consecutive.
It requires:
1) The loops are in the same chain. That means: the loops can be ordered to [l_1, l_2, ... ,
l_n] where l_i is an ancestor of l_{i+1} and there are only single-branch loops between
l_1 and l_n (which also indicates they are under the same scope).
2) After reordering, the domain of an outer loop cannot depend on any of the inner loops.
3) For every block under the loop nests, its block binding must be affine, and the block
variables must be either data parallel or reduction.
4) No duplicated loops are allowed in the arguments.
Parameters
----------
*ordered_loops : List[LoopRV]
The loops in the new order
Examples
--------
Before reorder, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_reorder(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
Create the schedule and do reorder:
.. code-block:: python
sch = tir.Schedule(before_reorder)
i, j = sch.get_loops(sch.get_block("B"))
sch.reorder(j, i)
print(sch.mod["main"].script())
After applying reorder, the IR becomes:
.. code-block:: python
@T.prim_func
def after_reorder(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
# Here j and i are reordered
for j, i in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
"""
_ffi_api.ScheduleReorder(self, ordered_loops) # type: ignore # pylint: disable=no-member
@type_checked
def reorder_block_iter_var(self, block: BlockRV, new_order: List[int]) -> None:
"""Reorder the itervars inside a given block.
Parameters
----------
block : BlockRV
The block to be transformed.
new_order : List[int]
The new block itervar order.
Examples
--------
Before reorder_block_iter_var, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def matmul(
A: T.Buffer((128, 128), "float32"),
B: T.Buffer((128, 128), "float32"),
C: T.Buffer((128, 128), "float32"),
) -> None:
for i, j, k in T.grid(128, 128, 128):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
Create the schedule and do reorder_block_iter_var:
.. code-block:: python
sch = tir.Schedule(matmul)
C = sch.get_block("C")
sch.reorder_block_iter_var(C, [2, 1, 0])
After applying reorder_block_iter_var, the IR becomes:
.. code-block:: python
@T.prim_func
def matmul_after_reorder_block_iter_var(
A: T.Buffer((128, 128), "float32"),
B: T.Buffer((128, 128), "float32"),
C: T.Buffer((128, 128), "float32"),
):
for i, j, k in T.grid(128, 128, 128):
with T.block("C"):
vk, vj, vi = T.axis.remap("RSS", [k, j, i])
T.reads(A[vi, vk], B[vj, vk])
T.writes(C[vi, vj])
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
See Also
--------
reorder
"""
# pylint: disable-next=no-member
_ffi_api.ScheduleReorderBlockIterVar(self, block, new_order) # type: ignore
@type_checked
def add_unit_loop(self, block_or_loop: Union[LoopRV, BlockRV]) -> LoopRV:
"""Create a new unit loop on top of the specific block or loop.
Parameters
----------
block_or_loop : Union[LoopRV, BlockRV]
The block above which the new loop is created
Returns
-------
new_loop : LoopRV
The new unit loop
Examples
--------
Before add_unit_loop, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_add_unit_loop(
A: T.Buffer((), "int32"),
B: T.Buffer((), "int32"),
C: T.Buffer((), "int32"),
) -> None:
with T.block("C"):
vi = T.axis.spatial(1, 0)
C[()] = A[()] + B[()]
Create the schedule and do add-unit-loop:
.. code-block:: python
sch = tir.Schedule(before_add_unit_loop)
sch.add_unit_loop(sch.get_block("C"))
print(sch.mod["main"].script())
After applying add-unit-loop, the IR becomes:
.. code-block:: python
@T.prim_func
def after_add_unit_loop(
A: T.Buffer((), "int32"),
B: T.Buffer((), "int32"),
C: T.Buffer((), "int32"),
) -> None:
for u in T.serial(1):
with T.block("C"):
vi = T.axis.spatial(1, 0)
C[()] = A[()] + B[()]
"""
# pylint: disable-next=no-member
return _ffi_api.ScheduleAddUnitLoop(self, block_or_loop) # type: ignore
########## Schedule: Manipulate ForKind ##########
@type_checked
def parallel(self, loop: LoopRV) -> None:
    """Parallelize the input loop. It requires:

    1) The scope block that the loop is in should have stage-pipeline property

    2) All the blocks under the loop are complete blocks or reduction blocks, and have affine
    bindings

    3) For each block under the loop, the loop can only be contained in data-parallel block
    iters' bindings

    Parameters
    ----------
    loop : LoopRV
        The loop to be parallelized

    Examples
    --------
    Before parallel, in TensorIR, the IR is:

    .. code-block:: python

        @T.prim_func
        def before_parallel(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.match_buffer(b, (128, 128))
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A[vi, vj] * 2.0

    Create the schedule and do parallel:

    .. code-block:: python

        sch = tir.Schedule(before_parallel)
        i, j = sch.get_loops(sch.get_block("B"))
        sch.parallel(i)

    After applying parallel, the IR becomes:

    .. code-block:: python

        @T.prim_func
        def after_parallel(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.match_buffer(b, (128, 128))
            for i in T.parallel(0, 128):
                for j in T.serial(0, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B[vi, vj] = A[vi, vj] * 2.0
    """
    _ffi_api.ScheduleParallel(self, loop)  # type: ignore # pylint: disable=no-member
@type_checked
def vectorize(self, loop: LoopRV) -> None:
    """Vectorize the input loop. It requires:

    1) The scope block that the loop is in should have stage-pipeline property

    2) All the blocks under the loop are complete blocks or reduction blocks, and have affine
    bindings

    3) For each block under the loop, the loop can only be contained in data-parallel block
    iters' bindings

    Parameters
    ----------
    loop : LoopRV
        The loop to be vectorized

    Examples
    --------
    Before vectorize, in TensorIR, the IR is:

    .. code-block:: python

        @T.prim_func
        def before_vectorize(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.match_buffer(b, (128, 128))
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A[vi, vj] * 2.0

    Create the schedule and do vectorize:

    .. code-block:: python

        sch = tir.Schedule(before_vectorize)
        i, j = sch.get_loops(sch.get_block("B"))
        sch.vectorize(j)

    After applying vectorize, the IR becomes:

    .. code-block:: python

        @T.prim_func
        def after_vectorize(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.match_buffer(b, (128, 128))
            for i in T.serial(0, 128):
                for j in T.vectorized(0, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B[vi, vj] = A[vi, vj] * 2.0
    """
    _ffi_api.ScheduleVectorize(self, loop)  # type: ignore # pylint: disable=no-member
@type_checked
def bind(self, loop: LoopRV, thread_axis: str) -> None:
    """Bind the input loop to the given thread axis. It requires:

    1) The scope block that the loop is in should have stage-pipeline property

    2) All the blocks under the loop are complete blocks or reduction blocks, and have affine
    bindings

    3) For each block under the loop, if the thread axis starts with "threadIdx", the loop can
    only be contained in data-parallel block iter and reduction block iters' bindings. Otherwise
    the loop can only be contained in data-parallel block iters' bindings

    Parameters
    ----------
    loop : LoopRV
        The loop to be bound to the thread axis
    thread_axis : str
        The thread axis to be bound to the loop. Possible candidates:

        - blockIdx.x/y/z
        - threadIdx.x/y/z
        - vthread.x/y/z
        - vthread (It is a legacy behavior that will be deprecated. Please use `vthread.x/y/z`
          instead.)

    Examples
    --------
    Before bind, in TensorIR, the IR is:

    .. code-block:: python

        @T.prim_func
        def before_bind(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.match_buffer(b, (128, 128))
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A[vi, vj] * 2.0

    Create the schedule and do bind:

    .. code-block:: python

        sch = tir.Schedule(before_bind)
        i, j = sch.get_loops(sch.get_block("B"))
        sch.bind(i, "blockIdx.x")
        sch.bind(j, "threadIdx.x")

    After applying bind, the IR becomes:

    .. code-block:: python

        @T.prim_func
        def after_bind(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.match_buffer(b, (128, 128))
            for i in T.thread_binding(0, 128, thread = "blockIdx.x"):
                for j in T.thread_binding(0, 128, thread = "threadIdx.x"):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B[vi, vj] = A[vi, vj] * 2.0
    """
    _ffi_api.ScheduleBind(self, loop, thread_axis)  # type: ignore # pylint: disable=no-member
@type_checked
def unroll(self, loop: LoopRV) -> None:
    """Unroll the input loop. It requires nothing

    Parameters
    ----------
    loop : LoopRV
        The loop to be unrolled

    Examples
    --------
    Before unroll, in TensorIR, the IR is:

    .. code-block:: python

        @T.prim_func
        def before_unroll(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.match_buffer(b, (128, 128))
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A[vi, vj] * 2.0

    Create the schedule and do unroll:

    .. code-block:: python

        sch = tir.Schedule(before_unroll)
        i, j = sch.get_loops(sch.get_block("B"))
        sch.unroll(i)

    After applying unroll, the IR becomes:

    .. code-block:: python

        @T.prim_func
        def after_unroll(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.match_buffer(b, (128, 128))
            for i in T.unroll(0, 128):
                for j in T.serial(0, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B[vi, vj] = A[vi, vj] * 2.0
    """
    _ffi_api.ScheduleUnroll(self, loop)  # type: ignore # pylint: disable=no-member
########## Schedule: Insert cache stages ##########
@type_checked
def cache_read(
    self,
    block: Union[BlockRV, str],
    read_buffer_index: Union[int, str, Buffer],
    storage_scope: str,
    consumer_blocks: Optional[List[Union[BlockRV, str]]] = None,
) -> BlockRV:
    """Create a block that reads a buffer region into a read cache. It requires:

    1) There is at most one block who write the buffer in the scope.

    2) The scope block have stage-pipeline property.

    Parameters
    ----------
    block : Union[BlockRV, str]
        The consumer block of the target buffer.
    read_buffer_index : Union[int, str, Buffer]
        The index of the buffer in block's read region, the unique
        name of a read buffer in the block, or a Buffer object
        that is within the blocks read region.
    storage_scope : str
        The target storage scope.
    consumer_blocks : Optional[List[Union[BlockRV, str]]]
        An optional list of consumers that should read from the cache. If not specified,
        all consumers will use the cache.

    Returns
    -------
    cached_block : BlockRV
        The block of the cache stage

    Examples
    --------
    Before cache_read, in TensorIR, the IR is:

    .. code-block:: python

        @T.prim_func
        def before_cache_read(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.match_buffer(b, (128, 128))
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A[vi, vj] * 2.0

    Create the schedule and cache_read:

    .. code-block:: python

        sch = tir.Schedule(before_cache_read)
        block_b = sch.get_block("B")
        sch.cache_read(block_b, 0, "local")
        print(sch.mod["main"].script())

    After applying cache_read, the IR becomes:

    .. code-block:: python

        @T.prim_func
        def after_cache_read(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.match_buffer(b, (128, 128))
            A_local = T.alloc_buffer((128, 128), scope="local")
            for i, j in T.grid(128, 128):
                with T.block("A_local"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    A_local[vi, vj] = A[vi, vj]
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A_local[vi, vj] * 2.0
    """
    # Resolve every consumer reference first; string names become BlockRVs.
    consumers = [] if consumer_blocks is None else consumer_blocks
    consumers = [self._normalize_block_arg(consumer) for consumer in consumers]
    block = self._normalize_block_arg(block)
    # Non-integer buffer specifications (name or Buffer object) are resolved
    # to the positional index inside the block's read region.
    if not isinstance(read_buffer_index, int):
        _, read_buffer_index, _ = self._normalize_buffer_arg(
            block, read_buffer_index, required_buffer_type="read"
        )
    # pylint: disable-next=no-member
    return _ffi_api.ScheduleCacheRead(  # type: ignore
        self, block, read_buffer_index, storage_scope, consumers
    )
@type_checked
def cache_write(
    self,
    block: Union[BlockRV, str],
    write_buffer_index: Union[int, str, Buffer],
    storage_scope: str,
    consumer_blocks: Optional[List[Union[BlockRV, str]]] = None,
) -> BlockRV:
    """Create a block that reads a buffer region into a write cache. It requires:

    1) There is only one block who write the buffer in the scope.

    2) The scope block have stage-pipeline property.

    Parameters
    ----------
    block : Union[BlockRV, str]
        The producer block of the target buffer.
    write_buffer_index : Union[int, str, Buffer]
        The index of the buffer in block's write region, the unique
        name of a write buffer in the block, or a Buffer object
        that is within the blocks write region.
    storage_scope : str
        The target storage scope.
    consumer_blocks : Optional[List[Union[BlockRV, str]]]
        An optional list of consumers that should read directly from the cache.
        If not specified, all consumers will read from the original buffer.

    Returns
    -------
    cached_block : BlockRV
        The block of the cache stage

    Examples
    --------
    Before cache_write, in TensorIR, the IR is:

    .. code-block:: python

        @T.prim_func
        def before_cache_write(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.match_buffer(b, (128, 128))
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A[vi, vj] * 2.0

    Create the schedule and cache_write:

    .. code-block:: python

        sch = tir.Schedule(before_cache_write)
        block_b = sch.get_block("B")
        sch.cache_write(block_b, 0, "local")
        print(sch.mod["main"].script())

    After applying cache_write, the IR becomes:

    .. code-block:: python

        @T.prim_func
        def after_cache_write(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.match_buffer(b, (128, 128))
            B_local = T.alloc_buffer((128, 128), scope="local")
            for i, j in T.grid(128, 128):
                with T.block("B_local"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B_local[vi, vj] = A[vi, vj] * 2.0
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = B_local[vi, vj]
    """
    # Resolve every consumer reference first; string names become BlockRVs.
    consumers = [] if consumer_blocks is None else consumer_blocks
    consumers = [self._normalize_block_arg(consumer) for consumer in consumers]
    block = self._normalize_block_arg(block)
    # Non-integer buffer specifications (name or Buffer object) are resolved
    # to the positional index inside the block's write region.
    if not isinstance(write_buffer_index, int):
        _, write_buffer_index, _ = self._normalize_buffer_arg(
            block, write_buffer_index, required_buffer_type="write"
        )
    # pylint: disable-next=no-member
    return _ffi_api.ScheduleCacheWrite(  # type: ignore
        self, block, write_buffer_index, storage_scope, consumers
    )
@type_checked
def reindex_cache_read(
    self,
    block: Union[BlockRV, str],
    read_buffer_index: int,
    storage_scope: str,
    index_map: Union[IndexMap, Callable],
) -> BlockRV:
    """Create a block that reads a buffer region into a read cache using customized
    indices specified by index map. The read region of the buffer must be a single point.

    The cache stage block follows the original order of loops and block itervars in the block.
    If a block itervar does not appear in the buffer access region, it and its corresponding
    loop variables will be omitted. User can then use `transform_block_layout` primitive to
    reorder the block itervars and surrounding loops of the cache read/write block.

    Unlike `cache_read`, `reindex_cache_read` only supports single consumer, please use
    `cache_read` when there are multiple consumers.

    Parameters
    ----------
    block : Union[BlockRV, str]
        The consumer block of the target buffer.
    read_buffer_index : int
        The index of the buffer in block's read region.
    storage_scope : str
        The target storage scope.
    index_map : Union[IndexMap, Callable]
        User defined indices to access allocated cache buffer, maps from block iter vars.

    Returns
    -------
    cached_block : BlockRV
        The block of the cache stage

    Examples
    --------
    Before reindex_cache_read, in TensorIR, the IR is:

    .. code-block:: python

        @T.prim_func
        def before_reindex_cache_read(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.match_buffer(b, (128, 128))
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A[vi, vj] * 2.0

    Create the schedule and reindex_cache_read:

    .. code-block:: python

        sch = tir.Schedule(before_reindex_cache_read)
        block_b = sch.get_block("B")
        sch.reindex_cache_read(block_b, 0, "local", lambda vi, vj: (vj, vi))
        print(sch.mod["main"].script())

    After applying reindex_cache_read, the IR becomes:

    .. code-block:: python

        @T.prim_func
        def after_reindex_cache_read(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.match_buffer(b, (128, 128))
            A_local = T.alloc_buffer((128, 128), scope="local")
            for i, j in T.grid(128, 128):
                with T.block("A_local"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    A_local[vj, vi] = A[vi, vj]
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A_local[vj, vi] * 2.0

    See Also
    --------
    reindex_cache_write
    transform_block_layout
    transform_layout
    cache_read
    reindex
    """
    # Convert any string block names into Block RVs.
    block = self._normalize_block_arg(block)
    # A bare callable is wrapped into an IndexMap whose index dtype matches
    # the default dtype of the target block's itervars.
    if callable(index_map):
        index_map = IndexMap.from_func(
            index_map,
            index_dtype=_get_block_default_dtype(self.get(block)),
        )
    return _ffi_api.ScheduleReindexCacheRead(  # type: ignore # pylint: disable=no-member
        self, block, read_buffer_index, storage_scope, index_map
    )
@type_checked
def reindex_cache_write(
    self,
    block: Union[BlockRV, str],
    write_buffer_index: int,
    storage_scope: str,
    index_map: Union[Callable, IndexMap],
) -> BlockRV:
    r"""Create a block that reads a buffer region into a write cache using customized
    indices specified by index map. The write region of the buffer must be a single point.

    The cache stage block follows the original order of loops and block itervars in the block.
    If a block itervar does not appear in the buffer access region, it and its corresponding
    loop variables will be omitted. User can then use `transform_block_layout` primitive to
    reorder the block itervars and surrounding loops of the cache read/write block.

    Unlike `cache_write`, `reindex_cache_write` only supports single consumer, please use
    `cache_write` when there are multiple consumers.

    Parameters
    ----------
    block : Union[BlockRV, str]
        The consumer block of the target buffer.
    write_buffer_index : int
        The index of the buffer in block's write region.
    storage_scope : str
        The target storage scope.
    index_map : Union[Callable, IndexMap]
        User defined indices to access allocated cache buffer, maps from block iter vars.

    Returns
    -------
    cached_block : BlockRV
        The block of the cache stage

    Examples
    --------
    Before reindex_cache_write, in TensorIR, the IR is:

    .. code-block:: python

        @T.prim_func
        def before_reindex_cache_write(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.match_buffer(b, (128, 128))
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A[vi, vj] * 2.0

    Create the schedule and reindex_cache_write:

    .. code-block:: python

        sch = tir.Schedule(before_reindex_cache_write)
        block_b = sch.get_block("B")
        sch.reindex_cache_write(block_b, 0, "local", lambda vi, vj: (vi // 2, vi % 2, vj))
        print(sch.mod["main"].script())

    After applying reindex_cache_write, the IR becomes:

    .. code-block:: python

        @T.prim_func
        def after_reindex_cache_write(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.match_buffer(b, (128, 128))
            B_local = T.alloc_buffer((64, 2, 128), scope="local")
            for i, j in T.grid(128, 128):
                with T.block("B_local"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B_local[vi // 2, vi % 2, vj] = A[vi, vj] * 2.0
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = B_local[vi // 2, vi % 2, vj]

    See Also
    --------
    reindex_cache_read
    transform_block_layout
    transform_layout
    cache_write
    reindex
    """
    # Convert any string block names into Block RVs.
    block = self._normalize_block_arg(block)
    # A bare callable is wrapped into an IndexMap whose index dtype matches
    # the default dtype of the target block's itervars.
    if callable(index_map):
        index_map = IndexMap.from_func(
            index_map,
            index_dtype=_get_block_default_dtype(self.get(block)),
        )
    return _ffi_api.ScheduleReindexCacheWrite(  # type: ignore # pylint: disable=no-member
        self, block, write_buffer_index, storage_scope, index_map
    )
@type_checked
def cache_inplace(
    self,
    block: Union[BlockRV, str],
    read_buffer_index: Union[int, str, Buffer],
    storage_scope: str,
) -> List[BlockRV]:
    """Create blocks that reads & write a buffer region into a cache block.
    It requires the target block both read & write the target buffer.
    Mainly for inplace operation.

    Parameters
    ----------
    block : Union[BlockRV, str]
        The target block operates on the target buffer.
    read_buffer_index : Union[int, str, Buffer]
        The index of the buffer in block's read region, the unique
        name of a read buffer in the block, or a Buffer object
        that is within the blocks read region.
    storage_scope : str
        The target storage scope.

    Returns
    -------
    cached_blocks : List[BlockRV]
        The blocks of the cache stage, read cache first, write cache second

    Examples
    --------
    Before cache_inplace, in TensorIR, the IR is:

    .. code-block:: python

        @T.prim_func
        def before_cache_inplace(data_io: T.Buffer((64), "int32")):
            for i0 in T.serial(1):
                with T.block("A"):
                    T.reads(data_io[:64])
                    T.writes(data_io[:64])
                    T.evaluate(T.call_extern("call_impl", data_io.data, dtype=""))

    Create the schedule and cache_inplace:

    .. code-block:: python

        sch = tir.Schedule(before_cache_inplace)
        block_a = sch.get_block("A")
        sch.cache_inplace(block_a, 0, "local")
        print(sch.mod["main"].script())

    After applying cache_inplace, the IR becomes:

    .. code-block:: python

        @T.prim_func
        def cache_inplace(data_io: T.Buffer(64, "int32")) -> None:
            data_io_local = T.alloc_buffer([64], dtype="int32", scope="local")
            for i0 in T.serial(1):
                for ax0 in T.serial(64):
                    with T.block("data_io_local"):
                        v0 = T.axis.spatial(64, ax0)
                        T.reads(data_io[v0])
                        T.writes(data_io_local[v0])
                        data_io_local[v0] = data_io[v0]
                with T.block("A"):
                    T.reads(data_io_local[0 : 64])
                    T.writes(data_io_local[0 : 64])
                    T.evaluate(T.call_extern("call_impl", data_io_local.data, dtype=""))
                for ax0 in T.serial(64):
                    with T.block("data_io_local"):
                        v0 = T.axis.spatial(64, ax0)
                        T.reads(data_io_local[v0])
                        T.writes(data_io[v0])
                        data_io[v0] = data_io_local[v0]
    """
    block = self._normalize_block_arg(block)
    # Non-integer buffer specifications (name or Buffer object) are resolved
    # to the positional index inside the block's read region.
    if not isinstance(read_buffer_index, int):
        _, read_buffer_index, _ = self._normalize_buffer_arg(
            block, read_buffer_index, required_buffer_type="read"
        )
    return _ffi_api.ScheduleCacheInplace(  # type: ignore # pylint: disable=no-member
        self, block, read_buffer_index, storage_scope
    )
@type_checked
def cache_index(
    self, block: Union[BlockRV, str], storage_scope: str, cse_thresh: int = 0
) -> List[BlockRV]:
    """Create a block to cache precomputed index for later use.
    if there is no index computation, keep unchanged.

    Parameters
    ----------
    block : Union[BlockRV, str]
        The target block operates on the target buffer.
    storage_scope : str
        The storage scope of cached block.
    cse_thresh : int
        The repeat threshold that determines a common sub expr,
        default 0 means cache all index computation.

    Returns
    -------
    cached_blocks : List[BlockRV]
        The blocks of the stage writing the cache buffers

    Examples
    --------
    Before cache_index, in TensorIR, the IR is:

    .. code-block:: python

        @T.prim_func
        def resize(a: T.handle, b: T.handle) -> None:
            A = T.match_buffer(a, (1, 3, 40, 40))
            B = T.match_buffer(b, (1, 3, 80, 80))
            for i0, i1, i2, i3 in T.grid(1, 3, 80, 80):
                with T.block("A"):
                    n, c, vi, vj = T.axis.remap("SSSS", [i0, i1, i2, i3])
                    B[n, c, vi, vj] = A[n, c, vi//4 + vj//4, vj//2]

    Create the schedule and cache_index:

    .. code-block:: python

        sch = tir.Schedule(resize)
        block_a = sch.get_block("A")
        sch.cache_index(block_a, "global", 1)
        print(sch.mod["main"].script())

    After applying cache_index, the IR becomes:

    .. code-block:: python

        @T.prim_func
        def resize_cache_index(
            A: T.Buffer((1, 3, 40, 40), "float32"), B: T.Buffer((1, 3, 80, 80), "float32")
        ) -> None:
            index_var_0 = T.alloc_buffer([80, 80], dtype="int32", strides=[1])
            index_var_1 = T.alloc_buffer([80], dtype="int32", strides=[1])
            for ax0, ax1 in T.grid(80, 80):
                with T.block("index_0"):
                    v0 = T.axis.spatial(80, ax0)
                    v1 = T.axis.spatial(80, ax1)
                    T.reads()
                    T.writes(index_var_0[v0, v1])
                    index_var_0[v0, v1] = v0 // 4 + v1 // 4
            for ax0 in T.serial(80):
                with T.block("index_1"):
                    v0 = T.axis.spatial(80, ax0)
                    T.reads()
                    T.writes(index_var_1[v0])
                    index_var_1[v0] = v0 // 2
            for i0, i1, i2, i3 in T.grid(1, 3, 80, 80):
                with T.block("A"):
                    n, c, vi, vj = T.axis.remap("SSSS", [i0, i1, i2, i3])
                    T.reads(A[n, c, vi // 4 + vj // 4, vj // 2])
                    T.writes(B[n, c, vi, vj])
                    B[n, c, vi, vj] = A[n, c, index_var_0[vi, vj], index_var_1[vj]]
    """
    block = self._normalize_block_arg(block)
    return _ffi_api.ScheduleCacheIndex(  # type: ignore # pylint: disable=no-member
        self, block, storage_scope, cse_thresh
    )
@type_checked
def reindex(
    self, block: Union[BlockRV, str], buffer: Union[Tuple[str, int], str, Buffer]
) -> BlockRV:
    """Create a block that read/write a buffer region into a read/write cache with reindexing.
    The layout of the cache will be the same as by the iterators of the block that reads/writes
    the buffer. It requires:

    1) There is only one block who reads/writes the target buffer

    2) There is only one buffer load/store of this buffer in the block

    Parameters
    ----------
    block : Union[BlockRV, str]
        The block that accesses the target buffer. If a string,
        this must uniquely identify a block.
    buffer : Union[Tuple[str,int], Buffer, str]
        The buffer to be transformed, or a specification of how to
        identify the buffer to be transformed.

        If `buffer` if a tuple of ``(str,int)``, the first item
        should be either "read" or "write", and the second item is
        an index into the block's read or write regions.

        If `buffer` is a string, it is the name of the buffer,
        which must exist within the reads/writes of the block. In
        addition, the reads/writes of the block may not contain
        more than one buffer with this name.

        If `buffer` is a Buffer object, it must exist within the
        reads/writes of the block.

    Returns
    -------
    reindex_block : BlockRV
        The block of the reindex stage

    Examples
    --------
    Before reindex, in TensorIR, the IR is:

    .. code-block:: python

        @T.prim_func
        def before_reindex(
            A: T.Buffer((128, 128), "float32"),
            B: T.Buffer((128, 128), "float32")
        ) -> None:
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A[vj, vi] * 2.0

    Create the schedule and do reindex:

    .. code-block:: python

        sch = tir.Schedule(before_reindex)
        block = sch.get_block("B")
        sch.reindex(block, ("read", 0))

    After applying reindex, the IR becomes:

    .. code-block:: python

        @T.prim_func
        def after_reindex(
            A: T.Buffer((128, 128), "float32"),
            B: T.Buffer((128, 128), "float32")
        ) -> None:
            A_reindex = T.alloc_buffer((128, 128), "float32")
            for i, j in T.grid(128, 128):
                with T.block("A_reindex"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    A_reindex[vi, vj] = A[vj, vi]
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A_reindex[vi, vj] * 2.0
    """
    block = self._normalize_block_arg(block)
    index_type, buffer_index, _ = self._normalize_buffer_arg(block, buffer)
    assert index_type in ("read", "write"), "Invalid buffer_index_type"
    # The FFI side encodes the region kind as an integer: 0 = read, 1 = write.
    index_type_enum = {"read": 0, "write": 1}[index_type]
    # pylint: disable-next=no-member
    return _ffi_api.ScheduleReIndex(  # type: ignore
        self, block, buffer_index, index_type_enum
    )
########## Schedule: Data movement ##########
def read_at(
    self, loop: LoopRV, block: BlockRV, read_buffer_index: int, storage_scope: str
) -> BlockRV:
    """Create a data-movement stage for the `read_buffer_index`-th read buffer of
    `block` at `loop`, delegating to the `ScheduleReadAt` FFI primitive.

    NOTE(review): unlike the other schedule primitives this method is not
    decorated with ``@type_checked`` -- confirm whether that is intentional.
    The exact semantics live on the C++ side; only the FFI delegation is
    visible here.

    Parameters
    ----------
    loop : LoopRV
        The loop at which the data movement is placed.
    block : BlockRV
        The block whose read buffer is staged.
    read_buffer_index : int
        The index of the buffer in the block's read region.
    storage_scope : str
        The target storage scope.

    Returns
    -------
    result : BlockRV
        The block returned by the FFI call.
    """
    return _ffi_api.ScheduleReadAt(  # type: ignore # pylint: disable=no-member
        self, loop, block, read_buffer_index, storage_scope
    )
def write_at(
    self, loop: LoopRV, block: BlockRV, write_buffer_index: int, storage_scope: str
) -> BlockRV:
    """Create a data-movement stage for the `write_buffer_index`-th write buffer of
    `block` at `loop`, delegating to the `ScheduleWriteAt` FFI primitive.

    NOTE(review): unlike the other schedule primitives this method is not
    decorated with ``@type_checked`` -- confirm whether that is intentional.
    The exact semantics live on the C++ side; only the FFI delegation is
    visible here.

    Parameters
    ----------
    loop : LoopRV
        The loop at which the data movement is placed.
    block : BlockRV
        The block whose write buffer is staged.
    write_buffer_index : int
        The index of the buffer in the block's write region.
    storage_scope : str
        The target storage scope.

    Returns
    -------
    result : BlockRV
        The block returned by the FFI call.
    """
    return _ffi_api.ScheduleWriteAt(  # type: ignore # pylint: disable=no-member
        self, loop, block, write_buffer_index, storage_scope
    )
########## Schedule: Compute location ##########
@type_checked
def compute_at(
    self,
    block: Union[BlockRV, str],
    loop: LoopRV,
    preserve_unit_loops: bool = False,
    index: int = -1,
) -> None:
    """Compute-At. Move a producer block under the specific loop, and regenerate the
    loops induced by the block so that the buffer region produced by the producer block could
    cover those regions consumed by its consumer blocks under the given loop. It requires:

    1) `block` and `loop` are under the same scope, `loop` is not the ancestor of `block`

    2) The scope block has stage-pipeline property

    3) The subtree of the scope block, where the given block is in, satisfies the compact
    dataflow condition. i.e. all the blocks in the scope block's subtree must be either
    complete block or reduction block

    4) The block is not an output block with regard to the scope block, i.e. the buffers written
    by the block are allocated under the scope block

    5) All the consumers of the block are under the given loop

    Parameters
    ----------
    block : Union[BlockRV, str]
        The block to be moved
    loop : LoopRV
        The loop where the block to be moved under
    preserve_unit_loops : bool
        Whether to keep the trivial loops whose extents are 1
    index : int
        The block index of the loop body subtree blocks:

        - `index = -1` means inserted into the last possible insertion point;
        - `index = -2` means inserted into the first possible insertion point;
        - Otherwise, `index` is a nonnegative number that indicates the insertion point

    Examples
    --------
    Before compute-at, in TensorIR, the IR is:

    .. code-block:: python

        @T.prim_func
        def before_compute_at(a: T.handle, c: T.handle) -> None:
            A = T.match_buffer(a, (128, 128), "float32")
            B = T.alloc_buffer((128, 128), "float32")
            C = T.match_buffer(c, (128, 128), "float32")
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A[vi, vj] * 2.0
            for i, j in T.grid(128, 128):
                with T.block("C"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    C[vi, vj] = B[vi, vj] + 1.0

    Create the schedule and do compute-at:

    .. code-block:: python

        sch = tir.Schedule(before_compute_at)
        block = sch.get_block("B")
        loop, _ = sch.get_loops(sch.get_block("C"))
        sch.compute_at(block, loop, preserve_unit_loops=False)
        print(sch.mod["main"].script())

    After applying compute-at, the IR becomes:

    .. code-block:: python

        @T.prim_func
        def after_compute_at(a: T.handle, c: T.handle) -> None:
            A = T.match_buffer(a, (128, 128), "float32")
            B = T.alloc_buffer((128, 128), "float32")
            C = T.match_buffer(c, (128, 128), "float32")
            for i in T.serial(0, 128):
                for j in T.serial(0, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B[vi, vj] = A[vi, vj] * 2.0
                for j in T.serial(0, 128):
                    with T.block("C"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        C[vi, vj] = B[vi, vj] + 1.0
    """
    block = self._normalize_block_arg(block)
    _ffi_api.ScheduleComputeAt(  # type: ignore # pylint: disable=no-member
        self, block, loop, preserve_unit_loops, index
    )
@type_checked
def reverse_compute_at(
    self,
    block: Union[BlockRV, str],
    loop: LoopRV,
    preserve_unit_loops: bool = False,
    index: int = -1,
) -> None:
    """Reverse-Compute-At. Move a consumer block under the specific loop, and regenerate the
    loops induced by the block so that the buffer region consumed by the consumer block could
    cover those regions produced by its producer blocks under the given loop. It requires:

    1) `block` and `loop` are under the same scope, `loop` is not the ancestor of `block`

    2) The scope block has stage-pipeline property

    3) The subtree of the scope block, where the given block is in, satisfies the compact
    dataflow condition. i.e. all the blocks in the scope block's subtree must be either
    complete block or reduction block

    4) All the producers of the block are under the given loop

    Parameters
    ----------
    block : Union[BlockRV, str]
        The block to be moved
    loop : LoopRV
        The loop where the block to be moved under
    preserve_unit_loops : bool
        Whether to keep the trivial loops whose extents are 1
    index : int
        The block index of the loop body subtree blocks:

        - `index = -1` means inserted into the last possible insertion point;
        - `index = -2` means inserted into the first possible insertion point;
        - Otherwise, `index` is a nonnegative number that indicates the insertion point

    Examples
    --------
    Before reverse-compute-at, in TensorIR, the IR is:

    .. code-block:: python

        @T.prim_func
        def before_reverse_compute_at(a: T.handle, c: T.handle) -> None:
            A = T.match_buffer(a, (128, 128), "float32")
            B = T.alloc_buffer((128, 128), "float32")
            C = T.match_buffer(c, (128, 128), "float32")
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A[vi, vj] * 2.0
            for i, j in T.grid(128, 128):
                with T.block("C"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    C[vi, vj] = B[vi, vj] + 1.0

    Create the schedule and do reverse-compute-at:

    .. code-block:: python

        sch = tir.Schedule(before_reverse_compute_at)
        block = sch.get_block("C")
        loop, _ = sch.get_loops(sch.get_block("B"))
        sch.reverse_compute_at(block, loop, preserve_unit_loops=False)
        print(sch.mod["main"].script())

    After applying reverse-compute-at, the IR becomes:

    .. code-block:: python

        @T.prim_func
        def after_reverse_compute_at(a: T.handle, c: T.handle) -> None:
            A = T.match_buffer(a, (128, 128), "float32")
            B = T.alloc_buffer((128, 128), "float32")
            C = T.match_buffer(c, (128, 128), "float32")
            for i in T.serial(0, 128):
                for j in T.serial(0, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B[vi, vj] = A[vi, vj] * 2.0
                for j in T.serial(0, 128):
                    with T.block("C"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        C[vi, vj] = B[vi, vj] + 1.0
    """
    block = self._normalize_block_arg(block)
    _ffi_api.ScheduleReverseComputeAt(  # type: ignore # pylint: disable=no-member
        self, block, loop, preserve_unit_loops, index
    )
@type_checked
def compute_inline(self, block: Union[BlockRV, str]) -> None:
    """Inline a block into its consumer(s). It requires:

    1) The block is a complete non-root block, which only produces one buffer

    2) The block must not be the only leaf in the scope.

    3) The body of the block must be a BufferStore statement in
    the form of, ``A[i, j, k, ...] = ...`` where the indices of
    the LHS are all distinct atomic variables, and no variables
    other than those indexing variables are allowed in the
    statement.

    Parameters
    ----------
    block : Union[BlockRV, str]
        The block to be inlined to its consumer(s)

    Examples
    --------
    Before compute-inline, in TensorIR, the IR is:

    .. code-block:: python

        @T.prim_func
        def before_inline(a: T.handle, c: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.alloc_buffer((128, 128))
            C = T.match_buffer(c, (128, 128))
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A[vi, vj] * 2.0
            for i, j in T.grid(128, 128):
                with T.block("C"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    C[vi, vj] = B[vi, vj] + 1.0

    Create the schedule and do compute-inline:

    .. code-block:: python

        sch = tir.Schedule(before_inline)
        sch.compute_inline(sch.get_block("B"))
        print(sch.mod["main"].script())

    After applying compute-inline, the IR becomes:

    .. code-block:: python

        @T.prim_func
        def after_inline(a: T.handle, c: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            C = T.match_buffer(c, (128, 128))
            for i, j in T.grid(128, 128):
                with T.block("C"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    C[vi, vj] = A[vi, vj] * 2.0 + 1.0
    """
    block = self._normalize_block_arg(block)
    _ffi_api.ScheduleComputeInline(self, block)  # type: ignore # pylint: disable=no-member
@type_checked
def reverse_compute_inline(self, block: Union[BlockRV, str]) -> None:
    """Inline a block into its only producer. It requires:

    1) The block is a complete non-root block, which only produces and consumes one buffer

    2) The block must not be the only leaf in the scope.

    3) The only producer of the block is a read-after-write producer and a
    complete non-root block

    4) The body of the block must be a BufferStore statement in the form of,
    ``B[f(i, j, k, ...)] = g(i, j, k, A[i, j, k, ...] ...)`` where the
    indices of each `BufferLoad` on the RHS are all distinct atomic
    variables, and no variables other than those indexing variables are
    allowed in the statement.

    Parameters
    ----------
    block : Union[BlockRV, str]
        The block to be inlined to its producer

    Examples
    --------
    Before reverse-compute-inline, in TensorIR, the IR is:

    .. code-block:: python

        @T.prim_func
        def before_inline(a: T.handle, c: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            B = T.alloc_buffer((128, 128))
            C = T.match_buffer(c, (128, 128))
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A[vi, vj] * 2.0
            for i, j in T.grid(128, 128):
                with T.block("C"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    C[vi, vj] = B[vi, vj] + 1.0

    Create the schedule and do reverse-compute-inline:

    .. code-block:: python

        sch = tir.Schedule(before_inline)
        sch.reverse_compute_inline(sch.get_block("C"))
        print(sch.mod["main"].script())

    After applying reverse-compute-inline, the IR becomes:

    .. code-block:: python

        @T.prim_func
        def after_inline(a: T.handle, c: T.handle) -> None:
            A = T.match_buffer(a, (128, 128))
            C = T.match_buffer(c, (128, 128))
            for i, j in T.grid(128, 128):
                with T.block("C"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    C[vi, vj] = A[vi, vj] * 2.0 + 1.0
    """
    block = self._normalize_block_arg(block)
    # pylint: disable-next=no-member
    _ffi_api.ScheduleReverseComputeInline(self, block)  # type: ignore
########## Schedule: Reduction ##########
    @type_checked
    def decompose_reduction(self, block: Union[BlockRV, str], loop: LoopRV) -> BlockRV:
        """Decompose a reduction block into two separate blocks.
        a) The init block, which is translated from the init statement of the reduction block;
        b) The update block, which is the original block without init statement.
        The init block is inserted right before the given loop.
        The schedule primitive requires:
        1) The input block is a reduction block.
        2) The input loop is the ancestor of the block.
        3) The input loop is not lower than all the loops related to reduce block var.
        Parameters
        ----------
        block : Union[BlockRV, str]
            The reduction block to be decomposed
        loop : LoopRV
            The loop above which the init block is inserted before.
        Returns
        -------
        init_block : BlockRV
            The init block
        Examples
        --------
        Before decompose-reduction, in TensorIR, the IR is:
        .. code-block:: python
            @T.prim_func
            def before_decompose(a: ty.handle, b: ty.handle, c: ty.handle) -> None:
                A = tir.match_buffer(a, [128, 128])
                B = tir.match_buffer(b, [128, 128])
                C = tir.match_buffer(c, [128, 128])
                for i, j, k in tir.grid(128, 128, 128):
                    with tir.block([128, 128, tir.reduce_axis(0, 128)], "C") as [vi, vj, vk]:
                        with tir.init():
                            C[vi, vj] = 0.0
                        C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
        Create the schedule and do decompose-reduction with specified loop:
        .. code-block:: python
            sch = tir.Schedule(before_decompose)
            C = sch.get_block("C")
            i, j, k = sch.get_loops(C)
            sch.decompose_reduction(C, i)
            print(sch.mod["main"].script())
        After applying decompose-reduction, the IR becomes:
        .. code-block:: python
            @T.prim_func
            def after_decompose(a: ty.handle, b: ty.handle, c: ty.handle) -> None:
                A = tir.match_buffer(a, [128, 128])
                B = tir.match_buffer(b, [128, 128])
                C = tir.match_buffer(c, [128, 128])
                for i in tir.serial(128):
                    for j in tir.serial(128):
                        with tir.block([128, 128]) as [vi, vj]:
                            C[vi, vj] = 0.0
                for i, j, k in tir.grid(128, 128, 128):
                    with tir.block([128, 128, tir.reduce_axis(0, 128)], "C") as [vi, vj, vk]:
                        C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
        """
        block = self._normalize_block_arg(block)
        # pylint: disable-next=no-member
        return _ffi_api.ScheduleDecomposeReduction(self, block, loop)  # type: ignore
    @type_checked
    def rfactor(self, loop: LoopRV, factor_axis: int) -> BlockRV:
        """Factorize an associative reduction block by the specified loop.
        An associative reduction cannot be parallelized directly,
        because it leads to potential race condition during accumulation.
        Alternatively, the reduction could be factorized on a loop with the following steps:
        - Step 1: evenly slice the reduction into `n` separate chunks, where `n` is the loop extent
        - Step 2: compute the chunks separately and write the result into `n` intermediate buffers;
        - Step 3: accumulate the `n` separate buffers into the result buffer.
        Note that the Step 2 above introduces opportunities for parallelization.
        RFactor is a schedule primitive that implements the transformation described above:
        Given a block that writes to buffer `B`, it factorizes a loop of extent `n`.
        For example, the pseudocode below accumulates `B[i] = sum(A[i, : , : ])`:
        .. code-block:: python
            for i in range(128):                    # loop i is a data parallel loop
                for j in range(128):                # loop j is a reduction loop
                    for k in range(128):            # loop k is a reduction loop
                        B[i] = B[i] + A[i, j, k]
        Suppose RFactor is applied on the innermost loop `k` and `factor_axis = 1`.
        RFactor then creates an intermediate buffer and two blocks.
        1. The intermediate buffer, or "rf-buffer" is a buffer of rank `ndim(B) + 1` and
        size `size(B) * n`, whose shape expands from `shape(B)` by adding an axis of `n`
        at the position specified by `factor_axis`. For example,
            * shape(B) = [1, 2, 3], factor_axis = 0  => shape(B_rf) = [n, 1, 2, 3]
            * shape(B) = [1, 2, 3], factor_axis = 1  => shape(B_rf) = [1, n, 2, 3]
            * shape(B) = [1, 2, 3], factor_axis = 2  => shape(B_rf) = [1, 2, n, 3]
            * shape(B) = [1, 2, 3], factor_axis = 3  => shape(B_rf) = [1, 2, 3, n]
        2. The rfactor block, or "rf-block", is a block that writes to the `rf-buffer` without
        accumulating over the loop `k`, i.e. the loop `k` is converted from a reduction loop
        to a data parallel loop. In our example, the rf-block is:
        .. code-block:: python
            B_rf = np.zeros((128, 128))     # the rf-buffer
            for k in range(128):            # loop k is converted to a data parallel loop
                for i in range(128):        # loop i is a data parallel loop (unchanged)
                    for j in range(128):    # loop j is a reduction loop (unchanged)
                        B_rf[i, k] = B_rf[i, k] + A[i, j, k]
        3. The write-back block, or `wb-block`, is a block that accumulates the rf-buffer into
        the result buffer. All the reduction loops are removed except the loop `k` for accumulation.
        In our example, the wb-block is:
        .. code-block:: python
            for i in range(128):            # loop i is a data parallel loop (unchanged)
                                            # loop j is removed because it is a reduction loop
                for k in range(128):        # loop k is a reduction loop (unchanged)
                    B[i] = B[i] + B_rf[i, k]
        Parameters
        ----------
        loop : LoopRV
            The loop outside block for which we want to do rfactor
        factor_axis : int
            The position where the new dimension is placed in the new introduced rfactor buffer
        Returns
        -------
        rf_block : BlockRV
            The block which computes partial results over each slice (i.e., the first block
            as described in the above illustration)
        Examples
        --------
        Before rfactor, in TensorIR, the IR is:
        .. code-block:: python
            @T.prim_func
            def before_rfactor(a: T.handle, b: T.handle) -> None:
                A = T.match_buffer(a, (128, 128, 128))
                B = T.match_buffer(b, (128,))
                for ii, i, j in T.grid(128, 128, 128):
                    with T.block("B"):
                        vii, vi, vj = T.axis.remap("SRR", [ii, i, j])
                        with T.init():
                            B[vii] = 0.0
                        B[vii] = B[vii] + A[vii, vi, vj]
        Create the schedule and do rfactor:
        .. code-block:: python
            sch = tir.Schedule(before_rfactor)
            _, _, k = sch.get_loops(sch.get_block("B"))
            sch.rfactor(k, 0)
            print(sch.mod["main"].script())
        After applying rfactor, the IR becomes:
        .. code-block:: python
            @T.prim_func
            def after_rfactor(a: T.handle, b: T.handle) -> None:
                A = T.match_buffer(a, [128, 128, 128])
                B = T.match_buffer(b, [128])
                B_rf = T.alloc_buffer([128, 128])
                for i2, ii, i in T.grid(128, 128, 128):
                    with T.block("B_rf"):
                        vi2, vii, vi = T.axis.remap("SSR", [i2, ii, i])
                        with T.init():
                            B_rf[vi2, vii] = 0.0
                        B_rf[vi2, vii] = (B_rf[vi2, vii] + A[vii, vi, vi2])
                for ii, i2 in T.grid(128, 128):
                    with T.block("B"):
                        vii, vi2 = T.axis.remap("SR", [ii, i2])
                        with T.init():
                            B[vii] = 0.0
                        B[vii] = B[vii] + B_rf[vi2, vii]
        Note
        ----
        Rfactor requires:
        1) `loop` has only one child block, and it is a reduction block;
        2) `loop` is a reduction loop, i.e. the loop variable is bound to only reduction variables
        in the block binding;
        3) `loop` is not parallelized, vectorized, unrolled or bound to any thread axis;
        4) The block scope that `loop` is in is a staged-pipeline;
        5) The outermost loop outside the reduction block should have the reduction block as its
        first child block;
        6) The outermost reduction loop should have only one child block;
        7) A unary extent loop that is not bound to any reduction or data parallel variables in
        the block binding should not appear under some reduction loop;
        8) The reduction block should write to only one buffer, and its init and body are both
        simple `BufferStore`s, and the pattern is registered as an associative reducer.
        The pre-defined patterns include: plus, multiplication, min and max;
        9) Each of the loops on top of the block cannot be bound to a data parallel and a
        reduction block binding at the same time;
        10) `factor_axis` should be in range `[-ndim(B) - 1, ndim(B)]`,
        where `B` is the buffer that the reduction block writes to.
        Negative indexing is normalized according to numpy convention.
        """
        # pylint: disable-next=no-member
        return _ffi_api.ScheduleRFactor(self, loop, factor_axis)  # type: ignore
######## Schedule: Block annotation ########
@type_checked
def storage_align( # pylint: disable=too-many-arguments
self, block: Union[BlockRV, str], buffer_index: int, axis: int, factor: int, offset: int
) -> None:
"""Set alignment requirement for specific dimension such that
stride[axis] == k * factor + offset for some k. This is useful to set memory layout for more
friendly memory access pattern. For example, we can set alignment to be factor=2, offset=1
to avoid bank conflict for thread access on higher dimension in GPU shared memory.
Parameters
----------
block : Union[BlockRV, str]
The producer block of the buffer.
buffer_index : int
The index of the buffer in block's write region.
axis : int
The dimension to be specified for alignment.
factor : int
The factor multiple of alignment.
offset : int
The required offset factor.
Examples
--------
Before storage_align, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_storage_align(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
Create the schedule and do storage_align:
.. code-block:: python
sch = tir.Schedule(before_storage_align)
sch.storage_align(sch.get_block("B"), buffer_index=0, axis=0, factor=128, offset=1)
print(sch.mod["main"].script())
After applying storage_align, the IR becomes:
.. code-block:: python
@T.prim_func
def after_storage_align(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
T.block_attr({"buffer_dim_align": [[[0, 128, 1]]]})
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
After lowering passes, buffer B will have strides as [129, 1].
Note
----
Storage_align requires the buffer to be an intermediate buffer defined via `alloc_buffer`.
"""
block = self._normalize_block_arg(block)
_ffi_api.ScheduleStorageAlign( # type: ignore # pylint: disable=no-member
self, block, buffer_index, axis, factor, offset
)
    @type_checked
    def set_scope(
        self, block: Union[BlockRV, str], buffer_index: Union[int, str, Buffer], storage_scope: str
    ) -> None:
        """Set the storage scope of a buffer, where the buffer is
        specified by a block and a write-index.
        Parameters
        ----------
        block : Union[BlockRV, str]
            The producer block of the buffer
        buffer_index : Union[int, str, Buffer]
            The index of the buffer in block's write region; a buffer name or
            Buffer object is also accepted and resolved to a write-index.
        storage_scope : str
            The storage scope to be set
        Examples
        --------
        Before set_scope, in TensorIR, the IR is:
        .. code-block:: python
            @T.prim_func
            def before_set_scope(
                A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")
            ) -> None:
                B = T.alloc_buffer((128, 128), dtype="float32")
                for i, j in T.grid(128, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B[vi, vj] = A[vi, vj] * 2.0
                for i, j in T.grid(128, 128):
                    with T.block("C"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        C[vi, vj] = B[vi, vj] + 1.0
        Create the schedule and do set_scope:
        .. code-block:: python
            sch = tir.Schedule(before_set_scope)
            sch.set_scope(sch.get_block("B"), buffer_index=0, storage_scope="shared")
            print(sch.mod["main"].script())
        After applying set_scope, the IR becomes:
        .. code-block:: python
            @T.prim_func
            def after_set_scope(
                A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")
            ) -> None:
                B_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
                for i, j in T.grid(128, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B_shared[vi, vj] = A[vi, vj] * T.float32(2)
                for i, j in T.grid(128, 128):
                    with T.block("C"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        C[vi, vj] = B_shared[vi, vj] + T.float32(1)
        Note
        ----
        `set_scope` requires the buffer to be an intermediate buffer defined via `alloc_buffer`.
        """
        block = self._normalize_block_arg(block)
        # A name or Buffer object must be resolved to an index into the write region.
        if not isinstance(buffer_index, int):
            _, buffer_index, _ = self._normalize_buffer_arg(
                block, buffer_index, required_buffer_type="write"
            )
        _ffi_api.ScheduleSetScope(  # type: ignore # pylint: disable=no-member
            self, block, buffer_index, storage_scope
        )
    @type_checked
    def unsafe_set_dtype(self, block: Union[BlockRV, str], buffer_index: int, dtype: str) -> None:
        """Set the data type of a buffer, where the buffer is
        specified by a block and write-index.
        This schedule primitive is unsafe and may change the correctness of program because of
        type conversion, please use with caution.
        Parameters
        ----------
        block : Union[BlockRV, str]
            The producer block of the buffer
        buffer_index : int
            The index of the buffer in block's write region
        dtype : str
            The data type to be set
        Examples
        --------
        Before unsafe_set_dtype, in TensorIR, the IR is:
        .. code-block:: python
            @T.prim_func
            def before_set_dtype(
                A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")
            ) -> None:
                B = T.alloc_buffer((128, 128), dtype="float32")
                for i, j in T.grid(128, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B[vi, vj] = A[vi, vj] * 2.0
                for i, j in T.grid(128, 128):
                    with T.block("C"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        C[vi, vj] = B[vi, vj] + 1.0
        Create the schedule and do unsafe_set_dtype:
        .. code-block:: python
            sch = tir.Schedule(before_set_dtype)
            sch.unsafe_set_dtype("B", buffer_index=0, dtype="float16")
            print(sch.mod["main"].script())
        After applying set_dtype, the IR becomes:
        .. code-block:: python
            @T.prim_func
            def after_set_dtype(
                A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")
            ) -> None:
                B = T.alloc_buffer((128, 128), dtype="float16")
                for i, j in T.grid(128, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B[vi, vj] = T.cast(A[vi, vj] * 2.0, "float16")
                for i, j in T.grid(128, 128):
                    with T.block("C"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        C[vi, vj] = T.cast(B[vi, vj], "float32") + 1.0
        Note
        ----
        `unsafe_set_dtype` requires the buffer to be an intermediate buffer defined via
        `alloc_buffer`.
        """
        block = self._normalize_block_arg(block)
        _ffi_api.ScheduleUnsafeSetDType(  # type: ignore # pylint: disable=no-member
            self, block, buffer_index, dtype
        )
########## Schedule: Blockize & Tensorize ##########
    @type_checked
    def blockize(
        self, target: Union[LoopRV, List[BlockRV]], preserve_unit_iters: bool = True
    ) -> BlockRV:
        """Convert multiple blocks or the subtree rooted at a specific loop into a block.
        Parameters
        ----------
        target : LoopRV or List[BlockRV]
            The root of the subtree or the specified blocks.
        preserve_unit_iters : bool
            Whether or not to preserve unit iterators in block bindings
        Returns
        -------
        result : BlockRV
            The new block.
        Examples
        --------
        Before blockize, in TensorIR, the IR is:
        .. code-block:: python
            @T.prim_func
            def before_blockize(
                A: T.Buffer((128, 128), "float32"),
                B: T.Buffer((128, 128), "float32")
            ) -> None:
                for i_0, j_0, i_1, j_1 in T.grid(8, 8, 16, 16):
                    with T.block("B"):
                        vi = T.axis.spatial(128, i_0 * 16 + i_1)
                        vj = T.axis.spatial(128, j_0 * 16 + j_1)
                        T.reads(A[vi, vj])
                        T.writes(B[vi, vj])
                        B[vi, vj] = A[vi, vj] * T.float32(2)
        Create the schedule and do blockize:
        .. code-block:: python
            sch = tir.Schedule(before_blockize)
            B = sch.get_block("B")
            _, _, i1, _ = sch.get_loops(B)
            sch.blockize(i1)
            print(sch.mod["main"].script())
        After applying blockize, the IR becomes:
        .. code-block:: python
            @T.prim_func
            def after_blockize(
                A: T.Buffer((128, 128), "float32"),
                B: T.Buffer((128, 128), "float32")
            )-> None:
                for i_0, j_0 in T.grid(8, 8):
                    with T.block("B_o"):
                        vio, vjo = T.axis.remap("SS", [i_0, j_0])
                        T.reads(A[vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16])
                        T.writes(B[vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16])
                        for i_1, j_1 in T.grid(16, 16):
                            with T.block("B"):
                                vi, vj = T.axis.remap("SS", [i_1, j_1])
                                T.reads(A[vio * 16 + vi, vjo * 16 + vj])
                                T.writes(B[vio * 16 + vi, vjo * 16 + vj])
                                B[vio * 16 + vi, vjo * 16 + vj] = A[vio * 16 + vi, vjo * 16 + vj] \
                                    * T.float32(2)
        Note
        ----
        blockize requires there is exactly one block under the given loop and the bindings of the
        block are divisible by the subspace represented by the loops starting at the given loop.
        """
        # pylint: disable-next=no-member
        return _ffi_api.ScheduleBlockize(self, target, preserve_unit_iters)  # type: ignore
    @type_checked
    def tensorize(
        self,
        block_or_loop: Union[BlockRV, LoopRV],
        tensor_intrin: str,
        preserve_unit_iters: bool = True,
    ) -> None:
        """Tensorize the computation enclosed by loop with the tensor intrinsic.
        Parameters
        ----------
        block_or_loop : Union[BlockRV, LoopRV]
            The block or loop to be tensorized.
        tensor_intrin : str
            The name of a tensor intrin registered via `tir.TensorIntrin.register`.
        preserve_unit_iters : bool
            Whether or not to preserve unit iterators in block bindings
        Examples
        --------
        Before tensorize, in TensorIR, the IR is:
        .. code-block:: python
            @T.prim_func
            def before_tensorize(
                A: T.Buffer((128, 128), "float32"),
                B: T.Buffer((128, 128), "float32"),
                C: T.Buffer((128, 128), "float32"),
            ) -> None:
                # body
                # with T.block("root")
                for i_0, j_0, k_0, i_1, j_1, k_1 in T.grid(8, 8, 8, 16, 16, 16):
                    with T.block("update"):
                        vi = T.axis.spatial(128, i_0 * 16 + i_1)
                        vj = T.axis.spatial(128, j_0 * 16 + j_1)
                        vk = T.axis.reduce(128, k_0 * 16 + k_1)
                        T.reads(C[vi, vj], A[vi, vk], B[vj, vk])
                        T.writes(C[vi, vj])
                        C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
        Declare and register the tensor intrinsic:
        .. code-block:: python
            @T.prim_func
            def mma_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
                A = T.match_buffer(a, (16, 16), align=128, offset_factor=1)
                B = T.match_buffer(b, (16, 16), align=128, offset_factor=1)
                C = T.match_buffer(c, (16, 16), align=128, offset_factor=1)
                with T.block("root"):
                    T.reads(C[0 : 16, 0 : 16], A[0 : 16, 0 : 16], B[0 : 16, 0 : 16])
                    T.writes(C[0 : 16, 0 : 16])
                    for i, j, k in T.grid(16, 16, 16):
                        with T.block("update"):
                            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
            @T.prim_func
            def mma_intrin(a: T.handle, b: T.handle, c: T.handle) -> None:
                A = T.match_buffer(a, (16, 16), align=128, offset_factor=1)
                B = T.match_buffer(b, (16, 16), align=128, offset_factor=1)
                C = T.match_buffer(c, (16, 16), align=128, offset_factor=1)
                with T.block("root"):
                    T.reads(C[0 : 16, 0 : 16], A[0 : 16, 0 : 16], B[0 : 16, 0 : 16])
                    T.writes(C[0 : 16, 0 : 16])
                    T.evaluate(
                        T.tvm_mma_sync(
                            C.data,
                            C.elem_offset // 256,
                            A.data,
                            A.elem_offset // 256,
                            B.data,
                            B.elem_offset // 256,
                            C.data,
                            C.elem_offset // 256,
                            dtype="handle",
                        )
                    )
            tir.TensorIntrin.register("test_mma_intrin", mma_desc, mma_intrin)
        Create the schedule and do tensorize:
        .. code-block:: python
            sch = tir.Schedule(before_tensorize)
            update = sch.get_block("update")
            _, _, _, i1, _, _ = sch.get_loops(update)
            sch.tensorize(i1, "test_mma_intrin")
            print(sch.mod["main"].script())
        After applying tensorize, the IR becomes:
        .. code-block:: python
            @T.prim_func
            def after_tensorize(
                A: T.Buffer((128, 128), "float32"),
                B: T.Buffer((128, 128), "float32"),
                C: T.Buffer((128, 128), "float32"),
            ) -> None:
                # body
                # with T.block("root")
                for i_0, j_0, k_0 in T.grid(8, 8, 8):
                    with T.block("update_o"):
                        vio, vjo, vko = T.axis.remap("SSR", [i_0, j_0, k_0])
                        T.reads(
                            C[vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16],
                            A[vio * 16 : vio * 16 + 16, vko * 16 : vko * 16 + 16],
                            B[vjo * 16 : vjo * 16 + 16, vko * 16 : vko * 16 + 16],
                        )
                        T.writes(C[vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16])
                        A_1 = T.match_buffer(
                            A[vio * 16 : vio * 16 + 16, vko * 16 : vko * 16 + 16],
                            [16, 16],
                            dtype="float32",
                            offset_factor=1,
                        )
                        B_1 = T.match_buffer(
                            B[vjo * 16 : vjo * 16 + 16, vko * 16 : vko * 16 + 16],
                            [16, 16],
                            dtype="float32",
                            offset_factor=1,
                        )
                        C_1 = T.match_buffer(
                            C[vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16],
                            [16, 16],
                            dtype="float32",
                            offset_factor=1,
                        )
                        T.evaluate(
                            T.tvm_mma_sync(
                                C_1.data,
                                C_1.elem_offset // 256,
                                A_1.data,
                                A_1.elem_offset // 256,
                                B_1.data,
                                B_1.elem_offset // 256,
                                C_1.data,
                                C_1.elem_offset // 256,
                                dtype="handle",
                            )
                        )
        """
        _ffi_api.ScheduleTensorize(  # type: ignore # pylint: disable=no-member
            self, block_or_loop, tensor_intrin, preserve_unit_iters
        )
########## Schedule: Annotation ##########
PrimAnnotationValueT = Union[str, int, float, ExprRV]
AnnotationValueT = Union[
PrimAnnotationValueT,
List[PrimAnnotationValueT],
Dict[str, Union[PrimAnnotationValueT, List[PrimAnnotationValueT]]],
]
    @type_checked
    def annotate(
        self, block_or_loop: Union[BlockRV, LoopRV], ann_key: str, ann_val: AnnotationValueT
    ) -> None:
        """Annotate a block/loop with a key value pair
        Parameters
        ----------
        block_or_loop: Union[BlockRV, LoopRV]
            The block/loop to be annotated
        ann_key : str
            The annotation key
        ann_val : AnnotationValueT
            The annotation value
        Examples
        --------
        Before annotate, in TensorIR, the IR is:
        .. code-block:: python
            @T.prim_func
            def before_annotate(a: T.handle, b: T.handle) -> None:
                A = T.match_buffer(a, (128, 128))
                B = T.match_buffer(b, (128, 128))
                for i, j in T.grid(128, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B[vi, vj] = A[vi, vj] * 2.0
        Create the schedule and do annotate:
        .. code-block:: python
            sch = tir.Schedule(before_annotate)
            sch.annotate(sch.get_block("B"), "ann_key", "ann_value")
            print(sch.mod["main"].script())
        After applying annotate, the IR becomes:
        .. code-block:: python
            @T.prim_func
            def after_annotate(a: T.handle, b: T.handle) -> None:
                A = T.match_buffer(a, (128, 128))
                B = T.match_buffer(b, (128, 128))
                for i, j in T.grid(128, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        T.block_attr({"ann_key": "ann_value"})
                        B[vi, vj] = A[vi, vj] * 2.0
        """
        # Convert plain Python scalars to TVM objects explicitly; the default
        # conversion would otherwise be applied on the FFI boundary.
        if isinstance(ann_val, str):
            ann_val = String(ann_val)
        elif isinstance(ann_val, int):
            ann_val = IntImm("int32", ann_val)
        elif isinstance(ann_val, float):
            ann_val = FloatImm("float32", ann_val)
        _ffi_api.ScheduleAnnotate(  # type: ignore # pylint: disable=no-member
            self, block_or_loop, ann_key, ann_val
        )
    @type_checked
    def unannotate(self, block_or_loop: Union[BlockRV, LoopRV], ann_key: str) -> None:
        """Unannotate a block/loop's annotation with key ann_key
        Parameters
        ----------
        block_or_loop: Union[BlockRV, LoopRV]
            The block/loop to be unannotated
        ann_key : str
            The annotation key
        Examples
        --------
        Before unannotate, in TensorIR, the IR is:
        .. code-block:: python
            @T.prim_func
            def before_unannotate(a: T.handle, b: T.handle) -> None:
                A = T.match_buffer(a, (128, 128))
                B = T.match_buffer(b, (128, 128))
                for i, j in T.grid(128, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        T.block_attr({"ann_key": "ann_value"})
                        B[vi, vj] = A[vi, vj] * 2.0
        Create the schedule and do unannotate:
        .. code-block:: python
            sch = tir.Schedule(before_unannotate)
            sch.unannotate(sch.get_block("B"), "ann_key")
            print(sch.mod["main"].script())
        After applying unannotate, the IR becomes:
        .. code-block:: python
            @T.prim_func
            def after_unannotate(a: T.handle, b: T.handle) -> None:
                A = T.match_buffer(a, (128, 128))
                B = T.match_buffer(b, (128, 128))
                for i, j in T.grid(128, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B[vi, vj] = A[vi, vj] * 2.0
        """
        _ffi_api.ScheduleUnannotate(  # type: ignore # pylint: disable=no-member
            self, block_or_loop, ann_key
        )
########## Schedule: Layout transformation ##########
def _normalize_block_arg(self, block: Union[BlockRV, str]) -> BlockRV:
if isinstance(block, str):
return self.get_block(block)
return block
def _normalize_buffer_arg(
self,
block: BlockRV,
buffer: Union[Tuple[str, int], int, str, Buffer],
required_buffer_type=None,
) -> Tuple[str, int, Buffer]:
block_obj: Block = self.get(block)
block_name = block_obj.name_hint
def iter_buffers():
for i, read in enumerate(block_obj.reads):
yield "read", i, read.buffer
for i, write in enumerate(block_obj.writes):
yield "write", i, write.buffer
if isinstance(buffer, int):
buffer = (required_buffer_type, buffer)
if isinstance(buffer, str):
possible_buffers = {}
# String lookup requires ensuring that the name is unique
for buffer_index_type, buffer_index, buf in iter_buffers():
if buf.name == buffer:
possible_buffers[buf] = (buffer_index_type, buffer_index)
assert possible_buffers, f"Could not find buffer '{buffer}' in block '{block_name}'"
assert (
len(possible_buffers) == 1
), f"Multiple buffers named '{buffer}' in block '{block_name}'"
buffer_obj, (buffer_index_type, buffer_index) = next(iter(possible_buffers.items()))
elif isinstance(buffer, Buffer):
# Buffer lookup has unique id, can break out early
found = False
for buffer_index_type, buffer_index, buffer_obj in iter_buffers():
if buffer_obj.same_as(buffer):
found = True
break
assert found, f"Could not find buffer '{buffer.name}' in block '{block_name}'"
elif isinstance(buffer, tuple):
buffer_index_type, buffer_index = buffer
assert buffer_index_type in ["read", "write"], (
f"Invalid buffer_index_type. "
f"Expected 'read' or 'write', "
f"but received {buffer_index_type}"
)
buffer_list = block_obj.reads if buffer_index_type == "read" else block_obj.writes
assert 0 <= buffer_index < len(buffer_list), (
f"Invalid buffer_index {buffer_index}. "
f"Block {block_name} has only "
f"{len(buffer_list)} {buffer_index_type} buffers."
)
buffer_obj = buffer_list[buffer_index].buffer
else:
raise TypeError(f"Invalid type for argument 'buffer': {type(buffer)}")
if required_buffer_type is not None:
assert buffer_index_type == required_buffer_type, (
f"Expected buffer to be read buffer, "
f"but {buffer_obj.name} was a {buffer_index_type} buffer "
f"in the specified block"
)
return (buffer_index_type, buffer_index, buffer_obj)
    @type_checked
    def transform_layout(
        self,
        block: Union[BlockRV, str],
        buffer: Union[Tuple[str, int], str, Buffer],
        index_map: Union[IndexMap, Callable],
        pad_value: Optional[Union[int, float, PrimExpr, IndexMap, Callable]] = None,
        *,
        assume_injective_transform: bool = False,
    ) -> None:
        """Apply a transformation represented by IndexMap to buffer
        Parameters
        ----------
        block : Union[BlockRV, str]
            The block that accesses the target buffer.  If a string,
            this must uniquely identify a block.
        buffer: Union[Tuple[str,int], Buffer, str]
            The buffer to be transformed, or a specification of how to
            identify the buffer to be transformed.
            If `buffer` is a tuple of ``(str,int)``, the first item
            should be either "read" or "write", and the second item is
            an index into the block's read or write regions.
            If `buffer` is a string, it is the name of the buffer,
            which must exist within the reads/writes of the block.  In
            addition, the reads/writes of the block may not contain
            more than one buffer with this name.
            If `buffer` is a Buffer object, it must exist within the
            reads/writes of the block.
        index_map : Union[IndexMap, Callable]
            The transformation to apply.
            If `index_map` is a callable, and the returned list
            contains IndexMap.AXIS_SEPARATOR, the SetAxisSeparators
            primitive will be called in addition to the
            TransformLayout primitive.
        pad_value: Optional[Union[int, float, PrimExpr, IndexMap, Callable]]
            The value to be used for any padding introduced by the
            transformation.  If the schedule contains a producer block
            for the specified buffer, the pad value will be written as
            part of the producer block if possible, or after the producer
            block otherwise.  Otherwise, if the buffer is an input, will
            insert an annotation block to state that the padding contains
            the known value.
            The pad value may not contain instances of BufferLoad,
            except where it loads a value from the buffer being
            transformed (e.g. to create a circular buffer with
            padding that consists of repeated elements).
            Note: If applied to an input buffer, the calling scope is
            responsible for ensuring that the pad_value is present.
            Algebraic simplifications, branch elimination, and other
            optimizations may assume that this precondition is met, and
            may result in incorrect results being returned.
            If None, the transformation may not introduce padding.
            If an int, float or PrimExpr, the transformation is the
            specific value to be present in the padding.
            If an IndexMap or Callable, the transformation is the
            value to be present in the padding in terms of the
            transformed index.
        assume_injective_transform : bool
            If set to true, the schedule primitive will assume the index_map is injective and skip
            checking overlapping of the mapped indices. This can be useful for complicated index_map
            that the analysis does not cover. It is the callers' responsibility to ensure the
            index map is injective, otherwise, the correctness of the schedule is not guaranteed.
        Examples
        --------
        Before transform_layout, in TensorIR, the IR is:
        .. code-block:: python
            @T.prim_func
            def before_transform_layout(a: T.handle, c: T.handle) -> None:
                A = T.match_buffer(a, (128, 128), "float32")
                B = T.alloc_buffer((128, 128), "float32")
                C = T.match_buffer(c, (128, 128), "float32")
                for i, j in T.grid(128, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B[vi, vj] = A[vi, vj] * 2.0
                for i, j in T.grid(128, 128):
                    with T.block("C"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        C[vi, vj] = B[vi, vj] + 1.0
        Create the schedule and do transform_layout:
        .. code-block:: python
            sch = tir.Schedule(before_transform_layout)
            sch.transform_layout(sch.get_block("B"), buffer=("write",0),
                                 index_map=lambda m, n: (m // 16, n // 16, m % 16, n % 16))
            print(sch.mod["main"].script())
        After applying transform_layout, the IR becomes:
        .. code-block:: python
            @T.prim_func
            def two_elementwise_transformed_intermediate_buffer(a: T.handle, c: T.handle) -> None:
                A = T.match_buffer(a, (128, 128), "float32")
                B = T.alloc_buffer((8, 8, 16, 16), "float32")
                C = T.match_buffer(c, (128, 128), "float32")
                for i, j in T.grid(128, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B[vi // 16, vj // 16, vi % 16, vj % 16] = A[vi, vj] * 2.0
                for i, j in T.grid(128, 128):
                    with T.block("C"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        C[vi, vj] = B[vi // 16, vj // 16, vi % 16, vj % 16] + 1.0
        """
        block = self._normalize_block_arg(block)
        buffer_index_type, buffer_index, buffer_obj = self._normalize_buffer_arg(block, buffer)
        ndim = len(buffer_obj.shape)
        if callable(index_map):
            index_map, axis_separators = IndexMap.from_func_with_separators(
                index_map,
                ndim=ndim,
                index_dtype=_get_block_default_dtype(self.get(block)),
            )
        else:
            axis_separators = []
        if pad_value is None:
            pass
        elif callable(pad_value):
            pad_value = IndexMap.from_func(
                pad_value,
                ndim=len(index_map.final_indices),
                index_dtype=_get_block_default_dtype(self.get(block)),
            )
        elif not isinstance(pad_value, IndexMap):
            # Explicitly convert python int/float arguments to the
            # buffer's type.  If the default `tvm.runtime.convert`
            # behavior is applied, these would be converted to
            # int32/float32, which may not match the buffer's type.
            if "int" in buffer_obj.dtype and isinstance(pad_value, int):
                pad_value = IntImm(buffer_obj.dtype, pad_value)
            elif "float" in buffer_obj.dtype and isinstance(pad_value, float):
                pad_value = FloatImm(buffer_obj.dtype, pad_value)
            pad_value = IndexMap.from_func(
                lambda *indices: pad_value,
                ndim=len(index_map.final_indices),
                index_dtype=_get_block_default_dtype(self.get(block)),
            )
        buffer_index_type_enum = 0 if buffer_index_type == "read" else 1
        _ffi_api.ScheduleTransformLayout(  # type: ignore # pylint: disable=no-member
            self,
            block,
            buffer_index,
            buffer_index_type_enum,
            index_map,
            pad_value,
            assume_injective_transform,
        )
        if axis_separators:
            _ffi_api.ScheduleSetAxisSeparator(  # type: ignore # pylint: disable=no-member
                self, block, buffer_index, buffer_index_type_enum, axis_separators
            )
    @type_checked
    def transform_block_layout(
        self, block: Union[BlockRV, str], index_map: Union[IndexMap, Callable]
    ) -> None:
        """Apply a transformation represented by IndexMap to block
        Parameters
        ----------
        block : Union[BlockRV, str]
            The block to be transformed
        index_map : Union[IndexMap, Callable]
            The transformation to apply.
        Examples
        --------
        Before transform_block_layout, in TensorIR, the IR is:
        .. code-block:: python
            @T.prim_func
            def before_transform_block_layout(
                A: T.Buffer((16, 16), "float32"),
                B: T.Buffer((16, 16), "float32")
            ) -> None:
                for i, j in T.grid(16, 16):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B[vi, vj] = A[vi, vj] * 2.0
        Create the schedule and do transform_block_layout:
        .. code-block:: python
            sch = tir.Schedule(before_transform_block_layout)
            sch.transform_block_layout(sch.get_block("B"), lambda i, j: (i * 16 + j,))
            print(sch.mod["main"].script())
        After applying transform_block_layout, the IR becomes:
        .. code-block:: python
            @T.prim_func
            def after_transform_block_layout(
                A: T.Buffer((16, 16), "float32"),
                B: T.Buffer((16, 16), "float32")
            ) -> None:
                for i in range(256):
                    with T.block("B"):
                        vi, = T.axis.remap("S", [i])
                        B[vi // 16, vi % 16] = A[vi // 16, vi % 16] * 2.0
        """
        # Accept either a BlockRV handle or a (unique) block name string.
        block = self._normalize_block_arg(block)
        # A bare callable is converted into an IndexMap first, using the
        # block's default index dtype so the mapped indices type-check.
        if callable(index_map):
            index_map = IndexMap.from_func(
                index_map,
                index_dtype=_get_block_default_dtype(self.get(block)),
            )
        _ffi_api.ScheduleTransformBlockLayout(  # type: ignore # pylint: disable=no-member
            self, block, index_map
        )
    @type_checked
    def set_axis_separator(
        self,
        block: Union[BlockRV, str],
        buffer: Union[Tuple[str, int], str, Buffer],
        axis_separators: Optional[List[int]],
    ) -> None:
        """Set the axis separator of a buffer, where the buffer is specified by a block and a read
        or write index.
        Parameters
        ----------
        block : Union[BlockRV, str]
            The block that accesses the target buffer. If a string,
            this must uniquely identify a block.
        buffer: Union[Tuple[str,int], Buffer, str]
            The buffer to be transformed, or a specification of how to
            identify the buffer to be transformed.
            If `buffer` is a tuple of ``(str,int)``, the first item
            should be either "read" or "write", and the second item is
            an index into the block's read or write regions.
            If `buffer` is a string, it is the name of the buffer,
            which must exist within the reads/writes of the block. In
            addition, the reads/writes of the block may not contain
            more than one buffer with this name.
            If `buffer` is a Buffer object, it must exist within the
            reads/writes of the block.
        axis_separators : Optional[List[int]]
            The axis separators. ``None`` is treated as an empty list.
        Examples
        --------
        Before set_axis_separator, in TensorIR, the IR is:
        .. code-block:: python
            @T.prim_func
            def before_set_axis_separator(
                A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")
            ) -> None:
                B = T.alloc_buffer((128, 128), dtype="float32")
                for i, j in T.grid(128, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B[vi, vj] = A[vi, vj] * 2.0
                for i, j in T.grid(128, 128):
                    with T.block("C"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        C[vi, vj] = B[vi, vj] + 1.0
        Create the schedule and do set_axis_separator:
        .. code-block:: python
            sch = tir.Schedule(before_set_axis_separator)
            sch.set_axis_separator(sch.get_block("B"), ("write", 0),
                                   axis_separators=[1])
            print(sch.mod["main"].script())
        After applying set_axis_separator, the IR becomes:
        .. code-block:: python
            @T.prim_func
            def after_set_axis_separators(
                A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")
            ) -> None:
                B = T.alloc_buffer([128, 128], dtype="float32", axis_separators=[1])
                for i, j in T.grid(128, 128):
                    with T.block("B"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        B[vi, vj] = A[vi, vj] * T.float32(2)
                for i, j in T.grid(128, 128):
                    with T.block("C"):
                        vi, vj = T.axis.remap("SS", [i, j])
                        C[vi, vj] = B[vi, vj] + T.float32(1)
        """
        # None (and any other falsy value) means "no separators".
        axis_separators = axis_separators or []
        block = self._normalize_block_arg(block)
        buffer_index_type, buffer_index, _ = self._normalize_buffer_arg(block, buffer)
        # The FFI call encodes the region kind as an enum: 0 = read, 1 = write.
        buffer_index_type_enum = 0 if buffer_index_type == "read" else 1
        _ffi_api.ScheduleSetAxisSeparator(  # type: ignore # pylint: disable=no-member
            self, block, buffer_index, buffer_index_type_enum, axis_separators
        )
########## Schedule: Padding decomposition #########
    @type_checked
    def decompose_padding(self, block: Union[BlockRV, str], loop: LoopRV) -> BlockRV:
        """Decompose a block of padding computation pattern into two separate blocks.
        a) The block which fill const pad values into full write region;
        b) The block which fill in-bound values into region where pad predicate is true.
        The pad value filling block is inserted right before the given loop.
        The schedule primitive requires:
        1) The input block is a complete block.
        2) The input loop is the ancestor of the block.
        3) The input block is a block which match padding pattern.
        Parameters
        ----------
        block : Union[BlockRV, str]
            The padding block to be decomposed.
        loop : LoopRV
            The loop above which the pad value filling block is inserted before.
        Returns
        -------
        pad_value_block : BlockRV
            The block filling const pad values.
        Examples
        --------
        Before decompose-padding, in TensorIR, the IR is:
        .. code-block:: python
            @T.prim_func
            def before_decompose(x: T.Buffer(128, "int32"), y: T.Buffer(140, "int32")):
                for i in range(140):
                    with T.block("block"):
                        vi = T.axis.remap("S", [i])
                        y[vi] = T.if_then_else(vi >= 6 and vi < 134, x[vi - 6], 0, dtype="int32")
        Create the schedule and do decompose-padding with specified loop:
        .. code-block:: python
            sch = tir.Schedule(before_decompose, debug_mask="all")
            block = sch.get_block("block")
            sch.decompose_padding(block, sch.get_loops(block)[0])
            print(sch.mod["main"].script())
        After applying decompose-padding, the IR becomes:
        .. code-block:: python
            @T.prim_func
            def after_decompose(x: T.Buffer(128, "int32"), y: T.Buffer(140, "int32")):
                for i in T.serial(140):
                    with T.block("block_pad_const"):
                        vi = T.axis.spatial(140, i)
                        y[vi] = 0
                for i in T.serial(128):
                    with T.block("block"):
                        vi = T.axis.spatial(128, i)
                        y[vi + 6] = x[vi]
        """
        # Accept either a BlockRV handle or a (unique) block name string.
        block = self._normalize_block_arg(block)
        return _ffi_api.ScheduleDecomposePadding(  # type: ignore # pylint: disable=no-member
            self, block, loop
        )
    @type_checked
    def can_decompose_padding(self, block: Union[BlockRV, str], loop: LoopRV) -> bool:
        """Check whether the block match padding pattern and can be decomposed.
        Parameters
        ----------
        block : Union[BlockRV, str]
            The padding block to be checked.
        loop : LoopRV
            The loop above which the pad value filling block would be inserted.
        Returns
        -------
        result : bool
            True if :py:meth:`decompose_padding` can be applied to the block.
        """
        # pylint: disable-next=no-member
        return _ffi_api.CanDecomposePadding(self, block, loop)  # type: ignore
    @type_checked
    def pad_einsum(self, block: Union[BlockRV, str], padding: List[int]) -> None:
        """Pad the computation of Einsum.
        On a block with trivial binding, this primitive pads the iteration domain of the block by
        the given padding factors, for example, 127 -> 128, 132 -> 144 when padding factor is 16.
        Extra producer and consumer padding blocks will be generated to avoid out-of-bound buffer
        access.
        Einsum pattern means all the indices on the buffer access are either by constants
        (e.g. B[0]) or by variables (e.g. B[i]), but not by composite expressions (e.g. B[i + 1]).
        Parameters
        ----------
        block : Union[BlockRV, str]
            The block that matches the Einsum pattern.
        padding : List[int]
            The padding for each block iter.
        Examples
        --------
        Before applying pad-einsum, in TensorIR, the IR is:
        .. code-block:: python
            @T.prim_func
            def before_pad_einsum(
                A: T.Buffer((127, 127), "float32"),
                B: T.Buffer((127, 127), "float32"),
                C: T.Buffer((127, 127), "float32"),
            ) -> None:
                for i0, i1, i2 in T.grid(127, 127, 127):
                    with T.block("C_shared"):
                        i, j, k = T.axis.remap("SSR", [i0, i1, i2])
                        with T.init():
                            C[i, j] = T.float32(0)
                        C[i, j] = C[i, j] + A[i, k] * B[k, j]
        Create the schedule and do pad-einsum with specified block:
        .. code-block:: python
            sch = tir.Schedule(before_pad_einsum, debug_mask="all")
            block = sch.get_block("C_shared")
            sch.pad_einsum(block, [0, 1, 1])
            print(sch.mod["main"].script())
        After applying pad-einsum, the IR becomes:
        .. code-block:: python
            @T.prim_func
            def main(
                A: T.Buffer((127, 127), "float32"),
                B: T.Buffer((127, 127), "float32"),
                C: T.Buffer((127, 127), "float32"),
            ):
                # with T.block("root"):
                A_pad = T.alloc_buffer((128, 128))
                B_pad = T.alloc_buffer((128, 128))
                C_pad = T.alloc_buffer((128, 128))
                for i0, i1 in T.grid(128, 128):
                    with T.block("A_pad"):
                        v0, v1 = T.axis.remap("SS", [i0, i1])
                        A_pad[v0, v1] = T.if_then_else(
                            v0 < 127 and v1 < 127,
                            A[v0, v1],
                            T.float32(0),
                        )
                for i0, i1 in T.grid(128, 128):
                    with T.block("B_pad"):
                        v0, v1 = T.axis.remap("SS", [i0, i1])
                        B_pad[v0, v1] = T.if_then_else(
                            v0 < 127 and v1 < 127,
                            B[v0, v1],
                            T.float32(0),
                        )
                for i0, i1, i2 in T.grid(128, 128, 128):
                    with T.block("C_shared"):
                        i, j, k = T.axis.remap("SSR", [i0, i1, i2])
                        with T.init():
                            C_pad[i, j] = T.float32(0)
                        C_pad[i, j] = C_pad[i, j] + A_pad[i, k] * B_pad[k, j]
                for i0, i1 in T.grid(127, 127):
                    with T.block("C_pad"):
                        v0, v1 = T.axis.remap("SS", [i0, i1])
                        C[v0, v1] = C_pad[v0, v1]
        """
        # Accept either a BlockRV handle or a (unique) block name string.
        block = self._normalize_block_arg(block)
        return _ffi_api.SchedulePadEinsum(  # type: ignore # pylint: disable=no-member
            self, block, padding
        )
######## Schedule: Buffer transformation ########
    @type_checked
    def rolling_buffer(self, block: Union[BlockRV, str], write_buffer_index: int) -> None:
        """Compute the target buffer via rolling buffering, select the outermost rollable
        axis with a positive bound overlap that appears in the block's ancestor loops
        as `rolling axis`, fold and circularize the buffer along the rolling dimension,
        append block predicate to avoid recomputing overlapping elements. It requires:
        1) The block is not an output block and has only RAW dependencies.
        2) The buffer to be an intermediate buffer defined via `alloc_buffer`.
        3) The LCA of the producer and consumer of the buffer is a for loop, typically,
           the producer and consumer of the buffer are cascaded through compute_at.
        4) The access region of the buffer has at least one dimension that contains
           a positive bound overlap.
        Parameters
        ----------
        block : Union[BlockRV, str]
            The producer block of the buffer.
        write_buffer_index : int
            The index of the buffer in block's write region.
        Examples
        --------
        Before rolling_buffer, in TensorIR, the IR is:
        .. code-block:: python
            @T.prim_func
            def before_rolling_buffer(
                A: T.Buffer((12, 12), "int8"), C: T.Buffer((8, 8), "int8")
            ) -> None:
                # body
                # with T.block("root")
                B = T.alloc_buffer([10, 10], dtype="int8")
                for i0, i1 in T.grid(2, 2):
                    for ax0, ax1, ax2, ax3 in T.grid(6, 6, 3, 3):
                        with T.block("B"):
                            ax0_1 = T.axis.spatial(10, i0 * 4 + ax0)
                            ax1_1 = T.axis.spatial(10, i1 * 4 + ax1)
                            rv0, rv1 = T.axis.remap("RR", [ax2, ax3])
                            B[ax0_1, ax1_1] = T.max(
                                B[ax0_1, ax1_1], A[ax0_1 + rv0, ax1_1 + rv1]
                            )
                    for ax0, ax1, ax2, ax3 in T.grid(4, 4, 3, 3):
                        with T.block("C"):
                            ax0_1 = T.axis.spatial(8, i0 * 4 + ax0)
                            ax1_1 = T.axis.spatial(8, i1 * 4 + ax1)
                            rv0, rv1 = T.axis.remap("RR", [ax2, ax3])
                            C[ax0_1, ax1_1] = T.max(
                                C[ax0_1, ax1_1], B[ax0_1 + rv0, ax1_1 + rv1]
                            )
        Create the schedule and do rolling_buffer:
        .. code-block:: python
            sch = tir.Schedule(before_rolling_buffer)
            sch.rolling_buffer(sch.get_block("B"), write_buffer_index=0)
            print(sch.mod["main"].script())
        After applying rolling_buffer, the IR becomes:
        .. code-block:: python
            @T.prim_func
            def after_rolling_buffer(
                A: T.Buffer((12, 12), "int8"),
                C: T.Buffer((8, 8), "int8")
            ) -> None:
                # body
                # with T.block("root")
                B = T.alloc_buffer([6, 10], dtype="int8")
                for i0, i1 in T.grid(2, 2):
                    for ax0, ax1, ax2, ax3 in T.grid(6, 6, 3, 3):
                        with T.block("B"):
                            T.where((i0 < 1 or 2 <= ax0) and (i1 < 1 or 2 <= ax1))
                            ax0_1 = T.axis.spatial(10, i0 * 4 + ax0)
                            ax1_1 = T.axis.spatial(10, i1 * 4 + ax1)
                            rv0, rv1 = T.axis.remap("RR", [ax2, ax3])
                            B[ax0_1 % 6, ax1_1] = T.max(
                                B[ax0_1 % 6, ax1_1], A[ax0_1 + rv0, ax1_1 + rv1]
                            )
                    for ax0, ax1, ax2, ax3 in T.grid(4, 4, 3, 3):
                        with T.block("C"):
                            ax0_1 = T.axis.spatial(8, i0 * 4 + ax0)
                            ax1_1 = T.axis.spatial(8, i1 * 4 + ax1)
                            rv0, rv1 = T.axis.remap("RR", [ax2, ax3])
                            C[ax0_1, ax1_1] = T.max(
                                C[ax0_1, ax1_1], B[ax0_1 % 6 + rv0, ax1_1 + rv1]
                            )
        Note
        ----
        The region_cover property of the consumer block of the target buffer will become false.
        """
        # Accept either a BlockRV handle or a (unique) block name string; the
        # actual analysis and rewrite happen in the C++ implementation.
        block = self._normalize_block_arg(block)
        # pylint: disable-next=no-member
        return _ffi_api.ScheduleRollingBuffer(self, block, write_buffer_index)  # type: ignore
########## Schedule: Misc ##########
    @type_checked
    def enter_postproc(self) -> None:
        """A no-op that marks the start of postprocessing phase of scheduling"""
        # NOTE(review): the marker itself is handled entirely on the C++ side;
        # presumably it lets trace handling distinguish scheduling instructions
        # from postprocessing ones -- confirm in the C++ implementation.
        _ffi_api.ScheduleEnterPostproc(self)  # type: ignore # pylint: disable=no-member
    @type_checked
    def unsafe_hide_buffer_access(
        self, block: BlockRV, buf_type: str, buf_index_array: List[int]
    ) -> None:
        """Hide some buffer access in a given block. This is an unsafe schedule primitive.
        Parameters
        ----------
        block : BlockRV
            The block where we hide read access.
        buf_type : str
            The buffer type: "read"/"write".
        buf_index_array : List[int]
            The array of buffer indices we hide access.
        Note
        ----
        This schedule primitive is unsafe, and may fail dependency analysis.
        One use case of `unsafe_hide_buffer_access` is to hide the buffer access
        to indices buffers (e.g. in sparse computation) so that we can further tensorize
        the block (the indices buffers appeared in read/write regions may fail the pattern
        matching in `tensorize` primitive, and hide the access to these buffers could address
        the issue).
        """
        # Unlike most primitives above, no Python-side normalization is done
        # here: `block` must already be a BlockRV handle.
        _ffi_api.ScheduleUnsafeHideBufferAccess(  # type: ignore # pylint: disable=no-member
            self,
            block,
            buf_type,
            buf_index_array,
        )
| 142,275 | 36.559662 | 100 | py |
tvm | tvm-main/python/tvm/tir/schedule/trace.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""An execution trace of a scheduling program"""
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
from tvm._ffi import register_object as _register_object
from tvm.runtime import Object
from ...ir import Array, Map, save_json
from ...runtime import String
from ..expr import FloatImm, IntImm
from ..function import IndexMap
from . import _ffi_api
from .instruction import ATTR_TYPE, INPUT_RV_TYPE, Instruction
if TYPE_CHECKING:
from .schedule import Schedule
DECISION_TYPE = Any
JSON_TYPE = Any
def _json_from_tvm(obj):
if obj is None:
return None
if isinstance(obj, Array):
return [_json_from_tvm(i) for i in obj]
if isinstance(obj, Map):
return {_json_from_tvm(k): _json_from_tvm(v) for k, v in obj.items()}
if isinstance(obj, String):
return str(obj)
if isinstance(obj, (IntImm, FloatImm)):
return obj.value
if isinstance(obj, IndexMap):
return save_json(obj)
raise TypeError("Not supported type: " + str(type(obj)))
@_register_object("tir.Trace")
class Trace(Object):
    """An execution trace of a scheduling program.
    A trace has two parts:
    1) The instructions invoked so far
    2) The random decisions made upon those instructions, if any
    A trace can be serialized to:
    1) Roundtrippable JSON format: can be saved to file and loaded back
    2) Python syntax: allows users to copy-paste the trace to reproduce the scheduling process
    A trace can be applied to a TensorIR schedule by re-applying all its instructions possibly with
    their decisions accordingly. Re-sampling is invoked if a sampling instruction doesn't have its
    corresponding decision; Otherwise the existing decision will be reused accordingly.
    Attributes
    ----------
    insts : List[Instruction]
        The instructions invoked so far in the program execution
    decisions : Dict[Instruction, DECISION_TYPE]
        The random decisions made upon those instructions
    """
    # Instructions invoked so far, in execution order.
    insts: List[Instruction]
    # Decision recorded per instruction; instructions without a random
    # decision simply have no entry here.
    decisions: Dict[Instruction, DECISION_TYPE]
    def __init__(
        self,
        insts: List[Instruction],
        decisions: Dict[Instruction, DECISION_TYPE],
    ) -> None:
        """Constructor
        Parameters
        ----------
        insts : List[Instruction]
            The instructions invoked so far in the program execution
        decisions : Dict[Instruction, DECISION_TYPE]
            The random decisions made upon those instructions
        """
        self.__init_handle_by_constructor__(
            _ffi_api.Trace,  # type: ignore # pylint: disable=no-member
            insts,
            decisions,
        )
    def get_decision(self, inst: Instruction) -> Optional[DECISION_TYPE]:
        """Retrieve the decision made on a specific instruction
        Parameters
        ----------
        inst : Instruction
            The instruction whose decision is to be retrieved
        Returns
        -------
        decision : Optional[DECISION_TYPE]
            The corresponding decision; None if there is no decision made on the instruction
        """
        return _ffi_api.TraceGetDecision(self, inst)  # type: ignore # pylint: disable=no-member
    def append(
        self,
        inst: Instruction,
        decision: Optional[DECISION_TYPE] = None,
    ) -> None:
        """Append a new instruction to the trace
        Parameters
        ----------
        inst : Instruction
            The new instruction to be appended
        decision : Optional[DECISION_TYPE] = None
            The random decision made on this instruction
        """
        _ffi_api.TraceAppend(self, inst, decision)  # type: ignore # pylint: disable=no-member
    def pop(self) -> Optional[Instruction]:
        """Remove the last instruction, along with the decision made on that instruction, if any
        Returns
        -------
        popped_inst : Instruction
            Returns the instruction removed; NullOpt if the trace is empty
        """
        return _ffi_api.TracePop(self)  # type: ignore # pylint: disable=no-member
    def apply_to_schedule(
        self,
        sch: "Schedule",
        remove_postproc: bool,
        decision_provider: Optional[
            Callable[
                [Instruction, List[INPUT_RV_TYPE], List[ATTR_TYPE], DECISION_TYPE], DECISION_TYPE
            ]
        ] = None,
    ) -> None:
        """Apply the trace to a TensorIR schedule
        Parameters
        ----------
        sch : Schedule
            The schedule to be applied onto
        remove_postproc : bool
            If postprocessing instructions are removed
        decision_provider: Optional[Callable] = None
            A callback that allows users to mutate decisions on the fly when applying instructions.
            The signature of the callback is:
            - The 1st argument: The instruction
            - The 2nd argument: The input random variables
            - The 3rd argument: The attributes
            - The 4th argument: The decision
            - Return: A new decision
        """
        _ffi_api.TraceApplyToSchedule(  # type: ignore # pylint: disable=no-member
            self,
            sch,
            remove_postproc,
            decision_provider,
        )
    def as_json(self, remove_postproc: bool = False) -> JSON_TYPE:
        """Serialize the trace as a JSON-style object
        Parameters
        ----------
        remove_postproc : bool = False
            If postprocessing instructions are removed
        Returns
        -------
        json: JSON_TYPE
            The JSON-style object
        """
        # The FFI call returns TVM container objects; convert them to plain
        # Python values so the result round-trips through json.dumps/loads.
        obj = _ffi_api.TraceAsJSON(self, remove_postproc)  # type: ignore # pylint: disable=no-member
        return _json_from_tvm(obj)
    def as_python(self, remove_postproc: bool = False) -> List[str]:
        """Serialize the trace as a sequence of python statements
        Parameters
        ----------
        remove_postproc : bool = False
            If postprocessing instructions are removed
        Returns
        -------
        py_stmts: List[str]
            A sequence of python statements
        """
        return _ffi_api.TraceAsPython(self, remove_postproc)  # type: ignore # pylint: disable=no-member
    def with_decision(
        self,
        inst: Instruction,
        decision: DECISION_TYPE,
        remove_postproc: bool,
    ) -> "Trace":
        """Create a new trace with an instruction whose decision is changed,
        assuming this instruction exists in the resulting trace
        Parameters
        ----------
        inst : Instruction
            The instruction whose decision is to be changed
        decision : DECISION_TYPE
            The decision to be changed to
        remove_postproc : bool
            If postprocessing instructions are removed
        Returns
        -------
        trace: Trace
            The new trace with the decision changed
        """
        return _ffi_api.TraceWithDecision(  # type: ignore # pylint: disable=no-member
            self,
            inst,
            decision,
            remove_postproc,
        )
    def simplified(self, remove_postproc: bool) -> "Trace":
        """Simplify the trace with dead-code elimination
        Parameters
        ----------
        remove_postproc : bool
            If postprocessing instructions are removed
        Returns
        -------
        trace: Trace
            A simplified trace
        """
        return _ffi_api.TraceSimplified(self, remove_postproc)  # type: ignore # pylint: disable=no-member
    @staticmethod
    def apply_json_to_schedule(json_obj: JSON_TYPE, sch: "Schedule") -> None:
        """Apply a JSON-serialized trace to a TensorIR schedule
        Parameters
        ----------
        json_obj : JSON_TYPE
            The JSON-serialized trace
        sch : Schedule
            The TensorIR schedule
        """
        _ffi_api.TraceApplyJSONToSchedule(json_obj, sch)  # type: ignore # pylint: disable=no-member
    def show(self, style: Optional[str] = None, black_format: bool = True) -> None:
        """A sugar for print highlighted TVM script.
        Parameters
        ----------
        style : str, optional
            Pygmentize printing style, auto-detected if None. See
            `tvm.script.highlight.cprint` for more details.
        black_format: bool
            If true (default), use the formatter Black to format the TVMScript
        """
        # Imported lazily to avoid pulling in pygments/black unless needed.
        from tvm.script.highlight import (  # pylint: disable=import-outside-toplevel
            cprint,
        )
        cprint(str(self), style=style, black_format=black_format)
| 9,444 | 32.257042 | 106 | py |
tvm | tvm-main/python/tvm/tir/analysis/analysis.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Wrapping existing analysis utils."""
# pylint: disable=invalid-name
from typing import Dict, List, Optional, Union
import tvm
from tvm import Object
from tvm.ir import IRModule
from tvm.tir.expr import Var
from tvm.tir.stmt import Block, BufferRegion, PrimExpr
from .. import Buffer, Stmt
from ..function import PrimFunc
from . import _ffi_api
def expr_deep_equal(lhs: PrimExpr, rhs: PrimExpr) -> bool:
    """Deeply compare two nested expressions.
    Parameters
    ----------
    lhs : PrimExpr
        The left operand.
    rhs : PrimExpr
        The right operand.
    Returns
    -------
    result : bool
        The comparison result
    Note
    ----
    This function does not remap variable bindings, it will not
    return true for (let x = 1 in x + 1) vs (let y = 1 in y + 1), unless x.same_as(y).
    Use py:func:`tvm.ir.structural_equal` to handle structural variable remapping.
    Due to the restriction of not remapping variables, this function can run
    faster than StructuralEqual and can be used as a utility function during arithmetic
    simplifications.
    Always consider py:func:`tvm.ir.structural_equal` first, which handles
    the structural remapping.
    See Also
    --------
    tvm.ir.structural_equal
    """
    # The comparison itself is implemented in C++ and reached via FFI.
    return _ffi_api.expr_deep_equal(lhs, rhs)  # type: ignore
def verify_ssa(func: PrimFunc) -> bool:
    """Verify if the func is in SSA form, i.e. every variable is bound exactly once.
    Parameters
    ----------
    func: tvm.tir.PrimFunc
        The function to be verified.
    Returns
    -------
    result : bool
        The result of verification.
    """
    return _ffi_api.verify_ssa(func)  # type: ignore
def verify_memory(func: PrimFunc) -> bool:
    """Verify if `func` contains illegal host-side direct memory access.
    Parameters
    ----------
    func: tvm.tir.PrimFunc
        The function to be verified.
    Returns
    -------
    result : bool
        The result of verification.
    """
    return _ffi_api.verify_memory(func)  # type: ignore
def verify_gpu_code(func: PrimFunc, constraints: Dict[str, int]) -> bool:
    """Verify if the GPU code in `func` satisfies the given attribute constraints.
    Parameters
    ----------
    func: tvm.tir.PrimFunc
        The function to be verified.
    constraints : Dict[str, int]
        The attribute constraints.
    Returns
    -------
    result : bool
        The result of verification.
    """
    return _ffi_api.verify_gpu_code(func, constraints)  # type: ignore
def get_block_access_region(
    block: Block, buffer_var_map: Dict[Var, Buffer]
) -> List[List[BufferRegion]]:
    """Detect which regions of tensors in this block are read or written to.
    Regions are sorted by order of appearance in the AST.
    Parameters
    ----------
    block: tvm.tir.Block
        The block in which we are detecting read/write regions.
    buffer_var_map : Dict[Var, Buffer]
        The outside buffers which may access the block. Mapping from buffer var to the buffer
    Returns
    -------
    result : List[List[BufferRegion]]
        Array of access regions. There are three arrays of BufferRegion:
            - first: read regions
            - second: write regions
            - third: opaque regions
    """
    return _ffi_api.GetBlockAccessRegion(block, buffer_var_map)  # type: ignore
def get_block_read_write_region(
    block: Block, buffer_var_map: Dict[Var, Buffer]
) -> List[List[BufferRegion]]:
    """Auto detect the block read/write region according to its body stmt.
    An opaque access will be counted as both a read and a write access
    Parameters
    ----------
    block: tvm.tir.Block
        The block in which we are detecting read/write regions.
    buffer_var_map : Dict[Var, Buffer]
        The outside buffers which may access the block. Mapping from buffer var to the buffer
    Returns
    -------
    result : List[List[BufferRegion]]
        An array only consisting of the read regions and write regions of the input block
    """
    return _ffi_api.GetBlockReadWriteRegion(block, buffer_var_map)  # type: ignore
def calculate_workspace_bytes(func: PrimFunc, workspace_byte_alignment: int) -> int:
    """Calculate the workspace size in bytes needed by the TIR allocates inside the TIR
    PrimFunc.
    Parameters
    ----------
    func: tvm.tir.PrimFunc
        The function to be detected.
    workspace_byte_alignment : int
        The byte alignment required for each tensor
    Returns
    -------
    result : int
        Workspace size in bytes.
    """
    return _ffi_api.calculate_workspace_bytes(func, workspace_byte_alignment)  # type: ignore
def calculate_constant_bytes(func: PrimFunc, constant_byte_alignment: int) -> int:
    """Calculate the constant size in bytes needed by the TIR allocates inside the TIR
    PrimFunc.
    Parameters
    ----------
    func: tvm.tir.PrimFunc
        The function to be detected.
    constant_byte_alignment : int
        The byte alignment required for each tensor
    Returns
    -------
    result : int
        Constant size in bytes.
    """
    return _ffi_api.calculate_constant_bytes(func, constant_byte_alignment)  # type: ignore
def calculate_allocated_bytes(
    func_or_mod: Union[PrimFunc, IRModule]
) -> Union[Dict[str, int], Dict[str, Dict[str, int]]]:
    """Calculate allocated memory per memory scope required by TIR PrimFuncs.
    Parameters
    ----------
    func_or_mod: Union[PrimFunc, IRModule]
        The function or module to be detected. If a module is passed, allocated
        memory is calculated for all PrimFuncs inside the module
    Returns
    -------
    result : Union[Dict[str, int], Dict[str, Dict[str, int]]]
        Allocated memory size per scope in bytes for each function in the IRModule returned as a
        dict with function names as keys and a dict of allocated sizes as values. If a single
        PrimFunc is passed, the function name is returned as "main"
    """
    # Guard clause: only PrimFunc / IRModule inputs are meaningful here.
    if isinstance(func_or_mod, (PrimFunc, IRModule)):
        return _ffi_api.calculate_allocated_bytes(func_or_mod)  # type: ignore
    raise TypeError(
        f"Expected argument to be PrimFunc or IRModule, but received {type(func_or_mod)}"
    )
def detect_buffer_access_lca(func: PrimFunc) -> Dict[Buffer, Stmt]:
    """Detect the lowest common ancestor(LCA) of buffer access, including both high-level
    access (BufferLoad, BufferStore) and low-level access (BufferLoad, BufferStore and opaque
    access).
    The LCA may be a For loop or a Block.
    Parameters
    ----------
    func: tvm.tir.PrimFunc
        The function to be detected.
    Returns
    -------
    result : Dict[Buffer, Stmt]
        Map from buffer to the LCA of all access to it.
    """
    return _ffi_api.detect_buffer_access_lca(func)  # type: ignore # pylint: disable=no-member
def estimate_tir_flops(stmt_or_mod: Union[Stmt, IRModule]) -> float:
    """Estimate the FLOPs of a TIR fragment.
    Parameters
    ----------
    stmt_or_mod: Union[Stmt, IRModule]
        The TIR fragment or IRModule to be estimated.
    Returns
    -------
    flops: float
        The estimated FLOPs.
    """
    return _ffi_api.EstimateTIRFlops(stmt_or_mod)  # type: ignore # pylint: disable=no-member
# NOTE: relay_func_type in the following two functions should be relay.FuncType however that would
# introduce a cycling dependency. We make do with Object.
def undefined_vars(node: Union[Stmt, PrimExpr], defs: Optional[List[Var]] = None) -> List[Var]:
    """Find undefined vars in a TIR statement or expression.
    Parameters
    ----------
    node: Union[Stmt, PrimExpr]
        The TIR statement or expression to be checked.
    defs: Optional[List[Var]]
        The vars that are already considered defined.
    Returns
    -------
    result : List[Var]
        The undefined vars.
    """
    # A missing (or empty) definition list means no vars are pre-defined.
    if not defs:
        defs = []
    return _ffi_api.UndefinedVars(node, defs)  # type: ignore # pylint: disable=no-member
def get_prim_func_arg_and_result_memory_constraints(
    func: PrimFunc, relay_func_type: Object
) -> List[str]:
    """Returns the memory (aka storage) scope constraints for all the arguments and result
    of func. However the result will be w.r.t. the func's representation as a Relay Function
    of relay_func_type before lowering and conversion to DPS.
    Visible for testing.
    Parameters
    ----------
    func: tvm.tir.PrimFunc
        The function to retrieve constraints from.
    relay_func_type: tvm.relay.FuncType
        The type of the Relay Function from which the func was derived.
        (Typed as Object here to avoid a cyclic import of relay.)
    Returns
    -------
    result: List[AnyStr]
        Memory scope constraints for funcs args and result in Relay form. The empty string
        denotes 'no constraint'.
    """
    return _ffi_api.GetPrimFuncArgAndResultMemoryConstraints(  # type: ignore # pylint: disable=no-member
        func, relay_func_type
    )
def apply_prim_func_arg_and_result_memory_constraints(
    func: PrimFunc, relay_func_type: Object, arg_and_result_memory_scopes: List[str]
) -> PrimFunc:
    """Returns func written to capture the memory (aka storage) scope constraints
    for each of the func's parameters given by arg_and_result_memory_scopes. However,
    arg_and_result_memory_scopes should be w.r.t. the func's representation as a Relay
    Function of relay_func_type before lowering and conversion to DPS.
    Visible for testing.
    CAUTION: This is experimental. The resulting PrimFunc may not have fully accounted
    for all new memory scopes.
    Parameters
    ----------
    func: tvm.tir.PrimFunc
        The function to retrieve constraints from.
    relay_func_type: tvm.relay.FuncType
        The type of the Relay Function from which the func was derived.
        (Typed as Object here to avoid a cyclic import of relay.)
    arg_and_result_memory_scopes: Array[AnyStr]
        Memory constraints for funcs args and result in Relay form. The empty string denotes
        'no constraint'.
    Returns
    -------
    result: tvm.tir.PrimFunc
        The rewritten func.
    """
    return _ffi_api.ApplyPrimFuncArgAndResultMemoryConstraints(  # type: ignore # pylint: disable=no-member
        func, relay_func_type, arg_and_result_memory_scopes
    )
def verify_well_formed(obj: Union[PrimFunc, IRModule], assert_mode: bool = True) -> bool:
    """Verify if the given TIR is well-formed. The verification includes:
    - Check that expressions do not contain vars that are defined outside the block.
    Parameters
    ----------
    obj: Union[tvm.tir.PrimFunc, tvm.ir.IRModule]
        The function or module to be verified.
    assert_mode: bool
        The indicator if it raises an error when the function is not well-formed.
    Returns
    -------
    result: bool
        Whether it is a well-formed TIR function.
    """
    return _ffi_api.VerifyWellFormed(obj, assert_mode)  # type: ignore # pylint: disable=no-member
def OOBChecker():
    """Detect out of bounds memory access in arrays.
    Note: the CamelCase name is kept for backward compatibility with existing callers.
    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.OOBChecker()  # type: ignore
def find_anchor_block(mod: IRModule) -> Block:
    """Find the "anchor block" of the given module.
    We define the anchor block to be the block with (1) an init statement and (2) having
    the biggest flops count. The latter condition is only used when there are multiple blocks
    with an init statement.
    For example, if the input module is conv2d + fused spatial blocks, conv2d is the anchor block.
    The input module may not contain more than one such block. For example, a module having
    two conv2d is not allowed as an input.
    However, a module created from winograd convolution has multiple blocks with an init statement
    (input transform, batched GEMM, and output transform). We use the second condition, the flops
    count, to determine that the batched GEMM block is the anchor block.
    Parameters
    ----------
    mod: tvm.ir.IRModule
        The input TIR module.
    Returns
    -------
    anchor_block: Block
        The anchor block if found, None otherwise.
    """
    return _ffi_api.find_anchor_block(mod)  # type: ignore # pylint: disable=no-member
def get_vtcm_compaction_passes() -> List[tvm.transform.Pass]:
"""Utility function to get the list of lowering passes to be applied to calculate the compacted
VTCM allocation size
Returns
-------
result : List[tvm.transform.Pass]
returns list of passes
"""
return _ffi_api.get_vtcm_compaction_passes() # type: ignore # pylint: disable=no-member
| 13,299 | 30.666667 | 107 | py |
tvm | tvm-main/python/tvm/tir/analysis/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.tir.analysis"""
import tvm._ffi
tvm._ffi._init_api("tir.analysis", __name__)
| 884 | 39.227273 | 62 | py |
tvm | tvm-main/python/tvm/tir/analysis/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace of all TIR analysis utils."""
# pylint: disable=wildcard-import, invalid-name
from .analysis import *
| 901 | 41.952381 | 62 | py |
tvm | tvm-main/python/tvm/tir/transform/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Wrapping existing transformations."""
# pylint: disable=invalid-name, unsupported-binary-operation
import enum
from typing import Callable, Optional
from . import _ffi_api
from . import function_pass as _fpass
def Apply(ftransform):
    """Apply ftransform to each function in the Module.

    This function is a thin wrapper around tvm.tir.transform.prim_func_pass

    Parameters
    ----------
    ftransform: tvm.tir.PrimFunc -> tvm.tir.PrimFunc
        The transformation pass.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """

    def _wrapped(func, mod, ctx):  # pylint: disable=unused-argument
        # The pass interface supplies (func, mod, ctx); only func is forwarded.
        return ftransform(func)

    return _fpass.prim_func_pass(_wrapped, opt_level=0, name="Apply")  # type: ignore
def InjectPrefetch():
    """Inject prefetch instructions into stmt.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.InjectPrefetch()  # type: ignore
def ApplyLayoutTransforms():
    """Reshape buffers that appear in the "layout_transform_map"
    function attribute.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.ApplyLayoutTransforms()  # type: ignore
def StorageFlatten(cache_line_size, create_bound_attribute: bool = False):
    """Flatten the multi-dimensional read/write to 1D.

    Parameters
    ----------
    cache_line_size: int
        The size of CPU cache line.

    create_bound_attribute:
        Whether to create bound attributes.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.StorageFlatten(cache_line_size, create_bound_attribute)  # type: ignore
def TextureFlatten():
    """Flatten the multi-dimensional read/write to 2D.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.TextureFlatten()  # type: ignore
def InjectCopyIntrin(pragma_key: str, fintrin):
    """Inject copy intrinsics for loops annotated with the given pragma key.

    (The previous summary, "Inject virtual thread loops.", appeared to be
    copy-pasted from InjectVirtualThread.)

    Parameters
    ----------
    pragma_key : str
        The pragma key for hint of copy.

    fintrin : function
        The function with signature copyintrin(src, dst, pad_before, pad_after, pad_value)

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.InjectCopyIntrin(pragma_key, fintrin)  # type: ignore
def CoProcSync():
    """Detect and insert sync points to co-processor.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.CoProcSync()  # type: ignore
def LiftAttrScope(attr_key: str):
    """Lift common attrs with attr_key to outer scope.

    Parameters
    ----------
    attr_key : str
        The attribute key to be checked.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.LiftAttrScope(attr_key)  # type: ignore
def LoopPartition():
    """Partition loops in the stmt.

    (The previous summary, "Inject virtual thread loops.", appeared to be
    copy-pasted from InjectVirtualThread.)

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.LoopPartition()  # type: ignore
def VectorizeLoop(enable_vectorize: bool = True):
    """Lower vectorization loops.

    Parameters
    ----------
    enable_vectorize : bool
        Whether vectorization is enabled.
        Will lower to scalar loop when it is turned off.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.VectorizeLoop(enable_vectorize)  # type: ignore
def InjectVirtualThread():
    """Inject virtual thread loops.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.InjectVirtualThread()  # type: ignore
def InjectDoubleBuffer():
    """Inject double buffer statements.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.InjectDoubleBuffer()  # type: ignore
def InjectRollingBuffer():
    """Inject rolling buffer statements.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.InjectRollingBuffer()  # type: ignore
def StorageRewrite():
    """Rewrite storage allocation pattern.

    Moves the allocation to outer most possible scope.
    Trying to share space between allocations to make
    a static allocation plan when possible.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.StorageRewrite()  # type: ignore
def UnrollLoop():
    """Unroll the constant loop marked by unroll.

    This pass also automatically attach pragma unroll tag to loops which meets the standard.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.UnrollLoop()  # type: ignore
def ReduceBranchingThroughOvercompute():
    """Reduce branching by introducing overcompute.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.ReduceBranchingThroughOvercompute()  # type: ignore
def RemoveNoOp():
    """Remove No Op from the Stmt.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.RemoveNoOp()  # type: ignore
def RemoveAssume():
    """Remove all instances of builtin::assume.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.RemoveAssume()  # type: ignore
def RemoveStoreUndef():
    """Remove stores of undefined values from the Stmt.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.RemoveStoreUndef()  # type: ignore
def BF16ComputeLegalize():
    """Legalize bf16 compute Ops.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.BF16ComputeLegalize()  # type: ignore
def FP8ComputeLegalize(promote_dtype_str: str = "float32"):
    """Legalize fp8 compute Ops.

    Parameters
    ----------
    promote_dtype_str : str
        The data type we promote fp8 to, options: float16/float32.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.FP8ComputeLegalize(promote_dtype_str)  # type: ignore
def BF16StorageLegalize():
    """Legalize bf16 storage types to u16.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.BF16StorageLegalize()  # type: ignore
def FP8StorageLegalize():
    """Legalize fp8 storage types to u8.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.FP8StorageLegalize()  # type: ignore
def CommonSubexprElimTIR(enable_cse_tir: bool = True, identify_equiv_terms: bool = False):
    """Replace redundant computations by new variables.

    Parameters
    ----------
    enable_cse_tir : bool
        Flag forwarded to the underlying pass; presumably enables/disables the
        elimination -- TODO confirm against the C++ implementation.

    identify_equiv_terms : bool
        Flag forwarded to the underlying pass; presumably also treats
        semantically equivalent (not just identical) terms as redundant --
        TODO confirm against the C++ implementation.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.CommonSubexprElimTIR(enable_cse_tir, identify_equiv_terms)  # type: ignore
def RewriteUnsafeSelect():
    """Detect and rewrite unsafe select that contains memory access.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.RewriteUnsafeSelect()  # type: ignore
def Simplify():
    """Run arithmetic simplifications on the statements and expressions.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.Simplify()  # type: ignore
def ConvertSSA():
    """Convert an IRModule to be SSA form.

    This pass handles cases where the same `tir.Var` appears in
    multiple functions within the same module. For example, after
    extracting a fragment from one function into another, where the
    same `tir.Var` may be defined both as within the body of the
    original function, and as a parameter within the hoisted function.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.ConvertSSA()  # type: ignore
def InstrumentBoundCheckers():
    """Instruments bound checkers.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.InstrumentBoundCheckers()  # type: ignore
def LowerCustomDatatypes():
    """Lower custom datatypes.

    See tvm::datatypes::Registry for more information on adding custom datatypes.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.LowerCustomDatatypes()  # type: ignore
def MakePackedAPI():
    """Transform the PrimFuncs in the module to a packed func API.

    Prior to this pass, the PrimFunc may have Buffer arguments defined
    in the `PrimFuncNode::buffer_map`. This pass consumes the
    `buffer_map`, using it to generate `TVMArgs` and `TVMRetValue*`
    arguments that implement the `PackedFunc` API.

    For static shapes, the `BufferNode::shape`, `BufferNode::strides`,
    and `BufferNode::elem_offset` member variables are used to
    generate runtime checks on the corresponding member variables in
    the user-provided `DLTensor*` or `tvm.nd.array` argument. (e.g. A
    PrimFunc that accepts a buffer of shape `[16,32]` validates that
    the `DLTensor::shape` array is `[16,32]`.)

    For dynamic Buffers, in which one or more of these `BufferNode` member
    variables use `tir.Var` that are not defined by other PrimFunc
    parameters, these are instead used to define the variables based on
    the corresponding `DLTensor` members. (e.g. A PrimFunc that accepts a
    buffer of shape `[tir.Var("n"), tir.Var("m")]`, when passed a
    `DLTensor` of shape `[16,32]`, will define `n = 16` and `m = 32`, based
    on the argument's shape.)

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.MakePackedAPI()  # type: ignore
def MakeUnpackedAPI():
    """Transform the PrimFuncs in the module to a C API compatible with internal calls.

    Prior to this pass, the PrimFunc may have Buffer arguments defined in
    the `PrimFuncNode::buffer_map`. This pass consumes the `buffer_map`,
    using it to generate `T*` arguments (e.g. `float32*`) that can be
    directly called by a C API.

    For static shapes, no runtime validation is performed to confirm that
    the argument buffer's shape matches the expected shape. For dynamic
    shapes, `MakeUnpackedAPI` requires that the dynamic parameters be
    passed as separate `tir.Var` parameters.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.MakeUnpackedAPI()  # type: ignore
def AnnotateDeviceRegions():
    """Annotate locations that should be run on the device.

    Insert `AttrStmt` nodes specifying a target on which regions
    within the PrimFunc should be executed. Only modifies functions
    that have a `tvm::attr::kTarget` attribute, and where that target
    defines a host.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.AnnotateDeviceRegions()  # type: ignore
def SplitHostDevice():
    """Split the function into a host function and device functions.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.SplitHostDevice()  # type: ignore
def LowerDeviceKernelLaunch():
    """Lower cross-device function calls.

    Prior to this pass, host to device calls are represented as
    subroutine calls, with environment parameters (e.g. env_thread)
    specified internally. The device function is an internal
    function, without a `tvm::attr::kGlobalSymbol` attribute.

    After this pass, host to device calls are represented as
    tvm_call_packed built-in. The device function is an
    externally-exposed function, with a non-empty
    `tvm::attr::kGlobalSymbol` attribute.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.LowerDeviceKernelLaunch()  # type: ignore
def DecorateDeviceScope():
    """Decorate all the function's body as device function.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.DecorateDeviceScope()  # type: ignore
def SkipAssert():
    """Skip assert statements.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.SkipAssert()  # type: ignore
def ThreadSync(storage_scope: str):
    """Insert sync between parallel read/write of shared buffers.

    Parameters
    ----------
    storage_scope: str
        The target storage scope.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.ThreadSync(storage_scope)  # type: ignore
def LowerThreadAllreduce():
    """Lower cross thread allreduce.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.LowerThreadAllreduce()  # type: ignore
def InferFragment():
    """Infer the TensorCore fragment information using tensor intrinsics.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.InferFragment()  # type: ignore
def LowerWarpMemory():
    """Lower warp memory access to low-level device related function calls.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.LowerWarpMemory()  # type: ignore
def LowerTVMBuiltin():
    """Lower tvm builtin intrinsics.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.LowerTVMBuiltin()  # type: ignore
def LegalizePackedCalls():
    """Legalize packed calls to have its arguments wrapped in TVMValues.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.LegalizePackedCalls()  # type: ignore
def LowerIntrin():
    """Lower target specific intrinsic calls.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.LowerIntrin()  # type: ignore
def LowerDeviceStorageAccessInfo():
    """Lower attached storage access information on device.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass

    Note
    ----
    Run this pass after all storage access analysis finish.
    """
    return _ffi_api.LowerDeviceStorageAccessInfo()  # type: ignore
def CombineContextCall():
    """Combine context calls in the host function.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.CombineContextCall()  # type: ignore
def NarrowDataType(target_bits: int):
    """Narrow down PrimExpr datatype in stmt to target_bits.

    Parameters
    ----------
    target_bits : int
        The target bit configuration.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass

    Note
    ----
    Run this pass after StorageFlatten.
    """
    return _ffi_api.NarrowDataType(target_bits)  # type: ignore
def VerifyMemory():
    """Verify if func contains illegal host side direct memory access.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.VerifyMemory()  # type: ignore
def VerifyVTCMLimit(limit: int):
    """Verify if the size of the allocated vtcm memory satisfies the limit.

    Parameters
    ----------
    limit : int
        The maximum allowed VTCM allocation size (units as defined by the
        underlying C++ pass; presumably bytes -- TODO confirm).

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.VerifyVTCMLimit(limit)  # type: ignore
# pylint: disable=no-else-return,inconsistent-return-statements
def HoistIfThenElse(variant: Optional[str] = None):
    """Hoist loop-invariant IfThenElse nodes to outside the eligible loops.

    Parameters
    ----------
    variant : Optional[String]
        The variant of the pass.
        variant can have any one of following values ["basic", None(Default)].

        The basic variant supports basic hoisting scenarios where it expects
        the For & If Nodes are in place consecutively and does not involve
        global scope variables or more advanced scenarios.

        Default variant supports all hoisting scenarios, i.e., {"Basic" + "Advanced"}
        supported with control with PassContext configs like below:

            config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass

    Raises
    ------
    ValueError
        If `variant` is neither "basic" nor None.
    """
    if variant == "basic":
        return _ffi_api.HoistIfThenElseBasic()  # type: ignore
    elif variant is None:
        return _ffi_api.HoistIfThenElse()  # type: ignore
    # Previously an unrecognized variant fell through and silently returned
    # None; fail fast with a descriptive error instead.
    raise ValueError(f"Unsupported HoistIfThenElse variant: {variant!r}")
class HoistedConditionals(enum.Flag):
    """Flags for use in HoistExpressionConfig.conditional_types

    Each bitflag represents a type of expression that should be
    hoisted to the outermost loop possible.
    """

    Never = 0
    """ No hoisting of conditionals """
    IfElseStmt = 1
    """ If set, look for hoist candidates in IfElseStmt """
    IfElseExpr = 2
    """ If set, look for hoist candidates in tir.if_then_else """
    BooleanExpression = 4
    """ If set, look for hoist candidates in all boolean expressions """
    UsingBlockVar = 8
    """ If set, allow hoisting of conditionals that use a block variable (e.g. threadIdx.x) """
    All = IfElseStmt | IfElseExpr | BooleanExpression | UsingBlockVar
    """ Enable all hoisting of conditionals"""
class HoistedLetBindings(enum.Flag):
    """Flags for use in HoistExpressionConfig.let_binding_types

    Each bitflag represents a type of let binding expression that should be
    hoisted to the outermost loop possible.
    """

    Never = 0
    """ No hoisting of let bindings """
    RequiredByConditional = 1
    """ Bindings that are used by a hoisted conditional """
    LetStmt = 2
    """ Bindings occurring in LetStmt """
    LetExpr = 4
    """ Bindings occurring in Let expressions """
    All = RequiredByConditional | LetStmt | LetExpr
    """ Enable all hoisting of let bindings """
def HoistExpression():
    """Generalized version of HoistIfThenElse.

    Hoist loop-invariant expressions to outside the eligible loops.

    Searches for expressions in:

    * LetStmt bindings
    * IfThenElse conditions
    * Boolean operators

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.HoistExpression()  # type: ignore
def LowerCrossThreadReduction():
    """Lower cross-thread reduction from thread bindings to
    intrinsic function calls.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.LowerCrossThreadReduction()  # type: ignore
def LowerInitBlock():
    """Lower block init stmt into IfThenElse statements.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.LowerInitBlock()  # type: ignore
def PlanAndUpdateBufferAllocationLocation():
    """Locate the buffer allocation to the exact position (usually the
    LCA of buffer access). This pass will inject opaque block
    with alloc_buffers at the allocation site.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.PlanAndUpdateBufferAllocationLocation()  # type: ignore
def ConvertBlocksToOpaque():
    """Substitute all the block vars with the PrimExprs they are bound to, indicated by
    the corresponding iter_values in BlockRealize, and then convert the blocks into
    opaque ones by removing all the iter_values in BlockRealize and iter_vars in Block.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.ConvertBlocksToOpaque()  # type: ignore
def LiftThreadBinding():
    """Lift the same thread bindings to their LCA loops.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.LiftThreadBinding()  # type: ignore
def CompactBufferAllocation(is_strict: bool = True):
    """Compact the buffer access region by removing the buffer regions
    that are not accessed, i.e. narrowing the buffer shape and adjusting
    the access region if necessary.

    Example
    -------
    Before narrowing, ``B`` is a ``[16, 16]`` buffer, but only a
    skinny vector ``B[i, 0:16]`` is accessed.

    .. code-block:: python

        for i in range(0, 16):
            with T.block():
                B = T.alloc_buffer(16, 16)
                for j in range(0, 16):
                    B[i, j] = A[i, j] + 1
                for j in range(0, 16):
                    C[i, j] = B[i, j] + 1

    This pass narrows the buffer shape and adjust its accessed region
    accordingly. In this particular case, because only a ``1 * 16``
    vector of ``B`` is accessed, the pass narrows ``B`` to shape ``[1,
    16]``, and changes the access to ``B[i, j]`` to ``B[0, j]``.

    .. code-block:: python

        for i in range(0, 16):
            with T.block():
                B = T.alloc_buffer(1, 16)
                for j in range(0, 16):
                    B[0, j] = A[i, j] + 1
                for j in range(0, 16):
                    C[i, j] = B[0, j] + 1

    Parameters
    ----------
    is_strict : bool
        Ensure the compacted shape to be always smaller than the original shape.
        Otherwise it allows to grow the shape to match actual accessed buffer regions.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.CompactBufferAllocation(is_strict)  # type: ignore
def LowerMatchBuffer():
    """Remove match buffers inside the block. Also, it will validate the binding.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.LowerMatchBuffer()  # type: ignore
def LowerOpaqueBlock():
    """Remove the block to ensure that the TIR can not be scheduled again.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.LowerOpaqueBlock()  # type: ignore
def FlattenBuffer():
    """Flatten the multi-dimensional BufferLoad and BufferStore to single-dimensional
    BufferLoad/BufferStore for TIR that does not contain opaque blocks.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.FlattenBuffer()  # type: ignore
def TransformMmaBufferLayout():
    """Transform mma buffer layout.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.TransformMmaBufferLayout()  # type: ignore
def InjectPermutedLayout():
    """Inject permuted layout in mma.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.InjectPermutedLayout()  # type: ignore
def UnifyThreadBinding():
    """Unify all the thread bindings for "blockIdx.x/y/z",
    "threadIdx.x/y/z", and "vthread.x/y/z". Before the unification,
    two vars that are bound to a thread axis (e.g., "threadIdx.x")
    use different IterVars and variables in their AttrStmts. After
    the unification, we use a consolidated IterVar and a variable
    for them.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass

    Note
    ----
    `vthread` is a legacy behavior that will be deprecated, though
    thread bindings of `vthread` are still also unified in this
    pass. Please use `vthread.x`, `vthread.y` and `vthread.z` instead.
    """
    return _ffi_api.UnifyThreadBinding()  # type: ignore
def MergeDynamicSharedMemoryAllocations():
    """This pass merges multiple TIR-level dynamic shared memory allocations
    into one allocation.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.MergeDynamicSharedMemoryAllocations()  # type: ignore
def ConvertForLoopsToSerial():
    """Convert Parallel For Loops to Serial For Loops.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.ConvertForLoopsToSerial()  # type: ignore
def InjectSoftwarePipeline():
    """Transform annotated loops into pipelined ones that parallelize producers and consumers.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.InjectSoftwarePipeline()  # type: ignore
def ExtractPrimFuncConstants():
    """Collects and unifies tir non-scalar constants to module's attr 'Constants' array.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.ExtractPrimFuncConstants()  # type: ignore
def LowerAutoCopy():
    """Automatically do memory optimizations for auto copy blocks.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.LowerAutoCopy()  # type: ignore
def RenormalizeSplitPattern():
    """Renormalize the split pattern from floordiv(floormod()) to floormod(floordiv()).

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.RenormalizeSplitPattern()  # type: ignore
def BindTarget(target):
    """Annotate a PrimFunc with a given target.

    Parameters
    ----------
    target : tvm.target.Target
        The target to annotate the PrimFuncs with.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.BindTarget(target)  # type: ignore
def AnnotateEntryFunc():
    """Set a PrimFunc as the entry point if it is the only function in the IRModule.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.AnnotateEntryFunc()  # type: ignore
def Filter(fcond: Callable):
    """Filter out PrimFuncs that do not satisfy the given condition.

    Parameters
    ----------
    fcond : Callable
        The condition function; takes a PrimFunc and returns a boolean.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.Filter(fcond)  # type: ignore
def InjectPTXAsyncCopy():
    """Rewrite global to shared memory copy on CUDA with asynchronous copy.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.InjectPTXAsyncCopy()  # type: ignore
def RemoveWeightLayoutRewriteBlock(skip_ndarray_rewrite=False):
    """Remove weight layout rewrite block before benchmarking during tuning stage.

    Parameters
    ----------
    skip_ndarray_rewrite : bool
        If True, exact rewrite of NDArray, according to the given index map, will be skipped.
        Only the shape of the NDArray is transformed correctly, and the content of the destination
        array will be filled with random values.

        When this pass is called many times during MetaSchedule tuning, the raw data of NDArray,
        before and after rewrite, does not matter. Since NDArray layout rewrite, using IndexMap's
        MapNDArray, is currently slow, skipping the exact rewrite is sometimes necessary.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.RemoveWeightLayoutRewriteBlock(skip_ndarray_rewrite)  # type: ignore
def ManifestSharedMemoryLocalStage():
    """Add the explicit local stage for the shared memory access on GPU.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.ManifestSharedMemoryLocalStage()  # type: ignore
def InstrumentProfileIntrinsics():
    """Insert intrinsic calls to instrument function and loop level profiling.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.InstrumentProfileIntrinsics()  # type: ignore
def InstallDebugSpans():
    """Add line information from the TIR printer as spans on each statement and
    expression.

    Returns
    -------
    fpass : tvm.transform.Pass
        The result pass
    """
    return _ffi_api.InstallDebugSpans()  # type: ignore
| 28,897 | 24.26049 | 98 | py |
tvm | tvm-main/python/tvm/tir/transform/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.tir.transform"""
import tvm._ffi
tvm._ffi._init_api("tir.transform", __name__)
| 886 | 39.318182 | 62 | py |
tvm | tvm-main/python/tvm/tir/transform/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace of all TIR transformations"""
# pylint: disable=wildcard-import, invalid-name
from .function_pass import prim_func_pass, PrimFuncPass
from .transform import *
| 958 | 42.590909 | 62 | py |
tvm | tvm-main/python/tvm/tir/transform/function_pass.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TIR specific function pass support."""
import inspect
import functools
from typing import Callable, List, Optional, Union
import tvm._ffi
from tvm.ir.transform import Pass, PassInfo
from . import _ffi_api
@tvm._ffi.register_object("tir.PrimFuncPass")
class PrimFuncPass(Pass):
"""A pass that works on each :py:func:`tvm.tir.PrimFunc` in a module. A function
pass class should be created through py:func:`tvm.tir.transform.function_pass`.
"""
def _wrap_class_function_pass(pass_cls, pass_info):
"""Wrap a python class as function pass"""
class PyFunctionPass(PrimFuncPass):
"""Internal wrapper class to create a class instance."""
def __init__(self, *args, **kwargs):
# initialize handle in cass pass_cls creation failed.fg
self.handle = None
inst = pass_cls(*args, **kwargs)
# it is important not to capture self to
# avoid a cyclic dependency
def _pass_func(func, mod, ctx):
return inst.transform_function(func, mod, ctx)
self.__init_handle_by_constructor__(
_ffi_api.CreatePrimFuncPass, _pass_func, pass_info # type: ignore
)
self._inst = inst
def __getattr__(self, name):
# fall back to instance attribute if there is not any
return self._inst.__getattribute__(name)
functools.update_wrapper(PyFunctionPass.__init__, pass_cls.__init__)
PyFunctionPass.__name__ = pass_cls.__name__
PyFunctionPass.__doc__ = pass_cls.__doc__
PyFunctionPass.__module__ = pass_cls.__module__
return PyFunctionPass
def prim_func_pass(
    pass_func=None,
    opt_level: Optional[int] = None,
    name: Optional[str] = None,
    required: Optional[List[str]] = None,
) -> Union[Callable, PrimFuncPass]:
    """Decorate a function pass.

    This function returns a callback when pass_func
    is provided. Otherwise, it returns the created function pass using the
    given optimization function.

    Parameters
    ----------
    pass_func : Optional[Callable[(tvm.tir.PrimFunc, IRModule, PassContext) -> tvm.tir.PrimFunc]]
        The transformation function or class.

    opt_level : Optional[int]
        The optimization level of this function pass. Despite the Optional
        annotation it must be supplied; a ValueError is raised when it is None.

    name : Optional[str]
        The name of the function pass. The name could be empty. In this case, the
        name of the optimization function will be used as the pass name.

    required : Optional[List[str]]
        The list of passes that the function pass is dependent on.

    Returns
    -------
    create_function_pass : Union[Callable, PrimFuncPass]
        A decorator will be returned if pass_func is not provided,
        otherwise return the decorated result.
        The returned decorator has two behaviors depending on the input:
        A new pass will be returned when we decorate a pass function.
        A new pass class will be returned when we decorate a class type.

    Examples
    --------
    The following code block decorates a function pass class.

    .. code-block:: python

        @tvm.tir.transform.prim_func_pass(opt_level=1)
        class TestReplaceFunc:
            def __init__(self, new_func):
                self.new_func = new_func

            def transform_function(self, func, mod, ctx):
                # just for demo purposes
                # transform func to new_func
                return self.new_func

    The following code creates a function pass by decorating
    a user defined transform function.

    .. code-block:: python

        @tvm.tir.transform.prim_func_pass(opt_level=2)
        def transform(func, mod, ctx):
            # my transformations here.
            return func

        # Given a module m, the optimization could be invoked as the following:
        updated_mod = transform(m)
    """
    # opt_level is mandatory even though it defaults to None (the None default
    # only exists so that keyword-less decorator usage gives a clear error).
    if opt_level is None:
        raise ValueError("Please provide opt_level for the function pass.")

    required = required if required else []
    if not isinstance(required, (list, tuple)):
        raise TypeError("Required is expected to be the type of " + "list/tuple.")

    def create_function_pass(pass_arg):
        """Internal function that creates a function pass"""
        fname = name if name else pass_arg.__name__
        info = PassInfo(opt_level, fname, required)
        if inspect.isclass(pass_arg):
            return _wrap_class_function_pass(pass_arg, info)
        if not callable(pass_arg):
            raise TypeError("pass_func must be a callable for Module pass")
        return _ffi_api.CreatePrimFuncPass(pass_arg, info)  # type: ignore

    # Called as prim_func_pass(fn, opt_level=...) -> build the pass directly;
    # called as prim_func_pass(opt_level=...) -> return the decorator.
    if pass_func:
        return create_function_pass(pass_func)
    return create_function_pass
| 5,810 | 35.31875 | 97 | py |
tvm | tvm-main/python/tvm/_ffi/base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, import-outside-toplevel
"""Base library for TVM FFI."""
import sys
import os
import ctypes
import numpy as np
from . import libinfo
# ----------------------------
# library loading
# ----------------------------
string_types = (str,)
integer_types = (int, np.int32)
numeric_types = integer_types + (float, np.float16, np.float32)
# this function is needed for python3
# to convert ctypes.char_p .value back to python str
# py_str converts bytes returned by the C API back into a Python str.
if sys.platform == "win32":

    def _py_str(x):
        """Decode C-API bytes as UTF-8, falling back to the active Windows
        code page (via kernel32.GetACP) when the bytes are not valid UTF-8."""
        try:
            return x.decode("utf-8")
        except UnicodeDecodeError:
            encoding = "cp" + str(ctypes.cdll.kernel32.GetACP())
            return x.decode(encoding)

    py_str = _py_str
else:
    # Non-Windows platforms: plain UTF-8 decode.
    py_str = lambda x: x.decode("utf-8")
def _load_lib():
    """Load the TVM shared library by searching possible paths.

    Returns
    -------
    lib : ctypes.CDLL
        The loaded library with ``TVMGetLastError`` return type configured.

    lib_name : str
        Basename of the library file that was loaded.
    """
    lib_path = libinfo.find_lib_path()
    # The dll search path need to be added explicitly in
    # windows after python 3.8
    if sys.platform.startswith("win32") and sys.version_info >= (3, 8):
        for path in libinfo.get_dll_directories():
            os.add_dll_directory(path)
    # RTLD_GLOBAL makes the library's symbols visible to subsequently
    # loaded shared objects.
    lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_GLOBAL)
    lib.TVMGetLastError.restype = ctypes.c_char_p
    return lib, os.path.basename(lib_path[0])
try:
    # The following import is needed for TVM to work with pdb
    import readline  # pylint: disable=unused-import
except ImportError:
    pass

# version number
__version__ = libinfo.__version__
# library instance: the loaded CDLL and the basename of the loaded file
_LIB, _LIB_NAME = _load_lib()
# Whether we are runtime only (libtvm_runtime was loaded instead of libtvm)
_RUNTIME_ONLY = "runtime" in _LIB_NAME
# The FFI mode of TVM: "ctypes", "cython", or "auto" (pick the best available)
_FFI_MODE = os.environ.get("TVM_FFI", "auto")
# ----------------------------
# helper function in ctypes.
# ----------------------------
def c_str(string):
    """Create a NUL-terminated C string from a Python string.

    Parameters
    ----------
    string : str
        The Python string to convert.

    Returns
    -------
    cstr : ctypes.c_char_p
        A char pointer suitable for passing to the C API.
    """
    encoded = string.encode("utf-8")
    return ctypes.c_char_p(encoded)
def c_array(ctype, values):
    """Build a ctypes array holding the given values.

    Parameters
    ----------
    ctype : ctypes data type
        Element type of the resulting array.

    values : tuple or list
        The values to store.

    Returns
    -------
    out : ctypes array
        A freshly created array of ``len(values)`` elements.
    """
    array_type = ctype * len(values)
    return array_type(*values)
def decorate(func, fwrapped):
    """A wrapper call of decorator package, differs to call time

    Parameters
    ----------
    func : function
        The original function

    fwrapped : function
        The wrapped function

    Returns
    -------
    out : function
        ``fwrapped`` decorated so it preserves ``func``'s signature.
    """
    # Imported lazily so the third-party `decorator` package is only required
    # when this helper is actually used.
    import decorator

    return decorator.decorate(func, fwrapped)
# -----------------------------------------
# Base code for structured error handling.
# -----------------------------------------
# Maps error type name (str) to its exception class.
# Populated by register_error; consulted by get_last_ffi_error.
ERROR_TYPE = {}


class TVMError(RuntimeError):
    """Default error thrown by TVM functions.

    TVMError will be raised if you do not give any error type specification.
    """
def register_error(func_name=None, cls=None):
    """Register an error class so the FFI error handler can recognize it.

    Parameters
    ----------
    func_name : str or function or class
        The name under which the error is registered, or the error class
        itself when used as a bare decorator.

    cls : function
        The class to register (when not used as a decorator).

    Returns
    -------
    fregister : function
        The registering decorator if ``cls`` is not specified, otherwise the
        registered class.

    Examples
    --------
    .. code-block:: python

      @tvm.error.register_error
      class MyError(RuntimeError):
          pass

      err_inst = tvm.error.create_ffi_error("MyError: xyz")
      assert isinstance(err_inst, MyError)
    """
    # Bare-decorator form: @register_error applied directly to the class.
    if callable(func_name):
        cls = func_name
        func_name = cls.__name__

    def register(mycls):
        """internal register function"""
        key = func_name if isinstance(func_name, str) else mycls.__name__
        ERROR_TYPE[key] = mycls
        return mycls

    return register if cls is None else register(cls)
def _valid_error_name(name):
"""Check whether name is a valid error name."""
return all(x.isalnum() or x in "_." for x in name)
def _find_error_type(line):
    """Find the error name given the first line of the error message.

    Parameters
    ----------
    line : str
        The first line of error message.

    Returns
    -------
    name : Optional[str]
        The error name, or None when no valid name can be extracted.
    """
    if sys.platform == "win32":
        # Stack traces aren't logged on Windows due to a DMLC limitation,
        # so we should try to get the underlying error another way.
        # DMLC formats errors "[timestamp] file:line: ErrorMessage"
        # ErrorMessage is usually formatted "ErrorType: message"
        # We can try to extract the error type using the final ":"
        end_pos = line.rfind(":")
        if end_pos == -1:
            return None
        start_pos = line.rfind(":", 0, end_pos)
        if start_pos == -1:
            err_name = line[:end_pos].strip()
        else:
            err_name = line[start_pos + 1 : end_pos].strip()
        if _valid_error_name(err_name):
            return err_name
        return None

    # Non-Windows: the error type is the prefix before the first ":".
    end_pos = line.find(":")
    if end_pos == -1:
        return None
    err_name = line[:end_pos]
    if _valid_error_name(err_name):
        return err_name
    return None
def c2pyerror(err_msg):
    """Translate C API error message to python style.

    Parameters
    ----------
    err_msg : str
        The error message.

    Returns
    -------
    new_msg : str
        Translated message.

    err_type : str
        Detected error type.
    """
    arr = err_msg.split("\n")
    if arr[-1] == "":
        arr.pop()
    err_type = _find_error_type(arr[0])
    # Simple two-state scan: outside vs. inside a "Stack trace" section.
    trace_mode = False
    stack_trace = []
    message = []
    for line in arr:
        if trace_mode:
            # NOTE(review): the original file likely distinguished deeper vs.
            # shallower indentation here; the literals below reflect this
            # source as given — confirm against upstream before relying on it.
            if line.startswith(" ") and len(stack_trace) > 0:
                # Continuation of the previous frame entry.
                stack_trace[-1] += "\n" + line
            elif line.startswith(" "):
                stack_trace.append(line)
            else:
                trace_mode = False
        if not trace_mode:
            if line.startswith("Stack trace"):
                trace_mode = True
            else:
                message.append(line)
    out_msg = ""
    if stack_trace:
        # Present the C stack in Python order (most recent call last).
        out_msg += "Traceback (most recent call last):\n"
        out_msg += "\n".join(reversed(stack_trace)) + "\n"
    out_msg += "\n".join(message)
    return out_msg, err_type
def py2cerror(err_msg):
    """Translate python style error message to C style.

    Parameters
    ----------
    err_msg : str
        The error message.

    Returns
    -------
    new_msg : str
        Translated message.
    """
    arr = err_msg.split("\n")
    if arr[-1] == "":
        arr.pop()
    # Two-state scan mirroring c2pyerror, keyed on "Traceback" instead.
    trace_mode = False
    stack_trace = []
    message = []
    for line in arr:
        if trace_mode:
            if line.startswith(" "):
                stack_trace.append(line)
            else:
                trace_mode = False
        if not trace_mode:
            if line.find("Traceback") != -1:
                trace_mode = True
            else:
                message.append(line)
    # Remove the first error name if there are two of them.
    # RuntimeError: MyErrorName: message => MyErrorName: message
    # NOTE(review): message[0] assumes at least one non-traceback line exists;
    # an all-traceback input would raise IndexError here — confirm callers
    # never pass such input.
    head_arr = message[0].split(":", 3)
    if len(head_arr) >= 3 and _valid_error_name(head_arr[1].strip()):
        head_arr[1] = head_arr[1].strip()
        message[0] = ":".join(head_arr[1:])
    # reverse the stack trace.
    out_msg = "\n".join(message)
    if stack_trace:
        out_msg += "\nStack trace:\n"
        out_msg += "\n".join(reversed(stack_trace)) + "\n"
    return out_msg
def get_last_ffi_error():
    """Create error object given result of TVMGetLastError.

    Returns
    -------
    err : object
        The error object based on the err_msg
    """
    c_err_msg = py_str(_LIB.TVMGetLastError())
    py_err_msg, err_type = c2pyerror(c_err_msg)
    # Strip the "tvm.error." prefix (10 chars) so the bare name matches
    # the keys registered in ERROR_TYPE.
    if err_type is not None and err_type.startswith("tvm.error."):
        err_type = err_type[10:]
    # Unknown error types fall back to the generic TVMError.
    return ERROR_TYPE.get(err_type, TVMError)(py_err_msg)
def check_call(ret):
    """Check the return value of a C API call and raise on failure.

    Wrap every API call with this function.

    Parameters
    ----------
    ret : int
        Return value from the API call; non-zero indicates an error.
    """
    if ret == 0:
        return
    raise get_last_ffi_error()
| 9,294 | 25.633238 | 80 | py |
tvm | tvm-main/python/tvm/_ffi/libinfo.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Library information."""
import sys
import os
def split_env_var(env_var, split):
    """Split an environment variable's value on a separator.

    Parameters
    ----------
    env_var : str
        Name of environment variable.

    split : str
        String to split env_var on.

    Returns
    -------
    splits : list(string)
        The stripped pieces when env_var is set and non-empty; otherwise
        an empty list.
    """
    raw = os.environ.get(env_var)
    if not raw:
        return []
    return [piece.strip() for piece in raw.split(split)]
def get_dll_directories():
    """Get the possible dll directories.

    Returns
    -------
    dirs : list(str)
        Real paths of all candidate directories that actually exist,
        in search-priority order.
    """
    # NB: This will either be the source directory (if TVM is run
    # inplace) or the install directory (if TVM is installed).
    # An installed TVM's curr_path will look something like:
    #   $PREFIX/lib/python3.6/site-packages/tvm/_ffi
    ffi_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
    source_dir = os.path.join(ffi_dir, "..", "..", "..")
    install_lib_dir = os.path.join(ffi_dir, "..", "..", "..", "..")

    dll_path = []

    # Explicit override has the highest priority.
    if os.environ.get("TVM_LIBRARY_PATH", None):
        dll_path.append(os.environ["TVM_LIBRARY_PATH"])

    # Platform loader search paths come next.
    if sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
        dll_path.extend(split_env_var("LD_LIBRARY_PATH", ":"))
        dll_path.extend(split_env_var("PATH", ":"))
    elif sys.platform.startswith("darwin"):
        dll_path.extend(split_env_var("DYLD_LIBRARY_PATH", ":"))
        dll_path.extend(split_env_var("PATH", ":"))
    elif sys.platform.startswith("win32"):
        dll_path.extend(split_env_var("PATH", ";"))

    # Pip lib directory
    dll_path.append(os.path.join(ffi_dir, ".."))
    # Default cmake build directory
    dll_path.append(os.path.join(source_dir, "build"))
    dll_path.append(os.path.join(source_dir, "build", "Release"))
    # Default make build directory
    dll_path.append(os.path.join(source_dir, "lib"))

    dll_path.append(install_lib_dir)

    # Web/wasm artifacts only exist for in-place source checkouts.
    if os.path.isdir(source_dir):
        dll_path.append(os.path.join(source_dir, "web", "dist", "wasm"))
        dll_path.append(os.path.join(source_dir, "web", "dist"))

    dll_path = [os.path.realpath(x) for x in dll_path]
    # Keep only directories that actually exist.
    return [x for x in dll_path if os.path.isdir(x)]
def find_lib_path(name=None, search_path=None, optional=False):
    """Find dynamic library files.

    Parameters
    ----------
    name : Optional[str or list of str]
        Specific library file name(s) to look for. When None, the default
        libtvm / libtvm_runtime names for the current platform are used.

    search_path : Optional[str or list of str]
        Extra directories to search in addition to the defaults.

    optional : bool
        When True, return None instead of raising if nothing is found.

    Returns
    -------
    lib_path : list(string)
        List of all found path to the libraries
    """
    # Truthiness check: any non-empty value of the env var enables
    # runtime-only loading.
    use_runtime = os.environ.get("TVM_USE_RUNTIME_LIB", False)
    dll_path = get_dll_directories()
    if search_path is not None:
        if isinstance(search_path, list):
            dll_path = dll_path + search_path
        else:
            dll_path.append(search_path)

    if name is not None:
        # Caller asked for specific file name(s): search only those.
        if isinstance(name, list):
            lib_dll_path = []
            for n in name:
                lib_dll_path += [os.path.join(p, n) for p in dll_path]
        else:
            lib_dll_path = [os.path.join(p, name) for p in dll_path]
        runtime_dll_path = []
    else:
        # Default per-platform library file names.
        if sys.platform.startswith("win32"):
            lib_dll_names = ["libtvm.dll", "tvm.dll"]
            runtime_dll_names = ["libtvm_runtime.dll", "tvm_runtime.dll"]
        elif sys.platform.startswith("darwin"):
            lib_dll_names = ["libtvm.dylib"]
            runtime_dll_names = ["libtvm_runtime.dylib"]
        else:
            lib_dll_names = ["libtvm.so"]
            runtime_dll_names = ["libtvm_runtime.so"]

        name = lib_dll_names + runtime_dll_names
        lib_dll_path = [os.path.join(p, name) for name in lib_dll_names for p in dll_path]
        runtime_dll_path = [os.path.join(p, name) for name in runtime_dll_names for p in dll_path]

    if not use_runtime:
        # try to find lib_dll_path (full library first, runtime as fallback)
        lib_found = [p for p in lib_dll_path if os.path.exists(p) and os.path.isfile(p)]
        lib_found += [p for p in runtime_dll_path if os.path.exists(p) and os.path.isfile(p)]
    else:
        # try to find runtime_dll_path
        use_runtime = True
        lib_found = [p for p in runtime_dll_path if os.path.exists(p) and os.path.isfile(p)]

    if not lib_found:
        if not optional:
            message = (
                f"Cannot find libraries: {name}\n"
                + "List of candidates:\n"
                + "\n".join(lib_dll_path + runtime_dll_path)
            )
            raise RuntimeError(message)
        return None

    if use_runtime:
        sys.stderr.write("Loading runtime library %s... exec only\n" % lib_found[0])
        sys.stderr.flush()
    return lib_found
def find_include_path(name=None, search_path=None, optional=False):
    """Find header files for C compilation.

    Parameters
    ----------
    name : Optional[str or list of str]
        Specific include directory name(s) to look for. When None, the
        default tvm / dlpack / dmlc-core include directories are used.

    search_path : Optional[str or list of str]
        Extra directories to search in addition to the defaults.

    optional : bool
        When True, return None instead of raising if nothing is found.

    Returns
    -------
    include_path : list(string)
        List of all found paths to header files.
    """
    # TVM_HOME overrides auto-detection of the source tree.
    if os.environ.get("TVM_HOME", None):
        source_dir = os.environ["TVM_HOME"]
    else:
        ffi_dir = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
        source_dir = os.path.join(ffi_dir, "..", "..", "..")
    third_party_dir = os.path.join(source_dir, "3rdparty")

    header_path = []

    if os.environ.get("TVM_INCLUDE_PATH", None):
        header_path.append(os.environ["TVM_INCLUDE_PATH"])

    header_path.append(source_dir)
    header_path.append(third_party_dir)

    header_path = [os.path.abspath(x) for x in header_path]
    if search_path is not None:
        if isinstance(search_path, list):
            header_path = header_path + search_path
        else:
            header_path.append(search_path)

    if name is not None:
        # Caller asked for specific directory name(s): search only those.
        if isinstance(name, list):
            tvm_include_path = []
            for n in name:
                tvm_include_path += [os.path.join(p, n) for p in header_path]
        else:
            tvm_include_path = [os.path.join(p, name) for p in header_path]
        dlpack_include_path = []
        dmlc_include_path = []
    else:
        tvm_include_path = [os.path.join(p, "include") for p in header_path]
        dlpack_include_path = [os.path.join(p, "dlpack/include") for p in header_path]
        dmlc_include_path = [os.path.join(p, "dmlc-core/include") for p in header_path]

    # try to find include path
    include_found = [p for p in tvm_include_path if os.path.exists(p) and os.path.isdir(p)]
    include_found += [p for p in dlpack_include_path if os.path.exists(p) and os.path.isdir(p)]
    include_found += [p for p in dmlc_include_path if os.path.exists(p) and os.path.isdir(p)]

    if not include_found:
        message = (
            "Cannot find the files.\n"
            + "List of candidates:\n"
            + str("\n".join(tvm_include_path + dlpack_include_path))
        )
        if not optional:
            raise RuntimeError(message)
        return None

    return include_found
# current version
# We use the version of the incoming release for code
# that is under development.
# The following line is set by tvm/python/update_version.py
__version__ = "0.14.dev0"
| 7,983 | 34.327434 | 99 | py |
tvm | tvm-main/python/tvm/_ffi/registry.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-import
"""FFI registry to register function and objects."""
import sys
import ctypes
from .base import _LIB, check_call, py_str, c_str, string_types, _FFI_MODE, _RUNTIME_ONLY
try:
# pylint: disable=wrong-import-position,unused-import
if _FFI_MODE == "ctypes":
raise ImportError()
from ._cy3.core import _register_object, _get_object_type_index
from ._cy3.core import _reg_extension
from ._cy3.core import convert_to_tvm_func, _get_global_func, PackedFuncBase
except (RuntimeError, ImportError) as error:
# pylint: disable=wrong-import-position,unused-import
if _FFI_MODE == "cython":
raise error
from ._ctypes.object import _register_object, _get_object_type_index
from ._ctypes.ndarray import _reg_extension
from ._ctypes.packed_func import convert_to_tvm_func, _get_global_func, PackedFuncBase
def register_object(type_key=None):
    """register object type.

    Parameters
    ----------
    type_key : str or cls
        The type key of the node

    Examples
    --------
    The following code registers MyObject
    using type key "test.MyObject"

    .. code-block:: python

      @tvm.register_object("test.MyObject")
      class MyObject(Object):
          pass
    """
    # Bare-decorator form passes the class itself as type_key.
    object_name = type_key if isinstance(type_key, str) else type_key.__name__

    def register(cls):
        """internal register function"""
        if hasattr(cls, "_type_index"):
            # Class pre-declared its own type index.
            tindex = cls._type_index
        else:
            tidx = ctypes.c_uint()
            if not _RUNTIME_ONLY:
                check_call(_LIB.TVMObjectTypeKey2Index(c_str(object_name), ctypes.byref(tidx)))
            else:
                # directly skip unknown objects during runtime.
                ret = _LIB.TVMObjectTypeKey2Index(c_str(object_name), ctypes.byref(tidx))
                if ret != 0:
                    return cls
            tindex = tidx.value
        _register_object(tindex, cls)
        return cls

    if isinstance(type_key, str):
        return register
    return register(type_key)
def get_object_type_index(cls):
    """
    Get type index of object type

    Parameters
    ----------
    cls : type
        The object type to get type index for.

    Returns
    -------
    type_index : Optional[int]
        The type index, or None if type not found in the registry.
    """
    # Thin wrapper over the FFI-backed registry lookup.
    return _get_object_type_index(cls)
def register_extension(cls, fcreate=None):
    """Register a extension class to TVM.

    After the class is registered, the class will be able
    to directly pass as Function argument generated by TVM.

    Parameters
    ----------
    cls : class
        The class object to be registered as extension.

    fcreate : function, optional
        The creation function to create a class object given handle value.
        Deprecated: passing a non-None value raises ValueError.

    Note
    ----
    The registered class is requires one property: _tvm_handle.

    If the registered class is a subclass of NDArray,
    it is required to have a class attribute _array_type_code.
    Otherwise, it is required to have a class attribute _tvm_tcode.

    - ```_tvm_handle``` returns integer represents the address of the handle.
    - ```_tvm_tcode``` or ```_array_type_code``` gives integer represents type
      code of the class.

    Returns
    -------
    cls : class
        The class being registered.

    Example
    -------
    The following code registers user defined class
    MyTensor to be DLTensor compatible.

    .. code-block:: python

       @tvm.register_extension
       class MyTensor(object):
           _tvm_tcode = tvm.ArgTypeCode.ARRAY_HANDLE

           def __init__(self):
               self.handle = _LIB.NewDLTensor()

           @property
           def _tvm_handle(self):
               return self.handle.value
    """
    assert hasattr(cls, "_tvm_tcode")
    if fcreate:
        raise ValueError("Extension with fcreate is no longer supported")
    _reg_extension(cls, fcreate)
    return cls
def register_func(func_name, f=None, override=False):
    """Register global function

    Parameters
    ----------
    func_name : str or function
        The function name

    f : function, optional
        The function to be registered.

    override: boolean optional
        Whether override existing entry.

    Returns
    -------
    fregister : function
        Register function if f is not specified.

    Examples
    --------
    The following code registers my_packed_func as global function.
    Note that we simply get it back from global function table to invoke
    it from python side. However, we can also invoke the same function
    from C++ backend, or in the compiled TVM code.

    .. code-block:: python

      targs = (10, 10.0, "hello")
      @tvm.register_func
      def my_packed_func(*args):
          assert(tuple(args) == targs)
          return 10
      # Get it out from global function table
      f = tvm.get_global_func("my_packed_func")
      assert isinstance(f, tvm.PackedFunc)
      y = f(*targs)
      assert y == 10
    """
    # Bare-decorator form: register_func applied directly to the function.
    if callable(func_name):
        f = func_name
        func_name = f.__name__

    if not isinstance(func_name, str):
        raise ValueError("expect string function name")

    ioverride = ctypes.c_int(override)

    def register(myf):
        """internal register function"""
        # Wrap plain Python callables into PackedFunc before registration.
        if not isinstance(myf, PackedFuncBase):
            myf = convert_to_tvm_func(myf)
        check_call(_LIB.TVMFuncRegisterGlobal(c_str(func_name), myf.handle, ioverride))
        return myf

    if f:
        return register(f)
    return register
def get_global_func(name, allow_missing=False):
    """Get a global function by name

    Parameters
    ----------
    name : str
        The name of the global function

    allow_missing : bool
        Whether allow missing function or raise an error.

    Returns
    -------
    func : PackedFunc
        The function to be returned, None if function is missing.
    """
    return _get_global_func(name, allow_missing)
def list_global_func_names():
    """List the names of all registered global functions.

    Returns
    -------
    names : list
        List of global functions names.
    """
    plist = ctypes.POINTER(ctypes.c_char_p)()
    size = ctypes.c_uint()
    check_call(_LIB.TVMFuncListGlobalNames(ctypes.byref(size), ctypes.byref(plist)))
    # Decode each C string out of the returned array.
    return [py_str(plist[idx]) for idx in range(size.value)]
def extract_ext_funcs(finit):
    """
    Extract the extension PackedFuncs from a C module.

    Parameters
    ----------
    finit : ctypes function
        a ctypes that takes signature of TVMExtensionDeclarer

    Returns
    -------
    fdict : dict of str to Function
        The extracted functions
    """
    fdict = {}

    def _list(name, func):
        # Callback invoked by the C side once per declared function.
        fdict[name] = func

    myf = convert_to_tvm_func(_list)
    ret = finit(myf.handle)
    # Keep a reference so the callback outlives the C call.
    _ = myf
    if ret != 0:
        raise RuntimeError("cannot initialize with %s" % finit)
    return fdict
def remove_global_func(name):
    """Remove a global function by name

    Parameters
    ----------
    name : str
        The name of the global function
    """
    check_call(_LIB.TVMFuncRemoveGlobal(c_str(name)))
def _get_api(f):
flocal = f
flocal.is_global = True
return flocal
def _init_api(namespace, target_module_name=None):
    """Initialize api for a given module name

    namespace : str
        The namespace of the source registry

    target_module_name : str
        The target module name if different from namespace
    """
    target = target_module_name if target_module_name else namespace
    # Strip the leading "tvm." so the registry prefix matches C-side names.
    prefix = namespace[4:] if namespace.startswith("tvm.") else namespace
    _init_api_prefix(target, prefix)
def _init_api_prefix(module_name, prefix):
    """Attach all registered global functions under *prefix* to a module.

    Parameters
    ----------
    module_name : str
        Name of the (already imported) Python module to populate.

    prefix : str
        Registry prefix to match, e.g. "tir.transform".
    """
    module = sys.modules[module_name]

    for name in list_global_func_names():
        if not name.startswith(prefix):
            continue

        # Strip "<prefix>." from the registered name.
        fname = name[len(prefix) + 1 :]
        target_module = module

        # Skip names from deeper nested namespaces.
        if fname.find(".") != -1:
            continue
        f = get_global_func(name)
        ff = _get_api(f)
        ff.__name__ = fname
        ff.__doc__ = "TVM PackedFunc %s. " % fname
        setattr(target_module, ff.__name__, ff)
| 9,123 | 26.481928 | 95 | py |
tvm | tvm-main/python/tvm/_ffi/_pyversion.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Python version check
"""
import sys
# ----------------------------
# Python3 version.
# ----------------------------
# Enforce the minimal interpreter version. Tuple comparison is used instead of
# checking major/minor separately: the old form
# `version_info[0] >= 3 and version_info[1] >= 6` would wrongly reject a
# hypothetical Python 4.0-4.5 (minor < 6).
if sys.version_info < (3, 6):
    PY3STATEMENT = "The minimal Python requirement is Python 3.6"
    raise Exception(PY3STATEMENT)
| 1,070 | 38.666667 | 65 | py |
tvm | tvm-main/python/tvm/_ffi/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""C interfacing code.
This namespace contains everything that interacts with C code.
Most TVM C related object are ctypes compatible, which means
they contains a handle field that is ctypes.c_void_p and can
be used via ctypes function calls.
Some performance critical functions are implemented by cython
and have a ctypes fallback implementation.
"""
from . import _pyversion
from .base import register_error
from .registry import register_object, register_func, register_extension
from .registry import _init_api, get_global_func, get_object_type_index
| 1,342 | 42.322581 | 72 | py |
tvm | tvm-main/python/tvm/_ffi/runtime_ctypes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common runtime ctypes."""
# pylint: disable=invalid-name
import ctypes
import json
import numpy as np
try:
import ml_dtypes
except ImportError:
ml_dtypes = None
from .base import _LIB, check_call
tvm_shape_index_t = ctypes.c_int64
class ArgTypeCode(object):
    """Type code used in API calls.

    NOTE(review): these integer values must stay in sync with the C side of
    the FFI — confirm against the C headers before changing any of them.
    """

    INT = 0
    UINT = 1
    FLOAT = 2
    HANDLE = 3
    NULL = 4
    TVM_TYPE = 5
    DLDEVICE = 6
    DLTENSOR_HANDLE = 7
    OBJECT_HANDLE = 8
    MODULE_HANDLE = 9
    PACKED_FUNC_HANDLE = 10
    STR = 11
    BYTES = 12
    NDARRAY_HANDLE = 13
    OBJECT_RVALUE_REF_ARG = 14
    # First code available for externally registered extension types.
    EXT_BEGIN = 15
class TVMByteArray(ctypes.Structure):
    """Temp data structure for byte array."""

    # Raw (pointer, length) pair describing a byte buffer.
    _fields_ = [("data", ctypes.POINTER(ctypes.c_byte)), ("size", ctypes.c_size_t)]
class DataTypeCode(object):
    """DataType code in DLTensor."""

    INT = 0
    UINT = 1
    FLOAT = 2
    HANDLE = 3
    BFLOAT = 4
    # NOTE(review): code 5 is intentionally skipped here — presumably reserved
    # elsewhere; confirm against the DLPack / C headers.
    E4M3Float = 6
    E5M2Float = 7
class DataType(ctypes.Structure):
    """TVM datatype structure, mirroring DLDataType (type_code, bits, lanes)."""

    _fields_ = [("type_code", ctypes.c_uint8), ("bits", ctypes.c_uint8), ("lanes", ctypes.c_uint16)]

    # Map type code -> canonical string prefix used by __repr__.
    CODE2STR = {
        DataTypeCode.INT: "int",
        DataTypeCode.UINT: "uint",
        DataTypeCode.FLOAT: "float",
        DataTypeCode.HANDLE: "handle",
        DataTypeCode.BFLOAT: "bfloat",
        DataTypeCode.E4M3Float: "e4m3_float",
        DataTypeCode.E5M2Float: "e5m2_float",
    }
NUMPY2STR = {
np.dtype(np.bool_): "bool",
np.dtype(np.int8): "int8",
np.dtype(np.int16): "int16",
np.dtype(np.int32): "int32",
np.dtype(np.int64): "int64",
np.dtype(np.uint8): "uint8",
np.dtype(np.uint16): "uint16",
np.dtype(np.uint32): "uint32",
np.dtype(np.uint64): "uint64",
np.dtype(np.float16): "float16",
np.dtype(np.float32): "float32",
np.dtype(np.float64): "float64",
np.dtype(np.float_): "float64",
}
STR2DTYPE = {
"void": {"type_code": DataTypeCode.HANDLE, "bits": 0, "lanes": 0},
"bool": {"type_code": DataTypeCode.UINT, "bits": 1, "lanes": 1},
"int8": {"type_code": DataTypeCode.INT, "bits": 8, "lanes": 1},
"int16": {"type_code": DataTypeCode.INT, "bits": 16, "lanes": 1},
"int32": {"type_code": DataTypeCode.INT, "bits": 32, "lanes": 1},
"int64": {"type_code": DataTypeCode.INT, "bits": 64, "lanes": 1},
"uint8": {"type_code": DataTypeCode.UINT, "bits": 8, "lanes": 1},
"uint16": {"type_code": DataTypeCode.UINT, "bits": 16, "lanes": 1},
"uint32": {"type_code": DataTypeCode.UINT, "bits": 32, "lanes": 1},
"uint64": {"type_code": DataTypeCode.UINT, "bits": 64, "lanes": 1},
"e4m3_float8": {"type_code": DataTypeCode.E4M3Float, "bits": 8, "lanes": 1},
"e5m2_float8": {"type_code": DataTypeCode.E5M2Float, "bits": 8, "lanes": 1},
"float16": {"type_code": DataTypeCode.FLOAT, "bits": 16, "lanes": 1},
"float32": {"type_code": DataTypeCode.FLOAT, "bits": 32, "lanes": 1},
"float64": {"type_code": DataTypeCode.FLOAT, "bits": 64, "lanes": 1},
}
def __init__(self, type_str):
super(DataType, self).__init__()
numpy_str_map = DataType.NUMPY2STR
if type_str in numpy_str_map:
type_str = numpy_str_map[type_str]
elif isinstance(type_str, np.dtype):
type_str = str(type_str)
assert isinstance(type_str, str)
str_dtype_map = DataType.STR2DTYPE
if type_str in str_dtype_map:
dtype_map = str_dtype_map[type_str]
self.bits = dtype_map["bits"]
self.type_code = dtype_map["type_code"]
self.lanes = dtype_map["lanes"]
return
arr = type_str.split("x")
head = arr[0]
self.lanes = int(arr[1]) if len(arr) > 1 else 1
bits = 32
if head.startswith("int"):
self.type_code = DataTypeCode.INT
head = head[3:]
elif head.startswith("uint"):
self.type_code = DataTypeCode.UINT
head = head[4:]
elif head.startswith("float"):
self.type_code = DataTypeCode.FLOAT
head = head[5:]
elif head.startswith("handle"):
self.type_code = DataTypeCode.HANDLE
bits = 64
head = ""
elif head.startswith("bfloat"):
self.type_code = DataTypeCode.BFLOAT
head = head[6:]
elif head.startswith("e4m3_float"):
self.type_code = DataTypeCode.E4M3Float
head = head[10:]
elif head.startswith("e5m2_float"):
self.type_code = DataTypeCode.E5M2Float
head = head[10:]
elif head.startswith("custom"):
# pylint: disable=import-outside-toplevel
import tvm.runtime._ffi_api
low, high = head.find("["), head.find("]")
if not low or not high or low >= high:
raise ValueError("Badly formatted custom type string %s" % type_str)
type_name = head[low + 1 : high]
self.type_code = tvm.runtime._ffi_api._datatype_get_type_code(type_name)
head = head[high + 1 :]
else:
raise ValueError("Do not know how to handle type %s" % type_str)
bits = int(head) if head else bits
self.bits = bits
def __repr__(self):
# pylint: disable=import-outside-toplevel
if self.bits == 0 and self.lanes == 0:
return "void"
if self.bits == 1 and self.lanes == 1:
return "bool"
if self.type_code in DataType.CODE2STR:
type_name = DataType.CODE2STR[self.type_code]
else:
import tvm.runtime._ffi_api
type_name = "custom[%s]" % tvm.runtime._ffi_api._datatype_get_type_name(self.type_code)
x = "%s%d" % (type_name, self.bits)
if self.lanes != 1:
x += "x%d" % self.lanes
return x
def __eq__(self, other):
return (
self.bits == other.bits
and self.type_code == other.type_code
and self.lanes == other.lanes
)
def __ne__(self, other):
return not self.__eq__(other)
# Register numpy dtype objects from the optional `ml_dtypes` package
# (bfloat16 / float8 variants) so they map to TVM dtype strings.
# `ml_dtypes` is assumed to be imported earlier in this module and set to
# None when the package is unavailable — TODO confirm against file head.
if ml_dtypes is not None:
    DataType.NUMPY2STR[np.dtype(ml_dtypes.bfloat16)] = "bfloat16"
    DataType.NUMPY2STR[np.dtype(ml_dtypes.float8_e4m3fn)] = "e4m3_float8"
    DataType.NUMPY2STR[np.dtype(ml_dtypes.float8_e5m2)] = "e5m2_float8"

# Device types at or above this value denote devices living on a remote
# RPC session (see Device.__repr__).
RPC_SESS_MASK = 128
class Device(ctypes.Structure):
    """TVM device structure.

    Typically constructed using convenience function
    :meth:`tvm.runtime.device`.

    Exposes uniform interface to device-specific APIs such as CUDA or
    OpenCL.  Some properties may return None depending on whether an
    API exposes that particular property.

    NOTE!  The integer values in MASK2STR and STR2MASK *must* correspond
    to the values provided by the DLDeviceType and TVMDeviceExtType enums.
    """

    kDLCPU = 1
    kDLCUDA = 2
    kDLCUDAHost = 3
    kDLOpenCL = 4
    kDLVulkan = 7
    kDLMetal = 8
    kDLVPI = 9
    kDLROCM = 10
    kDLROCMHost = 11
    kDLExtDev = 12
    kDLCUDAManaged = 13
    kDLOneAPI = 14
    kDLWebGPU = 15
    kDLHexagon = 16
    kDLAOCL = 32
    kDLSDAccel = 33
    kOpenGL = 34
    kDLMicroDev = 35

    _fields_ = [("device_type", ctypes.c_int), ("device_id", ctypes.c_int)]

    # device_type value -> canonical device name.
    MASK2STR = {
        kDLCPU: "cpu",
        kDLCUDA: "cuda",
        kDLCUDAHost: "cuda_host",
        kDLCUDAManaged: "cuda_managed",
        kDLOpenCL: "opencl",
        kDLVulkan: "vulkan",
        kDLMetal: "metal",
        kDLVPI: "vpi",
        kDLROCM: "rocm",
        kDLROCMHost: "rocm_host",
        kDLExtDev: "ext_dev",
        kDLOneAPI: "oneapi",
        kDLWebGPU: "webgpu",
        kDLHexagon: "hexagon",
        kDLAOCL: "aocl",
        kDLSDAccel: "sdaccel",
        kOpenGL: "opengl",
        kDLMicroDev: "microdev",
    }

    # target/device name -> device_type value (several aliases per device).
    STR2MASK = {
        "llvm": kDLCPU,
        "stackvm": kDLCPU,
        "cpu": kDLCPU,
        "c": kDLCPU,
        "test": kDLCPU,
        "hybrid": kDLCPU,
        "composite": kDLCPU,
        "cuda": kDLCUDA,
        "nvptx": kDLCUDA,
        "cl": kDLOpenCL,
        "opencl": kDLOpenCL,
        "sdaccel": kDLOpenCL,
        "aocl": kDLAOCL,
        "aocl_sw_emu": kDLAOCL,
        "vulkan": kDLVulkan,
        "metal": kDLMetal,
        "vpi": kDLVPI,
        "rocm": kDLROCM,
        "ext_dev": kDLExtDev,
        "hexagon": kDLHexagon,
        "webgpu": kDLWebGPU,
    }

    def __init__(self, device_type, device_id):
        super(Device, self).__init__()
        self.device_type = int(device_type)
        # Coerce device_id too, for symmetry with device_type, so both
        # fields accept any int-convertible value.
        self.device_id = int(device_id)

    def _GetDeviceAttr(self, device_type, device_id, attr_id):
        """Internal helper function to invoke runtime.GetDeviceAttr"""
        # pylint: disable=import-outside-toplevel
        import tvm.runtime._ffi_api

        return tvm.runtime._ffi_api.GetDeviceAttr(device_type, device_id, attr_id)

    @property
    def exist(self):
        """Whether this device exists.

        Returns True if TVM has support for the device, if the
        physical device is present, and the device is accessible
        through appropriate drivers (e.g. cuda/vulkan).

        Returns
        -------
        exist : bool
            True if the device exists
        """
        return self._GetDeviceAttr(self.device_type, self.device_id, 0) != 0

    @property
    def max_threads_per_block(self):
        """Maximum number of threads on each block.

        Returns device value for cuda, metal, rocm, opencl, and vulkan
        devices.  Returns remote device value for RPC devices.  Returns
        None for all other devices.

        Returns
        -------
        max_threads_per_block : int or None
            The number of threads on each block
        """
        return self._GetDeviceAttr(self.device_type, self.device_id, 1)

    @property
    def warp_size(self):
        """Number of threads that execute concurrently.

        Returns device value for cuda, rocm, and vulkan.  Returns
        1 for metal and opencl devices, regardless of the physical
        device.  Returns remote device value for RPC devices.  Returns
        None for all other devices.

        Returns
        -------
        warp_size : int or None
            Number of threads that execute concurrently
        """
        return self._GetDeviceAttr(self.device_type, self.device_id, 2)

    @property
    def max_shared_memory_per_block(self):
        """Total amount of shared memory per block in bytes.

        Returns device value for cuda, rocm, opencl, and vulkan.
        Returns remote device value for RPC devices.  Returns None for
        all other devices.

        Returns
        -------
        max_shared_memory_per_block : int or None
            Total amount of shared memory per block in bytes
        """
        return self._GetDeviceAttr(self.device_type, self.device_id, 3)

    @property
    def compute_version(self):
        """Get compute version number as string.

        Returns maximum API version (e.g. CUDA/OpenCL/Vulkan)
        supported by the device.

        Returns device value for cuda, rocm, opencl, and
        vulkan.  Returns remote device value for RPC devices.  Returns
        None for all other devices.

        Returns
        -------
        version : str or None
            The version string in `major.minor` format.
        """
        return self._GetDeviceAttr(self.device_type, self.device_id, 4)

    @property
    def device_name(self):
        """Return the vendor-specific name of device.

        Returns device value for cuda, rocm, opencl, and vulkan.
        Returns remote device value for RPC devices.  Returns None for
        all other devices.

        Returns
        -------
        device_name : str or None
            The name of the device.
        """
        return self._GetDeviceAttr(self.device_type, self.device_id, 5)

    @property
    def max_clock_rate(self):
        """Return the max clock frequency of device (kHz).

        Returns device value for cuda, rocm, and opencl.  Returns
        remote device value for RPC devices.  Returns None for all
        other devices.

        Returns
        -------
        max_clock_rate : int or None
            The maximum clock frequency of the device (kHz)
        """
        return self._GetDeviceAttr(self.device_type, self.device_id, 6)

    @property
    def multi_processor_count(self):
        """Return the number of compute units in the device.

        Returns device value for cuda, rocm, and opencl.  Returns
        remote device value for RPC devices.  Returns None for all
        other devices.

        Returns
        -------
        multi_processor_count : int or None
            The number of compute units in the device
        """
        return self._GetDeviceAttr(self.device_type, self.device_id, 7)

    @property
    def max_thread_dimensions(self):
        """Return the maximum size of each thread axis

        Returns device value for cuda, rocm, opencl, and vulkan.
        Returns remote device value for RPC devices.  Returns None for
        all other devices.

        Returns
        -------
        dims: List of int, or None
            The maximum length of threadIdx.x, threadIdx.y, threadIdx.z
        """
        # The C side returns the dimensions JSON-encoded.
        return json.loads(self._GetDeviceAttr(self.device_type, self.device_id, 8))

    @property
    def api_version(self):
        """Returns version number of the SDK used to compile TVM.

        For example, CUDA_VERSION for cuda or VK_HEADER_VERSION for
        Vulkan.

        Returns device value for cuda, rocm, opencl, and vulkan.
        Returns remote device value for RPC devices.  Returns None for
        all other devices.

        Returns
        -------
        version : int or None
            The version of the SDK
        """
        return self._GetDeviceAttr(self.device_type, self.device_id, 11)

    @property
    def driver_version(self):
        """Returns version number of the driver

        Returns driver vendor's internal version number.
        (e.g. "450.408.256" for nvidia-driver-450)

        Returns device value for opencl and vulkan.  Returns remote
        device value for RPC devices.  Returns None for all other
        devices.

        Returns
        -------
        version : str or None
            The version string in `major.minor.patch` format.
        """
        return self._GetDeviceAttr(self.device_type, self.device_id, 12)

    @property
    def l2_cache_size_bytes(self):
        """Return the size of the device L2 cache in bytes

        Supported devices include CUDA/ROCM/OpenCL.

        Returns
        -------
        l2_cache_size_bytes : int or None
            The size of the device L2 cache in bytes returned by device runtime API.
            Return None if the device does not support this feature.

        Note
        ----
        The value returned by opencl's API is smaller than actual device L2 cache size.
        """
        return self._GetDeviceAttr(self.device_type, self.device_id, 13)

    def texture_spatial_limit(self):
        """Returns limits for textures by spatial dimensions

        Returns
        -------
        limit : int or None
            Maximum size of the texture by spatial dimensions
        """
        # NOTE(review): attr_id 12 is the same id driver_version uses above —
        # confirm against the DeviceAttrKind enum in the C++ runtime.
        return self._GetDeviceAttr(self.device_type, self.device_id, 12)

    def create_raw_stream(self):
        """Create a new runtime stream at the context.

        User should free the stream after use.

        Returns
        -------
        stream : TVMStreamHandle
            The created runtime stream.
        """
        stream = ctypes.c_void_p()
        check_call(_LIB.TVMStreamCreate(self.device_type, self.device_id, ctypes.byref(stream)))
        return stream

    def free_raw_stream(self, stream):
        """Free a created stream handle.

        Parameters
        ----------
        stream : TVMStreamHandle
            The stream which should to be released.
        """
        check_call(_LIB.TVMStreamFree(self.device_type, self.device_id, stream))

    def set_raw_stream(self, stream):
        """Set a created stream handle.

        Parameters
        ----------
        stream : TVMStreamHandle
            The stream which should to be set to the device.
        """
        check_call(_LIB.TVMSetStream(self.device_type, self.device_id, stream))

    def sync(self, stream=None):
        """Synchronize until jobs finished at the context.

        Parameters
        ----------
        stream : TVMStreamHandle
            Jobs in this stream should be finished.
        """
        check_call(_LIB.TVMSynchronize(self.device_type, self.device_id, stream))

    def __eq__(self, other):
        return (
            isinstance(other, Device)
            and self.device_id == other.device_id
            and self.device_type == other.device_type
        )

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(str(self))

    def __repr__(self):
        if self.device_type >= RPC_SESS_MASK:
            # BUGFIX: use floor division — `/` yields a float in Python 3.
            tbl_id = self.device_type // RPC_SESS_MASK - 1
            dev_type = self.device_type % RPC_SESS_MASK
            return "remote[%d]:%s(%d)" % (tbl_id, Device.MASK2STR[dev_type], self.device_id)
        return "%s(%d)" % (Device.MASK2STR[self.device_type], self.device_id)
class TVMArray(ctypes.Structure):
    """ctypes mirror of the C-side TVMArray (DLTensor-layout) struct."""

    _fields_ = [
        ("data", ctypes.c_void_p),
        ("device", Device),
        ("ndim", ctypes.c_int),
        ("dtype", DataType),
        ("shape", ctypes.POINTER(tvm_shape_index_t)),
        ("strides", ctypes.POINTER(tvm_shape_index_t)),
        ("byte_offset", ctypes.c_uint64),
    ]

    def __str__(self):
        # Materialize the C shape/strides arrays; a NULL strides pointer
        # renders as an empty list.
        rank = self.ndim
        shape = [self.shape[axis] for axis in range(rank)]
        strides = [self.strides[axis] for axis in range(rank)] if self.strides else []
        return (
            f"TVMArray(data=0x{self.data:016x}, device={self.device}, "
            f"dtype={self.dtype}, shape={shape}, "
            f"strides={strides}, byte_offset={self.byte_offset})"
        )
class ObjectRValueRef:
    """An r-value reference wrapper marking an object as movable.

    Parameters
    ----------
    obj : tvm.runtime.Object
        The object this reference points at.
    """

    __slots__ = ["obj"]

    def __init__(self, obj):
        self.obj = obj
# Pointer-to-TVMArray handle type passed across the ctypes FFI boundary.
TVMArrayHandle = ctypes.POINTER(TVMArray)
| 19,352 | 29.816879 | 100 | py |
tvm | tvm-main/python/tvm/_ffi/_cy2/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""cython2 namespace"""
| 809 | 44 | 62 | py |
tvm | tvm-main/python/tvm/_ffi/_cy3/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""cython3 namespace"""
| 809 | 44 | 62 | py |
tvm | tvm-main/python/tvm/_ffi/_ctypes/object.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Runtime Object api"""
import ctypes
from ..base import _LIB, check_call
from .types import ArgTypeCode, RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func
from .ndarray import _register_ndarray, NDArrayBase
# Opaque handle type for TVM runtime objects.
ObjectHandle = ctypes.c_void_p

# Callable used to construct objects via a constructor PackedFunc; installed
# later by the packed_func module (None until then).
__init_by_constructor__ = None

# Maps object type index -> registered Python class.
# (The original bare-string "docstrings" here were no-op statements attached
# to the wrong names; replaced with comments on the correct lines.)
OBJECT_TYPE = {}

# Maps registered Python class -> object type index.
OBJECT_INDEX = {}

# Fallback Python class for objects without a registered type index; set via
# _set_class_object().
_CLASS_OBJECT = None
def _set_class_object(object_class):
    """Install the Python class used for objects with no registered type."""
    global _CLASS_OBJECT
    _CLASS_OBJECT = object_class
def _register_object(index, cls):
    """Register *cls* under type *index*.

    NDArray subclasses go into the separate ndarray table; everything else
    is recorded in the object type maps (in both directions).
    """
    if issubclass(cls, NDArrayBase):
        _register_ndarray(index, cls)
    else:
        OBJECT_TYPE[index] = cls
        OBJECT_INDEX[cls] = index
def _get_object_type_index(cls):
    """Return the registered type index of *cls*, or None if unregistered."""
    try:
        return OBJECT_INDEX[cls]
    except KeyError:
        return None
def _return_object(x):
    """Convert a TVMValue carrying an object handle into a Python object.

    Looks up the registered Python class by the object's runtime type index,
    falling back to the generic _CLASS_OBJECT.
    """
    handle = x.v_handle
    if not isinstance(handle, ObjectHandle):
        handle = ObjectHandle(handle)
    tindex = ctypes.c_uint()
    check_call(_LIB.TVMObjectGetTypeIndex(handle, ctypes.byref(tindex)))
    cls = OBJECT_TYPE.get(tindex.value, _CLASS_OBJECT)
    if issubclass(cls, PyNativeObject):
        # Native-type wrappers hold a generic object internally and build
        # the Python-native value from it.
        obj = _CLASS_OBJECT.__new__(_CLASS_OBJECT)
        obj.handle = handle
        return cls.__from_tvm_object__(cls, obj)
    # Avoid calling __init__ of cls, instead directly call __new__
    # This allows child class to implement their own __init__
    obj = cls.__new__(cls)
    obj.handle = handle
    return obj
# Route object handles coming back from C through _return_object — both as
# plain return values and as callback arguments (the latter are first
# promoted to return-value ownership inside _wrap_arg_func).
RETURN_SWITCH[ArgTypeCode.OBJECT_HANDLE] = _return_object
C_TO_PY_ARG_SWITCH[ArgTypeCode.OBJECT_HANDLE] = _wrap_arg_func(
    _return_object, ArgTypeCode.OBJECT_HANDLE
)
C_TO_PY_ARG_SWITCH[ArgTypeCode.OBJECT_RVALUE_REF_ARG] = _wrap_arg_func(
    _return_object, ArgTypeCode.OBJECT_RVALUE_REF_ARG
)
class PyNativeObject:
    """Base class of all TVM objects that also subclass python's builtin types."""

    __slots__ = []

    def __init_tvm_object_by_constructor__(self, fconstructor, *args):
        """Initialize the internal tvm object by invoking *fconstructor*.

        Parameters
        ----------
        fconstructor : Function
            Constructor function.

        args: list of objects
            The arguments to the constructor

        Note
        ----
        Constructor functions follow a special calling convention: the
        constructed object is stored directly on ``self.__tvm_object__``.
        """
        # pylint: disable=assigning-non-slot
        wrapper = _CLASS_OBJECT.__new__(_CLASS_OBJECT)
        wrapper.__init_handle_by_constructor__(fconstructor, *args)
        self.__tvm_object__ = wrapper
class ObjectBase(object):
    """Base object for all object types"""

    __slots__ = ["handle"]

    def __del__(self):
        # _LIB may already be unloaded during interpreter shutdown.
        if _LIB is not None:
            try:
                handle = self.handle
            except AttributeError:
                # Construction failed before `handle` was ever assigned.
                return
            check_call(_LIB.TVMObjectFree(handle))

    def __init_handle_by_constructor__(self, fconstructor, *args):
        """Initialize the handle by calling constructor function.

        Parameters
        ----------
        fconstructor : Function
            Constructor function.

        args: list of objects
            The arguments to the constructor

        Note
        ----
        We have a special calling convention to call constructor functions.
        So the return handle is directly set into the Node object
        instead of creating a new Node.
        """
        # assign handle first to avoid error raising
        # pylint: disable=not-callable
        self.handle = None
        handle = __init_by_constructor__(fconstructor, args)
        if not isinstance(handle, ObjectHandle):
            handle = ObjectHandle(handle)
        self.handle = handle

    def same_as(self, other):
        """Check object identity.

        Parameters
        ----------
        other : object
            The other object to compare against.

        Returns
        -------
        result : bool
            The comparison result.
        """
        if not isinstance(other, ObjectBase):
            return False
        if self.handle is None:
            # Both must be handle-less to be considered identical.
            return other.handle is None
        # Identity means both handles point at the same C++ object.
        return self.handle.value == other.handle.value
| 5,075 | 29.214286 | 82 | py |
tvm | tvm-main/python/tvm/_ffi/_ctypes/types.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The C Types used in API."""
# pylint: disable=invalid-name
import ctypes
import struct
from ..base import py_str, check_call, _LIB
from ..runtime_ctypes import TVMByteArray, ArgTypeCode, Device
class TVMValue(ctypes.Union):
    """TVMValue in C API"""

    # Union view of one argument/return slot; which member is active is
    # determined by the accompanying ArgTypeCode.
    _fields_ = [
        ("v_int64", ctypes.c_int64),
        ("v_float64", ctypes.c_double),
        ("v_handle", ctypes.c_void_p),
        ("v_str", ctypes.c_char_p),
    ]
# C prototype of a packed function callback:
#   int (TVMValue* args, int* type_codes, int num_args,
#        void* ret_handle, void* resource_handle)
TVMPackedCFunc = ctypes.CFUNCTYPE(
    ctypes.c_int,
    ctypes.POINTER(TVMValue),
    ctypes.POINTER(ctypes.c_int),
    ctypes.c_int,
    ctypes.c_void_p,
    ctypes.c_void_p,
)

# Finalizer invoked when the C side releases the resource handle.
TVMCFuncFinalizer = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
def _return_handle(x):
    """Extract the opaque handle from a TVMValue as a ctypes.c_void_p."""
    handle = x.v_handle
    if isinstance(handle, ctypes.c_void_p):
        return handle
    return ctypes.c_void_p(handle)
def _return_bytes(x):
    """return bytes"""
    handle = x.v_handle
    if not isinstance(handle, ctypes.c_void_p):
        handle = ctypes.c_void_p(handle)
    # Reinterpret the handle as a pointer to a TVMByteArray descriptor.
    arr = ctypes.cast(handle, ctypes.POINTER(TVMByteArray))[0]
    size = arr.size
    res = bytearray(size)
    # Copy the C-owned buffer into the Python-owned bytearray so the result
    # outlives the call.
    rptr = (ctypes.c_byte * size).from_buffer(res)
    if not ctypes.memmove(rptr, arr.data, size):
        raise RuntimeError("memmove failed")
    return res
def _return_device(value):
    """Decode a Device bit-packed into the int64 slot of a TVMValue."""
    # We use this to get around a ctypes issue on Union of Structure:
    # (device_type, device_id) are packed into v_int64 and unpacked here
    # via a native-endian byte round trip.
    raw = struct.pack("=q", value.v_int64)
    device_type, device_id = struct.unpack("=ii", raw)
    return Device(device_type, device_id)
def _wrap_arg_func(return_f, type_code):
    """Wrap *return_f* for callback arguments of the given *type_code*."""

    def _wrap_func(x):
        # Promote the borrowed callback argument to return-value ownership
        # on the C side before converting it to a Python value.
        tcode = ctypes.c_int(type_code)
        check_call(_LIB.TVMCbArgToReturn(ctypes.byref(x), ctypes.byref(tcode)))
        return return_f(x)

    return _wrap_func
def _device_to_int64(dev):
    """Pack (device_type, device_id) into a single native-endian int64."""
    raw = struct.pack("=ii", dev.device_type, dev.device_id)
    (encoded,) = struct.unpack("=q", raw)
    return encoded
# Dispatch table: ArgTypeCode -> converter for TVMValues *returned* from C.
RETURN_SWITCH = {
    ArgTypeCode.INT: lambda x: x.v_int64,
    ArgTypeCode.FLOAT: lambda x: x.v_float64,
    ArgTypeCode.HANDLE: _return_handle,
    ArgTypeCode.NULL: lambda x: None,
    ArgTypeCode.STR: lambda x: py_str(x.v_str),
    ArgTypeCode.BYTES: _return_bytes,
    ArgTypeCode.DLDEVICE: _return_device,
}

# Dispatch table: ArgTypeCode -> converter for TVMValues received as
# *callback arguments*; object-like codes are added elsewhere wrapped in
# _wrap_arg_func to handle ownership.
C_TO_PY_ARG_SWITCH = {
    ArgTypeCode.INT: lambda x: x.v_int64,
    ArgTypeCode.FLOAT: lambda x: x.v_float64,
    ArgTypeCode.HANDLE: _return_handle,
    ArgTypeCode.NULL: lambda x: None,
    ArgTypeCode.STR: lambda x: py_str(x.v_str),
    ArgTypeCode.BYTES: _return_bytes,
    ArgTypeCode.DLDEVICE: _return_device,
}
tvm | tvm-main/python/tvm/_ffi/_ctypes/ndarray.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Runtime NDArray api"""
import ctypes
from ..base import _LIB, check_call, c_str
from ..runtime_ctypes import TVMArrayHandle
from .types import RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func, _return_handle
# Destructor signature for PyCapsules that carry DLPack tensors.
TVMPyCapsuleDestructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p)

# DLPack capsule names: "dltensor" marks a live capsule, "used_dltensor"
# marks one whose tensor has already been consumed.
_c_str_dltensor = c_str("dltensor")
_c_str_used_dltensor = c_str("used_dltensor")

# used for PyCapsule manipulation
if hasattr(ctypes, "pythonapi"):
    ctypes.pythonapi.PyCapsule_GetName.restype = ctypes.c_char_p
    ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
    ctypes.pythonapi.PyCapsule_New.restype = ctypes.py_object
def _from_dlpack(dltensor):
    """Create a TVM array by consuming a DLPack "dltensor" PyCapsule."""
    dltensor = ctypes.py_object(dltensor)
    if ctypes.pythonapi.PyCapsule_IsValid(dltensor, _c_str_dltensor):
        ptr = ctypes.pythonapi.PyCapsule_GetPointer(dltensor, _c_str_dltensor)
        # enforce type to make sure it works for all ctypes
        ptr = ctypes.cast(ptr, ctypes.c_void_p)
        handle = TVMArrayHandle()
        check_call(_LIB.TVMArrayFromDLPack(ptr, ctypes.byref(handle)))
        # Rename the capsule so it cannot be consumed twice, and clear its
        # destructor — ownership has moved to the new TVM array.
        ctypes.pythonapi.PyCapsule_SetName(dltensor, _c_str_used_dltensor)
        ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))
        return _make_array(handle, False, False)
    raise ValueError("Expect a dltensor field, PyCapsule can only be consumed once")
def _dlpack_deleter(pycapsule):
    # Capsule destructor for tensors we exported: if the capsule was never
    # consumed (still named "dltensor"), delete the managed tensor it holds.
    pycapsule = ctypes.cast(pycapsule, ctypes.py_object)
    if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor):
        ptr = ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor)
        # enforce type to make sure it works for all ctypes
        ptr = ctypes.cast(ptr, ctypes.c_void_p)
        _LIB.TVMDLManagedTensorCallDeleter(ptr)
        ctypes.pythonapi.PyCapsule_SetDestructor(pycapsule, None)


# Single C destructor instance shared by all capsules exported via to_dlpack.
_c_dlpack_deleter = TVMPyCapsuleDestructor(_dlpack_deleter)
class NDArrayBase(object):
    """A simple Device/CPU Array object in runtime."""

    __slots__ = ["handle", "is_view"]

    # pylint: disable=no-member
    def __init__(self, handle, is_view=False):
        """Initialize the function with handle

        Parameters
        ----------
        handle : TVMArrayHandle
            the handle to the underlying C++ TVMArray
        """
        self.handle = handle
        self.is_view = is_view

    def __del__(self):
        # Views do not own the underlying array; also skip if the shared
        # library has already been unloaded at interpreter shutdown.
        if not self.is_view and _LIB:
            check_call(_LIB.TVMArrayFree(self.handle))

    @property
    def _tvm_handle(self):
        # Raw address of the underlying array, as a plain integer.
        return ctypes.cast(self.handle, ctypes.c_void_p).value

    def _copyto(self, target_nd):
        """Internal function that implements copy to target ndarray."""
        check_call(_LIB.TVMArrayCopyFromTo(self.handle, target_nd.handle, None))
        return target_nd

    @property
    def shape(self):
        """Shape of this array"""
        return tuple(self.handle.contents.shape[i] for i in range(self.handle.contents.ndim))

    def to_dlpack(self):
        """Produce an array from a DLPack Tensor without copying memory

        Returns
        -------
        dlpack : DLPack tensor view of the array data
        """
        handle = ctypes.c_void_p()
        check_call(_LIB.TVMArrayToDLPack(self.handle, ctypes.byref(handle)))
        return ctypes.pythonapi.PyCapsule_New(handle, _c_str_dltensor, _c_dlpack_deleter)
def _make_array(handle, is_view, is_container):
    """Wrap a raw array handle in the appropriate Python NDArray class.

    Parameters
    ----------
    handle : ctypes pointer
        Raw handle, cast to TVMArrayHandle.
    is_view : bool
        Whether the wrapper is a non-owning view (skips TVMArrayFree).
    is_container : bool
        Whether to look up a registered container subclass by the
        object's runtime type index.
    """
    # NOTE: the original declared `global _TVM_ND_CLS`, which is unnecessary
    # for a read-only access and has been removed.
    handle = ctypes.cast(handle, TVMArrayHandle)
    if is_container:
        tindex = ctypes.c_uint()
        check_call(_LIB.TVMArrayGetTypeIndex(handle, ctypes.byref(tindex)))
        cls = _TVM_ND_CLS.get(tindex.value, _CLASS_NDARRAY)
    else:
        cls = _CLASS_NDARRAY
    # Bypass __init__ so subclasses may define their own initializers.
    ret = cls.__new__(cls)
    ret.handle = handle
    ret.is_view = is_view
    return ret
# Tuple of externally registered extension classes accepted as arguments.
_TVM_COMPATS = ()


def _reg_extension(cls, fcreate):
    """Register an extension class and, optionally, its return converter.

    Parameters
    ----------
    cls : type
        Class exposing a ``_tvm_tcode`` class attribute.
    fcreate : callable or None
        Factory building an instance from a raw handle; when given, it is
        wired into both return and callback-argument dispatch tables.
    """
    global _TVM_COMPATS
    _TVM_COMPATS += (cls,)
    if fcreate:
        # Named def instead of a lambda assignment (PEP 8 E731); behavior
        # is identical.
        def fret(x):
            return fcreate(_return_handle(x))

        RETURN_SWITCH[cls._tvm_tcode] = fret
        C_TO_PY_ARG_SWITCH[cls._tvm_tcode] = _wrap_arg_func(fret, cls._tvm_tcode)
# Maps runtime type index -> registered NDArray container class.
_TVM_ND_CLS = {}


def _register_ndarray(index, cls):
    """Record *cls* as the NDArray container class for type *index*.

    Note: no ``global`` statement is needed — the dict is mutated in
    place, never rebound (the original's declaration was superfluous).
    """
    _TVM_ND_CLS[index] = cls
# Default Python class wrapping runtime NDArrays; installed at import time
# via _set_class_ndarray.
_CLASS_NDARRAY = None


def _set_class_ndarray(cls):
    """Install the Python class used to wrap runtime NDArrays."""
    global _CLASS_NDARRAY
    _CLASS_NDARRAY = cls
| 5,100 | 32.559211 | 93 | py |
tvm | tvm-main/python/tvm/_ffi/_ctypes/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""ctypes specific implementation of FFI"""
| 829 | 45.111111 | 62 | py |
tvm | tvm-main/python/tvm/_ffi/_ctypes/packed_func.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-branches
# pylint: disable=global-statement, unused-import, using-constant-test
"""Function configuration API."""
import ctypes
import traceback
from numbers import Number, Integral
from ..base import _LIB, get_last_ffi_error, py2cerror, check_call
from ..base import c_str, string_types
from ..runtime_ctypes import DataType, TVMByteArray, Device, ObjectRValueRef
from . import ndarray as _nd
from .ndarray import NDArrayBase, _make_array
from .types import TVMValue, ArgTypeCode
from .types import TVMPackedCFunc, TVMCFuncFinalizer
from .types import RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func, _device_to_int64
from .object import ObjectBase, PyNativeObject, _set_class_object
from . import object as _object
# Opaque ctypes handle aliases for the corresponding C API handle types.
PackedFuncHandle = ctypes.c_void_p
ModuleHandle = ctypes.c_void_p
ObjectHandle = ctypes.c_void_p
TVMRetValueHandle = ctypes.c_void_p
def _ctypes_free_resource(rhandle):
    """callback to free resources when it is not needed."""
    # The resource handle is a borrowed reference to a Python object whose
    # refcount was bumped when it was handed to C; drop that reference here.
    pyobj = ctypes.cast(rhandle, ctypes.py_object)
    ctypes.pythonapi.Py_DecRef(pyobj)


# Global callback that is always alive
TVM_FREE_PYOBJ = TVMCFuncFinalizer(_ctypes_free_resource)
# Keep the finalizer object itself alive for the life of the process.
ctypes.pythonapi.Py_IncRef(ctypes.py_object(TVM_FREE_PYOBJ))
def _make_packed_func(handle, is_global):
    """Wrap a PackedFuncHandle in the registered PackedFunc class.

    Bypasses __init__ via __new__ so construction never touches the FFI.
    """
    packed = _CLASS_PACKED_FUNC.__new__(_CLASS_PACKED_FUNC)
    packed.is_global = is_global
    packed.handle = handle
    return packed
def convert_to_tvm_func(pyfunc):
    """Convert a python function to TVM function

    Parameters
    ----------
    pyfunc : python function
        The python function to be converted.

    Returns
    -------
    tvmfunc: tvm.nd.Function
        The converted tvm function.
    """
    local_pyfunc = pyfunc

    def cfun(args, type_codes, num_args, ret, _):
        """ctypes function"""
        num_args = num_args.value if isinstance(num_args, ctypes.c_int) else num_args
        # Lazily convert each C argument to a Python value by its type code.
        pyargs = (C_TO_PY_ARG_SWITCH[type_codes[i]](args[i]) for i in range(num_args))
        # pylint: disable=broad-except
        try:
            rv = local_pyfunc(*pyargs)
        except Exception:
            # Report the Python traceback through the C API's last-error
            # mechanism; a nonzero return signals failure to the caller.
            msg = traceback.format_exc()
            msg = py2cerror(msg)
            _LIB.TVMAPISetLastError(c_str(msg))
            return -1
        if rv is not None:
            if isinstance(rv, tuple):
                raise ValueError("PackedFunction can only support one return value")
            temp_args = []
            values, tcodes, _ = _make_tvm_args((rv,), temp_args)
            if not isinstance(ret, TVMRetValueHandle):
                ret = TVMRetValueHandle(ret)
            if _LIB.TVMCFuncSetReturn(ret, values, tcodes, ctypes.c_int(1)) != 0:
                raise get_last_ffi_error()
            # Keep temp_args/rv referenced until after TVMCFuncSetReturn
            # has copied the values.
            _ = temp_args
            _ = rv
        return 0

    handle = PackedFuncHandle()
    f = TVMPackedCFunc(cfun)
    # NOTE: We will need to use python-api to increase ref count of the f
    # TVM_FREE_PYOBJ will be called after it is no longer needed.
    pyobj = ctypes.py_object(f)
    ctypes.pythonapi.Py_IncRef(pyobj)
    if _LIB.TVMFuncCreateFromCFunc(f, pyobj, TVM_FREE_PYOBJ, ctypes.byref(handle)) != 0:
        raise get_last_ffi_error()
    return _make_packed_func(handle, False)
def _make_tvm_args(args, temp_args):
    """Pack arguments into c args tvm call accept

    Parameters
    ----------
    args : iterable
        Python arguments to convert.
    temp_args : list
        Output list that keeps temporary conversion results alive until the
        C call completes (the C side may hold borrowed pointers into them).

    Returns
    -------
    values, type_codes, num_args
        Parallel C arrays of argument payloads and type codes, plus the count.
    """
    num_args = len(args)
    values = (TVMValue * num_args)()
    type_codes = (ctypes.c_int * num_args)()
    # NOTE: branch order matters — e.g. callable() must come after the more
    # specific PackedFuncBase check.
    for i, arg in enumerate(args):
        if isinstance(arg, ObjectBase):
            values[i].v_handle = arg.handle
            type_codes[i] = ArgTypeCode.OBJECT_HANDLE
        elif arg is None:
            values[i].v_handle = None
            type_codes[i] = ArgTypeCode.NULL
        elif isinstance(arg, NDArrayBase):
            values[i].v_handle = ctypes.cast(arg.handle, ctypes.c_void_p)
            # Views are passed as plain DLTensor pointers; owned arrays keep
            # their NDArray identity.
            type_codes[i] = (
                ArgTypeCode.NDARRAY_HANDLE if not arg.is_view else ArgTypeCode.DLTENSOR_HANDLE
            )
        elif isinstance(arg, PyNativeObject):
            values[i].v_handle = arg.__tvm_object__.handle
            type_codes[i] = ArgTypeCode.OBJECT_HANDLE
        elif isinstance(arg, _nd._TVM_COMPATS):
            values[i].v_handle = ctypes.c_void_p(arg._tvm_handle)
            type_codes[i] = arg.__class__._tvm_tcode
        elif isinstance(arg, Integral):
            values[i].v_int64 = arg
            type_codes[i] = ArgTypeCode.INT
        elif isinstance(arg, Number):
            values[i].v_float64 = arg
            type_codes[i] = ArgTypeCode.FLOAT
        elif isinstance(arg, DataType):
            values[i].v_str = c_str(str(arg))
            type_codes[i] = ArgTypeCode.STR
        elif isinstance(arg, Device):
            values[i].v_int64 = _device_to_int64(arg)
            type_codes[i] = ArgTypeCode.DLDEVICE
        elif isinstance(arg, (bytearray, bytes)):
            # from_buffer only takes in bytearray.
            if isinstance(arg, bytes):
                byte_arr = bytearray(arg)
                temp_args.append(byte_arr)
                arg = byte_arr
            arr = TVMByteArray()
            arr.data = ctypes.cast(
                (ctypes.c_byte * len(arg)).from_buffer(arg), ctypes.POINTER(ctypes.c_byte)
            )
            arr.size = len(arg)
            values[i].v_handle = ctypes.c_void_p(ctypes.addressof(arr))
            # Keep the TVMByteArray struct alive for the duration of the call.
            temp_args.append(arr)
            type_codes[i] = ArgTypeCode.BYTES
        elif isinstance(arg, string_types):
            values[i].v_str = c_str(arg)
            type_codes[i] = ArgTypeCode.STR
        elif isinstance(arg, (list, tuple, dict, _CLASS_OBJECT_GENERIC)):
            # Containers are converted to TVM object containers; keep the
            # converted object alive until the C call returns.
            arg = _FUNC_CONVERT_TO_OBJECT(arg)
            values[i].v_handle = arg.handle
            type_codes[i] = ArgTypeCode.OBJECT_HANDLE
            temp_args.append(arg)
        elif isinstance(arg, _CLASS_MODULE):
            values[i].v_handle = arg.handle
            type_codes[i] = ArgTypeCode.MODULE_HANDLE
        elif isinstance(arg, PackedFuncBase):
            values[i].v_handle = arg.handle
            type_codes[i] = ArgTypeCode.PACKED_FUNC_HANDLE
        elif isinstance(arg, ctypes.c_void_p):
            values[i].v_handle = arg
            type_codes[i] = ArgTypeCode.HANDLE
        elif isinstance(arg, ObjectRValueRef):
            values[i].v_handle = ctypes.cast(ctypes.byref(arg.obj.handle), ctypes.c_void_p)
            type_codes[i] = ArgTypeCode.OBJECT_RVALUE_REF_ARG
        elif callable(arg):
            # Wrap arbitrary Python callables as packed functions.
            arg = convert_to_tvm_func(arg)
            values[i].v_handle = arg.handle
            type_codes[i] = ArgTypeCode.PACKED_FUNC_HANDLE
            temp_args.append(arg)
        else:
            raise TypeError("Don't know how to handle type %s" % type(arg))
    return values, type_codes, num_args
class PackedFuncBase(object):
    """Function base.

    Thin ctypes wrapper around a TVM packed-function handle.  Subclasses
    (registered via ``_set_class_packed_func``) provide the user-facing API.
    """

    __slots__ = ["handle", "is_global"]
    # pylint: disable=no-member
    def __init__(self, handle, is_global):
        """Initialize the function with handle

        Parameters
        ----------
        handle : PackedFuncHandle
            the handle to the underlying function.

        is_global : bool
            Whether this is a global function in python
        """
        self.handle = handle
        self.is_global = is_global

    def __del__(self):
        # Global functions are owned by the runtime registry and must not be
        # freed here; _LIB may already be None during interpreter shutdown.
        if not self.is_global and _LIB is not None:
            if _LIB.TVMFuncFree(self.handle) != 0:
                raise get_last_ffi_error()

    def __call__(self, *args):
        """Call the function with positional arguments

        args : list
           The positional arguments to the function call.
        """
        temp_args = []
        values, tcodes, num_args = _make_tvm_args(args, temp_args)
        ret_val = TVMValue()
        ret_tcode = ctypes.c_int()
        if (
            _LIB.TVMFuncCall(
                self.handle,
                values,
                tcodes,
                ctypes.c_int(num_args),
                ctypes.byref(ret_val),
                ctypes.byref(ret_tcode),
            )
            != 0
        ):
            raise get_last_ffi_error()
        # temp_args/args must stay alive until the C call has returned.
        _ = temp_args
        _ = args
        # Convert the returned TVMValue to a Python object by type code.
        return RETURN_SWITCH[ret_tcode.value](ret_val)
def __init_handle_by_constructor__(fconstructor, args):
    """Initialize handle by constructor

    Calls the packed-function constructor with the given arguments and
    returns the raw object handle it produced.
    """
    temp_args = []
    values, tcodes, num_args = _make_tvm_args(args, temp_args)
    ret_val = TVMValue()
    ret_tcode = ctypes.c_int()
    if (
        _LIB.TVMFuncCall(
            fconstructor.handle,
            values,
            tcodes,
            ctypes.c_int(num_args),
            ctypes.byref(ret_val),
            ctypes.byref(ret_tcode),
        )
        != 0
    ):
        raise get_last_ffi_error()
    # Keep converted args alive until after the C call.
    _ = temp_args
    _ = args
    # A constructor must return an object handle.
    assert ret_tcode.value == ArgTypeCode.OBJECT_HANDLE
    handle = ret_val.v_handle
    return handle
def _return_module(x):
    """Convert a TVMValue carrying a module handle into a Module object."""
    mod_handle = x.v_handle
    if not isinstance(mod_handle, ModuleHandle):
        mod_handle = ModuleHandle(mod_handle)
    return _CLASS_MODULE(mod_handle)
def _handle_return_func(x):
    """Convert a TVMValue carrying a function handle into a PackedFunc."""
    func_handle = x.v_handle
    if not isinstance(func_handle, PackedFuncHandle):
        func_handle = PackedFuncHandle(func_handle)
    return _CLASS_PACKED_FUNC(func_handle, False)
def _get_global_func(name, allow_missing=False):
    """Look up a global packed function by name.

    Parameters
    ----------
    name : str
        Name of the global function.
    allow_missing : bool
        If True, return None instead of raising when the name is not found.
    """
    handle = PackedFuncHandle()
    check_call(_LIB.TVMFuncGetGlobal(c_str(name), ctypes.byref(handle)))
    if not handle.value:
        if allow_missing:
            return None
        raise ValueError("Cannot find global function %s" % name)
    return _make_packed_func(handle, False)
# setup return handle for function type
_object.__init_by_constructor__ = __init_handle_by_constructor__
# Converters for values *returned* from C calls, keyed by type code.
RETURN_SWITCH[ArgTypeCode.PACKED_FUNC_HANDLE] = _handle_return_func
RETURN_SWITCH[ArgTypeCode.MODULE_HANDLE] = _return_module
RETURN_SWITCH[ArgTypeCode.NDARRAY_HANDLE] = lambda x: _make_array(x.v_handle, False, True)
# Converters for arguments passed *into* Python callbacks from C.
C_TO_PY_ARG_SWITCH[ArgTypeCode.PACKED_FUNC_HANDLE] = _wrap_arg_func(
    _handle_return_func, ArgTypeCode.PACKED_FUNC_HANDLE
)
C_TO_PY_ARG_SWITCH[ArgTypeCode.MODULE_HANDLE] = _wrap_arg_func(
    _return_module, ArgTypeCode.MODULE_HANDLE
)
C_TO_PY_ARG_SWITCH[ArgTypeCode.DLTENSOR_HANDLE] = lambda x: _make_array(x.v_handle, True, False)
C_TO_PY_ARG_SWITCH[ArgTypeCode.NDARRAY_HANDLE] = _wrap_arg_func(
    lambda x: _make_array(x.v_handle, False, True), ArgTypeCode.NDARRAY_HANDLE
)
# User-facing wrapper classes are injected lazily by the _set_class_* helpers
# below, to avoid circular imports between this low-level FFI module and the
# high-level API modules.
_CLASS_MODULE = None
_CLASS_PACKED_FUNC = None
_CLASS_OBJECT_GENERIC = None
_FUNC_CONVERT_TO_OBJECT = None
def _set_class_module(module_class):
"""Initialize the module."""
global _CLASS_MODULE
_CLASS_MODULE = module_class
def _set_class_packed_func(packed_func_class):
global _CLASS_PACKED_FUNC
_CLASS_PACKED_FUNC = packed_func_class
def _set_class_object_generic(object_generic_class, func_convert_to_object):
global _CLASS_OBJECT_GENERIC
global _FUNC_CONVERT_TO_OBJECT
_CLASS_OBJECT_GENERIC = object_generic_class
_FUNC_CONVERT_TO_OBJECT = func_convert_to_object
| 11,831 | 34.214286 | 96 | py |
tvm | tvm-main/python/tvm/utils/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities operating at a graph/model or other "high" level"""
from .roofline import roofline_analysis
| 891 | 43.6 | 64 | py |
tvm | tvm-main/python/tvm/utils/roofline/x86.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Estimate peak flops and bandwidth for x86 devices"""
import functools
import re
from typing import Dict, Optional, Tuple
import numpy as np
from ... import build, get_global_func, nd, transform
from ...contrib import utils
from ...rpc.base import RPC_SESS_MASK
from ...rpc.client import RPCSession
from ...runtime import DataType, Device, num_threads
from ...script import tir as T
from ...target import Target, x86
from ...tir import PrimFunc
from . import registry
def _detect_vec_width_registers(
    target: Target, vec_width: Optional[int], num_vector_registers: Optional[int]
):
    """Get the vector width and number of vector registers for a target.

    Parameters
    ----------
    target : Target
        Target to detect vector width and registers for.
    vec_width : Optional[int]
        If None, try to detect the vector width from `target`. Otherwise the
        provided value is returned unchanged.
    num_vector_registers : Optional[int]
        If None, try to detect the number of vector registers from `target`.
        Otherwise the provided value is returned unchanged.

    Returns
    -------
    vec_width: int
        Width of a vector register on `target` in bytes.
    num_vector_registers: int
        Number of vector registers on `target`.
    """
    if vec_width is None:
        # Only implemented for x86 so far...
        is_plain_x86_llvm = (
            str(target.kind) == "llvm"
            and target.device_name == ""
            and len(target.keys) == 1
            and target.keys[0] == "cpu"
        )
        if not is_plain_x86_llvm:
            raise RuntimeError(f"Cannot determine vector width for target {target}")
        with target:
            vec_width = x86.get_simd_32bit_lanes() * 4  # in number of bytes
    if num_vector_registers is None:
        # An empty device name indicates x86.
        if target.device_name != "":
            raise RuntimeError(f"Cannot determine number of vector registers for target {target}")
        num_vector_registers = 16  # Assuming for all platforms, probably wrong on older ones
    return vec_width, num_vector_registers
# Cached per (target, dev, remote, dtype, ...) tuple: the microbenchmark is
# expensive, and its result is stable for a given configuration.
@functools.lru_cache(maxsize=None)
def estimate_peak_fma_vector_flops(
    target: Target,
    dev: Device,
    remote: Optional[RPCSession],
    dtype: DataType,
    vec_width: Optional[int] = None,
    num_vector_registers: Optional[int] = None,
):
    """Estimate peak flops assuming vector fma instructions and no explicit
    intrinsics. See estimate_peak_fma_flops.
    """

    @T.prim_func
    def peakflops_fma_tir(
        a: T.handle,
        vec_width: T.int32,
        iters: T.int32,
        num_vector_registers: T.int32,
        threads: T.int32,
    ) -> None:
        # pylint: disable=invalid-name, missing-function-docstring
        A = T.match_buffer(a, [threads, num_vector_registers, vec_width], dtype)
        for t in T.parallel(threads):
            for _j in range(iters):
                for l in T.unroll(num_vector_registers):
                    # We want to use as few registers as possible, so we perform
                    # all operations on the same element
                    for k in T.vectorized(vec_width):
                        A[t, l, k] = A[t, l, k] * A[t, l, k] + A[t, l, k]

    vec_width, num_vector_registers = _detect_vec_width_registers(
        target, vec_width, num_vector_registers
    )
    # Convert byte width to number of lanes for this dtype.
    vec_width //= DataType(dtype).bits // 8
    iters = 1000000
    nthreads = num_threads()
    specialized = peakflops_fma_tir.specialize(
        {
            peakflops_fma_tir.params[1]: vec_width,
            peakflops_fma_tir.params[2]: iters,
            peakflops_fma_tir.params[3]: num_vector_registers,
            peakflops_fma_tir.params[4]: nthreads,
        }
    )
    with transform.PassContext(opt_level=3):
        f = build(specialized, target=target)
    # upload to remote if running over rpc
    if dev.device_type >= RPC_SESS_MASK:
        if remote is None:
            raise RuntimeError("A RPCSession must be provided when using a remote device.")
        temp = utils.tempdir()
        path = temp.relpath("peak_fma_flops.tar")
        f.export_library(path)
        remote.upload(path)
        f = remote.load_module("peak_fma_flops.tar")
        random_fill = remote.get_function("tvm.contrib.random.random_fill")
    else:
        random_fill = get_global_func("tvm.contrib.random.random_fill")
    assert random_fill, "Please make sure USE_RANDOM is ON in config.cmake"
    a = nd.empty((nthreads, num_vector_registers, vec_width), dtype=dtype, device=dev)
    random_fill(a)
    times = f.time_evaluator(f.entry_name, dev, repeat=100, number=1)(a)
    flops = 2 * vec_width * num_vector_registers * nthreads * iters  # fma is two flops
    return flops / times.min
@registry.estimate_peak_flops.register("cpu")
def estimate_peak_fma_flops(
    func: PrimFunc,
    features: Dict[str, np.ndarray],
    target: Target,
    dev: Device,
    remote: Optional[RPCSession],
    vec_width: Optional[int] = None,
    num_vector_registers: Optional[int] = None,
) -> Tuple[float, float, str]:
    """
    Estimate the maximum number of FLOP/s this target/device combo is capable
    of reaching by running a test program. This assumes vectorized FMA
    (fused-multiply-add) instructions.

    Parameters
    ----------
    func : PrimFunc
        Function to estimate peak flops for. Used to check if a specific kind
        intrinsic or dtype could be used with this function.
    features : Dict[str, np.ndarray]
        Features extracted from `func`. Used to check if a specific kind
        intrinsic or dtype could be used with this function.
    target : Target
        Target to run on. This should be as specific to the actual hardware as
        possible to make sure that LLVM generates the best vector code.
    dev : Device
        Device to run on.
    remote : Optional[RPCSession]
        Remote session used to upload artifacts for runtime evaluation. Must be
        the same session used to create `dev`.
    vec_width : Optional[int]
        Vector width of SIMD units on the underlying hardware. Will try to
        infer if no value is provided.
    num_vector_registers : Optional[int]
        Number of vector registers on the underlying hardware. Will try to
        infer if no value is provided.

    Returns
    -------
    flops : float
        Estimated number of flops used by `func`.
    peak_flops : float
        Approximate sustained FLOP/s of this target/device combo assuming
        vectorized FMA instructions. Each FMA operation counts as two FLOPs.
    name : str
        Dtype/intrinsic used by `func` to achieve peak flops.
    """
    # assume that the first argument's dtype is the one we want
    dtype = list(func.buffer_map.values())[0].dtype
    # Integer and float ops are counted from the corresponding feature groups;
    # a multiply-add counts as two operations.
    kind = "int" if "int" in dtype else "float"
    flops = np.sum(
        features[kind + "_addsub"]
        + features[kind + "_mul"]
        + features[kind + "_mad"] * 2
        + features[kind + "_divmod"]
    )
    peak_flops = estimate_peak_fma_vector_flops(
        target, dev, remote, dtype, vec_width, num_vector_registers
    )
    return flops, peak_flops, f"{dtype} FMA"
# TIR microbenchmark kernel used by estimate_peak_bandwidth_dram: streams a
# large array A into a small accumulator B to saturate memory bandwidth.
@T.prim_func
def peak_bandwidth_tir(a: T.handle, b: T.handle, threads: T.int32, vec_width: T.int32) -> None:
    # pylint: disable=invalid-name, missing-function-docstring
    N = T.int32()
    A = T.match_buffer(a, [threads, N, 4, vec_width], "float32")
    B = T.match_buffer(b, [threads, 4, vec_width], "float32")
    # Parallelism is necessary to hit all cores/nodes
    for i in T.parallel(threads):
        for k in T.serial(N):
            for l in T.unroll(4):
                # vectorized load is necessary to hit peak bandwidth
                for j in T.vectorized(vec_width):
                    # += is necessary to introduce a data dependency for all
                    # elements of A, preventing the backend from removing the
                    # `k` loop and setting `k` to the loop extent.
                    B[i, l, j] += A[i, k, l, j]
# Cached: the bandwidth microbenchmark is expensive and deterministic for a
# given (target, dev, remote, vec_width) combination.
@functools.lru_cache(maxsize=None)
def estimate_peak_bandwidth_dram(
    target: Target,
    dev: Device,
    remote: Optional[RPCSession],
    vec_width: Optional[int] = None,
) -> float:
    """Estimate peak bandwidth for DRAM. See estimate_peak_bandwidth."""
    # The register count is irrelevant here, so pass a placeholder of 1.
    vec_width, _ = _detect_vec_width_registers(target, vec_width, 1)
    specialized = peak_bandwidth_tir.specialize(
        {
            peak_bandwidth_tir.params[3]: vec_width,
        }
    )
    with transform.PassContext(opt_level=3):
        f = build(specialized, target=target)
    # upload to remote if running over rpc
    if dev.device_type >= RPC_SESS_MASK:
        if remote is None:
            raise RuntimeError("A RPCSession must be provided when using a remote device.")
        temp = utils.tempdir()
        path = temp.relpath("peak_bandwidth.tar")
        f.export_library(path)
        remote.upload(path)
        f = remote.load_module("peak_bandwidth.tar")
        random_fill = remote.get_function("tvm.contrib.random.random_fill")
    else:
        random_fill = get_global_func("tvm.contrib.random.random_fill")
    assert random_fill, "Please make sure USE_RANDOM is ON in config.cmake"
    threads = num_threads()
    # Data size needs to be larger than last level of cache. We don't have a
    # way of getting cache sizes, so this number should give us a large enough
    # size.
    size = 10**8 // (4 * threads * vec_width)
    a = nd.empty((threads, size, 4, vec_width), dtype="float32", device=dev)
    random_fill(a)
    b = nd.empty((threads, 4, vec_width), dtype="float32", device=dev)
    random_fill(b)
    times = f.time_evaluator(f.entry_name, dev, repeat=10, number=1)(a, b, threads)
    return a.numpy().size * 4 / times.min  # 4 bytes per float32
@registry.estimate_peak_bandwidth.register("cpu")
def estimate_peak_bandwidth(
    func: PrimFunc,  # pylint: disable=unused-argument
    features: Dict[str, np.ndarray],
    target: Target,
    dev: Device,
    remote: Optional[RPCSession],
    vec_width: Optional[int] = None,
) -> Tuple[float, float, str]:
    """Estimate peak memory bandwidth of a target/device combo.

    Peak bandwidth is estimated by running a small experiment on the underlying
    hardware. The peak bandwidth measurement assumes that vector instructions
    are being used to load the data.

    Parameters
    ----------
    func : PrimFunc
        Function to estimate peak bandwidth for. Used to check if a specific
        kind of memory could be used with this function.
    features : Dict[str, np.ndarray]
        Features extracted from `func`. Used to check if a specific kind of
        memory could be used with this function.
    target : Target
        Target to use for measurement. This target should be as specific to the
        underlying hardware as possible.
    dev : Device
        Device to measure peak bandwidth on.
    remote : Optional[RPCSession]
        Remote session used to upload artifacts for runtime evaluation. Must be
        the same session used to create `dev`.
    vec_width : Optional[int]
        Vector unit width, determined from target if not supplied.

    Returns
    -------
    loaded_bytes : float
        Estimated bytes loaded by `func`.
    peak_bandwidth : float
        Peak memory bandwidth in bytes/seconds.
    name : str
        Name of the memory being used.
    """
    # Ideally we'd also measure the bandwidth of the individual cache levels
    # here. That would require generating raw load instructions in a tight
    # loop, or an operation with a very low arithmetic intensity that is
    # bounded by L1 bandwidth; we don't have either yet, so only DRAM is
    # measured.
    peak_bandwidth = estimate_peak_bandwidth_dram(target, dev, remote, vec_width)
    # Sum the per-buffer byte counts (features named "B<i>.bytes").
    bytes_key = re.compile(r"^B[0-9]+\.bytes$")
    loaded_bytes = sum(
        np.sum(counts) for name, counts in features.items() if bytes_key.match(name)
    )
    return loaded_bytes, peak_bandwidth, "DRAM"
| 12,918 | 37.912651 | 98 | py |
tvm | tvm-main/python/tvm/utils/roofline/registry.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of generic functions for estimating peak flops and bandwidth"""
from typing import Dict, Optional, Tuple
import numpy as np
from ...rpc.client import RPCSession
from ...runtime import Device
from ...target import Target, generic_func
from ...tir import PrimFunc
# Generic dispatch point: per-target implementations (e.g. "cpu", "cuda")
# override this via @estimate_peak_bandwidth.register(...).
@generic_func
def estimate_peak_bandwidth(
    func: PrimFunc,
    features: Dict[str, np.ndarray],
    target: Target,
    dev: Device,
    remote: Optional[RPCSession] = None,
) -> Tuple[float, float, str]:
    """Estimate peak memory bandwidth of a target/device combo.

    Peak bandwidth is estimated by running a small experiment on the underlying
    hardware. The peak bandwidth measurement assumes that vector instructions
    are being used to load the data.

    Parameters
    ----------
    func : PrimFunc
        Function to estimate peak bandwidth for. Used to check if a specific
        kind of memory could be used with this function.
    features : Dict[str, np.ndarray]
        Features extracted from `func`. Used to check if a specific kind of
        memory could be used with this function.
    target : Target
        Target to use for measurement. This target should be as specific to the
        underlying hardware as possible.
    dev : Device
        Device to measure peak bandwidth on.
    remote : Optional[RPCSession]
        Remote session used to upload artifacts for runtime evaluation. Must be
        the same session used to create `dev`.

    Returns
    -------
    loaded_bytes : float
        Estimated bytes loaded by `func`.
    peak_bandwidth : float
        Peak memory bandwidth in bytes/seconds.
    name : str
        Name of the memory being used.
    """
    raise NotImplementedError()
# Generic dispatch point: per-target implementations (e.g. "cpu", "cuda")
# override this via @estimate_peak_flops.register(...).
@generic_func
def estimate_peak_flops(
    func: PrimFunc,
    features: Dict[str, np.ndarray],
    target: Target,
    dev: Device,
    remote: Optional[RPCSession],
) -> Tuple[float, float, str]:
    """
    Estimate the maximum number of FLOP/s this target/device combo is capable
    of reaching by running a test program. This is a generic function that
    should be overridden for each target.

    Parameters
    ----------
    func : PrimFunc
        Function to estimate peak flops for. Used to check if a specific kind
        intrinsic or dtype could be used with this function.
    features : Dict[str, np.ndarray]
        Features extracted from `func`. Used to check if a specific kind
        intrinsic or dtype could be used with this function.
    target : Target
        Target to run on. This should be as specific to the actual hardware as
        possible to make sure that LLVM generates the best vector code.
    dev : Device
        Device to run on.
    remote : Optional[RPCSession]
        Remote session used to upload artifacts for runtime evaluation. Must be
        the same session used to create `dev`.

    Returns
    -------
    flops : float
        Estimated number of flops used by `func`.
    peak_flops : float
        Approximate sustained FLOP/s of this target/device combo assuming
        vectorized FMA instructions. Each FMA operation counts as two FLOPs.
    name : str
        Dtype/intrinsic used by `func` to achieve peak flops.
    """
    raise NotImplementedError()
| 4,019 | 34.892857 | 79 | py |
tvm | tvm-main/python/tvm/utils/roofline/cuda.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Estimation of peak flops and memory bandwidth for cuda devices"""
import functools
import re
from typing import Dict, Optional, Tuple
import numpy as np
from ... import build, nd, transform
from ...contrib import nvcc, utils
from ...rpc.base import RPC_SESS_MASK
from ...rpc.client import RPCSession
from ...runtime import Device
from ...script import tir as T
from ...target import Target
from ...tir import PrimFunc
from . import registry
# Cached: the tensorcore microbenchmark is expensive and deterministic for a
# given (target, dev, remote, dtypes) combination.
@functools.lru_cache(maxsize=None)
def estimate_peak_flops_tensorcore(
    target: Target,
    dev: Device,
    remote: Optional[RPCSession],
    mat_dtype: str = "float16",
    acc_dtype: str = "float32",
) -> Tuple[float, float, str]:
    """Estimate the peak FLOP/s of a cuda device with tensorcores.

    This estimate should only be used to compare with operators that can use
    dense tensorcore mma instructions.

    References
    ----------
    Wei Sun, Ang Li, Tong Geng, Sander Stuijk, Henk Corporaal: "Dissecting
    Tensor Cores via Microbenchmarks: Latency, Throughput and Numerical
    Behaviors", 2022; http://arxiv.org/abs/2206.02874
    https://www.nvidia.com/content/PDF/nvidia-ampere-ga-102-gpu-architecture-whitepaper-v2.1.pdf

    Parameters
    ----------
    target : Target
        Target to run on. This should be as specific to the actual hardware as
        possible.
    dev : Device
        Device to run on.
    remote : Optional[RPCSession]
        Remote session used to upload artifacts for runtime evaluation. Must be
        the same session used to create `dev`.
    mat_dtype : str
        Dtype of matrices passed to mma instructions.
    acc_dtype : str
        Dtype of accumulator to use with mma instructions. Should be compatible
        with `mat_dtype`.

    Returns
    -------
    peak_flops : float
        Approximate sustained FLOP/s of this target/device combo assuming
        mma instructions. Addition and multiplications are each counted as
        separate FLOPs.
    """

    @T.prim_func
    def peak_flops_tensorcore_tir(
        inp: T.Buffer((16, 16), mat_dtype),
        out: T.Buffer((16, 16), acc_dtype),
        n: T.int32,
        sms: T.int32,
    ):
        # pylint: disable=invalid-name, missing-function-docstring
        A = T.alloc_buffer((16, 16), dtype=mat_dtype, scope="wmma.matrix_a")
        B = T.alloc_buffer((16, 16), dtype=mat_dtype, scope="wmma.matrix_b")
        C = T.alloc_buffer((16, 16), dtype=acc_dtype, scope="wmma.accumulator")
        for _ in T.thread_binding(sms, thread="blockIdx.x"):
            for _ in T.thread_binding(
                8, thread="threadIdx.y"
            ):  # need 8 warps to get enough in-SM parallelism
                for _ in T.thread_binding(32, thread="threadIdx.x"):
                    T.evaluate(
                        T.tvm_load_matrix_sync(
                            A.data,
                            16,
                            16,
                            16,
                            0,
                            T.tvm_access_ptr(
                                T.type_annotation(dtype=mat_dtype),
                                inp.data,
                                0,
                                16,
                                1,
                                dtype="handle",
                            ),
                            16,
                            "row_major",
                            dtype="handle",
                        )
                    )
                    T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, 0, dtype="handle"))
                    T.evaluate(T.tvm_fill_fragment(C.data, 16, 16, 16, 0, 0, dtype="handle"))
                    # The timed inner loop: n back-to-back mma instructions on
                    # the same fragments.
                    for _ in range(n):
                        T.evaluate(
                            T.tvm_mma_sync(
                                C.data, 0, A.data, 0, B.data, 0, C.data, 0, dtype="handle"
                            )
                        )
                    T.evaluate(
                        T.tvm_store_matrix_sync(
                            C.data,
                            16,
                            16,
                            16,
                            0,
                            T.tvm_access_ptr(
                                T.type_annotation(dtype=acc_dtype),
                                out.data,
                                0,
                                16,
                                2,
                                dtype="handle",
                            ),
                            16,
                            "row_major",
                            dtype="handle",
                        )
                    )

    n = 100000
    sms = dev.multi_processor_count
    specialized = peak_flops_tensorcore_tir.specialize(
        {peak_flops_tensorcore_tir.params[2]: n, peak_flops_tensorcore_tir.params[3]: sms}
    )
    with transform.PassContext(opt_level=3):
        f = build(specialized, target=target)
    # upload to remote if running over rpc
    if dev.device_type >= RPC_SESS_MASK:
        if remote is None:
            raise RuntimeError("A RPCSession must be provided when using a remote device.")
        temp = utils.tempdir()
        path = temp.relpath("peak_mma_flops.tar")
        f.export_library(path)
        remote.upload(path)
        f = remote.load_module("peak_mma_flops.tar")
    x = nd.empty((16, 16), dtype=mat_dtype, device=dev)
    y = nd.empty((16, 16), dtype=acc_dtype, device=dev)
    times = f.time_evaluator(f.entry_name, dev, repeat=10, number=1)(x, y)
    # each mma operation computes 16 x 16 x 16 FLOPs
    return n * 16 * 16 * 16 * 2 * sms * 8 / times.min
# Cached: the fma microbenchmark is expensive and deterministic for a given
# (target, dev, remote, dtype) combination.
@functools.lru_cache(maxsize=None)
def estimate_peak_flops_fma(
    target: Target,
    dev: Device,
    remote: Optional[RPCSession],
    dtype: str,
) -> Tuple[float, float, str]:
    """Estimate the peak FLOP/s of a cuda device with fma operations (not using tensor cores).

    References
    ----------
    https://www.nvidia.com/content/PDF/nvidia-ampere-ga-102-gpu-architecture-whitepaper-v2.1.pdf

    Parameters
    ----------
    target : Target
        Target to run on. This should be as specific to the actual hardware as
        possible.
    dev : Device
        Device to run on.
    remote : Optional[RPCSession]
        Remote session used to upload artifacts for runtime evaluation. Must be
        the same session used to create `dev`.
    dtype : str
        Dtype of fma operation

    Returns
    -------
    peak_flops : float
        Approximate sustained FLOP/s of this target/device combo assuming
        fma instructions. Addition and multiplications are each counted as
        separate FLOPs.
    """
    vec_width = 32
    warps = 16  # need 16 warps to get enough in-SM parallelism
    sms = dev.multi_processor_count
    n = 100000

    @T.prim_func
    def peak_flops_fma_tir(
        A: T.Buffer((sms, warps, vec_width), dtype),
        B: T.Buffer((sms, warps, vec_width), dtype),
    ):
        # pylint: disable=invalid-name, missing-function-docstring
        shared = T.alloc_buffer((sms, warps, vec_width), dtype=dtype, scope="shared")
        for sm in T.thread_binding(sms, thread="blockIdx.x"):
            for warp in T.thread_binding(warps, thread="threadIdx.y"):
                for t in T.thread_binding(vec_width, thread="threadIdx.x"):
                    shared[sm, warp, t] = A[sm, warp, t]
                    # Timed inner loop: n dependent fma operations per thread.
                    for _ in range(n):
                        shared[sm, warp, t] = (
                            shared[sm, warp, t] * shared[sm, warp, t] + shared[sm, warp, t]
                        )
                    B[sm, warp, t] = shared[sm, warp, t]

    with transform.PassContext(opt_level=3):
        f = build(peak_flops_fma_tir, target=target)
    # upload to remote if running over rpc
    if dev.device_type >= RPC_SESS_MASK:
        if remote is None:
            raise RuntimeError("A RPCSession must be provided when using a remote device.")
        temp = utils.tempdir()
        path = temp.relpath("peak_fma_flops.tar")
        f.export_library(path)
        remote.upload(path)
        f = remote.load_module("peak_fma_flops.tar")
    x = nd.empty((sms, warps, vec_width), dtype=dtype, device=dev)
    y = nd.empty((sms, warps, vec_width), dtype=dtype, device=dev)
    times = f.time_evaluator(f.entry_name, dev, repeat=10, number=1)(x, y)
    # fma is two flops per element per iteration
    return n * warps * sms * vec_width * 2 / times.min
@registry.estimate_peak_flops.register("cuda")
def estimate_peak_flops(
    func: PrimFunc,  # pylint: disable=unused-argument
    features: Dict[str, np.ndarray],
    target: Target,
    dev: Device,
    remote: Optional[RPCSession],
) -> Tuple[float, float, str]:
    """Estimate the peak FLOP/s of a cuda device.

    Parameters
    ----------
    func : PrimFunc
        Function to estimate peak flops for. Used to check if a specific kind
        intrinsic or dtype could be used with this function.
    features : Dict[str, np.ndarray]
        Features extracted from `func`. Used to check if a specific kind
        intrinsic or dtype could be used with this function.
    target : Target
        Target to run on. This should be as specific to the actual hardware as
        possible.
    dev : Device
        Device to run on.
    remote : Optional[RPCSession]
        Remote session used to upload artifacts for runtime evaluation. Must be
        the same session used to create `dev`.

    Returns
    -------
    flops : float
        Estimated number of flops used by `func`.
    peak_flops : float
        Approximate sustained FLOP/s of this target/device combo. Addition and
        multiplications are each counted as separate FLOPs.
    name : str
        Dtype/intrinsic used by `func` to achieve peak flops.
    """
    # Count the floating point operations in `func`; a multiply-add counts as
    # two operations.
    flops = np.sum(
        features["float_addsub"]
        + features["float_mul"]
        + features["float_mad"] * 2
        + features["float_divmod"]
    )
    # assume that the first argument dtype is the same as all the others
    dtype = list(func.buffer_map.values())[0].dtype
    use_tensorcore = dtype == "float16" and nvcc.have_tensorcore(dev.compute_version)
    if use_tensorcore:
        peak_flops = estimate_peak_flops_tensorcore(target, dev, remote)
        name = "float16 tensorcore"
    else:
        peak_flops = estimate_peak_flops_fma(target, dev, remote, dtype)
        name = f"{dtype} fma"
    return flops, peak_flops, name
# TIR microbenchmark kernel used by estimate_peak_bandwidth_global_mem:
# streams a large array A into a small accumulator B from global memory.
@T.prim_func
def peak_bandwidth_tir(a: T.handle, b: T.handle, blocks: T.int32, warp_size: T.int32) -> None:
    # pylint: disable=invalid-name, missing-function-docstring
    N = T.int32()
    A = T.match_buffer(a, [blocks, N, 4, warp_size], "float32")
    B = T.match_buffer(b, [blocks, 4, warp_size], "float32")
    for i in T.thread_binding(blocks, "blockIdx.x"):
        for k in T.serial(N):
            for l in T.unroll(4):
                # vectorized load is necessary to hit peak bandwidth
                for j in T.thread_binding(warp_size, "threadIdx.x"):
                    # += is necessary to introduce a data dependency for all
                    # elements of A, preventing the backend from removing the
                    # `k` loop and setting `k` to the loop extent.
                    B[i, l, j] += A[i, k, l, j]
# Cached: the bandwidth microbenchmark is expensive and deterministic for a
# given (target, dev, remote) combination.
@functools.lru_cache(maxsize=None)
def estimate_peak_bandwidth_global_mem(
    target: Target,
    dev: Device,
    remote: Optional[RPCSession] = None,
) -> Tuple[float, float, str]:
    """Estimate peak bandwidth of global memory. See estimate_peak_bandwidth"""
    warp_size = dev.warp_size
    # These sizes seem large enough to give the card time to hit a fixpoint on memory bandwidth
    blocks = 1024
    size = 1024
    specialized = peak_bandwidth_tir.specialize(
        {peak_bandwidth_tir.params[2]: blocks, peak_bandwidth_tir.params[3]: warp_size}
    )
    with transform.PassContext(opt_level=3):
        f = build(specialized, target=target)
    # upload to remote if running over rpc
    if dev.device_type >= RPC_SESS_MASK:
        if remote is None:
            raise RuntimeError("A RPCSession must be provided when using a remote device.")
        temp = utils.tempdir()
        path = temp.relpath("peak_bandwidth.tar")
        f.export_library(path)
        remote.upload(path)
        f = remote.load_module("peak_bandwidth.tar")
    a = nd.empty((blocks, size, 4, warp_size), dtype="float32", device=dev)
    b = nd.empty((blocks, 4, warp_size), dtype="float32", device=dev)
    times = f.time_evaluator(f.entry_name, dev, repeat=10, number=1)(a, b)
    return a.numpy().size * 4 / times.min  # 4 bytes per float32
@registry.estimate_peak_bandwidth.register("cuda")
def estimate_peak_bandwidth(
    func: PrimFunc,  # pylint: disable=unused-argument
    features: Dict[str, np.ndarray],
    target: Target,
    dev: Device,
    remote: Optional[RPCSession] = None,
) -> Tuple[float, float, str]:
    """Estimate peak memory bandwidth of a target/device combo.

    Peak bandwidth is estimated by running a small experiment on the underlying
    hardware. The peak bandwidth measurement assumes that vector instructions
    are being used to load the data.

    Parameters
    ----------
    func : PrimFunc
        Function to estimate peak bandwidth for. Used to check if a specific
        kind of memory could be used with this function.
    features : Dict[str, np.ndarray]
        Features extracted from `func`. Used to check if a specific kind of
        memory could be used with this function.
    target : Target
        Target to use for measurement. This target should be as specific to the
        underlying hardware as possible.
    dev : Device
        Device to measure peak bandwidth on.
    remote : Optional[RPCSession]
        Remote session used to upload artifacts for runtime evaluation. Must be
        the same session used to create `dev`.

    Returns
    -------
    loaded_bytes : float
        Estimated bytes loaded by `func`.
    peak_bandwidth : float
        Peak memory bandwidth in bytes/seconds.
    name : str
        Name of the memory being used.
    """
    # autoscheduler features do not take into account that 1.
    # global and shared memory have very different performance
    # characteristics -- both are included in the same bytes
    # touched count 2. multiple threads accessing the same byte
    # of memory does not use the same amount of bandwidth as
    # multiple threads accessing different bytes of memory. We
    # use unique bytes accessed here to avoid these two issues,
    # but this does bias results towards being more compute
    # bound.
    loaded_bytes = sum(
        [
            np.sum(x)
            for (k, x) in features.items()
            if re.match(r"^B[0-9]+\.unique_bytes$", k) is not None
        ]
    )
    peak_bandwidth = estimate_peak_bandwidth_global_mem(target, dev, remote)
    return loaded_bytes, peak_bandwidth, "global"
| 15,703 | 37.490196 | 96 | py |
tvm | tvm-main/python/tvm/utils/roofline/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities for computing an approximate roofline model"""
from typing import Dict, Optional, Union
import numpy as np
from ... import IRModule, auto_scheduler, build, get_global_func, nd, relay, tir, topi, transform
from ...contrib import utils
from ...ir.expr import GlobalVar
from ...ir.instrument import pass_instrument
from ...rpc.base import RPC_SESS_MASK
from ...rpc.client import RPCSession
from ...runtime import Device, num_threads, profiler_vm, profiling
from ...script import tir as T
from ...target import Target
from . import cuda, registry, x86
def _create_args(mod: IRModule, dev: Device, func_name: str = "main", remote=None):
    """Allocate one randomly-filled NDArray per parameter of ``mod[func_name]``."""
    # Fill values come from the remote session when the device lives behind an
    # RPC link, otherwise from the locally registered function.
    if dev.device_type >= RPC_SESS_MASK:
        random_fill = remote.get_function("tvm.contrib.random.random_fill")
    else:
        random_fill = get_global_func("tvm.contrib.random.random_fill")
    assert random_fill, "Please make sure USE_RANDOM is ON in config.cmake"

    def _make_arg(param):
        # Empty buffer matching the parameter's static shape and dtype.
        shape = [dim.value for dim in param.type_annotation.shape]
        buf = nd.empty(shape, param.type_annotation.dtype, device=dev)
        random_fill(buf)
        return buf

    return [_make_arg(param) for param in mod[func_name].params]
@pass_instrument
class SaveLoweredTIR:
    """Pass instrument that snapshots TIR PrimFuncs just before a given pass.

    The saved functions are in a form that can be handled by
    `auto_scheduler.feature.named_features_from_primfunc`, as close to the
    final lowered form as possible -- by default, right before
    tir.SplitHostDevice.
    """

    def __init__(self, before_pass: str = "tir.SplitHostDevice"):
        """
        Parameters
        ----------
        before_pass: str
            Pass before which the TIR is saved.
        """
        self.before_pass = before_pass
        # GlobalVar -> PrimFunc snapshot, filled in by run_before_pass.
        self.functions = {}

    def run_before_pass(self, mod, info):
        # Only snapshot when the pass we are waiting for is about to run.
        if info.name != self.before_pass:
            return
        self.functions.update(
            {gvar: func for gvar, func in mod.functions.items() if isinstance(func, tir.PrimFunc)}
        )
def roofline_from_existing(
    report: profiling.Report,
    tir_functions: Dict[GlobalVar, tir.PrimFunc],
    target: Target,
    dev: Device,
    remote: Optional[RPCSession] = None,
) -> profiling.Report:
    """Add roofline and other estimated statistics to an existing profiling report.

    :py:func:`roofline_analysis` should always be used instead of this function
    unless you need a custom compilation pipeline.

    Calculating roofline statistics requires features extracted from the TIR
    functions in addition to per-operator runtime information (`report`) of the
    same TIR features. The features and TIR functions are not included with the
    compiled library used to generate the per-operator runtime. It is essential
    that the per-operator information comes from the exact same compilation
    pipeline as the TIR functions.

    Example
    -------

    .. code-block:: python

        import tvm
        import tvm.relay

        mod, params = tvm.relay.testing.mlp.get_workload()

        # it is recommended to use SaveLoweredTIR to get out the tir primfuncs
        save_tir = tvm.utils.roofline.SaveLoweredTIR()
        with tvm.transform.PassContext(opt_level=3, pass_instrument=[save_tir]):
            lib = relay.vm.compile(mod, params=params, target=target)

        vmexec = profiler_vm.VirtualMachineProfiler(lib, dev)
        report = vmexec.profile(*inputs)

        roofline_report = roofline_from_existing(report, save_tir.functions, target, dev)

    Parameters
    ----------
    report : Report
        Existing profiling report from :py:meth:`VirtualMachineProfiler.profile`.
    tir_functions : Dict[GlobalVar, PrimFunc]
        TIR primfuncs from the module run to generate `report`. It is necessary
        that these functions come before the `tir.MakePackedAPI` pass and are
        compatible with auto_scheduler featurization.
        :py:class:`SaveLoweredTIR` is the recommended way to collect these
        functions.
    target : Target
        TVM target that `report` was generated with.
    dev : Device
        Device that `report` was generated with.
    remote : Optional[RPCSession]
        Remote session used to upload artifacts for runtime evaluation. Must be
        the same session used to create `dev`.

    Returns
    -------
    profiling.Report
        New profiling report that includes all information from `report`
        along with additional roofline metrics. See
        :py:func:`roofline_analysis` for more information on which metrics
        are included.
    """
    # Index features by the "hash" attribute stamped on each PrimFunc, so that
    # report rows (which carry the same hash) can be joined back to their TIR.
    all_features = {
        prim.attrs["hash"]: (name, prim, auto_scheduler.feature.named_features_from_primfunc(prim))
        for name, prim in tir_functions.items()
        if isinstance(prim, tir.PrimFunc) and "hash" in prim.attrs.keys()
    }
    new_configuration = dict(report.configuration.items())
    new_calls = []
    for call in report.calls:
        if "Hash" in call.keys() and call["Hash"] in all_features:
            _, prim, features = all_features[call["Hash"]]
            if features is None:
                # NOTE(review): calls whose TIR produced no features are dropped
                # from the new report entirely, not copied through unchanged.
                continue
            with target:
                flops, peak_flops, flops_name = registry.estimate_peak_flops(
                    prim, features, target, dev, remote
                )
                loaded_bytes, peak_bandwidth, bandwidth_name = registry.estimate_peak_bandwidth(
                    prim, features, target, dev, remote
                )
            new_configuration[f"Estimated Peak FLOP/s ({flops_name})"] = profiling.Ratio(peak_flops)
            new_configuration[
                f"Estimated Peak Bandwidth ({bandwidth_name}, byte/second)"
            ] = profiling.Ratio(peak_bandwidth)
            # The ridge point is the arithmetic intensity at which a kernel
            # switches from memory bound to compute bound.
            ridge_point = peak_flops / peak_bandwidth
            runtime = call["Duration (us)"].microseconds * 1e-6
            arith_inten = flops / loaded_bytes
            # Copy the row before annotating it with roofline metrics.
            call = dict(call)
            call["Loaded Bytes"] = profiling.Count(int(loaded_bytes))
            call["Estimated FLOPs"] = profiling.Count(int(flops))
            call["Arithmetic Intensity"] = profiling.Ratio(arith_inten)
            call["FLOP/s"] = profiling.Ratio(flops / runtime)
            call["Bandwidth"] = profiling.Ratio(loaded_bytes / runtime)
            compute_bound = arith_inten > ridge_point
            call["Bound"] = "compute" if compute_bound else "memory"
            per_mem_bound = (loaded_bytes / runtime) / peak_bandwidth * 100
            per_compute_bound = (flops / runtime) / peak_flops * 100.0
            # We use ratio here because the percentages should be averaged instead of summed.
            call["Percent of Theoretical Optimal"] = profiling.Ratio(
                per_compute_bound if compute_bound else per_mem_bound
            )
            new_calls.append(call)
        else:
            # No matching TIR function: keep the row as-is.
            new_calls.append(call)
    return profiling.Report(new_calls, report.device_metrics, new_configuration)
def roofline_analysis(
    mod: IRModule,
    params: Dict[str, nd.NDArray],
    target: Union[str, Target],
    dev: Device,
    remote: Optional[RPCSession] = None,
) -> profiling.Report:
    """
    Create a profiling report that contains roofline and other estimated
    statistics from running a module on the VM.

    The roofline model measures how close a operator gets to best possible
    memory bandwidth or FLOP/s depending on whether it is memory or compute
    bound. This computation uses the runtime of the operator along with two
    numbers extracted from the TIR code: bytes of memory touched and number of
    floating point operations.

    These statistics are calculated by analyzing the lowered TIR of each
    operator, so they are estimates of the true values. The statistics are:
    - Bound: Is the operator memory or compute bound. This is computed by
      assuming that the operator could perfectly cache all loads -- each byte
      of memory is only loaded once.
    - Percent of Theoretical Optimal: What percent of theoretical optimal for
      the bound. i.e. percent of peak memory bandwidth if memory bound,
      percent of peak FLOP/s if compute bound.
    - Loaded Bytes: estimation of the number of bytes loaded from main memory.
    - Estimated Flops: estimated number of floating point operations.
    - Arithmetic Intensity: ratio of FLOPs per byte of data.
    - FLOP/s: floating point operations per second.
    - Bandwidth: Number of bytes loaded per second.

    Parameters
    ----------
    mod : IRModule
        Uncompiled input module
    params : Dict[str, nd.NDArray]
        Parameters passed to `relay.vm.compile` alongside the module.
    target : Union[str, Target]
        Target to run on.
    dev : Device
        Device to run on.
    remote : Optional[RPCSession]
        Remote session used to upload artifacts for runtime evaluation. Must be
        the same session used to create `dev`.

    Returns
    -------
    report : profiling.Report
        Profiling report which includes the estimated statistics.
    """
    if isinstance(target, str):
        target = Target(target)
    # Instrument the compilation so the lowered TIR is captured for analysis.
    save_tir = SaveLoweredTIR()
    # copy existing context but add our instrument
    pass_ctx = transform.PassContext.current()
    with transform.PassContext(
        opt_level=pass_ctx.opt_level,
        required_pass=pass_ctx.required_pass,
        disabled_pass=pass_ctx.disabled_pass,
        instruments=list(pass_ctx.instruments) + [save_tir],
        config=pass_ctx.config,
    ):
        lib = relay.vm.compile(mod, params=params, target=target)
    # upload to remote if running over rpc
    if dev.device_type >= RPC_SESS_MASK:
        if remote is None:
            raise RuntimeError("A RPCSession must be provided when using a remote device.")
        temp = utils.tempdir()
        path = temp.relpath("roofline_lib.tar")
        lib.mod.export_library(path)
        remote.upload(path)
        lib = remote.load_module("roofline_lib.tar")
    # Profile the module with random inputs, then join the per-call timings
    # with the captured TIR to produce roofline metrics.
    vmexec = profiler_vm.VirtualMachineProfiler(lib, dev)
    args = _create_args(mod, dev, remote=remote)
    report = vmexec.profile(*args)
    return roofline_from_existing(report, save_tir.functions, target, dev, remote=remote)
| 10,925 | 38.021429 | 100 | py |
tvm | tvm-main/python/tvm/rpc/base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Base definitions for RPC."""
# pylint: disable=invalid-name
import socket
import time
import json
import errno
import struct
import random
import logging
from .._ffi.base import py_str
# Magic header for RPC data plane
RPC_MAGIC = 0xFF271
# magic header for RPC tracker (control plane)
RPC_TRACKER_MAGIC = 0x2F271
# success response
RPC_CODE_SUCCESS = RPC_MAGIC + 0
# duplicate key in proxy
RPC_CODE_DUPLICATE = RPC_MAGIC + 1
# cannot find matched key in server
RPC_CODE_MISMATCH = RPC_MAGIC + 2
logger = logging.getLogger("RPCServer")
class TrackerCode(object):
    """Enumeration code for the RPC tracker"""

    FAIL = -1  # generic failure response
    SUCCESS = 0  # request handled successfully
    PING = 1  # liveness check
    STOP = 2  # shut the tracker down (requires the stop key)
    PUT = 3  # report a resource to the tracker
    REQUEST = 4  # request a resource from the tracker
    UPDATE_INFO = 5  # update connection metadata
    SUMMARY = 6  # query queue/connection summary
    GET_PENDING_MATCHKEYS = 7  # list match keys not yet consumed
# Device types at or above this value refer to devices reached through an RPC
# session; callers test `dev.device_type >= RPC_SESS_MASK`.
RPC_SESS_MASK = 128
# Use "127.0.0.1" or "::1" if there is a need to force ip4 or ip6
# connection for "localhost".
def get_addr_family(addr):
    """Return the socket address family (e.g. AF_INET or AF_INET6) for ``addr``.

    Parameters
    ----------
    addr : tuple
        ``(host, port)`` pair to resolve.
    """
    host, port = addr[0], addr[1]
    candidates = socket.getaddrinfo(host, port, 0, 0, socket.IPPROTO_TCP)
    # The first resolution result decides the family we use.
    return candidates[0][0]
def recvall(sock, nbytes):
    """Receive exactly ``nbytes`` bytes from ``sock``.

    Parameters
    ----------
    sock: Socket
        The socket

    nbytes : int
        Number of bytes to be received.

    Raises
    ------
    IOError
        If the peer closes the connection before all bytes arrive.
    """
    chunks = []
    remaining = nbytes
    while remaining > 0:
        piece = sock.recv(min(remaining, 1024))
        if not piece:
            raise IOError("connection reset")
        remaining -= len(piece)
        chunks.append(piece)
    return b"".join(chunks)
def sendjson(sock, data):
    """send a python value to remote via json

    The wire format is a little-endian int32 length header followed by the
    UTF-8 encoded JSON body.

    Parameters
    ----------
    sock : Socket
        The socket

    data : object
        Python value to be sent.
    """
    payload = json.dumps(data)
    header = struct.pack("<i", len(payload))
    sock.sendall(header)
    sock.sendall(payload.encode("utf-8"))
def recvjson(sock):
    """receive python value from remote via json

    Expects a little-endian int32 length header followed by the JSON body.

    Parameters
    ----------
    sock : Socket
        The socket

    Returns
    -------
    value : object
        The value received.
    """
    (size,) = struct.unpack("<i", recvall(sock, 4))
    payload = recvall(sock, size)
    return json.loads(py_str(payload))
def random_key(prefix, delimiter=":", cmap=None):
    """Generate a random key

    Parameters
    ----------
    prefix : str
        The string prefix

    delimiter : str
        The delimiter

    cmap : dict
        Conflict map: keys already in use; the result is guaranteed not to
        collide with any of them.

    Returns
    -------
    key : str
        The generated random key
    """
    candidate = f"{prefix}{delimiter}{random.random()}"
    # Re-roll until the candidate does not collide with an existing key.
    while cmap and candidate in cmap:
        candidate = f"{prefix}{delimiter}{random.random()}"
    return candidate
def split_random_key(key, delimiter=":"):
    """Split a random key by delimiter into prefix and random part

    Parameters
    ----------
    key : str
        The generated random key

    delimiter : str
        The delimiter used when the key was generated

    Returns
    -------
    prefix : str
        The string prefix

    random_part : str
        The generated random
    """
    head, sep, tail = key.rpartition(delimiter)
    if not sep:
        # No delimiter present: mirror str.rsplit(maxsplit=1) behavior.
        return [key]
    return [head, tail]
def connect_with_retry(addr, timeout=60, retry_period=5):
    """Connect to a TCP address with retry

    This function is only reliable to short period of server restart.

    Parameters
    ----------
    addr : tuple
        address tuple

    timeout : float
        Timeout during retry

    retry_period : float
        Number of seconds before we retry again.
    """
    tstart = time.time()
    while True:
        try:
            sock = socket.socket(get_addr_family(addr), socket.SOCK_STREAM)
            sock.connect(addr)
            return sock
        except socket.error as sock_err:
            # Only ECONNREFUSED is retried (server briefly down); everything
            # else propagates immediately.
            if sock_err.args[0] not in (errno.ECONNREFUSED,):
                raise sock_err
            period = time.time() - tstart
            if period > timeout:
                raise RuntimeError(f"Failed to connect to server {str(addr)}")
            logger.warning(
                f"Cannot connect to tracker {str(addr)}, retry in {retry_period:g} secs..."
            )
            time.sleep(retry_period)
| 4,735 | 22.68 | 91 | py |
tvm | tvm-main/python/tvm/rpc/tracker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC Tracker, tracks and distributes the TVM RPC resources.
This folder implements the tracker server logic.
Note
----
Tracker is a TCP based rest api with the following protocol:
- Initial handshake to the peer
- RPC_TRACKER_MAGIC
- Normal message: [size(int32), json-data]
- Each message is initiated by the client, and the tracker replies with a json.
List of available APIs:
- PING: check if tracker is alive
- input: [TrackerCode.PING]
- return: TrackerCode.SUCCESS
 - PUT: report resource to tracker
   - input: [TrackerCode.PUT, key, [port, match-key]]
   - return: TrackerCode.SUCCESS
   - note: match-key is a randomly generated key to identify the resource during connection.
- REQUEST: request a new resource from tracker
- input: [TrackerCode.REQUEST, [key, user, priority]]
- return: [TrackerCode.SUCCESS, [url, port, match-key]]
"""
# pylint: disable=invalid-name
import asyncio
import heapq
import logging
import socket
import threading
import errno
import struct
import json
from tvm.contrib.popen_pool import PopenWorker
try:
from tornado import ioloop
from . import tornado_util
except ImportError as error_msg:
raise ImportError(
f"RPCTracker module requires tornado package {error_msg}. Try 'pip install tornado'."
)
from .._ffi.base import py_str
from . import base
from .base import RPC_TRACKER_MAGIC, TrackerCode
logger = logging.getLogger("RPCTracker")
console_handler = logging.StreamHandler()
console_handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s.%(msecs)03d %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
)
logger.addHandler(console_handler)
logger.setLevel(logging.INFO)
logger.propagate = False
class Scheduler(object):
    """Abstract interface of scheduler."""

    def put(self, value):
        """Push a resource into the scheduler.

        May trigger pending request callbacks.

        Parameters
        ----------
        value : object
            The resource to be put in the scheduler.
        """
        raise NotImplementedError()

    def request(self, user, priority, callback):
        """Request a resource.

        Parameters
        ----------
        user : str
            The user who is requesting the resource.

        priority : int
            The job priority

        callback : function: value->bool
            Callback invoked with a resource when one is ready; it returns
            True if the resource is consumed.
        """
        raise NotImplementedError()

    def remove(self, value):
        """Remove a resource in the scheduler

        Default implementation is a no-op.

        Parameters
        ----------
        value: object
            The resource to remove
        """

    def summary(self):
        """Get summary information of the scheduler."""
        raise NotImplementedError()
class PriorityScheduler(Scheduler):
    """Priority based scheduler, FIFO based on request order"""

    def __init__(self, key):
        self._key = key
        self._lock = threading.Lock()
        # Free resources and the pending-request heap; ties between equal
        # priorities break FIFO via the monotonically increasing counter.
        self._values = []
        self._requests = []
        self._request_cnt = 0

    def _schedule(self):
        # Match free resources against pending requests until one side runs dry.
        while self._requests and self._values:
            resource = self._values.pop(0)
            entry = heapq.heappop(self._requests)
            callback = entry[-1]
            if callback(resource[1:]):
                # Consumed: the match key is no longer pending on its connection.
                resource[0].pending_matchkeys.remove(resource[-1])
            else:
                # The requester went away; return the resource to the pool.
                self._values.append(resource)

    def put(self, value):
        self._values.append(value)
        self._schedule()

    def request(self, user, priority, callback):
        with self._lock:
            # Negate priority so the heap pops the highest priority first.
            heapq.heappush(self._requests, (-priority, self._request_cnt, callback))
            self._request_cnt += 1
        self._schedule()

    def remove(self, value):
        if value in self._values:
            self._values.remove(value)
            self._schedule()

    def summary(self):
        """Get summary information of the scheduler."""
        return {"free": len(self._values), "pending": len(self._requests)}
class TCPEventHandler(tornado_util.TCPHandler):
    """Base asynchronous message handler.

    The tracker and client follows a simple message protocol.
    The message is in form [nbytes(int32)] [json-str].
    All the information is packed in json-str
    """

    def __init__(self, tracker, sock, addr):
        super(TCPEventHandler, self).__init__(sock)
        # Buffer of bytes received but not yet parsed into a full message.
        self._data = bytearray()
        self._tracker = tracker
        # Size of the message currently being assembled; 0 = waiting for header.
        self._msg_size = 0
        self._addr = addr
        # Non-zero until the magic-number handshake completes.
        self._init_req_nbytes = 4
        self._info = {}
        # list of pending match keys that has not been used.
        self.pending_matchkeys = set()
        self._tracker._connections.add(self)
        # Resources this connection has PUT, so they can be withdrawn on close.
        self.put_values = []

    def name(self):
        """name of connection"""
        return f"TCPSocket: {str(self._addr)}"

    def summary(self):
        """Summary of this connection"""
        return self._info

    def _init_conn(self, message):
        """Initialize the connection"""
        if len(message) != 4:
            logger.warning("Invalid connection from %s", self.name())
            self.close()
        magic = struct.unpack("<i", message)[0]
        if magic != RPC_TRACKER_MAGIC:
            logger.warning("Invalid magic from %s", self.name())
            self.close()
        # Echo the magic back to complete the handshake.
        self.write_message(struct.pack("<i", RPC_TRACKER_MAGIC), binary=True)
        self._init_req_nbytes = 0

    def on_message(self, message):
        """Callback when a message is received.

        Parameters
        ----------
        message : bytearray
            The bytes received
        """
        assert isinstance(message, bytes)
        if self._init_req_nbytes:
            self._init_conn(message)
            return
        self._data += message
        # Drain every complete [size][json] frame in the buffer.
        while True:
            if self._msg_size == 0:
                if len(self._data) >= 4:
                    self._msg_size = struct.unpack("<i", self._data[:4])[0]
                else:
                    return
            if self._msg_size != 0 and len(self._data) >= self._msg_size + 4:
                msg = py_str(bytes(self._data[4 : 4 + self._msg_size]))
                del self._data[: 4 + self._msg_size]
                self._msg_size = 0
                # pylint: disable=broad-except
                self.call_handler(json.loads(msg))
            else:
                return

    def ret_value(self, data):
        """return value to the output"""
        data = json.dumps(data)
        self.write_message(struct.pack("<i", len(data)), binary=True)
        self.write_message(data.encode("utf-8"), binary=True)

    def call_handler(self, args):
        """Event handler when json request arrives."""
        code = args[0]
        if code == TrackerCode.PUT:
            # args: [PUT, key, [port, matchkey], optional custom address]
            key = args[1]
            port, matchkey = args[2]
            self.pending_matchkeys.add(matchkey)
            # got custom address (from rpc server)
            if len(args) >= 4 and args[3] is not None:
                value = (self, args[3], port, matchkey)
            else:
                value = (self, self._addr[0], port, matchkey)
            self._tracker.put(key, value)
            self.put_values.append(value)
            self.ret_value(TrackerCode.SUCCESS)
        elif code == TrackerCode.REQUEST:
            # args: [REQUEST, key, user, priority]; reply arrives via _cb when
            # a matching resource becomes available.
            key = args[1]
            user = args[2]
            priority = args[3]

            def _cb(value):
                # if the connection is already closed
                if not self._sock:
                    return False
                try:
                    self.ret_value([TrackerCode.SUCCESS, value])
                except (socket.error, IOError):
                    return False
                return True

            self._tracker.request(key, user, priority, _cb)
        elif code == TrackerCode.PING:
            self.ret_value(TrackerCode.SUCCESS)
        elif code == TrackerCode.GET_PENDING_MATCHKEYS:
            self.ret_value(list(self.pending_matchkeys))
        elif code == TrackerCode.STOP:
            # safe stop tracker; only honored with the correct stop key
            if self._tracker._stop_key == args[1]:
                self.ret_value(TrackerCode.SUCCESS)
                self._tracker.stop()
            else:
                self.ret_value(TrackerCode.FAIL)
        elif code == TrackerCode.UPDATE_INFO:
            info = args[1]
            assert isinstance(info, dict)
            # Fill in the peer address when the client did not provide one.
            if info["addr"][0] is None:
                info["addr"][0] = self._addr[0]
            self._info.update(info)
            self.ret_value(TrackerCode.SUCCESS)
        elif code == TrackerCode.SUMMARY:
            status = self._tracker.summary()
            self.ret_value([TrackerCode.SUCCESS, status])
        else:
            logger.warning("Unknown tracker code %d", code)
            self.close()

    def on_close(self):
        # Let the tracker withdraw this connection's resources.
        self._tracker.close(self)

    def on_error(self, err):
        logger.warning("%s: Error in RPC Tracker: %s", self.name(), err)
        self.close()
class TrackerServerHandler(object):
    """Tracker that tracks the resources."""

    def __init__(self, sock, stop_key):
        # resource key -> scheduler for that resource type.
        self._scheduler_map = {}
        self._sock = sock
        self._sock.setblocking(0)
        self._ioloop = ioloop.IOLoop.current()
        self._stop_key = stop_key
        self._connections = set()

        def _event_handler(_, events):
            self._on_event(events)

        # Watch the listening socket for incoming connections.
        self._ioloop.add_handler(self._sock.fileno(), _event_handler, self._ioloop.READ)

    def _on_event(self, _):
        # Accept every connection currently queued on the non-blocking socket.
        while True:
            try:
                conn, addr = self._sock.accept()
                TCPEventHandler(self, conn, addr)
            except socket.error as err:
                if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
                    break

    def create_scheduler(self, key):
        """Create a new scheduler."""
        return PriorityScheduler(key)

    def put(self, key, value):
        """Report a new resource to the tracker."""
        if key not in self._scheduler_map:
            self._scheduler_map[key] = self.create_scheduler(key)
        self._scheduler_map[key].put(value)

    def request(self, key, user, priority, callback):
        """Request a new resource."""
        if key not in self._scheduler_map:
            self._scheduler_map[key] = self.create_scheduler(key)
        self._scheduler_map[key].request(user, priority, callback)

    def close(self, conn):
        """Remove a closed connection and withdraw the resources it advertised."""
        self._connections.remove(conn)
        if "key" in conn._info:
            for value in conn.put_values:
                _, _, _, key = value
                # The scheduler is keyed by the rpc key, without the random part.
                rpc_key, _ = base.split_random_key(key)
                self._scheduler_map[rpc_key].remove(value)

    def stop(self):
        """Safely stop tracker."""
        for conn in list(self._connections):
            conn.close()
        self._sock.close()
        self._ioloop.stop()

    def summary(self):
        """Return a dict summarizing current status."""
        qinfo = {}
        for k, v in self._scheduler_map.items():
            qinfo[k] = v.summary()
        cinfo = []
        # ignore client connections without key
        for conn in self._connections:
            res = conn.summary()
            if res.get("key", "").startswith("server"):
                cinfo.append(res)
        return {"queue_info": qinfo, "server_info": cinfo}

    def run(self):
        """Run the tracker server"""
        self._ioloop.start()
def _tracker_server(listen_sock, stop_key):
    """Thread entry point: attach a fresh event loop and serve the tracker.

    Blocks on the tornado IOLoop until a STOP request carrying `stop_key`
    shuts the handler down.
    """
    # This runs in a new thread, which has no asyncio event loop attached yet.
    asyncio.set_event_loop(asyncio.new_event_loop())
    handler = TrackerServerHandler(listen_sock, stop_key)
    handler.run()
class PopenTrackerServerState(object):
    """Internal PopenTrackerServer State"""

    # State instance created inside the PopenWorker process
    # (set by _popen_start_tracker_server).
    current = None

    def __init__(self, host, port=9190, port_end=9199, silent=False, reuse_addr=True, timeout=None):
        if silent:
            logger.setLevel(logging.WARN)
        sock = socket.socket(base.get_addr_family((host, port)), socket.SOCK_STREAM)
        if reuse_addr:
            # Allow rebinding a port left in TIME_WAIT by a previous run.
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if timeout is not None:
            sock.settimeout(timeout)
        self.port = None
        self.stop_key = base.random_key("tracker")
        # Scan [port, port_end) for the first port we can bind.
        for my_port in range(port, port_end):
            try:
                sock.bind((host, my_port))
                self.port = my_port
                break
            except socket.error as sock_err:
                if sock_err.errno in [errno.EADDRINUSE]:
                    continue
                raise sock_err
        if not self.port:
            raise ValueError(f"cannot bind to any port in [{port}, {port_end})")
        logger.info("bind to %s:%d", host, self.port)
        sock.listen(1)
        # The tracker event loop runs on its own thread inside this process.
        self.thread = threading.Thread(target=_tracker_server, args=(sock, self.stop_key))
        self.thread.start()
        self.host = host
def _popen_start_tracker_server(
    host, port=9190, port_end=9199, silent=False, reuse_addr=True, timeout=None
):
    """Start a tracker server inside a PopenWorker process.

    Returns (port, stop_key) so the parent process can connect to the
    tracker and later shut it down.
    """
    # This is a function that will be sent to the
    # Popen worker to run on a separate process.
    # Create and start the server in a different thread
    state = PopenTrackerServerState(host, port, port_end, silent, reuse_addr, timeout)
    PopenTrackerServerState.current = state
    # returns the port so that the main can get the port number.
    return (state.port, state.stop_key)
class Tracker(object):
    """Start RPC tracker on a separate process.

    Python implementation based on PopenWorker.

    Parameters
    ----------
    host : str
        The host url of the server.

    port : int
        The TCP port to be bind to

    port_end : int, optional
        The end TCP port to search

    silent: bool, optional
        Whether run in silent mode

    reuse_addr: bool, optional
        Allows the kernel to reuse a local socket in TIME_WAIT state.

    timeout: float, optional
        set a timeout for all operations on the socket
    """

    def __init__(
        self, host="0.0.0.0", port=9190, port_end=9199, silent=False, reuse_addr=True, timeout=None
    ):
        if silent:
            logger.setLevel(logging.WARN)
        self.proc = PopenWorker()
        # send the function
        self.proc.send(
            _popen_start_tracker_server, [host, port, port_end, silent, reuse_addr, timeout]
        )
        # receive the port
        self.port, self.stop_key = self.proc.recv()
        self.host = host

    def _stop_tracker(self):
        # Ask the tracker to shut itself down over the control protocol,
        # authenticated with the random stop key.
        sock = socket.socket(base.get_addr_family((self.host, self.port)), socket.SOCK_STREAM)
        sock.connect(("127.0.0.1", self.port))
        sock.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
        magic = struct.unpack("<i", base.recvall(sock, 4))[0]
        assert magic == base.RPC_TRACKER_MAGIC
        base.sendjson(sock, [TrackerCode.STOP, self.stop_key])
        assert base.recvjson(sock) == TrackerCode.SUCCESS
        sock.close()

    def terminate(self):
        """Terminate the server process"""
        if self.proc:
            if self.proc.is_alive():
                # Try a graceful stop first, then escalate to kill.
                self._stop_tracker()
                self.proc.join(0.1)
            if self.proc.is_alive():
                logger.info("Terminating Tracker Server...")
                self.proc.kill()
            self.proc = None

    def __del__(self):
        # Interpreter shutdown may have torn down modules already; ignore.
        try:
            self.terminate()
        except TypeError:
            pass
| 16,163 | 31.457831 | 100 | py |
tvm | tvm-main/python/tvm/rpc/tornado_util.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities used in tornado."""
import socket
import errno
from tornado import ioloop
class TCPHandler(object):
    """TCP socket handler backed tornado event loop.

    Subclasses implement on_message/on_error/on_close to receive events.

    Parameters
    ----------
    sock : Socket
        The TCP socket, will set it to non-blocking mode.
    """

    def __init__(self, sock):
        self._sock = sock
        self._ioloop = ioloop.IOLoop.current()
        self._sock.setblocking(0)
        # Messages queued for sending when the socket becomes writable.
        self._pending_write = []
        self._signal_close = False

        def _event_handler(_, events):
            self._event_handler(events)

        self._ioloop.add_handler(
            self._sock.fileno(), _event_handler, self._ioloop.READ | self._ioloop.ERROR
        )

    def signal_close(self):
        """Signal the handler to close.

        The handler will be closed after the existing
        pending message are sent to the peer.
        """
        if not self._pending_write:
            self.close()
        else:
            self._signal_close = True

    def close(self):
        """Close the socket"""
        if self._sock is not None:
            try:
                self._ioloop.remove_handler(self._sock.fileno())
                self._sock.close()
            except socket.error:
                pass
            self._sock = None
            self.on_close()

    def write_message(self, message, binary=True):
        # Only binary payloads are supported.
        assert binary
        if self._sock is None:
            raise IOError("socket is already closed")
        self._pending_write.append(message)
        self._update_write()

    def _event_handler(self, events):
        """central event handler"""
        if (events & self._ioloop.ERROR) or (events & self._ioloop.READ):
            # Only try writing if the read side did not close the connection.
            if self._update_read() and (events & self._ioloop.WRITE):
                self._update_write()
        elif events & self._ioloop.WRITE:
            self._update_write()

    def _update_write(self):
        """Update the state on write"""
        # Flush as much of the pending queue as the socket accepts.
        while self._pending_write:
            try:
                msg = self._pending_write[0]
                if self._sock is None:
                    return
                nsend = self._sock.send(msg)
                if nsend != len(msg):
                    # Partial send: keep the unsent tail at the queue head.
                    self._pending_write[0] = msg[nsend:]
                else:
                    self._pending_write.pop(0)
            except socket.error as err:
                if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
                    break
                self.on_error(err)
        if self._pending_write:
            # Still data left: ask the loop to notify us when writable.
            self._ioloop.update_handler(
                self._sock.fileno(), self._ioloop.READ | self._ioloop.ERROR | self._ioloop.WRITE
            )
        else:
            if self._signal_close:
                self.close()
            else:
                self._ioloop.update_handler(
                    self._sock.fileno(), self._ioloop.READ | self._ioloop.ERROR
                )

    def _update_read(self):
        """Update state when there is read event"""
        try:
            msg = bytes(self._sock.recv(4096))
            if msg:
                self.on_message(msg)
                return True
            # normal close, remote is closed
            self.close()
        except socket.error as err:
            if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
                pass
            else:
                self.on_error(err)
        return False
| 4,192 | 31.757813 | 96 | py |
tvm | tvm-main/python/tvm/rpc/proxy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC proxy, allows both client/server to connect and match connection.
In normal RPC, client directly connect to server's IP address.
Sometimes this cannot be done when server do not have a static address.
RPCProxy allows both client and server connect to the proxy server,
the proxy server will forward the message between the client and server.
"""
# pylint: disable=unused-variable, unused-argument
import os
import asyncio
import logging
import socket
import threading
import errno
import struct
import time
try:
import tornado
from tornado import gen
from tornado import websocket
from tornado import ioloop
from . import tornado_util
except ImportError as error_msg:
raise ImportError(
f"RPCProxy module requires tornado package {error_msg}. Try 'pip install tornado'."
)
from tvm.contrib.popen_pool import PopenWorker
from . import _ffi_api
from . import base
from .base import TrackerCode
from .server import _server_env
from .._ffi.base import py_str
class ForwardHandler(object):
    """Forward handler to forward the message.

    Mixin implementing the proxy-side RPC handshake: it reads the magic
    number and the rpc key from the peer, registers itself with the
    ProxyServerHandler, and once paired forwards raw bytes to the peer
    handler stored in ``self.forward_proxy``.
    """

    def _init_handler(self):
        """Initialize handler."""
        # Accumulated bytes of the current handshake field being read.
        self._init_message = bytes()
        # Number of bytes expected for the current handshake field.
        self._init_req_nbytes = 4
        self._magic = None
        self.timeout = None
        self._rpc_key_length = None
        self._done = False
        self._proxy = ProxyServerHandler.current
        assert self._proxy
        self.rpc_key = None
        self.match_key = None
        # Peer handler; once set, all incoming bytes are forwarded to it.
        self.forward_proxy = None
        # Timestamp when the key was handed out via the tracker (see proxy).
        self.alloc_time = None

    def __del__(self):
        logging.info("Delete %s...", self.name())

    def name(self):
        """Name of this connection."""
        return "RPCConnection"

    def _init_step(self, message):
        # Handshake state machine: magic (4B) -> key length (4B) -> key.
        if self._magic is None:
            assert len(message) == 4
            self._magic = struct.unpack("<i", message)[0]
            if self._magic != base.RPC_MAGIC:
                logging.info("Invalid RPC magic from %s", self.name())
                self.close()
            self._init_req_nbytes = 4
        elif self._rpc_key_length is None:
            assert len(message) == 4
            self._rpc_key_length = struct.unpack("<i", message)[0]
            self._init_req_nbytes = self._rpc_key_length
        elif self.rpc_key is None:
            assert len(message) == self._rpc_key_length
            self.rpc_key = py_str(message)
            # match key is used to do the matching
            # (strip the 7-char "server:"/"client:" prefix, take first token)
            self.match_key = self.rpc_key[7:].split()[0]
            self.on_start()
        else:
            assert False

    def on_start(self):
        """Event when the initialization is completed"""
        self._proxy.handler_ready(self)

    def on_data(self, message):
        """on data"""
        assert isinstance(message, bytes)
        if self.forward_proxy:
            # Already paired: pass bytes straight through to the peer.
            self.forward_proxy.send_data(message)
        else:
            # Still handshaking: consume exactly the bytes each field needs.
            while message and self._init_req_nbytes > len(self._init_message):
                nbytes = self._init_req_nbytes - len(self._init_message)
                self._init_message += message[:nbytes]
                message = message[nbytes:]
                if self._init_req_nbytes == len(self._init_message):
                    temp = self._init_message
                    self._init_req_nbytes = 0
                    self._init_message = bytes()
                    self._init_step(temp)
            if message:
                # Bytes arriving before pairing completed violate the protocol.
                logging.info("Invalid RPC protocol, too many bytes %s", self.name())
                self.close()

    def on_error(self, err):
        logging.info("%s: Error in RPC %s", self.name(), err)
        self.close_pair()

    def close_pair(self):
        # Close this handler and signal the paired peer (if any) to close too.
        if self.forward_proxy:
            self.forward_proxy.signal_close()
            self.forward_proxy = None
        self.close()

    def on_close_event(self):
        """on close event"""
        assert not self._done
        logging.info("RPCProxy:on_close_event %s ...", self.name())
        if self.match_key:
            key = self.match_key
            # Remove ourselves from whichever waiting pool still holds us.
            if self._proxy._client_pool.get(key, None) == self:
                self._proxy._client_pool.pop(key)
            if self._proxy._server_pool.get(key, None) == self:
                self._proxy._server_pool.pop(key)
        self._done = True
        self.forward_proxy = None
class TCPHandler(tornado_util.TCPHandler, ForwardHandler):
    """Forwarding handler bound to a raw TCP connection accepted by the proxy."""

    def __init__(self, sock, addr):
        super().__init__(sock)
        self._init_handler()
        self.addr = addr

    def name(self):
        return f"TCPSocketProxy:{str(self.addr[0])}:{self.rpc_key}"

    def send_data(self, message, binary=True):
        self.write_message(message, True)

    def on_message(self, message):
        self.on_data(message)

    def on_close(self):
        logging.info("RPCProxy: on_close %s ...", self.name())
        self._close_process = True
        peer = self.forward_proxy
        if peer:
            # Ask the paired peer to shut down once its queue drains.
            peer.signal_close()
            self.forward_proxy = None
        self.on_close_event()
class WebSocketHandler(websocket.WebSocketHandler, ForwardHandler):
    """Forwarding handler bound to a websocket connection."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._init_handler()

    def name(self):
        return f"WebSocketProxy:{self.rpc_key}"

    def on_message(self, message):
        self.on_data(message)

    def data_received(self, _):
        raise NotImplementedError()

    def send_data(self, message):
        # A websocket may be closed by the peer at any time; report it as
        # a connection error rather than letting the exception escape.
        try:
            self.write_message(message, True)
        except websocket.WebSocketClosedError as err:
            self.on_error(err)

    def on_close(self):
        logging.info("RPCProxy: on_close %s ...", self.name())
        peer = self.forward_proxy
        if peer:
            peer.signal_close()
            self.forward_proxy = None
        self.on_close_event()

    def signal_close(self):
        self.close()
class RequestHandler(tornado.web.RequestHandler):
    """Handles html request.

    Serves a single static resource whose content is loaded once at
    construction time. HTML pages are read as text so the websocket URL
    can be rewritten to the configured port; everything else is served
    verbatim as bytes.
    """

    def __init__(self, *args, **kwargs):
        file_path = kwargs.pop("file_path")
        if file_path.endswith("html"):
            # FIX: use a context manager so the file handle is closed
            # (the original leaked an open file per handler construction).
            with open(file_path) as page_file:
                self.page = page_file.read()
            web_port = kwargs.pop("rpc_web_port", None)
            if web_port:
                # Point the page's websocket endpoint at the actual web port.
                self.page = self.page.replace(
                    "ws://localhost:9190/ws", f"ws://localhost:{web_port}/ws"
                )
        else:
            with open(file_path, "rb") as page_file:
                self.page = page_file.read()
        super(RequestHandler, self).__init__(*args, **kwargs)

    def data_received(self, _):
        pass

    def get(self, *args, **kwargs):
        self.write(self.page)
class ProxyServerHandler(object):
    """Internal proxy server handler class.

    Accepts incoming TCP/websocket connections, performs the RPC handshake
    through ForwardHandler, and pairs each client with a server that sent
    the same match key. When ``tracker_addr`` is given, server keys are also
    published to the tracker so clients can discover them.

    Parameters
    ----------
    sock : Socket
        Listening socket for direct TCP connections.
    listen_port : int
        Port of the listening socket, reported to the tracker.
    web_port : int
        Port of the http/websocket frontend; 0 disables it.
    timeout_client : float
        Seconds a client may wait for a matching server.
    timeout_server : float
        Seconds a server may wait for a matching client.
    tracker_addr : tuple of (str, int) or None
        Address of the RPC tracker; None runs in plain proxy mode.
    index_page : str, optional
        Path of the html index page served at "/".
    resource_files : list of str, optional
        Extra files served by the web frontend.
    """

    current = None

    def __init__(
        self,
        sock,
        listen_port,
        web_port,
        timeout_client,
        timeout_server,
        tracker_addr,
        index_page=None,
        resource_files=None,
    ):
        assert ProxyServerHandler.current is None
        ProxyServerHandler.current = self
        if web_port:
            handlers = [(r"/ws", WebSocketHandler)]
            if index_page:
                handlers.append(
                    (r"/", RequestHandler, {"file_path": index_page, "rpc_web_port": web_port})
                )
                logging.info("Serving RPC index html page at http://localhost:%d", web_port)
            resource_files = resource_files if resource_files else []
            for fname in resource_files:
                basename = os.path.basename(fname)
                pair = (rf"/{basename}", RequestHandler, {"file_path": fname})
                handlers.append(pair)
                logging.info(pair)
            self.app = tornado.web.Application(handlers)
            self.app.listen(web_port)
        self.sock = sock
        self.sock.setblocking(0)
        self.loop = ioloop.IOLoop.current()

        def event_handler(_, events):
            self._on_event(events)

        self.loop.add_handler(self.sock.fileno(), event_handler, self.loop.READ)
        # Handlers waiting for a peer, keyed by match key.
        self._client_pool = {}
        self._server_pool = {}
        # Grace period before an unclaimed tracker key is regenerated.
        self.timeout_alloc = 5
        self.timeout_client = timeout_client
        self.timeout_server = timeout_server
        # tracker information
        self._listen_port = listen_port
        self._tracker_addr = tracker_addr
        self._tracker_conn = None
        self._tracker_pending_puts = []
        self._key_set = set()
        self.update_tracker_period = 2
        if tracker_addr:
            logging.info("Tracker address:%s", str(tracker_addr))

            def _callback():
                self._update_tracker(True)

            self.loop.call_later(self.update_tracker_period, _callback)
        logging.info("RPCProxy: Websock port bind to %d", web_port)

    def _on_event(self, _):
        # Accept all connections the kernel has queued on the listen socket.
        # NOTE(review): a socket.error other than EAGAIN/EWOULDBLOCK does not
        # break the loop here; preserved from the original implementation.
        while True:
            try:
                conn, addr = self.sock.accept()
                TCPHandler(conn, addr)
            except socket.error as err:
                if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
                    break

    def _pair_up(self, lhs, rhs):
        # Cross-link the two handlers and send each one the success code
        # followed by the peer's rpc key.
        lhs.forward_proxy = rhs
        rhs.forward_proxy = lhs
        lhs.send_data(struct.pack("<i", base.RPC_CODE_SUCCESS))
        lhs.send_data(struct.pack("<i", len(rhs.rpc_key)))
        lhs.send_data(rhs.rpc_key.encode("utf-8"))
        rhs.send_data(struct.pack("<i", base.RPC_CODE_SUCCESS))
        rhs.send_data(struct.pack("<i", len(lhs.rpc_key)))
        rhs.send_data(lhs.rpc_key.encode("utf-8"))
        logging.info("Pairup connect %s and %s", lhs.name(), rhs.name())

    def _regenerate_server_keys(self, keys):
        """Regenerate keys for server pool"""
        keyset = set(self._server_pool.keys())
        new_keys = []
        # re-generate the server match key, so old information is invalidated.
        for key in keys:
            rpc_key, _ = base.split_random_key(key)
            handle = self._server_pool[key]
            del self._server_pool[key]
            new_key = base.random_key(rpc_key, keyset)
            self._server_pool[new_key] = handle
            keyset.add(new_key)
            new_keys.append(new_key)
        return new_keys

    def _update_tracker(self, period_update=False):
        """Update information on tracker."""
        try:
            if self._tracker_conn is None:
                self._tracker_conn = socket.socket(
                    base.get_addr_family(self._tracker_addr), socket.SOCK_STREAM
                )
                self._tracker_conn.connect(self._tracker_addr)
                self._tracker_conn.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
                magic = struct.unpack("<i", base.recvall(self._tracker_conn, 4))[0]
                if magic != base.RPC_TRACKER_MAGIC:
                    self.loop.stop()
                    raise RuntimeError(f"{self._tracker_addr} is not RPC Tracker")
                # just connect to tracker, need to update all keys
                # FIX: snapshot into a list -- dict.keys() is a live view in
                # Python 3 and does not support the `+=` / `.append` used on
                # _tracker_pending_puts below.
                self._tracker_pending_puts = list(self._server_pool.keys())
            if self._tracker_conn and period_update:
                # periodically update tracker information
                # regenerate key if the key is not in tracker anymore
                # and there is no in-coming connection after timeout_alloc
                base.sendjson(self._tracker_conn, [TrackerCode.GET_PENDING_MATCHKEYS])
                pending_keys = set(base.recvjson(self._tracker_conn))
                update_keys = []
                for k, v in self._server_pool.items():
                    if k not in pending_keys:
                        if v.alloc_time is None:
                            v.alloc_time = time.time()
                        elif time.time() - v.alloc_time > self.timeout_alloc:
                            update_keys.append(k)
                            v.alloc_time = None
                if update_keys:
                    logging.info(
                        "RPCProxy: No incoming conn on %s, regenerate keys...", str(update_keys)
                    )
                    new_keys = self._regenerate_server_keys(update_keys)
                    self._tracker_pending_puts += new_keys
            need_update_info = False
            # report new connections
            for key in self._tracker_pending_puts:
                rpc_key, _ = base.split_random_key(key)
                base.sendjson(
                    self._tracker_conn, [TrackerCode.PUT, rpc_key, (self._listen_port, key), None]
                )
                assert base.recvjson(self._tracker_conn) == TrackerCode.SUCCESS
                if rpc_key not in self._key_set:
                    self._key_set.add(rpc_key)
                    need_update_info = True
            if need_update_info:
                keylist = "[" + ",".join(self._key_set) + "]"
                cinfo = {"key": "server:proxy" + keylist, "addr": [None, self._listen_port]}
                base.sendjson(self._tracker_conn, [TrackerCode.UPDATE_INFO, cinfo])
                assert base.recvjson(self._tracker_conn) == TrackerCode.SUCCESS
            self._tracker_pending_puts = []
        except (socket.error, IOError) as err:
            logging.info(
                "Lost tracker connection: %s, try reconnect in %g sec",
                str(err),
                self.update_tracker_period,
            )
            # FIX: the failure may happen before the socket object exists,
            # so guard against a None handle.
            if self._tracker_conn:
                self._tracker_conn.close()
            self._tracker_conn = None
            # FIX: snapshot the keys -- _regenerate_server_keys deletes from
            # the pool while iterating, which raises RuntimeError on a live
            # dict view ("dictionary changed size during iteration").
            self._regenerate_server_keys(list(self._server_pool.keys()))
        if period_update:

            def _callback():
                self._update_tracker(True)

            self.loop.call_later(self.update_tracker_period, _callback)

    def _handler_ready_tracker_mode(self, handler):
        """tracker mode to handle handler ready."""
        if handler.rpc_key.startswith("server:"):
            # Servers get a fresh randomized key that is published on the tracker.
            key = base.random_key(handler.match_key, cmap=self._server_pool)
            handler.match_key = key
            self._server_pool[key] = handler
            self._tracker_pending_puts.append(key)
            self._update_tracker()
        else:
            # Clients must present a key previously handed out by the tracker.
            if handler.match_key in self._server_pool:
                self._pair_up(self._server_pool.pop(handler.match_key), handler)
            else:
                handler.send_data(struct.pack("<i", base.RPC_CODE_MISMATCH))
                handler.signal_close()

    def _handler_ready_proxy_mode(self, handler):
        """Normal proxy mode when handler is ready."""
        if handler.rpc_key.startswith("server:"):
            pool_src, pool_dst = self._client_pool, self._server_pool
            timeout = self.timeout_server
        else:
            pool_src, pool_dst = self._server_pool, self._client_pool
            timeout = self.timeout_client
        key = handler.match_key
        if key in pool_src:
            # Counterpart already waiting: pair immediately.
            self._pair_up(pool_src.pop(key), handler)
            return
        if key not in pool_dst:
            # Wait in our own pool, but evict ourselves after the timeout.
            pool_dst[key] = handler

            def cleanup():
                """Cleanup client connection if timeout"""
                if pool_dst.get(key, None) == handler:
                    logging.info(
                        "Timeout client connection %s, cannot find match key=%s",
                        handler.name(),
                        key,
                    )
                    pool_dst.pop(key)
                    handler.send_data(struct.pack("<i", base.RPC_CODE_MISMATCH))
                    handler.signal_close()

            self.loop.call_later(timeout, cleanup)
        else:
            logging.info("Duplicate connection with same key=%s", key)
            handler.send_data(struct.pack("<i", base.RPC_CODE_DUPLICATE))
            handler.signal_close()

    def handler_ready(self, handler):
        """Report handler to be ready."""
        logging.info("Handler ready %s", handler.name())
        if self._tracker_addr:
            self._handler_ready_tracker_mode(handler)
        else:
            self._handler_ready_proxy_mode(handler)

    def run(self):
        """Run the proxy server"""
        ioloop.IOLoop.current().start()
def _proxy_server(
    listen_sock,
    listen_port,
    web_port,
    timeout_client,
    timeout_server,
    tracker_addr,
    index_page,
    resource_files,
):
    """Thread entry point: install a fresh asyncio loop and serve forever."""
    asyncio.set_event_loop(asyncio.new_event_loop())
    server = ProxyServerHandler(
        listen_sock,
        listen_port,
        web_port,
        timeout_client,
        timeout_server,
        tracker_addr,
        index_page,
        resource_files,
    )
    server.run()
class PopenProxyServerState(object):
    """Internal PopenProxy State for Popen.

    Binds the listening socket to the first free port in
    ``[port, port_end)`` and starts the proxy event loop on a background
    thread so the bound port can be returned immediately.
    """

    current = None

    def __init__(
        self,
        host,
        port=9091,
        port_end=9199,
        web_port=0,
        timeout_client=600,
        timeout_server=600,
        tracker_addr=None,
        index_page=None,
        resource_files=None,
    ):
        sock = socket.socket(base.get_addr_family((host, port)), socket.SOCK_STREAM)
        self.port = None
        for my_port in range(port, port_end):
            try:
                sock.bind((host, my_port))
                self.port = my_port
                break
            except socket.error as sock_err:
                if sock_err.errno in [errno.EADDRINUSE]:
                    # Port taken: try the next one.
                    continue
                # FIX: release the socket before propagating an unexpected
                # bind failure (the original leaked the fd here).
                sock.close()
                raise sock_err
        if not self.port:
            # FIX: no port could be bound; do not leak the socket.
            sock.close()
            raise ValueError(f"cannot bind to any port in [{port}, {port_end})")
        logging.info("RPCProxy: client port bind to %s:%d", host, self.port)
        sock.listen(1)
        self.thread = threading.Thread(
            target=_proxy_server,
            args=(
                sock,
                self.port,
                web_port,
                timeout_client,
                timeout_server,
                tracker_addr,
                index_page,
                resource_files,
            ),
        )
        # start the server in a different thread
        # so we can return the port directly
        self.thread.start()
def _popen_start_proxy_server(
    host,
    port=9091,
    port_end=9199,
    web_port=0,
    timeout_client=600,
    timeout_server=600,
    tracker_addr=None,
    index_page=None,
    resource_files=None,
):
    """Target function executed inside the PopenWorker process.

    Builds the proxy server state (which also starts the serving thread)
    and returns the bound port so the parent process can read it back.
    """
    state = PopenProxyServerState(
        host,
        port,
        port_end,
        web_port,
        timeout_client,
        timeout_server,
        tracker_addr,
        index_page,
        resource_files,
    )
    # Keep the state alive for the lifetime of the worker process.
    PopenProxyServerState.current = state
    return state.port
class Proxy(object):
    """RPC proxy server running in a separate process.

    Python implementation based on PopenWorker.

    Parameters
    ----------
    host : str
        The host url of the server.
    port : int
        The TCP port to be bind to
    port_end : int, optional
        The end TCP port to search
    web_port : int, optional
        The http/websocket port of the server.
    timeout_client : float, optional
        Timeout of client until it sees a matching connection.
    timeout_server : float, optional
        Timeout of server until it sees a matching connection.
    tracker_addr: Tuple (str, int) , optional
        The address of RPC Tracker in tuple (host, ip) format.
        If is not None, the server will register itself to the tracker.
    index_page : str, optional
        Path to an index page that can be used to display at proxy index.
    resource_files : str, optional
        Path to local resources that can be included in the http request
    """

    def __init__(
        self,
        host,
        port=9091,
        port_end=9199,
        web_port=0,
        timeout_client=600,
        timeout_server=600,
        tracker_addr=None,
        index_page=None,
        resource_files=None,
    ):
        self.proc = PopenWorker()
        # Ship the server entry point plus its arguments to the worker process.
        server_args = [
            host,
            port,
            port_end,
            web_port,
            timeout_client,
            timeout_server,
            tracker_addr,
            index_page,
            resource_files,
        ]
        self.proc.send(_popen_start_proxy_server, server_args)
        # The worker replies with the port it actually managed to bind.
        self.port = self.proc.recv()
        self.host = host

    def terminate(self):
        """Terminate the server process"""
        if self.proc:
            logging.info("Terminating Proxy Server...")
            self.proc.kill()
            self.proc = None

    def __del__(self):
        # Modules may already be torn down at interpreter exit; ignore that.
        try:
            self.terminate()
        except ImportError:
            pass
def websocket_proxy_server(url, key=""):
    """Create a RPC server that uses an websocket that connects to a proxy.

    Blocks the current thread running the tornado IOLoop until the
    websocket connection is closed.

    Parameters
    ----------
    url : str
        The url to be connected.
    key : str
        The key to identify the server.
    """

    def create_on_message(conn):
        # Build the FFI event-driven server; _fsend pushes outgoing bytes
        # onto the websocket and reports how many were written.
        def _fsend(data):
            data = bytes(data)
            conn.write_message(data, binary=True)
            return len(data)

        on_message = _ffi_api.CreateEventDrivenServer(_fsend, "WebSocketProxyServer", "%toinit")
        return on_message

    @gen.coroutine
    def _connect(key):
        conn = yield websocket.websocket_connect(url)
        on_message = create_on_message(conn)
        temp = _server_env(None)
        # Start connecton
        # Handshake: magic, then key length, then the key itself.
        conn.write_message(struct.pack("<i", base.RPC_MAGIC), binary=True)
        key = "server:" + key
        conn.write_message(struct.pack("<i", len(key)), binary=True)
        conn.write_message(key.encode("utf-8"), binary=True)
        msg = yield conn.read_message()
        assert len(msg) >= 4
        magic = struct.unpack("<i", msg[:4])[0]
        if magic == base.RPC_CODE_DUPLICATE:
            raise RuntimeError(f"key: {key} has already been used in proxy")
        if magic == base.RPC_CODE_MISMATCH:
            logging.info("RPCProxy do not have matching client key %s", key)
        elif magic != base.RPC_CODE_SUCCESS:
            raise RuntimeError(f"{url} is not RPC Proxy")
        msg = msg[4:]
        logging.info("Connection established with remote")
        if msg:
            # Feed any payload that arrived in the same frame as the reply.
            on_message(bytearray(msg), 3)
        # Pump incoming websocket frames into the event-driven server until
        # the connection closes.
        while True:
            try:
                msg = yield conn.read_message()
                if msg is None:
                    break
                on_message(bytearray(msg), 3)
            except websocket.WebSocketClosedError as err:
                break
        logging.info("WebSocketProxyServer closed...")
        temp.remove()
        ioloop.IOLoop.current().stop()

    ioloop.IOLoop.current().spawn_callback(_connect, key)
    ioloop.IOLoop.current().start()
| 23,729 | 32.375527 | 98 | py |
tvm | tvm-main/python/tvm/rpc/server_ios_launcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Python wrapper for running a RPC Server through iOS RPC
on the iOS simulator using the simctl command line tool.
"""
# pylint: disable=invalid-name
import os
import json
import time
import threading
import subprocess
from enum import Enum
from typing import Dict, List, AnyStr
class OSName(Enum):
    """The names of the operating systems available on the simulator.

    The values are matched as substrings against the runtime identifiers
    reported by ``xcrun simctl`` (see ``grep_by_system``).
    """

    iOS = "iOS"
    tvOS = "tvOS"
    watchOS = "watchOS"
class IOSDevice(Enum):
    """The names of available iOS devices.

    The values are matched as substrings against simulator device names
    (see ``grep_by_device``).
    """

    iPhone = "iPhone"
    iPod = "iPod"
    iPad = "iPad"
class RPCServerMode(Enum):
    """Server modes available in the iOS RPC application.

    The value is passed verbatim as ``--server_mode`` when launching the
    iOS RPC app (see ``launch_ios_rpc``).
    """

    standalone = "standalone"
    proxy = "proxy"
    tracker = "tracker"
def get_list_of_available_simulators() -> Dict[AnyStr, List]:
    """Return the simulators available on the system, grouped by runtime.

    Runs ``xcrun simctl list devices available --json`` and returns its
    "devices" mapping with empty runtimes filtered out. Keys are the
    runtime (operating system) identifiers; values are lists of simulator
    descriptions for that runtime.
    """
    list_cmd = "xcrun simctl list devices available --json"
    with subprocess.Popen(
        list_cmd,
        shell=True,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
    ) as proc:
        raw_json, _ = proc.communicate()
    simulators = json.loads(raw_json)["devices"]
    return {runtime: devices for runtime, devices in simulators.items() if devices != []}
def grep_by_system(available_devices: Dict[AnyStr, List], system_name: OSName) -> List[Dict]:
    """Search for simulators that use the target operating system.

    Parameters
    ----------
    available_devices : Dict[AnyStr, List]
        Mapping of runtime identifier to the simulators of that runtime
        (as returned by ``get_list_of_available_simulators``).
    system_name : OSName
        The operating system to look for.

    Returns
    -------
    List[Dict]
        The simulator list of the first runtime whose identifier contains
        ``system_name.value`` as a substring.

    Raises
    ------
    ValueError
        If no runtime identifier contains the requested OS name.
    """

    def find_index_of_substr(search_field: List[AnyStr], target: AnyStr) -> int:
        # Index of the first entry containing `target` as a substring.
        for i, item in enumerate(search_field):
            if target in item:
                return i
        # FIX: corrected wording of the original message
        # ("Search field doesn't content target").
        raise ValueError("Search field doesn't contain target")

    keys = list(available_devices.keys())
    return available_devices[keys[find_index_of_substr(keys, system_name.value)]]
def grep_by_device(available_devices: List[Dict], device_name: IOSDevice) -> List[Dict]:
    """Filter *available_devices* down to those emulating *device_name*.

    A simulator matches when ``device_name.value`` appears as a substring
    of its "name" field.
    """
    matched = []
    for candidate in available_devices:
        if device_name.value in candidate["name"]:
            matched.append(candidate)
    return matched
def get_device_uid(target_device: Dict) -> AnyStr:
    """Return the unique identifier (``udid``) recorded for *target_device*."""
    udid = target_device["udid"]
    return udid
def check_call_with_runtime_error(cmd: AnyStr, error_message: AnyStr) -> None:
    """Run *cmd* (split on spaces) and raise RuntimeError on failure.

    Parameters
    ----------
    cmd : str
        Command line to execute; split naively on single spaces.
    error_message : str
        Message of the RuntimeError raised when the command fails.

    Raises
    ------
    RuntimeError
        If the command exits with a non-zero status. The original
        CalledProcessError is preserved as the chained ``__cause__``.
    """
    try:
        subprocess.check_call(cmd.split(" "))
    except subprocess.CalledProcessError as called_process_error:
        # FIX: the original had the chaining inverted
        # (`raise called_process_error from RuntimeError(error_message)`),
        # which re-raised CalledProcessError and defeated the
        # `except RuntimeError` handlers in ServerIOSLauncher.terminate()
        # and shutdown_booted_devices().
        raise RuntimeError(error_message) from called_process_error
def boot_device(udid: AnyStr) -> None:
    """Boot the simulator identified by *udid*; RuntimeError when it fails
    or the device does not reach the Booted state."""
    error_message = f"Failed to boot device with unique id: {udid}"
    check_call_with_runtime_error(f"xcrun simctl boot {udid}", error_message)
    if not is_booted(udid):
        raise RuntimeError(error_message)
def shutdown_device(udid: AnyStr) -> None:
    """Shut down the simulator identified by *udid*; RuntimeError when it
    fails or the device does not reach the Shutdown state."""
    error_message = f"Failed to shut down device with unique id: {udid}"
    check_call_with_runtime_error(f"xcrun simctl shutdown {udid}", error_message)
    if not is_turned_off(udid):
        raise RuntimeError(error_message)
def deploy_bundle_to_simulator(udid: AnyStr, bundle_path: AnyStr) -> None:
    """Install the iOS RPC bundle at *bundle_path* on simulator *udid*."""
    install_cmd = f"xcrun simctl install {udid} {bundle_path}"
    check_call_with_runtime_error(
        install_cmd,
        f"Failed to deploy bundle <{bundle_path}> to device with unique id: {udid}",
    )
def delete_bundle_from_simulator(udid: AnyStr, bundle_id: AnyStr) -> None:
    """Uninstall the iOS RPC bundle *bundle_id* from simulator *udid*."""
    uninstall_cmd = f"xcrun simctl uninstall {udid} {bundle_id}"
    check_call_with_runtime_error(
        uninstall_cmd,
        f"Failed to uninstall bundle <{bundle_id}> from device with unique id: {udid}",
    )
def launch_ios_rpc(
    udid: AnyStr, bundle_id: AnyStr, host_url: AnyStr, host_port: int, key: AnyStr, mode: AnyStr
):  # pylint: disable=too-many-arguments, consider-using-with
    """
    Launch iOS RPC application on simulator with No UI interconnection.

    udid : str
        Unique device ID.
    bundle_id : str
        iOS RPC bundle ID.
    host_url : str
        The tracker/proxy address.
    host_port : int
        The tracker/proxy port.
    key : str
        The key used to identify the device type in tracker.
    mode : str
        Server mode. See RPCServerMode.

    Returns the Popen handle of the launched `simctl` process; its stdout
    carries the app's console output.
    """
    launch_cmd = " ".join(
        [
            f"xcrun simctl launch --console {udid} {bundle_id}",
            "--immediate_connect",
            f"--host_url={host_url}",
            f"--host_port={host_port}",
            f"--key={key}",
            f"--server_mode={mode}",
            "--verbose",
        ]
    )
    return subprocess.Popen(
        launch_cmd.split(" "),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        bufsize=1,
        universal_newlines=True,
    )
def terminate_ios_rpc(udid: AnyStr, bundle_id: AnyStr) -> None:
    """Terminate the running iOS RPC application on simulator *udid*."""
    terminate_cmd = f"xcrun simctl terminate {udid} {bundle_id}"
    check_call_with_runtime_error(
        terminate_cmd,
        f"Failed to terminate bundle <{bundle_id}> from device with unique id: {udid}",
    )
def is_booted(udid: AnyStr) -> bool:
    """True when the simulator with *udid* reports the "Booted" state."""
    return find_device(udid)["state"] == "Booted"
def is_turned_off(udid: AnyStr) -> bool:
    """True when the simulator with *udid* reports the "Shutdown" state."""
    return find_device(udid)["state"] == "Shutdown"
def check_booted_device(devices: List[Dict]) -> Dict:
    """Return the first already-booted device in *devices*, or {} if none."""
    return next((candidate for candidate in devices if candidate["state"] == "Booted"), {})
def find_device(udid: AnyStr) -> Dict:
    """Look up a simulator by its unique ID; return {} when not found."""
    matched = {}
    for device_group in get_list_of_available_simulators().values():
        for candidate in device_group:
            if candidate["udid"] == udid:
                matched = candidate
    return matched
class ServerIOSLauncher:
    """
    Python wrapper for launch iOS RPC to simulator.

    Boots (or reuses) an iPhone simulator, installs the iOS RPC bundle
    from the BUNDLE_PATH environment variable and launches it, then waits
    for the app's console output to confirm the server is up.

    Parameters
    ----------
    mode : str
        Server mode. See RPCServerMode.
    host : str
        The tracker/proxy address.
    port : int
        The tracker/proxy port.
    key : str
        The key used to identify the device type in tracker.
    """

    # Devices booted by this class; released via shutdown_booted_devices().
    booted_devices = []
    # Bundle identifier and path of the iOS RPC app, from the environment.
    bundle_id = os.environ.get("BUNDLE_ID")
    bundle_path = os.environ.get("BUNDLE_PATH")

    class ConsoleMarkers(Enum):
        """
        Marker-messages that iOS RPC Server should print to the console output
        when its states change (see apps/ios_rpc/tvmrpc/RPCServer.mm).

        STOPPED : str
            iOS RPC Server process was stopped
        CALLSTACK : str
            Call stack if RPC Server was stopped with an error.
        CONNECTED : str
            RPC Server reports that it successfully connected.
        SERVER_IP : str
            IP on which RPC Server started (for standalone mode).
        SERVER_PORT : str
            HOST on which RPC Server started (for standalone mode).
        """

        STOPPED = "PROCESS_STOPPED"
        CALLSTACK = "First throw call stack"
        CONNECTED = "[IOS-RPC] STATE: 2"
        SERVER_IP = "[IOS-RPC] IP: "
        SERVER_PORT = "[IOS-RPC] PORT: "

    def __init__(self, mode, host, port, key):
        if not ServerIOSLauncher.is_compatible_environment():
            raise RuntimeError(
                "Can't create ServerIOSLauncher instance."
                " No environment variables set for iOS RPC Server."
            )
        self.host = host
        self.port = port
        self.external_booted_device = None
        # Reuse an already-booted simulator if we have one; otherwise boot
        # a new one (or adopt an externally booted device).
        if not ServerIOSLauncher.booted_devices:
            self._boot_or_find_booted_device()
        self.udid = get_device_uid(
            self.external_booted_device
            if self.external_booted_device is not None
            else ServerIOSLauncher.booted_devices[-1]
        )
        # Flags used by terminate() to undo only the steps that completed.
        self.bundle_was_deployed = False
        deploy_bundle_to_simulator(self.udid, self.bundle_path)
        self.bundle_was_deployed = True
        self.server_was_started = False
        self.launch_process = launch_ios_rpc(self.udid, self.bundle_id, host, port, key, mode)
        # In standalone mode the app prints the host/port it actually bound.
        self._wait_launch_complete(
            waiting_time=60,
            hz=10,
            should_print_host_and_port=mode == RPCServerMode.standalone.value,
        )
        self.server_was_started = True

    def terminate(self):
        """Terminate iOS RPC server."""
        if self.bundle_was_deployed and self.server_was_started:
            try:
                terminate_ios_rpc(self.udid, self.bundle_id)
                self.launch_process.terminate()
                self.server_was_started = False
            except RuntimeError as e:
                print(e)
        if self.bundle_was_deployed:
            try:
                delete_bundle_from_simulator(self.udid, self.bundle_id)
                self.bundle_was_deployed = False
            except RuntimeError as e:
                print(e)

    def __del__(self):
        # Modules may already be torn down at interpreter exit; ignore that.
        try:
            self.terminate()
        except ImportError:
            pass

    @staticmethod
    def is_compatible_environment():
        """Check that the current environment has the required variables."""
        return bool(os.environ.get("BUNDLE_ID")) and bool(os.environ.get("BUNDLE_PATH"))

    @staticmethod
    def shutdown_booted_devices():
        """Shutdown simulators that have been booted using this class."""
        for device_meta in ServerIOSLauncher.booted_devices:
            try:
                shutdown_device(get_device_uid(device_meta))
            except RuntimeError as e:
                print(e)
        ServerIOSLauncher.booted_devices = []

    def _boot_or_find_booted_device(self):
        """
        Boot the required simulator if there is no suitable booted simulator
        among the available simulators. If there is a suitable booted simulator,
        then take it as a simulator to which the iOS RPC application will be deployed.
        """
        target_system = OSName.iOS
        target_device_type = IOSDevice.iPhone
        available_devices = get_list_of_available_simulators()
        if not available_devices:
            raise ValueError("No devices available in this environment")
        target_devices = grep_by_system(available_devices, target_system)
        if not target_devices:
            raise ValueError(f"No available simulators for target system: {target_system.value}")
        target_devices = grep_by_device(target_devices, target_device_type)
        if not target_devices:
            raise ValueError(
                f"No available simulators for target device type: {target_device_type.value}"
            )
        maybe_booted = check_booted_device(target_devices)
        if maybe_booted:
            # Adopt the externally booted device; we will not shut it down.
            self.external_booted_device = maybe_booted
        else:
            take_latest_model = True
            target_device = target_devices[-1 if take_latest_model else 0]
            boot_device(get_device_uid(target_device))
            ServerIOSLauncher.booted_devices.append(target_device)

    def _wait_launch_complete(self, waiting_time, hz, should_print_host_and_port=False):
        # pylint: disable=too-many-locals
        """
        Wait for the iOS RPC server to start.

        waiting_time : int
            The maximum waiting time during which it is necessary
            to receive a message from RPC Server.
        hz : int
            The frequency of checking (in hertz) messages from RPC Server.
            Checks for messages from the server will occur every 1 / hz second.
        should_print_host_and_port : bool
            A flag that indicates that RPC Server should print the host and port
            on which it was started.
            Used for standalone mode.
        """

        class Switch:
            """A simple helper class for boolean switching."""

            def __init__(self):
                self._on = False

            def toggle(self):
                """Toggle flag."""
                self._on = not self._on

            @property
            def on(self):
                """Flag of this switch."""
                return self._on

        def watchdog():
            # Kill the launch process if no console output arrives in time.
            for _ in range(waiting_time * hz):
                time.sleep(1.0 / hz)
                if switch_have_data.on:
                    break
            if not switch_have_data.on:
                self.launch_process.terminate()
                switch_process_was_terminated.toggle()

        switch_have_data = Switch()
        switch_process_was_terminated = Switch()
        watchdog_thread = threading.Thread(target=watchdog)
        host, port = None, None
        watchdog_thread.start()
        # Scan the app's console output for the state markers.
        for line in self.launch_process.stdout:
            if not switch_have_data.on:
                switch_have_data.toggle()
            found = str(line).find(ServerIOSLauncher.ConsoleMarkers.STOPPED.value)
            if found != -1:
                raise RuntimeError("[ERROR] Crash during RCP Server launch.. ")
            found = str(line).find(ServerIOSLauncher.ConsoleMarkers.CALLSTACK.value)
            if found != -1:
                raise RuntimeError("[ERROR] Crash during RCP Server launch.. ")
            found = str(line).find(ServerIOSLauncher.ConsoleMarkers.SERVER_IP.value)
            if found != -1:
                ip = str(line)[
                    found + len(ServerIOSLauncher.ConsoleMarkers.SERVER_IP.value) :
                ].rstrip("\n")
                host = ip
            found = str(line).find(ServerIOSLauncher.ConsoleMarkers.SERVER_PORT.value)
            if found != -1:
                port = str(line)[
                    found + len(ServerIOSLauncher.ConsoleMarkers.SERVER_PORT.value) :
                ].rstrip("\n")
                port = int(port)
            if str(line).find(ServerIOSLauncher.ConsoleMarkers.CONNECTED.value) != -1:
                # rpc server reports that it successfully connected
                break
        watchdog_thread.join()
        if switch_process_was_terminated.on:
            raise TimeoutError("Can't get a response from the iOS Server.")
        if should_print_host_and_port:
            if host is None or port is None:
                raise RuntimeError("No messages with actual host and port.")
            self.port = port
class ServerIOSContextManager:
    """Context-manager wrapper around ServerIOSLauncher.

    Guarantees that the launcher's ``terminate`` method runs on scope exit,
    even when the body of the ``with`` block raises.
    """

    def __init__(self, mode, host, port, key):
        self.__mode = mode
        self.__host = host
        self.__port = port
        self.__key = key
        self.__ios_rpc_server_launcher = None

    def __enter__(self):
        self.__ios_rpc_server_launcher = ServerIOSLauncher(
            self.__mode, self.__host, self.__port, self.__key
        )
        return self.__ios_rpc_server_launcher

    def __exit__(self, exc_type, exc_val, exc_tb):
        launcher = self.__ios_rpc_server_launcher
        if launcher is not None:
            launcher.terminate()
            self.__ios_rpc_server_launcher = None
| 16,475 | 31.820717 | 98 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.