id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
6,531 | import pickle
import os
import numpy as np
import shutil
import torch
import warnings
import hummingbird
from hummingbird.ml._utils import pandas_installed, get_device, from_strings_to_ints, dump_versions, check_dumped_versions
from hummingbird.ml.operator_converters import constants
from hummingbird.ml.containers._sklearn_api_containers import (
SklearnContainer,
SklearnContainerTransformer,
SklearnContainerRegression,
SklearnContainerClassification,
SklearnContainerAnomalyDetection,
)
def from_strings_to_ints(input, max_string_length):
    """
    Encode an array of strings into an int32 representation.

    Each string is stored as a fixed-width byte field of *max_string_length*
    bytes (padded or truncated by numpy) and those bytes are reinterpreted as
    ``max_string_length // 4`` int32 values, so the output carries one extra
    trailing dimension of that size.
    """
    # Output keeps the input shape plus the packed-int32 trailing axis.
    out_shape = list(input.shape) + [max_string_length // 4]
    fixed_width = np.array(input, dtype="|S" + str(max_string_length))
    return fixed_width.view(np.int32).reshape(out_shape)
The provided code snippet includes necessary dependencies for implementing the `_torchscript_wrapper` function. Write a Python function `def _torchscript_wrapper(device, function, *inputs, extra_config={})` to solve the following problem:
This function contains the code to enable predictions over torchscript models. It is used to translate inputs into the proper torch format.
Here is the function:
def _torchscript_wrapper(device, function, *inputs, extra_config={}):
"""
This function contains the code to enable predictions over torchscript models.
It is used to translates inputs in the proper torch format.
"""
inputs = [*inputs]
with torch.no_grad():
if DataFrame is not None and isinstance(inputs, DataFrame):
# Split the dataframe into column ndarrays
inputs = inputs[0]
input_names = list(inputs.columns)
splits = [inputs[input_names[idx]] for idx in range(len(input_names))]
splits = [df.to_numpy().reshape(-1, 1) for df in splits]
inputs = tuple(splits)
# Maps data inputs to the expected type and device.
for i in range(len(inputs)):
if type(inputs[i]) is list:
inputs[i] = np.array(inputs[i])
if type(inputs[i]) is np.ndarray:
# Convert string arrays into int32.
if inputs[i].dtype.kind in constants.SUPPORTED_STRING_TYPES:
assert constants.MAX_STRING_LENGTH in extra_config
inputs[i] = from_strings_to_ints(inputs[i], extra_config[constants.MAX_STRING_LENGTH])
if inputs[i].dtype == np.float64:
# We convert double precision arrays into single precision. Sklearn does the same.
inputs[i] = inputs[i].astype("float32")
inputs[i] = torch.from_numpy(inputs[i])
elif type(inputs[i]) is not torch.Tensor:
raise RuntimeError("Inputer tensor {} of not supported type {}".format(i, type(inputs[i])))
if device.type != "cpu" and device is not None:
inputs[i] = inputs[i].to(device)
return function(*inputs) | This function contains the code to enable predictions over torchscript models. It is used to translates inputs in the proper torch format. |
6,532 | from copy import deepcopy
import psutil
import numpy as np
from .operator_converters import constants
from ._parse import parse_sklearn_api_model, parse_onnx_api_model, parse_sparkml_api_model
from ._topology import convert as topology_converter
from ._utils import (
assert_torch_installed,
assert_lightgbm_installed,
assert_xgboost_installed,
pandas_installed,
sparkml_installed,
is_pandas_dataframe,
is_spark_dataframe,
tvm_installed,
)
from .exceptions import MissingConverter, MissingBackend
from .supported import backends
from sklearn.utils import all_estimators
from sklearn.utils.validation import check_is_fitted
from . import operator_converters
from .supported import xgb_operator_list
from .supported import lgbm_operator_list
def _convert_common(model, backend, test_input=None, device="cpu", extra_config={}):
    """
    A common function called by convert(...) and convert_batch(...) below.

    Args:
        model: The input model (Sklearn, LightGBM, XGBoost, ONNX, or SparkML)
        backend: The conversion target, as a string (normalized to lower case below)
        test_input: Optional sample data used to trace the model execution
        device: The target device ('cpu' or 'cuda'); used by the torch backends and tvm
        extra_config: Extra configurations for the operator converters.
            NOTE: it is deep-copied immediately, so the caller's dict is never mutated.

    Returns:
        The converted model, produced by the model-family specific converter
    """
    assert model is not None

    # We destroy extra_config during conversion, we create a copy here.
    extra_config = deepcopy(extra_config)

    # Set some default configurations.
    # Add test input as extra configuration for conversion.
    # len() is only probed for non-Spark inputs; Spark dataframes have no len().
    if (
        test_input is not None
        and constants.TEST_INPUT not in extra_config
        and (is_spark_dataframe(test_input) or len(test_input) > 0)
    ):
        extra_config[constants.TEST_INPUT] = test_input
    # By default we return the converted model wrapped into a `hummingbird.ml._container.SklearnContainer` object.
    if constants.CONTAINER not in extra_config:
        extra_config[constants.CONTAINER] = True
    # By default we set num of intra-op parallelism to be the number of physical cores available
    if constants.N_THREADS not in extra_config:
        extra_config[constants.N_THREADS] = psutil.cpu_count(logical=False)

    # Fix the test_input type
    if constants.TEST_INPUT in extra_config:
        if isinstance(extra_config[constants.TEST_INPUT], list):
            # A plain list is normalized into a single ndarray.
            extra_config[constants.TEST_INPUT] = np.array(extra_config[constants.TEST_INPUT])
        elif isinstance(extra_config[constants.TEST_INPUT], tuple):
            # We are passing multiple datasets.
            assert all(
                [isinstance(input, np.ndarray) for input in extra_config[constants.TEST_INPUT]]
            ), "When passing multiple inputs only ndarrays are supported."
            assert all([len(input.shape) == 2 for input in extra_config[constants.TEST_INPUT]])
            extra_config[constants.N_FEATURES] = sum([input.shape[1] for input in extra_config[constants.TEST_INPUT]])
            extra_config[constants.N_INPUTS] = len(extra_config[constants.TEST_INPUT])
        elif pandas_installed() and is_pandas_dataframe(extra_config[constants.TEST_INPUT]):
            # We split the input dataframe into columnar ndarrays
            extra_config[constants.N_INPUTS] = len(extra_config[constants.TEST_INPUT].columns)
            extra_config[constants.N_FEATURES] = extra_config[constants.N_INPUTS]
            input_names = list(extra_config[constants.TEST_INPUT].columns)
            splits = [extra_config[constants.TEST_INPUT][input_names[idx]] for idx in range(extra_config[constants.N_INPUTS])]
            splits = [df.to_numpy().reshape(-1, 1) for df in splits]
            # A single-column frame degenerates into one ndarray instead of a 1-tuple.
            extra_config[constants.TEST_INPUT] = tuple(splits) if len(splits) > 1 else splits[0]
            extra_config[constants.INPUT_NAMES] = input_names
        elif sparkml_installed() and is_spark_dataframe(extra_config[constants.TEST_INPUT]):
            # pyspark is imported lazily so the module works without Spark installed.
            from pyspark.ml.linalg import DenseVector, SparseVector, VectorUDT
            from pyspark.sql.types import ArrayType, FloatType, DoubleType, IntegerType, LongType

            df = extra_config[constants.TEST_INPUT]
            input_names = [field.name for field in df.schema.fields]
            extra_config[constants.N_INPUTS] = len(input_names)
            extra_config[constants.N_FEATURES] = extra_config[constants.N_INPUTS]
            size = df.count()
            # Only the first row is inspected to derive per-column dtype/shape;
            # zero-filled placeholders with the full row count stand in for the data.
            row_dict = df.take(1)[0].asDict()
            splits = []
            for field in df.schema.fields:
                data_col = row_dict[field.name]
                spark_dtype = type(field.dataType)
                shape = 1
                if spark_dtype in [DenseVector, VectorUDT]:
                    np_dtype = np.float64
                    shape = data_col.array.shape[0]
                elif spark_dtype == SparseVector:
                    np_dtype = np.float64
                    shape = data_col.size
                elif spark_dtype == ArrayType:
                    np_dtype = np.float64
                    shape = len(data_col)
                elif spark_dtype == IntegerType:
                    np_dtype = np.int32
                elif spark_dtype == FloatType:
                    np_dtype = np.float32
                elif spark_dtype == DoubleType:
                    np_dtype = np.float64
                elif spark_dtype == LongType:
                    np_dtype = np.int64
                else:
                    raise ValueError("Unrecognized data type: {}".format(spark_dtype))
                splits.append(np.zeros((size, shape), np_dtype))
            extra_config[constants.TEST_INPUT] = tuple(splits) if len(splits) > 1 else splits[0]
            extra_config[constants.INPUT_NAMES] = input_names

        # Re-sync the local with the normalized value for the dispatch calls below.
        test_input = extra_config[constants.TEST_INPUT]

    # We do some normalization on backends.
    if not isinstance(backend, str):
        raise ValueError("Backend must be a string: {}".format(backend))
    backend_formatted = backend.lower()
    backend_formatted = backends[backend_formatted]

    # Check whether we actually support the backend.
    _supported_backend_check(backend_formatted, backend)
    _supported_backend_check_config(model, backend_formatted, extra_config)

    # Dispatch to the model-family specific converter.
    if type(model) in xgb_operator_list:
        return _convert_xgboost(model, backend_formatted, test_input, device, extra_config)
    if type(model) in lgbm_operator_list:
        return _convert_lightgbm(model, backend_formatted, test_input, device, extra_config)
    if _is_onnx_model(model):
        return _convert_onnxml(model, backend_formatted, test_input, device, extra_config)
    if _is_sparkml_model(model):
        return _convert_sparkml(model, backend_formatted, test_input, device, extra_config)
    return _convert_sklearn(model, backend_formatted, test_input, device, extra_config)
The provided code snippet includes necessary dependencies for implementing the `convert` function. Write a Python function `def convert(model, backend, test_input=None, device="cpu", extra_config={})` to solve the following problem:
This function converts the specified input *model* into an implementation targeting *backend*. *Convert* supports [Sklearn], [LightGBM], [XGBoost], [ONNX], and [SparkML] models. For *LightGBM* and *XGBoost* currently only the Sklearn API is supported. The detailed list of models and backends can be found at `hummingbird.ml.supported`. The *onnx* backend works best with a test_input, but we try to generate one if none is provided. The *torch.jit* and *tvm* backends require a test_input. For *tvm* backend, the output container can do prediction only on the test data with the same size as test_input. [Sklearn]: https://scikit-learn.org/ [LightGBM]: https://lightgbm.readthedocs.io/ [XGBoost]: https://xgboost.readthedocs.io/ [ONNX]: https://onnx.ai/ [ONNX-ML]: https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md [ONNX operators]: https://github.com/onnx/onnx/blob/master/docs/Operators.md [Spark-ML]: https://spark.apache.org/docs/latest/api/python/pyspark.ml.html Args: model: An input model backend: The target for the conversion test_input: Some input data used to trace the model execution. Multiple inputs can be passed as `tuple` objects or pandas Dataframes. When possible, (`numpy`)`arrays` are suggested. The number of rows becomes the batch size when tracing PyTorch models and compiling with TVM. device: The target device the model should be run. This parameter is only used by the *torch** backends and *tvm*, and the devices supported are the one supported by PyTorch, i.e., 'cpu' or 'cuda'. extra_config: Extra configurations to be used by the individual operator converters. The set of supported extra configurations can be found at `hummingbird.ml.supported` Examples: >>> pytorch_model = convert(sklearn_model,`torch`) Returns: A model implemented in *backend*, which is equivalent to the input model
Here is the function:
def convert(model, backend, test_input=None, device="cpu", extra_config={}):
    """
    Translate *model* into an equivalent implementation targeting *backend*.

    Supported inputs are [Sklearn], [LightGBM], [XGBoost], [ONNX], and [SparkML]
    models (LightGBM and XGBoost only through their Sklearn APIs). The detailed
    list of models and backends can be found at `hummingbird.ml.supported`.
    The *onnx* backend works best with a test_input, but we try to generate one
    if none is provided. The *torch.jit* and *tvm* backends require a test_input.
    For *tvm* backend, the output container can do prediction only on the test
    data with the same size as test_input.
    [Sklearn]: https://scikit-learn.org/
    [LightGBM]: https://lightgbm.readthedocs.io/
    [XGBoost]: https://xgboost.readthedocs.io/
    [ONNX]: https://onnx.ai/
    [ONNX-ML]: https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md
    [ONNX operators]: https://github.com/onnx/onnx/blob/master/docs/Operators.md
    [Spark-ML]: https://spark.apache.org/docs/latest/api/python/pyspark.ml.html

    Args:
        model: The model to convert
        backend: Name of the conversion target
        test_input: Sample data used to trace the model execution. Multiple
            inputs can be passed as `tuple` objects or pandas Dataframes; when
            possible, (`numpy`)`arrays` are suggested. The number of rows
            becomes the batch size when tracing PyTorch models and compiling
            with TVM.
        device: 'cpu' or 'cuda'; honored only by the *torch** backends and *tvm*
        extra_config: Per-converter configuration options; the supported keys
            are listed at `hummingbird.ml.supported`

    Examples:
        >>> pytorch_model = convert(sklearn_model,`torch`)

    Returns:
        A model implemented in *backend*, which is equivalent to the input model
    """
    # REMAINDER_SIZE is reserved for convert_batch(...) and must not appear here.
    assert constants.REMAINDER_SIZE not in extra_config

    return _convert_common(model, backend, test_input, device, extra_config)
6,533 | from copy import deepcopy
import psutil
import numpy as np
from .operator_converters import constants
from ._parse import parse_sklearn_api_model, parse_onnx_api_model, parse_sparkml_api_model
from ._topology import convert as topology_converter
from ._utils import (
assert_torch_installed,
assert_lightgbm_installed,
assert_xgboost_installed,
pandas_installed,
sparkml_installed,
is_pandas_dataframe,
is_spark_dataframe,
tvm_installed,
)
from .exceptions import MissingConverter, MissingBackend
from .supported import backends
from sklearn.utils import all_estimators
from sklearn.utils.validation import check_is_fitted
from . import operator_converters
from .supported import xgb_operator_list
from .supported import lgbm_operator_list
def _convert_common(model, backend, test_input=None, device="cpu", extra_config={}):
    """
    A common function called by convert(...) and convert_batch(...) below.

    Args:
        model: The input model (Sklearn, LightGBM, XGBoost, ONNX, or SparkML)
        backend: The conversion target, as a string (normalized to lower case below)
        test_input: Optional sample data used to trace the model execution
        device: The target device ('cpu' or 'cuda'); used by the torch backends and tvm
        extra_config: Extra configurations for the operator converters.
            NOTE: it is deep-copied immediately, so the caller's dict is never mutated.

    Returns:
        The converted model, produced by the model-family specific converter
    """
    assert model is not None

    # We destroy extra_config during conversion, we create a copy here.
    extra_config = deepcopy(extra_config)

    # Set some default configurations.
    # Add test input as extra configuration for conversion.
    # len() is only probed for non-Spark inputs; Spark dataframes have no len().
    if (
        test_input is not None
        and constants.TEST_INPUT not in extra_config
        and (is_spark_dataframe(test_input) or len(test_input) > 0)
    ):
        extra_config[constants.TEST_INPUT] = test_input
    # By default we return the converted model wrapped into a `hummingbird.ml._container.SklearnContainer` object.
    if constants.CONTAINER not in extra_config:
        extra_config[constants.CONTAINER] = True
    # By default we set num of intra-op parallelism to be the number of physical cores available
    if constants.N_THREADS not in extra_config:
        extra_config[constants.N_THREADS] = psutil.cpu_count(logical=False)

    # Fix the test_input type
    if constants.TEST_INPUT in extra_config:
        if isinstance(extra_config[constants.TEST_INPUT], list):
            # A plain list is normalized into a single ndarray.
            extra_config[constants.TEST_INPUT] = np.array(extra_config[constants.TEST_INPUT])
        elif isinstance(extra_config[constants.TEST_INPUT], tuple):
            # We are passing multiple datasets.
            assert all(
                [isinstance(input, np.ndarray) for input in extra_config[constants.TEST_INPUT]]
            ), "When passing multiple inputs only ndarrays are supported."
            assert all([len(input.shape) == 2 for input in extra_config[constants.TEST_INPUT]])
            extra_config[constants.N_FEATURES] = sum([input.shape[1] for input in extra_config[constants.TEST_INPUT]])
            extra_config[constants.N_INPUTS] = len(extra_config[constants.TEST_INPUT])
        elif pandas_installed() and is_pandas_dataframe(extra_config[constants.TEST_INPUT]):
            # We split the input dataframe into columnar ndarrays
            extra_config[constants.N_INPUTS] = len(extra_config[constants.TEST_INPUT].columns)
            extra_config[constants.N_FEATURES] = extra_config[constants.N_INPUTS]
            input_names = list(extra_config[constants.TEST_INPUT].columns)
            splits = [extra_config[constants.TEST_INPUT][input_names[idx]] for idx in range(extra_config[constants.N_INPUTS])]
            splits = [df.to_numpy().reshape(-1, 1) for df in splits]
            # A single-column frame degenerates into one ndarray instead of a 1-tuple.
            extra_config[constants.TEST_INPUT] = tuple(splits) if len(splits) > 1 else splits[0]
            extra_config[constants.INPUT_NAMES] = input_names
        elif sparkml_installed() and is_spark_dataframe(extra_config[constants.TEST_INPUT]):
            # pyspark is imported lazily so the module works without Spark installed.
            from pyspark.ml.linalg import DenseVector, SparseVector, VectorUDT
            from pyspark.sql.types import ArrayType, FloatType, DoubleType, IntegerType, LongType

            df = extra_config[constants.TEST_INPUT]
            input_names = [field.name for field in df.schema.fields]
            extra_config[constants.N_INPUTS] = len(input_names)
            extra_config[constants.N_FEATURES] = extra_config[constants.N_INPUTS]
            size = df.count()
            # Only the first row is inspected to derive per-column dtype/shape;
            # zero-filled placeholders with the full row count stand in for the data.
            row_dict = df.take(1)[0].asDict()
            splits = []
            for field in df.schema.fields:
                data_col = row_dict[field.name]
                spark_dtype = type(field.dataType)
                shape = 1
                if spark_dtype in [DenseVector, VectorUDT]:
                    np_dtype = np.float64
                    shape = data_col.array.shape[0]
                elif spark_dtype == SparseVector:
                    np_dtype = np.float64
                    shape = data_col.size
                elif spark_dtype == ArrayType:
                    np_dtype = np.float64
                    shape = len(data_col)
                elif spark_dtype == IntegerType:
                    np_dtype = np.int32
                elif spark_dtype == FloatType:
                    np_dtype = np.float32
                elif spark_dtype == DoubleType:
                    np_dtype = np.float64
                elif spark_dtype == LongType:
                    np_dtype = np.int64
                else:
                    raise ValueError("Unrecognized data type: {}".format(spark_dtype))
                splits.append(np.zeros((size, shape), np_dtype))
            extra_config[constants.TEST_INPUT] = tuple(splits) if len(splits) > 1 else splits[0]
            extra_config[constants.INPUT_NAMES] = input_names

        # Re-sync the local with the normalized value for the dispatch calls below.
        test_input = extra_config[constants.TEST_INPUT]

    # We do some normalization on backends.
    if not isinstance(backend, str):
        raise ValueError("Backend must be a string: {}".format(backend))
    backend_formatted = backend.lower()
    backend_formatted = backends[backend_formatted]

    # Check whether we actually support the backend.
    _supported_backend_check(backend_formatted, backend)
    _supported_backend_check_config(model, backend_formatted, extra_config)

    # Dispatch to the model-family specific converter.
    if type(model) in xgb_operator_list:
        return _convert_xgboost(model, backend_formatted, test_input, device, extra_config)
    if type(model) in lgbm_operator_list:
        return _convert_lightgbm(model, backend_formatted, test_input, device, extra_config)
    if _is_onnx_model(model):
        return _convert_onnxml(model, backend_formatted, test_input, device, extra_config)
    if _is_sparkml_model(model):
        return _convert_sparkml(model, backend_formatted, test_input, device, extra_config)
    return _convert_sklearn(model, backend_formatted, test_input, device, extra_config)
The provided code snippet includes necessary dependencies for implementing the `convert_batch` function. Write a Python function `def convert_batch(model, backend, test_input, remainder_size=0, device="cpu", extra_config={})` to solve the following problem:
A convert function for batch by batch prediction use cases. For some backends such as TVM, a container returned by `convert(...)` function above has a strict requirement on the allowable input shape. The container returned by this function is more flexible in that it can predict on the input of size `test_input.shape[0] * k + remainder_size`, where `k` is any integer. `test_input.shape[0]`, the number of rows in the `test_input`, is interpreted as a batch size, and at test time prediction proceeds in a batch by batch fashion. See the documentation for *convert(...)* above for more information. Args: model: An input model backend: The target for the conversion test_input: Some input data used to trace the model execution. Multiple inputs can be passed as `tuple` objects or pandas Dataframes. When possible, (`numpy`)`arrays` are suggested. The number of rows becomes the batch size when tracing PyTorch models and compiling with TVM. remainder_size: An integer that together with test_input determines the size of test data that can be predicted. The input to the returned container can be of size `test_input.shape[0] * k + remainder_size`, where `k` is any integer. device: The target device the model should be run. This parameter is only used by the *torch** backends and *tvm*, and the devices supported are the one supported by PyTorch, i.e., 'cpu' or 'cuda'. extra_config: Extra configurations to be used by the individual operator converters. The set of supported extra configurations can be found at `hummingbird.ml.supported` Examples: >>> tvm_model = convert_batch(sklearn_model,`tvm`, X) >>> tvm_model = convert_batch(sklearn_model,`tvm`, X, remainder_size=50) Returns: A `BatchContainer` object that wraps one or two containers created by `convert(...)` function above.
Here is the function:
def convert_batch(model, backend, test_input, remainder_size=0, device="cpu", extra_config={}):
    """
    A convert function for batch by batch prediction use cases.

    For some backends such as TVM, a container returned by `convert(...)` above
    has a strict requirement on the allowable input shape. The container
    returned by this function is more flexible in that it can predict on input
    of size `test_input.shape[0] * k + remainder_size`, where `k` is any
    integer. `test_input.shape[0]`, the number of rows in `test_input`, is
    interpreted as a batch size, and at test time prediction proceeds in a
    batch by batch fashion. See the documentation for *convert(...)* above for
    more information.

    Args:
        model: An input model
        backend: The target for the conversion
        test_input: Some input data used to trace the model execution.
            Multiple inputs can be passed as `tuple` objects or pandas
            Dataframes. When possible, (`numpy`)`arrays` are suggested.
            The number of rows becomes the batch size when tracing PyTorch
            models and compiling with TVM.
        remainder_size: An integer that together with test_input determines the
            size of test data that can be predicted (see above).
        device: The target device the model should be run. This parameter is
            only used by the *torch** backends and *tvm*, and the devices
            supported are the one supported by PyTorch, i.e., 'cpu' or 'cuda'.
        extra_config: Extra configurations to be used by the individual
            operator converters. The set of supported extra configurations can
            be found at `hummingbird.ml.supported`

    Examples:
        >>> tvm_model = convert_batch(sklearn_model,`tvm`, X)
        >>> tvm_model = convert_batch(sklearn_model,`tvm`, X, remainder_size=50)

    Returns:
        A `BatchContainer` object that wraps one or two containers created by `convert(...)` function above.
    """
    # BUGFIX: copy before writing. The original assigned REMAINDER_SIZE into
    # `extra_config` in place, which both mutated any caller-supplied dict and
    # accumulated state in the shared `{}` default argument across calls.
    extra_config = dict(extra_config)
    extra_config[constants.REMAINDER_SIZE] = remainder_size
    return _convert_common(model, backend, test_input, device, extra_config)
6,534 | import numpy as np
from onnxconverter_common.registration import register_converter
from . import constants
from ._gbdt_commons import convert_gbdt_classifier_common, convert_gbdt_common
from ._tree_commons import TreeParameters
def _get_tree_parameters(tree_info, extra_config):
    """
    Parse a dumped tree string into an in-memory friendly representation of its structure.
    """
    lefts, rights, features, thresholds, values = [], [], [], [], []

    # Substitute feature names with their positional ids when the dump uses names.
    if constants.FEATURE_NAMES in extra_config:
        for f_id, f_name in enumerate(extra_config[constants.FEATURE_NAMES]):
            tree_info = tree_info.replace(f_name, str(f_id))

    # Strip the "[f", "[" and "]" markers, tokenize, and let the traversal
    # fill the flat per-node lists in place.
    tokens = tree_info.replace("[f", "").replace("[", "").replace("]", "").split()
    _tree_traversal(tokens, lefts, rights, features, thresholds, values)

    return TreeParameters(lefts, rights, features, thresholds, values)
def convert_gbdt_classifier_common(
    operator, tree_infos, get_tree_parameters, n_features, n_classes, classes=None, extra_config={}, decision_cond="<="
):
    """
    Common converter for GBDT classifiers.

    Args:
        tree_infos: The information representing a tree (ensemble)
        get_tree_parameters: A function specifying how to parse the tree_infos into parameters
        n_features: The number of features input to the model
        n_classes: How many classes are expected. 1 for regression tasks
        classes: The classes used for classification. None if implementing a regression model
        extra_config: Extra configuration used to properly implement the source tree
        decision_cond: The condition of the decision nodes in the x <cond> threshold order.
            Default '<='. Values can be <=, <, >=, >

    Returns:
        A tree implementation in PyTorch
    """
    for required in (tree_infos, get_tree_parameters, n_features, n_classes):
        assert required is not None

    # Rearrange classes and tree information.
    # Binary classification is modeled with a single output column.
    if n_classes == 2:
        n_classes -= 1
    if classes is None:
        classes = list(range(n_classes))

    # The ensemble interleaves one tree per class per round; regroup the trees
    # per class unless the caller explicitly disabled reordering.
    reorder_trees = extra_config.get(constants.REORDER_TREES, True)
    if reorder_trees and n_classes > 1:
        rounds = len(tree_infos) // n_classes
        tree_infos = [tree_infos[i * n_classes + j] for j in range(n_classes) for i in range(rounds)]

    return convert_gbdt_common(
        operator, tree_infos, get_tree_parameters, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
    )
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_xgb_classifier` function. Write a Python function `def convert_sklearn_xgb_classifier(operator, device, extra_config)` to solve the following problem:
Converter for `xgboost.XGBClassifier` (trained using the Sklearn API). Args: operator: An operator wrapping a `xgboost.XGBClassifier` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_xgb_classifier(operator, device, extra_config):
    """
    Converter for `xgboost.XGBClassifier` (trained using the Sklearn API).

    Args:
        operator: An operator wrapping a `xgboost.XGBClassifier` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # The number of input features cannot be recovered from the booster dump,
    # so it must be supplied through the extra configuration.
    if "n_features" in extra_config:
        n_features = extra_config["n_features"]
    else:
        raise RuntimeError(
            'XGBoost converter is not able to infer the number of input features.\
            Please pass "n_features:N" as extra configuration to the converter or fill a bug report.'
        )

    # Text dump of every tree in the ensemble, one string per tree.
    tree_infos = operator.raw_operator.get_booster().get_dump()
    n_classes = operator.raw_operator.n_classes_

    # XGBoost split nodes use a strict "<" decision condition.
    return convert_gbdt_classifier_common(
        operator, tree_infos, _get_tree_parameters, n_features, n_classes, decision_cond="<", extra_config=extra_config
    )
6,535 | import numpy as np
from onnxconverter_common.registration import register_converter
from . import constants
from ._gbdt_commons import convert_gbdt_classifier_common, convert_gbdt_common
from ._tree_commons import TreeParameters
def _get_tree_parameters(tree_info, extra_config):
    """
    Parse the tree and returns an in-memory friendly representation of its structure.

    Args:
        tree_info: The text dump of a single tree
        extra_config: Extra configuration; may carry `constants.FEATURE_NAMES` when
            the dump references features by name rather than by index

    Returns:
        A `TreeParameters` object holding the flat lefts/rights/features/thresholds/values lists
    """
    lefts = []
    rights = []
    features = []
    thresholds = []
    values = []

    # Substitute feature names with ids if necessary.
    if constants.FEATURE_NAMES in extra_config:
        feature_names = extra_config[constants.FEATURE_NAMES]
        for f_id, f_name in enumerate(feature_names):
            tree_info = tree_info.replace(f_name, str(f_id))

    # Strip the "[f", "[" and "]" markers from the dump, tokenize it, and let
    # the traversal fill the flat per-node lists in place.
    _tree_traversal(
        tree_info.replace("[f", "").replace("[", "").replace("]", "").split(), lefts, rights, features, thresholds, values
    )

    return TreeParameters(lefts, rights, features, thresholds, values)
def convert_gbdt_common(
    operator, tree_infos, get_tree_parameters, n_features, classes=None, extra_config={}, decision_cond="<="
):
    """
    Common converter for GBDT models.

    Args:
        operator: The logical operator wrapping the source GBDT model
        tree_infos: The information representing a tree (ensemble)
        get_tree_parameters: A function specifying how to parse the tree_infos into parameters
        n_features: The number of features input to the model
        classes: The classes used for classification. None if implementing a regression model
        extra_config: Extra configuration used to properly implement the source tree
        decision_cond: The condition of the decision nodes in the x <cond> threshold order. Default '<='. Values can be <=, <, >=, >

    Returns:
        A tree implementation in PyTorch
    """
    assert tree_infos is not None
    assert get_tree_parameters is not None
    assert n_features is not None

    # Parse every tree and pick the implementation strategy
    # (gemm / tree_trav / perf_tree_trav) plus the maximum tree depth.
    tree_parameters, max_depth, tree_type = get_tree_params_and_type(tree_infos, get_tree_parameters, extra_config)

    # Apply learning rate directly on the values rather than at runtime.
    if constants.LEARNING_RATE in extra_config:
        for parameter in tree_parameters:
            parameter.values = parameter.values * extra_config[constants.LEARNING_RATE]

    # Generate the model parameters based on the selected strategy.
    if tree_type == TreeImpl.gemm:
        net_parameters = [
            get_parameters_for_gemm_common(
                tree_param.lefts,
                tree_param.rights,
                tree_param.features,
                tree_param.thresholds,
                tree_param.values,
                n_features,
                extra_config,
            )
            for tree_param in tree_parameters
        ]
    else:
        # Some models require some additional massaging of the parameters before generating the tree_trav implementation.
        get_parameters_for_tree_trav = get_parameters_for_tree_trav_common
        if constants.GET_PARAMETERS_FOR_TREE_TRAVERSAL in extra_config:
            get_parameters_for_tree_trav = extra_config[constants.GET_PARAMETERS_FOR_TREE_TRAVERSAL]
        net_parameters = [
            get_parameters_for_tree_trav(
                tree_param.lefts,
                tree_param.rights,
                tree_param.features,
                tree_param.thresholds,
                tree_param.values,
                extra_config,
            )
            for tree_param in tree_parameters
        ]

    # Define the post transform.
    if constants.BASE_PREDICTION in extra_config:
        base_pred = torch.FloatTensor(extra_config[constants.BASE_PREDICTION])
        # For newer versions of scikit-learn (>1.1.1) the base prediction
        # comes wrapped in extra singleton dimensions; unwrap them here.
        if len(base_pred.shape) == 4:
            base_pred = base_pred[0][0]
        base_prediction = torch.nn.Parameter(base_pred, requires_grad=False)
        extra_config[constants.BASE_PREDICTION] = base_prediction

    # For models following the Sklearn API we need to build the post transform ourselves.
    # Binary classification uses a sigmoid over a single score; multiclass a softmax.
    if classes is not None and constants.POST_TRANSFORM not in extra_config:
        if len(classes) <= 2:
            extra_config[constants.POST_TRANSFORM] = constants.SIGMOID
        else:
            extra_config[constants.POST_TRANSFORM] = constants.SOFTMAX

    # Set the post transform, replacing the symbolic constant with the actual
    # torch module (with or without base prediction folded in).
    if constants.POST_TRANSFORM in extra_config:
        if extra_config[constants.POST_TRANSFORM] == constants.SIGMOID:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplySigmoidBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplySigmoidPostTransform()
        elif extra_config[constants.POST_TRANSFORM] == constants.SOFTMAX:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplySoftmaxBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplySoftmaxPostTransform()
        elif extra_config[constants.POST_TRANSFORM] == constants.TWEEDIE:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplyTweedieBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplyTweediePostTransform()
        elif extra_config[constants.POST_TRANSFORM] is None:
            # Identity post transform.
            extra_config[constants.POST_TRANSFORM] = PostTransform()
        else:
            raise NotImplementedError("Post transform {} not implemeneted yet".format(extra_config[constants.POST_TRANSFORM]))
    elif constants.BASE_PREDICTION in extra_config:
        # No explicit post transform requested: only add the base prediction.
        extra_config[constants.POST_TRANSFORM] = ApplyBasePredictionPostTransform(base_prediction)

    # Generate the tree implementation based on the selected strategy.
    if tree_type == TreeImpl.gemm:
        return GEMMGBDTImpl(
            operator, net_parameters, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
    if tree_type == TreeImpl.tree_trav:
        return TreeTraversalGBDTImpl(
            operator, net_parameters, max_depth, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
    else:  # Remaining possible case: tree_type == TreeImpl.perf_tree_trav.
        return PerfectTreeTraversalGBDTImpl(
            operator, net_parameters, max_depth, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_xgb_regressor` function. Write a Python function `def convert_sklearn_xgb_regressor(operator, device, extra_config)` to solve the following problem:
Converter for `xgboost.XGBRegressor` (trained using the Sklearn API). Args: operator: An operator wrapping a `xgboost.XGBRegressor` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_xgb_regressor(operator, device, extra_config):
    """
    Converter for `xgboost.XGBRegressor` (trained using the Sklearn API).

    Args:
        operator: An operator wrapping a `xgboost.XGBRegressor` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    import json

    assert operator is not None, "Cannot convert None operator"

    if "n_features" in extra_config:
        n_features = extra_config["n_features"]
    else:
        raise RuntimeError(
            'XGBoost converter is not able to infer the number of input features.\
            Please pass "n_features:N" as extra configuration to the converter or fill a bug report.'
        )

    booster = operator.raw_operator.get_booster()
    tree_infos = booster.get_dump()

    # Propagate feature names (when available) so the tree parser can map
    # them back to numeric feature indices.
    if hasattr(booster, "feature_names"):
        feature_names = booster.feature_names
        if feature_names is not None:
            extra_config[constants.FEATURE_NAMES] = feature_names

    # `save_config()` returns a JSON document, so parse it with `json.loads`
    # instead of `eval` + ad-hoc "true"/"false" string patching: eval is
    # unsafe and breaks on JSON `null`, which json handles natively.
    config = json.loads(booster.save_config())
    base_prediction = config["learner"]["learner_model_param"]["base_score"]
    if base_prediction is None:
        # No base score stored: fall back to XGBoost's historical default.
        base_prediction = [0.5]
    else:
        # The JSON stores the score as a string (e.g. "5E-1").
        base_prediction = [float(base_prediction)]
    extra_config[constants.BASE_PREDICTION] = base_prediction

    # XGBoost uses a strict "<" decision condition.
    return convert_gbdt_common(
        operator, tree_infos, _get_tree_parameters, n_features, decision_cond="<", extra_config=extra_config
    )
6,536 | import torch
import torch.nn
from ._physical_operator import PhysicalOperator
def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):
if covariance_type == "full":
n_components, _, _ = matrix_chol.shape
log_det_chol = torch.sum(torch.log(matrix_chol.reshape(n_components, -1)[:, :: n_features + 1]), 1)
return log_det_chol
else:
raise NotImplementedError(
"Hummingbird does not currently support {} covariance type for BayesianGaussianMixture. The supported covariance type is full.".format(
covariance_type
)
)
def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type):
    """
    Estimate the per-component log Gaussian density log p(x | component k)
    for every sample in X, mirroring scikit-learn's Gaussian mixture math.

    Args:
        X: Input samples of shape (n_samples, n_features)
        means: Component means of shape (n_components, n_features)
        precisions_chol: Cholesky factors of the precision matrices
        covariance_type: Only "full" is supported

    Returns:
        Tensor of shape (n_samples, n_components) with the log probabilities
    """
    n_samples, n_features = X.shape
    n_components, _ = means.shape
    # log det of each precision Cholesky factor (the normalization term).
    log_det = _compute_log_det_cholesky(precisions_chol, covariance_type, n_features)

    if covariance_type == "full":
        log_prob = torch.empty((n_samples, n_components))
        for k, (mu, prec_chol) in enumerate(zip(means, precisions_chol)):
            # Squared Mahalanobis distance via y = (x - mu) @ prec_chol.
            y = torch.mm(X, prec_chol) - torch.matmul(mu, prec_chol)
            log_prob[:, k] = torch.sum(torch.square(y), axis=1)
        # Gaussian log density: -0.5 * (d*log(2*pi) + mahalanobis^2) + log|P|^0.5
        log_gaussian_prob = (-0.5 * (n_features * torch.log(torch.FloatTensor([2 * torch.pi])) + log_prob)) + log_det
        return log_gaussian_prob
    else:
        raise NotImplementedError(
            "Hummingbird does not currently support {} covariance type for BayesianGaussianMixture. The supported covariance type is full.".format(
                covariance_type
            )
        )
6,537 | import torch
import torch.nn
from ._physical_operator import PhysicalOperator
def _compute_precision_cholesky(covariances, covariance_type):
estimate_precision_error_message = (
"Fitting the mixture model failed because some components have "
"ill-defined empirical covariance (for instance caused by singleton "
"or collapsed samples). Try to decrease the number of components, "
"or increase reg_covar."
)
if covariance_type == "full":
n_components, n_features, _ = covariances.shape
precisions_chol = torch.empty((n_components, n_features, n_features))
for k, covariance in enumerate(covariances):
try:
cov_chol = torch.linalg.cholesky(torch.FloatTensor(covariance), upper=False)
except torch.linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol[k] = torch.linalg.solve_triangular(cov_chol, torch.eye(n_features), upper=False).T
return precisions_chol
else:
raise NotImplementedError(
"Hummingbird does not currently support {} covariance type for BayesianGaussianMixture. The supported covariance type is full.".format(
covariance_type
)
) | null |
6,538 | import numpy as np
import torch
from datetime import datetime
from onnxconverter_common.registration import register_converter
from ._physical_operator import PhysicalOperator
from . import constants
class Prophet(PhysicalOperator, torch.nn.Module):
    """
    Class implementing Prophet operator in PyTorch.

    Implements the piecewise-linear trend component of a fitted Prophet
    model: inputs are timestamps (seconds), outputs the trend prediction.
    """

    def __init__(self, logical_operator, k, m, deltas, floor, start, t_scale, y_scale, changepoints_t, device):
        """
        Args:
            logical_operator: The wrapped logical operator
            k: Base growth rate
            m: Base offset (intercept)
            deltas: Per-changepoint rate adjustments
            floor: Additive floor applied after rescaling (0 for linear growth)
            start: History start time in seconds (used to normalize inputs)
            t_scale: History duration in seconds (normalization scale)
            y_scale: Scale used to de-normalize the predicted trend
            changepoints_t: Changepoint locations in normalized time
            device: Target device (unused here; parameters are moved by the container)
        """
        super(Prophet, self).__init__(logical_operator)
        self.regression = True
        self.k = k
        self.m = m
        self.deltas = torch.nn.Parameter(torch.Tensor(deltas), requires_grad=False)
        self.floor = floor
        self.start = start
        self.t_scale = t_scale
        self.y_scale = y_scale
        self.changepoints_t = torch.nn.Parameter(torch.Tensor(changepoints_t), requires_grad=False)

    def forward(self, x):
        # NOTE(review): inputs are sorted, so outputs are returned in time
        # order rather than input order — confirm callers expect this.
        x = torch.sort(x)[0]
        # Normalize timestamps to [0, 1] over the training history.
        t = (x - self.start) / self.t_scale

        # Linear growth.
        # Intercept changes at each changepoint keep the trend continuous.
        gammas = -self.changepoints_t * self.deltas
        # Get cumulative slope and intercept at each t.
        k_t = self.k * torch.ones_like(t)
        m_t = self.m * torch.ones_like(t)
        for s, t_s in enumerate(self.changepoints_t):
            indx = t >= t_s
            k_t[indx] += self.deltas[s]
            m_t[indx] += gammas[s]
        trend = k_t * t + m_t
        # De-normalize back to the original y scale.
        trend = trend * self.y_scale + self.floor
        return trend
The provided code snippet includes necessary dependencies for implementing the `convert_prophet` function. Write a Python function `def convert_prophet(operator, device=None, extra_config={})` to solve the following problem:
Converter for `prophet.Prophet` Args: operator: An operator wrapping a `prophet.Prophet` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_prophet(operator, device=None, extra_config={}):
    """
    Converter for `prophet.Prophet`

    Only the linear growth trend is supported.

    Args:
        operator: An operator wrapping a `prophet.Prophet` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    # Average fitted parameters over the sampled draws; nanmean skips NaNs
    # (presumably from non-converged draws — verify against Prophet's params).
    k = np.nanmean(operator.original_operator.params["k"])
    m = np.nanmean(operator.original_operator.params["m"])
    deltas = np.nanmean(operator.original_operator.params["delta"], axis=0)
    # Linear growth uses no floor.
    floor = 0
    # History start expressed as seconds since the Unix epoch.
    start = (operator.original_operator.start - datetime(1970, 1, 1)).total_seconds()
    t_scale = operator.original_operator.t_scale.total_seconds()
    y_scale = operator.original_operator.y_scale
    changepoints_t = operator.original_operator.changepoints_t
    growth = operator.original_operator.growth
    assert growth == "linear", "Growth function {} not supported yet.".format(growth)
    return Prophet(operator, k, m, deltas, floor, start, t_scale, y_scale, changepoints_t, device)
6,539 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._sv_implementations import SVC
class SVC(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of an SVM classifier using the one-vs-one
    pairwise decision scheme. Supported kernels: linear, rbf, sigmoid, poly.
    """

    def __init__(self, logical_operator, kernel, degree, sv, nv, a, b, gamma, coef0, classes, device):
        """
        Args:
            logical_operator: The wrapped logical operator
            kernel: Kernel name ("linear", "rbf", "sigmoid" or "poly")
            degree: Polynomial kernel degree
            sv: Support vectors (dense or CSR sparse matrix)
            nv: Number of support vectors per class
            a: Dual coefficients
            b: Intercepts of the pairwise decision functions
            gamma: Kernel coefficient
            coef0: Independent kernel term (sigmoid / poly)
            classes: Class labels
            device: Target device (parameters are moved by the container)
        """
        super(SVC, self).__init__(logical_operator, classification=True)
        self.kernel = kernel
        self.degree = degree
        self.gamma = gamma
        self.regression = False
        # Densify sparse support vectors before building the parameters.
        sv = sv.toarray() if isinstance(sv, scipy.sparse.csr_matrix) else sv
        self.sv = torch.nn.Parameter(torch.from_numpy(sv).double(), requires_grad=False)
        # Transposed support vectors and precomputed squared norms are cached
        # so the rbf kernel can be evaluated with a single matmul per batch.
        self.sv_t = torch.nn.Parameter(torch.transpose(self.sv, 0, 1), requires_grad=False)
        self.sv_norm = torch.nn.Parameter(-self.gamma * (self.sv ** 2).sum(1).view(1, -1), requires_grad=False)
        self.coef0 = coef0
        self.n_features = sv.shape[1]
        self.a = a
        self.b = torch.nn.Parameter(torch.from_numpy(b.reshape(1, -1)).double(), requires_grad=False)
        # start/end give, per class, the slice of support vectors it owns.
        self.start = [sum(nv[:i]) for i in range(len(nv))]
        self.end = [self.start[i] + nv[i] for i in range(len(nv))]
        self.len_nv = len(nv)
        # Class index pairs (i, j) for every one-vs-one decision function.
        true_classes, false_classes = zip(*[(i, j) for i in range(self.len_nv) for j in range(i + 1, self.len_nv)])
        self.true_classes = torch.nn.Parameter(torch.IntTensor([true_classes]), requires_grad=False)
        self.false_classes = torch.nn.Parameter(torch.IntTensor([false_classes]), requires_grad=False)
        self.classes = torch.nn.Parameter(torch.IntTensor(classes), requires_grad=False)
        # Only remap predictions when labels are not already 0..n_classes-1.
        self.perform_class_select = False
        if min(classes) != 0 or max(classes) != len(classes) - 1:
            self.perform_class_select = True
        self.n_classes = len(classes)

    def forward(self, x):
        x = x.double()

        # Kernel matrix between the batch and the support vectors.
        if self.kernel == "linear":
            k = torch.mm(x, self.sv_t)
        elif self.kernel == "rbf":
            # using quadratic expansion--susceptible to rounding-off errors
            # http://www.robots.ox.ac.uk/~albanie/notes/Euclidean_distance_trick.pdf
            x_norm = -self.gamma * (x ** 2).sum(1).view(-1, 1)
            k = torch.exp(x_norm + self.sv_norm + 2.0 * self.gamma * torch.mm(x, self.sv_t).double())
        elif self.kernel == "sigmoid":
            k = torch.sigmoid(self.gamma * torch.mm(x, self.sv_t) + self.coef0)
        else:  # poly kernel
            k = torch.pow(self.gamma * torch.mm(x, self.sv_t) + self.coef0, self.degree)

        # One-vs-one decision values: for each class pair (i, j), combine the
        # dual coefficients of both classes' support vectors.
        c = [
            sum(self.a[i, p] * k[:, p : p + 1] for p in range(self.start[j], self.end[j]))
            + sum(self.a[j - 1, p] * k[:, p : p + 1] for p in range(self.start[i], self.end[i]))
            for i in range(self.len_nv)
            for j in range(i + 1, self.len_nv)
        ]
        c = torch.cat(c, dim=1) + self.b

        if self.n_classes == 2:
            # Binary case: the sign of the single decision value picks the class.
            class_ids = torch.gt(c, 0.0).int().flatten()
        else:
            # Multiclass: each pairwise decision votes; most-voted class wins.
            votes = torch.where(c > 0, self.true_classes, self.false_classes)
            # TODO mode is still not implemented for GPU backend.
            votes = votes.data.cpu()
            class_ids, _ = torch.mode(votes, dim=1)

        # No class probabilities in SVC.
        if self.perform_class_select:
            temp = torch.index_select(self.classes, 0, class_ids.long())
            return temp, temp
        else:
            return class_ids, class_ids
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_svm_classifier_model` function. Write a Python function `def convert_onnx_svm_classifier_model(operator, device, extra_config)` to solve the following problem:
Converter for `ai.onnx.ml.SVMClassifier` Args: operator: An operator wrapping a `ai.onnx.ml.SVMClassifier` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_svm_classifier_model(operator, device, extra_config):
    """
    Converter for `ai.onnx.ml.SVMClassifier`

    Args:
        operator: An operator wrapping a `ai.onnx.ml.SVMClassifier` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model

    Raises:
        RuntimeError: when the kernel is unsupported or required attributes
            are missing from the ONNX node
    """
    # These are passed as params to SVC()
    kernel = degree = sv = nv = a = b = gamma = coef0 = classes = None

    # These are stored for reshaping after parsing is done
    sv_vals = coeffis = None

    # Walk the ONNX node attributes and pull out the SVM parameters.
    for attr in operator.raw_operator.origin.attribute:
        if attr.name == "kernel_type":
            # ex: Convert b'RBF' to 'rbf' for consistency
            kernel = attr.s.lower().decode("UTF-8")
            if kernel not in ["linear", "poly", "rbf"]:  # from svc.py ln 58
                raise RuntimeError("Unsupported kernel for SVC: {}".format(kernel))
        elif attr.name == "coefficients":
            coeffis = np.array(attr.floats)
        elif attr.name == "vectors_per_class":
            nv = np.array(attr.ints).astype("int32")
        elif attr.name == "support_vectors":
            sv_vals = np.array(attr.floats)
        elif attr.name == "rho":
            b = np.array(attr.floats)
        elif attr.name == "kernel_params":
            # See
            # https://github.com/onnx/sklearn-onnx/blob/master/skl2onnx/operator_converters/support_vector_machines.py
            # for details on [op._gamma, op.coef0, op.degree]
            kp_arr = np.array(attr.floats)
            gamma = kp_arr[0]
            coef0 = kp_arr[1]
            degree = int(kp_arr[2])
        elif attr.name == "classlabels_ints":
            classes = np.array(attr.ints)

    if any(v is None for v in [sv_vals, coeffis]):
        raise RuntimeError("Error parsing SVC arrays, found unexpected None")

    # Now that we have parsed the degree and lengths, reshape 'a' and 'sv'
    # For 'a', these are in 'dual' shape, so resize into 2:
    # https://github.com/onnx/sklearn-onnx/blob/master/skl2onnx/operator_converters/support_vector_machines.py#L41
    #
    # Except for when they're not...
    # https://stackoverflow.com/questions/22816646/the-dimension-of-dual-coef-in-sklearn-svc
    if len(classes) > 2:
        a = coeffis.reshape(2, len(coeffis) // 2)
    else:  # if not in "dual" form with classes > 3 (binary), 'a' and 'b' are the inverse. Don't ask why.
        a = np.negative([coeffis])
        b = np.negative(b)

    # One row of support vectors per dual coefficient row entry.
    sv = sv_vals.reshape(len(a[0]), len(sv_vals) // len(a[0]))

    if any(v is None for v in [kernel, degree, sv, nv, a, b, gamma, coef0, classes]):
        raise RuntimeError(
            "Error parsing SVC, found unexpected None. kernel{} degree{} sv{} nv{} a{} b{} gamma{} coef0{} classes{}".format(
                kernel, degree, sv, nv, a, b, gamma, coef0, classes
            )
        )

    return SVC(operator, kernel, degree, sv, nv, a, b, gamma, coef0, classes, device)
6,540 | from onnxconverter_common.registration import register_converter
from .. import constants
from .._pipeline_implementations import Concat
class Concat(PhysicalOperator, torch.nn.Module):
    """
    Module concatenating several input tensors column-wise into one
    feature tensor.
    """

    def __init__(self, logical_operator):
        super(Concat, self).__init__(logical_operator, transformer=True)

    def forward(self, *x):
        if len(x[0].shape) <= 1:
            # 1D inputs: flatten each tensor and stack them as columns.
            columns = [tensor.view(-1) for tensor in x]
            return torch.stack(columns, dim=1)

        # 2D (or higher) inputs: unify dtypes before concatenating, since
        # torch.cat requires all operands to share a single dtype.
        dtypes = {tensor.dtype for tensor in x}
        if len(dtypes) > 1:
            if torch.float64 in dtypes:
                x = [tensor.double() for tensor in x]
            elif torch.float32 in dtypes:
                x = [tensor.float() for tensor in x]
            else:
                raise RuntimeError(
                    "Combination of data types for Concat input tensors not supported. Please fill an issue at https://github.com/microsoft/hummingbird."
                )
        return torch.cat(x, dim=1)
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_feature_vectorizer` function. Write a Python function `def convert_onnx_feature_vectorizer(operator, device, extra_config)` to solve the following problem:
Converter for `ai.onnx.ml.FeatureVectorizer. Args: operator: An operator wrapping a `ai.onnx.ml.FeatureVectorizer` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_feature_vectorizer(operator, device, extra_config):
    """
    Converter for `ai.onnx.ml.FeatureVectorizer`.

    Feature vectorization is just a column-wise concatenation, so the
    operator maps directly onto the shared `Concat` module.

    Args:
        operator: An operator wrapping a `ai.onnx.ml.FeatureVectorizer` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    return Concat(operator)
6,541 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._scaler_implementations import Scaler
class Scaler(PhysicalOperator, torch.nn.Module):
    """
    Class implementing Scaler operators in PyTorch: computes
    (x - offset) * scale element-wise, returning float32.
    """

    def __init__(self, logical_operator, offset, scale, device):
        """
        Args:
            logical_operator: The wrapped logical operator
            offset: Per-feature offset subtracted from the input (numpy array)
            scale: Per-feature multiplier applied after the offset (numpy array)
            device: Target device (parameters are moved by the container)
        """
        super(Scaler, self).__init__(logical_operator, transformer=True)

        # Missing / empty offset and scale default to the identity transform.
        if offset is None or len(offset.shape) == 0 or offset.shape == (0,):
            offset = numpy.array([0], dtype=numpy.float32)
        if scale is None or len(scale.shape) == 0 or scale.shape == (0,):
            scale = numpy.array([1], dtype=numpy.float32)

        self.offset = offset
        self.scale = scale

        # NOTE(review): after the defaulting above, offset/scale can no longer
        # be None, so these guards (and the ones in forward) always fire.
        if offset is not None:
            self.offset = torch.nn.Parameter(torch.from_numpy(offset).detach().clone(), requires_grad=False)

        if scale is not None:
            self.scale = torch.nn.Parameter(torch.from_numpy(scale).detach().clone(), requires_grad=False)

    def forward(self, x):
        if self.offset is not None:
            x = x - self.offset

        if self.scale is not None:
            x = x * self.scale

        return x.float()
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_scaler` function. Write a Python function `def convert_onnx_scaler(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.ml.Scaler` Args: operator: An operator wrapping a `ai.onnx.ml.Scaler` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_scaler(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.ml.Scaler`

    Args:
        operator: An operator wrapping a `ai.onnx.ml.Scaler` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # Extract the offset/scale arrays from the ONNX node attributes.
    offset = None
    scale = None
    for attr in operator.raw_operator.origin.attribute:
        if attr.name == "offset":
            offset = np.array(attr.floats)
        elif attr.name == "scale":
            scale = np.array(attr.floats)

    if offset is None or scale is None:
        raise RuntimeError("Error parsing Scalar, found unexpected None")

    return Scaler(operator, offset, scale, device)
6,542 | from onnxconverter_common.registration import register_converter
from .._discretizer_implementations import Binarizer
class Binarizer(PhysicalOperator, torch.nn.Module):
    """
    Class implementing Binarizer operators in PyTorch: maps each input
    element to 1.0 when it exceeds the threshold, otherwise 0.0.
    """

    def __init__(self, logical_operator, threshold, device):
        super(Binarizer, self).__init__(logical_operator)
        self.transformer = True
        # Stored as a one-element tensor so the comparison broadcasts.
        self.threshold = torch.nn.Parameter(torch.FloatTensor([threshold]), requires_grad=False)

    def forward(self, x):
        # Strictly-greater comparison, cast from bool to float.
        return (x > self.threshold).float()
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_binarizer` function. Write a Python function `def convert_onnx_binarizer(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.ml.Binarizer` Args: operator: An operator wrapping a `ai.onnx.ml.Binarizer` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_binarizer(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.ml.Binarizer`

    Args:
        operator: An operator wrapping a `ai.onnx.ml.Binarizer` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # Pick the first "threshold" attribute from the ONNX node, if any.
    threshold = next(
        (attr.f for attr in operator.raw_operator.origin.attribute if attr.name == "threshold"),
        None,
    )
    if threshold is None:
        raise RuntimeError("Error parsing Binarizer, found unexpected None")

    return Binarizer(operator, threshold, device)
6,543 | from onnxconverter_common.registration import register_converter
from .._normalizer_implementations import Normalizer
class Normalizer(PhysicalOperator, torch.nn.Module):
    """
    Class implementing Normalizer operators in PyTorch. Supported
    normalizers are L1, L2 and Max; each row is divided by its norm.
    """

    def __init__(self, logical_operator, norm, device):
        super(Normalizer, self).__init__(logical_operator)
        self.norm = norm
        self.transformer = True

    def forward(self, x):
        # Compute the per-row norm, keeping dims so the division broadcasts.
        if self.norm == "l1":
            denominator = torch.abs(x).sum(1, keepdim=True)
        elif self.norm == "l2":
            denominator = torch.pow(torch.pow(x, 2).sum(1, keepdim=True), 0.5)
        elif self.norm == "max":
            denominator = torch.max(torch.abs(x), dim=1, keepdim=True)[0]
        else:
            raise RuntimeError("Unsupported norm: {0}".format(self.norm))
        return x / denominator
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_normalizer` function. Write a Python function `def convert_onnx_normalizer(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.ml.Normalizer` Args: operator: An operator wrapping a `ai.onnx.ml.Normalizer` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_normalizer(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.ml.Normalizer`

    Args:
        operator: An operator wrapping a `ai.onnx.ml.Normalizer` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # The norm is stored as e.g. b'L1'; decode and lowercase it to 'l1'.
    norm_attribute = operator.raw_operator.origin.attribute[0]
    norm = norm_attribute.s.lower().decode("UTF-8")
    if not norm:
        raise RuntimeError("Error parsing Normalizer, found unexpected None")

    return Normalizer(operator, norm, device)
6,544 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._imputer_implementations import SimpleImputer
class SimpleImputer(PhysicalOperator, torch.nn.Module):
    """
    Class implementing SimpleImputer operators in PyTorch: replaces
    occurrences of the missing value with precomputed statistics.
    """

    def __init__(self, logical_operator, device, statistics=None, missing=None, strategy=None):
        """
        Args:
            logical_operator: The wrapped logical operator (may hold an sklearn SimpleImputer)
            device: Target device (parameters are moved by the container)
            statistics: Per-column replacement values; falls back to the sklearn imputer's `statistics_`
            missing: The value considered missing (NaN or a concrete number); falls back to the sklearn imputer's `missing_values`
            strategy: Imputation strategy name; falls back to the sklearn imputer's `strategy`
        """
        super(SimpleImputer, self).__init__(logical_operator)
        sklearn_imputer = logical_operator.raw_operator
        # Pull out the stats field from either the SKL imputer or args
        stats_ = statistics if statistics is not None else sklearn_imputer.statistics_
        # Process the stats into an array
        stats = [float(stat) for stat in stats_]
        missing_values = missing if missing is not None else sklearn_imputer.missing_values
        strategy = strategy if strategy is not None else sklearn_imputer.strategy

        # Columns whose statistic is NaN have no replacement value; for
        # non-"constant" strategies those columns are dropped at transform
        # time via index_select (mirroring sklearn's behavior for all-NaN
        # columns — NOTE(review): confirm against the sklearn version used).
        b_mask = np.logical_not(np.isnan(stats))
        i_mask = [i for i in range(len(b_mask)) if b_mask[i]]
        self.transformer = True
        # do_mask == True means "keep all columns" (no masking needed).
        self.do_mask = strategy == "constant" or all(b_mask)
        self.mask = torch.nn.Parameter(torch.LongTensor([] if self.do_mask else i_mask), requires_grad=False)
        self.replace_values = torch.nn.Parameter(torch.tensor(np.array([stats_]), dtype=torch.float32), requires_grad=False)

        # Whether "missing" is NaN (needs isnan) or a concrete number (needs eq).
        self.is_nan = True if (missing_values == "NaN" or np.isnan(missing_values)) else False
        if not self.is_nan:
            self.missing_values = torch.nn.Parameter(torch.tensor([missing_values], dtype=torch.float32), requires_grad=False)

    def forward(self, x):
        if self.is_nan:
            # Replace NaNs, then optionally drop columns without statistics.
            result = torch.where(torch.isnan(x), self.replace_values.expand(x.shape), x)
            if self.do_mask:
                return result
            return torch.index_select(result, 1, self.mask)
        else:
            # Replace exact matches of the concrete missing value.
            return torch.where(torch.eq(x, self.missing_values), self.replace_values.expand(x.shape), x)
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_imputer` function. Write a Python function `def convert_onnx_imputer(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.ml.Imputer` Args: operator: An operator wrapping a `ai.onnx.ml.Imputer` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_imputer(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.ml.Imputer`

    Args:
        operator: An operator wrapping a `ai.onnx.ml.Imputer` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model

    Raises:
        RuntimeError: when the ONNX node lacks the expected attributes
    """
    stats = missing = None
    for attr in operator.raw_operator.origin.attribute:
        if attr.name == "imputed_value_floats":
            stats = np.array(attr.floats).astype("float64")
        elif attr.name == "replaced_value_float":
            missing = attr.f

    if any(v is None for v in [stats, missing]):
        # Bug fix: the template was previously passed to RuntimeError together
        # with the values as extra positional args instead of being formatted,
        # so the message never contained the actual stats/missing values.
        raise RuntimeError(
            "Error parsing Imputer, found unexpected None. stats: {}, missing: {}".format(stats, missing)
        )

    # ONNXML has no "strategy" field, but always behaves similar to SKL's constant: "replace missing values with fill_value"
    return SimpleImputer(operator, device, statistics=stats, missing=missing, strategy="constant")
6,545 | import numpy as np
from onnxconverter_common.registration import register_converter
from .. import constants
from .._gbdt_commons import convert_gbdt_classifier_common, convert_gbdt_common
from .._tree_commons import TreeParameters, convert_decision_ensemble_tree_common, get_parameters_for_tree_trav_common
def _dummy_get_parameter(tree_info, extra_config):
"""
Dummy function used to return parameters (TreeEnsemble converters already have parameters in the right format)
"""
return tree_info
def _get_tree_infos_from_tree_ensemble(operator, device=None, extra_config={}):
    """
    Base method for extracting parameters from `ai.onnx.ml.TreeEnsemble`s.

    Returns:
        A tuple (n_features, tree_infos, classes, post_transform, decision_cond)
    """
    # The number of input features must have been recorded by the parser before conversion.
    # (Typo fix: "retrive" -> "retrieve" in the diagnostic message.)
    assert (
        constants.N_FEATURES in extra_config
    ), "Cannot retrieve the number of features. Please fill an issue at https://github.com/microsoft/hummingbird."

    # Get the number of features.
    n_features = extra_config[constants.N_FEATURES]

    # Get the tree information from the operator.
    tree_infos, classes, post_transform, decision_cond = _get_tree_infos_from_onnx_ml_operator(operator)

    return n_features, tree_infos, classes, post_transform, decision_cond
def convert_gbdt_classifier_common(
    operator, tree_infos, get_tree_parameters, n_features, n_classes, classes=None, extra_config={}, decision_cond="<="
):
    """
    Common converter for GBDT classifiers.
    Args:
        tree_infos: The information representing a tree (ensemble)
        get_tree_parameters: A function specifying how to parse the tree_infos into parameters
        n_features: The number of features input to the model
        n_classes: How many classes are expected. 1 for regression tasks
        classes: The classes used for classification. None if implementing a regression model
        extra_config: Extra configuration used to properly implement the source tree
        decision_cond: The condition of the decision nodes in the x <cond> threshold order. Default '<='. Values can be <=, <, >=, >
    Returns:
        A tree implementation in PyTorch
    """
    assert tree_infos is not None
    assert get_tree_parameters is not None
    assert n_features is not None
    assert n_classes is not None
    # Rearrange classes and tree information.
    # Binary classification is modeled with a single score column, so collapse n_classes to 1.
    if n_classes == 2:
        n_classes -= 1
    if classes is None:
        classes = [i for i in range(n_classes)]
    reorder_trees = True
    if constants.REORDER_TREES in extra_config:
        reorder_trees = extra_config[constants.REORDER_TREES]
    # Regroup trees from round-major order (tree i*n_classes+j) to class-major order,
    # so that all trees contributing to a class end up adjacent.
    if reorder_trees and n_classes > 1:
        tree_infos = [tree_infos[i * n_classes + j] for j in range(n_classes) for i in range(len(tree_infos) // n_classes)]
    return convert_gbdt_common(
        operator, tree_infos, get_tree_parameters, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
    )
def get_parameters_for_tree_trav_common(lefts, rights, features, thresholds, values, extra_config={}):
    """
    Common functions used by all tree algorithms to generate the parameters according to the tree_trav strategies.

    Args:
        lefts: The left nodes
        rights: The right nodes
        features: The features used in the decision nodes
        thresholds: The thresholds used in the decision nodes
        values: The values stored in the leaf nodes
        extra_config: Extra configuration used to properly implement the source tree

    Returns:
        An array containing the extracted parameters
    """
    if len(lefts) == 1:
        # Model creating tree with just a single leaf node. We transform it
        # to a model with one internal node.
        lefts = [1, -1, -1]
        rights = [2, -1, -1]
        features = [0, 0, 0]
        thresholds = [0, 0, 0]
        n_classes = values.shape[1] if type(values) is np.ndarray else 1
        # Fix: keep the reshaped array. ndarray.reshape returns a new array, so the
        # result must be assigned back (previously the reshape result was discarded).
        values = np.array([np.zeros(n_classes), values[0], values[0]]).reshape(3, n_classes)

    ids = [i for i in range(len(lefts))]
    nodes = list(zip(ids, lefts, rights, features, thresholds, values))

    # Refactor the tree parameters in the proper format.
    nodes_map = {0: Node(0)}
    current_node = 0
    for i, node in enumerate(nodes):
        id, left, right, feature, threshold, value = node

        if left != -1:
            l_node = Node(left)
            nodes_map[left] = l_node
        else:
            # Leaf on the left: point the pointer back at the node itself and mark it
            # as a leaf by setting the decision feature to -1.
            lefts[i] = id
            l_node = -1
            feature = -1

        if right != -1:
            r_node = Node(right)
            nodes_map[right] = r_node
        else:
            rights[i] = id
            r_node = -1
            feature = -1

        nodes_map[current_node].left = l_node
        nodes_map[current_node].right = r_node
        nodes_map[current_node].feature = feature
        nodes_map[current_node].threshold = threshold
        nodes_map[current_node].value = value

        current_node += 1

    lefts = np.array(lefts)
    rights = np.array(rights)
    features = np.array(features)
    thresholds = np.array(thresholds, dtype=np.float64)
    values = np.array(values, dtype=np.float64)

    return [nodes_map, ids, lefts, rights, features, thresholds, values]
def convert_decision_ensemble_tree_common(
    operator, tree_infos, get_parameters, get_parameters_for_tree_trav, n_features, classes=None, extra_config={}
):
    """
    Common converter for decision tree ensembles.

    Args:
        operator: The operator wrapping the source model
        tree_infos: The information representing a tree (ensemble)
        get_parameters: A function specifying how to parse the tree_infos into parameters
        get_parameters_for_tree_trav: A function generating the parameters for the tree_trav strategies
        n_features: The number of features input to the model
        classes: The classes used for classification. None if implementing a regression model
        extra_config: Extra configuration used to properly implement the source tree

    Returns:
        A tree implementation in PyTorch
    """
    tree_parameters, max_depth, tree_type = get_tree_params_and_type(tree_infos, get_parameters, extra_config)
    # Generate the tree implementation based on the selected strategy.
    if tree_type == TreeImpl.gemm:
        net_parameters = [
            get_parameters_for_gemm_common(
                tree_param.lefts,
                tree_param.rights,
                tree_param.features,
                tree_param.thresholds,
                tree_param.values,
                n_features,
                extra_config,
            )
            for tree_param in tree_parameters
        ]
        return GEMMDecisionTreeImpl(operator, net_parameters, n_features, classes, extra_config=extra_config)
    # Both tree_trav and perf_tree_trav consume the same per-tree parameter layout.
    net_parameters = [
        get_parameters_for_tree_trav(
            tree_param.lefts, tree_param.rights, tree_param.features, tree_param.thresholds, tree_param.values, extra_config,
        )
        for tree_param in tree_parameters
    ]
    if tree_type == TreeImpl.tree_trav:
        return TreeTraversalDecisionTreeImpl(operator, net_parameters, max_depth, n_features, classes, extra_config)
    else:  # Remaining possible case: tree_type == TreeImpl.perf_tree_trav
        return PerfectTreeTraversalDecisionTreeImpl(operator, net_parameters, max_depth, n_features, classes, extra_config)
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_tree_ensemble_classifier` function. Write a Python function `def convert_onnx_tree_ensemble_classifier(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.ml.TreeEnsembleClassifier`. Args: operator: An operator wrapping a `ai.onnx.ml.TreeEnsembleClassifier` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_tree_ensemble_classifier(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.ml.TreeEnsembleClassifier`.

    Args:
        operator: An operator wrapping a `ai.onnx.ml.TreeEnsembleClassifier` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # Extract the tree information from the ONNX operator.
    n_features, tree_infos, classes, post_transform, decision_cond = _get_tree_infos_from_tree_ensemble(
        operator.raw_operator, device, extra_config
    )

    if post_transform != "NONE":
        # A post transform is requested: record it and take the GBDT classifier path.
        extra_config[constants.POST_TRANSFORM] = post_transform
        return convert_gbdt_classifier_common(
            operator, tree_infos, _dummy_get_parameter, n_features, len(classes), classes, extra_config, decision_cond
        )

    # No post transform: build a plain decision tree ensemble.
    return convert_decision_ensemble_tree_common(
        operator, tree_infos, _dummy_get_parameter, get_parameters_for_tree_trav_common, n_features, classes, extra_config
    )
6,546 | import numpy as np
from onnxconverter_common.registration import register_converter
from .. import constants
from .._gbdt_commons import convert_gbdt_classifier_common, convert_gbdt_common
from .._tree_commons import TreeParameters, convert_decision_ensemble_tree_common, get_parameters_for_tree_trav_common
def _dummy_get_parameter(tree_info, extra_config):
    """
    Pass-through parameter extractor. TreeEnsemble converters already carry the
    parameters in the right format, so nothing needs to be parsed here.
    """
    return tree_info
def _get_tree_infos_from_tree_ensemble(operator, device=None, extra_config={}):
    """
    Base method for extracting parameters from `ai.onnx.ml.TreeEnsemble`s.

    Returns:
        A tuple (n_features, tree_infos, classes, post_transform, decision_cond)
    """
    # The number of input features must have been recorded by the parser before conversion.
    # (Typo fix: "retrive" -> "retrieve" in the diagnostic message.)
    assert (
        constants.N_FEATURES in extra_config
    ), "Cannot retrieve the number of features. Please fill an issue at https://github.com/microsoft/hummingbird."

    # Get the number of features.
    n_features = extra_config[constants.N_FEATURES]

    # Get the tree information from the operator.
    tree_infos, classes, post_transform, decision_cond = _get_tree_infos_from_onnx_ml_operator(operator)

    return n_features, tree_infos, classes, post_transform, decision_cond
def convert_gbdt_common(
    operator, tree_infos, get_tree_parameters, n_features, classes=None, extra_config={}, decision_cond="<="
):
    """
    Common converter for GBDT models.
    Args:
        tree_infos: The information representing a tree (ensemble)
        get_tree_parameters: A function specifying how to parse the tree_infos into parameters
        n_features: The number of features input to the model
        classes: The classes used for classification. None if implementing a regression model
        extra_config: Extra configuration used to properly implement the source tree
        decision_cond: The condition of the decision nodes in the x <cond> threshold order. Default '<='. Values can be <=, <, >=, >
    Returns:
        A tree implementation in PyTorch
    """
    assert tree_infos is not None
    assert get_tree_parameters is not None
    assert n_features is not None
    tree_parameters, max_depth, tree_type = get_tree_params_and_type(tree_infos, get_tree_parameters, extra_config)
    # Apply learning rate directly on the values rather than at runtime.
    if constants.LEARNING_RATE in extra_config:
        for parameter in tree_parameters:
            parameter.values = parameter.values * extra_config[constants.LEARNING_RATE]
    # Generate the model parameters based on the selected strategy.
    if tree_type == TreeImpl.gemm:
        net_parameters = [
            get_parameters_for_gemm_common(
                tree_param.lefts,
                tree_param.rights,
                tree_param.features,
                tree_param.thresholds,
                tree_param.values,
                n_features,
                extra_config,
            )
            for tree_param in tree_parameters
        ]
    else:
        # Some models require some additional massaging of the parameters before generating the tree_trav implementation.
        get_parameters_for_tree_trav = get_parameters_for_tree_trav_common
        if constants.GET_PARAMETERS_FOR_TREE_TRAVERSAL in extra_config:
            get_parameters_for_tree_trav = extra_config[constants.GET_PARAMETERS_FOR_TREE_TRAVERSAL]
        net_parameters = [
            get_parameters_for_tree_trav(
                tree_param.lefts,
                tree_param.rights,
                tree_param.features,
                tree_param.thresholds,
                tree_param.values,
                extra_config,
            )
            for tree_param in tree_parameters
        ]
    # Define the post transform.
    if constants.BASE_PREDICTION in extra_config:
        base_pred = torch.FloatTensor(extra_config[constants.BASE_PREDICTION])
        # For newer versions of scikit-learn (>1.1.1), the stored base prediction can be
        # rank-4; keep only the innermost matrix.
        if len(base_pred.shape) == 4:
            base_pred = base_pred[0][0]
        base_prediction = torch.nn.Parameter(base_pred, requires_grad=False)
        extra_config[constants.BASE_PREDICTION] = base_prediction
    # For models following the Sklearn API we need to build the post transform ourselves.
    if classes is not None and constants.POST_TRANSFORM not in extra_config:
        if len(classes) <= 2:
            extra_config[constants.POST_TRANSFORM] = constants.SIGMOID
        else:
            extra_config[constants.POST_TRANSFORM] = constants.SOFTMAX
    # Set the post transform. The POST_TRANSFORM entry is replaced in place: the
    # symbolic name is swapped for the corresponding PostTransform module instance.
    if constants.POST_TRANSFORM in extra_config:
        if extra_config[constants.POST_TRANSFORM] == constants.SIGMOID:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplySigmoidBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplySigmoidPostTransform()
        elif extra_config[constants.POST_TRANSFORM] == constants.SOFTMAX:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplySoftmaxBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplySoftmaxPostTransform()
        elif extra_config[constants.POST_TRANSFORM] == constants.TWEEDIE:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplyTweedieBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplyTweediePostTransform()
        elif extra_config[constants.POST_TRANSFORM] is None:
            extra_config[constants.POST_TRANSFORM] = PostTransform()
        else:
            raise NotImplementedError("Post transform {} not implemeneted yet".format(extra_config[constants.POST_TRANSFORM]))
    elif constants.BASE_PREDICTION in extra_config:
        extra_config[constants.POST_TRANSFORM] = ApplyBasePredictionPostTransform(base_prediction)
    # Generate the tree implementation based on the selected strategy.
    if tree_type == TreeImpl.gemm:
        return GEMMGBDTImpl(
            operator, net_parameters, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
    if tree_type == TreeImpl.tree_trav:
        return TreeTraversalGBDTImpl(
            operator, net_parameters, max_depth, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
    else:  # Remaining possible case: tree_type == TreeImpl.perf_tree_trav.
        return PerfectTreeTraversalGBDTImpl(
            operator, net_parameters, max_depth, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_tree_ensemble_regressor` function. Write a Python function `def convert_onnx_tree_ensemble_regressor(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.ml.TreeEnsembleRegressor`. Args: operator: An operator wrapping a `ai.onnx.ml.TreeEnsembleRegressor` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_tree_ensemble_regressor(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.ml.TreeEnsembleRegressor`.

    Args:
        operator: An operator wrapping a `ai.onnx.ml.TreeEnsembleRegressor` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # Extract the tree information; classes and post transform are not used for regression.
    raw_model = operator.raw_operator
    n_features, tree_infos, _, _, decision_cond = _get_tree_infos_from_tree_ensemble(raw_model, device, extra_config)

    # Build the PyTorch tree ensemble.
    return convert_gbdt_common(
        operator, tree_infos, _dummy_get_parameter, n_features, extra_config=extra_config, decision_cond=decision_cond
    )
6,547 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._label_encoder_implementations import NumericLabelEncoder, StringLabelEncoder
class StringLabelEncoder(PhysicalOperator, torch.nn.Module):
    """
    LabelEncoder over string data types.
    When the ONNX backend is selected, this operator only works for PyTorch => 1.8.0.
    """

    def __init__(self, logical_operator, classes, device, extra_config={}):
        super(StringLabelEncoder, self).__init__(logical_operator, transformer=True)
        self.regression = False
        self.num_columns = len(classes)

        # Strings are viewed as int32 tensors, so the byte length must be a multiple of 4.
        self.max_word_length = max([len(cat) for cat in classes])
        while self.max_word_length % 4 != 0:
            self.max_word_length += 1
        data_type = "|S" + str(self.max_word_length)

        # Propagate the largest string length seen so far through extra_config.
        max_length = 0
        if constants.MAX_STRING_LENGTH in extra_config:
            # Bug fix: read the previously recorded maximum. The original statement was a
            # bare expression (a no-op), so max_length always stayed 0 and an earlier,
            # larger MAX_STRING_LENGTH could be clobbered.
            max_length = extra_config[constants.MAX_STRING_LENGTH]
        extra_config[constants.MAX_STRING_LENGTH] = max(max_length, self.max_word_length)

        # Sort the classes and convert to torch.int32
        self.max_word_length = self.max_word_length // 4
        classes_conv = torch.from_numpy(np.array(sorted(set(classes)), dtype=data_type).view(np.int32)).detach().clone()
        classes_conv = classes_conv.view(1, -1, self.max_word_length)
        self.condition_tensors = torch.nn.Parameter(classes_conv, requires_grad=False)

    def forward(self, x):
        # Compare each encoded input word against every known class; the index of the
        # fully-matching class is the label.
        x = x.view(-1, 1, self.max_word_length)
        result = torch.prod(self.condition_tensors == x, dim=2).nonzero(as_tuple=True)[1]
        assert result.shape[0] == x.shape[0], "x ({}) contains previously unseen labels. condition_tensors: {}".format(
            x, self.condition_tensors
        )
        return result
class NumericLabelEncoder(PhysicalOperator, torch.nn.Module):
    """LabelEncoder over numeric data types: maps each input value to its index in `classes`."""

    def __init__(self, logical_operator, classes, device):
        super(NumericLabelEncoder, self).__init__(logical_operator, transformer=True)
        self.regression = False
        # Known labels, kept as a non-trainable parameter so they follow the module across devices.
        self.check_tensor = torch.nn.Parameter(torch.IntTensor(classes), requires_grad=False)

    def forward(self, x):
        # Compare every input against every known label and return the matching position.
        matches = torch.eq(x.view(-1, 1), self.check_tensor).int()
        return torch.argmax(matches, dim=1)
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_label_encoder` function. Write a Python function `def convert_onnx_label_encoder(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.ml.LabelEncoder` Args: operator: An operator wrapping a `ai.onnx.ml.LabelEncoder` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_label_encoder(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.ml.LabelEncoder`

    Args:
        operator: An operator wrapping a `ai.onnx.ml.LabelEncoder` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # Dispatch on the key type stored in the ONNX attributes.
    for attr in operator.original_operator.origin.attribute:
        if attr.name == "keys_int64s":
            return NumericLabelEncoder(operator, np.array(attr.ints), device)
        if attr.name == "keys_strings":
            # Note that these lines will fail later on for pytorch < 1.8
            decoded_keys = np.array([key.decode("UTF-8") for key in attr.strings])
            return StringLabelEncoder(operator, decoded_keys, device, extra_config)

    # If we reach here, we have a parsing error.
    raise RuntimeError("Error parsing LabelEncoder, found unexpected None for keys")
6,548 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._one_hot_encoder_implementations import OneHotEncoderString, OneHotEncoder
class OneHotEncoderString(PhysicalOperator, torch.nn.Module):
    """
    Class implementing OneHotEncoder operators for strings in PyTorch.
    Because we are dealing with tensors, strings require additional length information for processing.
    """

    def __init__(self, logical_operator, categories, device, extra_config={}):
        super(OneHotEncoderString, self).__init__(logical_operator, transformer=True)
        self.num_columns = len(categories)
        self.max_word_length = max([max([len(c) for c in cat]) for cat in categories])
        # Strings are casted to int32, therefore we need to properly size the tensor to be divisible by 4.
        while self.max_word_length % 4 != 0:
            self.max_word_length += 1
        # Propagate the largest string length seen so far through extra_config.
        max_length = 0
        if constants.MAX_STRING_LENGTH in extra_config:
            max_length = extra_config[constants.MAX_STRING_LENGTH]
        extra_config[constants.MAX_STRING_LENGTH] = max(max_length, self.max_word_length)
        # We build condition tensors as a 2d tensor of integers.
        # The first dimension is of size num words, the second dimension is fixed to the max word length (// 4).
        condition_tensors = []
        categories_idx = [0]
        for arr in categories:
            cats = (
                np.array(arr, dtype="|S" + str(self.max_word_length))  # Encode objects into 4 byte strings.
                .view("int32")
                .reshape(-1, self.max_word_length // 4)
                .tolist()
            )
            # We merge all categories for all columns into a single tensor
            condition_tensors.extend(cats)
            # Since all categories are merged together, we need to track of indexes to retrieve them at inference time.
            categories_idx.append(categories_idx[-1] + len(cats))
        self.condition_tensors = torch.nn.Parameter(torch.IntTensor(condition_tensors), requires_grad=False)
        # Per-column offsets into condition_tensors (column i spans [idx[i], idx[i+1])).
        self.categories_idx = categories_idx

    def forward(self, x):
        encoded_tensors = []
        for i in range(self.num_columns):
            # First we fetch the condition for the particular column.
            conditions = self.condition_tensors[self.categories_idx[i] : self.categories_idx[i + 1], :].view(
                1, -1, self.max_word_length // 4
            )
            # Differently than the numeric case where eq is enough, here we need to aggregate per object (dim = 2)
            # because objects can span multiple integers. We use product here since all ints must match to get encoding of 1.
            encoded_tensors.append(torch.prod(torch.eq(x[:, i : i + 1, :], conditions), dim=2))
        return torch.cat(encoded_tensors, dim=1).float()
class OneHotEncoder(PhysicalOperator, torch.nn.Module):
    """
    Class implementing OneHotEncoder operators for ints in PyTorch.
    """

    def __init__(self, logical_operator, categories, device):
        super(OneHotEncoder, self).__init__(logical_operator, transformer=True)
        self.num_columns = len(categories)

        # One tensor of category values per column, stored as non-trainable parameters.
        condition_tensors = [
            torch.nn.Parameter(torch.LongTensor(arr).detach().clone(), requires_grad=False) for arr in categories
        ]
        self.condition_tensors = torch.nn.ParameterList(condition_tensors)

    def forward(self, *x):
        encoded_tensors = []
        if len(x) > 1:
            # One input tensor per column.
            assert len(x) == self.num_columns
            for i in range(self.num_columns):
                column = x[i]
                if column.dtype != torch.int64:
                    column = column.long()
                encoded_tensors.append(torch.eq(column, self.condition_tensors[i]))
        else:
            # A single 2-dimensional tensor holding all the columns.
            data = x[0]
            if data.dtype != torch.int64:
                data = data.long()
            for i in range(self.num_columns):
                encoded_tensors.append(torch.eq(data[:, i : i + 1], self.condition_tensors[i]))
        return torch.cat(encoded_tensors, dim=1).float()
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_one_hot_encoder` function. Write a Python function `def convert_onnx_one_hot_encoder(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.ml.OneHotEncoder` Args: operator: An operator wrapping a `ai.onnx.ml.OneHotEncoder` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_one_hot_encoder(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.ml.OneHotEncoder`

    Args:
        operator: An operator wrapping a `ai.onnx.ml.OneHotEncoder` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # The ONNX node carries either integer or string categories; dispatch accordingly.
    for attr in operator.raw_operator.origin.attribute:
        if attr.name == "cats_int64s":
            return OneHotEncoder(operator, [np.array(attr.ints)], device)
        if attr.name == "cats_strings":
            decoded_cats = [x.decode("UTF-8") for x in attr.strings]
            return OneHotEncoderString(operator, [decoded_cats], device, extra_config)

    raise RuntimeError("Error parsing OneHotEncoder, no categories")
6,549 | from onnxconverter_common.registration import register_converter
from .. import constants
from .._array_feature_extractor_implementations import ArrayFeatureExtractor
class ArrayFeatureExtractor(PhysicalOperator, torch.nn.Module):
    """
    Class implementing ArrayFeatureExtractor in PyTorch
    This is used by SelectKBest, VarianceThreshold operators in scikit-learn
    """

    def __init__(self, logical_operator, column_indices, device):
        super(ArrayFeatureExtractor, self).__init__(logical_operator, transformer=True)

        # A contiguous run of indices can be taken with a cheap slice instead of a gather.
        is_contiguous = False
        if max(column_indices) - min(column_indices) + 1 == len(column_indices):
            is_contiguous = True
            self.min = min(column_indices)
            self.max = max(column_indices) + 1
        self.column_indices = torch.nn.Parameter(torch.LongTensor(column_indices), requires_grad=False)
        self.is_contiguous = is_contiguous

    def forward(self, x):
        if isinstance(x, tuple):
            return x[self.column_indices]
        if len(x.shape) == 1:
            # Promote a flat vector to a single-row matrix.
            x = x.view(1, -1)
        if self.is_contiguous:
            return x[:, self.min : self.max]
        return torch.index_select(x, 1, self.column_indices)
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_array_feature_extractor` function. Write a Python function `def convert_onnx_array_feature_extractor(operator, device, extra_config)` to solve the following problem:
Converter for `ai.onnx.ml.ArrayFeatureExtractor`. Args: operator: An operator wrapping a `ai.onnx.ml.ArrayFeatureExtractor` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_array_feature_extractor(operator, device, extra_config):
    """
    Converter for `ai.onnx.ml.ArrayFeatureExtractor`.

    Args:
        operator: An operator wrapping a `ai.onnx.ml.ArrayFeatureExtractor` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    initializers = extra_config[constants.ONNX_INITIALIZERS]
    operator_inputs = operator.raw_operator.origin.input

    # The column indices come from the (single) node input backed by a stored initializer.
    # (Cleanup: removed a dead `column_indices = []` that was immediately overwritten.)
    column_indices = None
    for input_ in operator_inputs:
        if input_ in initializers:
            assert column_indices is None, "More than one ArrayFeatureExtractor input matches with stored initializers."
            column_indices = list(initializers[input_].int64_data)
            if len(column_indices) == 0:
                # If we are here it means that the column indices were not int64.
                column_indices = list(initializers[input_].int32_data)

    assert len(column_indices) > 0, "Cannot convert ArrayFeatureExtractor with empty column indices."

    return ArrayFeatureExtractor(operator, column_indices, device)
6,550 | import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .. import constants
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class Cast(PhysicalOperator, torch.nn.Module):
    """Casts its input tensor to the ONNX TensorProto type captured at conversion time."""

    def __init__(self, logical_operator, to_type):
        super(Cast, self).__init__(logical_operator)

        assert to_type is not None
        # ONNX TensorProto data-type id (1 = FLOAT, 7 = INT64, 11 = DOUBLE).
        self._to_type = to_type

    def forward(self, x):
        if self._to_type == 1:  # Cast to float
            return x.float()
        if self._to_type == 7:  # Cast to long
            return x.long()
        if self._to_type == 11:  # Cast to double
            return x.double()
        raise RuntimeError(
            "Cast to ONNX type {} not supported yet. Please fill an issue at https://github.com/microsoft/hummingbird".format(
                self._to_type
            )
        )
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_cast` function. Write a Python function `def convert_onnx_cast(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.Cast`. Args: operator: An operator wrapping a `ai.onnx.Cast` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_cast(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.Cast`.

    Args:
        operator: An operator wrapping a `ai.onnx.Cast` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None

    # Find the target ONNX TensorProto type id stored in the "to" attribute.
    to_type = None
    for attribute in operator.raw_operator.origin.attribute:
        if attribute.name == "to":
            to_type = attribute.i

    # Generate the model.
    return Cast(operator, to_type)
6,551 | import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .. import constants
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class Concat(PhysicalOperator, torch.nn.Module):
    """Concatenates its input tensors column-wise (dim=1), unifying dtypes when needed."""

    def __init__(self, logical_operator):
        super(Concat, self).__init__(logical_operator, transformer=True)

    def forward(self, *x):
        if len(x[0].shape) > 1:
            # We need to explictly cast the tensors if their types don't agree.
            # Promotion order: any float64 input widens everything to double,
            # otherwise any float32 input widens everything to float.
            dtypes = {t.dtype for t in x}
            if len(dtypes) > 1:
                if torch.float64 in dtypes:
                    x = [t.double() for t in x]
                elif torch.float32 in dtypes:
                    x = [t.float() for t in x]
                else:
                    raise RuntimeError(
                        "Combination of data types for Concat input tensors not supported. Please fill an issue at https://github.com/microsoft/hummingbird."
                    )
            return torch.cat(x, dim=1)
        else:
            # 1-dimensional inputs: flatten each and stack them as columns.
            return torch.stack([i.view(-1) for i in x], dim=1)
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_concat` function. Write a Python function `def convert_onnx_concat(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.Concat`. Args: operator: An operator wrapping a `ai.onnx.Concat` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_concat(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.Concat`.

    Args:
        operator: An operator wrapping a `ai.onnx.Concat` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None

    # Concat carries no attributes relevant here; wrap the operator directly.
    return Concat(operator)
6,552 | import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .. import constants
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class Reshape(PhysicalOperator, torch.nn.Module):
    """Reshapes its input tensor to the fixed target shape captured at conversion time."""

    def __init__(self, logical_operator, shape):
        super(Reshape, self).__init__(logical_operator)
        self.shape = shape

    def forward(self, x):
        return x.reshape(self.shape)
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_reshape` function. Write a Python function `def convert_onnx_reshape(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.Reshape`. Args: operator: An operator wrapping a `ai.onnx.Reshape` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_reshape(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.Reshape`.

    Args:
        operator: An operator wrapping a `ai.onnx.Reshape` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None

    # The target shape is the node's second input, which must be a graph
    # initializer. (Removed a dead `shape = []` assignment that was immediately
    # overwritten.)
    initializers = extra_config[constants.ONNX_INITIALIZERS]
    shape = list(initializers[operator.raw_operator.origin.input[1]].int64_data)

    # Generate the model.
    return Reshape(operator, shape)
6,553 | import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .. import constants
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class ArgMax(PhysicalOperator, torch.nn.Module):
    """PyTorch implementation of `ai.onnx.ArgMax` along a fixed axis."""

    def __init__(self, logical_operator, axis):
        super(ArgMax, self).__init__(logical_operator)
        # Axis along which the index of the maximum is taken; fixed at conversion time.
        self.axis = axis

    def forward(self, x):
        return x.argmax(dim=self.axis)
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_argmax` function. Write a Python function `def convert_onnx_argmax(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.ArgMax`. Args: operator: An operator wrapping a `ai.onnx.ArgMax` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_argmax(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.ArgMax`.

    Args:
        operator: An operator wrapping a `ai.onnx.ArgMax` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None

    # Scan the node attributes for "axis" (if duplicated, the last occurrence wins).
    axis_values = [attr.i for attr in operator.raw_operator.origin.attribute if attr.name == "axis"]
    axis = axis_values[-1] if axis_values else None
    assert axis is not None

    # Generate the model.
    return ArgMax(operator, axis)
6,554 | import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .. import constants
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class Sum(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of the `ai.onnx.Sum` aggregation used by the converter.

    Reduces its input(s) to a single scalar tensor.
    """

    def __init__(self, logical_operator):
        super(Sum, self).__init__(logical_operator)

    def forward(self, *x):
        if len(x) > 1:
            # BUGFIX: the previous code rebound `x` to the concatenated tensor and
            # then called `torch.sum(*x)`, which unpacked the tensor's rows as
            # positional arguments and raised a TypeError. Concatenate and reduce
            # in one expression instead.
            # NOTE(review): ONNX `Sum` is defined element-wise over its inputs;
            # this (pre-existing) full reduction matches the single-input branch
            # below — confirm against callers.
            return torch.sum(torch.cat(x, dim=1))
        return torch.sum(x[0])
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_sum` function. Write a Python function `def convert_onnx_sum(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.Sum`. Args: operator: An operator wrapping a `ai.onnx.Sum` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_sum(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.Sum`.

    Args:
        operator: An operator wrapping a `ai.onnx.Sum` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None

    # Sum carries no attributes or initializers; wrap the logical operator directly.
    return Sum(operator)
6,555 | import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .. import constants
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class Add(PhysicalOperator, torch.nn.Module):
    """Element-wise addition (`ai.onnx.Add`), optionally against a constant operand."""

    def __init__(self, logical_operator, val):
        super(Add, self).__init__(logical_operator)
        if val is not None:
            # A constant second operand only makes sense with a single tensor input.
            assert len(self.inputs) == 1, "Unexpected input length for Add val"
            self.val = torch.nn.Parameter(torch.FloatTensor(val), requires_grad=False)

    def forward(self, *x):
        if len(x) != 1:
            return torch.add(*x)
        # Single input: add the constant operand captured at conversion time.
        return torch.add(x[0], self.val)
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_add` function. Write a Python function `def convert_onnx_add(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.Add`. Args: operator: An operator wrapping a `ai.onnx.Add` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_add(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.Add`.

    Args:
        operator: An operator wrapping a `ai.onnx.Add` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None

    initializers = extra_config[constants.ONNX_INITIALIZERS]
    val = None
    if operator.raw_operator.origin.input[1] in initializers:
        init = initializers[operator.raw_operator.origin.input[1]]
        # Consistent with the Sub/Div converters: accept DOUBLE (11) as well as
        # FLOAT (1) initializers. Previously a double-typed constant silently
        # produced an empty list because only `float_data` was read.
        if init.data_type == 11:
            val = list(init.double_data)
        elif init.data_type == 1:
            val = list(init.float_data)
        else:
            raise TypeError("Data type %r not supported for initializer %r." % (init.data_type, init))

    # Generate the model.
    return Add(operator, val)
6,556 | import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .. import constants
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class Sub(PhysicalOperator, torch.nn.Module):
    """Element-wise subtraction (`ai.onnx.Sub`), optionally of a constant operand."""

    def __init__(self, logical_operator, val):
        super(Sub, self).__init__(logical_operator)
        if val is not None:
            # A constant operand is only valid when there is a single tensor input.
            assert len(self.inputs) == 1, "Unexpected input length for Sub val"
            self.val = torch.nn.Parameter(torch.FloatTensor(val), requires_grad=False)

    def forward(self, *x):
        if len(x) != 1:
            return torch.sub(*x)
        # Single input: subtract the constant operand captured at conversion time.
        return torch.sub(x[0], self.val)
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_sub` function. Write a Python function `def convert_onnx_sub(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.Sub`. Args: operator: An operator wrapping a `ai.onnx.Sub` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_sub(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.Sub`.

    Args:
        operator: An operator wrapping a `ai.onnx.Sub` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None

    initializers = extra_config[constants.ONNX_INITIALIZERS]
    second_input = operator.raw_operator.origin.input[1]
    val = None
    if second_input in initializers:
        init = initializers[second_input]
        # ONNX TensorProto data types: 1 == FLOAT, 11 == DOUBLE.
        if init.data_type == 1:
            val = list(init.float_data)
        elif init.data_type == 11:
            val = list(init.double_data)
        else:
            raise TypeError("Data type %r not supported for initializer %r." % (init.data_type, init))

    # Generate the model.
    return Sub(operator, val)
6,557 | import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .. import constants
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class Neg(PhysicalOperator, torch.nn.Module):
    """Element-wise negation (`ai.onnx.Neg`)."""

    def __init__(self, logical_operator):
        super(Neg, self).__init__(logical_operator)

    def forward(self, *x):
        # torch.neg accepts a single tensor; unpack the (single-element) input tuple.
        return torch.neg(*x)
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_neg` function. Write a Python function `def convert_onnx_neg(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.Neg`. Args: operator: An operator wrapping a `ai.onnx.Neg` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_neg(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.Neg`.

    Args:
        operator: An operator wrapping a `ai.onnx.Neg` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None

    # Negation needs no parameters beyond the logical operator itself.
    return Neg(operator)
6,558 | import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .. import constants
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class Abs(PhysicalOperator, torch.nn.Module):
    """Element-wise absolute value (`ai.onnx.Abs`)."""

    def __init__(self, logical_operator):
        super(Abs, self).__init__(logical_operator)

    def forward(self, x):
        return x.abs()
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_abs` function. Write a Python function `def convert_onnx_abs(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.Abs`. Args: operator: An operator wrapping a `ai.onnx.Abs` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_abs(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.Abs`.

    Args:
        operator: An operator wrapping a `ai.onnx.Abs` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None

    # Abs needs no parameters beyond the logical operator itself.
    return Abs(operator)
6,559 | import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .. import constants
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class Mul(PhysicalOperator, torch.nn.Module):
    """Element-wise multiplication (`ai.onnx.Mul`), optionally by a constant operand."""

    def __init__(self, logical_operator, val):
        super(Mul, self).__init__(logical_operator)
        if val is not None:
            # A constant operand is only valid when there is a single tensor input.
            assert len(self.inputs) == 1, "Unexpected input length for Mul val"
            self.val = torch.nn.Parameter(torch.FloatTensor(val), requires_grad=False)

    def forward(self, *x):
        if len(x) != 1:
            return torch.mul(*x)
        # Single input: multiply by the constant operand captured at conversion time.
        return torch.mul(x[0], self.val)
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_mul` function. Write a Python function `def convert_onnx_mul(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.Mul`. Args: operator: An operator wrapping a `ai.onnx.Mul` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_mul(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.Mul`.

    Args:
        operator: An operator wrapping a `ai.onnx.Mul` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None

    initializers = extra_config[constants.ONNX_INITIALIZERS]
    val = None
    if operator.raw_operator.origin.input[1] in initializers:
        init = initializers[operator.raw_operator.origin.input[1]]
        # Consistent with the Sub/Div converters: accept DOUBLE (11) as well as
        # FLOAT (1) initializers. Previously a double-typed constant silently
        # produced an empty list because only `float_data` was read.
        if init.data_type == 11:
            val = list(init.double_data)
        elif init.data_type == 1:
            val = list(init.float_data)
        else:
            raise TypeError("Data type %r not supported for initializer %r." % (init.data_type, init))

    # Generate the model.
    return Mul(operator, val)
6,560 | import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .. import constants
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class MatMul(PhysicalOperator, torch.nn.Module):
    """PyTorch implementation of `ai.onnx.MatMul` with a constant right-hand operand."""

    def __init__(self, logical_operator, val):
        super(MatMul, self).__init__(logical_operator)
        # NOTE(review): `val` is stored as-is. The converter passes a flat Python
        # list, while `torch.mm` below requires a 2-D tensor — confirm that the
        # operand is converted/reshaped somewhere before this path is exercised.
        self.val = val

    def forward(self, x):
        # Matrix-multiply the input by the stored right-hand operand.
        return torch.mm(x, self.val)
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_mat_mul` function. Write a Python function `def convert_onnx_mat_mul(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.MatMul`. Args: operator: An operator wrapping a `ai.onnx.MatMul` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_mat_mul(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.MatMul`.
    Args:
        operator: An operator wrapping a `ai.onnx.MatMul` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy
    Returns:
        A PyTorch model
    """
    assert operator is not None
    initializers = extra_config[constants.ONNX_INITIALIZERS]
    # NOTE(review): this flattens the initializer into a plain Python list and
    # ignores its dims, yet `MatMul.forward` feeds it to `torch.mm`, which needs
    # a 2-D tensor — confirm the intended shape handling with the callers.
    val = list(initializers[operator.raw_operator.origin.input[1]].float_data)
    # Generate the model.
    return MatMul(operator, val)
6,561 | import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .. import constants
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class Div(PhysicalOperator, torch.nn.Module):
    """Element-wise division (`ai.onnx.Div`), optionally by a constant operand."""

    def __init__(self, logical_operator, val):
        super(Div, self).__init__(logical_operator)
        if val is not None:
            # A constant divisor is only valid when there is a single tensor input.
            assert len(self.inputs) == 1, "Unexpected input length for Div val"
            self.val = torch.nn.Parameter(torch.FloatTensor(val), requires_grad=False)

    def forward(self, *x):
        if len(x) != 1:
            return torch.div(*x)
        # Single input: divide by the constant operand captured at conversion time.
        return torch.div(x[0], self.val)
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_div` function. Write a Python function `def convert_onnx_div(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.Div`. Args: operator: An operator wrapping a `ai.onnx.Div` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_div(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.Div`.

    Args:
        operator: An operator wrapping a `ai.onnx.Div` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None

    initializers = extra_config[constants.ONNX_INITIALIZERS]
    divisor_name = operator.raw_operator.origin.input[1]
    val = None
    if divisor_name in initializers:
        init = initializers[divisor_name]
        # ONNX TensorProto data types: 1 == FLOAT, 11 == DOUBLE.
        if init.data_type == 1:
            val = list(init.float_data)
        elif init.data_type == 11:
            val = list(init.double_data)
        else:
            raise TypeError("Data type %r not supported for initializer %r." % (init.data_type, init))

    # Generate the model.
    return Div(operator, val)
6,562 | import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .. import constants
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class Less(PhysicalOperator, torch.nn.Module):
    """Element-wise `x < threshold` comparison (`ai.onnx.Less`) against a constant."""

    def __init__(self, logical_operator, val):
        super(Less, self).__init__(logical_operator)
        # Constant right-hand operand of the comparison.
        self.val = torch.nn.Parameter(torch.FloatTensor(val), requires_grad=False)

    def forward(self, x):
        return x < self.val
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_less` function. Write a Python function `def convert_onnx_less(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.Less`. Args: operator: An operator wrapping a `ai.onnx.Less` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_less(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.Less`.

    Args:
        operator: An operator wrapping a `ai.onnx.Less` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None

    # The comparison threshold is the node's second input, read from the initializers.
    initializers = extra_config[constants.ONNX_INITIALIZERS]
    threshold_name = operator.raw_operator.origin.input[1]
    val = list(initializers[threshold_name].float_data)

    # Generate the model.
    return Less(operator, val)
6,563 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._linear_implementations import LinearModel
class LinearModel(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of a linear model (regressor or classifier).

    Regression returns the raw (or exp-transformed) scores; classification
    returns `(predicted_classes, class_probabilities)`.
    """

    def __init__(
        self,
        logical_operator,
        coefficients,
        intercepts,
        device,
        classes=[0],  # NOTE: mutable default, but only read (never mutated) below.
        multi_class=None,
        loss=None,
        is_linear_regression=False,
    ):
        """
        Args:
            logical_operator: The wrapped logical operator.
            coefficients: numpy array of coefficients, one column per class.
            intercepts: numpy array of intercepts; flattened to a vector.
            device: Target device string (not used directly in this class).
            classes: Class labels for classification models.
            multi_class: "multinomial" applies softmax; anything else applies the
                per-class sigmoid/huber transform below.
            loss: Training loss name; classifiers default to "log";
                "modified_huber" changes how scores become probabilities.
            is_linear_regression: True to run in regression mode.
        """
        super(LinearModel, self).__init__(logical_operator)
        self.coefficients = torch.nn.Parameter(torch.from_numpy(coefficients).detach().clone(), requires_grad=False)
        self.intercepts = torch.nn.Parameter(torch.from_numpy(intercepts).view(-1).detach().clone(), requires_grad=False)
        self.classes = torch.nn.Parameter(torch.IntTensor(classes), requires_grad=False)
        self.multi_class = multi_class
        self.regression = is_linear_regression
        self.classification = not is_linear_regression
        self.loss = loss
        # Classifiers with no explicit loss default to log loss.
        if self.loss is None and self.classification:
            self.loss = "log"
        self.binary_classification = False
        if len(classes) == 2:
            self.binary_classification = True

    def forward(self, x):
        x = x.float()
        # Linear part: intercepts + x @ coefficients.
        output = torch.addmm(self.intercepts, x, self.coefficients)
        if self.regression:
            # "log" loss in regression mode inverts a log-space model with exp.
            # NOTE(review): semantics inferred from this code path only — confirm.
            if self.loss == "log":
                return torch.exp(output)
            return output
        # Classification: derive the predicted class index from the raw scores.
        if self.binary_classification:
            # Single score column: positive score selects class index 1.
            indices = (output > 0).squeeze().int()
        else:
            indices = torch.argmax(output, dim=1)
        predict_res = torch.index_select(self.classes, 0, indices)
        # Turn raw scores into probabilities.
        if self.multi_class == "multinomial":
            output = torch.softmax(output, dim=1)
        else:
            if self.loss == "modified_huber":
                # Clip scores to [-1, 1], then map linearly onto [0, 1].
                output = torch.clip(output, -1, 1)
                output += 1
                output /= 2
            else:
                output = torch.sigmoid(output)
            if not self.binary_classification:
                if self.loss == "modified_huber":
                    # This loss might assign zero to all classes, which doesn't
                    # normalize neatly; work around this to produce uniform
                    # probabilities.
                    prob_sum = torch.sum(output, dim=1, keepdim=False)
                    all_zero = prob_sum == 0
                    if torch.any(all_zero):
                        output[all_zero, :] = 1
                        prob_sum[all_zero] = len(self.classes)
                    output /= prob_sum.view((output.shape[0], -1))
                else:
                    # One-vs-rest: normalize the per-class sigmoids to sum to 1.
                    output /= torch.sum(output, dim=1, keepdim=True)
        if self.binary_classification:
            # Expand the single positive-class column into [P(neg), P(pos)].
            output = torch.cat([1 - output, output], dim=1)
        return predict_res, output
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_linear_model` function. Write a Python function `def convert_onnx_linear_model(operator, device=None, extra_config={})` to solve the following problem:
Converter for `ai.onnx.ml.LinearClassifier`. Args: operator: An operator wrapping a `ai.onnx.ml.LinearClassifier` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_linear_model(operator, device=None, extra_config={}):
    """
    Converter for `ai.onnx.ml.LinearClassifier`.
    Args:
        operator: An operator wrapping a `ai.onnx.ml.LinearClassifier` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy
    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"
    coefficients = intercepts = classes = multi_class = None
    # Pull the flat attribute list off the ONNX node.
    # NOTE(review): only integer class labels ("classlabels_ints") are parsed;
    # a string-labeled classifier would leave `classes` None and raise below.
    for attr in operator.raw_operator.origin.attribute:
        if attr.name == "coefficients":
            coefficients = np.array(attr.floats).astype("float32")
        elif attr.name == "intercepts":
            intercepts = np.array(attr.floats).astype("float32")
        elif attr.name == "classlabels_ints":
            classes = np.array(attr.ints)
        elif attr.name == "multi_class":
            # NOTE(review): reads `len(classes)`, so it assumes "classlabels_ints"
            # appears before "multi_class" in the attribute list — confirm this
            # ordering is guaranteed by the producer.
            if len(classes) > 2 and attr.i != 0:
                # https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md#ai.onnx.ml.LinearClassifier
                multi_class = "multinomial"
    if any(v is None for v in [coefficients, intercepts, classes]):
        raise RuntimeError("Error parsing LinearClassifier, found unexpected None")
    if multi_class is None:  # if 'multi_class' attr was not present
        multi_class = "none" if len(classes) < 3 else "ovr"
    # Now reshape the coefficients/intercepts
    if len(classes) == 2:
        # for the binary case, it seems there is a duplicate copy of everything with opposite +/- sign. This just takes the correct copy
        coefficients = np.array([[np.array(val).astype("float32")] for val in coefficients[len(coefficients) // 2 :]]).astype(
            "float32"
        )
        intercepts = np.array([[np.array(val).astype("float32")] for val in intercepts[len(intercepts) // 2 :]]).astype(
            "float32"
        )
    elif len(classes) > 2:
        # intercepts are OK in this case.
        # reshape coefficients into tuples
        tmp = coefficients.reshape(len(classes), (len(coefficients) // len(classes)))
        # then unzip the zipmap format
        coefficients = np.array(list(zip(*tmp)))
    else:
        raise RuntimeError("Error parsing LinearClassifier, length of classes {} unexpected:{}".format(len(classes), classes))
    return LinearModel(
        operator, coefficients, intercepts, device, classes=classes, multi_class=multi_class, is_linear_regression=False
    )
6,564 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._linear_implementations import LinearModel
class LinearModel(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of a linear model (regressor or classifier).

    Regression returns the raw (or exp-transformed) scores; classification
    returns `(predicted_classes, class_probabilities)`.
    """

    def __init__(
        self,
        logical_operator,
        coefficients,
        intercepts,
        device,
        classes=[0],  # NOTE: mutable default, but only read (never mutated) below.
        multi_class=None,
        loss=None,
        is_linear_regression=False,
    ):
        """
        Args:
            logical_operator: The wrapped logical operator.
            coefficients: numpy array of coefficients, one column per class.
            intercepts: numpy array of intercepts; flattened to a vector.
            device: Target device string (not used directly in this class).
            classes: Class labels for classification models.
            multi_class: "multinomial" applies softmax; anything else applies the
                per-class sigmoid/huber transform below.
            loss: Training loss name; classifiers default to "log";
                "modified_huber" changes how scores become probabilities.
            is_linear_regression: True to run in regression mode.
        """
        super(LinearModel, self).__init__(logical_operator)
        self.coefficients = torch.nn.Parameter(torch.from_numpy(coefficients).detach().clone(), requires_grad=False)
        self.intercepts = torch.nn.Parameter(torch.from_numpy(intercepts).view(-1).detach().clone(), requires_grad=False)
        self.classes = torch.nn.Parameter(torch.IntTensor(classes), requires_grad=False)
        self.multi_class = multi_class
        self.regression = is_linear_regression
        self.classification = not is_linear_regression
        self.loss = loss
        # Classifiers with no explicit loss default to log loss.
        if self.loss is None and self.classification:
            self.loss = "log"
        self.binary_classification = False
        if len(classes) == 2:
            self.binary_classification = True

    def forward(self, x):
        x = x.float()
        # Linear part: intercepts + x @ coefficients.
        output = torch.addmm(self.intercepts, x, self.coefficients)
        if self.regression:
            # "log" loss in regression mode inverts a log-space model with exp.
            # NOTE(review): semantics inferred from this code path only — confirm.
            if self.loss == "log":
                return torch.exp(output)
            return output
        # Classification: derive the predicted class index from the raw scores.
        if self.binary_classification:
            # Single score column: positive score selects class index 1.
            indices = (output > 0).squeeze().int()
        else:
            indices = torch.argmax(output, dim=1)
        predict_res = torch.index_select(self.classes, 0, indices)
        # Turn raw scores into probabilities.
        if self.multi_class == "multinomial":
            output = torch.softmax(output, dim=1)
        else:
            if self.loss == "modified_huber":
                # Clip scores to [-1, 1], then map linearly onto [0, 1].
                output = torch.clip(output, -1, 1)
                output += 1
                output /= 2
            else:
                output = torch.sigmoid(output)
            if not self.binary_classification:
                if self.loss == "modified_huber":
                    # This loss might assign zero to all classes, which doesn't
                    # normalize neatly; work around this to produce uniform
                    # probabilities.
                    prob_sum = torch.sum(output, dim=1, keepdim=False)
                    all_zero = prob_sum == 0
                    if torch.any(all_zero):
                        output[all_zero, :] = 1
                        prob_sum[all_zero] = len(self.classes)
                    output /= prob_sum.view((output.shape[0], -1))
                else:
                    # One-vs-rest: normalize the per-class sigmoids to sum to 1.
                    output /= torch.sum(output, dim=1, keepdim=True)
        if self.binary_classification:
            # Expand the single positive-class column into [P(neg), P(pos)].
            output = torch.cat([1 - output, output], dim=1)
        return predict_res, output
The provided code snippet includes necessary dependencies for implementing the `convert_onnx_linear_regression_model` function. Write a Python function `def convert_onnx_linear_regression_model(operator, device, extra_config)` to solve the following problem:
Converter for `ai.onnx.ml.LinearRegression` Args: operator: An operator wrapping a `ai.onnx.ml.LinearRegression` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_onnx_linear_regression_model(operator, device, extra_config):
    """
    Converter for `ai.onnx.ml.LinearRegression`

    Args:
        operator: An operator wrapping a `ai.onnx.ml.LinearRegression` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    coefficients = None
    intercepts = None
    for attr in operator.raw_operator.origin.attribute:
        if attr.name == "coefficients":
            # One column vector per coefficient, matching the layout LinearModel expects.
            coefficients = np.array([[np.array(val).astype("float32")] for val in attr.floats]).astype("float32")
        elif attr.name == "intercepts":
            intercepts = np.array(attr.floats).astype("float32")

    if coefficients is None or intercepts is None:
        raise RuntimeError("Error parsing LinearRegression, found unexpected None")

    return LinearModel(operator, coefficients, intercepts, device, is_linear_regression=True)
6,565 | import numpy as np
from onnxconverter_common.registration import register_converter
from . import constants
from ._gbdt_commons import convert_gbdt_classifier_common, convert_gbdt_common
from ._tree_commons import TreeParameters
def _get_tree_parameters(tree_info, extra_config):
    """
    Parse a single tree dump into an in-memory friendly representation of its
    structure (left/right children, split features, thresholds, leaf values).
    """
    lefts, rights, features, thresholds, values = [], [], [], [], []
    # Walk the tree from the root (node id 0), filling the flat arrays in place.
    _tree_traversal(tree_info["tree_structure"], lefts, rights, features, thresholds, values, 0)
    return TreeParameters(lefts, rights, features, thresholds, values)
def convert_gbdt_classifier_common(
    operator, tree_infos, get_tree_parameters, n_features, n_classes, classes=None, extra_config={}, decision_cond="<="
):
    """
    Common converter for GBDT classifiers.
    Args:
        operator: The logical operator wrapping the model being converted
        tree_infos: The information representing a tree (ensemble)
        get_tree_parameters: A function specifying how to parse the tree_infos into parameters
        n_features: The number of features input to the model
        n_classes: How many classes are expected. 1 for regression tasks
        classes: The classes used for classification. None if implementing a regression model
        extra_config: Extra configuration used to properly implement the source tree
        decision_cond: The condition of the decision nodes in the x <cond> threshold order. Default '<='. Values can be <=, <, >=, >
    Returns:
        A tree implementation in PyTorch
    """
    assert tree_infos is not None
    assert get_tree_parameters is not None
    assert n_features is not None
    assert n_classes is not None

    # Rearrange classes and tree information.
    # Binary classification uses a single score column.
    if n_classes == 2:
        n_classes -= 1
    if classes is None:
        classes = [i for i in range(n_classes)]
    # Regroup the trees so that all trees of a class are contiguous, unless
    # disabled via extra_config. NOTE(review): assumes the input tree list is
    # interleaved round-robin across classes — confirm against the producers.
    reorder_trees = True
    if constants.REORDER_TREES in extra_config:
        reorder_trees = extra_config[constants.REORDER_TREES]
    if reorder_trees and n_classes > 1:
        tree_infos = [tree_infos[i * n_classes + j] for j in range(n_classes) for i in range(len(tree_infos) // n_classes)]

    return convert_gbdt_common(
        operator, tree_infos, get_tree_parameters, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
    )
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_lgbm_classifier` function. Write a Python function `def convert_sklearn_lgbm_classifier(operator, device, extra_config)` to solve the following problem:
Converter for `lightgbm.LGBMClassifier` (trained using the Sklearn API). Args: operator: An operator wrapping a `lightgbm.LGBMClassifier` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_lgbm_classifier(operator, device, extra_config):
    """
    Converter for `lightgbm.LGBMClassifier` (trained using the Sklearn API).

    Args:
        operator: An operator wrapping a `lightgbm.LGBMClassifier` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    raw_model = operator.raw_operator
    # Random-forest boosting is not handled by the direct path.
    if raw_model.boosting_type == "rf":
        raise RuntimeError("Unable to directly convert this model. It should be converted into ONNX first.")

    # Pull the tree ensemble description out of the underlying booster.
    feature_count = raw_model._n_features
    trees = raw_model.booster_.dump_model()["tree_info"]
    class_count = raw_model._n_classes

    return convert_gbdt_classifier_common(
        operator, trees, _get_tree_parameters, feature_count, class_count, extra_config=extra_config
    )
6,566 | import numpy as np
from onnxconverter_common.registration import register_converter
from . import constants
from ._gbdt_commons import convert_gbdt_classifier_common, convert_gbdt_common
from ._tree_commons import TreeParameters
def _get_tree_parameters(tree_info, extra_config):
    """
    Parse the tree and returns an in-memory friendly representation of its structure.
    """
    # Accumulators filled in-place by the recursive traversal.
    lefts, rights, features, thresholds, values = [], [], [], [], []
    _tree_traversal(tree_info["tree_structure"], lefts, rights, features, thresholds, values, 0)
    return TreeParameters(lefts, rights, features, thresholds, values)
def convert_gbdt_common(
    operator, tree_infos, get_tree_parameters, n_features, classes=None, extra_config={}, decision_cond="<="
):
    """
    Common converter for GBDT models.
    Args:
        operator: The operator wrapping the source model
        tree_infos: The information representing a tree (ensemble)
        get_tree_parameters: A function specifying how to parse the tree_infos into parameters
        n_features: The number of features input to the model
        classes: The classes used for classification. None if implementing a regression model
        extra_config: Extra configuration used to properly implement the source tree
        decision_cond: The condition of the decision nodes in the x <cond> threshold order. Default '<='. Values can be <=, <, >=, >
    Returns:
        A tree implementation in PyTorch
    """
    assert tree_infos is not None
    assert get_tree_parameters is not None
    assert n_features is not None

    # Parse every tree and pick the implementation strategy (gemm / tree_trav / perf_tree_trav).
    tree_parameters, max_depth, tree_type = get_tree_params_and_type(tree_infos, get_tree_parameters, extra_config)

    # Apply learning rate directly on the values rather then at runtime.
    if constants.LEARNING_RATE in extra_config:
        for parameter in tree_parameters:
            parameter.values = parameter.values * extra_config[constants.LEARNING_RATE]

    # Generate the model parameters based on the selected strategy.
    if tree_type == TreeImpl.gemm:
        net_parameters = [
            get_parameters_for_gemm_common(
                tree_param.lefts,
                tree_param.rights,
                tree_param.features,
                tree_param.thresholds,
                tree_param.values,
                n_features,
                extra_config,
            )
            for tree_param in tree_parameters
        ]
    else:
        # Some models require some additional massaging of the parameters before generating the tree_trav implementation.
        get_parameters_for_tree_trav = get_parameters_for_tree_trav_common
        if constants.GET_PARAMETERS_FOR_TREE_TRAVERSAL in extra_config:
            get_parameters_for_tree_trav = extra_config[constants.GET_PARAMETERS_FOR_TREE_TRAVERSAL]
        net_parameters = [
            get_parameters_for_tree_trav(
                tree_param.lefts,
                tree_param.rights,
                tree_param.features,
                tree_param.thresholds,
                tree_param.values,
                extra_config,
            )
            for tree_param in tree_parameters
        ]

    # Define the post transform.
    if constants.BASE_PREDICTION in extra_config:
        base_pred = torch.FloatTensor(extra_config[constants.BASE_PREDICTION])
        # For newer versions of scikit-learn (>1.1.1),
        # the base prediction is wrapped in two extra leading dimensions: take the first entry of each.
        if len(base_pred.shape) == 4:
            base_pred = base_pred[0][0]
        base_prediction = torch.nn.Parameter(base_pred, requires_grad=False)
        # NOTE: extra_config is mutated here; the wrapped Parameter replaces the raw value.
        extra_config[constants.BASE_PREDICTION] = base_prediction

    # For models following the Sklearn API we need to build the post transform ourselves.
    if classes is not None and constants.POST_TRANSFORM not in extra_config:
        if len(classes) <= 2:
            extra_config[constants.POST_TRANSFORM] = constants.SIGMOID
        else:
            extra_config[constants.POST_TRANSFORM] = constants.SOFTMAX

    # Set the post transform.
    if constants.POST_TRANSFORM in extra_config:
        if extra_config[constants.POST_TRANSFORM] == constants.SIGMOID:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplySigmoidBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplySigmoidPostTransform()
        elif extra_config[constants.POST_TRANSFORM] == constants.SOFTMAX:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplySoftmaxBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplySoftmaxPostTransform()
        elif extra_config[constants.POST_TRANSFORM] == constants.TWEEDIE:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplyTweedieBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplyTweediePostTransform()
        elif extra_config[constants.POST_TRANSFORM] is None:
            # Explicit None means "no transform": use the identity post transform.
            extra_config[constants.POST_TRANSFORM] = PostTransform()
        else:
            raise NotImplementedError("Post transform {} not implemeneted yet".format(extra_config[constants.POST_TRANSFORM]))
    elif constants.BASE_PREDICTION in extra_config:
        extra_config[constants.POST_TRANSFORM] = ApplyBasePredictionPostTransform(base_prediction)

    # Generate the tree implementation based on the selected strategy.
    if tree_type == TreeImpl.gemm:
        return GEMMGBDTImpl(
            operator, net_parameters, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
    if tree_type == TreeImpl.tree_trav:
        return TreeTraversalGBDTImpl(
            operator, net_parameters, max_depth, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
    else:  # Remaining possible case: tree_type == TreeImpl.perf_tree_trav.
        return PerfectTreeTraversalGBDTImpl(
            operator, net_parameters, max_depth, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_lgbm_regressor` function. Write a Python function `def convert_sklearn_lgbm_regressor(operator, device, extra_config)` to solve the following problem:
Converter for `lightgbm.LGBMRegressor` and `lightgbm.LGBMRanker` (trained using the Sklearn API). Args: operator: An operator wrapping a `lightgbm.LGBMRegressor` or `lightgbm.LGBMRanker` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_lgbm_regressor(operator, device, extra_config):
    """
    Converter for `lightgbm.LGBMRegressor` and `lightgbm.LGBMRanker` (trained using the Sklearn API).

    Args:
        operator: An operator wrapping a `lightgbm.LGBMRegressor` or `lightgbm.LGBMRanker` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    raw_model = operator.raw_operator
    # Extract the ensemble description from the underlying booster.
    feature_count = raw_model._n_features
    trees = raw_model.booster_.dump_model()["tree_info"]

    # Tweedie-regression objectives need a dedicated post transform.
    if raw_model._objective == "tweedie":
        extra_config[constants.POST_TRANSFORM] = constants.TWEEDIE

    return convert_gbdt_common(operator, trees, _get_tree_parameters, feature_count, extra_config=extra_config)
6,567 | import numpy as np
from onnxconverter_common.registration import register_converter
from . import constants
from ._gbdt_commons import convert_gbdt_classifier_common, convert_gbdt_common
from ._tree_commons import TreeParameters
def _get_tree_parameters(tree_info, extra_config):
    """
    Parse the tree and returns an in-memory friendly representation of its structure.
    """
    # Accumulators filled in-place by the recursive traversal.
    lefts, rights, features, thresholds, values = [], [], [], [], []
    _tree_traversal(tree_info["tree_structure"], lefts, rights, features, thresholds, values, 0)
    return TreeParameters(lefts, rights, features, thresholds, values)
def convert_gbdt_common(
    operator, tree_infos, get_tree_parameters, n_features, classes=None, extra_config={}, decision_cond="<="
):
    """
    Common converter for GBDT models.
    Args:
        operator: The operator wrapping the source model
        tree_infos: The information representing a tree (ensemble)
        get_tree_parameters: A function specifying how to parse the tree_infos into parameters
        n_features: The number of features input to the model
        classes: The classes used for classification. None if implementing a regression model
        extra_config: Extra configuration used to properly implement the source tree
        decision_cond: The condition of the decision nodes in the x <cond> threshold order. Default '<='. Values can be <=, <, >=, >
    Returns:
        A tree implementation in PyTorch
    """
    assert tree_infos is not None
    assert get_tree_parameters is not None
    assert n_features is not None

    # Parse every tree and pick the implementation strategy (gemm / tree_trav / perf_tree_trav).
    tree_parameters, max_depth, tree_type = get_tree_params_and_type(tree_infos, get_tree_parameters, extra_config)

    # Apply learning rate directly on the values rather then at runtime.
    if constants.LEARNING_RATE in extra_config:
        for parameter in tree_parameters:
            parameter.values = parameter.values * extra_config[constants.LEARNING_RATE]

    # Generate the model parameters based on the selected strategy.
    if tree_type == TreeImpl.gemm:
        net_parameters = [
            get_parameters_for_gemm_common(
                tree_param.lefts,
                tree_param.rights,
                tree_param.features,
                tree_param.thresholds,
                tree_param.values,
                n_features,
                extra_config,
            )
            for tree_param in tree_parameters
        ]
    else:
        # Some models require some additional massaging of the parameters before generating the tree_trav implementation.
        get_parameters_for_tree_trav = get_parameters_for_tree_trav_common
        if constants.GET_PARAMETERS_FOR_TREE_TRAVERSAL in extra_config:
            get_parameters_for_tree_trav = extra_config[constants.GET_PARAMETERS_FOR_TREE_TRAVERSAL]
        net_parameters = [
            get_parameters_for_tree_trav(
                tree_param.lefts,
                tree_param.rights,
                tree_param.features,
                tree_param.thresholds,
                tree_param.values,
                extra_config,
            )
            for tree_param in tree_parameters
        ]

    # Define the post transform.
    if constants.BASE_PREDICTION in extra_config:
        base_pred = torch.FloatTensor(extra_config[constants.BASE_PREDICTION])
        # For newer versions of scikit-learn (>1.1.1),
        # the base prediction is wrapped in two extra leading dimensions: take the first entry of each.
        if len(base_pred.shape) == 4:
            base_pred = base_pred[0][0]
        base_prediction = torch.nn.Parameter(base_pred, requires_grad=False)
        # NOTE: extra_config is mutated here; the wrapped Parameter replaces the raw value.
        extra_config[constants.BASE_PREDICTION] = base_prediction

    # For models following the Sklearn API we need to build the post transform ourselves.
    if classes is not None and constants.POST_TRANSFORM not in extra_config:
        if len(classes) <= 2:
            extra_config[constants.POST_TRANSFORM] = constants.SIGMOID
        else:
            extra_config[constants.POST_TRANSFORM] = constants.SOFTMAX

    # Set the post transform.
    if constants.POST_TRANSFORM in extra_config:
        if extra_config[constants.POST_TRANSFORM] == constants.SIGMOID:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplySigmoidBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplySigmoidPostTransform()
        elif extra_config[constants.POST_TRANSFORM] == constants.SOFTMAX:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplySoftmaxBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplySoftmaxPostTransform()
        elif extra_config[constants.POST_TRANSFORM] == constants.TWEEDIE:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplyTweedieBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplyTweediePostTransform()
        elif extra_config[constants.POST_TRANSFORM] is None:
            # Explicit None means "no transform": use the identity post transform.
            extra_config[constants.POST_TRANSFORM] = PostTransform()
        else:
            raise NotImplementedError("Post transform {} not implemeneted yet".format(extra_config[constants.POST_TRANSFORM]))
    elif constants.BASE_PREDICTION in extra_config:
        extra_config[constants.POST_TRANSFORM] = ApplyBasePredictionPostTransform(base_prediction)

    # Generate the tree implementation based on the selected strategy.
    if tree_type == TreeImpl.gemm:
        return GEMMGBDTImpl(
            operator, net_parameters, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
    if tree_type == TreeImpl.tree_trav:
        return TreeTraversalGBDTImpl(
            operator, net_parameters, max_depth, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
    else:  # Remaining possible case: tree_type == TreeImpl.perf_tree_trav.
        return PerfectTreeTraversalGBDTImpl(
            operator, net_parameters, max_depth, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
The provided code snippet includes necessary dependencies for implementing the `convert_lgbm_booster` function. Write a Python function `def convert_lgbm_booster(operator, device, extra_config)` to solve the following problem:
Converter for `lightgbm.Booster` Args: operator: An operator wrapping a `lightgbm.Booster` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_lgbm_booster(operator, device, extra_config):
    """
    Converter for `lightgbm.Booster`.

    Args:
        operator: An operator wrapping a `lightgbm.Booster` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    booster = operator.raw_operator
    # The booster exposes the feature list and the full ensemble dump directly.
    feature_count = len(booster.feature_name())
    trees = booster.dump_model()["tree_info"]

    return convert_gbdt_common(operator, trees, _get_tree_parameters, feature_count, extra_config=extra_config)
6,568 | import torch
import numpy as np
from onnxconverter_common.topology import Variable
from onnxconverter_common.registration import register_converter
from .._physical_operator import PhysicalOperator
from .._discretizer_implementations import Binarizer, KBinsDiscretizer
class KBinsDiscretizer(PhysicalOperator, torch.nn.Module):
    """
    Torch implementation of a bin discretizer: maps each feature value to the
    index of the bin it falls into, optionally one-hot encoding the result.
    """

    def __init__(self, logical_operator, encode, n_bins, bin_edges, labels, device):
        super(KBinsDiscretizer, self).__init__(logical_operator)
        self.transformer = True
        self.encode = encode
        # Keep only the interior edges; the outermost edges act as open bounds.
        self.ge_tensor = torch.FloatTensor(bin_edges[:, 1:-1])
        self.ohe = OneHotEncoder(logical_operator, labels, device)
        # Per-feature maximum bin index, used to clip out-of-range values (sklearn semantics).
        self.n_bins = None if n_bins is None else torch.FloatTensor([[n - 1 for n in n_bins]])

    def forward(self, x):
        # Count how many interior edges each value is >= to; that count is the bin index.
        bin_idx = torch.ge(torch.unsqueeze(x, 2), self.ge_tensor).float().sum(dim=2)
        if self.n_bins is not None:
            # Clipping the encoded values (Needed for sklearn).
            bin_idx = torch.min(self.n_bins, bin_idx)
        if self.encode in ["onehot-dense", "onehot"]:
            bin_idx = self.ohe(bin_idx)
        return bin_idx
The provided code snippet includes necessary dependencies for implementing the `convert_sparkml_bucketizer` function. Write a Python function `def convert_sparkml_bucketizer(operator, device, extra_config)` to solve the following problem:
Converter for `pyspark.ml.feature.Bucketizer` Args: operator: An operator wrapping a `pyspark.ml.feature.QuantileDiscretizer` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sparkml_bucketizer(operator, device, extra_config):
    """
    Converter for `pyspark.ml.feature.Bucketizer`.

    Args:
        operator: An operator wrapping a `pyspark.ml.feature.Bucketizer` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    # Bucketizer holds a single split list; keep the list-of-lists shape the
    # discretizer implementation expects.
    splits = [operator.raw_operator.getSplits()]
    widest = max(len(edges) for edges in splits)

    labels = []
    for idx, edges in enumerate(splits):
        # Bin labels are just consecutive integers, one per interval.
        labels.append(np.arange(len(edges) - 1))
        # Pad shorter edge lists with +inf so all rows share the same length.
        if len(edges) < widest:
            splits[idx] = edges + [np.inf] * (widest - len(edges))

    return KBinsDiscretizer(operator, None, None, np.array(splits), labels, device)
6,569 | import torch
import numpy as np
from onnxconverter_common.topology import Variable
from onnxconverter_common.registration import register_converter
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class Concat(PhysicalOperator, torch.nn.Module):
    """Concatenates its input tensors column-wise into a single feature matrix."""

    def __init__(self, logical_operator):
        super(Concat, self).__init__(logical_operator, transformer=True)

    def forward(self, *x):
        tensors = list(x)
        if len(tensors[0].shape) <= 1:
            # 1D inputs: flatten each and place them side by side as columns.
            return torch.stack([t.view(-1) for t in tensors], dim=1)

        # 2D+ inputs: unify dtypes before concatenating along the feature axis.
        seen_dtypes = {t.dtype for t in tensors}
        if len(seen_dtypes) > 1:
            if torch.float64 in seen_dtypes:
                tensors = [t.double() for t in tensors]
            elif torch.float32 in seen_dtypes:
                tensors = [t.float() for t in tensors]
            else:
                raise RuntimeError(
                    "Combination of data types for Concat input tensors not supported. Please fill an issue at https://github.com/microsoft/hummingbird."
                )
        return torch.cat(tensors, dim=1)
The provided code snippet includes necessary dependencies for implementing the `convert_sparkml_vector_assembler` function. Write a Python function `def convert_sparkml_vector_assembler(operator, device, extra_config)` to solve the following problem:
Converter for `pyspark.ml.feature.VectorAssembler` Args: operator: An operator wrapping a `pyspark.ml.feature.VectorAssembler` device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sparkml_vector_assembler(operator, device, extra_config):
    """
    Converter for `pyspark.ml.feature.VectorAssembler`.

    A VectorAssembler simply concatenates its input columns, so the conversion
    is a direct mapping onto the Concat physical operator.

    Args:
        operator: An operator wrapping a `pyspark.ml.feature.VectorAssembler`
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    concat_op = Concat(operator)
    return concat_op
6,570 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._linear_implementations import LinearModel
class LinearModel(PhysicalOperator, torch.nn.Module):
    """
    Torch implementation of generalized linear models (regression and
    classification). The forward pass computes x @ coefficients + intercepts
    and applies the loss-specific link / normalization on top.
    """

    def __init__(
        self,
        logical_operator,
        coefficients,
        intercepts,
        device,
        classes=[0],
        multi_class=None,
        loss=None,
        is_linear_regression=False,
    ):
        super(LinearModel, self).__init__(logical_operator)
        self.coefficients = torch.nn.Parameter(torch.from_numpy(coefficients).detach().clone(), requires_grad=False)
        self.intercepts = torch.nn.Parameter(torch.from_numpy(intercepts).view(-1).detach().clone(), requires_grad=False)
        self.classes = torch.nn.Parameter(torch.IntTensor(classes), requires_grad=False)
        self.multi_class = multi_class
        self.regression = is_linear_regression
        self.classification = not is_linear_regression
        self.loss = loss
        # Classifiers default to log loss when no loss is given.
        if self.loss is None and self.classification:
            self.loss = "log"
        self.binary_classification = False
        if len(classes) == 2:
            self.binary_classification = True

    def forward(self, x):
        """Return the prediction for regression, or (labels, probabilities) for classification."""
        x = x.float()
        output = torch.addmm(self.intercepts, x, self.coefficients)
        if self.regression:
            # Log-loss regression predicts on the exponential scale.
            if self.loss == "log":
                return torch.exp(output)
            return output
        # Classification: pick the winning class index from the raw scores.
        if self.binary_classification:
            indices = (output > 0).squeeze().int()
        else:
            indices = torch.argmax(output, dim=1)
        predict_res = torch.index_select(self.classes, 0, indices)
        if self.multi_class == "multinomial":
            output = torch.softmax(output, dim=1)
        else:
            if self.loss == "modified_huber":
                # Modified-huber maps scores from [-1, 1] to [0, 1] linearly.
                output = torch.clip(output, -1, 1)
                output += 1
                output /= 2
            else:
                output = torch.sigmoid(output)
            if not self.binary_classification:
                if self.loss == "modified_huber":
                    # This loss might assign zero to all classes, which doesn't
                    # normalize neatly; work around this to produce uniform
                    # probabilities.
                    prob_sum = torch.sum(output, dim=1, keepdim=False)
                    all_zero = prob_sum == 0
                    if torch.any(all_zero):
                        output[all_zero, :] = 1
                        prob_sum[all_zero] = len(self.classes)
                    output /= prob_sum.view((output.shape[0], -1))
                else:
                    # One-vs-rest sigmoids: normalize rows to sum to 1.
                    output /= torch.sum(output, dim=1, keepdim=True)
        if self.binary_classification:
            # Expand the positive-class probability into a two-column matrix.
            output = torch.cat([1 - output, output], dim=1)
        return predict_res, output
The provided code snippet includes necessary dependencies for implementing the `convert_sparkml_linear_model` function. Write a Python function `def convert_sparkml_linear_model(operator, device, extra_config)` to solve the following problem:
Converter for `pyspark.ml.classification.LogisticRegressionModel` Args: operator: An operator wrapping a `pyspark.ml.classification.LogisticRegressionModel` device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sparkml_linear_model(operator, device, extra_config):
    """
    Converter for `pyspark.ml.classification.LogisticRegressionModel`.

    Args:
        operator: An operator wrapping a `pyspark.ml.classification.LogisticRegressionModel`
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    raw_model = operator.raw_operator
    n_classes = raw_model.numClasses

    # Spark ML assumes the label column is encoded such that labels start from zero.
    classes = list(range(n_classes))

    coefficients = raw_model.coefficientMatrix.toArray().transpose().astype("float32")
    intercepts = raw_model.interceptVector.reshape(1, -1).astype("float32")

    # More than two classes means a multinomial (softmax) head; otherwise binary.
    multi_class = "multinomial" if n_classes > 2 else None

    return LinearModel(operator, coefficients, intercepts, device, classes=classes, multi_class=multi_class)
6,571 | from onnxconverter_common.registration import register_converter
from .._sv_implementations import SVC
class SVC(PhysicalOperator, torch.nn.Module):
    """
    Torch implementation of kernel SVM classification. Decision values are
    computed against the stored support vectors and classes are chosen by
    pairwise (one-vs-one) voting, matching the libsvm decision procedure.
    """

    def __init__(self, logical_operator, kernel, degree, sv, nv, a, b, gamma, coef0, classes, device):
        super(SVC, self).__init__(logical_operator, classification=True)
        self.kernel = kernel
        self.degree = degree
        self.gamma = gamma
        self.regression = False
        # Densify sparse support vectors so the kernel math can use plain matmuls.
        sv = sv.toarray() if isinstance(sv, scipy.sparse.csr_matrix) else sv
        self.sv = torch.nn.Parameter(torch.from_numpy(sv).double(), requires_grad=False)
        self.sv_t = torch.nn.Parameter(torch.transpose(self.sv, 0, 1), requires_grad=False)
        # Precomputed -gamma * ||sv||^2 term for the RBF quadratic expansion.
        self.sv_norm = torch.nn.Parameter(-self.gamma * (self.sv ** 2).sum(1).view(1, -1), requires_grad=False)
        self.coef0 = coef0
        self.n_features = sv.shape[1]
        self.a = a
        self.b = torch.nn.Parameter(torch.from_numpy(b.reshape(1, -1)).double(), requires_grad=False)
        # start/end delimit each class's slice of support vectors within sv.
        self.start = [sum(nv[:i]) for i in range(len(nv))]
        self.end = [self.start[i] + nv[i] for i in range(len(nv))]
        self.len_nv = len(nv)
        # For every (i, j) class pair, record which class a positive / negative vote goes to.
        true_classes, false_classes = zip(*[(i, j) for i in range(self.len_nv) for j in range(i + 1, self.len_nv)])
        self.true_classes = torch.nn.Parameter(torch.IntTensor([true_classes]), requires_grad=False)
        self.false_classes = torch.nn.Parameter(torch.IntTensor([false_classes]), requires_grad=False)
        self.classes = torch.nn.Parameter(torch.IntTensor(classes), requires_grad=False)
        # Only remap predictions when labels are not already 0..n-1.
        self.perform_class_select = False
        if min(classes) != 0 or max(classes) != len(classes) - 1:
            self.perform_class_select = True
        self.n_classes = len(classes)

    def forward(self, x):
        """Return (predicted labels, predicted labels); SVC exposes no probabilities."""
        x = x.double()
        # Kernel matrix between the input batch and the support vectors.
        if self.kernel == "linear":
            k = torch.mm(x, self.sv_t)
        elif self.kernel == "rbf":
            # using quadratic expansion--susseptible to rounding-off errors
            # http://www.robots.ox.ac.uk/~albanie/notes/Euclidean_distance_trick.pdf
            x_norm = -self.gamma * (x ** 2).sum(1).view(-1, 1)
            k = torch.exp(x_norm + self.sv_norm + 2.0 * self.gamma * torch.mm(x, self.sv_t).double())
        elif self.kernel == "sigmoid":
            k = torch.sigmoid(self.gamma * torch.mm(x, self.sv_t) + self.coef0)
        else:  # poly kernel
            k = torch.pow(self.gamma * torch.mm(x, self.sv_t) + self.coef0, self.degree)
        # Pairwise decision values: one column per (i, j) class pair.
        c = [
            sum(self.a[i, p] * k[:, p : p + 1] for p in range(self.start[j], self.end[j]))
            + sum(self.a[j - 1, p] * k[:, p : p + 1] for p in range(self.start[i], self.end[i]))
            for i in range(self.len_nv)
            for j in range(i + 1, self.len_nv)
        ]
        c = torch.cat(c, dim=1) + self.b
        if self.n_classes == 2:
            # Single pairwise decision: sign picks the class directly.
            class_ids = torch.gt(c, 0.0).int().flatten()
        else:
            votes = torch.where(c > 0, self.true_classes, self.false_classes)
            # TODO mode is still not implemented for GPU backend.
            votes = votes.data.cpu()
            class_ids, _ = torch.mode(votes, dim=1)
        # No class probabilities in SVC.
        if self.perform_class_select:
            temp = torch.index_select(self.classes, 0, class_ids.long())
            return temp, temp
        else:
            return class_ids, class_ids
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_svc_model` function. Write a Python function `def convert_sklearn_svc_model(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.svm.SVC` and `sklearn.svm.NuSVC` Args: operator: An operator wrapping a `sklearn.svm.SVC` or `sklearn.svm.NuSVC` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_svc_model(operator, device, extra_config):
    """
    Converter for `sklearn.svm.SVC` and `sklearn.svm.NuSVC`.

    Args:
        operator: An operator wrapping a `sklearn.svm.SVC` or `sklearn.svm.NuSVC` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    raw_model = operator.raw_operator

    # Guard clause: only the standard libsvm kernels are supported.
    if raw_model.kernel not in ["linear", "poly", "rbf", "sigmoid"]:
        raise RuntimeError("Unsupported kernel for SVC: {}".format(raw_model.kernel))

    # https://stackoverflow.com/questions/20113206/scikit-learn-svc-decision-function-and-predict
    kernel = raw_model.kernel
    degree = raw_model.degree
    classes = raw_model.classes_
    sv = raw_model.support_vectors_
    nv = raw_model.n_support_
    a = raw_model.dual_coef_
    b = raw_model.intercept_
    coef0 = raw_model.coef0

    if hasattr(raw_model, "_gamma"):
        gamma = raw_model._gamma
    else:
        # TODO: which versions is this case for, and how to test?
        gamma = raw_model.gamma

    return SVC(operator, kernel, degree, sv, nv, a, b, gamma, coef0, classes, device)
6,572 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._mlp_implementations import MLPModel, MLPClassificationModel
class MLPClassificationModel(MLPModel):
    """MLP classifier head: runs the base MLP then turns logits into (labels, probabilities)."""

    def __init__(self, logical_operator, weights, biases, activation, classes, device):
        super(MLPClassificationModel, self).__init__(logical_operator, weights, biases, activation, device)
        self.regression = False
        self.classification = True
        self.classes = torch.nn.Parameter(torch.IntTensor(classes), requires_grad=False)
        # Only remap predictions when labels are not already 0..n-1.
        self.perform_class_select = not (min(classes) == 0 and max(classes) == len(classes) - 1)
        self.binary_classification = len(classes) == 2

    def forward(self, x):
        logits = super().forward(x)
        if self.binary_classification:
            # Single-logit head: sigmoid gives the positive-class probability.
            positive = torch.sigmoid(logits)
            probabilities = torch.cat([1 - positive, positive], dim=1)
        else:
            probabilities = torch.softmax(logits, dim=1)
        winners = torch.argmax(probabilities, dim=1)
        if self.perform_class_select:
            return torch.index_select(self.classes, 0, winners), probabilities
        return winners, probabilities
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_mlp_classifier` function. Write a Python function `def convert_sklearn_mlp_classifier(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.neural_network.MLPClassifier` Args: operator: An operator wrapping a `sklearn.neural_network.MLPClassifier` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_mlp_classifier(operator, device, extra_config):
    """
    Converter for `sklearn.neural_network.MLPClassifier`.

    Args:
        operator: An operator wrapping a `sklearn.neural_network.MLPClassifier` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model

    Raises:
        RuntimeError: If the model's class labels are not integers.
    """
    assert operator is not None, "Cannot convert None operator"

    classes = operator.raw_operator.classes_
    # Accept any integer label dtype (Python int or any numpy integer, e.g.
    # int8/int16/int32/int64/uint*), instead of the previous brittle
    # `type(x) in [int, np.int32, np.int64]` check that rejected valid labels.
    if not all(isinstance(x, (int, np.integer)) for x in classes):
        raise RuntimeError("Hummingbird supports only integer labels for class labels.")

    activation = operator.raw_operator.activation
    weights = operator.raw_operator.coefs_
    biases = operator.raw_operator.intercepts_

    return MLPClassificationModel(operator, weights, biases, activation, classes, device)
6,573 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._mlp_implementations import MLPModel, MLPClassificationModel
class MLPModel(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of a scikit-learn MLP regressor.

    Holds one (weight, bias) pair per layer as frozen float32 parameters and
    applies the configured activation between hidden layers; the output layer
    is purely linear.
    """

    def __init__(self, logical_operator, weights, biases, activation, device):
        super(MLPModel, self).__init__(logical_operator)
        self.regression = True

        def _freeze(arrays):
            # Register each numpy layer array as a non-trainable float32 parameter.
            return torch.nn.ParameterList(
                [torch.nn.Parameter(torch.from_numpy(arr.astype("float32")), requires_grad=False) for arr in arrays]
            )

        self.weights = _freeze(weights)
        self.biases = _freeze(biases)
        self.activation = activation

    def forward(self, x):
        n_hidden = len(self.weights) - 1
        for layer in range(n_hidden):
            # Affine transform: bias + x @ weight.
            x = torch.addmm(self.biases[layer], x, self.weights[layer])
            if self.activation == "relu":
                x = torch.relu(x)
            elif self.activation == "logistic":
                x = torch.sigmoid(x)
            elif self.activation == "tanh":
                x = torch.tanh(x)
            elif self.activation != "identity":
                raise RuntimeError("Unsupported activation {0}".format(self.activation))
        # Final layer has no activation (regression output).
        return torch.addmm(self.biases[-1], x, self.weights[-1])
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_mlp_regressor` function. Write a Python function `def convert_sklearn_mlp_regressor(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.neural_network.MLPRegressor` Args: operator: An operator wrapping a `sklearn.neural_network.MLPRegressor` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_mlp_regressor(operator, device, extra_config):
    """
    Converter for `sklearn.neural_network.MLPRegressor`

    Args:
        operator: An operator wrapping a `sklearn.neural_network.MLPRegressor` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"
    raw_model = operator.raw_operator
    return MLPModel(operator, raw_model.coefs_, raw_model.intercepts_, raw_model.activation, device)
6,574 | from .._physical_operator import PhysicalOperator
from onnxconverter_common.registration import register_converter
import torch
import itertools
class PolynomialFeatures(PhysicalOperator, torch.nn.Module):
    """
    Class implementing PolynomialFeatures operators in PyTorch.

    Expands the input columns into all monomials up to ``degree`` in
    combinations_with_replacement order, optionally restricted to products of
    distinct features (``interaction_only``) and optionally prefixed with a
    constant bias column (``include_bias``).
    # TODO extend this class to support higher orders
    """

    def __init__(self, operator, n_features, degree=2, interaction_only=False, include_bias=True, device=None):
        super(PolynomialFeatures, self).__init__(operator)
        self.transformer = True
        self.degree = degree
        self.n_features = n_features
        self.interaction_only = interaction_only
        self.include_bias = include_bias

    def forward(self, x):
        if self.degree < 0:
            raise ValueError("Degree should be greater than or equal to 0.")

        # Build all output columns on the same device as the input.
        device = x.device
        columns = []

        # Constant bias column comes first, mirroring scikit-learn's layout.
        if self.include_bias:
            columns.append(torch.ones(x.size()[0], 1, device=device))

        for deg in range(1, self.degree + 1):
            for index_combo in itertools.combinations_with_replacement(range(self.n_features), deg):
                # interaction_only keeps only products of *distinct* features.
                if self.interaction_only and len(set(index_combo)) != deg:
                    continue
                factors = torch.stack([x[:, idx] for idx in index_combo], dim=1)
                columns.append(torch.prod(factors, dim=1, keepdim=True))

        return torch.cat(columns, dim=1).to(device=device)
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_poly_features` function. Write a Python function `def convert_sklearn_poly_features(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.preprocessing.PolynomialFeatures` Currently this supports only degree 2, and does not support interaction_only Args: operator: An operator wrapping a `sklearn.preprocessing.PolynomialFeatures` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_poly_features(operator, device, extra_config):
    """
    Converter for `sklearn.preprocessing.PolynomialFeatures`

    Supports any non-negative degree, including the interaction_only and
    include_bias options (all forwarded to the PyTorch implementation).

    Args:
        operator: An operator wrapping a `sklearn.preprocessing.PolynomialFeatures` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model

    Raises:
        NotImplementedError: If the wrapped model has a negative degree.
    """
    assert operator is not None, "Cannot convert None operator"
    # Fail early on a meaningless configuration.
    # (Fixed error-message typo: "does not supports negtive" -> "does not support negative".)
    if operator.raw_operator.degree < 0:
        raise NotImplementedError("Hummingbird does not support negative degree for PolynomialFeatures")
    return PolynomialFeatures(
        operator,
        operator.raw_operator.n_features_in_,
        operator.raw_operator.degree,
        operator.raw_operator.interaction_only,
        operator.raw_operator.include_bias,
        device,
    )
6,575 | from packaging.version import Version, parse
import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .._physical_operator import PhysicalOperator
class Bagging(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of scikit-learn Bagging models.

    Averages the per-estimator predictions: for classifiers the positive-class
    probabilities are averaged and the label is picked by argmax; for
    regressors the raw predictions are averaged.
    """

    def __init__(self, logical_operator, is_classifier, n_estimators, classes):
        super(Bagging, self).__init__(logical_operator, transformer=True)
        self.is_classifier = is_classifier
        self.n_estimators = float(n_estimators)
        # Fix: forward() reads self.classes when labels are not 0..n-1, but the
        # original __init__ never stored them. Keep them as a frozen parameter
        # so they follow the module across devices.
        self.classes = torch.nn.Parameter(torch.IntTensor(classes), requires_grad=False)
        self.perform_class_select = False
        if min(classes) != 0 or max(classes) != len(classes) - 1:
            self.perform_class_select = True
        self.binary_classification = False
        if len(classes) == 2:
            self.binary_classification = True

    def forward(self, *x):
        if self.is_classifier:
            # Keep only the positive-class probability column of each estimator.
            x = [t[1].view(-1, 1) if len(t[1].shape) == 1 else t[1][:, 1].view(-1, 1) for t in x]
        # Fix: averaging must also run for regressors; in the original these two
        # lines were only reachable on the classifier path, leaving `output`
        # undefined for regression.
        # NOTE(review): assumes regressor outputs are column-shaped tensors that
        # torch.cat(dim=1) accepts — confirm against the regressor converters.
        output = torch.cat(x, dim=1)
        output = torch.sum(output, dim=1) / self.n_estimators
        if not self.is_classifier:
            return output
        if self.binary_classification:
            # Rebuild the two-column probability matrix [P(neg), P(pos)].
            output = torch.stack([1 - output, output], dim=1)
        if self.perform_class_select:
            return torch.index_select(self.classes, 0, torch.argmax(output, dim=1)), output
        else:
            return torch.argmax(output, dim=1), output
def convert_sklearn_baggind_model(operator, device, extra_config):
    """
    Converter for scikit-learn bagging models (classifier or regressor).

    Args:
        operator: An operator wrapping a scikit-learn Bagging model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"
    raw_model = operator.raw_operator
    is_classifier = raw_model._estimator_type == "classifier"
    # Regressors get a single dummy class so the implementation can be built uniformly.
    classes = raw_model.classes_ if is_classifier else [0]
    return Bagging(operator, is_classifier, raw_model.n_estimators, classes)
6,576 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._scaler_implementations import Scaler
class Scaler(PhysicalOperator, torch.nn.Module):
def __init__(self, logical_operator, offset, scale, device):
def forward(self, x):
def convert_sklearn_robust_scaler(operator, device, extra_config):
    """
    Converter for `sklearn.preprocessing.RobustScaler`.

    Centers by the fitted `center_` and divides by `scale_`; the division is
    precomputed as a multiplication by the reciprocal of the scale.
    """
    assert operator is not None, "Cannot convert None operator"
    inverse_scale = operator.raw_operator.scale_
    if inverse_scale is not None:
        inverse_scale = np.reciprocal(inverse_scale)
    return Scaler(operator, operator.raw_operator.center_, inverse_scale, device)
6,577 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._scaler_implementations import Scaler
class Scaler(PhysicalOperator, torch.nn.Module):
    """
    Class implementing Scaler operators in PyTorch. Supported normalizers are L1, L2 and Max.
    """

    # Computes (x - offset) * scale and returns a float32 result.
    # `scale` is expected to already be the reciprocal of the original divisor
    # (the converters precompute it with np.reciprocal).
    def __init__(self, logical_operator, offset, scale, device):
        super(Scaler, self).__init__(logical_operator, transformer=True)
        # Fall back to identity offset/scale when none (or an empty array) was given.
        # NOTE(review): these fallbacks reference `numpy` while this chunk only shows
        # `import numpy as np` — confirm the defining module imports `numpy` directly.
        if offset is None or len(offset.shape) == 0 or offset.shape == (0,):
            offset = numpy.array([0], dtype=numpy.float32)
        if scale is None or len(scale.shape) == 0 or scale.shape == (0,):
            scale = numpy.array([1], dtype=numpy.float32)
        self.offset = offset
        self.scale = scale
        # Register as frozen parameters so they move with the module across devices.
        if offset is not None:
            self.offset = torch.nn.Parameter(torch.from_numpy(offset).detach().clone(), requires_grad=False)
        if scale is not None:
            self.scale = torch.nn.Parameter(torch.from_numpy(scale).detach().clone(), requires_grad=False)

    def forward(self, x):
        # Shift, then rescale, then force float32 output.
        if self.offset is not None:
            x = x - self.offset
        if self.scale is not None:
            x = x * self.scale
        return x.float()
def convert_sklearn_max_abs_scaler(operator, device, extra_config):
    """
    Converter for `sklearn.preprocessing.MaxAbsScaler`.

    No centering is performed (zero offset); the input is multiplied by the
    reciprocal of the fitted per-feature scale.
    """
    assert operator is not None, "Cannot convert None operator"
    inverse_scale = operator.raw_operator.scale_
    if inverse_scale is not None:
        inverse_scale = np.reciprocal(inverse_scale)
    return Scaler(operator, np.array([0]), inverse_scale, device)
6,578 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._scaler_implementations import Scaler
class Scaler(PhysicalOperator, torch.nn.Module):
    """
    Class implementing Scaler operators in PyTorch. Supported normalizers are L1, L2 and Max.
    """

    # Computes (x - offset) * scale and returns a float32 result.
    # `scale` is expected to already be the reciprocal of the original divisor
    # (the converters precompute it with np.reciprocal).
    def __init__(self, logical_operator, offset, scale, device):
        super(Scaler, self).__init__(logical_operator, transformer=True)
        # Fall back to identity offset/scale when none (or an empty array) was given.
        # NOTE(review): these fallbacks reference `numpy` while this chunk only shows
        # `import numpy as np` — confirm the defining module imports `numpy` directly.
        if offset is None or len(offset.shape) == 0 or offset.shape == (0,):
            offset = numpy.array([0], dtype=numpy.float32)
        if scale is None or len(scale.shape) == 0 or scale.shape == (0,):
            scale = numpy.array([1], dtype=numpy.float32)
        self.offset = offset
        self.scale = scale
        # Register as frozen parameters so they move with the module across devices.
        if offset is not None:
            self.offset = torch.nn.Parameter(torch.from_numpy(offset).detach().clone(), requires_grad=False)
        if scale is not None:
            self.scale = torch.nn.Parameter(torch.from_numpy(scale).detach().clone(), requires_grad=False)

    def forward(self, x):
        # Shift, then rescale, then force float32 output.
        if self.offset is not None:
            x = x - self.offset
        if self.scale is not None:
            x = x * self.scale
        return x.float()
def convert_sklearn_min_max_scaler(operator, device, extra_config):
    """
    Converter for `sklearn.preprocessing.MinMaxScaler`.

    Rewrites sklearn's `x * scale_ + min_` into the Scaler's
    `(x - offset) * scale` form with offset = -min_ / scale_.
    """
    assert operator is not None, "Cannot convert None operator"
    raw_scale = operator.raw_operator.scale_
    raw_min = operator.raw_operator.min_
    scale = np.array([factor for factor in raw_scale])
    offset = np.array([-1.0 / factor * shift for factor, shift in zip(raw_scale, raw_min)])
    return Scaler(operator, offset, scale, device)
6,579 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._scaler_implementations import Scaler
class Scaler(PhysicalOperator, torch.nn.Module):
    """
    Class implementing Scaler operators in PyTorch. Supported normalizers are L1, L2 and Max.
    """

    # Computes (x - offset) * scale and returns a float32 result.
    # `scale` is expected to already be the reciprocal of the original divisor
    # (the converters precompute it with np.reciprocal).
    def __init__(self, logical_operator, offset, scale, device):
        super(Scaler, self).__init__(logical_operator, transformer=True)
        # Fall back to identity offset/scale when none (or an empty array) was given.
        # NOTE(review): these fallbacks reference `numpy` while this chunk only shows
        # `import numpy as np` — confirm the defining module imports `numpy` directly.
        if offset is None or len(offset.shape) == 0 or offset.shape == (0,):
            offset = numpy.array([0], dtype=numpy.float32)
        if scale is None or len(scale.shape) == 0 or scale.shape == (0,):
            scale = numpy.array([1], dtype=numpy.float32)
        self.offset = offset
        self.scale = scale
        # Register as frozen parameters so they move with the module across devices.
        if offset is not None:
            self.offset = torch.nn.Parameter(torch.from_numpy(offset).detach().clone(), requires_grad=False)
        if scale is not None:
            self.scale = torch.nn.Parameter(torch.from_numpy(scale).detach().clone(), requires_grad=False)

    def forward(self, x):
        # Shift, then rescale, then force float32 output.
        if self.offset is not None:
            x = x - self.offset
        if self.scale is not None:
            x = x * self.scale
        return x.float()
def convert_sklearn_standard_scaler(operator, device, extra_config):
    """
    Converter for `sklearn.preprocessing.StandardScaler`.

    Centers by the fitted mean and multiplies by the reciprocal of the fitted
    per-feature scale.
    """
    assert operator is not None, "Cannot convert None operator"
    inverse_scale = operator.raw_operator.scale_
    if inverse_scale is not None:
        inverse_scale = np.reciprocal(inverse_scale)
    return Scaler(operator, operator.raw_operator.mean_, inverse_scale, device)
6,580 | import numpy as np
from onnxconverter_common.registration import register_converter
from .. import constants
from .._gbdt_commons import convert_gbdt_common, convert_gbdt_classifier_common
from .._tree_commons import get_parameters_for_sklearn_common, get_parameters_for_tree_trav_sklearn, TreeParameters
def _get_n_features(model):
return model.n_features_in_
def convert_gbdt_classifier_common(
    operator, tree_infos, get_tree_parameters, n_features, n_classes, classes=None, extra_config={}, decision_cond="<="
):
    """
    Common converter for GBDT classifiers.

    Args:
        tree_infos: The information representing a tree (ensemble)
        get_tree_parameters: A function specifying how to parse the tree_infos into parameters
        n_features: The number of features input to the model
        n_classes: How many classes are expected. 1 for regression tasks
        classes: The classes used for classification. None if implementing a regression model
        extra_config: Extra configuration used to properly implement the source tree
        decision_cond: The condition of the decision nodes in the x <cond> threshold order. Default '<='. Values can be <=, <, >=, >

    Returns:
        A tree implementation in PyTorch
    """
    assert tree_infos is not None
    assert get_tree_parameters is not None
    assert n_features is not None
    assert n_classes is not None
    # Rearrange classes and tree information.
    # Binary classification is modeled with a single score column.
    if n_classes == 2:
        n_classes -= 1
    if classes is None:
        classes = [i for i in range(n_classes)]
    reorder_trees = True
    if constants.REORDER_TREES in extra_config:
        reorder_trees = extra_config[constants.REORDER_TREES]
    if reorder_trees and n_classes > 1:
        # Regroup the round-major tree list class-major: the tree for round i,
        # class j (index i*n_classes+j) moves to position j*rounds+i so all
        # trees of a class are contiguous.
        tree_infos = [tree_infos[i * n_classes + j] for j in range(n_classes) for i in range(len(tree_infos) // n_classes)]
    return convert_gbdt_common(
        operator, tree_infos, get_tree_parameters, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
    )
def get_parameters_for_sklearn_common(tree_infos, extra_config):
    """
    Parse sklearn-based trees, including SklearnRandomForestClassifier/Regressor and SklearnGradientBoostingClassifier/Regressor

    Args:
        tree_infos: The information representing a tree (ensemble)

    Returns: The tree parameters wrapped into an instance of `operator_converters._tree_commons_TreeParameters`
    """
    sk_tree = tree_infos.tree_
    return TreeParameters(
        sk_tree.children_left,
        sk_tree.children_right,
        sk_tree.feature,
        sk_tree.threshold,
        sk_tree.value,
    )
def get_parameters_for_tree_trav_sklearn(lefts, rights, features, thresholds, values, classes=None, extra_config={}):
    """
    This function is used to generate tree parameters for sklearn trees.
    Includes SklearnRandomForestClassifier/Regressor, and SklearnGradientBoostingClassifier.

    Args:
        lefts: The left nodes
        rights: The right nodes
        features: The features used in the decision nodes
        thresholds: The thresholds used in the decision nodes
        values: The values stored in the leaf nodes
        classes: The list of class labels. None if regression model

    Returns:
        An array containing the extracted parameters
    """
    # sklearn marks leaf-node features with negative sentinels; clamp to a valid index.
    features = [max(x, 0) for x in features]
    values = np.array(values)
    if len(values.shape) == 3:
        # Flatten (n_nodes, n_outputs, n_classes) into (n_nodes, n_outputs * n_classes).
        values = values.reshape(values.shape[0], -1)
    if values.shape[1] > 1 and classes is not None and len(classes) > 0:
        # Triggers only for classification.
        # Normalize raw leaf counts into per-class probabilities (in place).
        values /= np.sum(values, axis=1, keepdims=True)
    if constants.NUM_TREES in extra_config:
        # Pre-divide so summing the ensemble's leaves directly yields the average.
        values /= extra_config[constants.NUM_TREES]
    return get_parameters_for_tree_trav_common(lefts, rights, features, thresholds, values)
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_gbdt_classifier` function. Write a Python function `def convert_sklearn_gbdt_classifier(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.ensemble.GradientBoostingClassifier` Args: operator: An operator wrapping a `sklearn.ensemble.GradientBoostingClassifier` or `sklearn.ensemble.HistGradientBoostingClassifier` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_gbdt_classifier(operator, device, extra_config):
    """
    Converter for `sklearn.ensemble.GradientBoostingClassifier`

    Args:
        operator: An operator wrapping a `sklearn.ensemble.GradientBoostingClassifier`
            or `sklearn.ensemble.HistGradientBoostingClassifier` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model

    Raises:
        RuntimeError: If class labels are not integers, or a custom `init` was used.
    """
    assert operator is not None, "Cannot convert None operator"
    # Get tree information out of the operator.
    tree_infos = operator.raw_operator.estimators_
    # GBDT does not scale the value using the learning rate upfront, we have to do it.
    extra_config[constants.LEARNING_RATE] = operator.raw_operator.learning_rate
    # GBDT does not normalize values upfront, we have to do it.
    extra_config[constants.GET_PARAMETERS_FOR_TREE_TRAVERSAL] = get_parameters_for_tree_trav_sklearn
    n_features = _get_n_features(operator.raw_operator)
    classes = operator.raw_operator.classes_.tolist()
    n_classes = len(classes)
    # Analyze classes.
    if not all(isinstance(c, int) for c in classes):
        raise RuntimeError("GBDT Classifier translation only supports integer class labels.")
    # Binary classification is modeled with a single score column.
    if n_classes == 2:
        n_classes -= 1
    # Reshape the tree_infos into hummingbird gbdt internal format.
    # estimators_ is indexed [round][class]; flatten it class-major.
    tree_infos = [tree_infos[i][j] for j in range(n_classes) for i in range(len(tree_infos))]
    # Get the value for Alpha.
    # The baseline added to the tree scores before the post-transform:
    # GradientBoostingClassifier exposes `init`, HistGradientBoosting exposes
    # `_baseline_prediction`.
    if hasattr(operator.raw_operator, "init"):
        if operator.raw_operator.init == "zero":
            base_prediction = [[0.0]]
        elif operator.raw_operator.init is None:
            if n_classes == 1:
                # Binary case: log-odds of the positive-class prior.
                base_prediction = [
                    [np.log(operator.raw_operator.init_.class_prior_[1] / (1 - operator.raw_operator.init_.class_prior_[1]))]
                ]
            else:
                # Multiclass: log of each class prior.
                base_prediction = [[np.log(operator.raw_operator.init_.class_prior_[i]) for i in range(n_classes)]]
        else:
            raise RuntimeError("Custom initializers for GBDT are not yet supported in Hummingbird.")
    elif hasattr(operator.raw_operator, "_baseline_prediction"):
        if n_classes == 1:
            base_prediction = [[operator.raw_operator._baseline_prediction]]
        else:
            base_prediction = np.array([operator.raw_operator._baseline_prediction.flatten().tolist()])
    extra_config[constants.BASE_PREDICTION] = base_prediction
    # Trees were already flattened class-major above; skip the common reordering.
    extra_config[constants.REORDER_TREES] = False
    return convert_gbdt_classifier_common(
        operator, tree_infos, get_parameters_for_sklearn_common, n_features, n_classes, classes, extra_config
    )
6,581 | import numpy as np
from onnxconverter_common.registration import register_converter
from .. import constants
from .._gbdt_commons import convert_gbdt_common, convert_gbdt_classifier_common
from .._tree_commons import get_parameters_for_sklearn_common, get_parameters_for_tree_trav_sklearn, TreeParameters
def _get_n_features(model):
return model.n_features_in_
def convert_gbdt_common(
    operator, tree_infos, get_tree_parameters, n_features, classes=None, extra_config={}, decision_cond="<="
):
    """
    Common converter for GBDT models.

    Args:
        tree_infos: The information representing a tree (ensemble)
        get_tree_parameters: A function specifying how to parse the tree_infos into parameters
        n_features: The number of features input to the model
        classes: The classes used for classification. None if implementing a regression model
        extra_config: Extra configuration used to properly implement the source tree
        decision_cond: The condition of the decision nodes in the x <cond> threshold order. Default '<='. Values can be <=, <, >=, >

    Returns:
        A tree implementation in PyTorch
    """
    assert tree_infos is not None
    assert get_tree_parameters is not None
    assert n_features is not None
    tree_parameters, max_depth, tree_type = get_tree_params_and_type(tree_infos, get_tree_parameters, extra_config)
    # Apply learning rate directly on the values rather then at runtime.
    if constants.LEARNING_RATE in extra_config:
        for parameter in tree_parameters:
            parameter.values = parameter.values * extra_config[constants.LEARNING_RATE]
    # Generate the model parameters based on the selected strategy.
    if tree_type == TreeImpl.gemm:
        net_parameters = [
            get_parameters_for_gemm_common(
                tree_param.lefts,
                tree_param.rights,
                tree_param.features,
                tree_param.thresholds,
                tree_param.values,
                n_features,
                extra_config,
            )
            for tree_param in tree_parameters
        ]
    else:
        # Some models require some additional massaging of the parameters before generating the tree_trav implementation.
        get_parameters_for_tree_trav = get_parameters_for_tree_trav_common
        if constants.GET_PARAMETERS_FOR_TREE_TRAVERSAL in extra_config:
            get_parameters_for_tree_trav = extra_config[constants.GET_PARAMETERS_FOR_TREE_TRAVERSAL]
        net_parameters = [
            get_parameters_for_tree_trav(
                tree_param.lefts,
                tree_param.rights,
                tree_param.features,
                tree_param.thresholds,
                tree_param.values,
                extra_config,
            )
            for tree_param in tree_parameters
        ]
    # Define the post transform.
    if constants.BASE_PREDICTION in extra_config:
        base_pred = torch.FloatTensor(extra_config[constants.BASE_PREDICTION])
        # For newer versions of scikit-learn (>1.1.1),
        # the baseline apparently arrives with two extra leading singleton
        # dimensions that must be squeezed away.
        # NOTE(review): this only checks rank, not that the leading dims are
        # size 1 — confirm against the sklearn versions being supported.
        if len(base_pred.shape) == 4:
            base_pred = base_pred[0][0]
        base_prediction = torch.nn.Parameter(base_pred, requires_grad=False)
        extra_config[constants.BASE_PREDICTION] = base_prediction
    # For models following the Sklearn API we need to build the post transform ourselves.
    # Binary -> sigmoid over a single score; multiclass -> softmax.
    if classes is not None and constants.POST_TRANSFORM not in extra_config:
        if len(classes) <= 2:
            extra_config[constants.POST_TRANSFORM] = constants.SIGMOID
        else:
            extra_config[constants.POST_TRANSFORM] = constants.SOFTMAX
    # Set the post transform.
    # Replace the symbolic post-transform name with its (possibly
    # baseline-aware) callable implementation.
    if constants.POST_TRANSFORM in extra_config:
        if extra_config[constants.POST_TRANSFORM] == constants.SIGMOID:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplySigmoidBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplySigmoidPostTransform()
        elif extra_config[constants.POST_TRANSFORM] == constants.SOFTMAX:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplySoftmaxBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplySoftmaxPostTransform()
        elif extra_config[constants.POST_TRANSFORM] == constants.TWEEDIE:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplyTweedieBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplyTweediePostTransform()
        elif extra_config[constants.POST_TRANSFORM] is None:
            extra_config[constants.POST_TRANSFORM] = PostTransform()
        else:
            raise NotImplementedError("Post transform {} not implemeneted yet".format(extra_config[constants.POST_TRANSFORM]))
    elif constants.BASE_PREDICTION in extra_config:
        # No transform requested but a baseline exists: just add the baseline.
        extra_config[constants.POST_TRANSFORM] = ApplyBasePredictionPostTransform(base_prediction)
    # Generate the tree implementation based on the selected strategy.
    if tree_type == TreeImpl.gemm:
        return GEMMGBDTImpl(
            operator, net_parameters, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
    if tree_type == TreeImpl.tree_trav:
        return TreeTraversalGBDTImpl(
            operator, net_parameters, max_depth, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
    else:  # Remaining possible case: tree_type == TreeImpl.perf_tree_trav.
        return PerfectTreeTraversalGBDTImpl(
            operator, net_parameters, max_depth, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
def get_parameters_for_sklearn_common(tree_infos, extra_config):
    """
    Parse sklearn-based trees, including SklearnRandomForestClassifier/Regressor and SklearnGradientBoostingClassifier/Regressor

    Args:
        tree_infos: The information representing a tree (ensemble)

    Returns: The tree parameters wrapped into an instance of `operator_converters._tree_commons_TreeParameters`
    """
    sk_tree = tree_infos.tree_
    return TreeParameters(
        sk_tree.children_left,
        sk_tree.children_right,
        sk_tree.feature,
        sk_tree.threshold,
        sk_tree.value,
    )
def get_parameters_for_tree_trav_sklearn(lefts, rights, features, thresholds, values, classes=None, extra_config={}):
    """
    This function is used to generate tree parameters for sklearn trees.
    Includes SklearnRandomForestClassifier/Regressor, and SklearnGradientBoostingClassifier.

    Args:
        lefts: The left nodes
        rights: The right nodes
        features: The features used in the decision nodes
        thresholds: The thresholds used in the decision nodes
        values: The values stored in the leaf nodes
        classes: The list of class labels. None if regression model

    Returns:
        An array containing the extracted parameters
    """
    # sklearn marks leaf-node features with negative sentinels; clamp to a valid index.
    features = [max(x, 0) for x in features]
    values = np.array(values)
    if len(values.shape) == 3:
        # Flatten (n_nodes, n_outputs, n_classes) into (n_nodes, n_outputs * n_classes).
        values = values.reshape(values.shape[0], -1)
    if values.shape[1] > 1 and classes is not None and len(classes) > 0:
        # Triggers only for classification.
        # Normalize raw leaf counts into per-class probabilities (in place).
        values /= np.sum(values, axis=1, keepdims=True)
    if constants.NUM_TREES in extra_config:
        # Pre-divide so summing the ensemble's leaves directly yields the average.
        values /= extra_config[constants.NUM_TREES]
    return get_parameters_for_tree_trav_common(lefts, rights, features, thresholds, values)
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_gbdt_regressor` function. Write a Python function `def convert_sklearn_gbdt_regressor(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.ensemble.GradientBoostingRegressor`. Args: operator: An operator wrapping a `sklearn.ensemble.GradientBoostingRegressor` or `sklearn.ensemble.HistGradientBoostingRegressor` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_gbdt_regressor(operator, device, extra_config):
    """
    Converter for `sklearn.ensemble.GradientBoostingRegressor`.

    Args:
        operator: An operator wrapping a `sklearn.ensemble.GradientBoostingRegressor` or
            `sklearn.ensemble.HistGradientBoostingRegressor` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model

    Raises:
        RuntimeError: If the model was fit with a custom `init` estimator.
    """
    assert operator is not None, "Cannot convert None operator"
    # Get tree information out of the operator.
    # Regressors keep one tree per boosting round; flatten to a plain list.
    tree_infos = operator.raw_operator.estimators_.ravel().tolist()
    n_features = _get_n_features(operator.raw_operator)
    # Tree values must be scaled by the learning rate at conversion time.
    extra_config[constants.LEARNING_RATE] = operator.raw_operator.learning_rate
    # For sklearn models we need to massage the parameters a bit before generating the parameters for tree_trav.
    extra_config[constants.GET_PARAMETERS_FOR_TREE_TRAVERSAL] = get_parameters_for_tree_trav_sklearn
    # Get the value for Alpha.
    # The baseline added to the summed tree scores.
    if operator.raw_operator.init == "zero":
        base_prediction = [[0.0]]
    elif operator.raw_operator.init is None:
        # Default init: the fitted constant predictor (e.g. the training mean).
        base_prediction = operator.raw_operator.init_.constant_.tolist()
    else:
        raise RuntimeError("Custom initializers for GBDT are not yet supported in Hummingbird.")
    extra_config[constants.BASE_PREDICTION] = base_prediction
    return convert_gbdt_common(operator, tree_infos, get_parameters_for_sklearn_common, n_features, None, extra_config)
6,582 | import numpy as np
from onnxconverter_common.registration import register_converter
from .. import constants
from .._gbdt_commons import convert_gbdt_common, convert_gbdt_classifier_common
from .._tree_commons import get_parameters_for_sklearn_common, get_parameters_for_tree_trav_sklearn, TreeParameters
def _get_n_features(model):
return model.n_features_in_
def _get_parameters_hist_gbdt(trees, extra_config):
    """
    Extract the tree parameters from SklearnHistGradientBoostingClassifier trees

    Args:
        trees: The information representing a tree (ensemble)

    Returns: The tree parameters wrapped into an instance of `operator_converters._tree_commons_TreeParameters`
    """
    # `trees.nodes` holds one record per tree node with the split feature,
    # threshold, child indices and leaf value. Zero marks "unset" slots, which
    # are mapped to -1 sentinels below.
    features = [n["feature_idx"] for n in trees.nodes]
    try:
        thresholds = [n["threshold"] if n["threshold"] != 0 else -1 for n in trees.nodes]
    except ValueError:
        # newer version of scikit-learn renamed the field to "num_threshold".
        # NOTE(review): this assumes a missing field name raises ValueError —
        # confirm against the numpy/sklearn versions in use (some raise KeyError).
        thresholds = [n["num_threshold"] if n["num_threshold"] != 0 else -1 for n in trees.nodes]
    lefts = [n["left"] if n["left"] != 0 else -1 for n in trees.nodes]
    rights = [n["right"] if n["right"] != 0 else -1 for n in trees.nodes]
    values = [[n["value"]] if n["value"] != 0 else [-1] for n in trees.nodes]
    return TreeParameters(lefts, rights, features, thresholds, values)
def convert_gbdt_classifier_common(
    operator, tree_infos, get_tree_parameters, n_features, n_classes, classes=None, extra_config={}, decision_cond="<="
):
    """
    Common converter for GBDT classifiers.

    Args:
        tree_infos: The information representing a tree (ensemble)
        get_tree_parameters: A function specifying how to parse the tree_infos into parameters
        n_features: The number of features input to the model
        n_classes: How many classes are expected. 1 for regression tasks
        classes: The classes used for classification. None if implementing a regression model
        extra_config: Extra configuration used to properly implement the source tree
        decision_cond: The condition of the decision nodes in the x <cond> threshold order. Default '<='. Values can be <=, <, >=, >

    Returns:
        A tree implementation in PyTorch
    """
    assert tree_infos is not None
    assert get_tree_parameters is not None
    assert n_features is not None
    assert n_classes is not None
    # Rearrange classes and tree information.
    # Binary classification is modeled with a single score column.
    if n_classes == 2:
        n_classes -= 1
    if classes is None:
        classes = [i for i in range(n_classes)]
    reorder_trees = True
    if constants.REORDER_TREES in extra_config:
        reorder_trees = extra_config[constants.REORDER_TREES]
    if reorder_trees and n_classes > 1:
        # Regroup the round-major tree list class-major: the tree for round i,
        # class j (index i*n_classes+j) moves to position j*rounds+i so all
        # trees of a class are contiguous.
        tree_infos = [tree_infos[i * n_classes + j] for j in range(n_classes) for i in range(len(tree_infos) // n_classes)]
    return convert_gbdt_common(
        operator, tree_infos, get_tree_parameters, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
    )
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_hist_gbdt_classifier` function. Write a Python function `def convert_sklearn_hist_gbdt_classifier(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.ensemble.HistGradientBoostingClassifier` Args: operator: An operator wrapping a `sklearn.ensemble.HistGradientBoostingClassifier` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_hist_gbdt_classifier(operator, device, extra_config):
    """
    Converter for `sklearn.ensemble.HistGradientBoostingClassifier`

    Args:
        operator: An operator wrapping a `sklearn.ensemble.HistGradientBoostingClassifier` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    raw_model = operator.raw_operator

    # Pull the per-iteration, per-class tree predictors out of the fitted model.
    predictors = raw_model._predictors
    num_features = _get_n_features(raw_model)
    class_labels = raw_model.classes_.tolist()
    num_classes = len(class_labels)

    # The GBDT translation only handles integer class labels.
    for label in class_labels:
        if not isinstance(label, int):
            raise RuntimeError("GBDT Classifier translation only supports integer class labels.")

    # Binary classification is modeled with a single score column.
    if num_classes == 2:
        num_classes -= 1

    # Flatten the (iteration, class) grid of predictors into the class-major
    # layout expected by the common GBDT converter.
    flat_predictors = []
    for class_idx in range(num_classes):
        for iter_idx in range(len(predictors)):
            flat_predictors.append(predictors[iter_idx][class_idx])

    # Baseline (alpha) prediction added to the summed tree outputs.
    if num_classes == 1:
        baseline = [[raw_model._baseline_prediction]]
    else:
        baseline = np.array([raw_model._baseline_prediction.flatten().tolist()])
    extra_config[constants.BASE_PREDICTION] = baseline
    # The predictors were already reordered above; disable the generic reordering.
    extra_config[constants.REORDER_TREES] = False

    return convert_gbdt_classifier_common(
        operator, flat_predictors, _get_parameters_hist_gbdt, num_features, num_classes, class_labels, extra_config
    )
6,583 | import numpy as np
from onnxconverter_common.registration import register_converter
from .. import constants
from .._gbdt_commons import convert_gbdt_common, convert_gbdt_classifier_common
from .._tree_commons import get_parameters_for_sklearn_common, get_parameters_for_tree_trav_sklearn, TreeParameters
def _get_n_features(model):
return model.n_features_in_
def _get_parameters_hist_gbdt(trees, extra_config):
    """
    Extract the tree parameters from SklearnHistGradientBoostingClassifier trees

    Args:
        trees: The information representing a tree (ensemble)

    Returns: The tree parameters wrapped into an instance of `operator_converters._tree_commons_TreeParameters`
    """
    features = [n["feature_idx"] for n in trees.nodes]
    try:
        thresholds = [n["threshold"] if n["threshold"] != 0 else -1 for n in trees.nodes]
    except (KeyError, ValueError):
        # Newer versions of scikit-learn renamed the node field to "num_threshold".
        # Accessing the missing field on the structured node array raises
        # ValueError on old numpy but KeyError on modern numpy, so catch both.
        thresholds = [n["num_threshold"] if n["num_threshold"] != 0 else -1 for n in trees.nodes]
    lefts = [n["left"] if n["left"] != 0 else -1 for n in trees.nodes]
    rights = [n["right"] if n["right"] != 0 else -1 for n in trees.nodes]
    # 0 acts as a "not set" sentinel in the sklearn node arrays; map it to -1.
    values = [[n["value"]] if n["value"] != 0 else [-1] for n in trees.nodes]

    return TreeParameters(lefts, rights, features, thresholds, values)
def convert_gbdt_common(
    operator, tree_infos, get_tree_parameters, n_features, classes=None, extra_config={}, decision_cond="<="
):
    """
    Common converter for GBDT models.

    Args:
        tree_infos: The information representing a tree (ensemble)
        get_tree_parameters: A function specifying how to parse the tree_infos into parameters
        n_features: The number of features input to the model
        classes: The classes used for classification. None if implementing a regression model
        extra_config: Extra configuration used to properly implement the source tree
        decision_cond: The condition of the decision nodes in the x <cond> threshold order. Default '<='. Values can be <=, <, >=, >

    Returns:
        A tree implementation in PyTorch
    """
    assert tree_infos is not None
    assert get_tree_parameters is not None
    assert n_features is not None

    # Parse every tree and pick the implementation strategy
    # (gemm, tree_trav or perf_tree_trav) for the whole ensemble.
    tree_parameters, max_depth, tree_type = get_tree_params_and_type(tree_infos, get_tree_parameters, extra_config)

    # Apply learning rate directly on the values rather than at runtime.
    if constants.LEARNING_RATE in extra_config:
        for parameter in tree_parameters:
            parameter.values = parameter.values * extra_config[constants.LEARNING_RATE]

    # Generate the model parameters based on the selected strategy.
    if tree_type == TreeImpl.gemm:
        net_parameters = [
            get_parameters_for_gemm_common(
                tree_param.lefts,
                tree_param.rights,
                tree_param.features,
                tree_param.thresholds,
                tree_param.values,
                n_features,
                extra_config,
            )
            for tree_param in tree_parameters
        ]
    else:
        # Some models require some additional massaging of the parameters before
        # generating the tree_trav implementation; they can override the default
        # extraction routine through extra_config.
        get_parameters_for_tree_trav = get_parameters_for_tree_trav_common
        if constants.GET_PARAMETERS_FOR_TREE_TRAVERSAL in extra_config:
            get_parameters_for_tree_trav = extra_config[constants.GET_PARAMETERS_FOR_TREE_TRAVERSAL]
        net_parameters = [
            get_parameters_for_tree_trav(
                tree_param.lefts,
                tree_param.rights,
                tree_param.features,
                tree_param.thresholds,
                tree_param.values,
                extra_config,
            )
            for tree_param in tree_parameters
        ]

    # Define the post transform.
    if constants.BASE_PREDICTION in extra_config:
        base_pred = torch.FloatTensor(extra_config[constants.BASE_PREDICTION])
        # For newer versions of scikit-learn (>1.1.1), the baseline arrives with
        # two extra leading dimensions that are peeled off here.
        if len(base_pred.shape) == 4:
            base_pred = base_pred[0][0]
        # Wrap the baseline as a frozen parameter and store it back into
        # extra_config so the post-transform objects below pick up the tensor form.
        base_prediction = torch.nn.Parameter(base_pred, requires_grad=False)
        extra_config[constants.BASE_PREDICTION] = base_prediction

    # For models following the Sklearn API we need to build the post transform ourselves:
    # sigmoid for binary classification, softmax for multiclass.
    if classes is not None and constants.POST_TRANSFORM not in extra_config:
        if len(classes) <= 2:
            extra_config[constants.POST_TRANSFORM] = constants.SIGMOID
        else:
            extra_config[constants.POST_TRANSFORM] = constants.SOFTMAX

    # Set the post transform: replace the symbolic name in extra_config with the
    # concrete post-transform object (base-prediction-aware variant when a
    # baseline was provided above).
    if constants.POST_TRANSFORM in extra_config:
        if extra_config[constants.POST_TRANSFORM] == constants.SIGMOID:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplySigmoidBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplySigmoidPostTransform()
        elif extra_config[constants.POST_TRANSFORM] == constants.SOFTMAX:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplySoftmaxBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplySoftmaxPostTransform()
        elif extra_config[constants.POST_TRANSFORM] == constants.TWEEDIE:
            if constants.BASE_PREDICTION in extra_config:
                extra_config[constants.POST_TRANSFORM] = ApplyTweedieBasePredictionPostTransform(base_prediction)
            else:
                extra_config[constants.POST_TRANSFORM] = ApplyTweediePostTransform()
        elif extra_config[constants.POST_TRANSFORM] is None:
            # Identity post transform.
            extra_config[constants.POST_TRANSFORM] = PostTransform()
        else:
            raise NotImplementedError("Post transform {} not implemeneted yet".format(extra_config[constants.POST_TRANSFORM]))
    elif constants.BASE_PREDICTION in extra_config:
        # No explicit post transform requested: just add the baseline prediction.
        extra_config[constants.POST_TRANSFORM] = ApplyBasePredictionPostTransform(base_prediction)

    # Generate the tree implementation based on the selected strategy.
    if tree_type == TreeImpl.gemm:
        return GEMMGBDTImpl(
            operator, net_parameters, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
    if tree_type == TreeImpl.tree_trav:
        return TreeTraversalGBDTImpl(
            operator, net_parameters, max_depth, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
    else:  # Remaining possible case: tree_type == TreeImpl.perf_tree_trav.
        return PerfectTreeTraversalGBDTImpl(
            operator, net_parameters, max_depth, n_features, classes, extra_config=extra_config, decision_cond=decision_cond
        )
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_hist_gbdt_regressor` function. Write a Python function `def convert_sklearn_hist_gbdt_regressor(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.ensemble.HistGradientBoostingRegressor` Args: operator: An operator wrapping a `sklearn.ensemble.HistGradientBoostingRegressor` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_hist_gbdt_regressor(operator, device, extra_config):
    """
    Converter for `sklearn.ensemble.HistGradientBoostingRegressor`

    Args:
        operator: An operator wrapping a `sklearn.ensemble.HistGradientBoostingRegressor` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    raw_model = operator.raw_operator

    # Regression has a single output column, so keep the first (only) predictor
    # of every boosting iteration.
    predictors = [per_iteration[0] for per_iteration in raw_model._predictors]
    num_features = _get_n_features(raw_model)

    # Baseline (alpha) prediction added to the summed tree outputs.
    extra_config[constants.BASE_PREDICTION] = [[raw_model._baseline_prediction]]

    return convert_gbdt_common(operator, predictors, _get_parameters_hist_gbdt, num_features, None, extra_config)
6,584 | from onnxconverter_common.registration import register_converter
from .._normalizer_implementations import Normalizer
class Normalizer(PhysicalOperator, torch.nn.Module):
    """
    Class implementing Normalizer operators in PyTorch. Supported normalizers are L1, L2 and Max.
    """

    def __init__(self, logical_operator, norm, device):
        super(Normalizer, self).__init__(logical_operator)

        # Name of the norm to divide by; validated lazily on the first forward call.
        self.norm = norm
        self.transformer = True

    def forward(self, x):
        # Compute the per-row normalization denominator for the configured norm.
        if self.norm == "l1":
            denom = torch.abs(x).sum(1, keepdim=True)
        elif self.norm == "l2":
            denom = torch.pow(torch.pow(x, 2).sum(1, keepdim=True), 0.5)
        elif self.norm == "max":
            denom = torch.max(torch.abs(x), dim=1, keepdim=True)[0]
        else:
            raise RuntimeError("Unsupported norm: {0}".format(self.norm))
        return x / denom
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_normalizer` function. Write a Python function `def convert_sklearn_normalizer(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.preprocessing.Normalizer` Args: operator: An operator wrapping a `sklearn.preprocessing.Normalizer` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_normalizer(operator, device, extra_config):
    """
    Converter for `sklearn.preprocessing.Normalizer`

    Args:
        operator: An operator wrapping a `sklearn.preprocessing.Normalizer` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # Only the norm name ("l1", "l2" or "max") is needed from the sklearn model.
    norm = operator.raw_operator.norm
    return Normalizer(operator, norm, device)
6,585 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._mixture_implementations import BayesianGaussianMixture
class BayesianGaussianMixture(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of the predict path of `sklearn.mixture.BayesianGaussianMixture`.

    Only the 'dirichlet_process' weight-concentration prior and the 'full'
    covariance type are supported; other settings are rejected at construction
    time. `forward` returns, for each sample, the index of the mixture
    component with the largest weighted log-probability.
    """

    def __init__(self, logical_operator,
                 weight_concentration_prior_type,
                 weight_concentration_,
                 means_,
                 covariances_,
                 covariance_type,
                 degrees_of_freedom_,
                 mean_precision_,
                 device):
        super(BayesianGaussianMixture, self).__init__(logical_operator, regression=True)

        # Only the Dirichlet-process prior is implemented (see _estimate_log_weights).
        if (weight_concentration_prior_type == 'dirichlet_process'):
            self.weight_concentration_prior_type = weight_concentration_prior_type
        else:
            raise RuntimeError("Unsupported weight_concentration_prior_type: {0}".format(weight_concentration_prior_type))

        if (covariance_type == 'full'):
            self.covariance_type = covariance_type
        else:
            raise RuntimeError("Unsupported covariance_type: {0}".format(covariance_type))

        # Fitted sklearn parameters stored as frozen (non-trainable) tensors.
        self.weight_concentration_ = torch.nn.Parameter(torch.FloatTensor(weight_concentration_), requires_grad=False)
        self.means_ = torch.nn.Parameter(torch.FloatTensor(means_), requires_grad=False)
        # Precompute the Cholesky factors of the precision matrices from the covariances.
        precisions_cholesky_ = _compute_precision_cholesky(covariances_, covariance_type)
        self.precisions_cholesky_ = torch.nn.Parameter(torch.FloatTensor(precisions_cholesky_), requires_grad=False)
        self.degrees_of_freedom_ = torch.nn.Parameter(torch.FloatTensor(degrees_of_freedom_), requires_grad=False)
        self.mean_precision_ = torch.nn.Parameter(torch.FloatTensor(mean_precision_), requires_grad=False)

    def forward(self, X):
        # Hard assignment: component with the highest weighted log-probability.
        return self._estimate_weighted_log_prob(X).argmax(axis=1)

    def _estimate_weighted_log_prob(self, X):
        # Per-sample, per-component unnormalized log posterior.
        return self._estimate_log_prob(X) + self._estimate_log_weights()

    def _estimate_log_weights(self):
        # Expected log mixture weights under the (truncated) Dirichlet-process
        # stick-breaking representation; mirrors sklearn's private method of the
        # same name. weight_concentration_[0]/[1] are the per-stick Beta parameters.
        if self.weight_concentration_prior_type == "dirichlet_process":
            digamma_sum = torch.digamma(self.weight_concentration_[0] + self.weight_concentration_[1])
            digamma_a = torch.digamma(self.weight_concentration_[0])
            digamma_b = torch.digamma(self.weight_concentration_[1])
            return (digamma_a - digamma_sum + torch.hstack((torch.Tensor([0]), torch.cumsum(digamma_b - digamma_sum, dim=0)[:-1])))

    def _estimate_log_prob(self, X):
        # Expected Gaussian log-density with corrections for the uncertainty in
        # the precision (degrees_of_freedom_) and the mean (mean_precision_);
        # mirrors sklearn's private method of the same name.
        _, n_features = X.shape
        log_gauss = _estimate_log_gaussian_prob(X, self.means_, self.precisions_cholesky_, self.covariance_type) - 0.5 * n_features * torch.log(self.degrees_of_freedom_)
        log_lambda = n_features * torch.log(torch.FloatTensor([2.0])) + torch.sum(
            torch.digamma(0.5 * (self.degrees_of_freedom_ - torch.arange(0, n_features)[:, None])),
            0,)
        return log_gauss + 0.5 * (log_lambda - n_features / self.mean_precision_)
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_BayesianGaussianMixture` function. Write a Python function `def convert_sklearn_BayesianGaussianMixture(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.mixture.BayesianGaussianMixture` Args: operator: An operator wrapping a `sklearn.mixture.BayesianGaussianMixture` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_BayesianGaussianMixture(operator, device, extra_config):
    """
    Converter for `sklearn.mixture.BayesianGaussianMixture`

    Args:
        operator: An operator wrapping a `sklearn.mixture.BayesianGaussianMixture` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    raw_model = operator.raw_operator

    # Hand all fitted parameters over to the PyTorch implementation; the
    # weight concentrations are materialized as a numpy array first.
    return BayesianGaussianMixture(
        operator,
        raw_model.weight_concentration_prior_type,
        np.array(raw_model.weight_concentration_),
        raw_model.means_,
        raw_model.covariances_,
        raw_model.covariance_type,
        raw_model.degrees_of_freedom_,
        raw_model.mean_precision_,
        device,
    )
6,586 | import copy
from onnxconverter_common.registration import register_converter
from .. import constants
from .._tree_commons import get_parameters_for_sklearn_common, get_parameters_for_tree_trav_sklearn
from .._tree_commons import convert_decision_ensemble_tree_common
def convert_sklearn_random_forest_classifier(operator, device, extra_config):
    """
    Converter for `sklearn.ensemble.RandomForestClassifier` and `sklearn.ensemble.ExtraTreesClassifier`.

    Args:
        operator: An operator wrapping a tree (ensemble) classifier model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    raw_model = operator.raw_operator
    estimators = raw_model.estimators_
    num_features = raw_model.n_features_in_
    class_labels = raw_model.classes_.tolist()

    # Sklearn forests average the per-tree votes, so the converter needs the
    # ensemble size for normalization.
    extra_config[constants.NUM_TREES] = len(estimators)

    # Only integer class labels are supported.
    for label in class_labels:
        if not isinstance(label, int):
            raise RuntimeError("Random Forest Classifier translation only supports integer class labels")

    def get_parameters_for_tree_trav(lefts, rights, features, thresholds, values, extra_config={}):
        # Bind the class labels into the generic sklearn tree-traversal extraction.
        return get_parameters_for_tree_trav_sklearn(lefts, rights, features, thresholds, values, class_labels, extra_config)

    return convert_decision_ensemble_tree_common(
        operator,
        estimators,
        get_parameters_for_sklearn_common,
        get_parameters_for_tree_trav,
        num_features,
        class_labels,
        extra_config,
    )
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_decision_tree_classifier` function. Write a Python function `def convert_sklearn_decision_tree_classifier(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.tree.DecisionTreeClassifier`. Args: operator: An operator wrapping a `sklearn.tree.DecisionTreeClassifier` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_decision_tree_classifier(operator, device, extra_config):
    """
    Converter for `sklearn.tree.DecisionTreeClassifier`.

    Args:
        operator: An operator wrapping a `sklearn.tree.DecisionTreeClassifier` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # Present the single tree as a one-element ensemble so the random-forest
    # converter can be reused.  NOTE(review): this mutates the user's model in
    # place by attaching a self-referencing `estimators_` attribute.
    operator.raw_operator.estimators_ = [operator.raw_operator]
    return convert_sklearn_random_forest_classifier(operator, device, extra_config)
6,587 | import copy
from onnxconverter_common.registration import register_converter
from .. import constants
from .._tree_commons import get_parameters_for_sklearn_common, get_parameters_for_tree_trav_sklearn
from .._tree_commons import convert_decision_ensemble_tree_common
def convert_sklearn_random_forest_regressor(operator, device, extra_config):
    """
    Converter for `sklearn.ensemble.RandomForestRegressor` and `sklearn.ensemble.ExtraTreesRegressor`

    Args:
        operator: An operator wrapping a RandomForestRegressor or ExtraTreesRegressor model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    raw_model = operator.raw_operator
    estimators = raw_model.estimators_
    num_features = raw_model.n_features_in_

    # Sklearn forests average the per-tree predictions, so the converter needs
    # the ensemble size for normalization.
    extra_config[constants.NUM_TREES] = len(estimators)

    def get_parameters_for_tree_trav(lefts, rights, features, thresholds, values, extra_config={}):
        # Regression: there are no class labels to bind (None).
        return get_parameters_for_tree_trav_sklearn(lefts, rights, features, thresholds, values, None, extra_config)

    return convert_decision_ensemble_tree_common(
        operator,
        estimators,
        get_parameters_for_sklearn_common,
        get_parameters_for_tree_trav,
        num_features,
        extra_config=extra_config,
    )
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_decision_tree_regressor` function. Write a Python function `def convert_sklearn_decision_tree_regressor(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.tree.DecisionTreeRegressor`. Args: operator: An operator wrapping a `sklearn.tree.DecisionTreeRegressor` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_decision_tree_regressor(operator, device, extra_config):
    """
    Converter for `sklearn.tree.DecisionTreeRegressor`.

    Args:
        operator: An operator wrapping a `sklearn.tree.DecisionTreeRegressor` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # Present the single tree as a one-element ensemble so the random-forest
    # converter can be reused.  NOTE(review): this mutates the user's model in
    # place by attaching a self-referencing `estimators_` attribute.
    operator.raw_operator.estimators_ = [operator.raw_operator]
    return convert_sklearn_random_forest_regressor(operator, device, extra_config)
6,588 | from .._physical_operator import PhysicalOperator
import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .._imputer_implementations import SimpleImputer, MissingIndicator
class SimpleImputer(PhysicalOperator, torch.nn.Module):
    """
    Class implementing SimpleImputer operators in PyTorch.

    Replaces occurrences of the configured missing value with the fitted
    per-column statistics. Columns whose statistic is NaN (columns sklearn
    drops for non-"constant" strategies) are removed on the NaN path.
    """

    def __init__(self, logical_operator, device, statistics=None, missing=None, strategy=None):
        super(SimpleImputer, self).__init__(logical_operator)
        sklearn_imputer = logical_operator.raw_operator

        # Pull out the stats field from either the SKL imputer or args
        stats_ = statistics if statistics is not None else sklearn_imputer.statistics_

        # Process the stats into an array
        stats = [float(stat) for stat in stats_]

        # Explicit arguments take precedence over the fitted sklearn attributes.
        missing_values = missing if missing is not None else sklearn_imputer.missing_values
        strategy = strategy if strategy is not None else sklearn_imputer.strategy

        # b_mask marks columns whose statistic is a real number; NaN statistics
        # correspond to columns without a fitted replacement value.
        b_mask = np.logical_not(np.isnan(stats))
        # Indices of the columns that are kept when masking is needed.
        i_mask = [i for i in range(len(b_mask)) if b_mask[i]]
        self.transformer = True
        # do_mask == True means "keep all columns" (no column dropping needed).
        self.do_mask = strategy == "constant" or all(b_mask)
        self.mask = torch.nn.Parameter(torch.LongTensor([] if self.do_mask else i_mask), requires_grad=False)
        self.replace_values = torch.nn.Parameter(torch.tensor(np.array([stats_]), dtype=torch.float32), requires_grad=False)
        # Whether the missing marker is NaN (either the literal string "NaN" or float NaN).
        self.is_nan = True if (missing_values == "NaN" or np.isnan(missing_values)) else False
        if not self.is_nan:
            self.missing_values = torch.nn.Parameter(torch.tensor([missing_values], dtype=torch.float32), requires_grad=False)

    def forward(self, x):
        if self.is_nan:
            # Replace NaN entries by the per-column statistics, then drop the
            # columns that have no fitted statistic (unless do_mask keeps all).
            result = torch.where(torch.isnan(x), self.replace_values.expand(x.shape), x)
            if self.do_mask:
                return result
            return torch.index_select(result, 1, self.mask)
        else:
            # Non-NaN marker: replace matching entries in place.
            # NOTE(review): this branch never applies the column mask —
            # presumably the statistics contain no NaN here; confirm.
            return torch.where(torch.eq(x, self.missing_values), self.replace_values.expand(x.shape), x)
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_simple_imputer` function. Write a Python function `def convert_sklearn_simple_imputer(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.impute.SimpleImputer` Args: operator: An operator wrapping a `sklearn.impute.SimpleImputer` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_simple_imputer(operator, device, extra_config):
    """
    Converter for `sklearn.impute.SimpleImputer`

    Args:
        operator: An operator wrapping a `sklearn.impute.SimpleImputer` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # All imputation parameters are read off the wrapped sklearn model inside
    # the SimpleImputer implementation itself.
    return SimpleImputer(operator, device)
6,589 | from .._physical_operator import PhysicalOperator
import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .._imputer_implementations import SimpleImputer, MissingIndicator
class MissingIndicator(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of `sklearn.impute.MissingIndicator`: produces a
    0/1 mask marking where the missing-value marker occurs.
    """

    def __init__(self, logical_operator, device):
        super(MissingIndicator, self).__init__(logical_operator)
        raw_op = logical_operator.raw_operator

        self.transformer = True
        # The value that marks a missing entry, stored as a frozen tensor.
        self.missing_values = torch.nn.Parameter(
            torch.tensor([raw_op.missing_values], dtype=torch.float32), requires_grad=False
        )
        # "all" inspects every column; otherwise only the fitted subset is used.
        self.features = raw_op.features
        self.is_nan = raw_op.missing_values in ["NaN", None, np.nan]
        # Column indices selected by sklearn during fit.
        self.column_indices = torch.nn.Parameter(torch.LongTensor(raw_op.features_), requires_grad=False)

    def forward(self, x):
        # Restrict to the fitted columns unless every feature is inspected.
        data = x if self.features == "all" else torch.index_select(x, 1, self.column_indices)
        if self.is_nan:
            return torch.isnan(data).float()
        return torch.eq(data, self.missing_values).float()
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_missing_indicator` function. Write a Python function `def convert_sklearn_missing_indicator(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.impute.MissingIndicator` Args: operator: An operator wrapping a `sklearn.impute.MissingIndicator` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_missing_indicator(operator, device, extra_config):
    """
    Converter for `sklearn.impute.MissingIndicator`

    Args:
        operator: An operator wrapping a `sklearn.impute.MissingIndicator` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # The indicator parameters are read from the wrapped sklearn model inside
    # the MissingIndicator implementation itself.
    return MissingIndicator(operator, device)
6,590 | import torch
import numpy as np
from onnxconverter_common.registration import register_converter
from .._physical_operator import PhysicalOperator
from .._discretizer_implementations import Binarizer, KBinsDiscretizer
class Binarizer(PhysicalOperator, torch.nn.Module):
    """
    Class implementing Binarizer operators in PyTorch.
    """

    def __init__(self, logical_operator, threshold, device):
        super(Binarizer, self).__init__(logical_operator)

        self.transformer = True
        # Cut-off stored as a frozen parameter so it moves with the module across devices.
        self.threshold = torch.nn.Parameter(torch.FloatTensor([threshold]), requires_grad=False)

    def forward(self, x):
        # Strictly greater than the threshold maps to 1.0, everything else to 0.0.
        return (x > self.threshold).float()
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_binarizer` function. Write a Python function `def convert_sklearn_binarizer(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.preprocessing.Binarizer` Args: operator: An operator wrapping a `sklearn.preprocessing.Binarizer` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_binarizer(operator, device, extra_config):
    """
    Converter for `sklearn.preprocessing.Binarizer`

    Args:
        operator: An operator wrapping a `sklearn.preprocessing.Binarizer` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # Only the scalar threshold is needed from the sklearn model.
    threshold = operator.raw_operator.threshold
    return Binarizer(operator, threshold, device)
6,591 | import torch
import numpy as np
from onnxconverter_common.registration import register_converter
from .._physical_operator import PhysicalOperator
from .._discretizer_implementations import Binarizer, KBinsDiscretizer
class KBinsDiscretizer(PhysicalOperator, torch.nn.Module):
    """
    Class implementing KBinsDiscretizer operators in PyTorch.

    Bins each feature by counting how many interior bin edges a value is
    greater than or equal to, then optionally one-hot encodes the bin indices.
    """

    def __init__(self, logical_operator, encode, n_bins, bin_edges, labels, device):
        super(KBinsDiscretizer, self).__init__(logical_operator)
        self.transformer = True
        # Encoding mode coming from sklearn ("ordinal", "onehot" or "onehot-dense").
        self.encode = encode
        # Interior edges per feature: the first and last edge of each feature are
        # dropped since values below/above them still fall into the first/last bin.
        self.ge_tensor = torch.FloatTensor(bin_edges[:, 1:-1])
        # One-hot encoder applied when encode is one of the "onehot*" modes.
        self.ohe = OneHotEncoder(logical_operator, labels, device)
        if n_bins is not None:
            # Maximum ordinal bin index per feature, used to clip indices that
            # overshoot because of the np.inf padding added by the converter.
            self.n_bins = torch.FloatTensor([[n - 1 for n in n_bins]])
        else:
            self.n_bins = None

    def forward(self, x):
        # For each value, the bin index is the number of interior edges it is >=.
        x = torch.unsqueeze(x, 2)
        x = torch.ge(x, self.ge_tensor)
        x = x.float()
        x = torch.sum(x, dim=2, keepdim=False)
        if self.n_bins is not None:
            # Clipping the encoded values (Needed for sklearn).
            x = torch.min(self.n_bins, x)
        if self.encode in ["onehot-dense", "onehot"]:
            x = self.ohe(x)
        return x
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_k_bins_discretizer` function. Write a Python function `def convert_sklearn_k_bins_discretizer(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.preprocessing.KBinsDiscretizer` Args: operator: An operator wrapping a `sklearn.preprocessing.KBinsDiscretizer` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_k_bins_discretizer(operator, device, extra_config):
    """
    Converter for `sklearn.preprocessing.KBinsDiscretizer`

    Args:
        operator: An operator wrapping a `sklearn.preprocessing.KBinsDiscretizer` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    raw = operator.raw_operator
    edges = [e.flatten().tolist() for e in raw.bin_edges_]
    width = max(len(e) for e in edges) if edges else 0

    labels = []
    for idx, feature_edges in enumerate(edges):
        # One ordinal label per bin (n_edges - 1 bins for this feature).
        labels.append(np.array([i for i in range(len(feature_edges) - 1)]))
        # Pad ragged edge lists with +inf so they stack into a rectangular array.
        padding = width - len(feature_edges)
        if padding > 0:
            edges[idx] = feature_edges + [np.inf] * padding

    return KBinsDiscretizer(operator, raw.encode, raw.n_bins_, np.array(edges), labels, device)
6,592 | import torch
from .._physical_operator import PhysicalOperator
from onnxconverter_common.registration import register_converter
class KMeans(PhysicalOperator, torch.nn.Module):
    """
    Class implementing Kmeans in PyTorch
    """

    def __init__(self, logical_operator, centroids, device):
        super(KMeans, self).__init__(logical_operator, regression=True)
        self.centroids = torch.nn.Parameter(torch.FloatTensor(centroids), requires_grad=False)

    def forward(self, x):
        # Euclidean distance from every input row to every centroid; the
        # compute_mode avoids the less accurate matmul-based distance path.
        distances = torch.cdist(x, self.centroids, compute_mode="donot_use_mm_for_euclid_dist")
        # The predicted cluster label is the index of the nearest centroid.
        return torch.argmin(distances, dim=1)
def convert_sklearn_kmeans_model(operator, device, extra_config):
    """
    Converter for `sklearn.cluster.KMeans`

    Args:
        operator: An operator wrapping a `sklearn.cluster.KMeans` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    centroids = operator.raw_operator.cluster_centers_
    return KMeans(operator, centroids, device)
6,593 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._label_encoder_implementations import NumericLabelEncoder, StringLabelEncoder
class StringLabelEncoder(PhysicalOperator, torch.nn.Module):
    """
    LabelEncoder over string data types.
    When the ONNX backend is selected, this operator only works for PyTorch >= 1.8.0.
    """

    def __init__(self, logical_operator, classes, device, extra_config={}):
        super(StringLabelEncoder, self).__init__(logical_operator, transformer=True)
        self.regression = False
        self.num_columns = len(classes)
        # Strings are viewed as int32 words, so pad the width to a multiple of 4 bytes.
        self.max_word_length = max([len(cat) for cat in classes])
        while self.max_word_length % 4 != 0:
            self.max_word_length += 1
        data_type = "|S" + str(self.max_word_length)

        # Propagate the longest string length seen so far so that all string
        # operators in the pipeline encode inputs with a consistent width.
        max_length = 0
        if constants.MAX_STRING_LENGTH in extra_config:
            # BUG FIX: the previous value was looked up but discarded, so the
            # running maximum never accounted for other operators.
            max_length = extra_config[constants.MAX_STRING_LENGTH]
        extra_config[constants.MAX_STRING_LENGTH] = max(max_length, self.max_word_length)

        # Sort the classes and convert to torch.int32
        self.max_word_length = self.max_word_length // 4
        classes_conv = torch.from_numpy(np.array(sorted(set(classes)), dtype=data_type).view(np.int32)).detach().clone()
        classes_conv = classes_conv.view(1, -1, self.max_word_length)
        self.condition_tensors = torch.nn.Parameter(classes_conv, requires_grad=False)

    def forward(self, x):
        # Compare every input word (as int32 chunks) against every class word;
        # a row matches a class iff all of its chunks are equal.
        x = x.view(-1, 1, self.max_word_length)
        result = torch.prod(self.condition_tensors == x, dim=2).nonzero(as_tuple=True)[1]
        assert result.shape[0] == x.shape[0], "x ({}) contains previously unseen labels. condition_tensors: {}".format(
            x, self.condition_tensors
        )
        return result
class NumericLabelEncoder(PhysicalOperator, torch.nn.Module):
    """
    LabelEncoder over integer data types.
    """

    def __init__(self, logical_operator, classes, device):
        super(NumericLabelEncoder, self).__init__(logical_operator, transformer=True)
        self.regression = False
        self.check_tensor = torch.nn.Parameter(torch.IntTensor(classes), requires_grad=False)

    def forward(self, x):
        # Compare each input value against every known class; the position of
        # the (unique) match is the encoded label.
        reshaped = x.view(-1, 1)
        return torch.argmax(torch.eq(reshaped, self.check_tensor).int(), dim=1)
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_label_encoder` function. Write a Python function `def convert_sklearn_label_encoder(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.preprocessing.LabelEncoder` Args: operator: An operator wrapping a `sklearn.preprocessing.LabelEncoder` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_label_encoder(operator, device, extra_config):
    """
    Converter for `sklearn.preprocessing.LabelEncoder`

    Args:
        operator: An operator wrapping a `sklearn.preprocessing.LabelEncoder` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    classes = operator.raw_operator.classes_
    # Integer labels take the fast numeric path; anything else is treated as strings.
    is_numeric = all(type(label) in [int, np.int32, np.int64] for label in classes)
    if is_numeric:
        return NumericLabelEncoder(operator, classes, device)
    return StringLabelEncoder(operator, classes, device, extra_config)
6,594 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._nb_implementations import BernoulliNBModel, GaussianNBModel
class BernoulliNBModel(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of Bernoulli/multinomial Naive Bayes scoring.

    The joint log-likelihood is precomputed as a single affine transform
    (bias + x @ weights) evaluated in double precision.
    """

    def __init__(self, logical_operator, classes, binarize, jll_calc_bias, feature_log_prob_minus_neg_prob, device):
        super(BernoulliNBModel, self).__init__(logical_operator)
        self.classification = True
        self.binarize = binarize
        self.jll_calc_bias = torch.nn.Parameter(
            torch.from_numpy(jll_calc_bias.astype("float64")).view(-1), requires_grad=False
        )
        self.feature_log_prob_minus_neg_prob = torch.nn.Parameter(
            torch.from_numpy(feature_log_prob_minus_neg_prob.astype("float64")), requires_grad=False
        )
        self.classes = torch.nn.Parameter(torch.IntTensor(classes), requires_grad=False)
        # Labels already equal to 0..n-1 can be returned directly; otherwise an
        # explicit lookup into `classes` is needed.
        self.perform_class_select = not (min(classes) == 0 and max(classes) == len(classes) - 1)

    def forward(self, x):
        x = x.double()
        if self.binarize is not None:
            # Threshold features into {0, 1} before scoring (BernoulliNB only).
            x = torch.gt(x, self.binarize).double()
        jll = torch.addmm(self.jll_calc_bias, x, self.feature_log_prob_minus_neg_prob)
        # Normalize the joint log-likelihood into per-class probabilities.
        log_prob_x = jll - torch.logsumexp(jll, dim=1).view(-1, 1)
        prob_x = torch.exp(log_prob_x).float()
        winners = torch.argmax(jll, dim=1)
        if self.perform_class_select:
            return torch.index_select(self.classes, 0, winners), prob_x
        return winners, prob_x
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_bernouli_naive_bayes` function. Write a Python function `def convert_sklearn_bernouli_naive_bayes(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.naive_bayes.BernoulliNB` Args: operator: An operator wrapping a `sklearn.naive_bayes.BernoulliNB` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_bernouli_naive_bayes(operator, device, extra_config):
    """
    Converter for `sklearn.naive_bayes.BernoulliNB`

    Args:
        operator: An operator wrapping a `sklearn.naive_bayes.BernoulliNB` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    model = operator.raw_operator
    classes = model.classes_
    if not all(type(label) in [int, np.int32, np.int64] for label in classes):
        raise RuntimeError("Hummingbird supports only integer labels for class labels.")

    # Precompute the affine form of the Bernoulli joint log-likelihood:
    # jll = (log prior + sum log(1-p)) + x @ (log p - log(1-p)).
    neg_prob = np.log(1 - np.exp(model.feature_log_prob_))
    weights = (model.feature_log_prob_ - neg_prob).T
    bias = (model.class_log_prior_ + neg_prob.sum(1)).reshape(1, -1)

    return BernoulliNBModel(operator, classes, model.binarize, bias, weights, device)
6,595 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._nb_implementations import BernoulliNBModel, GaussianNBModel
class BernoulliNBModel(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of Bernoulli/multinomial Naive Bayes scoring.

    The joint log-likelihood is precomputed as a single affine transform
    (bias + x @ weights) evaluated in double precision.
    """

    def __init__(self, logical_operator, classes, binarize, jll_calc_bias, feature_log_prob_minus_neg_prob, device):
        super(BernoulliNBModel, self).__init__(logical_operator)
        self.classification = True
        self.binarize = binarize
        self.jll_calc_bias = torch.nn.Parameter(
            torch.from_numpy(jll_calc_bias.astype("float64")).view(-1), requires_grad=False
        )
        self.feature_log_prob_minus_neg_prob = torch.nn.Parameter(
            torch.from_numpy(feature_log_prob_minus_neg_prob.astype("float64")), requires_grad=False
        )
        self.classes = torch.nn.Parameter(torch.IntTensor(classes), requires_grad=False)
        # Labels already equal to 0..n-1 can be returned directly; otherwise an
        # explicit lookup into `classes` is needed.
        self.perform_class_select = not (min(classes) == 0 and max(classes) == len(classes) - 1)

    def forward(self, x):
        x = x.double()
        if self.binarize is not None:
            # Threshold features into {0, 1} before scoring (BernoulliNB only).
            x = torch.gt(x, self.binarize).double()
        jll = torch.addmm(self.jll_calc_bias, x, self.feature_log_prob_minus_neg_prob)
        # Normalize the joint log-likelihood into per-class probabilities.
        log_prob_x = jll - torch.logsumexp(jll, dim=1).view(-1, 1)
        prob_x = torch.exp(log_prob_x).float()
        winners = torch.argmax(jll, dim=1)
        if self.perform_class_select:
            return torch.index_select(self.classes, 0, winners), prob_x
        return winners, prob_x
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_multinomial_naive_bayes` function. Write a Python function `def convert_sklearn_multinomial_naive_bayes(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.naive_bayes.MultinomialNB` Args: operator: An operator wrapping a `sklearn.naive_bayes.MultinomialNB` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_multinomial_naive_bayes(operator, device, extra_config):
    """
    Converter for `sklearn.naive_bayes.MultinomialNB`

    Args:
        operator: An operator wrapping a `sklearn.naive_bayes.MultinomialNB` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    model = operator.raw_operator
    classes = model.classes_
    if not all(type(label) in [int, np.int32, np.int64] for label in classes):
        raise RuntimeError("Hummingbird supports only integer labels for class labels.")

    # Multinomial NB reuses the Bernoulli scoring kernel with no binarization
    # and the class log-priors as bias.
    weights = model.feature_log_prob_.T
    bias = model.class_log_prior_.reshape(1, -1)
    return BernoulliNBModel(operator, classes, None, bias, weights, device)
6,596 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._nb_implementations import BernoulliNBModel, GaussianNBModel
class GaussianNBModel(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of Gaussian Naive Bayes scoring.
    """

    def __init__(self, logical_operator, classes, jll_calc_bias, theta, sigma, device):
        super(GaussianNBModel, self).__init__(logical_operator)
        self.classification = True
        n_classes = len(classes)
        self.jll_calc_bias = torch.nn.Parameter(torch.from_numpy(jll_calc_bias.astype("float32")), requires_grad=False)
        # Per-class means and variances, shaped (n_classes, 1, n_features) so
        # they broadcast over the batch dimension in forward().
        self.theta = torch.nn.Parameter(
            torch.from_numpy(theta.astype("float32")).view((n_classes, 1, -1)), requires_grad=False
        )
        self.sigma = torch.nn.Parameter(
            torch.from_numpy(sigma.astype("float32")).view((n_classes, 1, -1)), requires_grad=False
        )
        self.classes = torch.nn.Parameter(torch.IntTensor(classes), requires_grad=False)
        # Labels already equal to 0..n-1 can be returned directly.
        self.perform_class_select = not (min(classes) == 0 and max(classes) == n_classes - 1)

    def forward(self, x):
        # Gaussian log-likelihood per class: bias - 0.5 * sum((x - mu)^2 / var).
        jll = self.jll_calc_bias - 0.5 * torch.sum(torch.div(torch.pow(x - self.theta, 2), self.sigma), 2)
        jll = torch.transpose(jll, 0, 1)
        # Normalize into per-class probabilities.
        log_prob_x = jll - torch.logsumexp(jll, dim=1).view(-1, 1)
        prob_x = torch.exp(log_prob_x)
        winners = torch.argmax(jll, dim=1)
        if self.perform_class_select:
            return torch.index_select(self.classes, 0, winners), prob_x
        return winners, prob_x
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_gaussian_naive_bayes` function. Write a Python function `def convert_sklearn_gaussian_naive_bayes(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.naive_bayes.GaussianNB` Args: operator: An operator wrapping a `sklearn.naive_bayes.GaussianNB` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_gaussian_naive_bayes(operator, device, extra_config):
    """
    Converter for `sklearn.naive_bayes.GaussianNB`

    Args:
        operator: An operator wrapping a `sklearn.naive_bayes.GaussianNB` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    model = operator.raw_operator
    classes = model.classes_
    if not all(type(label) in [int, np.int32, np.int64] for label in classes):
        raise RuntimeError("Hummingbird supports only integer labels for class labels.")

    # Constant part of the Gaussian log-likelihood:
    # log(prior) - 0.5 * sum(log(2*pi*var)).
    log_prior = np.log(model.class_prior_.reshape(-1, 1))
    normalizer = 0.5 * np.sum(np.log(2.0 * np.pi * model.var_), 1).reshape(-1, 1)
    jll_calc_bias = log_prior - normalizer

    return GaussianNBModel(operator, classes, jll_calc_bias, model.theta_, model.var_, device)
6,597 | import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .. import constants
from .._array_feature_extractor_implementations import ArrayFeatureExtractor
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class ArrayFeatureExtractor(PhysicalOperator, torch.nn.Module):
    """
    Class implementing ArrayFeatureExtractor in PyTorch

    This is used by SelectKBest, VarianceThreshold operators in scikit-learn
    """

    def __init__(self, logical_operator, column_indices, device):
        super(ArrayFeatureExtractor, self).__init__(logical_operator, transformer=True)
        # A contiguous index range can be selected with a cheap slice instead of a gather.
        self.is_contiguous = max(column_indices) - min(column_indices) + 1 == len(column_indices)
        self.min = min(column_indices)
        self.max = max(column_indices) + 1
        self.column_indices = torch.nn.Parameter(torch.LongTensor(column_indices), requires_grad=False)

    def forward(self, x):
        if isinstance(x, tuple):
            # Tuple inputs (e.g. per-column splits): pick the selected slots directly.
            return x[self.column_indices]
        if len(x.shape) == 1:
            x = x.view(1, -1)
        if self.is_contiguous:
            return x[:, self.min : self.max]
        return torch.index_select(x, 1, self.column_indices)
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_array_feature_extractor` function. Write a Python function `def convert_sklearn_array_feature_extractor(operator, device, extra_config)` to solve the following problem:
Converter for ArrayFeatureExtractor. Args: operator: An operator wrapping a ArrayFeatureExtractor operator device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_array_feature_extractor(operator, device, extra_config):
    """
    Converter for ArrayFeatureExtractor.

    Args:
        operator: An operator wrapping a ArrayFeatureExtractor operator
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    indices = operator.column_indices
    # A boolean support mask is converted into the list of selected positions.
    # isinstance also catches numpy bool scalars (np.bool_), which the previous
    # `type(i) is bool` check missed and which would have been treated as 0/1
    # column indices.
    if any(isinstance(i, (bool, np.bool_)) for i in indices):
        indices = [i for i in range(len(indices)) if indices[i]]
    return ArrayFeatureExtractor(operator, np.ascontiguousarray(indices), device)
6,598 | import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .. import constants
from .._array_feature_extractor_implementations import ArrayFeatureExtractor
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class Concat(PhysicalOperator, torch.nn.Module):
    """
    Module concatenating the outputs of pipeline branches along the feature axis.
    """

    def __init__(self, logical_operator):
        super(Concat, self).__init__(logical_operator, transformer=True)

    def forward(self, *x):
        if len(x[0].shape) <= 1:
            # 1D inputs are stacked column-wise into a single 2D tensor.
            return torch.stack([t.view(-1) for t in x], dim=1)
        # We need to explicitly cast the tensors if their types don't agree.
        dtypes = {t.dtype for t in x}
        if len(dtypes) > 1:
            if torch.float64 in dtypes:
                x = [t.double() for t in x]
            elif torch.float32 in dtypes:
                x = [t.float() for t in x]
            else:
                raise RuntimeError(
                    "Combination of data types for Concat input tensors not supported. Please fill an issue at https://github.com/microsoft/hummingbird."
                )
        return torch.cat(x, dim=1)
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_concat` function. Write a Python function `def convert_sklearn_concat(operator, device=None, extra_config={})` to solve the following problem:
Converter for concat operators injected when parsing Sklearn pipelines. Args: operator: An empty operator device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_concat(operator, device=None, extra_config={}):
    """
    Converter for concat operators injected when parsing Sklearn pipelines.

    Args:
        operator: An empty operator
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"
    # Concat carries no parameters of its own; all inputs arrive at forward() time.
    return Concat(operator)
6,599 | import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from .. import constants
from .._array_feature_extractor_implementations import ArrayFeatureExtractor
from .._physical_operator import PhysicalOperator
from .._pipeline_implementations import Concat
class Multiply(PhysicalOperator, torch.nn.Module):
    """
    Module used to multiply features in a pipeline by a score.
    """

    def __init__(self, operator, score):
        super(Multiply, self).__init__(operator)
        # Multiplicative factor applied element-wise to every input tensor.
        self.score = score

    def forward(self, x):
        # Broadcasted multiply; assumes `score` is a scalar or broadcastable
        # against x — TODO confirm against the parser that injects this operator.
        return x * self.score
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_multiply` function. Write a Python function `def convert_sklearn_multiply(operator, device=None, extra_config={})` to solve the following problem:
Converter for multiply operators injected when parsing Sklearn pipelines. Args: operator: An empty operator device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_multiply(operator, device=None, extra_config={}):
    """
    Converter for multiply operators injected when parsing Sklearn pipelines.

    Args:
        operator: An empty operator
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"
    assert hasattr(operator, "operand")

    # The multiplier was stashed on the operator by the pipeline parser.
    return Multiply(operator, operator.operand)
6,600 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._decomposition_implementations import KernelPCA, Decomposition, CrossDecomposition
class Decomposition(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of linear decompositions (PCA, FastICA, TruncatedSVD):
    optional mean-centering followed by a matrix product with the components.
    """

    def __init__(self, logical_operator, mean, transform_matrix, device):
        super(Decomposition, self).__init__(logical_operator)
        self.transformer = True
        if mean is not None:
            self.mean = torch.nn.Parameter(torch.from_numpy(mean), requires_grad=False)
        else:
            # No centering (e.g. TruncatedSVD, or FastICA fitted without whitening).
            self.mean = None
        self.transform_matrix = torch.nn.Parameter(torch.from_numpy(transform_matrix), requires_grad=False)

    def forward(self, x):
        if self.mean is not None:
            # BUG FIX: use out-of-place subtraction; the previous in-place
            # `x -= self.mean` silently mutated the caller's input tensor.
            x = x - self.mean
        return torch.mm(x, self.transform_matrix).float()
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_pca` function. Write a Python function `def convert_sklearn_pca(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.decomposition.PCA` Args: operator: An operator wrapping a `sklearn.decomposition.PCA` transformer device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_pca(operator, device, extra_config):
    """
    Converter for `sklearn.decomposition.PCA`

    Args:
        operator: An operator wrapping a `sklearn.decomposition.PCA` transformer
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    transform_matrix = operator.raw_operator.components_.transpose()
    if operator.raw_operator.whiten:
        transform_matrix = transform_matrix / np.sqrt(operator.raw_operator.explained_variance_)
    # Cast the mean to float32 to match the float32 transform matrix (and the
    # FastICA converter below); sklearn stores `mean_` in float64, and mixing
    # dtypes breaks the centering step in Decomposition.forward.
    mean = operator.raw_operator.mean_.reshape(1, -1).astype("float32")
    return Decomposition(operator, mean, transform_matrix.astype("float32"), device)
6,601 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._decomposition_implementations import KernelPCA, Decomposition, CrossDecomposition
class KernelPCA(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of `sklearn.decomposition.KernelPCA`.

    forward() evaluates the chosen kernel between the input rows and the
    fitted support vectors, centers the resulting kernel matrix with the
    statistics saved at fit time, and projects onto the scaled eigenvectors.
    """

    def __init__(self, logical_operator, kernel, degree, sv, scaled_alphas, gamma, coef0, k_fit_rows, k_fit_all, device):
        super(KernelPCA, self).__init__(logical_operator)
        self.transformer = True
        self.kernel = kernel
        self.degree = degree
        # `sv` holds the training samples (X_fit_), shape (n_samples, n_features).
        self.n_samples = sv.shape[0]
        self.sv = torch.nn.Parameter(torch.from_numpy(sv).float(), requires_grad=False)
        self.n_features = sv.shape[1]
        # Per-row centering statistics of the training kernel matrix.
        self.k_fit_rows = torch.nn.Parameter(torch.from_numpy(k_fit_rows).float(), requires_grad=False)
        self.k_fit_all = k_fit_all
        if gamma is None:
            # sklearn's default for gamma is 1 / n_features.
            gamma = 1.0 / self.n_features
        self.gamma = gamma
        self.coef0 = coef0
        # Eigenvectors pre-scaled by 1/sqrt(eigenvalue) by the converter.
        self.scaled_alphas = torch.nn.Parameter(torch.from_numpy(scaled_alphas).float(), requires_grad=False)

    def forward(self, x):
        # Compute the kernel matrix k of shape (batch, n_samples).
        if self.kernel == "linear":
            # Broadcasted row-by-row dot products with the support vectors.
            x = x.view(-1, 1, self.n_features)
            k = self.sv * x
            k = k.sum(2)
        elif self.kernel == "rbf":
            # exp(-gamma * ||x - sv||^2), via broadcasted squared differences.
            x = x.view(-1, 1, self.n_features)
            k = torch.pow(self.sv - x, 2)
            k = k.sum(2)
            k = torch.exp(-self.gamma * k)
        elif self.kernel == "poly":
            k = torch.pow(self.gamma * torch.mm(x, self.sv.t()) + self.coef0, self.degree)
        elif self.kernel == "sigmoid":
            k = torch.tanh(self.gamma * torch.mm(x, self.sv.t()) + self.coef0)
        elif self.kernel == "cosine":
            # Normalized dot products: <x, sv> / (||x|| * ||sv||).
            norm_x = torch.norm(x, keepdim=True, dim=1)
            norm_sv = torch.norm(self.sv, keepdim=True, dim=1)
            norm = torch.mm(norm_x, norm_sv.t())
            k = torch.mm(x, self.sv.t())
            k = torch.div(k, norm)
        elif self.kernel == "precomputed":
            # The caller supplies the kernel matrix directly.
            k = x
        else:
            raise NotImplementedError(
                "Hummingbird does not currently support {} kernel for KernelPCA. The supported kernels are linear, poly, rbf, sigmoid, cosine, and precomputed.".format(
                    self.kernel
                )
            )
        # Center the kernel matrix using the fit-time statistics
        # (mirrors sklearn's KernelCenterer.transform).
        k_pred_cols = (torch.sum(k, 1) / self.n_samples).view(-1, 1)
        k -= self.k_fit_rows
        k -= k_pred_cols
        k += self.k_fit_all
        # Project onto the scaled eigenvectors.
        return torch.mm(k, self.scaled_alphas)
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_kernel_pca` function. Write a Python function `def convert_sklearn_kernel_pca(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.decomposition.KernelPCA` Args: operator: An operator wrapping a `sklearn.decomposition.KernelPCA` transformer device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_kernel_pca(operator, device, extra_config):
    """
    Converter for `sklearn.decomposition.KernelPCA`

    Args:
        operator: An operator wrapping a `sklearn.decomposition.KernelPCA` transformer
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    raw = operator.raw_operator
    if raw.kernel not in ["linear", "poly", "rbf", "sigmoid", "cosine", "precomputed"]:
        raise NotImplementedError(
            "Hummingbird does not currently support {} kernel for KernelPCA. The supported kernels are linear, poly, rbf, sigmoid, cosine, and precomputed.".format(
                raw.kernel
            )
        )

    # Scale the eigenvectors by 1/sqrt(eigenvalue); components with a zero
    # eigenvalue stay zero so no division by zero occurs.
    non_zeros = np.flatnonzero(raw.eigenvalues_)
    scaled_alphas = np.zeros_like(raw.eigenvectors_)
    scaled_alphas[:, non_zeros] = raw.eigenvectors_[:, non_zeros] / np.sqrt(raw.eigenvalues_[non_zeros])

    return KernelPCA(
        operator,
        raw.kernel,
        raw.degree,
        raw.X_fit_,
        scaled_alphas,
        raw.gamma,
        raw.coef0,
        raw._centerer.K_fit_rows_,
        raw._centerer.K_fit_all_,
        device,
    )
6,602 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._decomposition_implementations import KernelPCA, Decomposition, CrossDecomposition
class Decomposition(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of linear decompositions (PCA, FastICA, TruncatedSVD):
    optional mean-centering followed by a matrix product with the components.
    """

    def __init__(self, logical_operator, mean, transform_matrix, device):
        super(Decomposition, self).__init__(logical_operator)
        self.transformer = True
        if mean is not None:
            self.mean = torch.nn.Parameter(torch.from_numpy(mean), requires_grad=False)
        else:
            # No centering (e.g. TruncatedSVD, or FastICA fitted without whitening).
            self.mean = None
        self.transform_matrix = torch.nn.Parameter(torch.from_numpy(transform_matrix), requires_grad=False)

    def forward(self, x):
        if self.mean is not None:
            # BUG FIX: use out-of-place subtraction; the previous in-place
            # `x -= self.mean` silently mutated the caller's input tensor.
            x = x - self.mean
        return torch.mm(x, self.transform_matrix).float()
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_fast_ica` function. Write a Python function `def convert_sklearn_fast_ica(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.decomposition.FastICA` Args: operator: An operator wrapping a `sklearn.decomposition.FastICA` transformer device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_fast_ica(operator, device, extra_config):
    """
    Converter for `sklearn.decomposition.FastICA`

    Args:
        operator: An operator wrapping a `sklearn.decomposition.FastICA` transformer
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    raw = operator.raw_operator
    transform_matrix = raw.components_.transpose().astype("float32")
    # `mean_` is only set when whitening was enabled at fit time.
    mean = raw.mean_.reshape(1, -1).astype("float32") if hasattr(raw, "mean_") else None
    return Decomposition(operator, mean, transform_matrix, device)
6,603 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._decomposition_implementations import KernelPCA, Decomposition, CrossDecomposition
class Decomposition(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of linear decompositions (PCA, FastICA, TruncatedSVD):
    optional mean-centering followed by a matrix product with the components.
    """

    def __init__(self, logical_operator, mean, transform_matrix, device):
        super(Decomposition, self).__init__(logical_operator)
        self.transformer = True
        if mean is not None:
            self.mean = torch.nn.Parameter(torch.from_numpy(mean), requires_grad=False)
        else:
            # No centering (e.g. TruncatedSVD, or FastICA fitted without whitening).
            self.mean = None
        self.transform_matrix = torch.nn.Parameter(torch.from_numpy(transform_matrix), requires_grad=False)

    def forward(self, x):
        if self.mean is not None:
            # BUG FIX: use out-of-place subtraction; the previous in-place
            # `x -= self.mean` silently mutated the caller's input tensor.
            x = x - self.mean
        return torch.mm(x, self.transform_matrix).float()
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_truncated_svd` function. Write a Python function `def convert_sklearn_truncated_svd(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.decomposition.TruncatedSVD` Args: operator: An operator wrapping a `sklearn.decomposition.TruncatedSVD` transformer device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_truncated_svd(operator, device, extra_config):
    """
    Converter for `sklearn.decomposition.TruncatedSVD`

    Args:
        operator: An operator wrapping a `sklearn.decomposition.TruncatedSVD` transformer
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # TruncatedSVD performs no centering, hence the mean argument is None.
    projection_matrix = operator.raw_operator.components_.transpose().astype("float32")
    return Decomposition(operator, None, projection_matrix, device)
6,604 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._decomposition_implementations import KernelPCA, Decomposition, CrossDecomposition
class CrossDecomposition(PhysicalOperator, torch.nn.Module):
    # PLS-style regression: y = ((x - x_mean) / x_std) @ coefficients + y_mean.
    def __init__(self, logical_operator, x_mean, x_std, y_mean, coefficients, device):
        """
        Args:
            logical_operator: The logical operator this physical operator implements
            x_mean: Per-feature training mean of X (numpy array)
            x_std: Per-feature training scale of X (numpy array)
            y_mean: Training mean of the targets (numpy array)
            coefficients: (n_features, n_targets) numpy coefficient matrix
            device: String defining the type of device the model should be run on
        """
        super(CrossDecomposition, self).__init__(logical_operator)
        self.regression = True
        self.x_mean = torch.nn.Parameter(torch.from_numpy(x_mean), requires_grad=False)
        self.x_std = torch.nn.Parameter(torch.from_numpy(x_std), requires_grad=False)
        self.y_mean = torch.nn.Parameter(torch.from_numpy(y_mean), requires_grad=False)
        self.coefficients = torch.nn.Parameter(torch.from_numpy(coefficients), requires_grad=False)

    def forward(self, x):
        # NOTE(review): the normalization below mutates the input tensor in place — confirm inputs are never reused.
        x -= self.x_mean
        x /= self.x_std
        y_pred = torch.mm(x, self.coefficients).float()
        # Undo the target centering applied during training.
        return y_pred + self.y_mean
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_pls_regression` function. Write a Python function `def convert_sklearn_pls_regression(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.cross_decomposition.PLSRegression` Args: operator: An operator wrapping a `sklearn.cross_decomposition.PLSRegression` transformer device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_pls_regression(operator, device, extra_config):
    """
    Converter for `sklearn.cross_decomposition.PLSRegression`

    Args:
        operator: An operator wrapping a `sklearn.cross_decomposition.PLSRegression` transformer
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    raw = operator.raw_operator
    # sklearn >= 1.3 dropped the private `_coef_` attribute and stores `coef_`
    # transposed relative to older releases (scikit-learn#26016).
    coefficients = raw.coef_ if hasattr(raw, "_coef_") else raw.coef_.T
    return CrossDecomposition(
        operator, raw._x_mean, raw._x_std, raw._y_mean, coefficients.astype("float32"), device
    )
6,605 | import numpy as np
from onnxconverter_common.registration import register_converter
from .. import constants
from .._one_hot_encoder_implementations import OneHotEncoderString, OneHotEncoder
class OneHotEncoderString(PhysicalOperator, torch.nn.Module):
    """
    Class implementing OneHotEncoder operators for strings in PyTorch.
    Because we are dealing with tensors, strings require additional length information for processing:
    each category is encoded as a fixed-width run of int32 values (4 bytes of the string per int).
    """

    def __init__(self, logical_operator, categories, device, extra_config={}):
        super(OneHotEncoderString, self).__init__(logical_operator, transformer=True)
        self.num_columns = len(categories)
        # Width of the encoding is fixed by the longest category string over all columns.
        self.max_word_length = max([max([len(c) for c in cat]) for cat in categories])

        # Strings are cast to int32 (4 bytes each), therefore we pad the width up to a multiple of 4.
        while self.max_word_length % 4 != 0:
            self.max_word_length += 1

        # Record the largest string width seen so far so the container can size string inputs consistently.
        max_length = 0
        if constants.MAX_STRING_LENGTH in extra_config:
            max_length = extra_config[constants.MAX_STRING_LENGTH]
        extra_config[constants.MAX_STRING_LENGTH] = max(max_length, self.max_word_length)

        # We build condition tensors as a 2d tensor of integers.
        # The first dimension is of size num words, the second dimension is fixed to the max word length (// 4).
        condition_tensors = []
        categories_idx = [0]
        for arr in categories:
            cats = (
                np.array(arr, dtype="|S" + str(self.max_word_length))  # Encode objects into 4 byte strings.
                .view("int32")
                .reshape(-1, self.max_word_length // 4)
                .tolist()
            )
            # We merge all categories for all columns into a single tensor
            condition_tensors.extend(cats)
            # Since all categories are merged together, we need to track of indexes to retrieve them at inference time.
            categories_idx.append(categories_idx[-1] + len(cats))
        self.condition_tensors = torch.nn.Parameter(torch.IntTensor(condition_tensors), requires_grad=False)
        self.categories_idx = categories_idx

    def forward(self, x):
        encoded_tensors = []

        for i in range(self.num_columns):
            # First we fetch the condition for the particular column.
            conditions = self.condition_tensors[self.categories_idx[i] : self.categories_idx[i + 1], :].view(
                1, -1, self.max_word_length // 4
            )
            # Differently than the numeric case where eq is enough, here we need to aggregate per object (dim = 2)
            # because objects can span multiple integers. We use product here since all ints must match to get encoding of 1.
            encoded_tensors.append(torch.prod(torch.eq(x[:, i : i + 1, :], conditions), dim=2))

        return torch.cat(encoded_tensors, dim=1).float()
class OneHotEncoder(PhysicalOperator, torch.nn.Module):
    """
    Class implementing OneHotEncoder operators for ints in PyTorch.
    Each input column is compared against that column's category list; matches become 1.0, others 0.0.
    """

    def __init__(self, logical_operator, categories, device):
        super(OneHotEncoder, self).__init__(logical_operator, transformer=True)
        self.num_columns = len(categories)

        # One LongTensor of category values per input column.
        condition_tensors = []
        for arr in categories:
            condition_tensors.append(torch.nn.Parameter(torch.LongTensor(arr).detach().clone(), requires_grad=False))
        self.condition_tensors = torch.nn.ParameterList(condition_tensors)

    def forward(self, *x):
        encoded_tensors = []

        if len(x) > 1:
            # Inputs arrive as one tensor per column (e.g., from a DataFrame split).
            assert len(x) == self.num_columns

            for i in range(self.num_columns):
                input = x[i]
                if input.dtype != torch.int64:
                    # torch.eq against a LongTensor requires int64 operands.
                    input = input.long()

                encoded_tensors.append(torch.eq(input, self.condition_tensors[i]))
        else:
            # This is already a tensor.
            x = x[0]
            if x.dtype != torch.int64:
                x = x.long()

            for i in range(self.num_columns):
                # Broadcast column i against its category list to get a 0/1 row per category.
                encoded_tensors.append(torch.eq(x[:, i : i + 1], self.condition_tensors[i]))

        return torch.cat(encoded_tensors, dim=1).float()
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_one_hot_encoder` function. Write a Python function `def convert_sklearn_one_hot_encoder(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.preprocessing.OneHotEncoder` Args: operator: An operator wrapping a `sklearn.preprocessing.OneHotEncoder` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_one_hot_encoder(operator, device, extra_config):
    """
    Converter for `sklearn.preprocessing.OneHotEncoder`

    Args:
        operator: An operator wrapping a `sklearn.preprocessing.OneHotEncoder` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    raw_categories = operator.raw_operator.categories_

    def _is_string_like(category):
        # Object-dtype or string-kind categories need the string-specialized encoder.
        dtype = np.array(category).dtype
        return dtype == object or dtype.kind in constants.SUPPORTED_STRING_TYPES

    if all(_is_string_like(c) for c in raw_categories):
        string_categories = [[str(value) for value in c.tolist()] for c in raw_categories]
        return OneHotEncoderString(operator, string_categories, device, extra_config)
    return OneHotEncoder(operator, raw_categories, device)
6,606 | import numpy as np
from onnxconverter_common.registration import register_converter
from .. import constants
from .._tree_commons import (
get_parameters_for_sklearn_common,
get_parameters_for_tree_trav_sklearn,
get_tree_params_and_type,
get_parameters_for_gemm_common,
)
from .._tree_implementations import TreeImpl, GEMMTreeImpl, TreeTraversalTreeImpl, PerfectTreeTraversalTreeImpl
def _get_parameters_for_sklearn_iforest(tree_infos, extra_config):
    """
    Parse sklearn-based isolation forest, replace existing values of node with anomaly score calculated in
    _get_iforest_anomaly_score_per_node
    Args:
        tree_infos: The information representing a tree (ensemble)
        extra_config: Extra configuration used to properly parse the tree
    Returns:
        The tree parameters wrapped into an instance of `operator_converters._tree_commons_TreeParameters`
    """
    tree_parameters = get_parameters_for_sklearn_common(tree_infos, extra_config)
    # Overwrite leaf values with per-node anomaly scores derived from the node sample counts,
    # keeping the original values array shape.
    tree_parameters.values = _get_iforest_anomaly_score_per_node(
        tree_parameters.lefts, tree_parameters.rights, tree_infos.tree_.n_node_samples
    ).reshape(tree_parameters.values.shape)
    return tree_parameters
class GEMMIsolationForestImpl(GEMMTreeImpl):
    """
    Class implementing the GEMM strategy (in PyTorch) for isolation forest model.
    """

    def __init__(self, logical_operator, tree_parameters, n_features, classes=None, extra_config={}):
        """
        Args:
            tree_parameters: The parameters defining the tree structure
            n_features: The number of features input to the model
            classes: The classes used for classification. None if implementing a regression model
            extra_config: Extra configuration used to properly implement the source tree
        """
        super(GEMMIsolationForestImpl, self).__init__(
            logical_operator, tree_parameters, n_features, classes, None, anomaly_detection=True
        )
        # Assign the required constants.
        if constants.OFFSET in extra_config:
            self.offset = extra_config[constants.OFFSET]
        if constants.MAX_SAMPLES in extra_config:
            self.max_samples = extra_config[constants.MAX_SAMPLES]
        # Backward compatibility for sklearn <= 0.21, which exposed an explicit threshold_ attribute.
        if constants.IFOREST_THRESHOLD in extra_config:
            self.offset += extra_config[constants.IFOREST_THRESHOLD]
        self.final_probability_divider = len(tree_parameters)
        # Normalizer for path lengths — presumably sklearn's c(max_samples) term; confirm against _average_path_length.
        self.average_path_length = _average_path_length(np.array([self.max_samples]))[0]

    def aggregation(self, x):
        # Average the per-tree scores across the ensemble.
        output = x.sum(0).t()
        if self.final_probability_divider > 1:
            output = output / self.final_probability_divider

        # Further normalize to match "decision_function" in sklearn implementation.
        output = -1.0 * 2 ** (-output / self.average_path_length) - self.offset
        return output
class TreeTraversalIsolationForestImpl(TreeTraversalTreeImpl):
    """
    Class implementing the Tree Traversal strategy in PyTorch for isolation forest model.
    """

    def __init__(self, logical_operator, tree_parameters, max_depth, n_features, classes=None, extra_config={}):
        """
        Args:
            tree_parameters: The parameters defining the tree structure
            max_depth: The maximum tree-depth in the model
            n_features: The number of features input to the model
            classes: The classes used for classification. None if implementing a regression model
            extra_config: Extra configuration used to properly implement the source tree
        """
        super(TreeTraversalIsolationForestImpl, self).__init__(
            logical_operator, tree_parameters, max_depth, n_features, classes, n_classes=None, anomaly_detection=True
        )
        # Assign the required constants.
        if constants.OFFSET in extra_config:
            self.offset = extra_config[constants.OFFSET]
        if constants.MAX_SAMPLES in extra_config:
            self.max_samples = extra_config[constants.MAX_SAMPLES]
        # Backward compatibility for sklearn <= 0.21, which exposed an explicit threshold_ attribute.
        if constants.IFOREST_THRESHOLD in extra_config:
            self.offset += extra_config[constants.IFOREST_THRESHOLD]
        self.final_probability_divider = len(tree_parameters)
        # Normalizer for path lengths — presumably sklearn's c(max_samples) term; confirm against _average_path_length.
        self.average_path_length = _average_path_length(np.array([self.max_samples]))[0]

    def aggregation(self, x):
        # Average the per-tree scores across the ensemble (trees on dim 1 here).
        output = x.sum(1)
        if self.final_probability_divider > 1:
            output = output / self.final_probability_divider

        # Further normalize to match "decision_function" in sklearn implementation.
        output = -1.0 * 2 ** (-output / self.average_path_length) - self.offset
        return output
class PerfectTreeTraversalIsolationForestImpl(PerfectTreeTraversalTreeImpl):
    """
    Class implementing the Perfect Tree Traversal strategy in PyTorch for isolation forest model.
    """

    def __init__(self, logical_operator, tree_parameters, max_depth, n_features, classes=None, extra_config={}):
        """
        Args:
            tree_parameters: The parameters defining the tree structure
            max_depth: The maximum tree-depth in the model
            n_features: The number of features input to the model
            classes: The classes used for classification. None if implementing a regression model
            extra_config: Extra configuration used to properly implement the source tree
        """
        super(PerfectTreeTraversalIsolationForestImpl, self).__init__(
            logical_operator, tree_parameters, max_depth, n_features, classes, None, anomaly_detection=True
        )
        # Assign the required constants.
        if constants.OFFSET in extra_config:
            self.offset = extra_config[constants.OFFSET]
        if constants.MAX_SAMPLES in extra_config:
            self.max_samples = extra_config[constants.MAX_SAMPLES]
        # Backward compatibility for sklearn <= 0.21, which exposed an explicit threshold_ attribute.
        if constants.IFOREST_THRESHOLD in extra_config:
            self.offset += extra_config[constants.IFOREST_THRESHOLD]
        self.final_probability_divider = len(tree_parameters)
        # Normalizer for path lengths — presumably sklearn's c(max_samples) term; confirm against _average_path_length.
        self.average_path_length = _average_path_length(np.array([self.max_samples]))[0]

    def aggregation(self, x):
        # Average the per-tree scores across the ensemble (trees on dim 1 here).
        output = x.sum(1)
        if self.final_probability_divider > 1:
            output = output / self.final_probability_divider

        # Further normalize to match "decision_function" in sklearn implementation.
        output = -1.0 * 2 ** (-output / self.average_path_length) - self.offset
        return output
def get_tree_params_and_type(tree_infos, get_tree_parameters, extra_config):
    """
    Populate the parameters from the trees and pick the tree implementation strategy.

    Args:
        tree_infos: The information representing a tree (ensemble)
        get_tree_parameters: A function specifying how to parse the tree_infos into a
            `operator_converters._tree_commons_TreeParameters` object
        extra_config: Extra configuration used also to select the best conversion strategy

    Returns:
        The tree parameters, the maximum tree-depth and the tree implementation to use
    """
    parsed_trees = [get_tree_parameters(info, extra_config) for info in tree_infos]
    # Degenerate single-leaf trees would report depth 0; clamp to at least 1.
    depth = max(1, _find_max_depth(parsed_trees))
    implementation = get_tree_implementation_by_config_or_depth(extra_config, depth)
    return parsed_trees, depth, implementation
def get_parameters_for_tree_trav_sklearn(lefts, rights, features, thresholds, values, classes=None, extra_config={}):
    """
    This function is used to generate tree parameters for sklearn trees.
    Includes SklearnRandomForestClassifier/Regressor, and SklearnGradientBoostingClassifier.
    Args:
        lefts: The left nodes
        rights: The right nodes
        features: The features used in the decision nodes
        thresholds: The thresholds used in the decision nodes
        values: The values stored in the leaf nodes
        classes: The list of class labels. None if regression model
        extra_config: Extra configuration (e.g., number of trees used to normalize leaf values)
    Returns:
        An array containing the extracted parameters
    """
    # Leaf nodes carry a negative feature index; clamp to 0 so lookups stay in range.
    features = [max(x, 0) for x in features]
    values = np.array(values)
    if len(values.shape) == 3:
        # sklearn stores leaf values as (n_nodes, 1, n_outputs); flatten the middle axis.
        values = values.reshape(values.shape[0], -1)
    if values.shape[1] > 1 and classes is not None and len(classes) > 0:
        # Triggers only for classification: turn per-class counts into probabilities.
        values /= np.sum(values, axis=1, keepdims=True)
    if constants.NUM_TREES in extra_config:
        # Ensembles that average per-tree outputs pre-divide each tree's values here.
        values /= extra_config[constants.NUM_TREES]

    return get_parameters_for_tree_trav_common(lefts, rights, features, thresholds, values)
def get_parameters_for_gemm_common(lefts, rights, features, thresholds, values, n_features, extra_config={}):
    """
    Common functions used by all tree algorithms to generate the parameters according to the GEMM strategy.
    Args:
        lefts: The left nodes
        rights: The right nodes
        features: The features used in the decision nodes
        thresholds: The thresholds used in the decision nodes
        values: The values stored in the leaf nodes
        n_features: The number of expected input features
        extra_config: Extra configuration (e.g., number of trees used to normalize leaf values)
    Returns:
        The weights and bias for the GEMM implementation
    """
    values = np.array(values)
    weights = []
    biases = []

    if len(lefts) == 1:
        # Model creating trees with just a single leaf node. We transform it
        # to a model with one internal node.
        lefts = [1, -1, -1]
        rights = [2, -1, -1]
        features = [0, 0, 0]
        thresholds = [0, 0, 0]
        n_classes = values.shape[1]
        values = np.array([np.zeros(n_classes), values.reshape(1), values.reshape(1)])
        # NOTE(review): this reshape result is discarded (reshape is not in-place) — confirm it is intentional.
        values.reshape(3, n_classes)

    # First hidden layer has all inequalities.
    hidden_weights = []
    hidden_biases = []
    for left, feature, thresh in zip(lefts, features, thresholds):
        if left != -1:
            # One-hot row selecting the feature compared at this internal node.
            hidden_weights.append([1 if i == feature else 0 for i in range(n_features)])
            hidden_biases.append(thresh)
    weights.append(np.array(hidden_weights).astype("float32"))
    # NOTE(review): thresholds kept in float64 while the later layers use float32 — confirm intentional.
    biases.append(np.array(hidden_biases, dtype=np.float64))
    n_splits = len(hidden_weights)

    # Second hidden layer has ANDs for each leaf of the decision tree.
    # Depth first enumeration of the tree in order to determine the AND by the path.
    hidden_weights = []
    hidden_biases = []

    path = [0]
    n_nodes = len(lefts)
    visited = [False for _ in range(n_nodes)]

    class_proba = []
    nodes = list(zip(lefts, rights, features, thresholds, values))

    # Iterative DFS over the tree (the redundant `True and` was removed from the loop condition).
    while len(path) > 0:
        i = path[-1]
        visited[i] = True
        left, right, feature, threshold, value = nodes[i]
        if left == -1 and right == -1:
            # Leaf node: emit the AND over the split decisions along the path to this leaf.
            vec = [0 for _ in range(n_splits)]
            # Keep track of positive weights for calculating bias.
            num_positive = 0
            for j, p in enumerate(path[:-1]):
                num_leaves_before_p = list(lefts[:p]).count(-1)
                if path[j + 1] in lefts:
                    vec[p - num_leaves_before_p] = 1
                    num_positive += 1
                elif path[j + 1] in rights:
                    vec[p - num_leaves_before_p] = -1
                else:
                    raise RuntimeError("Inconsistent state encountered while tree translation.")

            if values.shape[-1] > 1:
                proba = (values[i] / np.sum(values[i])).flatten()
            else:
                # We have only a single value. e.g., GBDT
                proba = values[i].flatten()

            # Some Sklearn tree implementations require normalization.
            if constants.NUM_TREES in extra_config:
                proba /= extra_config[constants.NUM_TREES]

            class_proba.append(proba)
            hidden_weights.append(vec)
            hidden_biases.append(num_positive)
            path.pop()
        elif not visited[left]:
            path.append(left)
        elif not visited[right]:
            path.append(right)
        else:
            path.pop()

    weights.append(np.array(hidden_weights).astype("float32"))
    biases.append(np.array(hidden_biases).astype("float32"))

    # OR neurons from the preceding layer in order to get final classes.
    weights.append(np.transpose(np.array(class_proba).astype("float64")))
    biases.append(None)

    return weights, biases
class TreeImpl(Enum):
    """
    Enum defining the available implementations for tree scoring.
    """

    gemm = 1  # Trees evaluated as a sequence of GEneral Matrix Multiplications.
    tree_trav = 2  # Generic iterative tree traversal.
    perf_tree_trav = 3  # Traversal specialized for perfect (fully balanced) trees.
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_isolation_forest` function. Write a Python function `def convert_sklearn_isolation_forest(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.ensemble.IsolationForest`. Args: operator: An operator wrapping a tree (ensemble) isolation forest model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_isolation_forest(operator, device, extra_config):
    """
    Converter for `sklearn.ensemble.IsolationForest`.
    Args:
        operator: An operator wrapping a tree (ensemble) isolation forest model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy
    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"
    tree_infos = operator.raw_operator.estimators_
    n_features = operator.raw_operator.n_features_in_

    # Following constants will be passed in the tree implementation to normalize the anomaly score.
    extra_config[constants.OFFSET] = operator.raw_operator.offset_
    # threshold_ only exists on old sklearn versions (<= 0.21).
    if hasattr(operator.raw_operator, "threshold_"):
        extra_config[constants.IFOREST_THRESHOLD] = operator.raw_operator.threshold_
    extra_config[constants.MAX_SAMPLES] = operator.raw_operator.max_samples_
    # Predict in isolation forest sklearn implementation produces 2 classes: -1 (anomaly) & 1 (normal).
    classes = [-1, 1]

    tree_parameters, max_depth, tree_type = get_tree_params_and_type(
        tree_infos, _get_parameters_for_sklearn_iforest, extra_config
    )

    # Generate the tree implementation based on the selected strategy.
    if tree_type == TreeImpl.gemm:
        net_parameters = [
            get_parameters_for_gemm_common(
                tree_param.lefts, tree_param.rights, tree_param.features, tree_param.thresholds, tree_param.values, n_features
            )
            for tree_param in tree_parameters
        ]
        return GEMMIsolationForestImpl(operator, net_parameters, n_features, classes, extra_config=extra_config)

    # Both traversal strategies share the same parameter extraction.
    net_parameters = [
        get_parameters_for_tree_trav_sklearn(
            tree_param.lefts, tree_param.rights, tree_param.features, tree_param.thresholds, tree_param.values, classes
        )
        for tree_param in tree_parameters
    ]
    if tree_type == TreeImpl.tree_trav:
        return TreeTraversalIsolationForestImpl(
            operator, net_parameters, max_depth, n_features, classes, extra_config=extra_config
        )
    else:  # Remaining possible case: tree_type == TreeImpl.perf_tree_trav
        return PerfectTreeTraversalIsolationForestImpl(
            operator, net_parameters, max_depth, n_features, classes, extra_config=extra_config
        )
6,607 | import numpy as np
from onnxconverter_common.registration import register_converter
from hummingbird.ml.operator_converters._kneighbors_implementations import KNeighborsModel, MetricType
from hummingbird.ml.operator_converters import constants
def _convert_kneighbors_model(operator, device, extra_config, is_classifier):
    """
    Shared converter for `sklearn.neighbors.KNeighborsClassifier` and `KNeighborsRegressor`.

    Args:
        operator: An operator wrapping the scikit-learn KNeighbors model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration; must contain `constants.BATCH_SIZE`
        is_classifier: True for KNeighborsClassifier, False for KNeighborsRegressor

    Returns:
        A `KNeighborsModel` PyTorch model

    Raises:
        RuntimeError: If no batch size is supplied, or class labels are not integers
        NotImplementedError: If the metric or weights type is unsupported
    """
    if constants.BATCH_SIZE not in extra_config:
        raise RuntimeError(
            "Hummingbird requires explicit specification of "
            + constants.BATCH_SIZE
            + " parameter when compiling KNeighborsClassifier"
        )

    classes = None
    if is_classifier:
        classes = operator.raw_operator.classes_.tolist()

        if not all([type(x) in [int, np.int32, np.int64] for x in classes]):
            raise RuntimeError("Hummingbird supports only integer labels for class labels.")

    metric = operator.raw_operator.metric
    params = operator.raw_operator.metric_params
    if metric not in ["minkowski", "euclidean", "manhattan", "chebyshev", "wminkowski", "seuclidean", "mahalanobis"]:
        # BUGFIX: the conditional expression must be parenthesized. Without parentheses,
        # `"..." + "Classifier" if is_classifier else "Regressor"` parses as
        # `("..." + "Classifier") if is_classifier else "Regressor"`, so regressors raised
        # with the bare, uninformative message "Regressor".
        raise NotImplementedError(
            "Hummingbird currently supports only the metric type 'minkowski', 'wminkowski', 'manhattan', 'chebyshev', 'mahalanobis', 'euclidean', and 'seuclidean' for KNeighbors"
            + ("Classifier" if is_classifier else "Regressor")
        )

    metric_type = None
    metric_params = None
    if metric in ["minkowski", "euclidean", "manhattan", "chebyshev"]:
        # These are all Minkowski distances with a fixed order p.
        metric_type = MetricType.minkowski
        p = 2
        if metric == "minkowski" and params is not None and "p" in params:
            p = params["p"]
        elif metric == "manhattan":
            p = 1
        elif metric == "chebyshev":
            p = float("inf")
        metric_params = {"p": p}
    elif metric == "wminkowski":
        metric_type = MetricType.wminkowski
        p = 2
        if params is not None and "p" in params:
            p = params["p"]
        w = params["w"]
        metric_params = {"p": p, "w": w}
    elif metric == "seuclidean":
        metric_type = MetricType.seuclidean
        V = params["V"]
        metric_params = {"V": V}
    elif metric == "mahalanobis":
        metric_type = MetricType.mahalanobis
        # Accept either the inverse covariance matrix directly, or the covariance to invert.
        if "VI" in params:
            VI = params["VI"]
        else:
            VI = np.linalg.inv(params["V"])
        metric_params = {"VI": VI}

    weights = operator.raw_operator.weights
    if weights not in ["uniform", "distance"]:
        # Same parenthesization fix as the metric check above.
        raise NotImplementedError(
            "Hummingbird currently supports only the weights type 'uniform' and 'distance' for KNeighbors"
            + ("Classifier" if is_classifier else "Regressor")
        )

    train_data = operator.raw_operator._fit_X
    train_labels = operator.raw_operator._y
    n_neighbors = operator.raw_operator.n_neighbors

    return KNeighborsModel(
        operator,
        train_data,
        train_labels,
        n_neighbors,
        weights,
        classes,
        extra_config[constants.BATCH_SIZE],
        is_classifier,
        metric_type,
        metric_params,
    )
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_kneighbors_regression_model` function. Write a Python function `def convert_sklearn_kneighbors_regression_model(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.neighbors.KNeighborsRegressor` Args: operator: An operator wrapping a `sklearn.neighbors.KNeighborsRegressor` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_kneighbors_regression_model(operator, device, extra_config):
    """
    Converter for `sklearn.neighbors.KNeighborsRegressor`

    Args:
        operator: An operator wrapping a `sklearn.neighbors.KNeighborsRegressor` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # Delegate to the shared KNeighbors converter in regression mode.
    return _convert_kneighbors_model(operator, device, extra_config, is_classifier=False)
6,608 | import numpy as np
from onnxconverter_common.registration import register_converter
from hummingbird.ml.operator_converters._kneighbors_implementations import KNeighborsModel, MetricType
from hummingbird.ml.operator_converters import constants
def _convert_kneighbors_model(operator, device, extra_config, is_classifier):
    """
    Shared converter for `sklearn.neighbors.KNeighborsClassifier` and `KNeighborsRegressor`.

    Args:
        operator: An operator wrapping the scikit-learn KNeighbors model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration; must contain `constants.BATCH_SIZE`
        is_classifier: True for KNeighborsClassifier, False for KNeighborsRegressor

    Returns:
        A `KNeighborsModel` PyTorch model

    Raises:
        RuntimeError: If no batch size is supplied, or class labels are not integers
        NotImplementedError: If the metric or weights type is unsupported
    """
    if constants.BATCH_SIZE not in extra_config:
        raise RuntimeError(
            "Hummingbird requires explicit specification of "
            + constants.BATCH_SIZE
            + " parameter when compiling KNeighborsClassifier"
        )

    classes = None
    if is_classifier:
        classes = operator.raw_operator.classes_.tolist()

        if not all([type(x) in [int, np.int32, np.int64] for x in classes]):
            raise RuntimeError("Hummingbird supports only integer labels for class labels.")

    metric = operator.raw_operator.metric
    params = operator.raw_operator.metric_params
    if metric not in ["minkowski", "euclidean", "manhattan", "chebyshev", "wminkowski", "seuclidean", "mahalanobis"]:
        # BUGFIX: the conditional expression must be parenthesized. Without parentheses,
        # `"..." + "Classifier" if is_classifier else "Regressor"` parses as
        # `("..." + "Classifier") if is_classifier else "Regressor"`, so regressors raised
        # with the bare, uninformative message "Regressor".
        raise NotImplementedError(
            "Hummingbird currently supports only the metric type 'minkowski', 'wminkowski', 'manhattan', 'chebyshev', 'mahalanobis', 'euclidean', and 'seuclidean' for KNeighbors"
            + ("Classifier" if is_classifier else "Regressor")
        )

    metric_type = None
    metric_params = None
    if metric in ["minkowski", "euclidean", "manhattan", "chebyshev"]:
        # These are all Minkowski distances with a fixed order p.
        metric_type = MetricType.minkowski
        p = 2
        if metric == "minkowski" and params is not None and "p" in params:
            p = params["p"]
        elif metric == "manhattan":
            p = 1
        elif metric == "chebyshev":
            p = float("inf")
        metric_params = {"p": p}
    elif metric == "wminkowski":
        metric_type = MetricType.wminkowski
        p = 2
        if params is not None and "p" in params:
            p = params["p"]
        w = params["w"]
        metric_params = {"p": p, "w": w}
    elif metric == "seuclidean":
        metric_type = MetricType.seuclidean
        V = params["V"]
        metric_params = {"V": V}
    elif metric == "mahalanobis":
        metric_type = MetricType.mahalanobis
        # Accept either the inverse covariance matrix directly, or the covariance to invert.
        if "VI" in params:
            VI = params["VI"]
        else:
            VI = np.linalg.inv(params["V"])
        metric_params = {"VI": VI}

    weights = operator.raw_operator.weights
    if weights not in ["uniform", "distance"]:
        # Same parenthesization fix as the metric check above.
        raise NotImplementedError(
            "Hummingbird currently supports only the weights type 'uniform' and 'distance' for KNeighbors"
            + ("Classifier" if is_classifier else "Regressor")
        )

    train_data = operator.raw_operator._fit_X
    train_labels = operator.raw_operator._y
    n_neighbors = operator.raw_operator.n_neighbors

    return KNeighborsModel(
        operator,
        train_data,
        train_labels,
        n_neighbors,
        weights,
        classes,
        extra_config[constants.BATCH_SIZE],
        is_classifier,
        metric_type,
        metric_params,
    )
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_kneighbors_classification_model` function. Write a Python function `def convert_sklearn_kneighbors_classification_model(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.neighbors.KNeighborsClassifier` Args: operator: An operator wrapping a `sklearn.neighbors.KNeighborsClassifier` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_kneighbors_classification_model(operator, device, extra_config):
    """
    Converter for `sklearn.neighbors.KNeighborsClassifier`
    Args:
        operator: An operator wrapping a `sklearn.neighbors.KNeighborsClassifier` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy
    Returns:
        A PyTorch model
    """
    # Guard added for consistency with convert_sklearn_kneighbors_regression_model.
    assert operator is not None, "Cannot convert None operator"

    return _convert_kneighbors_model(operator, device, extra_config, True)
6,609 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._array_feature_extractor_implementations import ArrayFeatureExtractor
class ArrayFeatureExtractor(PhysicalOperator, torch.nn.Module):
    """
    Class implementing ArrayFeatureExtractor in PyTorch
    This is used by SelectKBest, VarianceThreshold operators in scikit-learn
    """

    def __init__(self, logical_operator, column_indices, device):
        super(ArrayFeatureExtractor, self).__init__(logical_operator, transformer=True)

        # When the requested columns form a contiguous range, a slice is cheaper than index_select.
        is_contiguous = False
        if max(column_indices) - min(column_indices) + 1 == len(column_indices):
            is_contiguous = True
        self.min = min(column_indices)
        self.max = max(column_indices) + 1
        self.column_indices = torch.nn.Parameter(torch.LongTensor(column_indices), requires_grad=False)
        self.is_contiguous = is_contiguous

    def forward(self, x):
        if isinstance(x, tuple):
            # NOTE(review): indexing a tuple with a LongTensor parameter — confirm callers pass a tensor-like here.
            return x[self.column_indices]
        if len(x.shape) == 1:
            # Promote a single sample to a (1, n_features) batch.
            x = x.view(1, -1)
        if self.is_contiguous:
            return x[:, self.min : self.max]
        else:
            return torch.index_select(x, 1, self.column_indices)
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_select_k_best` function. Write a Python function `def convert_sklearn_select_k_best(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.feature_selection.SelectKBest`. Args: operator: An operator wrapping a `sklearn.feature_selection.SelectKBest` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_select_k_best(operator, device, extra_config):
    """
    Converter for `sklearn.feature_selection.SelectKBest`.

    Args:
        operator: An operator wrapping a `sklearn.feature_selection.SelectKBest` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # Turn the fitted boolean support mask into the positions of the kept columns.
    support_mask = operator.raw_operator.get_support()
    selected = np.flatnonzero(support_mask)

    return ArrayFeatureExtractor(operator, np.ascontiguousarray(selected), device)
6,610 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._array_feature_extractor_implementations import ArrayFeatureExtractor
class ArrayFeatureExtractor(PhysicalOperator, torch.nn.Module):
    """
    Class implementing ArrayFeatureExtractor in PyTorch.

    Selects a fixed subset of input columns.
    This is used by SelectKBest, VarianceThreshold operators in scikit-learn.
    """

    def __init__(self, logical_operator, column_indices, device):
        """
        Args:
            logical_operator: The logical operator this physical operator implements
            column_indices: Iterable of column positions to keep (must be non-empty)
            device: Target device string (not used directly; parameters follow the module)
        """
        super(ArrayFeatureExtractor, self).__init__(logical_operator, transformer=True)

        # When the kept columns form one contiguous run, forward() can use a
        # plain slice, which is cheaper than torch.index_select.
        is_contiguous = False
        if max(column_indices) - min(column_indices) + 1 == len(column_indices):
            is_contiguous = True
            self.min = min(column_indices)
            self.max = max(column_indices) + 1
        self.column_indices = torch.nn.Parameter(torch.LongTensor(column_indices), requires_grad=False)
        self.is_contiguous = is_contiguous

    def forward(self, x):
        """Return the selected columns of ``x``; a 1D input is promoted to a single row."""
        if isinstance(x, tuple):
            # Tuple input: index the tuple itself rather than a tensor axis.
            return x[self.column_indices]

        if len(x.shape) == 1:
            x = x.view(1, -1)

        if self.is_contiguous:
            return x[:, self.min : self.max]
        else:
            return torch.index_select(x, 1, self.column_indices)
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_variance_threshold` function. Write a Python function `def convert_sklearn_variance_threshold(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.feature_selection.VarianceThreshold`. Args: operator: An operator wrapping a `sklearn.feature_selection.VarianceThreshold` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_variance_threshold(operator, device, extra_config):
    """
    Converter for `sklearn.feature_selection.VarianceThreshold`.

    Args:
        operator: An operator wrapping a `sklearn.feature_selection.VarianceThreshold` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    variances = operator.raw_operator.variances_
    cutoff = operator.raw_operator.threshold

    # Keep only the columns whose fitted variance is strictly above the threshold.
    kept = np.array([idx for idx, v in enumerate(variances) if v > cutoff])

    return ArrayFeatureExtractor(operator, np.ascontiguousarray(kept), device)
6,611 | import numpy as np
from onnxconverter_common.registration import register_converter
from .._array_feature_extractor_implementations import ArrayFeatureExtractor
class ArrayFeatureExtractor(PhysicalOperator, torch.nn.Module):
    """
    Class implementing ArrayFeatureExtractor in PyTorch.

    Selects a fixed subset of input columns.
    This is used by SelectKBest, VarianceThreshold operators in scikit-learn.
    """

    def __init__(self, logical_operator, column_indices, device):
        """
        Args:
            logical_operator: The logical operator this physical operator implements
            column_indices: Iterable of column positions to keep (must be non-empty)
            device: Target device string (not used directly; parameters follow the module)
        """
        super(ArrayFeatureExtractor, self).__init__(logical_operator, transformer=True)

        # When the kept columns form one contiguous run, forward() can use a
        # plain slice, which is cheaper than torch.index_select.
        is_contiguous = False
        if max(column_indices) - min(column_indices) + 1 == len(column_indices):
            is_contiguous = True
            self.min = min(column_indices)
            self.max = max(column_indices) + 1
        self.column_indices = torch.nn.Parameter(torch.LongTensor(column_indices), requires_grad=False)
        self.is_contiguous = is_contiguous

    def forward(self, x):
        """Return the selected columns of ``x``; a 1D input is promoted to a single row."""
        if isinstance(x, tuple):
            # Tuple input: index the tuple itself rather than a tensor axis.
            return x[self.column_indices]

        if len(x.shape) == 1:
            x = x.view(1, -1)

        if self.is_contiguous:
            return x[:, self.min : self.max]
        else:
            return torch.index_select(x, 1, self.column_indices)
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_select_percentile` function. Write a Python function `def convert_sklearn_select_percentile(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.feature_selection.SelectPercentile`. Args: operator: An operator wrapping a `sklearn.feature_selection.SelectPercentile` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_select_percentile(operator, device, extra_config):
    """
    Converter for `sklearn.feature_selection.SelectPercentile`.

    Args:
        operator: An operator wrapping a `sklearn.feature_selection.SelectPercentile` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # The fitted selector exposes a boolean mask; convert it to column positions.
    mask = np.asarray(operator.raw_operator.get_support())
    kept_columns = np.where(mask)[0]

    return ArrayFeatureExtractor(operator, np.ascontiguousarray(kept_columns), device)
6,612 | import numpy as np
from onnxconverter_common.registration import register_converter
from sklearn._loss.link import LogLink
from .._linear_implementations import LinearModel
class LinearModel(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of scikit-learn linear models: a single affine
    transform plus the per-loss / per-multi-class probability calibration.
    Used for both regressors (``is_linear_regression=True``) and classifiers.
    """

    def __init__(
        self,
        logical_operator,
        coefficients,
        intercepts,
        device,
        classes=[0],
        multi_class=None,
        loss=None,
        is_linear_regression=False,
    ):
        """
        Args:
            logical_operator: The logical operator this physical operator implements
            coefficients: numpy array of weights, shape (n_features, n_outputs)
            intercepts: numpy array of intercepts (flattened to 1D internally)
            device: Target device string (parameters follow the module)
            classes: Integer class labels (classification only); the [0] default
                is never mutated, so the mutable default is harmless here
            multi_class: "multinomial" for softmax scores, anything else is one-vs-rest
            loss: Loss name driving probability calibration (e.g. "modified_huber")
            is_linear_regression: True for plain regression (single tensor output)
        """
        super(LinearModel, self).__init__(logical_operator)
        self.coefficients = torch.nn.Parameter(torch.from_numpy(coefficients).detach().clone(), requires_grad=False)
        self.intercepts = torch.nn.Parameter(torch.from_numpy(intercepts).view(-1).detach().clone(), requires_grad=False)
        self.classes = torch.nn.Parameter(torch.IntTensor(classes), requires_grad=False)
        self.multi_class = multi_class
        self.regression = is_linear_regression
        self.classification = not is_linear_regression
        self.loss = loss
        # Classifiers default to log loss, i.e. sigmoid/softmax calibration.
        if self.loss is None and self.classification:
            self.loss = "log"
        self.binary_classification = False
        if len(classes) == 2:
            self.binary_classification = True

    def forward(self, x):
        x = x.float()
        # Affine transform: intercepts + x @ coefficients.
        output = torch.addmm(self.intercepts, x, self.coefficients)

        if self.regression:
            # "log" here marks a log link (set by the converter when the
            # sklearn model's _base_loss uses LogLink): predict exp(linear).
            if self.loss == "log":
                return torch.exp(output)
            return output

        # Predicted labels: sign test for binary, argmax over classes otherwise.
        if self.binary_classification:
            indices = (output > 0).squeeze().int()
        else:
            indices = torch.argmax(output, dim=1)
        predict_res = torch.index_select(self.classes, 0, indices)

        if self.multi_class == "multinomial":
            output = torch.softmax(output, dim=1)
        else:
            if self.loss == "modified_huber":
                # Clip raw scores to [-1, 1] and rescale to [0, 1], matching
                # scikit-learn's modified_huber predict_proba.
                output = torch.clip(output, -1, 1)
                output += 1
                output /= 2
            else:
                output = torch.sigmoid(output)
            if not self.binary_classification:
                if self.loss == "modified_huber":
                    # This loss might assign zero to all classes, which doesn't
                    # normalize neatly; work around this to produce uniform
                    # probabilities.
                    prob_sum = torch.sum(output, dim=1, keepdim=False)
                    all_zero = prob_sum == 0
                    if torch.any(all_zero):
                        output[all_zero, :] = 1
                        prob_sum[all_zero] = len(self.classes)
                    output /= prob_sum.view((output.shape[0], -1))
                else:
                    # One-vs-rest: normalize the per-class sigmoids to sum to 1.
                    output /= torch.sum(output, dim=1, keepdim=True)

        if self.binary_classification:
            # Expand the positive-class probability into [P(neg), P(pos)].
            output = torch.cat([1 - output, output], dim=1)

        return predict_res, output
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_linear_model` function. Write a Python function `def convert_sklearn_linear_model(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.svm.LinearSVC`, `sklearn.linear_model.LogisticRegression`, `sklearn.linear_model.SGDClassifier`, and `sklearn.linear_model.LogisticRegressionCV` Args: operator: An operator wrapping a `sklearn.svm.LinearSVC`, `sklearn.linear_model.LogisticRegression`, `sklearn.linear_model.SGDClassifier`, or `sklearn.linear_model.LogisticRegressionCV` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_linear_model(operator, device, extra_config):
    """
    Converter for `sklearn.svm.LinearSVC`, `sklearn.linear_model.LogisticRegression`,
    `sklearn.linear_model.SGDClassifier`, and `sklearn.linear_model.LogisticRegressionCV`

    Args:
        operator: An operator wrapping a `sklearn.svm.LinearSVC`, `sklearn.linear_model.LogisticRegression`,
            `sklearn.linear_model.SGDClassifier`, or `sklearn.linear_model.LogisticRegressionCV` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    # Losses for which the downstream LinearModel implements probability calibration.
    supported_loss = {"log_loss", "modified_huber", "squared_hinge", "hinge", "huber", "perceptron", "epsilon_insensitive", "squared_error", "squared_epsilon_insensitive"}

    classes = [0] if not hasattr(operator.raw_operator, "classes_") else operator.raw_operator.classes_
    # Labels end up in a torch.IntTensor, so only integer class labels are supported.
    if not all(["int" in str(type(x)) for x in classes]):
        raise RuntimeError(
            "Hummingbird currently supports only integer labels for class labels. Please file an issue at https://github.com/microsoft/hummingbird."
        )

    # Transpose so the affine layer computes x @ coefficients.
    coefficients = operator.raw_operator.coef_.transpose().astype("float32")
    intercepts = operator.raw_operator.intercept_
    if np.ndim(intercepts) == 0:
        # Scalar intercept -> 0-d float32 array (flattened by LinearModel).
        intercepts = np.array(intercepts, dtype="float32")
    else:
        intercepts = intercepts.reshape(1, -1).astype("float32")

    multi_class = None
    loss = None
    if hasattr(operator.raw_operator, "multi_class"):
        # Mirror scikit-learn's resolution of the multi_class parameter:
        # liblinear (and the legacy "warn" value) solvers are always one-vs-rest,
        # and "auto" means one-vs-rest only for binary problems.
        if operator.raw_operator.multi_class == "ovr" or operator.raw_operator.solver in ["warn", "liblinear"]:
            multi_class = "ovr"
        elif operator.raw_operator.multi_class == "auto" and len(classes) == 2:
            multi_class = "ovr"
        else:
            multi_class = "multinomial"

    if hasattr(operator.raw_operator, "loss"):
        loss = operator.raw_operator.loss
        assert (
            loss in supported_loss
        ), "predict_proba for linear models currently only support {}. (Given {}). Please fill an issue at https://github.com/microsoft/hummingbird".format(
            supported_loss, loss
        )

    return LinearModel(operator, coefficients, intercepts, device, classes=classes, multi_class=multi_class, loss=loss)
6,613 | import numpy as np
from onnxconverter_common.registration import register_converter
from sklearn._loss.link import LogLink
from .._linear_implementations import LinearModel
class LinearModel(PhysicalOperator, torch.nn.Module):
    """
    PyTorch implementation of scikit-learn linear models: a single affine
    transform plus the per-loss / per-multi-class probability calibration.
    Used for both regressors (``is_linear_regression=True``) and classifiers.
    """

    def __init__(
        self,
        logical_operator,
        coefficients,
        intercepts,
        device,
        classes=[0],
        multi_class=None,
        loss=None,
        is_linear_regression=False,
    ):
        """
        Args:
            logical_operator: The logical operator this physical operator implements
            coefficients: numpy array of weights, shape (n_features, n_outputs)
            intercepts: numpy array of intercepts (flattened to 1D internally)
            device: Target device string (parameters follow the module)
            classes: Integer class labels (classification only); the [0] default
                is never mutated, so the mutable default is harmless here
            multi_class: "multinomial" for softmax scores, anything else is one-vs-rest
            loss: Loss name driving probability calibration (e.g. "modified_huber")
            is_linear_regression: True for plain regression (single tensor output)
        """
        super(LinearModel, self).__init__(logical_operator)
        self.coefficients = torch.nn.Parameter(torch.from_numpy(coefficients).detach().clone(), requires_grad=False)
        self.intercepts = torch.nn.Parameter(torch.from_numpy(intercepts).view(-1).detach().clone(), requires_grad=False)
        self.classes = torch.nn.Parameter(torch.IntTensor(classes), requires_grad=False)
        self.multi_class = multi_class
        self.regression = is_linear_regression
        self.classification = not is_linear_regression
        self.loss = loss
        # Classifiers default to log loss, i.e. sigmoid/softmax calibration.
        if self.loss is None and self.classification:
            self.loss = "log"
        self.binary_classification = False
        if len(classes) == 2:
            self.binary_classification = True

    def forward(self, x):
        x = x.float()
        # Affine transform: intercepts + x @ coefficients.
        output = torch.addmm(self.intercepts, x, self.coefficients)

        if self.regression:
            # "log" here marks a log link (set by the converter when the
            # sklearn model's _base_loss uses LogLink): predict exp(linear).
            if self.loss == "log":
                return torch.exp(output)
            return output

        # Predicted labels: sign test for binary, argmax over classes otherwise.
        if self.binary_classification:
            indices = (output > 0).squeeze().int()
        else:
            indices = torch.argmax(output, dim=1)
        predict_res = torch.index_select(self.classes, 0, indices)

        if self.multi_class == "multinomial":
            output = torch.softmax(output, dim=1)
        else:
            if self.loss == "modified_huber":
                # Clip raw scores to [-1, 1] and rescale to [0, 1], matching
                # scikit-learn's modified_huber predict_proba.
                output = torch.clip(output, -1, 1)
                output += 1
                output /= 2
            else:
                output = torch.sigmoid(output)
            if not self.binary_classification:
                if self.loss == "modified_huber":
                    # This loss might assign zero to all classes, which doesn't
                    # normalize neatly; work around this to produce uniform
                    # probabilities.
                    prob_sum = torch.sum(output, dim=1, keepdim=False)
                    all_zero = prob_sum == 0
                    if torch.any(all_zero):
                        output[all_zero, :] = 1
                        prob_sum[all_zero] = len(self.classes)
                    output /= prob_sum.view((output.shape[0], -1))
                else:
                    # One-vs-rest: normalize the per-class sigmoids to sum to 1.
                    output /= torch.sum(output, dim=1, keepdim=True)

        if self.binary_classification:
            # Expand the positive-class probability into [P(neg), P(pos)].
            output = torch.cat([1 - output, output], dim=1)

        return predict_res, output
The provided code snippet includes necessary dependencies for implementing the `convert_sklearn_linear_regression_model` function. Write a Python function `def convert_sklearn_linear_regression_model(operator, device, extra_config)` to solve the following problem:
Converter for `sklearn.linear_model.LinearRegression`, `sklearn.linear_model.Lasso`, `sklearn.linear_model.ElasticNet`, `sklearn.linear_model.Ridge`, `sklearn.svm.LinearSVR` and `sklearn.linear_model.RidgeCV` Args: operator: An operator wrapping a `sklearn.linear_model.LinearRegression`, `sklearn.svm.LinearSVR` or `sklearn.linear_model.RidgeCV` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Here is the function:
def convert_sklearn_linear_regression_model(operator, device, extra_config):
    """
    Converter for `sklearn.linear_model.LinearRegression`, `sklearn.linear_model.Lasso`,
    `sklearn.linear_model.ElasticNet`, `sklearn.linear_model.Ridge`, `sklearn.svm.LinearSVR`
    and `sklearn.linear_model.RidgeCV`.

    Args:
        operator: An operator wrapping one of the linear regression models above
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None, "Cannot convert None operator"

    raw = operator.raw_operator

    # Weights transposed to (n_features, n_outputs); a single-output model
    # yields a 1D array, which is promoted to a column vector.
    weights = raw.coef_.transpose().astype("float32")
    if weights.ndim == 1:
        weights = weights.reshape(-1, 1)

    bias = raw.intercept_
    bias = np.array(bias, dtype="float32") if np.ndim(bias) == 0 else bias.reshape(1, -1).astype("float32")

    # Models fitted with a log link (e.g. via sklearn's _base_loss) predict exp(linear).
    loss = "log" if hasattr(raw, "_base_loss") and isinstance(raw._base_loss.link, LogLink) else None

    return LinearModel(operator, weights, bias, device, loss=loss, is_linear_regression=True)
6,614 | from collections import OrderedDict
from copy import deepcopy
import pprint
from uuid import uuid4
import numpy as np
from onnxconverter_common.optimizer import LinkedNode, _topological_sort
from sklearn import pipeline
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.multioutput import MultiOutputRegressor, RegressorChain
from sklearn.preprocessing import OneHotEncoder, FunctionTransformer
from .containers import CommonSklearnModelContainer, CommonONNXModelContainer, CommonSparkMLModelContainer
from ._topology import Topology
from ._utils import sklearn_installed, sparkml_installed
from .operator_converters import constants
from .supported import get_sklearn_api_operator_name, get_onnxml_api_operator_name, get_sparkml_api_operator_name
def _parse_sklearn_pipeline(topology, model, inputs):
    """
    Parse a scikit-learn Pipeline by walking its stages in order.

    Each stage is parsed with the outputs of the previous stage as its inputs,
    mirroring how the pipeline executes at prediction time.

    :param topology: Topology object defined in _topology.py
    :param model: scikit-learn pipeline object
    :param inputs: A list of Variable objects
    :return: A list of output variables produced by the input pipeline
    """
    current = inputs
    for _, stage in model.steps:
        current = _parse_sklearn_api(topology, stage, current)
    return current
def _parse_sklearn_feature_union(topology, model, inputs):
    """
    Taken from https://github.com/onnx/sklearn-onnx/blob/9939c089a467676f4ffe9f3cb91098c4841f89d8/skl2onnx/_parse.py#L199.

    Parses a scikit-learn FeatureUnion: every sub-transform is parsed against
    the same inputs, optionally scaled by its transformer weight, and all
    results are concatenated into a single output variable.

    :param topology: Topology object
    :param model: A scikit-learn FeatureUnion object
    :param inputs: A list of Variable objects
    :return: A list of output variables produced by feature union
    """
    # Output variable name of each transform. It's a list of string.
    transformed_result_names = []
    # Encode each transform as our IR object
    for name, transform in model.transformer_list:
        transformed_result_names.append(_parse_sklearn_single_model(topology, transform, inputs)[0])
        if model.transformer_weights is not None and name in model.transformer_weights:
            # Pop the unweighted output so it can be replaced by the scaled one.
            transform_result = [transformed_result_names.pop()]
            # Create a Multiply node
            multiply_operator = topology.declare_logical_operator("SklearnMultiply")
            multiply_operator.inputs = transform_result
            multiply_operator.operand = model.transformer_weights[name]
            multiply_output = topology.declare_logical_variable("multiply_output")
            multiply_operator.outputs.append(multiply_output)
            transformed_result_names.append(multiply_operator.outputs[0])

    # Create a Concat operator
    concat_operator = topology.declare_logical_operator("SklearnConcat")
    concat_operator.inputs = transformed_result_names

    # Declare output name of scikit-learn FeatureUnion
    union_name = topology.declare_logical_variable("union")
    concat_operator.outputs.append(union_name)

    return concat_operator.outputs
def _parse_sklearn_multi_output_regressor(topology, model, inputs):
    """
    Parse a *scikit-learn* *MultiOutputRegressor*: every fitted estimator is
    parsed against the same inputs and the per-estimator predictions are
    concatenated into one output.

    :param topology: Topology object
    :param model: A *scikit-learn* *MultiOutputRegressor* object
    :param inputs: A list of Variable objects
    :return: Output produced by MultiOutputRegressor
    """
    per_estimator_outputs = [_parse_sklearn_api(topology, est, inputs)[0] for est in model.estimators_]

    concat = topology.declare_logical_operator("SklearnConcat")
    concat.inputs = per_estimator_outputs
    concat.outputs.append(topology.declare_logical_variable("concat_outputs"))
    return concat.outputs
def _parse_sklearn_regressor_chain(topology, model, inputs):
    """
    :param topology: Topology object
    :param model: A *scikit-learn* *RegressorChain* object
    :param inputs: A list of Variable objects
    :return: Output produced by RegressorChain
    """
    outputs = []
    for estimator in model.estimators_:
        outputs.append(_parse_sklearn_api(topology, estimator, inputs)[0])
        # Chain semantics: each estimator sees the original features plus the
        # predictions of all previous estimators, so append this prediction to
        # the inputs fed to the next estimator in the chain.
        conc_op = topology.declare_logical_operator("SklearnConcat")
        conc_op.inputs.extend(inputs)
        conc_op.inputs.append(outputs[-1])
        conc_names = topology.declare_logical_variable("concat_inputs")
        conc_op.outputs.append(conc_names)
        inputs = conc_op.outputs

    conc_op = topology.declare_logical_operator("SklearnConcat")
    if model.order is not None:
        # Undo the fitting order so outputs line up with the target columns.
        reorderd_outputs = [None for _ in outputs]
        for i, pos in enumerate(model.order):
            reorderd_outputs[pos] = outputs[i]
        outputs = reorderd_outputs
    conc_op.inputs = outputs
    conc_names = topology.declare_logical_variable("concat_outputs")
    conc_op.outputs.append(conc_names)
    return conc_op.outputs
def _parse_sklearn_model_selection(topology, model, inputs):
    """
    Parse a *sklearn.model_selection* search object (GridSearchCV /
    RandomizedSearchCV) by delegating to its best fitted estimator.

    :param topology: Topology object
    :param model: A *sklearn.model_selection* object
    :param inputs: A list of Variable objects
    :return: Output produced by sklearn.model_selection.* object
    """
    best = model.best_estimator_
    return _parse_sklearn_api(topology, best, inputs)
def _parse_sklearn_column_transformer(topology, model, inputs):
    """
    Taken from https://github.com/onnx/sklearn-onnx/blob/9939c089a467676f4ffe9f3cb91098c4841f89d8/skl2onnx/_parse.py#L238.

    :param topology: Topology object
    :param model: A *scikit-learn* *ColumnTransformer* object
    :param inputs: A list of Variable objects
    :return: A list of output variables produced by column transformer
    """
    # Output variable name of each transform. It's a list of string.
    transformed_result_names = []
    # Encode each transform as our IR object
    for name, op, column_indices in model.transformers_:
        if op == "drop":
            continue
        # Normalize the column selector to a plain list of indices/names.
        if isinstance(column_indices, slice):
            column_indices = list(
                range(
                    column_indices.start if column_indices.start is not None else 0,
                    column_indices.stop,
                    column_indices.step if column_indices.step is not None else 1,
                )
            )
        elif isinstance(column_indices, (int, str)):
            column_indices = [column_indices]
        if len(column_indices) == 0:
            continue

        # Map the requested columns to slices of the input variables.
        names = _get_column_indices(column_indices, inputs, len(inputs) > 1)
        transform_inputs = []
        for onnx_var, onnx_is in names.items():
            tr_inputs = _fetch_input_slice(topology, [inputs[onnx_var]], onnx_is)
            transform_inputs.extend(tr_inputs)

        # Decide whether the selected columns must be merged into one tensor;
        # transformers listed in do_not_merge_columns keep per-column inputs.
        merged_cols = False
        if len(transform_inputs) > 1:
            if isinstance(op, pipeline.Pipeline):
                if not isinstance(op.steps[0][1], do_not_merge_columns):
                    merged_cols = True
            elif not isinstance(op, do_not_merge_columns):
                merged_cols = True
        if merged_cols:
            # Many ONNX operators expect one input vector, the default behaviour is to merge columns.
            ty = transform_inputs[0].type.__class__([None, None])
            conc_op = topology.declare_logical_operator("SklearnConcat")
            conc_op.inputs = transform_inputs
            conc_names = topology.declare_logical_variable("merged_columns", ty)
            conc_op.outputs.append(conc_names)
            transform_inputs = [conc_names]

        model_obj = model.named_transformers_[name]
        if isinstance(model_obj, str):
            # String transformers are aliases: forward or drop the columns.
            if model_obj == "passthrough":
                var_out = transform_inputs[0]
            elif model_obj == "drop":
                var_out = None
            else:
                raise RuntimeError(
                    "Unknown operator alias " "'{0}'. These are specified in " "supported.py." "".format(model_obj)
                )
        else:
            var_out = _parse_sklearn_api(topology, model_obj, transform_inputs)[0]
        if model.transformer_weights is not None and name in model.transformer_weights:
            # Create a Multiply node scaling this transformer's output by its weight.
            multiply_operator = topology.declare_logical_operator("SklearnMultiply")
            multiply_operator.inputs.append(var_out)
            multiply_operator.operand = model.transformer_weights[name]
            var_out = topology.declare_logical_variable("multiply_output")
            multiply_operator.outputs.append(var_out)
        if var_out:
            transformed_result_names.append(var_out)

    # Create a Concat node
    if len(transformed_result_names) > 1:
        concat_operator = topology.declare_logical_operator("SklearnConcat")
        concat_operator.inputs = transformed_result_names

        # Declare output name of scikit-learn ColumnTransformer
        transformed_column_name = topology.declare_logical_variable("transformed_column")
        concat_operator.outputs.append(transformed_column_name)
        return concat_operator.outputs
    return transformed_result_names
def _parse_sklearn_function_transformer(topology, model, inputs):
"""
Taken from https://github.com/onnx/sklearn-onnx/blob/fdb52cec86d4d19401cc365db97650fd7692676b/skl2onnx/operator_converters/function_transformer.py#L10. # noqa: E501
:param topology: Topology object
:param model: A *scikit-learn* *FunctionTransformer* object
:param inputs: A list of Variable objects
:return: Output produced by function transformer
"""
if len(inputs) == 1:
# apply identity
return inputs
else:
# apply concat
conc_op = topology.declare_logical_operator("SklearnConcat")
conc_op.inputs = inputs
conc_names = topology.declare_logical_variable("concat_inputs")
conc_op.outputs.append(conc_names)
return conc_op.outputs
def _parse_sklearn_stacking(topology, model, inputs):
    """
    Taken from https://github.com/onnx/sklearn-onnx/blob/9939c089a467676f4ffe9f3cb91098c4841f89d8/skl2onnx/_parse.py#L238.

    :param topology: Topology object
    :param model: A *scikit-learn* *Stacking* object
    :param inputs: A list of Variable objects
    :return: A list of output variables produced by column transformer
    """
    # Output variable name of each estimator. It's a list of variables.
    transformed_result_names = []
    # Encode each estimator as our IR object.
    for op, method in zip(model.estimators_, model.stack_method_):
        var_out = _parse_sklearn_api(topology, op, inputs)
        if method not in ["predict_proba", "predict"]:
            raise ValueError(
                "Ensemble method {} not supported. Please fill an issue at https://github.com/microsoft/hummingbird.".format(
                    method
                )
            )
        # Select which of the estimator's outputs feeds the meta estimator:
        # index 1 for predict_proba, index 0 for predict.
        index = 0
        if method == "predict_proba":
            index = 1
        array_feature_extractor_operator = topology.declare_logical_operator("SklearnArrayFeatureExtractor")
        array_feature_extractor_operator.inputs = var_out
        array_feature_extractor_operator.column_indices = [index]
        output_variable_name = topology.declare_logical_variable("extracted_feature_columns", var_out[0].type)
        array_feature_extractor_operator.outputs.append(output_variable_name)
        transformed_result_names.append(output_variable_name)

    if model.passthrough:
        # passthrough=True also feeds the raw features to the final estimator.
        transformed_result_names.extend(inputs)

    if len(transformed_result_names) > 1:
        concat_operator = topology.declare_logical_operator("SklearnConcat")
        concat_operator.inputs = transformed_result_names

        # Declare output name of scikit-learn ColumnTransformer
        transformed_column_name = topology.declare_logical_variable("transformed_column")
        concat_operator.outputs.append(transformed_column_name)
        transformed_result_names = [transformed_column_name]

    # Finally parse the meta estimator on top of the stacked features.
    op = model.final_estimator_
    var_out = _parse_sklearn_api(topology, op, transformed_result_names)
    return var_out
def _parse_sklearn_bagging(topology, model, inputs):
    """
    Taken from https://github.com/onnx/sklearn-onnx/blob/9939c089a467676f4ffe9f3cb91098c4841f89d8/skl2onnx/_parse.py#L238.

    Parses a *scikit-learn* *BaggingClassifier* / *BaggingRegressor*: every
    base estimator is parsed against the same inputs, and a SklearnBagging
    operator aggregates all of their outputs.

    :param topology: Topology object
    :param model: A *scikit-learn* *BaggingClassifier* or *BaggingRegressor* object
    :param inputs: A list of Variable objects
    :return: A list of output variables produced by column transformer
    """
    # Parse each base estimator; collect every output variable it produces.
    member_outputs = []
    for sub_estimator in model.estimators_:
        member_outputs.extend(_parse_sklearn_api(topology, sub_estimator, inputs))

    # A single bagging operator aggregates the member predictions.
    bagging_operator = topology.declare_logical_operator("SklearnBagging", model)
    bagging_operator.inputs = member_outputs
    aggregated = topology.declare_logical_variable("transformed_column")
    bagging_operator.outputs.append(aggregated)
    return [aggregated]
def _build_sklearn_api_parsers_map():
    """
    Build the dispatch map from scikit-learn model classes to the parser
    functions handling their edge cases; everything not listed here goes
    through the generic single-model path.
    """
    parsers = {
        ColumnTransformer: _parse_sklearn_column_transformer,
        FunctionTransformer: _parse_sklearn_function_transformer,
        GridSearchCV: _parse_sklearn_model_selection,
        MultiOutputRegressor: _parse_sklearn_multi_output_regressor,
        pipeline.Pipeline: _parse_sklearn_pipeline,
        pipeline.FeatureUnion: _parse_sklearn_feature_union,
        RandomizedSearchCV: _parse_sklearn_model_selection,
        RegressorChain: _parse_sklearn_regressor_chain,
        BaggingClassifier: _parse_sklearn_bagging,
        # May introduce some rounding error. TODO better implementation.
        BaggingRegressor: _parse_sklearn_bagging,
        # More parsers will go here
    }

    # Stacking classes are optional (depends on the installed sklearn version).
    if StackingClassifier is not None:
        parsers[StackingClassifier] = _parse_sklearn_stacking
        parsers[StackingRegressor] = _parse_sklearn_stacking

    return parsers
6,615 | from collections import OrderedDict
from copy import deepcopy
import pprint
from uuid import uuid4
import numpy as np
from onnxconverter_common.optimizer import LinkedNode, _topological_sort
from sklearn import pipeline
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.multioutput import MultiOutputRegressor, RegressorChain
from sklearn.preprocessing import OneHotEncoder, FunctionTransformer
from .containers import CommonSklearnModelContainer, CommonONNXModelContainer, CommonSparkMLModelContainer
from ._topology import Topology
from ._utils import sklearn_installed, sparkml_installed
from .operator_converters import constants
from .supported import get_sklearn_api_operator_name, get_onnxml_api_operator_name, get_sparkml_api_operator_name
def _parse_sparkml_pipeline(topology, model, all_outputs):
def _build_sparkml_api_parsers_map():
    """
    Build the dispatch map from Spark-ML model classes to parser functions.
    """
    # Imported lazily so sklearn-only installs don't require pyspark.
    from pyspark.ml.pipeline import PipelineModel

    return {
        PipelineModel: _parse_sparkml_pipeline,
        # More parsers will go here
    }
6,616 | import numpy as np
import os
from packaging.version import Version, parse
import torch
from uuid import uuid4
from onnxconverter_common.registration import get_converter
from onnxconverter_common.topology import Topology as ONNXTopology
import onnx
from hummingbird.ml.containers import (
PyTorchSklearnContainerRegression,
PyTorchSklearnContainerClassification,
PyTorchSklearnContainerTransformer,
PyTorchSklearnContainerAnomalyDetection,
TorchScriptSklearnContainerRegression,
TorchScriptSklearnContainerClassification,
TorchScriptSklearnContainerTransformer,
TorchScriptSklearnContainerAnomalyDetection,
ONNXSklearnContainerRegression,
ONNXSklearnContainerClassification,
ONNXSklearnContainerTransformer,
ONNXSklearnContainerAnomalyDetection,
TVMSklearnContainerRegression,
TVMSklearnContainerClassification,
TVMSklearnContainerTransformer,
TVMSklearnContainerAnomalyDetection,
BatchContainer,
)
from hummingbird.ml._utils import pandas_installed, tvm_installed, get_device, from_strings_to_ints
from hummingbird.ml._executor import Executor
from hummingbird.ml.exceptions import MissingConverter
from hummingbird.ml.operator_converters import constants
def _jit_trace(executor, trace_input, device, extra_config):
"""
Function used to convert an input pytorch model into torchscript.
"""
if device != "cpu":
if type(trace_input) is tuple:
for input_ in trace_input:
input_.to(device)
else:
trace_input.to(device)
return torch.jit.trace(executor, trace_input).eval()
def _get_trace_input_from_test_input(input, remainder_size=None, extra_config={}):
    """
    Utility function used to properly put the inputs into a format understandable by torch.
    If `remainder_size` is provided, also return inputs for a remainder model (see below).
    """

    def _as_tensor(array):
        # String columns must be encoded as int32 before being handed to torch.
        if array.dtype.kind in constants.SUPPORTED_STRING_TYPES:
            assert constants.MAX_STRING_LENGTH in extra_config
            array = from_strings_to_ints(array, extra_config[constants.MAX_STRING_LENGTH])
        return torch.from_numpy(array)

    remainder = None
    if isinstance(input, tuple):
        trace_input = tuple(_as_tensor(array) for array in input)
        if remainder_size is not None and remainder_size != 0:
            remainder = tuple(tensor[0:remainder_size, :] for tensor in trace_input)
    else:
        trace_input = _as_tensor(input)
        if remainder_size is not None and remainder_size != 0:
            remainder = trace_input[0:remainder_size, :]
    return (trace_input, remainder)
def _get_batch_size(batch):
if isinstance(batch, tuple):
return batch[0].shape[0]
assert isinstance(batch, np.ndarray)
return batch.shape[0]
def _compile_to_tvm(topology, executor, trace_input, target, ctx, config, extra_config):
    """
    Compile a PyTorch executor into a TVM graph-runtime module.

    Args:
        topology: The `Topology` object providing input names
        executor: The torch.nn.Module to compile
        trace_input: Tensor (or tuple of tensors) used for torchscript tracing
        target: The TVM compilation target (e.g. "llvm" or a cuda target)
        ctx: The TVM device context the compiled module will run on
        config: TVM PassContext configuration dict
        extra_config: Mutated in place — the generated graph/lib/params are stored here

    Returns:
        A TVM graph-runtime module with its parameters already set.
    """
    import tvm
    from tvm import relay
    from tvm.contrib import graph_runtime
    # TVM's PyTorch frontend consumes torchscript, so trace the executor first (on CPU).
    ts_model = _jit_trace(executor, trace_input, "cpu", extra_config)
    # (name, shape) pairs describing the model inputs, as required by relay.frontend.from_pytorch.
    test_input = [
        (
            topology.input_container.input_names[i],
            trace_input[i].shape if type(trace_input) is tuple else trace_input.shape,
        )
        for i in range(len(topology.input_container.input_names))
    ]
    model, params = relay.frontend.from_pytorch(ts_model, test_input)
    with tvm.transform.PassContext(opt_level=3, config=config):
        graph, lib, params = relay.build(model, target=target, params=params)
    tvm_model = graph_runtime.create(graph, lib, ctx)
    tvm_model.set_input(**params)
    # Stash the compilation artifacts so the container can later serialize the model.
    extra_config[constants.TVM_GRAPH] = graph
    extra_config[constants.TVM_LIB] = lib
    extra_config[constants.TVM_PARAMS] = params
    return tvm_model
def tvm_installed():
    """
    Checks that *TVM* is available.
    """
    try:
        import tvm  # noqa: F401
        return True
    except ImportError:
        return False
class Executor(torch.nn.Module, object):
    """
    Executor class able to run Hummingbird's internal representation of a converted pipeline.
    """
    def __init__(self, input_names, output_names, operator_map, operators, extra_config):
        """
        Args:
            input_names: The names of the input `onnxconverter_common.topology.Variable`s for this model
            output_names: The names of the output `onnxconverter_common.topology.Variable`s generated by this model
            operator_map: A dictionary of operator aliases and related PyTorch implementations
            operators: The list of operators (in a topological order) that will be executed by the model (in order)
            extra_config: Some additional custom configuration parameter
        """
        super(Executor, self).__init__()
        # Define input \ output names.
        # This is required because the internal variable names may differ from the original (raw) one.
        # This may happen, for instance, because we force our internal naming to be unique.
        def _fix_var_naming(operators, names, mod="input"):
            # Map each raw (user-facing) variable name to the internal full name used
            # by the first operator that consumes (mod="input") or produces
            # (mod="output") it. If no name could be mapped, return `names` unchanged.
            new_names = []
            map = {}
            for op in operators:
                if mod == "input":
                    iter = op.inputs
                else:
                    iter = op.outputs
                for i in iter:
                    for name in names:
                        if i.raw_name == name and name not in map:
                            map[i.raw_name] = i.full_name
                # Stop early once every requested name has been resolved.
                if len(map) == len(names):
                    break
            if map == {}:
                return names
            for name in names:
                new_names.append(map[name])
            return new_names
        self._input_names = _fix_var_naming(operators, input_names)
        # Outputs are resolved scanning the operators backwards (last producer wins).
        self._output_names = _fix_var_naming(reversed(operators), output_names, "output")
        self._operators = torch.nn.ModuleList([operator_map[operator.full_name] for operator in operators])
        # Maximum encoded string length; needed to turn string inputs into int32 tensors.
        self.max_string_length = None
        if constants.MAX_STRING_LENGTH in extra_config:
            self.max_string_length = extra_config[constants.MAX_STRING_LENGTH]
    def forward(self, *inputs):
        """Run the converted pipeline over `inputs` and return the output variable(s)."""
        with torch.no_grad():
            # NOTE(review): `self.check_dataframe_to_array` is read below but is never
            # assigned in `__init__` as shown here — confirm it is set elsewhere before
            # a DataFrame input with mismatched arity can reach this assert.
            assert len(self._input_names) == len(inputs) or (
                DataFrame is not None
                and isinstance(inputs[0], DataFrame)
                and not self.check_dataframe_to_array
                and len(self._input_names) == len(inputs[0].columns)
            ), "number of inputs or number of columns in the dataframe do not match with the expected number of inputs {}".format(
                self._input_names
            )
            # `DataFrame` is presumably a module-level name guarded on pandas being
            # installed (None otherwise) — defined outside this view.
            if DataFrame is not None and isinstance(inputs[0], DataFrame):
                # Split the dataframe into column ndarrays.
                inputs = inputs[0]
                input_names = list(inputs.columns)
                splits = [inputs[input_names[idx]] for idx in range(len(input_names))]
                splits = [df.to_numpy().reshape(-1, 1) for df in splits]
                inputs = tuple(splits)
            inputs = [*inputs]
            variable_map = {}
            device = get_device(self)
            # Maps data inputs to the expected variables.
            for i, input_name in enumerate(self._input_names):
                input_ = inputs[i]
                if type(input_) is list:
                    input_ = np.array(input_)
                if type(input_) is np.ndarray:
                    # Convert string arrays into int32.
                    if input_.dtype.kind in constants.SUPPORTED_STRING_TYPES:
                        assert self.max_string_length is not None
                        input_ = from_strings_to_ints(input_, self.max_string_length)
                    elif input_.dtype.kind == "M":  # Datetime
                        # We convert into seconds from 1970-1-1.
                        input_ = (input_ - np.datetime64("1970-01-01T00:00:00.000000000")).astype(np.int64) / 1000000000
                    input_ = torch.from_numpy(input_)
                elif type(input_) is not torch.Tensor:
                    raise RuntimeError("Inputer tensor {} of not supported type {}".format(input_name, type(input_)))
                if input_.dtype == torch.float64:
                    # We convert double precision arrays into single precision. Sklearn does the same.
                    input_ = input_.float()
                if device is not None and device.type != "cpu":
                    input_ = input_.to(device)
                variable_map[input_name] = input_
            # Evaluate all the operators in the topology by properly wiring inputs \ outputs
            for operator in self._operators:
                outputs = operator(*(variable_map[input_name] for input_name in operator.inputs))
                if len(operator.outputs) == 1:
                    variable_map[operator.outputs[0]] = outputs
                else:
                    for i, output_name in enumerate(operator.outputs):
                        variable_map[output_name] = outputs[i]
            # Prepare and return the output.
            if len(self._output_names) == 1:
                return variable_map[self._output_names[0]]
            else:
                return tuple(variable_map[output_name] for output_name in self._output_names)
class MissingConverter(RuntimeError):
    """
    Raised when there is no registered converter for a machine learning operator.
    """
    def __init__(self, msg):
        # `_missing_converter` is a module-level string (defined outside this view)
        # appended to every error message; presumably it points users at how to
        # register or request a converter — TODO confirm.
        super().__init__(msg + _missing_converter)
The provided code snippet includes necessary dependencies for implementing the `convert` function. Write a Python function `def convert(topology, backend, test_input, device, extra_config={})` to solve the following problem:
This function is used to convert a `Topology` object into a *backend* model. Args: topology: The `Topology` object that will be converted into a backend model backend: Which backend the model should be run on test_input: Inputs for PyTorch model tracing device: Which device the translated model will be run on extra_config: Extra configurations to be used by individual operator converters Returns: A model implemented in the selected backend
Here is the function:
def convert(topology, backend, test_input, device, extra_config=None):
    """
    This function is used to convert a `Topology` object into a *backend* model.

    Args:
        topology: The `Topology` object that will be converted into a backend model
        backend: Which backend the model should be run on
        test_input: Inputs for PyTorch model tracing
        device: Which device the translated model will be run on
        extra_config: Extra configurations to be used by individual operator converters
            (defaults to an empty dict)

    Returns:
        A model implemented in the selected backend
    """
    # Use a fresh dict per call: `extra_config` is mutated below (e.g. the TVM
    # graph/lib/params and context are stored into it), so a shared mutable
    # default argument would leak state across calls.
    if extra_config is None:
        extra_config = {}
    assert topology is not None, "Cannot convert a Topology object of type None."
    assert backend is not None, "Cannot convert a Topology object into backend None."
    assert device is not None, "Cannot convert a Topology object into device None."
    tvm_backend = None
    operator_map = {}
    if tvm_installed():
        import tvm
        tvm_backend = tvm.__name__
    # Translate each operator of the topology into its PyTorch implementation.
    for operator in topology.topological_operator_iterator():
        converter = get_converter(operator.type)
        if converter is None:
            raise MissingConverter(
                "Unable to find converter for {} type {} with extra config: {}.".format(
                    operator.type, type(getattr(operator, "raw_model", None)), extra_config
                )
            )
        if backend == onnx.__name__:
            # Pytorch <= 1.4.0 has a bug with exporting GEMM into ONNX.
            if parse(torch.__version__) <= Version("1.4"):
                # Raise an error and warn user that the torch version is not supported with onnx backend
                raise Exception(
                    f"The current torch version {torch.__version__} is not supported with {backend} backend. "
                    "Please use a torch version > 1.4 or change the backend."
                )
        operator_map[operator.full_name] = converter(operator, device, extra_config)
    # Set the parameters for the model / container
    n_threads = None if constants.N_THREADS not in extra_config else extra_config[constants.N_THREADS]
    # We set the number of threads for torch here to avoid errors in case we JIT.
    # We set intra op concurrency while we force operators to run sequentially.
    # We can revise this later, but in general we don't have graphs requireing inter-op parallelism.
    if n_threads is not None:
        if torch.get_num_interop_threads() != 1:
            torch.set_num_interop_threads(1)
        torch.set_num_threads(n_threads)
    operators = list(topology.topological_operator_iterator())
    executor = Executor(
        topology.input_container.input_names, topology.input_container.output_names, operator_map, operators, extra_config
    ).eval()
    # if constants.REMAINDER_SIZE is present in extra_config, we are in the convert_batch mode.
    remainder_model = None
    remainder_size = None if constants.REMAINDER_SIZE not in extra_config else extra_config[constants.REMAINDER_SIZE]
    if backend == onnx.__name__:
        onnx_model_name = output_model_name = None
        target_opset = 11
        # Set optional configuration options for ONNX if any.
        if constants.ONNX_OUTPUT_MODEL_NAME in extra_config:
            onnx_model_name = extra_config[constants.ONNX_OUTPUT_MODEL_NAME]
            output_model_name = onnx_model_name + ".onnx"
        if constants.ONNX_TARGET_OPSET in extra_config:
            target_opset = extra_config[constants.ONNX_TARGET_OPSET]
        if output_model_name is None:
            output_model_name = str(uuid4().hex) + ".onnx"
        # Put the tracing test input into the right format.
        batch_trace_input, remainder_input = _get_trace_input_from_test_input(test_input, remainder_size, extra_config)
        # Supports dynamic batch size
        dynamic_axes_cfg = {
            k: {0: "sym"} for k in topology.input_container.input_names + topology.input_container.output_names
        }
        # Generate the ONNX models
        torch.onnx.export(
            executor,
            batch_trace_input,
            output_model_name,
            input_names=topology.input_container.input_names,
            output_names=topology.input_container.output_names,
            dynamic_axes=dynamic_axes_cfg,
            keep_initializers_as_inputs=False,
            opset_version=target_opset,
            do_constant_folding=True,
        )
        hb_model = onnx.load(output_model_name)
        os.remove(output_model_name)
        if remainder_size:
            torch.onnx.export(
                executor,
                remainder_input,
                output_model_name,
                input_names=topology.input_container.input_names,
                output_names=topology.input_container.output_names,
                dynamic_axes=dynamic_axes_cfg,
                keep_initializers_as_inputs=False,
                opset_version=target_opset,
                do_constant_folding=True,
            )
            remainder_model = onnx.load(output_model_name)
            os.remove(output_model_name)
        # Set the ONNX model name if any.
        if onnx_model_name is not None:
            hb_model.graph.name = onnx_model_name
    elif backend == tvm_backend:
        # Pick the proper target.
        if device == "cuda":
            target = tvm.target.cuda()
            ctx = tvm.gpu()
        elif device == "cpu":
            target = "llvm"
            ctx = tvm.cpu()
        elif "llvm" in device:
            target = device
            ctx = tvm.cpu()
        else:
            raise RuntimeError("Device {} not recognized".format(device))
        # Get configuration parameters.
        # 50 is a good depth for operator fusion. More than that will probably hurt performance.
        # https://github.com/microsoft/hummingbird/issues/232#issuecomment-697979508
        config = {"relay.FuseOps.max_depth": 50}
        if constants.TVM_MAX_FUSE_DEPTH in extra_config:
            config["relay.FuseOps.max_depth"] = extra_config[constants.TVM_MAX_FUSE_DEPTH]
        # First we need to generate the torchscript model.
        batch_trace_input, remainder_trace_input = _get_trace_input_from_test_input(test_input, remainder_size, extra_config)
        tvm_model = _compile_to_tvm(topology, executor, batch_trace_input, target, ctx, config, extra_config)
        if remainder_trace_input is not None:
            remainder_model = _compile_to_tvm(topology, executor, remainder_trace_input, target, ctx, config, extra_config)
        # In the container we will be using the context to properly configure the input tensors.
        extra_config[constants.TVM_CONTEXT] = ctx
        extra_config[constants.TVM_INPUT_NAMES] = topology.input_container.input_names
        hb_model = tvm_model
    else:
        # Set the device for the model.
        if device != "cpu":
            # Test both backend names explicitly. (The previous form
            # `backend == torch.__name__ or torch.jit.__name__` was always truthy
            # because a non-empty string short-circuits the `or`.)
            if backend in (torch.__name__, torch.jit.__name__):
                executor = executor.to(device)
        # If the backend is torchscript, jit the model.
        if backend == torch.jit.__name__:
            trace_input, _ = _get_trace_input_from_test_input(test_input, remainder_size, extra_config)
            executor = _jit_trace(executor, trace_input, device, extra_config)
            # NOTE(review): `optimized_execution` returns a context manager; calling it
            # outside a `with` block presumably has no effect — confirm intent.
            torch.jit.optimized_execution(executor)
        hb_model = executor
    # Return if the container is not needed.
    if constants.CONTAINER in extra_config and not extra_config[constants.CONTAINER]:
        return hb_model
    # We scan the operators backwards until we find an operator with a defined type.
    # This is necessary because ONNX models can have arbitrary operators doing casting, reshaping etc.
    idx = len(operators) - 1
    while (
        idx >= 0
        and not operator_map[operators[idx].full_name].regression
        and not operator_map[operators[idx].full_name].classification
        and not operator_map[operators[idx].full_name].anomaly_detection
        and not operator_map[operators[idx].full_name].transformer
    ):
        idx -= 1
    force_transformer = False
    if idx < 0:
        force_transformer = True
    # If is a transformer, we need to check whether there is another operator type before.
    # E.g., normalization after classification.
    if not force_transformer:
        tmp_idx = idx
        if operator_map[operators[idx].full_name].transformer:
            while (
                idx >= 0
                and not operator_map[operators[idx].full_name].regression
                and not operator_map[operators[idx].full_name].classification
                and not operator_map[operators[idx].full_name].anomaly_detection
            ):
                idx -= 1
            if idx < 0:
                idx = tmp_idx
    # Get the proper container type.
    if force_transformer or operator_map[operators[idx].full_name].transformer:
        # We are just transforming the input data.
        if backend == torch.jit.__name__:
            container = TorchScriptSklearnContainerTransformer
        elif backend == onnx.__name__:
            container = ONNXSklearnContainerTransformer
        elif backend == tvm_backend:
            container = TVMSklearnContainerTransformer
        else:
            container = PyTorchSklearnContainerTransformer
    elif operator_map[operators[idx].full_name].regression:
        # We are doing a regression task.
        if backend == torch.jit.__name__:
            container = TorchScriptSklearnContainerRegression
        elif backend == onnx.__name__:
            container = ONNXSklearnContainerRegression
        elif backend == tvm_backend:
            container = TVMSklearnContainerRegression
        else:
            container = PyTorchSklearnContainerRegression
    elif operator_map[operators[idx].full_name].anomaly_detection:
        # We are doing anomaly detection.
        if backend == torch.jit.__name__:
            container = TorchScriptSklearnContainerAnomalyDetection
        elif backend == onnx.__name__:
            container = ONNXSklearnContainerAnomalyDetection
        elif backend == tvm_backend:
            container = TVMSklearnContainerAnomalyDetection
        else:
            container = PyTorchSklearnContainerAnomalyDetection
    else:
        # We are doing a classification task.
        if backend == torch.jit.__name__:
            container = TorchScriptSklearnContainerClassification
        elif backend == onnx.__name__:
            container = ONNXSklearnContainerClassification
        elif backend == tvm_backend:
            container = TVMSklearnContainerClassification
        else:
            container = PyTorchSklearnContainerClassification
    n_threads = None if constants.N_THREADS not in extra_config else extra_config[constants.N_THREADS]
    batch_size = None if constants.TEST_INPUT not in extra_config else _get_batch_size(test_input)
    hb_container = container(hb_model, n_threads, batch_size, extra_config=extra_config)
    if remainder_model:
        aux_container = container(remainder_model, n_threads, remainder_size, extra_config=extra_config)
        return BatchContainer(hb_container, aux_container)
    elif remainder_size is not None and remainder_size > 0:
        # remainder_size is non zero but remainder_model is not created
        # -> torch backend case
        aux_container = container(hb_model, n_threads, remainder_size, extra_config=extra_config)
        return BatchContainer(hb_container, aux_container)
    elif remainder_size is not None:
        # remainder_size is not None but remainder_model is not created
        # -> remainder_size must be zero (no need to create remainder_model)
        assert remainder_size == 0, "remainder_size is non zero but no remainder_model has been created"
        # remainder_size is not None only if called by convert_batch(...), so we return BatchContainer
        # for this code path, even though there is no remainder_model created.
        return BatchContainer(hb_container)
    return hb_container
6,617 | from collections import defaultdict
from .exceptions import MissingConverter
from ._utils import (
torch_installed,
sklearn_installed,
lightgbm_installed,
xgboost_installed,
onnx_runtime_installed,
tvm_installed,
sparkml_installed,
prophet_installed,
)
def sklearn_installed():
    """
    Checks that *Sklearn* is available.
    """
    try:
        import sklearn  # noqa: F401
    except ImportError:
        return False
    return True
class KMeans(PhysicalOperator, torch.nn.Module):
    """
    Class implementing Kmeans in PyTorch
    """
    def __init__(self, logical_operator, centroids, device):
        super(KMeans, self).__init__(logical_operator, regression=True)
        # Cluster centers are frozen model parameters, never trained.
        self.centroids = torch.nn.Parameter(torch.FloatTensor(centroids), requires_grad=False)

    def forward(self, x):
        # Euclidean distance from every row of x to every centroid,
        # then the index of the nearest centroid per row.
        distances = torch.cdist(x, self.centroids, compute_mode="donot_use_mm_for_euclid_dist")
        return torch.argmin(distances, dim=1)
The provided code snippet includes necessary dependencies for implementing the `_build_sklearn_operator_list` function. Write a Python function `def _build_sklearn_operator_list()` to solve the following problem:
Put all supported Sklearn operators on a list.
Here is the function:
def _build_sklearn_operator_list():
    """
    Put all supported Sklearn operators on a list.

    Returns an empty list when scikit-learn is not installed; otherwise the
    list of supported estimator/transformer classes, with operators that are
    deprecated in the installed sklearn version filtered out.
    """
    if sklearn_installed():
        # Tree-based models
        from sklearn.ensemble import (
            ExtraTreesClassifier,
            ExtraTreesRegressor,
            GradientBoostingClassifier,
            GradientBoostingRegressor,
            HistGradientBoostingClassifier,
            HistGradientBoostingRegressor,
            IsolationForest,
            RandomForestClassifier,
            RandomForestRegressor,
        )
        from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
        # Linear-based models
        from sklearn.linear_model import (
            LinearRegression,
            LogisticRegression,
            LogisticRegressionCV,
            SGDClassifier,
            RidgeCV,
            ElasticNet,
            Ridge,
            Lasso,
            TweedieRegressor,
            PoissonRegressor,
            GammaRegressor,
        )
        # SVM-based models
        from sklearn.svm import LinearSVC, SVC, NuSVC, LinearSVR
        # Imputers
        from sklearn.impute import MissingIndicator, SimpleImputer
        # MLP Models
        from sklearn.neural_network import MLPClassifier, MLPRegressor
        # Naive Bayes Models
        from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB
        # Matrix decomposition transformers
        from sklearn.decomposition import PCA, KernelPCA, FastICA, TruncatedSVD
        # Cross decomposition
        from sklearn.cross_decomposition import PLSRegression
        # KNeighbors models
        from sklearn.neighbors import KNeighborsClassifier
        from sklearn.neighbors import KNeighborsRegressor
        # Clustering models
        from sklearn.cluster import KMeans, MeanShift
        # Model selection
        from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
        # Preprocessing
        from sklearn.preprocessing import (
            Binarizer,
            KBinsDiscretizer,
            LabelEncoder,
            MaxAbsScaler,
            MinMaxScaler,
            Normalizer,
            OneHotEncoder,
            PolynomialFeatures,
            RobustScaler,
            StandardScaler,
        )
        try:
            from sklearn.preprocessing import Imputer
        except ImportError:
            # Imputer was deprecate in sklearn >= 0.22
            Imputer = None
        # Features
        from sklearn.feature_selection import SelectKBest, SelectPercentile, VarianceThreshold
        # Mixture models
        from sklearn.mixture import BayesianGaussianMixture
        supported_ops = [
            # Trees
            DecisionTreeClassifier,
            DecisionTreeRegressor,
            ExtraTreesClassifier,
            ExtraTreesRegressor,
            GradientBoostingClassifier,
            GradientBoostingRegressor,
            HistGradientBoostingClassifier,
            HistGradientBoostingRegressor,
            IsolationForest,
            OneHotEncoder,
            RandomForestClassifier,
            RandomForestRegressor,
            # Linear-methods
            LinearRegression,
            LinearSVC,
            LinearSVR,
            LogisticRegression,
            LogisticRegressionCV,
            SGDClassifier,
            RidgeCV,
            Lasso,
            ElasticNet,
            Ridge,
            TweedieRegressor,
            PoissonRegressor,
            GammaRegressor,
            # Clustering
            KMeans,
            MeanShift,
            # Other models
            BernoulliNB,
            GaussianNB,
            KNeighborsClassifier,
            KNeighborsRegressor,
            MLPClassifier,
            MLPRegressor,
            MultinomialNB,
            # SVM
            NuSVC,
            SVC,
            # Imputers
            Imputer,
            MissingIndicator,
            SimpleImputer,
            # Preprocessing
            Binarizer,
            KBinsDiscretizer,
            LabelEncoder,
            MaxAbsScaler,
            MinMaxScaler,
            Normalizer,
            PolynomialFeatures,
            RobustScaler,
            StandardScaler,
            # Matrix Decomposition
            FastICA,
            KernelPCA,
            PCA,
            TruncatedSVD,
            # Cross Decomposition
            PLSRegression,
            # Feature selection
            SelectKBest,
            SelectPercentile,
            VarianceThreshold,
            # Mixture models
            BayesianGaussianMixture,
        ]
        # Remove all deprecated operators given the sklearn version. E.g., Imputer for sklearn > 0.21.3.
        return [x for x in supported_ops if x is not None]
    return []
6,618 | from collections import defaultdict
from .exceptions import MissingConverter
from ._utils import (
torch_installed,
sklearn_installed,
lightgbm_installed,
xgboost_installed,
onnx_runtime_installed,
tvm_installed,
sparkml_installed,
prophet_installed,
)
def sparkml_installed():
    """
    Checks that *Spark ML/PySpark* is available.
    """
    try:
        import pyspark  # noqa: F401
    except ImportError:
        return False
    return True
The provided code snippet includes necessary dependencies for implementing the `_build_sparkml_operator_list` function. Write a Python function `def _build_sparkml_operator_list()` to solve the following problem:
List all supported SparkML operators.
Here is the function:
def _build_sparkml_operator_list():
    """
    List all supported SparkML operators.
    """
    if not sparkml_installed():
        return []
    from pyspark.ml.classification import LogisticRegressionModel
    from pyspark.ml.feature import Bucketizer, VectorAssembler

    return [
        # Featurizers
        Bucketizer,
        VectorAssembler,
        # Linear Models
        LogisticRegressionModel,
    ]
6,619 | from collections import defaultdict
from .exceptions import MissingConverter
from ._utils import (
torch_installed,
sklearn_installed,
lightgbm_installed,
xgboost_installed,
onnx_runtime_installed,
tvm_installed,
sparkml_installed,
prophet_installed,
)
def xgboost_installed():
    """
    Checks that *XGBoost* is available.
    """
    try:
        import xgboost  # noqa: F401
    except ImportError:
        return False
    from xgboost.core import _LIB
    try:
        _LIB.XGBoosterDumpModelEx
    except AttributeError:
        # The version is not recent enough even though it is version 0.6.
        # You need to install xgboost from github and not from pypi.
        return False
    from xgboost import __version__
    if parse(__version__) < Version("0.90"):
        warnings.warn("The converter works for xgboost >= 0.9. Different versions might not.")
    return True
The provided code snippet includes necessary dependencies for implementing the `_build_xgboost_operator_list` function. Write a Python function `def _build_xgboost_operator_list()` to solve the following problem:
List all supported XGBoost (Sklearn API) operators.
Here is the function:
def _build_xgboost_operator_list():
    """
    List all supported XGBoost (Sklearn API) operators.
    """
    if not xgboost_installed():
        return []
    from xgboost import XGBClassifier, XGBRanker, XGBRegressor

    return [XGBClassifier, XGBRanker, XGBRegressor]
6,620 | from collections import defaultdict
from .exceptions import MissingConverter
from ._utils import (
torch_installed,
sklearn_installed,
lightgbm_installed,
xgboost_installed,
onnx_runtime_installed,
tvm_installed,
sparkml_installed,
prophet_installed,
)
def lightgbm_installed():
    """
    Checks that *LightGBM* is available.
    """
    try:
        import lightgbm  # noqa: F401
    except ImportError:
        return False
    return True
The provided code snippet includes necessary dependencies for implementing the `_build_lightgbm_operator_list` function. Write a Python function `def _build_lightgbm_operator_list()` to solve the following problem:
List all supported LightGBM (Sklearn API) operators.
Here is the function:
def _build_lightgbm_operator_list():
    """
    List all supported LightGBM (Sklearn API) operators.
    """
    if not lightgbm_installed():
        return []
    from lightgbm import LGBMClassifier, LGBMRanker, LGBMRegressor, Booster

    return [LGBMClassifier, LGBMRanker, LGBMRegressor, Booster]
6,621 | from collections import defaultdict
from .exceptions import MissingConverter
from ._utils import (
torch_installed,
sklearn_installed,
lightgbm_installed,
xgboost_installed,
onnx_runtime_installed,
tvm_installed,
sparkml_installed,
prophet_installed,
)
def onnx_runtime_installed():
    """
    Checks that *ONNX Runtime* is available.
    """
    try:
        import onnxruntime  # noqa: F401
    except ImportError:
        return False
    return True
The provided code snippet includes necessary dependencies for implementing the `_build_onnxml_operator_list` function. Write a Python function `def _build_onnxml_operator_list()` to solve the following problem:
List all supported ONNXML operators.
Here is the function:
def _build_onnxml_operator_list():
    """
    List all supported ONNXML operators.
    """
    if not onnx_runtime_installed():
        return []
    return [
        # Linear-based models
        "LinearClassifier",
        "LinearRegressor",
        # ONNX operators.
        "Abs",
        "Add",
        "ArgMax",
        "Cast",
        "Concat",
        "Div",
        "Less",
        "MatMul",
        "Mul",
        "Neg",
        "Reshape",
        "Sum",
        # Preprocessing
        "ArrayFeatureExtractor",
        "Binarizer",
        "FeatureVectorizer",
        "Gather",
        "Imputer",
        "LabelEncoder",
        "OneHotEncoder",
        "Normalizer",
        "Scaler",
        "SVMClassifier",
        # Tree-based models
        "TreeEnsembleClassifier",
        "TreeEnsembleRegressor",
    ]
6,622 | from collections import defaultdict
from .exceptions import MissingConverter
from ._utils import (
torch_installed,
sklearn_installed,
lightgbm_installed,
xgboost_installed,
onnx_runtime_installed,
tvm_installed,
sparkml_installed,
prophet_installed,
)
def prophet_installed():
    """
    Checks that *Prophet* is available.
    """
    try:
        from prophet import Prophet  # noqa: F401
        return True
    except ImportError:
        return False
class Prophet(PhysicalOperator, torch.nn.Module):
    """
    Class implementing Prophet operator in PyTorch.
    """
    def __init__(self, logical_operator, k, m, deltas, floor, start, t_scale, y_scale, changepoints_t, device):
        # Parameters of a piecewise-linear trend: presumably k is the base rate,
        # m the offset, `deltas` the per-changepoint rate adjustments and
        # `changepoints_t` the changepoint positions on the scaled time axis,
        # with start/t_scale and floor/y_scale (de)normalizing time and output
        # — matches Prophet's trend model, confirm against the fitted model.
        super(Prophet, self).__init__(logical_operator)
        self.regression = True
        self.k = k
        self.m = m
        self.deltas = torch.nn.Parameter(torch.Tensor(deltas), requires_grad=False)
        self.floor = floor
        self.start = start
        self.t_scale = t_scale
        self.y_scale = y_scale
        self.changepoints_t = torch.nn.Parameter(torch.Tensor(changepoints_t), requires_grad=False)
    def forward(self, x):
        # Sort the timestamps and rescale them onto the internal time axis.
        x = torch.sort(x)[0]
        t = (x - self.start) / self.t_scale
        # Linear.
        # Intercept changes
        gammas = -self.changepoints_t * self.deltas
        # Get cumulative slope and intercept at each t
        k_t = self.k * torch.ones_like(t)
        m_t = self.m * torch.ones_like(t)
        # Each changepoint adds its rate delta (and matching intercept shift)
        # to every time point at or after it.
        for s, t_s in enumerate(self.changepoints_t):
            indx = t >= t_s
            k_t[indx] += self.deltas[s]
            m_t[indx] += gammas[s]
        trend = k_t * t + m_t
        # De-normalize back to the original output scale.
        trend = trend * self.y_scale + self.floor
        return trend
The provided code snippet includes necessary dependencies for implementing the `_build_prophet_operator_list` function. Write a Python function `def _build_prophet_operator_list()` to solve the following problem:
List all supported Prophet (Sklearn API) operators.
Here is the function:
def _build_prophet_operator_list():
    """
    List all supported Prophet (Sklearn API) operators.
    """
    if not prophet_installed():
        return []
    from prophet import Prophet

    return [Prophet]
6,623 | from collections import defaultdict
from .exceptions import MissingConverter
from ._utils import (
torch_installed,
sklearn_installed,
lightgbm_installed,
xgboost_installed,
onnx_runtime_installed,
tvm_installed,
sparkml_installed,
prophet_installed,
)
backends = _build_backend_map()
def torch_installed():
    """
    Checks that *PyTorch* is available (and recent enough).
    """
    try:
        import torch
    except ImportError:
        return False
    # A too-old torch raises AssertionError rather than reporting "not installed".
    assert parse(torch.__version__) > Version("1.7.0"), "Please install torch >1.7.0"
    return True
def onnx_runtime_installed():
    """
    Checks that *ONNX Runtime* is available.
    """
    try:
        import onnxruntime  # noqa: F401
        return True
    except ImportError:
        return False
def tvm_installed():
    """
    Checks that *TVM* is available.
    """
    try:
        import tvm  # noqa: F401
    except ImportError:
        return False
    return True
The provided code snippet includes necessary dependencies for implementing the `_build_backend_map` function. Write a Python function `def _build_backend_map()` to solve the following problem:
The set of supported backends is defined here.
Here is the function:
def _build_backend_map():
    """
    The set of supported backends is defined here.
    """
    backends = defaultdict(lambda: None)
    if torch_installed():
        import torch

        backends[torch.__name__] = torch.__name__
        # For compatibility with earlier versions.
        backends["py" + torch.__name__] = torch.__name__
        backends[torch.jit.__name__] = torch.jit.__name__
        # For reference outside Hummingbird.
        backends["torchscript"] = torch.jit.__name__
    if onnx_runtime_installed():
        import onnx

        backends[onnx.__name__] = onnx.__name__
    if tvm_installed():
        import tvm

        backends[tvm.__name__] = tvm.__name__
    return backends
6,624 | from collections import defaultdict
from .exceptions import MissingConverter
from ._utils import (
torch_installed,
sklearn_installed,
lightgbm_installed,
xgboost_installed,
onnx_runtime_installed,
tvm_installed,
sparkml_installed,
prophet_installed,
)
sklearn_operator_list = _build_sklearn_operator_list()
xgb_operator_list = _build_xgboost_operator_list()
lgbm_operator_list = _build_lightgbm_operator_list()
prophet_operator_list = _build_prophet_operator_list()
The provided code snippet includes necessary dependencies for implementing the `_build_sklearn_api_operator_name_map` function. Write a Python function `def _build_sklearn_api_operator_name_map()` to solve the following problem:
Associate Sklearn with the operator class names. If two scikit-learn (API) models share a single name, it means they are equivalent in terms of conversion.
Here is the function:
def _build_sklearn_api_operator_name_map():
    """
    Associate Sklearn with the operator class names.
    If two scikit-learn (API) models share a single name, it means they are equivalent in terms of conversion.
    """
    # Ops injected by the pipeline parser; these are not "real" sklearn operators.
    parser_injected_ops = [
        "ArrayFeatureExtractor",
        "Concat",
        "Multiply",
        "Bagging",
    ]
    all_ops = (
        sklearn_operator_list
        + parser_injected_ops
        + xgb_operator_list
        + lgbm_operator_list
        + prophet_operator_list
    )
    name_map = {}
    for op in all_ops:
        # Classes get a "Sklearn"-prefixed name; plain strings pass through.
        name_map[op] = "Sklearn" + op.__name__ if hasattr(op, "__name__") else op
    return name_map
6,625 | from collections import defaultdict
from .exceptions import MissingConverter
from ._utils import (
torch_installed,
sklearn_installed,
lightgbm_installed,
xgboost_installed,
onnx_runtime_installed,
tvm_installed,
sparkml_installed,
prophet_installed,
)
onnxml_operator_list = _build_onnxml_operator_list()
The provided code snippet includes necessary dependencies for implementing the `_build_onnxml_api_operator_name_map` function. Write a Python function `def _build_onnxml_api_operator_name_map()` to solve the following problem:
Associate ONNXML with the operator class names. If two ONNXML models share a single name, it means they are equivalent in terms of conversion.
Here is the function:
def _build_onnxml_api_operator_name_map():
    """
    Associate ONNXML with the operator class names.
    If two ONNXML models share a single name, it means they are equivalent in terms of conversion.
    """
    mapping = {}
    for op_name in onnxml_operator_list:
        if op_name is not None:
            mapping[op_name] = "ONNXML" + op_name
    return mapping
6,626 | from collections import defaultdict
from .exceptions import MissingConverter
from ._utils import (
torch_installed,
sklearn_installed,
lightgbm_installed,
xgboost_installed,
onnx_runtime_installed,
tvm_installed,
sparkml_installed,
prophet_installed,
)
sparkml_operator_list = _build_sparkml_operator_list()
The provided code snippet includes necessary dependencies for implementing the `_build_sparkml_api_operator_name_map` function. Write a Python function `def _build_sparkml_api_operator_name_map()` to solve the following problem:
Associate Spark-ML with the operator class names. If two Spark-ML models share a single name, it means they are equivalent in terms of conversion.
Here is the function:
def _build_sparkml_api_operator_name_map():
    """
    Associate Spark-ML with the operator class names.
    If two Spark-ML models share a single name, it means they are equivalent in terms of conversion.
    """
    mapping = {}
    for op in sparkml_operator_list:
        if op is None:
            continue
        # Classes get a "SparkML"-prefixed name; other entries pass through.
        mapping[op] = "SparkML" + op.__name__ if hasattr(op, "__name__") else op
    return mapping
6,627 | from collections import defaultdict
from .exceptions import MissingConverter
from ._utils import (
torch_installed,
sklearn_installed,
lightgbm_installed,
xgboost_installed,
onnx_runtime_installed,
tvm_installed,
sparkml_installed,
prophet_installed,
)
prophet_operator_list = _build_prophet_operator_list()
The provided code snippet includes necessary dependencies for implementing the `_build_prophet_api_operator_name_map` function. Write a Python function `def _build_prophet_api_operator_name_map()` to solve the following problem:
Associate Prophet with the operator class names.
Here is the function:
def _build_prophet_api_operator_name_map():
    """
    Associate Prophet with the operator class names.
    """
    # getattr with a default mirrors "k.__name__ if hasattr(...) else k".
    return {op: getattr(op, "__name__", op) for op in prophet_operator_list if op is not None}
6,628 | import re
from docutils import nodes, utils
from sphinx.util.nodes import split_explicit_title
def format_commit_text(sha, length=7):
    """Return the abbreviated form of a commit hash.

    Args:
        sha: Full (or already shortened) commit hash string.
        length: Number of leading characters to keep. Defaults to 7,
            the conventional git short-SHA length, so existing callers
            see unchanged behavior.

    Returns:
        The first ``length`` characters of ``sha`` (the whole string
        when it is shorter than ``length``).
    """
    return sha[:length]
6,629 | import re
from docutils import nodes, utils
from sphinx.util.nodes import split_explicit_title
__version__ = "1.2.0"
def user_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Sphinx role for linking to a user profile. Defaults to linking to
    Github profiles, but the profile URIS can be configured via the
    ``issues_user_uri`` config value.

    Examples: ::

        :user:`sloria`

    Anchor text also works: ::

        :user:`Steven Loria <sloria>`
    """
    if options is None:
        options = {}
    if content is None:
        content = []
    has_title, title, target = split_explicit_title(text)
    target = utils.unescape(target).strip()
    title = utils.unescape(title).strip()
    config = inliner.document.settings.env.app.config
    # Use the configured profile template when available, else GitHub.
    if config.issues_user_uri:
        ref = config.issues_user_uri.format(user=target)
    else:
        ref = "https://github.com/{0}".format(target)
    display = title if has_title else "@{0}".format(target)
    node = nodes.reference(text=display, refuri=ref, **options)
    return [node], []
def cve_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Sphinx role for linking to a CVE on https://cve.mitre.org.

    Examples: ::

        :cve:`CVE-2018-17175`
    """
    options = {} if options is None else options
    content = [] if content is None else content
    has_title, title, target = split_explicit_title(text)
    target = utils.unescape(target).strip()
    title = utils.unescape(title).strip()
    ref = "https://cve.mitre.org/cgi-bin/cvename.cgi?name={0}".format(target)
    if has_title:
        display = title
    else:
        display = target
    node = nodes.reference(text=display, refuri=ref, **options)
    return [node], []
# Role instances for linking to issues, pull requests, and commits.
# Each one resolves its URI from the named Sphinx config option and
# falls back to the given GitHub template (driven by ``issues_github_path``).
issue_role = IssueRole(
    uri_config_option="issues_uri",
    format_kwarg="issue",
    github_uri_template="https://github.com/{issues_github_path}/issues/{n}",
)
pr_role = IssueRole(
    uri_config_option="issues_pr_uri",
    format_kwarg="pr",
    github_uri_template="https://github.com/{issues_github_path}/pull/{n}",
)
# Commits additionally abbreviate the displayed sha via format_commit_text.
commit_role = IssueRole(
    uri_config_option="issues_commit_uri",
    format_kwarg="commit",
    github_uri_template="https://github.com/{issues_github_path}/commit/{n}",
    format_text=format_commit_text,
)
def setup(app):
    """Sphinx extension entry point: register config values and roles.

    Returns the extension metadata dict expected by Sphinx.
    """
    # URI format templates; all default to None and trigger an HTML
    # rebuild when changed. Examples:
    #   issues_uri:        'https://github.com/sloria/marshmallow/issues/{issue}'
    #   issues_pr_uri:     'https://github.com/sloria/marshmallow/pull/{issue}'
    #   issues_commit_uri: 'https://github.com/sloria/marshmallow/commits/{commit}'
    #   issues_github_path: shortcut for Github, e.g. 'sloria/marshmallow'
    #   issues_user_uri:   'https://github.com/{user}'
    for option_name in (
        "issues_uri",
        "issues_pr_uri",
        "issues_commit_uri",
        "issues_github_path",
        "issues_user_uri",
    ):
        app.add_config_value(option_name, default=None, rebuild="html")

    for role_name, role_fn in (
        ("issue", issue_role),
        ("pr", pr_role),
        ("user", user_role),
        ("commit", commit_role),
        ("cve", cve_role),
    ):
        app.add_role(role_name, role_fn)

    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
6,630 | from operator import attrgetter
import inspect
import subprocess
import os
import sys
from functools import partial
def _get_git_revision():
    """Return the current git revision as text, or ``None`` if git fails.

    Runs ``REVISION_CMD`` (defined at module level) and decodes its output.
    """
    try:
        out = subprocess.check_output(REVISION_CMD.split())
    except (OSError, subprocess.CalledProcessError):
        # git missing or the command failed; linkcode links are skipped.
        print("Failed to execute git to get revision")
        return None
    return out.strip().decode("utf-8")
def _linkcode_resolve(domain, info, package, url_fmt, revision):
"""Determine a link to online source for a class/method/function
This is called by sphinx.ext.linkcode
An example with a long-untouched module that everyone has
>>> _linkcode_resolve('py', {'module': 'tty',
... 'fullname': 'setraw'},
... package='tty',
... url_fmt='http://hg.python.org/cpython/file/'
... '{revision}/Lib/{package}/{path}#L{lineno}',
... revision='xxxx')
'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
"""
if revision is None:
return
if domain not in ("py", "pyx"):
return
if not info.get("module") or not info.get("fullname"):
return
class_name = info["fullname"].split(".")[0]
if not isinstance(class_name, str):
# Python 2 only
class_name = class_name.encode("utf-8")
module = __import__(info["module"], fromlist=[class_name])
obj = attrgetter(info["fullname"])(module)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
return
fn = os.path.relpath(fn, start=os.path.dirname(__import__(package).__file__))
try:
lineno = inspect.getsourcelines(obj)[1]
except Exception:
lineno = ""
return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno)
The provided code snippet includes necessary dependencies for implementing the `make_linkcode_resolve` function. Write a Python function `def make_linkcode_resolve(package, url_fmt)` to solve the following problem:
Returns a linkcode_resolve function for the given URL format revision is a git commit reference (hash or name) package is the name of the root module of the package url_fmt is along the lines of ('https://github.com/USER/PROJECT/' 'blob/{revision}/{package}/' '{path}#L{lineno}')
Here is the function:
def make_linkcode_resolve(package, url_fmt):
    """Returns a linkcode_resolve function for the given URL format

    revision is a git commit reference (hash or name)

    package is the name of the root module of the package

    url_fmt is along the lines of ('https://github.com/USER/PROJECT/'
    'blob/{revision}/{package}/'
    '{path}#L{lineno}')
    """
    # Pin the current git revision once, at creation time.
    return partial(
        _linkcode_resolve,
        revision=_get_git_revision(),
        package=package,
        url_fmt=url_fmt,
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.