| id (int64, 0–190k) | prompt (string, 21–13.4M chars) | docstring (string, 1–12k chars, nullable) |
|---|---|---|
2,460 | from __future__ import annotations
from typing import Any
from checkov.common.typing import LibraryGraph
from checkov.common.util.data_structures_utils import find_in_dict
from checkov.common.util.type_forcers import force_list, extract_json
from checkov.terraform.image_referencer.base_provider import BaseTerraformProvider
def extract_json(json_str: Any) -> dict[str, Any] | list[dict[str, Any]] | None:
def extract_images_from_aws_ecs_task_definition(resource: dict[str, Any]) -> list[str]:
    """Collect container image references from an aws_ecs_task_definition resource.

    The "container_definitions" attribute holds a JSON document; anything that
    fails to parse or is not shaped as a list of dicts yields an empty result.
    """
    images: list[str] = []
    definitions = extract_json(resource.get("container_definitions"))
    if not isinstance(definitions, list):
        return images
    for entry in definitions:
        if not isinstance(entry, dict):
            continue
        image = entry.get("image")
        # only keep non-empty string values
        if isinstance(image, str) and image:
            images.append(image)
    return images
2,461 | from __future__ import annotations
from typing import Any
from checkov.common.typing import LibraryGraph
from checkov.common.util.data_structures_utils import find_in_dict
from checkov.common.util.type_forcers import force_list, extract_json
from checkov.terraform.image_referencer.base_provider import BaseTerraformProvider
# NOTE(review): the two bodiless signatures below look like typing stubs whose
# @overload decorators were lost in extraction — confirm against the original
# source. As written, only the final definition takes effect at runtime.
# T is a TypeVar declared elsewhere in the original module.
def force_list(var: list[T]) -> list[T]:
    ...
def force_list(var: T) -> list[T]:
    ...
def force_list(var: T | list[T]) -> list[T]:
    """Wrap a scalar in a list; lists pass through unchanged (same object, no copy)."""
    if not isinstance(var, list):
        return [var]
    return var
def extract_images_from_aws_lightsail_container_service_deployment_version(resource: dict[str, Any]) -> list[str]:
    """Collect container image references from an
    aws_lightsail_container_service_deployment_version resource.

    The "container" attribute may be a single mapping or a list of mappings;
    entries without a non-empty string "image" are skipped.
    """
    containers = resource.get("container")
    if not containers:
        return []
    images: list[str] = []
    for entry in force_list(containers):
        if not isinstance(entry, dict):
            continue
        image = entry.get("image")
        if isinstance(image, str) and image:
            images.append(image)
    return images
2,462 | from __future__ import annotations
import json
import logging
import os
from collections import defaultdict
from pathlib import Path
from typing import Optional, Dict, Mapping, Set, Tuple, Callable, Any, List, cast, TYPE_CHECKING, overload
import hcl2
from checkov.common.parallelizer.parallel_runner import parallel_runner
from checkov.common.runners.base_runner import filter_ignored_paths, IGNORE_HIDDEN_DIRECTORY_ENV
from checkov.common.util.consts import DEFAULT_EXTERNAL_MODULES_DIR, RESOLVED_MODULE_ENTRY_NAME
from checkov.common.util.data_structures_utils import pickle_deepcopy
from checkov.common.util.deep_merge import pickle_deep_merge
from checkov.common.util.type_forcers import force_list
from checkov.common.variables.context import EvaluationContext
from checkov.terraform import validate_malformed_definitions, clean_bad_definitions
from checkov.terraform.graph_builder.graph_components.block_types import BlockType
from checkov.terraform.graph_builder.graph_components.module import Module
from checkov.terraform.module_loading.content import ModuleContent
from checkov.terraform.module_loading.module_finder import load_tf_modules
from checkov.terraform.module_loading.registry import module_loader_registry as default_ml_registry, \
ModuleLoaderRegistry
from checkov.common.util.parser_utils import is_acceptable_module_param
from checkov.terraform.modules.module_utils import safe_index, \
remove_module_dependency_from_path, \
clean_parser_types, serialize_definitions, _Hcl2Payload
from checkov.terraform.modules.module_objects import TFModule, TFDefinitionKey
def filter_ignored_paths(
    root_dir: str,
    names: list[str] | list[os.DirEntry[str]],
    excluded_paths: list[str] | None,
    included_paths: Iterable[str] | None = None
) -> None:
    """Remove ignored entries from ``names`` in place.

    :param root_dir: directory the entries live under; used to build the paths that
        ``excluded_paths`` patterns are matched against.
    :param names: directory entries to filter; mutated in place via ``safe_remove``.
    :param excluded_paths: regex patterns and/or plain substrings of paths to drop.
    :param included_paths: basenames to keep even when they start with ``.``.
    """
    # we need to handle legacy logic, where directories to skip could be specified using the env var (default value above)
    # or a directory starting with '.'; these look only at directory basenames, not relative paths.
    #
    # But then any other excluded paths (specified via --skip-path or via the platform repo settings) should look at
    # the path name relative to the root folder. These can be files or directories.
    # Example: take the following dir tree:
    # .
    # ./dir1
    # ./dir1/dir33
    # ./dir1/.terraform
    # ./dir2
    # ./dir2/dir33
    # /.dir2/hello.yaml
    #
    # if excluded_paths = ['dir1/dir33', 'dir2/hello.yaml'], then we would scan dir1, but we would skip its subdirectories. We would scan
    # dir2 and its subdirectory, but we'd skip hello.yaml.
    # first handle the legacy logic - this will also remove files starting with '.' but that's probably fine
    # mostly this will just remove those problematic directories hardcoded above.
    included_paths = included_paths or []
    # iterate over a snapshot because safe_remove mutates `names`
    for entry in list(names):
        path = entry.name if isinstance(entry, os.DirEntry) else entry
        if path in ignored_directories:
            safe_remove(names, entry)
        # NOTE(review): a hidden dir in `ignored_directories` triggers safe_remove
        # twice — presumably safe_remove tolerates already-removed entries; confirm.
        if path.startswith(".") and IGNORE_HIDDEN_DIRECTORY_ENV and path not in included_paths:
            safe_remove(names, entry)
    # now apply the new logic
    # TODO this is not going to work well on Windows, because paths specified in the platform will use /, and
    # paths specified via the CLI argument will presumably use \\
    if excluded_paths:
        compiled = []
        for p in excluded_paths:
            try:
                # escape the common ".terraform" literal so its dot is not a regex wildcard
                compiled.append(re.compile(p.replace(".terraform", r"\.terraform")))
            except re.error:
                # do not add compiled paths that aren't regexes
                continue
        for entry in list(names):
            path = entry.name if isinstance(entry, os.DirEntry) else entry
            full_path = os.path.join(root_dir, path)
            # drop the entry when any regex matches OR any raw excluded path is a substring
            if any(pattern.search(full_path) for pattern in compiled) or any(p in full_path for p in excluded_paths):
                safe_remove(names, entry)
# NOTE(review): the two bodiless signatures below look like typing stubs whose
# @overload decorators were lost in extraction — confirm against the original
# source. As written, only the final definition takes effect at runtime.
# T is a TypeVar declared elsewhere in the original module.
def force_list(var: list[T]) -> list[T]:
    ...
def force_list(var: T) -> list[T]:
    ...
def force_list(var: T | list[T]) -> list[T]:
    """Wrap a scalar in a list; lists pass through unchanged (same object, no copy)."""
    if not isinstance(var, list):
        return [var]
    return var
def _filter_ignored_paths(root: str, paths: list[str], excluded_paths: list[str] | None) -> None:
    """Apply the generic ignore rules to ``paths`` (in place), then additionally
    drop the external-modules download folder.

    :param root: directory the entries in ``paths`` live under.
    :param paths: directory entries; mutated in place.
    :param excluded_paths: extra exclusion patterns forwarded to filter_ignored_paths.
    """
    filter_ignored_paths(root, paths, excluded_paths)
    # BUG FIX: force_list returns the same list object when given a list, so the
    # original code removed from `paths` while iterating it, which skips the entry
    # following each removal. Iterate over a snapshot instead.
    for path in list(force_list(paths)):
        if path == default_ml_registry.external_modules_folder_name:
            paths.remove(path)
2,463 | from __future__ import annotations
import json
import logging
import os
from collections import defaultdict
from pathlib import Path
from typing import Optional, Dict, Mapping, Set, Tuple, Callable, Any, List, cast, TYPE_CHECKING, overload
import hcl2
from checkov.common.parallelizer.parallel_runner import parallel_runner
from checkov.common.runners.base_runner import filter_ignored_paths, IGNORE_HIDDEN_DIRECTORY_ENV
from checkov.common.util.consts import DEFAULT_EXTERNAL_MODULES_DIR, RESOLVED_MODULE_ENTRY_NAME
from checkov.common.util.data_structures_utils import pickle_deepcopy
from checkov.common.util.deep_merge import pickle_deep_merge
from checkov.common.util.type_forcers import force_list
from checkov.common.variables.context import EvaluationContext
from checkov.terraform import validate_malformed_definitions, clean_bad_definitions
from checkov.terraform.graph_builder.graph_components.block_types import BlockType
from checkov.terraform.graph_builder.graph_components.module import Module
from checkov.terraform.module_loading.content import ModuleContent
from checkov.terraform.module_loading.module_finder import load_tf_modules
from checkov.terraform.module_loading.registry import module_loader_registry as default_ml_registry, \
ModuleLoaderRegistry
from checkov.common.util.parser_utils import is_acceptable_module_param
from checkov.terraform.modules.module_utils import safe_index, \
remove_module_dependency_from_path, \
clean_parser_types, serialize_definitions, _Hcl2Payload
from checkov.terraform.modules.module_objects import TFModule, TFDefinitionKey
def is_nested_object(full_path: TFDefinitionKey) -> bool:
    """Return True when the definition key was reached through at least one module call."""
    return bool(full_path.tf_source_modules)
class TFModule:
    """Identifies a Terraform module instance, including the chain of nested module calls it was reached through."""
    # NOTE(review): bare field declarations plus an un-decorated `from_json` suggest
    # this was a dataclass with a @staticmethod whose decorators were lost in
    # extraction — confirm against the original source.
    path: str  # filesystem path of the module's definition
    name: str | None  # module block name; None for the root module
    nested_tf_module: TFModule | None = None  # the module that invoked this one, if any
    foreach_idx: int | str | None = None  # for_each key or count index, if any
    def __lt__(self, other: Any) -> bool:
        # Tuple-wise ordering; any non-TFModule operand compares as "not less".
        if not isinstance(other, TFModule):
            return False
        return (self.path, self.name, self.nested_tf_module, self.foreach_idx) < (
            other.path, other.name, other.nested_tf_module, other.foreach_idx)
    def __repr__(self) -> str:
        return f'path:{self.path}, name:{self.name}, nested_tf_module:{self.nested_tf_module}, foreach_idx:{self.foreach_idx}'
    def __iter__(self) -> Iterator[tuple[str, Any]]:
        # Yields (field, value) pairs so dict(instance) produces a JSON-ready
        # mapping; the nested module chain is serialized recursively.
        yield from {
            "path": self.path,
            "name": self.name,
            "foreach_idx": self.foreach_idx,
            "nested_tf_module": dict(self.nested_tf_module) if self.nested_tf_module else None
        }.items()
    def __str__(self) -> str:
        # local import, presumably to avoid a circular import at module load time
        from checkov.common.util.json_utils import CustomJSONEncoder
        return json.dumps(dict(self), cls=CustomJSONEncoder)
    def from_json(json_dct: dict[str, Any] | None) -> TFModule | None:
        # Inverse of __iter__/__str__; rebuilds the nested module chain recursively.
        return TFModule(path=json_dct['path'], name=json_dct['name'], foreach_idx=json_dct['foreach_idx'],
                        nested_tf_module=TFModule.from_json(json_dct['nested_tf_module']) if json_dct.get(
                            'nested_tf_module') else None) if json_dct else None
class TFDefinitionKey:
    """Identifies a parsed definition file, optionally qualified by the module chain it was loaded through."""
    # NOTE(review): likely a dataclass with a @staticmethod `from_json`; the
    # decorators appear to have been lost in extraction — confirm against the source.
    file_path: str  # path of the .tf/.tf.json file
    tf_source_modules: TFModule | None = None  # module chain; None for root-module files
    def __lt__(self, other: Any) -> bool:
        # Tuple-wise ordering; any non-TFDefinitionKey operand compares as "not less".
        if not isinstance(other, TFDefinitionKey):
            return False
        return (self.file_path, self.tf_source_modules) < (other.file_path, other.tf_source_modules)
    def __repr__(self) -> str:
        return f'tf_source_modules:{self.tf_source_modules}, file_path:{self.file_path}'
    def __iter__(self) -> Iterator[tuple[str, Any]]:
        # (field, value) pairs so dict(instance) is JSON-ready.
        yield from {
            "file_path": self.file_path,
            "tf_source_modules": dict(self.tf_source_modules) if self.tf_source_modules else None
        }.items()
    def __str__(self) -> str:
        # local import, presumably to avoid a circular import at module load time
        from checkov.common.util.json_utils import CustomJSONEncoder
        return json.dumps(self.to_json(), cls=CustomJSONEncoder)
    def to_json(self) -> dict[str, Any]:
        """Serialize to a plain dict, including the nested TFModule chain."""
        to_return: dict[str, Any] = {"file_path": self.file_path, "tf_source_modules": None}
        if self.tf_source_modules:
            to_return["tf_source_modules"] = dict(self.tf_source_modules)
        return to_return
    def from_json(json_dct: dict[str, Any]) -> TFDefinitionKey:
        # Inverse of to_json.
        return TFDefinitionKey(file_path=json_dct['file_path'],
                               tf_source_modules=TFModule.from_json(json_dct['tf_source_modules']))
def get_tf_definition_object_from_module_dependency(
    path: TFDefinitionKey, module_dependency: TFDefinitionKey | None, module_dependency_name: str | None
) -> TFDefinitionKey:
    """Wrap *path* in a key that records the module it was resolved through.

    Without a dependency the key is returned untouched; otherwise the dependency
    (and, for nested modules, its own source-module chain) becomes the key's
    tf_source_modules entry.
    """
    if not module_dependency:
        return path
    nested = module_dependency.tf_source_modules if is_nested_object(module_dependency) else None
    source_module = TFModule(
        path=module_dependency.file_path,
        name=module_dependency_name,
        nested_tf_module=nested,
    )
    return TFDefinitionKey(path.file_path, source_module)
2,464 | from __future__ import annotations
import json
import logging
import os
from collections import defaultdict
from pathlib import Path
from typing import Optional, Dict, Mapping, Set, Tuple, Callable, Any, List, cast, TYPE_CHECKING, overload
import hcl2
from checkov.common.parallelizer.parallel_runner import parallel_runner
from checkov.common.runners.base_runner import filter_ignored_paths, IGNORE_HIDDEN_DIRECTORY_ENV
from checkov.common.util.consts import DEFAULT_EXTERNAL_MODULES_DIR, RESOLVED_MODULE_ENTRY_NAME
from checkov.common.util.data_structures_utils import pickle_deepcopy
from checkov.common.util.deep_merge import pickle_deep_merge
from checkov.common.util.type_forcers import force_list
from checkov.common.variables.context import EvaluationContext
from checkov.terraform import validate_malformed_definitions, clean_bad_definitions
from checkov.terraform.graph_builder.graph_components.block_types import BlockType
from checkov.terraform.graph_builder.graph_components.module import Module
from checkov.terraform.module_loading.content import ModuleContent
from checkov.terraform.module_loading.module_finder import load_tf_modules
from checkov.terraform.module_loading.registry import module_loader_registry as default_ml_registry, \
ModuleLoaderRegistry
from checkov.common.util.parser_utils import is_acceptable_module_param
from checkov.terraform.modules.module_utils import safe_index, \
remove_module_dependency_from_path, \
clean_parser_types, serialize_definitions, _Hcl2Payload
from checkov.terraform.modules.module_objects import TFModule, TFDefinitionKey
# Shape of parsed HCL/JSON definitions: top-level block type -> list of block bodies.
_Hcl2Payload: TypeAlias = "dict[str, list[dict[str, Any]]]"
The provided code snippet includes necessary dependencies for implementing the `load_or_die_quietly` function. Write a Python function `def load_or_die_quietly( file: str | Path | os.DirEntry[str], parsing_errors: dict[str, Exception], clean_definitions: bool = True ) -> Optional[_Hcl2Payload]` to solve the following problem:
Load JSON or HCL, depending on filename. :return: None if the file can't be loaded
Here is the function:
def load_or_die_quietly(
    file: str | Path | os.DirEntry[str], parsing_errors: dict[str, Exception], clean_definitions: bool = True
) -> Optional[_Hcl2Payload]:
    """
    Load JSON or HCL, depending on filename.
    :return: None if the file can't be loaded
    """
    file_path = os.fspath(file)
    try:
        logging.debug(f"Parsing {file_path}")
        with open(file_path, "r", encoding="utf-8-sig") as f:
            # choose the parser by extension: *.json is plain JSON, anything else is HCL
            if os.path.basename(file_path).endswith(".json"):
                return cast("_Hcl2Payload", json.load(f))
            parsed = validate_malformed_definitions(hcl2.load(f))
            if clean_definitions:
                return clean_bad_definitions(parsed)
            return parsed
    except Exception as e:
        # any parse/IO failure is recorded per file instead of raised
        logging.debug(f'failed while parsing file {file_path}', exc_info=True)
        parsing_errors[file_path] = e
        return None
2,465 | from __future__ import annotations
import json
import logging
import os
import re
from pathlib import Path
from typing import List, Callable, TYPE_CHECKING
from checkov.common.parallelizer.parallel_runner import parallel_runner
from checkov.common.util.file_utils import read_file_with_any_encoding
from checkov.common.util.type_forcers import convert_str_to_bool
from checkov.terraform.module_loading.registry import module_loader_registry
class ModuleDownload:
    """Bookkeeping for one module block found in a .tf file."""

    def __init__(self, source_dir: str) -> None:
        self.source_dir = source_dir  # directory of the file declaring the module
        self.address: str | None = None  # "<link>:<version>" once the block is fully parsed
        self.module_name: str | None = None  # name of the module block
        self.module_link: str | None = None  # value of the "source" attribute
        self.tf_managed = False  # True when served from .terraform/modules instead of downloaded
        self.version: str | None = None  # value of the "version" attribute, if any

    def __str__(self) -> str:
        return "{} -> {} ({})".format(self.source_dir, self.module_link, self.version)
def find_modules(path: str) -> List[ModuleDownload]:
    """Scan *path* recursively for .tf files and extract the module blocks they declare.

    Returns one ModuleDownload per module block that was fully parsed. Relies on
    MODULE_NAME_PATTERN / MODULE_SOURCE_PATTERN / MODULE_VERSION_PATTERN (defined
    elsewhere in this module) for the line-level matching.
    """
    modules_found: list[ModuleDownload] = []
    for root, _, full_file_names in os.walk(path):
        for file_name in full_file_names:
            if not file_name.endswith('.tf'):
                continue
            if root.startswith(os.path.join(path, ".terraform", "modules")):
                # don't scan the modules folder used by Terraform
                continue
            try:
                # NOTE(review): os.walk roots already start with `path`, so this join
                # only yields a valid location when the inputs are absolute — confirm.
                content = read_file_with_any_encoding(file_path=os.path.join(path, root, file_name))
                if "module " not in content:
                    # if there is no "module " ref in the whole file, then no need to search line by line
                    continue
                # line-oriented state machine: curr_md is the module block currently open
                curr_md = None
                for line in content.splitlines():
                    if not curr_md:
                        if line.startswith('module'):
                            curr_md = ModuleDownload(os.path.dirname(os.path.join(root, file_name)))
                            # also extract the name for easier mapping against the TF modules.json file
                            match = re.match(MODULE_NAME_PATTERN, line)
                            if match:
                                curr_md.module_name = match.group("name")
                            continue
                    else:
                        if line.startswith('}'):
                            # closing brace ends the block; record it only if a source was found
                            if curr_md.module_link is None:
                                logging.warning(f'A module at {curr_md.source_dir} had no source, skipping')
                            else:
                                curr_md.address = f"{curr_md.module_link}:{curr_md.version}"
                                modules_found.append(curr_md)
                            curr_md = None
                            continue
                        if "source" in line:
                            match = re.match(MODULE_SOURCE_PATTERN, line)
                            if match:
                                curr_md.module_link = match.group('link')
                                continue
                        if "version" in line:
                            match = re.match(MODULE_VERSION_PATTERN, line)
                            if match:
                                # keep the operator prefix (e.g. "~>1.0") when present
                                curr_md.version = f"{match.group('operator')}{match.group('version')}" if match.group('operator') else match.group('version')
            except (UnicodeDecodeError, FileNotFoundError) as e:
                logging.warning(f"Skipping {os.path.join(path, root, file_name)} because of {e}")
                continue
    return modules_found
def should_download(path: str | None) -> bool:
    """Return True unless *path* is missing or refers to a local filesystem module
    (i.e. starts with './', '../' or '/')."""
    if path is None:
        return False
    return not path.startswith(("./", "../", "/"))
def _download_module(ml_registry: ModuleLoaderRegistry, module_download: ModuleDownload) -> bool:
logging.info(f'Downloading module {module_download.address}')
try:
content = ml_registry.load(
current_dir=module_download.source_dir,
source=module_download.module_link,
source_version="latest" if not module_download.version else module_download.version,
module_address=module_download.address,
tf_managed=module_download.tf_managed,
)
if content is None or not content.loaded():
log_message = f'Failed to download module {module_download.address}'
if not ml_registry.download_external_modules:
log_message += ' (for external modules, the --download-external-modules flag is required)'
logging.warning(log_message)
return False
except Exception as e:
logging.warning(f"Unable to load module ({module_download.address}): {e}")
return False
return True
def replace_terraform_managed_modules(path: str, found_modules: list[ModuleDownload]) -> list[ModuleDownload]:
    """Swap modules for their Terraform-managed local copies to avoid additional downloading.

    It can't handle nested modules yet, ex.
    {
        "Key": "parent_module.child_module",
        "Source": "./child_module",
        "Dir": "parent_module/child_module"
    }
    """
    # experimental feature, opt-in via env var
    if not convert_str_to_bool(os.getenv("CHECKOV_EXPERIMENTAL_TERRAFORM_MANAGED_MODULES", False)):
        return found_modules
    # file used by Terraform internally to map modules to the downloaded path
    modules_json = Path(path) / ".terraform/modules/modules.json"
    if not modules_json.exists():
        return found_modules
    # Key (module name) -> module detail, for O(1) lookups below
    tf_modules = {entry["Key"]: entry for entry in json.loads(modules_json.read_bytes())["Modules"]}
    result: list[ModuleDownload] = []
    for module in found_modules:
        managed = tf_modules.get(module.module_name)
        if managed is None:
            result.append(module)
            continue
        replacement = ModuleDownload(source_dir=path)
        # if version is 'None' then set it to latest in the address, so it can be mapped properly later on
        replacement.address = f"{module.module_link}:latest" if module.version is None else module.address
        replacement.module_link = managed["Dir"]
        replacement.module_name = module.module_name
        replacement.tf_managed = True
        replacement.version = module.version
        result.append(replacement)
    return result
# Module-level singletons: shared worker pool and module-source loader registry.
parallel_runner = ParallelRunner()
module_loader_registry = ModuleLoaderRegistry()
def load_tf_modules(
    path: str,
    should_download_module: Callable[[str | None], bool] = should_download,
    run_parallel: bool = False,
    modules_to_load: List[ModuleDownload] | None = None,
    stop_on_failure: bool = False
) -> None:
    """Download every external module referenced under *path*.

    :param should_download_module: predicate deciding whether a source link needs downloading.
    :param run_parallel: download via the parallel runner instead of sequentially.
    :param modules_to_load: pre-discovered modules; scanned from *path* when omitted.
    :param stop_on_failure: abort the sequential download loop on the first failure.
    """
    module_loader_registry.root_dir = path
    modules = modules_to_load or find_modules(path)
    # To avoid duplicate work, keep one module per distinct address
    distinct_modules = list({m.address: m for m in modules}.values())
    replaced_modules = replace_terraform_managed_modules(path=path, found_modules=distinct_modules)
    downloadable = [
        (module_loader_registry, m)
        for m in replaced_modules
        if should_download_module(m.module_link)
    ]
    if run_parallel:
        list(parallel_runner.run_function(_download_module, downloadable))
        return
    logging.info(f"Starting download of modules of length {len(replaced_modules)}")
    for registry, module in downloadable:
        if not _download_module(registry, module) and stop_on_failure:
            logging.info(f"Stopping downloading of modules due to failed attempt on {module.address}")
            break
2,466 | from __future__ import annotations
import re
from typing import List, Dict, Optional, cast, Callable
from checkov.common.packaging import version
# One version constraint: an optional comparison operator followed by a dotted
# version with an optional pre-release suffix, e.g. ">=1.2.3" or "~>0.14.0-rc1".
VERSION_REGEX = re.compile(r"^(?P<operator>=|!=|>=|>|<=|<|~>)?\s*(?P<version>[\d.]+-?\w*)$")
class VersionConstraint:
    """
    One module version constraint (operator + version); supports matching candidate versions.
    """

    def __init__(self, constraint_parts: Dict[str, Optional[str]]) -> None:
        """
        :param constraint_parts: a dictionary representing a version constraint: {"version": "v1.2.3", "operator": ">="}
        """
        self.version = cast("version.Version", version.parse(constraint_parts.get("version") or ""))
        # no operator means exact match
        self.operator = constraint_parts.get("operator") or "="

    def get_max_version_for_most_specific_segment(self) -> version.Version:
        # exclusive upper bound used by "~>": the next major release
        return cast("version.Version", version.parse(f"{self.version.major + 1}.0.0"))

    def versions_matching(self, other_version_str: str) -> bool:
        """Return True when *other_version_str* satisfies this constraint."""
        candidate = cast("version.Version", version.parse(other_version_str))
        pinned = self.version
        checks: dict[str, Callable[[version.Version], bool]] = {
            "=": lambda v: v == pinned,
            "!=": lambda v: v != pinned,
            ">": lambda v: v > pinned,
            ">=": lambda v: v >= pinned,
            "<": lambda v: v < pinned,
            "<=": lambda v: v <= pinned,
            "~>": lambda v: pinned <= v < self.get_max_version_for_most_specific_segment(),
        }
        return checks[self.operator](candidate)

    def __str__(self) -> str:
        return f"{self.operator}{self.version}"
The provided code snippet includes necessary dependencies for implementing the `get_version_constraints` function. Write a Python function `def get_version_constraints(raw_version: str) -> List[VersionConstraint]` to solve the following problem:
:param raw_version: A string representation of a version, e.g: "~> v1.2.3" :return: a list of VersionConstraint instances, one per comma-separated constraint
Here is the function:
def get_version_constraints(raw_version: str) -> List[VersionConstraint]:
    """
    :param raw_version: A string representation of a version, e.g: "~> v1.2.3"
    :return: a list of VersionConstraint instances, one per comma-separated constraint
    """
    # strip whitespace, then treat each comma-separated chunk as its own constraint;
    # chunks that do not match the pattern are silently dropped
    parts = raw_version.replace(" ", "").split(",")
    return [
        VersionConstraint(match.groupdict())
        for part in parts
        if (match := VERSION_REGEX.search(part))
    ]
2,467 | from __future__ import annotations
import re
from typing import List, Dict, Optional, cast, Callable
from checkov.common.packaging import version
The provided code snippet includes necessary dependencies for implementing the `order_versions_in_descending_order` function. Write a Python function `def order_versions_in_descending_order(versions_strings: List[str]) -> List[str]` to solve the following problem:
:param versions_strings: array of string versions: ["v1.2.3", "v1.2.4"...] :return: A sorted array of versions in descending order
Here is the function:
def order_versions_in_descending_order(versions_strings: List[str]) -> List[str]:
    """
    :param versions_strings: array of string versions: ["v1.2.3", "v1.2.4"...]
    :return: the same list, sorted in place in descending version order
    """
    # The previous hand-rolled bubble sort was O(n^2) comparisons with two
    # version.parse calls per comparison. A stable in-place sort with a key
    # parses each version once, preserves the relative order of equal versions
    # (the bubble sort only swapped on strict '<'), and keeps the
    # sort-in-place-and-return contract.
    versions_strings.sort(key=version.parse, reverse=True)
    return versions_strings
2,468 | from __future__ import annotations
import json
import logging
from collections.abc import Hashable
from typing import Dict, List, Union, Any, Callable
from checkov.common.util.data_structures_utils import pickle_deepcopy
from checkov.common.util.type_forcers import convert_str_to_bool
from checkov.common.util.parser_utils import (
eval_string,
split_merge_args,
string_to_native,
to_string,
)
FUNCTION_FAILED = "____FUNCTION_FAILED____"
def split_merge_args(value: str) -> list[str] | None:
    """
    Split arguments of a merge function. For example, "merge(local.one, local.two)" would
    call this function with a value of "local.one, local.two" which would return
    ["local.one", "local.two"]. If the value cannot be unpacked, None will be returned.
    """
    if not value:
        return None
    # There are a number of splitting scenarios depending on whether variables or
    # direct maps are used:
    #   merge({tag1="foo"},{tag2="bar"})
    #   merge({tag1="foo"},local.some_tags)
    #   merge(local.some_tags,{tag2="bar"})
    #   merge(local.some_tags,local.some_other_tags)
    # Also, the number of arguments can vary, things can be nested, strings are evil...
    # See tests/terraform/test_parser_var_blocks.py for many examples.
    to_return = []
    current_arg_buffer = ""
    processing_str_escape = False
    inside_collection_stack: List[str] = []  # newest at position 0, contains the terminator for the collection
    for c in value:
        # a comma is only an argument separator at the top level (not inside {...}, [...] or "...")
        if c == "," and not inside_collection_stack:
            current_arg_buffer = current_arg_buffer.strip()
            # Note: can get a zero-length buffer when there's a double comma. This can
            # happen with multi-line args (see parser_internals test)
            if len(current_arg_buffer) != 0:
                to_return.append(current_arg_buffer)
            current_arg_buffer = ""
        else:
            current_arg_buffer += c
        # helper (defined elsewhere) updates the nesting stack and returns the
        # string-escape state for the next character
        processing_str_escape = _str_parser_loop_collection_helper(c, inside_collection_stack, processing_str_escape)
    # flush whatever is left in the buffer after the last separator
    current_arg_buffer = current_arg_buffer.strip()
    if len(current_arg_buffer) > 0:
        to_return.append(current_arg_buffer)
    if len(to_return) == 0:
        return None
    return to_return
def string_to_native(value: str) -> Any:
    """Best-effort JSON parse of *value*, treating single quotes as double quotes.

    Returns None when the normalized string is not valid JSON.
    """
    try:
        return json.loads(value.replace("'", '"'))
    except Exception:
        return None
def merge(original: str, var_resolver: Callable[[Any], Any], **_: Any) -> dict[Hashable, Any] | str:
    """Emulate Terraform's merge() over the raw argument string.

    https://www.terraform.io/docs/language/functions/merge.html
    Returns FUNCTION_FAILED when the arguments can't be split or any argument
    doesn't resolve to a dict.
    """
    args = split_merge_args(original)
    if args is None:
        return FUNCTION_FAILED
    combined: dict[Hashable, Any] = {}
    for arg in args:
        if arg.startswith("{"):
            # inline map literal
            resolved = string_to_native(arg)
            if resolved is None:
                return FUNCTION_FAILED
        else:
            # variable/local reference
            resolved = var_resolver(arg)
        if not isinstance(resolved, dict):
            return FUNCTION_FAILED  # don't know what this is, blow out
        combined.update(resolved)
    return combined
2,469 | from __future__ import annotations
import json
import logging
from collections.abc import Hashable
from typing import Dict, List, Union, Any, Callable
from checkov.common.util.data_structures_utils import pickle_deepcopy
from checkov.common.util.type_forcers import convert_str_to_bool
from checkov.common.util.parser_utils import (
eval_string,
split_merge_args,
string_to_native,
to_string,
)
FUNCTION_FAILED = "____FUNCTION_FAILED____"
def split_merge_args(value: str) -> list[str] | None:
    """
    Split arguments of a merge function. For example, "merge(local.one, local.two)" would
    call this function with a value of "local.one, local.two" which would return
    ["local.one", "local.two"]. If the value cannot be unpacked, None will be returned.
    """
    if not value:
        return None
    # There are a number of splitting scenarios depending on whether variables or
    # direct maps are used:
    #   merge({tag1="foo"},{tag2="bar"})
    #   merge({tag1="foo"},local.some_tags)
    #   merge(local.some_tags,{tag2="bar"})
    #   merge(local.some_tags,local.some_other_tags)
    # Also, the number of arguments can vary, things can be nested, strings are evil...
    # See tests/terraform/test_parser_var_blocks.py for many examples.
    to_return = []
    current_arg_buffer = ""
    processing_str_escape = False
    inside_collection_stack: List[str] = []  # newest at position 0, contains the terminator for the collection
    for c in value:
        # a comma is only an argument separator at the top level (not inside {...}, [...] or "...")
        if c == "," and not inside_collection_stack:
            current_arg_buffer = current_arg_buffer.strip()
            # Note: can get a zero-length buffer when there's a double comma. This can
            # happen with multi-line args (see parser_internals test)
            if len(current_arg_buffer) != 0:
                to_return.append(current_arg_buffer)
            current_arg_buffer = ""
        else:
            current_arg_buffer += c
        # helper (defined elsewhere) updates the nesting stack and returns the
        # string-escape state for the next character
        processing_str_escape = _str_parser_loop_collection_helper(c, inside_collection_stack, processing_str_escape)
    # flush whatever is left in the buffer after the last separator
    current_arg_buffer = current_arg_buffer.strip()
    if len(current_arg_buffer) > 0:
        to_return.append(current_arg_buffer)
    if len(to_return) == 0:
        return None
    return to_return
def eval_string(value: str) -> Any:
    """Parse *value* as a single HCL expression; returns None on any failure.

    Single quotes are normalized to double quotes first, since HCL strings use
    double quotes.
    """
    try:
        normalized = value.replace("'", '"')
        parsed = hcl2.loads(f"eval = {normalized}\n")  # NOTE: newline is needed
        return parsed["eval"][0]
    except Exception:
        return None
def concat(original: str, var_resolver: Callable[[Any], Any], **_: Any) -> list[Any] | str:
    """Emulate Terraform's concat() over the raw argument string.

    https://www.terraform.io/docs/language/functions/concat.html
    Returns FUNCTION_FAILED when the arguments can't be split or any argument
    doesn't resolve to a list.
    """
    args = split_merge_args(original)
    if args is None:
        return FUNCTION_FAILED
    combined: list[Any] = []
    for arg in args:
        if arg.startswith("["):
            # inline list literal
            value = eval_string(arg)
            if value is None:
                logging.debug("Unable to convert to list: %s", arg)
                return FUNCTION_FAILED
        else:
            # variable/local reference
            value = var_resolver(arg)
        if not isinstance(value, list):
            return FUNCTION_FAILED  # don't know what this is, blow out
        combined.extend(value)
    return combined
2,470 | from __future__ import annotations
import json
import logging
from collections.abc import Hashable
from typing import Dict, List, Union, Any, Callable
from checkov.common.util.data_structures_utils import pickle_deepcopy
from checkov.common.util.type_forcers import convert_str_to_bool
from checkov.common.util.parser_utils import (
eval_string,
split_merge_args,
string_to_native,
to_string,
)
FUNCTION_FAILED = "____FUNCTION_FAILED____"
def convert_str_to_bool(bool_str: bool | str) -> bool:
    """Map the strings true/false (any case, optionally wrapped in double quotes)
    to real booleans; every other value is returned unchanged."""
    if isinstance(bool_str, str):
        lowered = bool_str.lower()
        if lowered in ("true", '"true"'):
            return True
        if lowered in ("false", '"false"'):
            return False
    # either already a boolean or an unconvertible string; cast keeps mypy happy
    return typing.cast(bool, bool_str)
def tobool(original: Union[bool, str], **_: Any) -> Union[bool, str]:
    """Emulate Terraform's tobool().

    https://www.terraform.io/docs/configuration/functions/tobool.html
    Returns FUNCTION_FAILED when the value cannot be interpreted as a boolean.
    """
    converted = convert_str_to_bool(original)
    if not isinstance(converted, bool):
        return FUNCTION_FAILED
    return converted
2,471 | from __future__ import annotations
import json
import logging
from collections.abc import Hashable
from typing import Dict, List, Union, Any, Callable
from checkov.common.util.data_structures_utils import pickle_deepcopy
from checkov.common.util.type_forcers import convert_str_to_bool
from checkov.common.util.parser_utils import (
eval_string,
split_merge_args,
string_to_native,
to_string,
)
FUNCTION_FAILED = "____FUNCTION_FAILED____"
def tonumber(original: str, **_: Any) -> float | str:
    """Emulate Terraform's tonumber(): parse a numeric literal, stripping surrounding quotes.

    https://www.terraform.io/docs/configuration/functions/tonumber.html
    Returns FUNCTION_FAILED when the text is not a valid number.
    """
    text = original[1:-1] if original.startswith('"') and original.endswith('"') else original
    try:
        # a decimal point selects float parsing, otherwise int
        return float(text) if "." in text else int(text)
    except ValueError:
        return FUNCTION_FAILED
2,472 | from __future__ import annotations
import json
import logging
from collections.abc import Hashable
from typing import Dict, List, Union, Any, Callable
from checkov.common.util.data_structures_utils import pickle_deepcopy
from checkov.common.util.type_forcers import convert_str_to_bool
from checkov.common.util.parser_utils import (
eval_string,
split_merge_args,
string_to_native,
to_string,
)
FUNCTION_FAILED = "____FUNCTION_FAILED____"
def convert_str_to_bool(bool_str: bool | str) -> bool:
    """Map the strings true/false (any case, optionally wrapped in double quotes)
    to real booleans; every other value is returned unchanged."""
    if isinstance(bool_str, str):
        bool_str_lower = bool_str.lower()
        if bool_str_lower in ("true", '"true"'):
            return True
        elif bool_str_lower in ("false", '"false"'):
            return False
    # either already a boolean or an unconvertible string; cast keeps mypy happy
    return typing.cast(bool, bool_str)
def tostring(original: str, **_: Any) -> bool | str:
    """Emulate Terraform's tostring(): unwrap quoted input; otherwise the bare
    input must be a boolean or a number.

    Returns FUNCTION_FAILED for anything else.
    """
    # a quoted value indicates a safe string, all good
    if original.startswith('"') and original.endswith('"'):
        return original[1:-1]
    # otherwise, check for valid bare types (bool or number)
    maybe_bool = convert_str_to_bool(original)
    if isinstance(maybe_bool, bool):
        return maybe_bool
    try:
        return str(float(original)) if "." in original else str(int(original))
    except ValueError:
        return FUNCTION_FAILED  # no change
2,473 | from __future__ import annotations
import json
import logging
from collections.abc import Hashable
from typing import Dict, List, Union, Any, Callable
from checkov.common.util.data_structures_utils import pickle_deepcopy
from checkov.common.util.type_forcers import convert_str_to_bool
from checkov.common.util.parser_utils import (
eval_string,
split_merge_args,
string_to_native,
to_string,
)
# Sentinel returned when a Terraform function cannot be evaluated
FUNCTION_FAILED = "____FUNCTION_FAILED____"
def eval_string(value: str) -> Any:
    """Parse *value* as an HCL expression and return the parsed result.

    Single quotes are normalized to double quotes first; any parsing
    failure yields ``None``.
    """
    try:
        normalized = value.replace("'", '"')
        # NOTE: newline is needed for the HCL parser
        document = hcl2.loads(f"eval = {normalized}\n")
        return document["eval"][0]
    except Exception:
        return None
def tolist(original: str, **_: Any) -> list[Any] | str:
    """Emulate Terraform's ``tolist`` function.

    https://www.terraform.io/docs/configuration/functions/tolist.html
    """
    parsed = eval_string(original)
    if parsed is None:
        return FUNCTION_FAILED
    # Already a list -> pass through; any other iterable is materialized.
    if isinstance(parsed, list):
        return parsed
    return list(parsed)
2,474 | from __future__ import annotations
import json
import logging
from collections.abc import Hashable
from typing import Dict, List, Union, Any, Callable
from checkov.common.util.data_structures_utils import pickle_deepcopy
from checkov.common.util.type_forcers import convert_str_to_bool
from checkov.common.util.parser_utils import (
eval_string,
split_merge_args,
string_to_native,
to_string,
)
# Sentinel returned when a Terraform function cannot be evaluated
FUNCTION_FAILED = "____FUNCTION_FAILED____"
def eval_string(value: str) -> Any:
    """Parse *value* as an HCL expression and return the parsed result.

    Single quotes are normalized to double quotes first; any parsing
    failure yields ``None``.
    """
    try:
        normalized = value.replace("'", '"')
        # NOTE: newline is needed for the HCL parser
        document = hcl2.loads(f"eval = {normalized}\n")
        return document["eval"][0]
    except Exception:
        return None
def toset(original: str, **_: Any) -> set[Any] | str:
    """Emulate Terraform's ``toset`` function.

    https://www.terraform.io/docs/configuration/functions/toset.html
    """
    parsed = eval_string(original)
    if parsed is None:
        return FUNCTION_FAILED
    # Already a set -> pass through; any other iterable is converted.
    if isinstance(parsed, set):
        return parsed
    return set(parsed)
2,475 | from __future__ import annotations
import json
import logging
from collections.abc import Hashable
from typing import Dict, List, Union, Any, Callable
from checkov.common.util.data_structures_utils import pickle_deepcopy
from checkov.common.util.type_forcers import convert_str_to_bool
from checkov.common.util.parser_utils import (
eval_string,
split_merge_args,
string_to_native,
to_string,
)
FUNCTION_FAILED = "____FUNCTION_FAILED____"
def _check_map_type_consistency(value: dict[Hashable, Any]) -> dict[Hashable, Any]:
# If there is a string and anything else, convert to string
had_string = False
had_something_else = False
for k, v in value.items():
if v == "${True}":
value[k] = True
v = True
elif v == "${False}":
value[k] = False
v = False
if isinstance(v, str):
had_string = True
if had_something_else:
break
else:
had_something_else = True
if had_string:
break
if had_string and had_something_else:
value = {k: to_string(v) for k, v in value.items()}
return value
def eval_string(value: str) -> Any:
try:
value_string = value.replace("'", '"')
parsed = hcl2.loads(f"eval = {value_string}\n") # NOTE: newline is needed
return parsed["eval"][0]
except Exception:
return None
def tomap(original: str, **_: Any) -> dict[Hashable, Any] | str:
    """Emulate Terraform's ``tomap`` function.

    https://www.terraform.io/docs/language/functions/tomap.html
    """
    # converted to colons by parser #shrug -- convert them back to '='
    fixed = original.replace(":", "=")
    parsed = eval_string(fixed)
    # Anything that didn't parse into a dict is a failure.
    if not isinstance(parsed, dict):
        return FUNCTION_FAILED
    return _check_map_type_consistency(parsed)
2,476 | from __future__ import annotations
import json
import logging
from collections.abc import Hashable
from typing import Dict, List, Union, Any, Callable
from checkov.common.util.data_structures_utils import pickle_deepcopy
from checkov.common.util.type_forcers import convert_str_to_bool
from checkov.common.util.parser_utils import (
eval_string,
split_merge_args,
string_to_native,
to_string,
)
# Sentinel returned when a Terraform function cannot be evaluated
FUNCTION_FAILED = "____FUNCTION_FAILED____"
def create_map(lst: list[Any]) -> dict[Hashable, Any]:
    """Build a dict from a flat ``[key1, value1, key2, value2, ...]`` list."""
    # Pair up alternating elements; an odd-length list raises IndexError,
    # matching the strictness expected by callers.
    pairs = {lst[idx]: lst[idx + 1] for idx in range(0, len(lst), 2)}
    return _check_map_type_consistency(pairs)
def eval_string(value: str) -> Any:
try:
value_string = value.replace("'", '"')
parsed = hcl2.loads(f"eval = {value_string}\n") # NOTE: newline is needed
return parsed["eval"][0]
except Exception:
return None
def map(original: str, **_: Any) -> dict[Hashable, Any] | str:  # noqa: A001  # intentionally shadows the builtin: Terraform function name
    """Emulate Terraform's (legacy) ``map`` function.

    https://www.terraform.io/docs/language/functions/map.html
    """
    # Commas inside strings make naive splitting unreliable; wrap the args in
    # list syntax and let the HCL parser do the splitting for us.
    args = eval_string(f"[{original}]")
    # Need a successfully parsed, even-length argument list (key/value pairs).
    if args is None or len(args) % 2 != 0:
        return FUNCTION_FAILED
    return create_map(args)
2,477 | from __future__ import annotations
import json
import logging
from collections.abc import Hashable
from typing import Dict, List, Union, Any, Callable
from checkov.common.util.data_structures_utils import pickle_deepcopy
from checkov.common.util.type_forcers import convert_str_to_bool
from checkov.common.util.parser_utils import (
eval_string,
split_merge_args,
string_to_native,
to_string,
)
def process_dynamic_values(conf: Dict[str, List[Any]]) -> bool:
    """Merge rendered ``dynamic`` blocks of *conf* into their target blocks.

    Returns True if at least one dynamic block entry was processed.
    """
    dynamic_conf: Union[List[Any], Dict[str, List[Any]]] = conf.get("dynamic", {})
    if not isinstance(dynamic_conf, list):
        return False

    found_dynamic = False
    for entry in dynamic_conf:
        if isinstance(entry, str):
            # Sometimes the dynamic block arrives serialized as a JSON string.
            try:
                entry = json.loads(entry)
            except Exception:
                entry = {}
        for block_name, block_value in entry.items():
            if "content" in block_value:
                content = block_value["content"]
                if block_name in conf:
                    # Normalize the existing entry to a list before merging.
                    if not isinstance(conf[block_name], list):
                        conf[block_name] = [conf[block_name]]
                    if isinstance(content, list):
                        conf[block_name].extend(content)
                    else:
                        conf[block_name].append(content)
                else:
                    conf[block_name] = pickle_deepcopy(content)
            else:
                # this should be the result of a successful dynamic block rendering
                # in some cases a whole dict is added, which doesn't have a list around it
                conf[block_name] = block_value if isinstance(block_value, list) else [block_value]
            found_dynamic = True
    return found_dynamic
def handle_dynamic_values(conf: Dict[str, List[Any]], has_dynamic_block: bool = False) -> bool:
    """Recursively render ``dynamic`` blocks anywhere inside *conf*.

    Returns True if a dynamic block exists somewhere in the resource.
    """
    for block_name in conf.keys():
        block = conf[block_name]
        # Descend into nested dicts ...
        if isinstance(block, dict):
            has_dynamic_block = handle_dynamic_values(block, has_dynamic_block)
        # ... and into block elements (non-empty lists whose first item is a dict).
        if block and isinstance(block, list) and isinstance(block[0], dict):
            has_dynamic_block = handle_dynamic_values(block[0], has_dynamic_block)
    # Process this level last so nested dynamics are already rendered.
    return process_dynamic_values(conf) or has_dynamic_block
2,478 | from __future__ import annotations
from collections import defaultdict
import json
import logging
import os
from typing import Dict, List, Tuple, Any
from charset_normalizer import from_fp
from checkov.terraform.context_parsers.registry import parser_registry
from checkov.terraform.plan_parser import parse_tf_plan, TF_PLAN_RESOURCE_ADDRESS
from checkov.common.runners.base_runner import filter_ignored_paths
from checkov.runner_filter import RunnerFilter
def parse_tf_plan(tf_plan_file: str, out_parsing_errors: Dict[str, str]) -> Tuple[Optional[Dict[str, Any]], Optional[List[Tuple[int, str]]]]:
def filter_ignored_paths(
root_dir: str,
names: list[str] | list[os.DirEntry[str]],
excluded_paths: list[str] | None,
included_paths: Iterable[str] | None = None
) -> None:
class RunnerFilter(object):
def __init__(
self,
framework: Optional[List[str]] = None,
checks: Union[str, List[str], None] = None,
skip_checks: Union[str, List[str], None] = None,
include_all_checkov_policies: bool = True,
download_external_modules: bool = False,
external_modules_download_path: str = DEFAULT_EXTERNAL_MODULES_DIR,
evaluate_variables: bool = True,
runners: Optional[List[str]] = None,
skip_framework: Optional[List[str]] = None,
excluded_paths: Optional[List[str]] = None,
all_external: bool = False,
var_files: Optional[List[str]] = None,
skip_cve_package: Optional[List[str]] = None,
use_enforcement_rules: bool = False,
filtered_policy_ids: Optional[List[str]] = None,
show_progress_bar: Optional[bool] = True,
run_image_referencer: bool = False,
enable_secret_scan_all_files: bool = False,
block_list_secret_scan: Optional[List[str]] = None,
deep_analysis: bool = False,
repo_root_for_plan_enrichment: Optional[List[str]] = None,
resource_attr_to_omit: Optional[Dict[str, Set[str]]] = None,
enable_git_history_secret_scan: bool = False,
git_history_timeout: str = '12h',
git_history_last_commit_scanned: Optional[str] = None, # currently not exposed by a CLI flag
report_sast_imports: bool = False,
remove_default_sast_policies: bool = False,
report_sast_reachability: bool = False
) -> None:
def _load_resource_attr_to_omit(resource_attr_to_omit_input: Optional[Dict[str, Set[str]]]) -> DefaultDict[str, Set[str]]:
def apply_enforcement_rules(self, enforcement_rule_configs: Dict[str, CodeCategoryConfiguration]) -> None:
def extract_enforcement_rule_threshold(self, check_id: str, report_type: str) -> Severity:
def should_run_check(
self,
check: BaseCheck | BaseGraphCheck | BaseSastCheck | None = None,
check_id: str | None = None,
bc_check_id: str | None = None,
severity: Severity | None = None,
report_type: str | None = None,
file_origin_paths: List[str] | None = None,
root_folder: str | None = None
) -> bool:
def _match_regex_pattern(self, check_id: str, file_origin_paths: List[str] | None, root_folder: str | None) -> bool:
def check_matches(check_id: str,
bc_check_id: Optional[str],
pattern_list: List[str]) -> bool:
def within_threshold(self, severity: Severity) -> bool:
def secret_validation_status_matches(secret_validation_status: str, statuses_list: list[str]) -> bool:
def notify_external_check(check_id: str) -> None:
def is_external_check(check_id: str) -> bool:
def is_policy_filtered(self, check_id: str) -> bool:
def to_dict(self) -> Dict[str, Any]:
def from_dict(obj: Dict[str, Any]) -> RunnerFilter:
def set_suppressed_policies(self, policy_level_suppressions: List[str]) -> None:
def get_sast_languages(frameworks: Optional[List[str]], skip_framework: Optional[List[str]]) -> Set[SastLanguages]:
def create_definitions(
    root_folder: str | None,
    files: list[str] | None = None,
    runner_filter: RunnerFilter | None = None,
    out_parsing_errors: dict[str, str] | None = None,
) -> tuple[dict[str, dict[str, Any]], dict[str, list[tuple[int, str]]]]:
    """Collect Terraform plan JSON files and parse them into definitions.

    Walks *root_folder* (honouring the runner filter's excluded paths) for
    ``.json`` files that look like Terraform plans, merges them with any
    explicitly passed *files*, and parses each plan.

    Returns:
        A tuple of (parsed definitions per file, raw file lines per file).
        Parse failures are recorded in *out_parsing_errors*.
    """
    runner_filter = runner_filter or RunnerFilter()
    if out_parsing_errors is None:
        out_parsing_errors = {}

    if root_folder:
        files = [] if not files else files
        for root, dir_names, file_names in os.walk(root_folder):
            filter_ignored_paths(root, dir_names, runner_filter.excluded_paths)
            filter_ignored_paths(root, file_names, runner_filter.excluded_paths)
            for file_name in file_names:
                if os.path.splitext(file_name)[1] != '.json':
                    continue
                candidate = os.path.join(root, file_name)
                try:
                    with open(candidate, "rb") as fp:
                        try:
                            content = json.load(fp)
                        except UnicodeDecodeError:
                            logging.debug(f"Encoding for file {candidate} is not UTF-8, trying to detect it")
                            content = str(from_fp(fp).best())
                        # Only real TF plan files carry a 'terraform_version' key.
                        if isinstance(content, dict) and content.get('terraform_version'):
                            files.append(candidate)
                except Exception as e:
                    logging.debug(f'Failed to load json file {candidate}, skipping', stack_info=True)
                    out_parsing_errors[candidate] = str(e)

    tf_definitions: dict[str, dict[str, Any]] = {}
    definitions_raw: dict[str, list[tuple[int, str]]] = {}
    if files:
        for plan_file in [os.path.realpath(f) for f in files]:
            if not plan_file.endswith(".json"):
                logging.debug(f'Failed to load {plan_file} as is not a .json file, skipping')
                continue
            parsed, raw = parse_tf_plan(plan_file, out_parsing_errors)
            if parsed and raw:
                tf_definitions[plan_file] = parsed
                definitions_raw[plan_file] = raw
    return tf_definitions, definitions_raw
2,479 | from __future__ import annotations
from collections import defaultdict
import json
import logging
import os
from typing import Dict, List, Tuple, Any
from charset_normalizer import from_fp
from checkov.terraform.context_parsers.registry import parser_registry
from checkov.terraform.plan_parser import parse_tf_plan, TF_PLAN_RESOURCE_ADDRESS
from checkov.common.runners.base_runner import filter_ignored_paths
from checkov.runner_filter import RunnerFilter
def get_entity_context(
    definitions: dict[str, dict[str, list[dict[str, Any]]]],
    definitions_raw: dict[str, list[tuple[int, str]]],
    definition_path: list[str],
    full_file_path: str,
    entity_id: str,
    block_type: str = "resource",
) -> dict[str, Any]:
    """Locate the context (line range, code lines, address) of one plan entity.

    Returns an empty dict when the file or the entity cannot be found.
    """
    context: dict[str, Any] = {}
    if full_file_path not in definitions:
        logging.debug(
            f'Tried to look up file {full_file_path} in TF plan entity definitions, but it does not exist')
        return context

    for resource in definitions.get(full_file_path, {}).get(block_type, []):
        by_type = resource.get(definition_path[0])
        if not by_type:
            continue
        definition = by_type.get(definition_path[1], {})
        # Match on the fully qualified Terraform address.
        if definition and definition.get(TF_PLAN_RESOURCE_ADDRESS) == entity_id:
            context['start_line'] = definition['start_line'][0]
            context['end_line'] = definition['end_line'][0]
            context["code_lines"] = definitions_raw[full_file_path][
                context["start_line"]: context["end_line"]
            ]
            context['address'] = definition[TF_PLAN_RESOURCE_ADDRESS]
            return context
    return context
# Shared registry of context parsers, keyed by block type ("resource", "data", ...)
parser_registry = ParserRegistry()
# Attribute name under which the full Terraform resource address is stored
TF_PLAN_RESOURCE_ADDRESS = CustomAttributes.TF_RESOURCE_ADDRESS
def build_definitions_context(
    definitions: dict[str, dict[str, list[dict[str, Any]]]],
    definitions_raw: Dict[str, List[Tuple[int, str]]]
) -> Dict[str, Dict[str, Any]]:
    """Build a per-file mapping of entity id -> entity context for TF plan files."""
    context_by_file: dict[str, dict[str, Any]] = defaultdict(dict)
    for full_file_path, definition in definitions.items():
        for block_type in ("data", "resource"):
            for entity in definition.get(block_type, []):
                context_parser = parser_registry.context_parsers[block_type]
                definition_path = context_parser.get_entity_context_path(entity)
                if len(definition_path) > 1:
                    resource_type, resource_name = definition_path[0], definition_path[1]
                    entity_id = entity.get(resource_type, {}).get(resource_name, {}).get(TF_PLAN_RESOURCE_ADDRESS)
                else:
                    entity_id = definition_path[0]
                # Entity can exist only once per dir, for file as well
                context_by_file[full_file_path][entity_id] = get_entity_context(
                    definitions=definitions,
                    definitions_raw=definitions_raw,
                    definition_path=definition_path,
                    full_file_path=full_file_path,
                    entity_id=entity_id,
                    block_type=block_type,
                )
    return context_by_file
2,480 | from __future__ import annotations
from collections import defaultdict
import json
import logging
import os
from typing import Dict, List, Tuple, Any
from charset_normalizer import from_fp
from checkov.terraform.context_parsers.registry import parser_registry
from checkov.terraform.plan_parser import parse_tf_plan, TF_PLAN_RESOURCE_ADDRESS
from checkov.common.runners.base_runner import filter_ignored_paths
from checkov.runner_filter import RunnerFilter
The provided code snippet includes the necessary dependencies for implementing the `get_resource_id_without_nested_modules` function. Write a Python function `def get_resource_id_without_nested_modules(address: str) -> str` to solve the following problem:
Return the resource id keeping only the innermost (last) module in the address. Example: for address='module.name1.module.name2.type.name', return 'module.name2.type.name'.
Here is the function:
def get_resource_id_without_nested_modules(address: str) -> str:
    """Strip outer module wrappers, keeping only the innermost module and resource.

    Example: address='module.name1.module.name2.type.name' returns
    'module.name2.type.name'.
    """
    # The last four dot-separated parts are: module, <name>, <type>, <resource name>.
    parts = address.split(".")
    return ".".join(parts[-4:])
2,481 | from __future__ import annotations
import logging
import subprocess
def get_kustomize_version(kustomize_command: str) -> str | None:
    """Return the installed kustomize version string, or None when undetectable.

    Handles both the v4 output ('{Version:kustomize/v4.5.7 GitCommit:...}')
    and the v5 output ('v5.0.0').
    """
    try:
        completed = subprocess.run([kustomize_command, "version"], capture_output=True)  # nosec
        stdout = completed.stdout.decode("utf-8")
        if "Version:" in stdout:
            # version <= 4 output looks like '{Version:kustomize/v4.5.7 GitCommit:...}\n'
            return stdout[stdout.find("/") + 1: stdout.find("G") - 1]
        if stdout.startswith("v"):
            # version >= 5 output looks like 'v5.0.0\n'
            return stdout.rstrip("\n")
        return None
    except Exception:
        logging.debug(f"An error occurred testing the {kustomize_command} command:", exc_info=True)
        return None
2,482 | from __future__ import annotations
import logging
import subprocess
def get_kubectl_version(kubectl_command: str) -> float | None:
    """Return the kubectl client version as a ``major.minor`` float, or None.

    Handles both the <=1.27 output ('Client Version: version.Info{Major:"1", ...}')
    and the >=1.28 output ('Client Version: v1.28.0').
    """
    try:
        completed = subprocess.run([kubectl_command, "version", "--client=true"], capture_output=True)  # nosec
        stdout = completed.stdout.decode("utf-8")
        if "Client Version:" in stdout:
            first_line = stdout.split("\n")[0]
            if "Major:" in stdout and "Minor:" in stdout:
                # version <= 1.27 output looks like 'Client Version: version.Info{Major:"1", Minor:"27", GitVersion:...}\n...'
                major = first_line.split('Major:"')[1].split('"')[0]
                minor = first_line.split('Minor:"')[1].split('"')[0]
            else:
                # version >= 1.28 output looks like 'Client Version: v1.28.0\n...'
                major, minor, *_ = first_line.replace("Client Version: v", "").split(".")
            return float(f"{major}.{minor}")
    except Exception:
        logging.debug(f"An error occurred testing the {kubectl_command} command:", exc_info=True)
    return None
2,483 | from __future__ import annotations
import io
import logging
import multiprocessing
import os
import pathlib
import platform
import shutil
import subprocess
import tempfile
import yaml
from typing import Optional, Dict, Any, TextIO, TYPE_CHECKING
from checkov.common.graph.graph_builder import CustomAttributes
from checkov.common.graph.graph_builder.consts import GraphSource
from checkov.common.images.image_referencer import Image
from checkov.common.output.record import Record
from checkov.common.output.report import Report
from checkov.common.bridgecrew.check_type import CheckType
from checkov.common.runners.base_runner import BaseRunner, filter_ignored_paths
from checkov.common.typing import _CheckResult, _EntityContext
from checkov.common.util.consts import START_LINE, END_LINE
from checkov.common.util.data_structures_utils import pickle_deepcopy
from checkov.common.util.type_forcers import convert_str_to_bool
from checkov.kubernetes.kubernetes_utils import create_check_result, get_resource_id, calculate_code_lines, \
PARENT_RESOURCE_ID_KEY_NAME
from checkov.kubernetes.runner import Runner as K8sRunner, _get_entity_abs_path, _KubernetesContext, _KubernetesDefinitions
from checkov.kustomize.image_referencer.manager import KustomizeImageReferencerManager
from checkov.kustomize.utils import get_kustomize_version, get_kubectl_version
from checkov.runner_filter import RunnerFilter
from checkov.common.graph.checks_infra.registry import BaseRegistry
from checkov.common.typing import LibraryGraphConnector
from checkov.kubernetes.graph_builder.local_graph import KubernetesLocalGraph
class Runner(BaseRunner[_KubernetesDefinitions, _KubernetesContext, "KubernetesGraphManager"]):
    """Checkov runner that renders Kustomize projects to K8s YAML and scans the output."""

    kustomize_command = 'kustomize'  # noqa: CCE003  # a static attribute
    kubectl_command = 'kubectl'  # noqa: CCE003  # a static attribute
    check_type = CheckType.KUSTOMIZE  # noqa: CCE003  # a static attribute
    system_deps = True  # noqa: CCE003  # a static attribute
    kustomizeSupportedFileTypes = ('kustomization.yaml', 'kustomization.yml')  # noqa: CCE003  # a static attribute
    def __init__(self) -> None:
        """Initialize the Kustomize runner with empty per-scan state."""
        super().__init__(file_names=Runner.kustomizeSupportedFileTypes)
        # Kustomization files classified as bases / overlays during parsing
        self.potentialBases: "list[str]" = []
        self.potentialOverlays: "list[str]" = []
        # Per-directory parse metadata (type, referenced bases, overlay name, ...)
        self.kustomizeProcessedFolderAndMeta: "dict[str, dict[str, str]]" = {}
        # presumably maps rendered manifest files back to their source kustomization dir -- verify against _curWriterValidateStoreMapAndClose
        self.kustomizeFileMappings: "dict[str, str]" = {}
        # 'kubectl' or 'kustomize', chosen by check_system_deps()
        self.templateRendererCommand: str | None = None
        # Temp folder that receives the rendered K8s manifests
        self.target_folder_path = ''
        # Opt-in: allow 'kustomize edit' to temporarily modify user files for origin annotations
        self.checkov_allow_kustomize_file_edits = convert_str_to_bool(os.getenv("CHECKOV_ALLOW_KUSTOMIZE_FILE_EDITS",
                                                                               False))
    def get_k8s_target_folder_path(self) -> str:
        """Return the temp folder path where rendered K8s manifests are written."""
        return self.target_folder_path
    def get_kustomize_metadata(self) -> dict[str, dict[str, Any]]:
        """Return the per-directory parse metadata and the rendered-file mapping."""
        return {'kustomizeMetadata': self.kustomizeProcessedFolderAndMeta,
                'kustomizeFileMappings': self.kustomizeFileMappings}
    def _parseKustomization(self, kustomize_dir: str) -> dict[str, Any]:
        """Parse the kustomization file in *kustomize_dir* into a metadata dict.

        Classifies the directory as a "base" or an "overlay" (also recording it
        in ``self.potentialBases`` / ``self.potentialOverlays``) and returns a
        dict with 'type', optional 'referenced_bases', 'fileContent' and
        'filePath'. Returns {} when no kustomization file exists or it cannot
        be parsed.
        """
        # We may have multiple results for "kustomization.yaml" files. These could be:
        # - Base and Environment (overlay) DIR's for the same kustomize-powered deployment
        # - OR, Multiple different Kustomize-powered deployments
        # - OR, a mixture of the two.
        # We need parse some of the Kustomization.yaml files to work out which
        # This is so we can provide "Environment" information back to the user as part of the checked resource name/description.
        # TODO: We could also add a --kustomize-environment option so we only scan certain overlay names (prod, test etc) useful in CI.
        yaml_path = os.path.join(kustomize_dir, "kustomization.yaml")
        yml_path = os.path.join(kustomize_dir, "kustomization.yml")
        # Prefer the .yml variant when both exist
        if os.path.isfile(yml_path):
            kustomization_path = yml_path
        elif os.path.isfile(yaml_path):
            kustomization_path = yaml_path
        else:
            return {}
        with open(kustomization_path, 'r') as kustomization_file:
            metadata: dict[str, Any] = {}
            try:
                file_content = yaml.safe_load(kustomization_file)
            except yaml.YAMLError:
                logging.info(f"Failed to load Kustomize metadata from {kustomization_path}.", exc_info=True)
                return {}
            if not isinstance(file_content, dict):
                return {}
            if 'resources' in file_content:
                resources = file_content['resources']
                # We can differentiate between "overlays" and "bases" based on if the `resources` refers to a directory,
                # which represents an "overlay", or only files which represents a "base"
                resources_representing_directories = [r for r in resources if pathlib.Path(r).suffix == '']
                if resources_representing_directories:
                    logging.debug(
                        f"Kustomization contains resources: section with directories. Likely an overlay/env."
                        f" {kustomization_path}")
                    metadata['type'] = "overlay"
                    metadata['referenced_bases'] = resources_representing_directories
                else:
                    logging.debug(f"Kustomization contains resources: section with only files (no dirs). Likley a base."
                                  f" {kustomization_path}")
                    metadata['type'] = "base"
            elif 'patchesStrategicMerge' in file_content:
                # Patches only make sense on top of a base -> overlay
                logging.debug(f"Kustomization contains patchesStrategicMerge: section. Likley an overlay/env. {kustomization_path}")
                metadata['type'] = "overlay"
                if 'bases' in file_content:
                    metadata['referenced_bases'] = file_content['bases']
            elif 'bases' in file_content:
                # Explicit bases reference -> overlay
                logging.debug(f"Kustomization contains bases: section. Likley an overlay/env. {kustomization_path}")
                metadata['type'] = "overlay"
                metadata['referenced_bases'] = file_content['bases']
            metadata['fileContent'] = file_content
            metadata['filePath'] = f"{kustomization_path}"
            # Remember the classification for later base/overlay resolution
            if metadata.get('type') == "base":
                self.potentialBases.append(metadata['filePath'])
            if metadata.get('type') == "overlay":
                self.potentialOverlays.append(metadata['filePath'])
        return metadata
def check_system_deps(self) -> str | None:
# Ensure local system dependancies are available and of the correct version.
# Returns framework names to skip if deps **fail** (ie, return None for a successful deps check).
logging.info(f"Checking necessary system dependancies for {self.check_type} checks.")
if shutil.which(self.kubectl_command) is not None:
kubectl_version = get_kubectl_version(kubectl_command=self.kubectl_command)
if kubectl_version and kubectl_version >= 1.14:
logging.info(f"Found working version of {self.check_type} dependancy {self.kubectl_command}: {kubectl_version}")
self.templateRendererCommand = self.kubectl_command
return None
else:
return self.check_type
elif shutil.which(self.kustomize_command) is not None:
kustomize_version = get_kustomize_version(kustomize_command=self.kustomize_command)
if kustomize_version:
logging.info(
f"Found working version of {self.check_type} dependency {self.kustomize_command}: {kustomize_version}"
)
self.templateRendererCommand = self.kustomize_command
return None
else:
return self.check_type
else:
logging.info(f"Could not find usable tools locally to process {self.check_type} checks. Framework will be disabled for this run.")
return self.check_type
    def _handle_overlay_case(self, file_path: str,
                             kustomizeProcessedFolderAndMeta: dict[str, dict[str, Any]] | None = None) -> None:
        """Resolve which base a parsed overlay refers to and name the overlay.

        Walks the overlay's parent directories looking for a known potential
        base; on a match records 'calculated_bases'. If the overlay's first
        referenced base resolves to that path, records 'validated_base' and a
        path-derived 'overlay_name' used in Checkov results; otherwise falls
        back to the directory stem as the overlay name.
        """
        if kustomizeProcessedFolderAndMeta is None:
            kustomizeProcessedFolderAndMeta = self.kustomizeProcessedFolderAndMeta
        # Match any ancestor of the overlay against the grandparent dir of each known base
        for parent in pathlib.Path(file_path).parents:
            for potentialBase in self.potentialBases:
                pathlib_base_object = pathlib.Path(potentialBase)
                potential_base_path = pathlib_base_object.parents[1]
                if parent == potential_base_path.resolve():
                    kustomizeProcessedFolderAndMeta[file_path]['calculated_bases'] = str(pathlib_base_object.parent)
        # KeyError below means 'referenced_bases' or 'calculated_bases' was never set
        try:
            relativeToFullPath = f"{file_path}/{kustomizeProcessedFolderAndMeta[file_path]['referenced_bases'][0]}"
            if pathlib.Path(kustomizeProcessedFolderAndMeta[file_path]['calculated_bases']) == pathlib.Path(relativeToFullPath).resolve():
                # Referenced base confirms the calculated base -> derive the env name from the path
                kustomizeProcessedFolderAndMeta[file_path]['validated_base'] = str(pathlib.Path(kustomizeProcessedFolderAndMeta[file_path]['calculated_bases']))
                checkov_kustomize_env_name_by_path = str(pathlib.Path(file_path).relative_to(pathlib.Path(kustomizeProcessedFolderAndMeta[file_path]['calculated_bases']).parent))
                kustomizeProcessedFolderAndMeta[file_path]['overlay_name'] = checkov_kustomize_env_name_by_path
                logging.debug(f"Overlay based on {kustomizeProcessedFolderAndMeta[file_path]['validated_base']}, naming overlay {checkov_kustomize_env_name_by_path} for Checkov Results.")
            else:
                # Fallback: use the overlay directory's stem as the env name
                checkov_kustomize_env_name_by_path = pathlib.Path(file_path).stem
                kustomizeProcessedFolderAndMeta[file_path]['overlay_name'] = checkov_kustomize_env_name_by_path
                logging.debug(f"Could not confirm base dir for Kustomize overlay/env. Using {checkov_kustomize_env_name_by_path} for Checkov Results.")
        except KeyError:
            # Same fallback when the metadata lacks the needed keys
            checkov_kustomize_env_name_by_path = pathlib.Path(file_path).stem
            kustomizeProcessedFolderAndMeta[file_path]['overlay_name'] = checkov_kustomize_env_name_by_path
            logging.debug(f"Could not confirm base dir for Kustomize overlay/env. Using {checkov_kustomize_env_name_by_path} for Checkov Results.")
    def _get_parsed_output(
        file_path: str, extract_dir: str, output: str, shared_kustomize_file_mappings: dict[str, str]
    ) -> TextIO | None:
        """Split the rendered multi-document YAML *output* into one file per manifest.

        Each document (separated by '---') is written to a numbered file under
        *extract_dir*; every completed file is validated and registered in
        *shared_kustomize_file_mappings*. Returns the writer of the last,
        still-open file (or None if nothing was written).

        NOTE(review): takes no ``self`` and is invoked as
        ``Runner._get_parsed_output`` -- presumably meant to be a
        @staticmethod; confirm.
        """
        cur_source_file = None
        cur_writer = None
        last_line_dashes = False
        line_num = 1
        file_num = 0

        # page-to-file parser from helm framework works well, but we expect the file to start with --- in this case from Kustomize.
        output = "---\n" + output
        reader = io.StringIO(output)
        for s in reader:
            s = s.rstrip()
            if s == '---':
                last_line_dashes = True
                continue

            if last_line_dashes:
                # The next line should contain a "apiVersion" line for the next Kubernetes manifest
                # So we will close the old file, open a new file, and write the dashes from last iteration plus this line
                source = file_num
                file_num += 1
                if source != cur_source_file:
                    if cur_writer:
                        # Here we are about to close a "complete" file. The function will validate it looks like a K8S manifest before continuing.
                        Runner._curWriterValidateStoreMapAndClose(cur_writer, file_path, shared_kustomize_file_mappings)

                    parent = os.path.dirname(os.path.join(extract_dir, str(source)))
                    os.makedirs(parent, exist_ok=True)
                    cur_source_file = source
                    # Append mode: a source may be revisited
                    cur_writer = open(os.path.join(extract_dir, str(source)), 'a')
                if cur_writer:
                    cur_writer.write('---' + os.linesep)
                    cur_writer.write(s + os.linesep)

                last_line_dashes = False
            else:
                # Regular manifest line: only write once a file is open
                if not cur_writer:
                    continue
                else:
                    cur_writer.write(s + os.linesep)

            line_num += 1
        return cur_writer
def _get_kubectl_output(self, filePath: str, template_renderer_command: str, source_type: str | None) -> bytes | None:
# Template out the Kustomizations to Kubernetes YAML
if template_renderer_command == "kubectl":
template_render_command_options = "kustomize"
elif template_renderer_command == "kustomize":
template_render_command_options = "build"
else:
logging.error(f"Template renderer command has an invalid value: {template_renderer_command}")
return None
add_origin_annotations_return_code = None
if self.checkov_allow_kustomize_file_edits:
add_origin_annotations_command = 'kustomize edit add buildmetadata originAnnotations'
add_origin_annotations_return_code = subprocess.run(add_origin_annotations_command.split(' '), # nosec
cwd=filePath).returncode
full_command = f'{template_renderer_command} {template_render_command_options}'
proc = subprocess.Popen(full_command.split(' '), cwd=filePath, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # nosec
output, _ = proc.communicate()
if self.checkov_allow_kustomize_file_edits and add_origin_annotations_return_code == 0:
# If the return code is not 0, we didn't add the new buildmetadata field, so we shouldn't remove it
remove_origin_annotaions = 'kustomize edit remove buildmetadata originAnnotations'
subprocess.run(remove_origin_annotaions.split(' '), cwd=filePath) # nosec
logging.info(
f"Ran kubectl to build Kustomize output. DIR: {filePath}. TYPE: {source_type}.")
return output
def _get_env_or_base_path_prefix(
file_path: str, kustomize_processed_folder_and_meta: dict[str, dict[str, Any]]
) -> str | None:
env_or_base_path_prefix = None
if kustomize_processed_folder_and_meta[file_path].get('type') == "overlay":
if 'calculated_bases' not in kustomize_processed_folder_and_meta[file_path]:
logging.debug(f"Kustomize: Overlay with unknown base. User may have specified overlay dir directly. {file_path}")
env_or_base_path_prefix = ""
else:
base_path_parts = pathlib.Path(kustomize_processed_folder_and_meta[file_path]['calculated_bases']).parts
most_significant_base_path = f"/{base_path_parts[-3]}/{base_path_parts[-2]}"
env_or_base_path_prefix = f"{most_significant_base_path}/{kustomize_processed_folder_and_meta[file_path]['overlay_name']}"
elif kustomize_processed_folder_and_meta[file_path].get('type') == "base":
# Validated base last three parents as a path
base_path_parts = pathlib.Path(kustomize_processed_folder_and_meta[file_path]['filePath']).parts
most_significant_base_path = f"/{base_path_parts[-4]}/{base_path_parts[-3]}/{base_path_parts[-2]}"
env_or_base_path_prefix = most_significant_base_path
return env_or_base_path_prefix
def get_binary_output_from_directory(
self,
file_path: str,
template_renderer_command: str,
) -> tuple[bytes, str] | tuple[None, None]:
kustomizeProcessedFolderAndMeta = {file_path: self._parseKustomization(file_path)}
if kustomizeProcessedFolderAndMeta[file_path].get('type') == 'overlay':
self._handle_overlay_case(file_path, kustomizeProcessedFolderAndMeta)
return self.get_binary_output(file_path, kustomizeProcessedFolderAndMeta, template_renderer_command)
def get_binary_output(
    self,
    file_path: str,
    kustomize_processed_folder_and_meta: dict[str, dict[str, Any]],
    template_renderer_command: str,
) -> tuple[bytes, str] | tuple[None, None]:
    """Render the kustomize directory at *file_path* into raw manifest bytes.

    Returns (rendered_bytes, file_path) on success and (None, None) when
    rendering fails or produces no output; failures are logged, not raised.
    """
    source_type = kustomize_processed_folder_and_meta[file_path].get('type')
    logging.debug(f"Kustomization at {file_path} likley a {source_type}")
    try:
        rendered = self._get_kubectl_output(file_path, template_renderer_command, source_type)
    except Exception:
        logging.warning(f"Error building Kustomize output at dir: {file_path}.", exc_info=True)
        return None, None
    if rendered is None:
        return None, None
    return rendered, file_path
def _parse_output(
    output: bytes,
    file_path: str,
    kustomize_processed_folder_and_meta: dict[str, dict[str, Any]],
    target_folder_path: str,
    shared_kustomize_file_mappings: dict[str, str],
) -> None:
    """Split rendered kustomize output into per-resource manifest files.

    Writes under target_folder_path + <env/base prefix> and records the
    template-file -> source-dir mapping in shared_kustomize_file_mappings.
    """
    prefix = Runner._get_env_or_base_path_prefix(file_path, kustomize_processed_folder_and_meta)
    if prefix is None:
        logging.warning(f"env_or_base_path_prefix is None, filePath: {file_path}", exc_info=True)
        return
    extraction_dir = target_folder_path + prefix
    os.makedirs(extraction_dir, exist_ok=True)
    logging.debug(f"Kustomize: Temporary directory for {file_path} at {extraction_dir}")
    writer = Runner._get_parsed_output(file_path, extraction_dir, output.decode("utf-8"), shared_kustomize_file_mappings)
    if writer:
        Runner._curWriterValidateStoreMapAndClose(writer, file_path, shared_kustomize_file_mappings)
def _run_kustomize_parser(
    self,
    file_path: str,
    shared_kustomize_file_mappings: dict[str, str],
    kustomize_processed_folder_and_meta: dict[str, dict[str, Any]],
    template_renderer_command: str,
    target_folder_path: str,
) -> None:
    """Render one kustomize directory and persist its parsed manifests.

    No-op when rendering yields no output (errors were already logged by
    get_binary_output).
    """
    rendered, _ = self.get_binary_output(file_path, kustomize_processed_folder_and_meta, template_renderer_command)
    if not rendered:
        return
    Runner._parse_output(
        rendered, file_path, kustomize_processed_folder_and_meta, target_folder_path, shared_kustomize_file_mappings
    )
def run_kustomize_to_k8s(
    self, root_folder: str | None, files: list[str] | None, runner_filter: RunnerFilter
) -> None:
    """Render every discovered kustomize directory into plain Kubernetes manifests.

    Discovers kustomization files under root_folder / files, parses their
    metadata, then templates each directory into a fresh temp folder
    (self.target_folder_path) so the Kubernetes runner can scan the output.
    Populates self.kustomizeProcessedFolderAndMeta and
    self.kustomizeFileMappings as side effects.
    """
    kustomize_dirs = find_kustomize_directories(root_folder, files, runner_filter.excluded_paths)
    if not kustomize_dirs:
        # nothing to process
        return
    for kustomize_dir in kustomize_dirs:
        self.kustomizeProcessedFolderAndMeta[kustomize_dir] = self._parseKustomization(kustomize_dir)
    self.target_folder_path = tempfile.mkdtemp()
    for file_path in self.kustomizeProcessedFolderAndMeta:
        if self.kustomizeProcessedFolderAndMeta[file_path].get('type') == 'overlay':
            # NOTE(review): called with one argument here but with an explicit
            # metadata dict in get_binary_output_from_directory — presumably the
            # second parameter defaults to self.kustomizeProcessedFolderAndMeta;
            # confirm against the method definition.
            self._handle_overlay_case(file_path)
    if platform.system() == 'Windows':
        if not self.templateRendererCommand:
            logging.error("The 'templateRendererCommand' was not set correctly")
            return
        # Windows path: render each directory sequentially in-process instead
        # of fanning out worker processes.
        shared_kustomize_file_mappings: dict[str, str] = {}
        for file_path in self.kustomizeProcessedFolderAndMeta:
            self._run_kustomize_parser(
                file_path=file_path,
                shared_kustomize_file_mappings=shared_kustomize_file_mappings,
                kustomize_processed_folder_and_meta=self.kustomizeProcessedFolderAndMeta,
                template_renderer_command=self.templateRendererCommand,
                target_folder_path=self.target_folder_path,
            )
        self.kustomizeFileMappings = shared_kustomize_file_mappings
        return
    # Non-Windows path: one worker process per kustomize dir, collecting the
    # template-file -> source-dir mapping through a manager-backed shared dict.
    manager = multiprocessing.Manager()
    # make sure we have new dict
    shared_kustomize_file_mappings = pickle_deepcopy(manager.dict())  # type:ignore[arg-type] # works with DictProxy
    shared_kustomize_file_mappings.clear()
    jobs = []
    for filePath in self.kustomizeProcessedFolderAndMeta:
        p = multiprocessing.Process(
            target=self._run_kustomize_parser,
            args=(
                filePath,
                shared_kustomize_file_mappings,
                self.kustomizeProcessedFolderAndMeta,
                self.templateRendererCommand,
                self.target_folder_path
            )
        )
        jobs.append(p)
        p.start()
    for proc in jobs:
        proc.join()
    self.kustomizeFileMappings = dict(shared_kustomize_file_mappings)
def run(
    self,
    root_folder: str | None,
    external_checks_dir: list[str] | None = None,
    files: list[str] | None = None,
    runner_filter: RunnerFilter | None = None,
    collect_skip_comments: bool = True,
) -> Report | list[Report]:
    """Template kustomize directories to Kubernetes manifests, then scan them.

    Delegates the actual scanning (python and graph checks) to
    K8sKustomizeRunner over the rendered temp directory; the temp dir is
    removed on success.  Returns a single Report, or a list of reports when
    an SCA image report is produced as well.
    """
    runner_filter = runner_filter or RunnerFilter()
    if not runner_filter.show_progress_bar:
        self.pbar.turn_off_progress_bar()
    self.run_kustomize_to_k8s(root_folder, files, runner_filter)
    report: "Report | list[Report]" = Report(self.check_type)
    if not self.kustomizeProcessedFolderAndMeta:
        # nothing to process
        return report
    target_dir = ""
    try:
        k8s_runner = K8sKustomizeRunner()
        # k8s_runner.run() will kick off both CKV_ and CKV2_ checks and return a merged results object.
        target_dir = self.get_k8s_target_folder_path()
        k8s_runner.report_mutator_data = self.get_kustomize_metadata()
        if root_folder:
            k8s_runner.original_root_dir = root_folder
        # the returned report can be a list of reports, which also includes an SCA image report
        report = k8s_runner.run(target_dir, external_checks_dir=external_checks_dir, runner_filter=runner_filter)
        self.graph_manager = k8s_runner.graph_manager
        logging.debug(f"Sucessfully ran k8s scan on Kustomization templated files in tmp scan dir : {target_dir}")
        shutil.rmtree(target_dir)
    except Exception:
        logging.warning("Failed to run Kubernetes runner", exc_info=True)
        with tempfile.TemporaryDirectory() as save_error_dir:
            logging.debug(
                f"Error running k8s scan on Scan dir: {target_dir}. Saved context dir: {save_error_dir}")
            # NOTE(review): save_error_dir is a TemporaryDirectory, so the moved
            # scan context is deleted again as soon as this block exits —
            # confirm whether the debug artifacts are meant to survive.
            shutil.move(target_dir, save_error_dir)
    return report
def _curWriterValidateStoreMapAndClose(
    cur_writer: TextIO, file_path: str, shared_kustomize_file_mappings: dict[str, str]
) -> None:
    """Close a finished manifest writer, validate and rename its file.

    The temp file is renamed to "<kind>-<namespace>-<name>.yaml" built from
    the manifest's K8S metadata, and the new path is mapped back to the
    originating kustomize directory for later output attribution.  A file
    without an apiVersion key raises; IsADirectoryError is ignored.
    """
    temp_manifest_path = cur_writer.name
    cur_writer.close()
    try:
        with open(temp_manifest_path) as manifest_file:
            manifest = yaml.safe_load(manifest_file)
        # Validate we actually wrote a K8S manifest before renaming it.
        if "apiVersion" not in manifest:
            raise Exception(f'Not a valid Kubernetes manifest (no apiVersion) while parsing Kustomize template: {file_path}. Templated output: {temp_manifest_path}.')
        metadata = manifest['metadata']
        name_parts = [
            manifest['kind'],
            metadata['namespace'] if 'namespace' in metadata else "default",
            metadata['name'] if 'name' in metadata else "noname",
        ]
        renamed_path = str(pathlib.Path(temp_manifest_path).parent / f"{'-'.join(name_parts)}.yaml")
        os.rename(temp_manifest_path, renamed_path)
        shared_kustomize_file_mappings[renamed_path] = file_path
    except IsADirectoryError:
        pass
def filter_ignored_paths(
    root_dir: str,
    names: list[str] | list[os.DirEntry[str]],
    excluded_paths: list[str] | None,
    included_paths: Iterable[str] | None = None
) -> None:
    """Prune ignored directory/file entries from *names* in place.

    Two passes are applied:
      1. legacy basename rules — hardcoded ignore directories and hidden
         (dot-prefixed) entries, unless explicitly included;
      2. user exclusions — each excluded path is tried both as a regex and
         as a plain substring against the path joined with *root_dir*.
    """
    allow_listed = included_paths or []

    def entry_basename(candidate):
        return candidate.name if isinstance(candidate, os.DirEntry) else candidate

    # Pass 1: legacy behaviour — also drops files starting with '.', which is
    # acceptable; mostly this removes the hardcoded problematic directories.
    for candidate in list(names):
        basename = entry_basename(candidate)
        if basename in ignored_directories:
            safe_remove(names, candidate)
        if basename.startswith(".") and IGNORE_HIDDEN_DIRECTORY_ENV and basename not in allow_listed:
            safe_remove(names, candidate)

    # Pass 2: exclusions from --skip-path / platform repo settings, matched
    # against paths relative to the root folder (files or directories).
    # TODO this is not going to work well on Windows, because paths specified
    # in the platform will use /, and CLI paths will presumably use \\
    if not excluded_paths:
        return
    patterns = []
    for raw in excluded_paths:
        try:
            patterns.append(re.compile(raw.replace(".terraform", r"\.terraform")))
        except re.error:
            # do not add compiled paths that aren't regexes
            continue
    for candidate in list(names):
        full_path = os.path.join(root_dir, entry_basename(candidate))
        if any(pattern.search(full_path) for pattern in patterns) or any(raw in full_path for raw in excluded_paths):
            safe_remove(names, candidate)
class Runner(ImageReferencerMixin[None], BaseRunner[_KubernetesDefinitions, _KubernetesContext, KubernetesGraphManager]):
    """Checkov runner for scanning Kubernetes manifest files.

    Parses manifests into definitions, builds a Kubernetes graph, runs both
    python-based and graph-based checks, and can additionally produce a
    container-image (SCA) report via ImageReferencerMixin.
    """

    check_type = CheckType.KUBERNETES  # noqa: CCE003 # a static attribute
def __init__(
    self,
    graph_class: Type[KubernetesLocalGraph] = KubernetesLocalGraph,
    db_connector: LibraryGraphConnector | None = None,
    source: str = GraphSource.KUBERNETES,
    graph_manager: KubernetesGraphManager | None = None,
    external_registries: list[BaseRegistry] | None = None,
    report_type: str = check_type
) -> None:
    """Initialize registries, the graph manager and empty scan state."""
    super().__init__(file_extensions=K8_POSSIBLE_ENDINGS)
    db_connector = db_connector or self.db_connector
    self.external_registries = external_registries if external_registries is not None else []
    self.graph_class = graph_class
    if graph_manager:
        self.graph_manager = graph_manager
    else:
        self.graph_manager = KubernetesGraphManager(source=source, db_connector=db_connector)
    self.graph_registry = get_graph_checks_registry(self.check_type)
    # Scan state, populated lazily by run()
    self.definitions: _KubernetesDefinitions = {}
    self.definitions_raw: "dict[str, list[tuple[int, str]]]" = {}
    self.context: _KubernetesContext | None = None
    self.report_mutator_data: "dict[str, dict[str, Any]]" = {}
    self.report_type = report_type
def run(
    self,
    root_folder: str | None,
    external_checks_dir: list[str] | None = None,
    files: list[str] | None = None,
    runner_filter: RunnerFilter | None = None,
    collect_skip_comments: bool = True,
) -> Report | list[Report]:
    """Scan Kubernetes manifests and return the resulting report(s).

    Parses definitions (unless already provided), optionally loads external
    checks, builds the Kubernetes graph, then runs python checks, graph
    checks, and (optionally) the image referencer.  Returns a single Report,
    or [report, image_report] when image referencing produced results.
    """
    runner_filter = runner_filter or RunnerFilter()
    if not runner_filter.show_progress_bar:
        self.pbar.turn_off_progress_bar()
    report = Report(self.check_type)
    if self.context is None or self.definitions is None:
        # fresh run: parse the manifests from disk
        if files or root_folder:
            self.definitions, self.definitions_raw = create_definitions(root_folder, files, runner_filter)
        else:
            return report
        if external_checks_dir:
            for directory in external_checks_dir:
                registry.load_external_checks(directory)
                if self.graph_registry:
                    self.graph_registry.load_external_checks(directory)
        self.context = build_definitions_context(self.definitions, self.definitions_raw)
        self.spread_list_items()
        if self.graph_manager:
            logging.info("creating Kubernetes graph")
            local_graph = self.graph_manager.build_graph_from_definitions(pickle_deepcopy(self.definitions))
            logging.info("Successfully created Kubernetes graph")
            # register every graph vertex as a scanned resource
            for vertex in local_graph.vertices:
                file_abs_path = _get_entity_abs_path(root_folder, vertex.path)
                report.add_resource(f'{file_abs_path}:{vertex.id}')
            self.graph_manager.save_graph(local_graph)
    self.pbar.initiate(len(self.definitions))
    report = self.check_definitions(root_folder, runner_filter, report, collect_skip_comments=collect_skip_comments)
    if self.graph_manager:
        graph_report = self.get_graph_checks_report(root_folder, runner_filter)
        merge_reports(report, graph_report)
    if runner_filter.run_image_referencer:
        if files:
            # 'root_folder' shouldn't be empty to remove the whole path later and only leave the shortened form
            root_folder = os.path.split(os.path.commonprefix(files))[0]
        image_report = self.get_image_report(root_folder, runner_filter)
        if image_report:
            # due too many tests failing only return a list, if there is an image report
            return [report, image_report]
    return report
def get_image_report(self, root_folder: str | None, runner_filter: RunnerFilter) -> Report | None:
    """Build the container-image (SCA) report, or None without a graph manager."""
    if not self.graph_manager:
        return None
    return self.check_container_image_references(
        runner_filter=runner_filter,
        root_path=root_folder,
        graph_connector=self.graph_manager.get_reader_endpoint(),
    )
def spread_list_items(self) -> None:
    """Flatten Kubernetes `List` wrapper resources into their child items.

    Mutates each file's definition list in place: every `kind: List`
    wrapper is replaced by its non-empty "items", appended at the end.
    """
    for file_conf in self.definitions.values():
        wrappers = [resource for resource in file_conf if resource.get('kind') == "List"]
        for wrapper in wrappers:
            file_conf.extend(item for item in wrapper.get("items", []) if item)
            file_conf.remove(wrapper)
def check_definitions(
    self, root_folder: str | None, runner_filter: RunnerFilter, report: Report, collect_skip_comments: bool = True
) -> Report:
    """Run the python-based Kubernetes checks over every parsed definition.

    Iterates self.definitions file by file, scans each resource through the
    check registry and folds the results into *report* via
    mutate_kubernetes_results.
    """
    for k8_file in self.definitions.keys():
        self.pbar.set_additional_data({'Current File Scanned': os.path.relpath(k8_file, root_folder)})
        # There are a few cases here. If -f was used, there could be a leading / because it's an absolute path,
        # or there will be no leading slash; root_folder will always be none.
        # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).
        # The goal here is simply to get a valid path to the file (which sls_file does not always give).
        file_abs_path = _get_entity_abs_path(root_folder, k8_file)
        k8_file_path = f"/{os.path.relpath(file_abs_path, root_folder)}"
        # Run for each definition
        for entity_conf in self.definitions[k8_file]:
            entity_type = entity_conf.get("kind")
            # Skip Kustomization Templates.
            # Should be handled by Kustomize framework handler when it finds kustomization.yaml files.
            # TODO: FUTURE: Potentially call the framework if we find items here that aren't in a file called kustomization.yaml - validate this edge case.
            if entity_type == "Kustomization":
                continue
            skipped_checks = get_skipped_checks(entity_conf)
            results = registry.scan(k8_file, entity_conf, skipped_checks, runner_filter)
            # TODO? - Variable Eval Message!
            variable_evaluations: "dict[str, Any]" = {}
            report = self.mutate_kubernetes_results(results, report, k8_file, k8_file_path, file_abs_path,
                                                    entity_conf, variable_evaluations, root_folder)
        self.pbar.update()
    self.pbar.close()
    return report
def get_graph_checks_report(self, root_folder: str | None, runner_filter: RunnerFilter) -> Report:
    """Run the graph-based checks and return them folded into a Report."""
    graph_report = Report(self.check_type)
    checks_results = self.run_graph_checks_results(runner_filter, self.report_type)
    return self.mutate_kubernetes_graph_results(root_folder, runner_filter, graph_report, checks_results)
def mutate_kubernetes_results(
    self,
    results: dict[BaseCheck, _CheckResult],
    report: Report,
    k8_file: str,
    k8_file_path: str,
    file_abs_path: str,
    entity_conf: dict[str, Any],
    variable_evaluations: dict[str, Any],
    root_folder: str | None = None
) -> Report:
    """Convert raw check results for one resource into Record entries on *report*.

    Resources that produced no check results at all are still registered as
    extra resources so they appear in the report's inventory.
    """
    # Moves report generation logic out of run() method in Runner class.
    # Allows function overriding of a much smaller function than run() for other "child" frameworks such as Kustomize, Helm
    # Where Kubernetes CHECKS are needed, but the specific file references are to another framework for the user output (or a mix of both).
    if results:
        if not self.context:
            # this shouldn't happen
            logging.error("Context for Kubernetes runner was not set")
            return report
        for check, check_result in results.items():
            resource_id = get_resource_id(entity_conf)
            if not resource_id:
                # cannot attribute the finding to a resource; skip it
                continue
            entity_context = self.context[k8_file][resource_id]
            record = Record(
                check_id=check.id,
                bc_check_id=check.bc_id,
                check_name=check.name,
                check_result=check_result,
                code_block=entity_context.get("code_lines"),
                file_path=k8_file_path,
                file_line_range=[entity_context.get("start_line"), entity_context.get("end_line")],
                resource=resource_id,
                evaluations=variable_evaluations,
                check_class=check.__class__.__module__,
                file_abs_path=file_abs_path,
                severity=check.severity,
            )
            record.set_guideline(check.guideline)
            report.add_record(record=record)
    else:
        resource_id = get_resource_id(entity_conf)
        if not resource_id:
            return report
        # resources without checks, but not existing ones
        report.extra_resources.add(
            ExtraResource(
                file_abs_path=file_abs_path,
                file_path=k8_file_path,
                resource=resource_id,
            )
        )
    return report
def mutate_kubernetes_graph_results(
    self,
    root_folder: str | None,
    runner_filter: RunnerFilter,
    report: Report,
    checks_results: dict[BaseGraphCheck, list[_CheckResult]],
) -> Report:
    """Convert graph-check results into Record entries on *report*.

    Resolves each entity's source context (code lines, line range) via
    get_entity_context and appends one Record per check result.
    """
    # Moves report generation logic out of run() method in Runner class.
    # Allows function overriding of a much smaller function than run() for other "child" frameworks such as Kustomize, Helm
    # Where Kubernetes CHECKS are needed, but the specific file references are to another framework for the user output (or a mix of both).
    if not checks_results:
        return report
    for check, check_results in checks_results.items():
        for check_result in check_results:
            entity = check_result["entity"]
            entity_file_path = entity[CustomAttributes.FILE_PATH]
            entity_file_abs_path = _get_entity_abs_path(root_folder, entity_file_path)
            entity_context = self.get_entity_context(entity=entity, entity_file_path=entity_file_path)
            clean_check_result = create_check_result(
                check_result=check_result, entity_context=entity_context, check_id=check.id
            )
            record = Record(
                check_id=check.id,
                check_name=check.name,
                check_result=clean_check_result,
                code_block=entity_context.get("code_lines") or [],
                file_path=get_relative_file_path(entity_file_abs_path, root_folder),
                file_line_range=[entity_context.get("start_line") or 0, entity_context.get("end_line") or 0],
                resource=entity[CustomAttributes.ID],
                evaluations={},
                check_class=check.__class__.__module__,
                file_abs_path=entity_file_abs_path,
                severity=check.severity
            )
            record.set_guideline(check.guideline)
            report.add_record(record=record)
    return report
def get_entity_context(self, entity: dict[str, Any], entity_file_path: str) -> _EntityContext:
    """Look up the source context (code lines, line range) for a graph entity.

    Nested pods synthesized for graph lookups carry a parent resource id and
    are resolved through their parent's context; other nested types are
    logged and get an empty context.
    """
    if PARENT_RESOURCE_ID_KEY_NAME not in entity:
        # regular resource: straight lookup by its own id
        # self.context not being None is checked in the caller method
        return self.context[entity_file_path][entity[CustomAttributes.ID]]  # type:ignore[index]
    if entity[CustomAttributes.RESOURCE_TYPE] == "Pod":
        # 'virtual' nested pod: reuse the parent deployment's context
        # self.context not being None is checked in the caller method
        return self.context[entity_file_path][entity[PARENT_RESOURCE_ID_KEY_NAME]]  # type:ignore[index]
    logging.info(
        "Unsupported nested resource type for Kubernetes graph edges. "
        f"Type: {entity[CustomAttributes.RESOURCE_TYPE]} Parent: {entity[PARENT_RESOURCE_ID_KEY_NAME]}"
    )
    return {}
def extract_images(
    self,
    graph_connector: DiGraph | None = None,
    definitions: None = None,
    definitions_raw: dict[str, list[tuple[int, str]]] | None = None
) -> list[Image]:
    """Collect container images referenced by resources in the graph."""
    if not graph_connector:
        # should not happen
        return []
    referencer = KubernetesImageReferencerManager(graph_connector=graph_connector)
    return referencer.extract_images_from_resources()
def find_kustomize_directories(
    root_folder: str | None, files: list[str] | None, excluded_paths: list[str]
) -> list[str]:
    """Collect directories that contain a supported kustomization file.

    Explicit *files* contribute their parent directories; *root_folder* is
    walked recursively with ignored/excluded paths pruned along the way.
    """
    excluded_paths = excluded_paths or []
    found_dirs: list[str] = []
    if files:
        logging.info('Running with --file argument; file must be a kustomization.yaml file')
        found_dirs.extend(
            os.path.dirname(file)
            for file in files
            if os.path.basename(file) in Runner.kustomizeSupportedFileTypes
        )
    if root_folder:
        for current_root, dir_names, file_names in os.walk(root_folder):
            filter_ignored_paths(current_root, dir_names, excluded_paths)
            filter_ignored_paths(current_root, file_names, excluded_paths)
            found_dirs.extend(
                os.path.abspath(current_root)
                for file_name in file_names
                if file_name in Runner.kustomizeSupportedFileTypes
            )
    return found_dirs
2,484 | import hmac
from hashlib import sha1, sha256
import os
from google.cloud import secretmanager
PROJECT_NAME = os.environ.get("PROJECT_NAME")
def get_secret(project_name, secret_name, version_num):
    """
    Returns secret payload from Cloud Secret Manager

    Args:
        project_name: GCP project that owns the secret
        secret_name: name of the secret to read
        version_num: secret version to access (e.g. "latest")

    Returns:
        The raw secret payload bytes, or None when any API call fails
        (the error is printed and swallowed).
    """
    try:
        client = secretmanager.SecretManagerServiceClient()
        name = client.secret_version_path(
            project_name, secret_name, version_num
        )
        secret = client.access_secret_version(name)
        return secret.payload.data
    except Exception as e:
        # NOTE(review): failures fall through to an implicit None return;
        # callers that feed the result into hmac.new() will then raise.
        print(e)
The provided code snippet includes necessary dependencies for implementing the `github_verification` function. Write a Python function `def github_verification(signature, body)` to solve the following problem:
Verifies that the signature received from the github event is accurate
Here is the function:
def github_verification(signature, body):
    """
    Verifies that the signature received from the github event is accurate

    Args:
        signature: value of the X-Hub-Signature header ("sha1=<hexdigest>")
        body: raw request body bytes that were signed

    Returns:
        True only when the signature matches the locally computed HMAC-SHA1.
    """
    expected_signature = "sha1="
    try:
        # Get secret from Cloud Secret Manager
        secret = get_secret(PROJECT_NAME, "event-handler", "latest")
        # Compute the hashed signature
        hashed = hmac.new(secret, body, sha1)
        expected_signature += hashed.hexdigest()
    except Exception as e:
        print(e)
        # Fail closed: without a computed digest the expected value would be
        # the bare "sha1=" prefix, which an attacker could trivially send.
        return False
    return hmac.compare_digest(signature, expected_signature)
2,485 | import hmac
from hashlib import sha1, sha256
import os
from google.cloud import secretmanager
PROJECT_NAME = os.environ.get("PROJECT_NAME")
def get_secret(project_name, secret_name, version_num):
    """
    Returns secret payload from Cloud Secret Manager

    Args:
        project_name: GCP project that owns the secret
        secret_name: name of the secret to read
        version_num: secret version to access (e.g. "latest")

    Returns:
        The raw secret payload bytes, or None when any API call fails
        (the error is printed and swallowed).
    """
    try:
        client = secretmanager.SecretManagerServiceClient()
        name = client.secret_version_path(
            project_name, secret_name, version_num
        )
        secret = client.access_secret_version(name)
        return secret.payload.data
    except Exception as e:
        # NOTE(review): failures fall through to an implicit None return;
        # callers that feed the result into hmac.new() will then raise.
        print(e)
The provided code snippet includes necessary dependencies for implementing the `circleci_verification` function. Write a Python function `def circleci_verification(signature, body)` to solve the following problem:
Verifies that the signature received from the circleci event is accurate
Here is the function:
def circleci_verification(signature, body):
    """
    Verifies that the signature received from the circleci event is accurate

    Args:
        signature: value of the circleci signature header ("v1=<hexdigest>")
        body: raw request body bytes that were signed

    Returns:
        True only when the signature matches the locally computed HMAC-SHA256.
    """
    expected_signature = "v1="
    try:
        # Get secret from Cloud Secret Manager
        secret = get_secret(PROJECT_NAME, "event-handler", "latest")
        # Compute the hashed signature
        hashed = hmac.new(secret, body, 'sha256')
        expected_signature += hashed.hexdigest()
    except Exception as e:
        print(e)
        # Fail closed: without a computed digest the expected value would be
        # the bare "v1=" prefix, which an attacker could trivially send.
        return False
    return hmac.compare_digest(signature, expected_signature)
2,486 | import hmac
from hashlib import sha1, sha256
import os
from google.cloud import secretmanager
PROJECT_NAME = os.environ.get("PROJECT_NAME")
def get_secret(project_name, secret_name, version_num):
    """
    Returns secret payload from Cloud Secret Manager

    Args:
        project_name: GCP project that owns the secret
        secret_name: name of the secret to read
        version_num: secret version to access (e.g. "latest")

    Returns:
        The raw secret payload bytes, or None when any API call fails
        (the error is printed and swallowed).
    """
    try:
        client = secretmanager.SecretManagerServiceClient()
        name = client.secret_version_path(
            project_name, secret_name, version_num
        )
        secret = client.access_secret_version(name)
        return secret.payload.data
    except Exception as e:
        # NOTE(review): failures fall through to an implicit None return;
        # callers that feed the result into hmac.new() will then raise.
        print(e)
The provided code snippet includes necessary dependencies for implementing the `pagerduty_verification` function. Write a Python function `def pagerduty_verification(signatures, body)` to solve the following problem:
Verifies that the signature received from the pagerduty event is accurate
Here is the function:
def pagerduty_verification(signatures, body):
    """
    Verifies that the signature received from the pagerduty event is accurate

    Args:
        signatures: comma-separated list from the X-PagerDuty-Signature header
        body: raw request body bytes that were signed

    Returns:
        True when any provided signature equals the expected "v1=<hexdigest>".

    Raises:
        Exception: when the signature header is empty.
    """
    if not signatures:
        raise Exception("Pagerduty signature is empty")
    # str.split(",") always yields at least one element, so the original
    # emptiness check of the resulting list was unreachable and is dropped.
    signature_list = signatures.split(",")
    expected_signature = "v1="
    try:
        # Get secret from Cloud Secret Manager
        secret = get_secret(PROJECT_NAME, "pager_duty_secret", "latest")
        # Compute the hashed signature
        hashed = hmac.new(secret, body, sha256)
        expected_signature += hashed.hexdigest()
    except Exception as e:
        print(e)
        # Fail closed: without a computed digest the expected value would be
        # the bare "v1=" prefix, which an attacker could trivially send.
        return False
    return expected_signature in signature_list
2,487 | import hmac
from hashlib import sha1, sha256
import os
from google.cloud import secretmanager
PROJECT_NAME = os.environ.get("PROJECT_NAME")
def get_secret(project_name, secret_name, version_num):
    """
    Returns secret payload from Cloud Secret Manager

    Args:
        project_name: GCP project that owns the secret
        secret_name: name of the secret to read
        version_num: secret version to access (e.g. "latest")

    Returns:
        The raw secret payload bytes, or None when any API call fails
        (the error is printed and swallowed).
    """
    try:
        client = secretmanager.SecretManagerServiceClient()
        name = client.secret_version_path(
            project_name, secret_name, version_num
        )
        secret = client.access_secret_version(name)
        return secret.payload.data
    except Exception as e:
        # NOTE(review): failures fall through to an implicit None return;
        # callers that feed the result into hmac.new() will then raise.
        print(e)
The provided code snippet includes necessary dependencies for implementing the `simple_token_verification` function. Write a Python function `def simple_token_verification(token, body)` to solve the following problem:
Verifies that the token received from the event is accurate
Here is the function:
def simple_token_verification(token, body):
    """
    Verifies that the token received from the event is accurate

    Args:
        token: shared token supplied by the webhook sender
        body: raw request body (unused; kept for a uniform verifier interface)

    Returns:
        True when the token matches the stored secret.

    Raises:
        Exception: when the token is empty.
    """
    if not token:
        raise Exception("Token is empty")
    secret = get_secret(PROJECT_NAME, "event-handler", "latest")
    # hmac.compare_digest gives a constant-time comparison, avoiding the
    # timing side channel of a plain == on the shared token.
    return hmac.compare_digest(secret.decode(), token)
2,488 | import json
import os
import sys
from flask import abort, Flask, request
from google.cloud import pubsub_v1
import sources
def publish_to_pubsub(source, msg, headers):
    """
    Publishes the message to Cloud Pub/Sub

    Args:
        source: event source name, used as the Pub/Sub topic id
        msg: message payload (must be a bytestring)
        headers: request headers, JSON-serialized into a message attribute

    Any failure (including a failed publish) is printed as a WARNING entry
    rather than raised.
    """
    try:
        publisher = pubsub_v1.PublisherClient()
        topic_path = publisher.topic_path(PROJECT_NAME, source)
        print(topic_path)
        # Pub/Sub data must be bytestring, attributes must be strings
        future = publisher.publish(
            topic_path, data=msg, headers=json.dumps(headers)
        )
        # block until the publish resolves so failures surface here
        exception = future.exception()
        if exception:
            raise Exception(exception)
        print(f"Published message: {future.result()}")
    except Exception as e:
        # Log any exceptions to stackdriver
        entry = dict(severity="WARNING", message=e)
        print(entry)
The provided code snippet includes necessary dependencies for implementing the `index` function. Write a Python function `def index()` to solve the following problem:
Receives event data from a webhook, checks if the source is authorized, checks if the signature is verified, and then sends the data to Pub/Sub.
Here is the function:
def index():
    """
    Receives event data from a webhook, checks if the source is authorized,
    checks if the signature is verified, and then sends the data to Pub/Sub.

    Returns:
        ("", 204) on success; aborts with 403 on any authorization or
        signature failure.
    """
    # Check if the source is authorized
    source = sources.get_source(request.headers)
    if source not in sources.AUTHORIZED_SOURCES:
        abort(403, f"Source not authorized: {source}")
    auth_source = sources.AUTHORIZED_SOURCES[source]
    # the signature may arrive as a header or as a query parameter
    signature_sources = {**request.headers, **request.args}
    signature = signature_sources.get(auth_source.signature, None)
    if not signature:
        abort(403, "Signature not found in request headers")
    body = request.data
    # Verify the signature
    verify_signature = auth_source.verification
    if not verify_signature(signature, body):
        abort(403, "Signature does not match expected signature")
    # Remove the Auth header so we do not publish it to Pub/Sub
    pubsub_headers = dict(request.headers)
    if "Authorization" in pubsub_headers:
        del pubsub_headers["Authorization"]
    # Publish to Pub/Sub
    publish_to_pubsub(source, body, pubsub_headers)
    # Flush the stdout to avoid log buffering.
    sys.stdout.flush()
    return "", 204
2,489 | import hashlib
import json
from google.cloud import bigquery
def is_unique(client, signature):
    """Return True when no four_keys.events_raw row already has *signature*.

    Uses a parameterized query so the signature value is passed as data and
    cannot alter the SQL (the original interpolated it with %-formatting).
    """
    sql = "SELECT signature FROM four_keys.events_raw WHERE signature = @signature"
    job_config = bigquery.QueryJobConfig(
        query_parameters=[bigquery.ScalarQueryParameter("signature", "STRING", signature)]
    )
    results = client.query(sql, job_config=job_config).result()
    return not results.total_rows
def insert_row_into_bigquery(event):
    """Insert a single event row into four_keys.events_raw.

    Skips the insert when a row with the same signature already exists
    (deduplication via is_unique).  Insert errors are logged as WARNING
    entries rather than raised.

    Raises:
        Exception: if *event* is falsy.
    """
    if not event:
        raise Exception("No data to insert")
    # Set up bigquery instance
    client = bigquery.Client()
    dataset_id = "four_keys"
    table_id = "events_raw"
    if is_unique(client, event["signature"]):
        table_ref = client.dataset(dataset_id).table(table_id)
        table = client.get_table(table_ref)
        # Insert row
        row_to_insert = [
            (
                event["event_type"],
                event["id"],
                event["metadata"],
                event["time_created"],
                event["signature"],
                event["msg_id"],
                event["source"],
            )
        ]
        bq_errors = client.insert_rows(table, row_to_insert)
        # If errors, log to Stackdriver
        if bq_errors:
            entry = {
                "severity": "WARNING",
                "msg": "Row not inserted.",
                "errors": bq_errors,
                "row": row_to_insert,
            }
            print(json.dumps(entry))
2,490 | import hashlib
import json
from google.cloud import bigquery
def is_unique(client, signature):
    """Return True when no four_keys.events_raw row already has *signature*.

    Uses a parameterized query so the signature value is passed as data and
    cannot alter the SQL (the original interpolated it with %-formatting).
    """
    sql = "SELECT signature FROM four_keys.events_raw WHERE signature = @signature"
    job_config = bigquery.QueryJobConfig(
        query_parameters=[bigquery.ScalarQueryParameter("signature", "STRING", signature)]
    )
    results = client.query(sql, job_config=job_config).result()
    return not results.total_rows
def insert_row_into_events_enriched(event):
    """Insert a single enriched-event row into four_keys.events_enriched.

    Skips the insert when a row with the same raw-event signature already
    exists (deduplication via is_unique).  Insert errors are logged as
    WARNING entries rather than raised.

    Raises:
        Exception: if *event* is falsy.
    """
    if not event:
        raise Exception("No data to insert")
    # Set up bigquery instance
    client = bigquery.Client()
    dataset_id = "four_keys"
    table_id = "events_enriched"
    if is_unique(client, event["events_raw_signature"]):
        table_ref = client.dataset(dataset_id).table(table_id)
        table = client.get_table(table_ref)
        # Insert row
        row_to_insert = [
            (
                event["events_raw_signature"],
                event["enriched_metadata"]
            )
        ]
        bq_errors = client.insert_rows(table, row_to_insert)
        # If errors, log to Stackdriver
        if bq_errors:
            entry = {
                "severity": "WARNING",
                "msg": "Row not inserted.",
                "errors": bq_errors,
                "row": row_to_insert,
            }
            print(json.dumps(entry))
2,491 | import hashlib
import json
from google.cloud import bigquery
def create_unique_id(msg):
    """Return a deterministic sha1 hex digest of *msg*'s JSON serialization."""
    serialized = json.dumps(msg).encode("utf-8")
    return hashlib.sha1(serialized).hexdigest()
def flatten(d, sep="_"):
    """Flatten nested dicts/lists into a single-level dict.

    Nested dict keys are joined with *sep*; list elements contribute their
    index as the key segment.  A bare scalar maps from "" to itself.
    """
    obj = {}

    def recurse(t, parent_key=""):
        if isinstance(t, list):
            for i in range(len(t)):
                recurse(t[i], parent_key + sep + str(i) if parent_key else str(i))
        elif isinstance(t, dict):
            for k, v in t.items():
                recurse(v, parent_key + sep + k if parent_key else k)
        else:
            obj[parent_key] = t

    recurse(d)
    return obj


def compare_dicts(dict_a, dict_b):
    """Structurally compare two (possibly nested) dicts.

    Checks that the flattened key sets match, that values at shared keys
    have the same type, and that string values have the same length.

    Returns:
        "pass" when no differences are found, otherwise a newline-joined
        list of error descriptions.
    """
    errors = []
    # flatten any nested structures, so we only need one pass
    flat_dict_a = flatten(dict_a)
    flat_dict_b = flatten(dict_b)
    if flat_dict_a.keys() != flat_dict_b.keys():
        errors.append("dictionary keys do not match")
    for key, value_a in flat_dict_a.items():
        # Fix: only compare keys present on both sides; the original indexed
        # flat_dict_b unconditionally and raised KeyError on a key-set
        # mismatch instead of reporting it.
        if key not in flat_dict_b:
            continue
        value_b = flat_dict_b[key]
        if not isinstance(value_a, type(value_b)):
            errors.append(
                f"type mismatch comparing '{key}': {type(value_a).__name__} != {type(value_b).__name__}"
            )
        elif isinstance(value_a, str) and len(value_a) != len(value_b):
            errors.append(
                f"length mismatch comparing strings in '{key}': {len(value_a)} != {len(value_b)}"
            )
    if errors:
        return "\n".join(errors)
    return "pass"
2,493 | from __future__ import annotations
import argparse
import datetime
import hmac
import json
import math
import os
import random
import secrets
import time
import sys
from hashlib import sha1
from urllib.request import Request, urlopen
def make_changes(num_changes, vcs, event_timespan, before=None):
    """Build one synthetic changeset event.

    Args:
        num_changes: number of commits to generate in the changeset
        vcs: version control system flavour, "github" or "gitlab"
        event_timespan: window (in seconds) before "now" in which commit
            timestamps are placed
        before: sha recorded as the changeset's "before" commit; a random
            sha is generated when falsy

    Returns:
        A dict shaped like a github or gitlab push-event payload; the commit
        with the newest timestamp becomes head_commit / checkout_sha.

    Raises:
        ValueError: when vcs is neither "github" nor "gitlab"
    """
    if not before:
        before = secrets.token_hex(20)  # random previous sha
    newest_seen = time.time() - event_timespan
    head_commit = None
    changes = []
    for _ in range(num_changes):
        commit_time = time.time() - random.randrange(0, event_timespan)
        commit = {
            "id": secrets.token_hex(20),
            "timestamp": datetime.datetime.fromtimestamp(commit_time),
        }
        # track the most recent commit; it becomes the changeset head
        if commit_time > newest_seen:
            newest_seen = commit_time
            head_commit = commit
        changes.append(commit)
    if vcs == "gitlab":
        return {
            "object_kind": "push",
            "before": before,
            "checkout_sha": head_commit["id"],
            "commits": changes,
        }
    if vcs == "github":
        return {
            "head_commit": head_commit,
            "before": before,
            "commits": changes,
        }
    raise ValueError("Version Control System options limited to github or gitlab.")
The provided code snippet includes necessary dependencies for implementing the `make_all_changesets` function. Write a Python function `def make_all_changesets(num_events: int, vcs: str, event_timespan: int, num_changes: int = None) -> list[dict]` to solve the following problem:
Make a list of changesets of length ``num_events`` Args: num_events (int): the number of changesets to generate vcs: the version control system being used (options include github or gitlab) event_timespan: time duration (in seconds) of timestamps of generated events num_changes: number of changes per changeset, defaults to a random uniform distribution between 1 and 5 (optional) Returns: all_changesets: a list of dictionaries of all created changesets
Here is the function:
def make_all_changesets(num_events: int, vcs: str, event_timespan: int, num_changes: int = None) -> list[dict]:
    """Make a list of changesets of length ``num_events``.

    Args:
        num_events (int): the number of changesets to generate
        vcs: the version control system being used ("github" or "gitlab")
        event_timespan: time duration (in seconds) of timestamps of generated events
        num_changes: number of changes per changeset; when omitted, a fresh
            random size between 1 and 4 (``randrange(1, 5)``) is drawn for
            every changeset (optional)
    Returns:
        all_changesets: a list of dictionaries of all created changesets
    """
    all_changesets = []
    prev_change_sha = secrets.token_hex(20)  # random parent sha for the first event
    for _ in range(num_events):
        # BUG FIX: the original assigned the random size back to
        # ``num_changes``, so the first draw stuck for every subsequent
        # changeset; use a per-iteration local instead.
        changes_in_event = num_changes if num_changes else random.randrange(1, 5)
        changeset = make_changes(
            changes_in_event,
            vcs,
            event_timespan,
            before=prev_change_sha
        )
        # Chain events: the next event's "before" is this event's head sha
        # (gitlab stores it in "checkout_sha", github in "head_commit").
        prev_change_sha = changeset.get("checkout_sha") or changeset.get("head_commit", {}).get("id")
        all_changesets.append(changeset)
    return all_changesets
2,494 | from __future__ import annotations
import argparse
import datetime
import hmac
import json
import math
import os
import random
import secrets
import time
import sys
from hashlib import sha1
from urllib.request import Request, urlopen
The provided code snippet includes necessary dependencies for implementing the `make_ind_changes_from_changeset` function. Write a Python function `def make_ind_changes_from_changeset(changeset, vcs)` to solve the following problem:
Make individual change from a changeset Args: changeset: Changeset to make individual change from vcs: the version control system being used (options include github or gitlab Returns:
Here is the function:
def make_ind_changes_from_changeset(changeset, vcs):
    """Expand a changeset into individual single-commit push events.

    Args:
        changeset: changeset whose commits are replayed one at a time
        vcs: the version control system being used ("github" or "gitlab")
    Returns:
        list of single-commit events, chained through their "before" shas
    Raises:
        ValueError: if ``vcs`` is neither "github" nor "gitlab"
    """
    changeset_sha = changeset.get("checkout_sha") or changeset.get("head_commit", {}).get("id")
    # Git's null sha: GL and GH both use it as the first "before" once a
    # branch starts off of main.
    prev_sha = "0000000000000000000000000000000000000000"
    ind_changes = []
    for commit in changeset["commits"]:
        if commit["id"] == changeset_sha:
            # The head commit is already represented by the changeset event.
            continue
        if vcs == "gitlab":
            curr = {
                "object_kind": "push",
                "before": prev_sha,
                "checkout_sha": commit["id"],
                "commits": [commit],
            }
        elif vcs == "github":
            curr = {
                "head_commit": commit,
                "before": prev_sha,
                "commits": [commit],
            }
        else:
            raise ValueError("Version Control System options limited to github or gitlab.")
        prev_sha = commit["id"]
        ind_changes.append(curr)
    return ind_changes
2,495 | from __future__ import annotations
import argparse
import datetime
import hmac
import json
import math
import os
import random
import secrets
import time
import sys
from hashlib import sha1
from urllib.request import Request, urlopen
def create_github_deploy_event(change):
    """Build a successful GitHub ``deployment_status`` payload for ``change``."""
    return {
        "deployment_status": {
            "updated_at": change["timestamp"],
            "id": secrets.token_hex(20),  # random deployment-status id
            "state": "success",
        },
        "deployment": {"sha": change["id"]},
    }
2,496 | from __future__ import annotations
import argparse
import datetime
import hmac
import json
import math
import os
import random
import secrets
import time
import sys
from hashlib import sha1
from urllib.request import Request, urlopen
def create_gitlab_pipeline_event(changes):
    """Build a successful GitLab pipeline event for the head commit of ``changes``.

    Returns None when no commit matches the changeset's checkout_sha.
    """
    pipeline = None
    head_sha = changes["checkout_sha"]
    for commit in changes["commits"]:
        if commit["id"] == head_sha:
            pipeline = {
                "object_kind": "pipeline",
                "object_attributes": {
                    "created_at": commit["timestamp"],
                    "id": random.randrange(0, 1000),  # random pipeline id
                    "status": "success",
                },
                "commit": commit,
            }
    return pipeline
2,497 | from __future__ import annotations
import argparse
import datetime
import hmac
import json
import math
import os
import random
import secrets
import time
import sys
from hashlib import sha1
from urllib.request import Request, urlopen
def create_gitlab_deploy_event(changes, deploy_id=None):
    """Build a successful GitLab deployment event for the head commit.

    Args:
        changes: changeset whose head commit is being "deployed"
        deploy_id: deployment id; a random one is generated when omitted
    Returns:
        deployment event dict, or None if no commit matches checkout_sha
    """
    deployment = None
    head_sha = changes["checkout_sha"]
    if not deploy_id:
        deploy_id = random.randrange(0, 1000)
    for commit in changes["commits"]:
        if commit["id"] == head_sha:
            deployment = {
                "object_kind": "deployment",
                "status": "success",
                # GitLab reports this field with an explicit UTC offset
                "status_changed_at": commit["timestamp"].strftime("%F %T +0200"),
                "deployment_id": deploy_id,
                "commit_url": f"http://example.com/root/test/commit/{head_sha}",
            }
    return deployment
2,498 | from __future__ import annotations
import argparse
import datetime
import hmac
import json
import math
import os
import random
import secrets
import time
import sys
from hashlib import sha1
from urllib.request import Request, urlopen
def make_github_issue(root_cause):
    """Build a GitHub issue event labeled "Incident" pointing at a root cause.

    The issue body encodes the root-cause change id so downstream metrics
    can link the incident back to the change.
    """
    return {
        "issue": {
            "created_at": root_cause["timestamp"],
            "updated_at": datetime.datetime.now(),
            "closed_at": datetime.datetime.now(),
            "number": random.randrange(0, 1000),  # random issue number
            "labels": [{"name": "Incident"}],
            "body": "root cause: %s" % root_cause["id"],
        },
        "repository": {"name": "foobar"},
    }
2,499 | from __future__ import annotations
import argparse
import datetime
import hmac
import json
import math
import os
import random
import secrets
import time
import sys
from hashlib import sha1
from urllib.request import Request, urlopen
def make_gitlab_issue(changes):
    """Build a GitLab issue event ("Incident") for the head commit of ``changes``.

    Returns None when no commit matches the changeset's checkout_sha.
    """
    issue = None
    head_sha = changes["checkout_sha"]
    for commit in changes["commits"]:
        if commit["id"] == head_sha:
            issue = {
                "object_kind": "issue",
                "object_attributes": {
                    "created_at": commit["timestamp"],
                    "updated_at": datetime.datetime.now(),
                    "closed_at": datetime.datetime.now(),
                    "id": random.randrange(0, 1000),  # random issue id
                    "labels": [{"title": "Incident"}],
                    "description": "root cause: %s" % commit["id"],
                },
            }
    return issue
2,500 | from __future__ import annotations
import argparse
import datetime
import hmac
import json
import math
import os
import random
import secrets
import time
import sys
from hashlib import sha1
from urllib.request import Request, urlopen
def make_webhook_request(vcs, webhook_url, secret, event_type, data, token=None):
    """Build a POST request that mimics a github/gitlab webhook delivery.

    Args:
        vcs: "github" or "gitlab"; selects which auth headers are attached
        webhook_url: destination URL for the mock delivery
        secret: webhook secret (HMAC key for github, plain token for gitlab)
        event_type: value of the X-*-Event header
        data: JSON-serializable payload
        token: optional bearer token for an Authorization header
    Returns:
        urllib.request.Request ready to be sent
    """
    payload = json.dumps(data, default=str).encode()
    req = Request(webhook_url, payload)
    if vcs == "github":
        # GitHub signs the payload with HMAC-SHA1 keyed by the secret.
        digest = hmac.new(secret.encode(), payload, sha1).hexdigest()
        req.add_header("X-Github-Event", event_type)
        req.add_header("X-Hub-Signature", f"sha1={digest}")
        req.add_header("User-Agent", "GitHub-Hookshot/mock")
    if vcs == "gitlab":
        req.add_header("X-Gitlab-Event", event_type)
        req.add_header("X-Gitlab-Token", secret)
    req.add_header("Content-Type", "application/json")
    # Marks the delivery as generated data for downstream consumers.
    req.add_header("Mock", True)
    if token:
        req.add_header("Authorization", f"Bearer {token}")
    return req
def post_to_webhook(vcs, webhook_url, secret, event_type, data, token=None):
    """POST a mock webhook event; return 1 on HTTP 204, otherwise 0."""
    req = make_webhook_request(vcs, webhook_url, secret, event_type, data, token)
    response = urlopen(req)
    return 1 if response.getcode() == 204 else 0
2,501 | import base64
import os
import json
import shared
from flask import Flask, request
def process_new_source_event(msg):
    """Map a raw Pub/Sub message onto the placeholder event-record schema.

    This is template code: every field except metadata/msg_id is a literal
    placeholder to be replaced for a real source.
    """
    decoded = base64.b64decode(msg["data"]).decode("utf-8").strip()
    metadata = json.loads(decoded)
    # [TODO: Parse the msg data to map to the event object below]
    new_source_event = {
        "event_type": "event_type",   # eg "push", "pull_reqest", etc
        "id": "e_id",                 # object ID, eg pull request ID
        "metadata": json.dumps(metadata),
        "time_created": 0,            # timestamp of the event
        "signature": "signature",     # unique event signature
        "msg_id": msg["message_id"],  # pubsub message id
        "source": "source",           # name of the source, eg "github"
    }
    print(new_source_event)
    return new_source_event
The provided code snippet includes necessary dependencies for implementing the `index` function. Write a Python function `def index()` to solve the following problem:
Receives messages from a push subscription from Pub/Sub. Parses the message, and inserts it into BigQuery.
Here is the function:
def index():
    """
    Receives messages from a push subscription from Pub/Sub.
    Parses the message, and inserts it into BigQuery.
    """
    event = None
    # Guard clauses: reject anything that isn't a valid Pub/Sub push.
    if not request.is_json:
        raise Exception("Expecting JSON payload")
    envelope = request.get_json()
    if "message" not in envelope:
        raise Exception("Not a valid Pub/Sub Message")
    msg = envelope["message"]
    if "attributes" not in msg:
        raise Exception("Missing pubsub attributes")
    try:
        # [TODO: Replace mock function below]
        event = process_new_source_event(msg)
        # [Do not edit below]
        shared.insert_row_into_bigquery(event)
    except Exception as e:
        # Log-and-continue: a bad message must not break the push endpoint.
        print(json.dumps({
            "severity": "WARNING",
            "msg": "Data not saved to BigQuery",
            "errors": str(e),
            "json_payload": envelope,
        }))
    return "", 204
2,502 | import base64
import os
import json
import shared
from flask import Flask, request
def process_argocd_event(msg):
    """Map an Argo CD notification to the standard event-record schema.

    Every Argo CD message is recorded as a "deployment" event.
    """
    metadata = json.loads(base64.b64decode(msg["data"]).decode("utf-8").strip())
    signature = shared.create_unique_id(msg)  # unique hash for the event
    argocd_event = {
        "event_type": "deployment",
        "id": metadata["id"],
        "metadata": json.dumps(metadata),
        "time_created": metadata["time"],
        "signature": signature,
        "msg_id": msg["message_id"],     # pubsub message id
        "source": "argocd",
    }
    print(argocd_event)
    return argocd_event
The provided code snippet includes necessary dependencies for implementing the `index` function. Write a Python function `def index()` to solve the following problem:
Receives messages from a push subscription from Pub/Sub. Parses the message, and inserts it into BigQuery.
Here is the function:
def index():
    """
    Receives messages from a push subscription from Pub/Sub.
    Parses the message, and inserts it into BigQuery.
    """
    event = None
    if not request.is_json:
        raise Exception("Expecting JSON payload")
    envelope = request.get_json()
    print(f"envelope recieved: {envelope}")
    # Guard clauses: reject anything that isn't a valid Pub/Sub push.
    if not envelope:
        raise Exception("Expecting JSON payload")
    if "message" not in envelope:
        raise Exception("Not a valid Pub/Sub Message")
    msg = envelope["message"]
    if "attributes" not in msg:
        raise Exception("Missing pubsub attributes")
    try:
        event = process_argocd_event(msg)
        # [Do not edit below]
        shared.insert_row_into_bigquery(event)
    except Exception as e:
        # Log-and-continue: a bad message must not break the push endpoint.
        print(json.dumps({
            "severity": "WARNING",
            "msg": "Data not saved to BigQuery",
            "errors": str(e),
            "json_payload": envelope,
        }))
    return "", 204
2,503 | import base64
import os
import json
import shared
from flask import Flask, request
def process_cloud_build_event(attr, msg):
    """Map a Cloud Build Pub/Sub notification to the standard event-record schema.

    Args:
        attr: pubsub message attributes (must contain "buildId")
        msg: the pubsub message wrapping the build notification
    """
    build_id = attr["buildId"]
    signature = shared.create_unique_id(msg)  # unique hash for the event
    metadata = json.loads(base64.b64decode(msg["data"]).decode("utf-8").strip())
    # Prefer the most recent timestamp the build has reached so far.
    time_created = (metadata.get("finishTime")
                    or metadata.get("startTime")
                    or metadata.get("createTime"))
    return {
        "event_type": "build",
        "id": build_id,
        "metadata": json.dumps(metadata),
        "time_created": time_created,
        "signature": signature,
        "msg_id": msg["message_id"],
        "source": "cloud_build",
    }
The provided code snippet includes necessary dependencies for implementing the `index` function. Write a Python function `def index()` to solve the following problem:
Receives messages from a push subscription from Pub/Sub. Parses the message, and inserts it into BigQuery.
Here is the function:
def index():
    """
    Receives messages from a push subscription from Pub/Sub.
    Parses the message, and inserts it into BigQuery.
    """
    event = None
    # Guard clauses: reject anything that isn't a valid Pub/Sub push.
    if not request.is_json:
        raise Exception("Expecting JSON payload")
    envelope = request.get_json()
    if "message" not in envelope:
        raise Exception("Not a valid Pub/Sub Message")
    msg = envelope["message"]
    if "attributes" not in msg:
        raise Exception("Missing pubsub attributes")
    try:
        attr = msg["attributes"]
        # Process Cloud Build event
        if "buildId" in attr:
            event = process_cloud_build_event(attr, msg)
        shared.insert_row_into_bigquery(event)
    except Exception as e:
        # Log-and-continue: a bad message must not break the push endpoint.
        print(json.dumps({
            "severity": "WARNING",
            "msg": "Data not saved to BigQuery",
            "errors": str(e),
            "json_payload": envelope,
        }))
    return "", 204
2,504 | import base64
import os
import json
import shared
from flask import Flask, request
def process_circleci_event(headers, msg):
    """Map a CircleCI webhook delivery to the standard event-record schema.

    Args:
        headers: webhook HTTP headers (event type and signature)
        msg: the pubsub message wrapping the webhook body
    Raises:
        Exception: for event types other than "workflow-completed" /
            "job-completed".
    """
    event_type = headers["Circleci-Event-Type"]
    signature = headers["Circleci-Signature"]
    metadata = json.loads(base64.b64decode(msg["data"]).decode("utf-8").strip())
    if event_type not in {"workflow-completed", "job-completed"}:
        raise Exception("Unsupported CircleCI event: '%s'" % event_type)
    return {
        "event_type": event_type,
        "id": metadata["id"],
        "metadata": json.dumps(metadata),
        "time_created": metadata["happened_at"],
        "signature": signature,
        "msg_id": msg["message_id"],
        "source": "circleci",
    }
The provided code snippet includes necessary dependencies for implementing the `index` function. Write a Python function `def index()` to solve the following problem:
Receives messages from a push subscription from Pub/Sub. Parses the message, and inserts it into BigQuery.
Here is the function:
def index():
    """
    Receives messages from a push subscription from Pub/Sub.
    Parses the message, and inserts it into BigQuery.
    """
    event = None
    # Guard clauses: reject anything that isn't a valid Pub/Sub push.
    if not request.is_json:
        raise Exception("Expecting JSON payload")
    envelope = request.get_json()
    if "message" not in envelope:
        raise Exception("Not a valid Pub/Sub Message")
    msg = envelope["message"]
    if "attributes" not in msg:
        raise Exception("Missing pubsub attributes")
    try:
        attr = msg["attributes"]
        # Header Event info
        if "headers" in attr:
            headers = json.loads(attr["headers"])
            # Process CircleCI Events
            if "Circleci-Event-Type" in headers:
                event = process_circleci_event(headers, msg)
        shared.insert_row_into_bigquery(event)
    except Exception as e:
        # Log-and-continue: a bad message must not break the push endpoint.
        print(json.dumps({
            "severity": "WARNING",
            "msg": "Data not saved to BigQuery",
            "errors": str(e),
            "json_payload": envelope,
        }))
    return "", 204
2,505 | import base64
import os
import json
import shared
from flask import Flask, request
def process_pagerduty_event(msg):
    """Map a PagerDuty webhook delivery to the standard event-record schema.

    Raises:
        Warning: for event types other than "incident.triggered" /
            "incident.resolved".
    """
    metadata = json.loads(base64.b64decode(msg["data"]).decode("utf-8").strip())
    print(f"Metadata after decoding {metadata}")
    signature = shared.create_unique_id(msg)  # unique hash for the event
    event = metadata['event']
    event_type = event["event_type"]
    if event_type not in {"incident.triggered", "incident.resolved"}:
        raise Warning("Unsupported PagerDuty event: '%s'" % event_type)
    pagerduty_event = {
        "event_type": event_type,          # eg "incident.triggered"
        "id": event['id'],
        "metadata": json.dumps(metadata),
        "signature": signature,
        "msg_id": msg["message_id"],       # pubsub message id
        "time_created": event['occurred_at'],
        "source": "pagerduty",
    }
    print(f"Pager Duty event to metrics--------> {pagerduty_event}")
    return pagerduty_event
The provided code snippet includes necessary dependencies for implementing the `index` function. Write a Python function `def index()` to solve the following problem:
Receives messages from a push subscription from Pub/Sub. Parses the message, and inserts it into BigQuery.
Here is the function:
def index():
    """
    Receives messages from a push subscription from Pub/Sub.
    Parses the message, and inserts it into BigQuery.
    """
    event = None
    # Guard clauses: reject anything that isn't a valid Pub/Sub push.
    if not request.is_json:
        raise Exception("Expecting JSON payload")
    envelope = request.get_json()
    if "message" not in envelope:
        raise Exception("Not a valid Pub/Sub Message")
    msg = envelope["message"]
    if "attributes" not in msg:
        raise Exception("Missing pubsub attributes")
    try:
        event = process_pagerduty_event(msg)
        print(f" Event which is to be inserted into Big query {event}")
        if event:
            # [Do not edit below]
            shared.insert_row_into_bigquery(event)
    except Exception as e:
        # Log-and-continue: a bad message must not break the push endpoint.
        entry = {
            "severity": "WARNING",
            "msg": "Data not saved to BigQuery",
            "errors": str(e),
            "json_payload": envelope,
        }
        print(f"EXCEPTION raised {json.dumps(entry)}")
    return "", 204
2,506 | import base64
import os
import json
import shared
from cloudevents.http import from_http, to_json
from flask import Flask, request
def process_tekton_event(headers, msg):
    """Convert a Tekton CloudEvent delivery to the standard event-record schema.

    NOTE(review): if the payload carries neither "pipelineRun" nor
    "taskRun", ``uid`` is unbound and this raises NameError — presumably
    Tekton always sends one of the two; confirm upstream.
    """
    payload = base64.b64decode(msg["data"]).decode("utf-8").strip()
    cloud_event = from_http(headers, payload)
    # The run's uid identifies the event; taskRun wins when both are present.
    if "pipelineRun" in cloud_event.data:
        uid = cloud_event.data["pipelineRun"]["metadata"]["uid"]
    if "taskRun" in cloud_event.data:
        uid = cloud_event.data["taskRun"]["metadata"]["uid"]
    return {
        "event_type": cloud_event["type"],
        "id": uid,  # ID of the taskRun or pipelineRun
        "metadata": to_json(cloud_event).decode(),
        "time_created": cloud_event["time"],
        "signature": cloud_event["id"],  # unique CloudEvent id
        "msg_id": msg["message_id"],     # pubsub message id
        "source": "tekton",
    }
The provided code snippet includes necessary dependencies for implementing the `index` function. Write a Python function `def index()` to solve the following problem:
Receives messages from a push subscription from Pub/Sub. Parses the message, and inserts it into BigQuery.
Here is the function:
def index():
    """
    Receives messages from a push subscription from Pub/Sub.
    Parses the message, and inserts it into BigQuery.
    """
    event = None
    # Guard clauses: reject anything that isn't a valid Pub/Sub push.
    if not request.is_json:
        raise Exception("Expecting JSON payload")
    envelope = request.get_json()
    if "message" not in envelope:
        raise Exception("Not a valid Pub/Sub Message")
    msg = envelope["message"]
    if "attributes" not in msg:
        raise Exception("Missing pubsub attributes")
    try:
        attr = msg["attributes"]
        if "headers" in attr:
            headers = json.loads(attr["headers"])
            event = process_tekton_event(headers, msg)
        shared.insert_row_into_bigquery(event)
    except Exception as e:
        # Log-and-continue: a bad message must not break the push endpoint.
        print(json.dumps({
            "severity": "WARNING",
            "msg": "Data not saved to BigQuery",
            "errors": str(e),
            "json_payload": envelope,
        }))
    return "", 204
2,507 | import base64
from datetime import datetime
import os
import json
import shared
from flask import Flask, request
def process_gitlab_event(headers, msg):
    """Map a GitLab webhook delivery to the standard event-record schema.

    Args:
        headers: webhook HTTP headers (a "Mock" header marks generated data)
        msg: the pubsub message wrapping the webhook body
    Returns:
        dict ready for insertion into the events table
    Raises:
        Exception: for object kinds outside the supported set
    """
    # Unique hash for the event
    signature = shared.create_unique_id(msg)
    source = "gitlab"
    if "Mock" in headers:
        source += "mock"
    types = {"push", "merge_request",
             "note", "tag_push", "issue",
             "pipeline", "job", "deployment",
             "build"}
    metadata = json.loads(base64.b64decode(msg["data"]).decode("utf-8").strip())
    event_type = metadata["object_kind"]
    if event_type not in types:
        raise Exception("Unsupported Gitlab event: '%s'" % event_type)
    # BUG FIX: time_created was previously left unbound when a push event's
    # commit list did not contain the checkout sha (NameError); initialize
    # it so the publishTime fallback below can take over.
    time_created = None
    if event_type in ("push", "tag_push"):
        e_id = metadata["checkout_sha"]
        for commit in metadata["commits"]:
            if commit["id"] == e_id:
                time_created = commit["timestamp"]
    if event_type in ("merge_request", "note", "issue", "pipeline"):
        event_object = metadata["object_attributes"]
        e_id = event_object["id"]
        time_created = (
            event_object.get("updated_at") or
            event_object.get("finished_at") or
            event_object.get("created_at"))
    # BUG FIX: the comparisons below used `in ("job")` etc. -- string
    # membership, not tuple membership -- and the "job" branch read from
    # `event_object`, which is never bound on this path (NameError on every
    # job event). Job payloads carry their timestamps at the top level,
    # presumably as build_* fields like "build" events -- TODO confirm
    # against a live GitLab job webhook.
    if event_type == "job":
        e_id = metadata["build_id"]
        time_created = (
            metadata.get("build_finished_at") or
            metadata.get("build_started_at"))
    if event_type == "deployment":
        e_id = metadata["deployment_id"]
        time_created = metadata["status_changed_at"]
    if event_type == "build":
        e_id = metadata["build_id"]
        time_created = (
            metadata.get("build_finished_at") or
            metadata.get("build_started_at") or
            metadata.get("build_created_at"))
    # Some timestamps come in a format like "2021-04-28 21:50:00 +0200";
    # BigQuery does not accept this as a valid format, so remove the extra
    # timezone information when the value parses in that form.
    try:
        dt = datetime.strptime(time_created, '%Y-%m-%d %H:%M:%S %z')
        time_created = dt.strftime('%Y-%m-%d %H:%M:%S')
    # If the timestamp is not parsed correctly (or is None), default to the
    # string from the event payload / the publishTime fallback below.
    except Exception:
        pass
    gitlab_event = {
        "event_type": event_type,
        "id": e_id,
        "metadata": json.dumps(metadata),
        # If time_created not supplied by event, default to pub/sub publishTime
        "time_created": time_created or msg["publishTime"],
        "signature": signature,
        "msg_id": msg["message_id"],
        "source": source,
    }
    return gitlab_event
The provided code snippet includes necessary dependencies for implementing the `index` function. Write a Python function `def index()` to solve the following problem:
Receives messages from a push subscription from Pub/Sub. Parses the message, and inserts it into BigQuery.
Here is the function:
def index():
    """
    Receives messages from a push subscription from Pub/Sub.
    Parses the message, and inserts it into BigQuery.
    """
    event = None
    # Guard clauses: reject anything that isn't a valid Pub/Sub push.
    if not request.is_json:
        raise Exception("Expecting JSON payload")
    envelope = request.get_json()
    if "message" not in envelope:
        raise Exception("Not a valid Pub/Sub Message")
    msg = envelope["message"]
    if "attributes" not in msg:
        raise Exception("Missing pubsub attributes")
    try:
        attr = msg["attributes"]
        # Header Event info
        if "headers" in attr:
            headers = json.loads(attr["headers"])
            # Process Gitlab Events
            if "X-Gitlab-Event" in headers:
                event = process_gitlab_event(headers, msg)
        shared.insert_row_into_bigquery(event)
    except Exception as e:
        # Log-and-continue: a bad message must not break the push endpoint.
        print(json.dumps({
            "severity": "WARNING",
            "msg": "Data not saved to BigQuery",
            "errors": str(e),
            "json_payload": envelope,
        }))
    return "", 204
2,508 | import base64
import os
import json
import shared
from flask import Flask, request
def process_github_event(headers, msg):
    """Map a GitHub webhook delivery to the standard event-record schema.

    Args:
        headers: webhook HTTP headers (event type, signature, optional
            "Mock" marker for generated data)
        msg: the pubsub message wrapping the webhook body
    Raises:
        Exception: for event types outside the supported set
    """
    event_type = headers["X-Github-Event"]
    signature = headers["X-Hub-Signature"]
    source = "githubmock" if "Mock" in headers else "github"
    supported = {"push", "pull_request", "pull_request_review",
                 "pull_request_review_comment", "issues",
                 "issue_comment", "check_run", "check_suite", "status",
                 "deployment_status", "release"}
    if event_type not in supported:
        raise Exception("Unsupported GitHub event: '%s'" % event_type)
    metadata = json.loads(base64.b64decode(msg["data"]).decode("utf-8").strip())
    # Pick the event id and freshest timestamp per payload shape.
    if event_type == "push":
        time_created = metadata["head_commit"]["timestamp"]
        e_id = metadata["head_commit"]["id"]
    elif event_type == "pull_request":
        time_created = metadata["pull_request"]["updated_at"]
        e_id = metadata["repository"]["name"] + "/" + str(metadata["number"])
    elif event_type == "pull_request_review":
        time_created = metadata["review"]["submitted_at"]
        e_id = metadata["review"]["id"]
    elif event_type == "pull_request_review_comment":
        time_created = metadata["comment"]["updated_at"]
        e_id = metadata["comment"]["id"]
    elif event_type == "issues":
        time_created = metadata["issue"]["updated_at"]
        e_id = metadata["repository"]["name"] + "/" + str(metadata["issue"]["number"])
    elif event_type == "issue_comment":
        time_created = metadata["comment"]["updated_at"]
        e_id = metadata["comment"]["id"]
    elif event_type == "check_run":
        time_created = (metadata["check_run"]["completed_at"]
                        or metadata["check_run"]["started_at"])
        e_id = metadata["check_run"]["id"]
    elif event_type == "check_suite":
        time_created = (metadata["check_suite"]["updated_at"]
                        or metadata["check_suite"]["created_at"])
        e_id = metadata["check_suite"]["id"]
    elif event_type == "deployment_status":
        time_created = metadata["deployment_status"]["updated_at"]
        e_id = metadata["deployment_status"]["id"]
    elif event_type == "status":
        time_created = metadata["updated_at"]
        e_id = metadata["id"]
    else:  # release
        time_created = (metadata["release"]["published_at"]
                        or metadata["release"]["created_at"])
        e_id = metadata["release"]["id"]
    return {
        "event_type": event_type,
        "id": e_id,
        "metadata": json.dumps(metadata),
        "time_created": time_created,
        "signature": signature,
        "msg_id": msg["message_id"],
        "source": source,
    }
The provided code snippet includes necessary dependencies for implementing the `index` function. Write a Python function `def index()` to solve the following problem:
Receives messages from a push subscription from Pub/Sub. Parses the message, and inserts it into BigQuery.
Here is the function:
def index():
    """
    Receives messages from a push subscription from Pub/Sub.
    Parses the message, and inserts it into BigQuery.
    """
    event = None
    # Guard clauses: reject anything that isn't a valid Pub/Sub push.
    if not request.is_json:
        raise Exception("Expecting JSON payload")
    envelope = request.get_json()
    if "message" not in envelope:
        raise Exception("Not a valid Pub/Sub Message")
    msg = envelope["message"]
    if "attributes" not in msg:
        raise Exception("Missing pubsub attributes")
    try:
        attr = msg["attributes"]
        # Header Event info
        if "headers" in attr:
            headers = json.loads(attr["headers"])
            # Process Github Events
            if "X-Github-Event" in headers:
                event = process_github_event(headers, msg)
        shared.insert_row_into_bigquery(event)
    except Exception as e:
        # Log-and-continue: a bad message must not break the push endpoint.
        print(json.dumps({
            "severity": "WARNING",
            "msg": "Data not saved to BigQuery",
            "errors": str(e),
            "json_payload": envelope,
        }))
    return "", 204
2,509 | import os
import nox
The provided code snippet includes necessary dependencies for implementing the `_collect_dirs` function. Write a Python function `def _collect_dirs( start_dir, suffix="_test.py", recurse_further=False, )` to solve the following problem:
Recursively collects a list of dirs that contain a file matching the given suffix. This works by listing the contents of directories and finding directories that have `*_test.py` files.
Here is the function:
def _collect_dirs(
start_dir,
suffix="_test.py",
recurse_further=False,
):
"""Recursively collects a list of dirs that contain a file matching the given suffix.
This works by listing the contents of directories and finding
directories that have `*_test.py` files.
"""
# Collect all the directories that have tests in them.
for parent, subdirs, files in os.walk(start_dir):
if "./." in parent:
continue # Skip top-level dotfiles
elif any(f for f in files if f.endswith(suffix)):
# Don't recurse further for tests, since py.test will do that.
if not recurse_further:
del subdirs[:]
# This dir has desired files in it. yield it.
yield parent
else:
# Filter out dirs we don't want to recurse into
subdirs[:] = [
s for s in subdirs if s[0].isalpha()
] | Recursively collects a list of dirs that contain a file matching the given suffix. This works by listing the contents of directories and finding directories that have `*_test.py` files. |
2,510 | import os
import nox
def _session_tests(session, folder):
    """Run py.test inside ``folder``, installing its requirements first."""
    session.chdir(folder)
    if os.path.exists("requirements.txt"):
        session.install("-r", "requirements.txt")
    # Pytest returns 5 when no tests are collected, which can happen on
    # travis where slow and flaky tests are excluded; treat that as success.
    # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
    session.run(
        "pytest",
        *session.posargs,
        success_codes=[0, 5],
    )
The provided code snippet includes necessary dependencies for implementing the `py` function. Write a Python function `def py(session, folder)` to solve the following problem:
Runs py.test for a folder using the specified version of Python.
Here is the function:
def py(session, folder):
    """Runs py.test for a folder using the specified version of Python."""
    # Shared test deps first; the folder's own requirements.txt is
    # installed by _session_tests.
    session.install("-r", "requirements-test.txt")
    _session_tests(session, folder)
2,511 | import os
import nox
def _determine_local_import_names(start_dir):
"""Determines all import names that should be considered "local".
This is used when running the linter to insure that import order is
properly checked.
"""
file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)]
return [
basename
for basename, extension in file_ext_pairs
if extension == ".py"
or os.path.isdir(os.path.join(start_dir, basename))
and basename not in ("__pycache__")
]
# Baseline flake8 flags shared by the lint sessions: show offending source,
# treat gettext's `_` as a builtin, use Google-style import ordering, skip
# generated/vendored paths, and relax the ignore list for an 88-character
# line length.
FLAKE8_COMMON_ARGS = [
    "--show-source",
    "--builtin=gettext",
    "--max-complexity=20",
    "--import-order-style=google",
    "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py",
    "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I100,I201,I202",
    "--max-line-length=88",
]
def lint(session):
    """Run flake8 with the shared flags plus this repo's local import names."""
    session.install("flake8", "flake8-import-order")
    local_names = _determine_local_import_names(".")
    session.run(
        "flake8",
        *FLAKE8_COMMON_ARGS,
        "--application-import-names",
        ",".join(local_names),
        ".",
    )
2,512 | import sys
from argparse import ArgumentParser, FileType
from inspect import signature
import numpy as np
import m2cgen
parser = ArgumentParser(
prog="m2cgen",
description="Generate code in native language for provided model.")
parser.add_argument(
"infile",
type=FileType("rb"),
nargs="?",
default=sys.stdin.buffer,
help="File with pickle representation of the model.")
parser.add_argument(
"--language", "-l",
type=str,
choices=LANGUAGE_TO_EXPORTER.keys(),
help="Target language.",
required=True)
parser.add_argument(
"--function_name", "-fn",
dest="function_name",
type=str,
# The default value is conditional and will be set in the argument's
# post-processing, based on the signature of the `export` function
# that belongs to the specified target language.
default=None,
help="Name of the function in the generated code.")
parser.add_argument(
"--class_name", "-cn",
dest="class_name",
type=str,
help="Name of the generated class (if supported by target language).")
parser.add_argument(
"--package_name", "-pn",
dest="package_name",
type=str,
help="Package name for the generated code (if supported by target language).")
parser.add_argument(
"--module_name", "-mn",
dest="module_name",
type=str,
help="Module name for the generated code (if supported by target language).")
parser.add_argument(
"--namespace", "-ns",
dest="namespace",
type=str,
help="Namespace for the generated code (if supported by target language).")
parser.add_argument(
"--indent", "-i",
dest="indent",
type=int,
default=4,
help="Indentation for the generated code.")
parser.add_argument(
"--recursion-limit", "-rl",
type=int,
help="Sets the maximum depth of the Python interpreter stack. No limit by default",
default=MAX_RECURSION_DEPTH)
parser.add_argument(
"--version", "-v",
action="version",
version=f"%(prog)s {m2cgen.__version__}")
parser.add_argument(
"--pickle-lib", "-pl",
type=str,
dest="lib",
help="Sets the lib used to save the model",
choices=["pickle", "joblib"],
default="pickle")
def parse_args(args):
    """Parse the given argument list with the module-level CLI parser."""
    return parser.parse_args(args)
2,513 | import sys
from argparse import ArgumentParser, FileType
from inspect import signature
import numpy as np
import m2cgen
# Maps each supported `--language` value to a pair of:
#   1. the m2cgen exporter callable for that language, and
#   2. the CLI argument names that exporter accepts as keyword arguments.
# NOTE: key order is user-visible (it drives the `--language` choices list),
# so entries should not be reordered casually.
LANGUAGE_TO_EXPORTER = {
    "python": (m2cgen.export_to_python, ["indent", "function_name"]),
    "java": (m2cgen.export_to_java, ["indent", "class_name", "package_name", "function_name"]),
    "c": (m2cgen.export_to_c, ["indent", "function_name"]),
    "go": (m2cgen.export_to_go, ["indent", "function_name"]),
    "javascript": (m2cgen.export_to_javascript, ["indent", "function_name"]),
    "visual_basic": (m2cgen.export_to_visual_basic, ["module_name", "indent", "function_name"]),
    "c_sharp": (m2cgen.export_to_c_sharp, ["indent", "class_name", "namespace", "function_name"]),
    "powershell": (m2cgen.export_to_powershell, ["indent", "function_name"]),
    "r": (m2cgen.export_to_r, ["indent", "function_name"]),
    "php": (m2cgen.export_to_php, ["indent", "function_name"]),
    "dart": (m2cgen.export_to_dart, ["indent", "function_name"]),
    "haskell": (m2cgen.export_to_haskell, ["module_name", "indent", "function_name"]),
    "ruby": (m2cgen.export_to_ruby, ["indent", "function_name"]),
    "f_sharp": (m2cgen.export_to_f_sharp, ["indent", "function_name"]),
    "rust": (m2cgen.export_to_rust, ["indent", "function_name"]),
    "elixir": (m2cgen.export_to_elixir, ["module_name", "indent", "function_name"]),
}
def generate_code(args):
    """Load the pickled model described by *args* and transpile it.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI arguments (see the module-level parser).

    Returns
    -------
    code : string
        Generated source code in the requested target language.
    """
    sys.setrecursionlimit(args.recursion_limit)
    # SECURITY NOTE: unpickling executes arbitrary code — the input file
    # must be trusted (this is inherent to the CLI's design).
    with args.infile as model_file:
        model = __import__(args.lib).load(model_file)
    exporter, supported_args = LANGUAGE_TO_EXPORTER[args.language]
    kwargs = {}
    for name in supported_args:
        value = getattr(args, name)
        if value is not None:
            kwargs[name] = value
        elif name == "function_name":
            # Special handling for the function_name parameter, which needs
            # to be the same as the default value of the keyword argument of
            # the exporter (this is due to languages like C# which prefer
            # their method names to follow PascalCase unlike all the other
            # supported languages -- see
            # https://github.com/BayesWitnesses/m2cgen/pull/166#discussion_r379867601
            # for more).
            kwargs[name] = signature(exporter).parameters["function_name"].default
    return exporter(model, **kwargs)
2,514 | from m2cgen import interpreters
from m2cgen.assemblers import get_assembler_cls
def _export(model, interpreter):
    """Assemble *model* into an AST and render it with *interpreter*."""
    assembler = get_assembler_cls(model)(model)
    return interpreter.interpret(assembler.assemble())
The provided code snippet includes necessary dependencies for implementing the `export_to_java` function. Write a Python function `def export_to_java(model, package_name=None, class_name="Model", indent=4, function_name="score")` to solve the following problem:
Generates a Java code representation of the given model. Parameters ---------- model : object The model object that should be transpiled into code. package_name : string, optional Java package name. By default no package name is used. class_name : string, optional The name of the generated class. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string
Here is the function:
def export_to_java(model, package_name=None, class_name="Model", indent=4,
                   function_name="score"):
    """Transpile the given model into Java source code.

    Parameters
    ----------
    model : object
        The model object that should be transpiled into code.
    package_name : string, optional
        Java package name. By default no package name is used.
    class_name : string, optional
        The name of the generated class.
    indent : int, optional
        The size of indents in the generated code.
    function_name : string, optional
        Name of the function in the generated code.

    Returns
    -------
    code : string
    """
    return _export(model, interpreters.JavaInterpreter(
        package_name=package_name,
        class_name=class_name,
        indent=indent,
        function_name=function_name,
    ))
2,515 | from m2cgen import interpreters
from m2cgen.assemblers import get_assembler_cls
def _export(model, interpreter):
    """Assemble *model* into an AST and render it with *interpreter*."""
    assembler = get_assembler_cls(model)(model)
    return interpreter.interpret(assembler.assemble())
The provided code snippet includes necessary dependencies for implementing the `export_to_python` function. Write a Python function `def export_to_python(model, indent=4, function_name="score")` to solve the following problem:
Generates a Python code representation of the given model. Parameters ---------- model : object The model object that should be transpiled into code. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string
Here is the function:
def export_to_python(model, indent=4, function_name="score"):
    """Transpile the given model into Python source code.

    Parameters
    ----------
    model : object
        The model object that should be transpiled into code.
    indent : int, optional
        The size of indents in the generated code.
    function_name : string, optional
        Name of the function in the generated code.

    Returns
    -------
    code : string
    """
    return _export(model, interpreters.PythonInterpreter(
        indent=indent,
        function_name=function_name,
    ))
2,516 | from m2cgen import interpreters
from m2cgen.assemblers import get_assembler_cls
def _export(model, interpreter):
    """Assemble *model* into an AST and render it with *interpreter*."""
    assembler = get_assembler_cls(model)(model)
    return interpreter.interpret(assembler.assemble())
The provided code snippet includes necessary dependencies for implementing the `export_to_c` function. Write a Python function `def export_to_c(model, indent=4, function_name="score")` to solve the following problem:
Generates a C code representation of the given model. Parameters ---------- model : object The model object that should be transpiled into code. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string
Here is the function:
def export_to_c(model, indent=4, function_name="score"):
    """Transpile the given model into C source code.

    Parameters
    ----------
    model : object
        The model object that should be transpiled into code.
    indent : int, optional
        The size of indents in the generated code.
    function_name : string, optional
        Name of the function in the generated code.

    Returns
    -------
    code : string
    """
    return _export(model, interpreters.CInterpreter(
        indent=indent,
        function_name=function_name,
    ))
2,517 | from m2cgen import interpreters
from m2cgen.assemblers import get_assembler_cls
def _export(model, interpreter):
    """Assemble *model* into an AST and render it with *interpreter*."""
    assembler = get_assembler_cls(model)(model)
    return interpreter.interpret(assembler.assemble())
The provided code snippet includes necessary dependencies for implementing the `export_to_go` function. Write a Python function `def export_to_go(model, indent=4, function_name="score")` to solve the following problem:
Generates a Go code representation of the given model. Parameters ---------- model : object The model object that should be transpiled into code. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string
Here is the function:
def export_to_go(model, indent=4, function_name="score"):
    """Transpile the given model into Go source code.

    Parameters
    ----------
    model : object
        The model object that should be transpiled into code.
    indent : int, optional
        The size of indents in the generated code.
    function_name : string, optional
        Name of the function in the generated code.

    Returns
    -------
    code : string
    """
    return _export(model, interpreters.GoInterpreter(
        indent=indent,
        function_name=function_name,
    ))
2,518 | from m2cgen import interpreters
from m2cgen.assemblers import get_assembler_cls
def _export(model, interpreter):
    """Assemble *model* into an AST and render it with *interpreter*."""
    assembler = get_assembler_cls(model)(model)
    return interpreter.interpret(assembler.assemble())
The provided code snippet includes necessary dependencies for implementing the `export_to_javascript` function. Write a Python function `def export_to_javascript(model, indent=4, function_name="score")` to solve the following problem:
Generates a JavaScript code representation of the given model. Parameters ---------- model : object The model object that should be transpiled into code. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string
Here is the function:
def export_to_javascript(model, indent=4, function_name="score"):
    """Transpile the given model into JavaScript source code.

    Parameters
    ----------
    model : object
        The model object that should be transpiled into code.
    indent : int, optional
        The size of indents in the generated code.
    function_name : string, optional
        Name of the function in the generated code.

    Returns
    -------
    code : string
    """
    return _export(model, interpreters.JavascriptInterpreter(
        indent=indent,
        function_name=function_name,
    ))
2,519 | from m2cgen import interpreters
from m2cgen.assemblers import get_assembler_cls
def _export(model, interpreter):
    """Assemble *model* into an AST and render it with *interpreter*."""
    assembler = get_assembler_cls(model)(model)
    return interpreter.interpret(assembler.assemble())
The provided code snippet includes necessary dependencies for implementing the `export_to_visual_basic` function. Write a Python function `def export_to_visual_basic(model, module_name="Model", indent=4, function_name="Score")` to solve the following problem:
Generates a Visual Basic (also can be treated as VBA with some small manual changes, see a note below) code representation of the given model. .. note:: The generated code representation can be easily used as VBA code. You simply need to remove the first (`Module Model`) and the last (`End Module`) lines, and manually adjust the code to meet the following limitations: - nested floating-point expressions have level limits, e.g. 8 in 32-bit environment (**expression too complex**: https://docs.microsoft.com/ru-ru/office/vba/language/reference/user-interface-help/expression-too-complex-error-16); - **fixed or static data can't be larger than 64K**: https://docs.microsoft.com/ru-ru/office/vba/language/reference/user-interface-help/fixed-or-static-data-can-t-be-larger-than-64k; - code line length cannot contain more than 1023 characters (**line too long**: https://docs.microsoft.com/ru-ru/office/vba/language/reference/user-interface-help/line-too-long); - segment boundaries are 64K (**out of memory**: https://docs.microsoft.com/ru-ru/office/vba/language/reference/user-interface-help/out-of-memory-error-7); - nested function calls have depth limit (**out of stack space**: https://docs.microsoft.com/ru-ru/office/vba/language/reference/user-interface-help/out-of-stack-space-error-28); - compiled procedure cannot exceed 64K size limit (**procedure too large**: https://docs.microsoft.com/ru-ru/office/vba/language/reference/user-interface-help/procedure-too-large); - project's name table is limited by 32768 names (**project contains too many procedure, variable, and constant names**: https://docs.microsoft.com/ru-ru/office/vba/language/reference/user-interface-help/project-contains-too-many-procedure-variable-and-constant-names); - statements cannot be extremely complex (**statement too complex**: https://docs.microsoft.com/ru-ru/office/vba/language/reference/user-interface-help/statement-too-complex); - there can't be more than 24 consecutive line-continuation characters 
(**too many line continuations**: https://docs.microsoft.com/ru-ru/office/vba/language/reference/user-interface-help/too-many-line-continuations); - procedure's local, nonstatic variables and compiler-generated temporary variables cannot exceed 32K size limit (**too many local, nonstatic variables**: https://docs.microsoft.com/ru-ru/office/vba/language/reference/user-interface-help/too-many-local-nonstatic-variables); - and some others... Parameters ---------- model : object The model object that should be transpiled into code. module_name : string, optional The name of the generated module. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string
Here is the function:
def export_to_visual_basic(model, module_name="Model", indent=4,
                           function_name="Score"):
    """Transpile the given model into Visual Basic source code.

    .. note::
        The generated code can also be used as VBA: remove the first
        (`Module Model`) and last (`End Module`) lines, then manually
        adjust the code to respect VBA's compiler limits, including:

        - nested floating-point expressions have level limits,
          e.g. 8 in a 32-bit environment ("expression too complex");
        - fixed or static data can't be larger than 64K;
        - a code line cannot contain more than 1023 characters
          ("line too long");
        - segment boundaries are 64K ("out of memory");
        - nested function calls have a depth limit
          ("out of stack space");
        - a compiled procedure cannot exceed the 64K size limit
          ("procedure too large");
        - a project's name table is limited to 32768 names
          ("project contains too many procedure, variable, and
          constant names");
        - statements cannot be extremely complex
          ("statement too complex");
        - there can't be more than 24 consecutive line-continuation
          characters ("too many line continuations");
        - a procedure's local, nonstatic variables and
          compiler-generated temporaries cannot exceed the 32K size
          limit ("too many local, nonstatic variables");
        - and some others — see Microsoft's VBA error reference at
          https://docs.microsoft.com/ru-ru/office/vba/language/reference/user-interface-help/
          for the full list.

    Parameters
    ----------
    model : object
        The model object that should be transpiled into code.
    module_name : string, optional
        The name of the generated module.
    indent : int, optional
        The size of indents in the generated code.
    function_name : string, optional
        Name of the function in the generated code.

    Returns
    -------
    code : string
    """
    return _export(model, interpreters.VisualBasicInterpreter(
        module_name=module_name,
        indent=indent,
        function_name=function_name,
    ))
consecutive line-continuation characters (**too many line continuations**: https://docs.microsoft.com/ru-ru/office/vba/language/reference/user-interface-help/too-many-line-continuations); - procedure's local, nonstatic variables and compiler-generated temporary variables cannot exceed 32K size limit (**too many local, nonstatic variables**: https://docs.microsoft.com/ru-ru/office/vba/language/reference/user-interface-help/too-many-local-nonstatic-variables); - and some others... Parameters ---------- model : object The model object that should be transpiled into code. module_name : string, optional The name of the generated module. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string |
2,520 | from m2cgen import interpreters
from m2cgen.assemblers import get_assembler_cls
def _export(model, interpreter):
    """Assemble *model* into an AST and render it with *interpreter*."""
    assembler = get_assembler_cls(model)(model)
    return interpreter.interpret(assembler.assemble())
The provided code snippet includes necessary dependencies for implementing the `export_to_c_sharp` function. Write a Python function `def export_to_c_sharp(model, namespace="ML", class_name="Model", indent=4, function_name="Score")` to solve the following problem:
Generates a C# code representation of the given model. Parameters ---------- model : object The model object that should be transpiled into code. namespace : string, optional The namespace for the generated code. class_name : string, optional The name of the generated class. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string
Here is the function:
def export_to_c_sharp(model, namespace="ML", class_name="Model", indent=4,
                      function_name="Score"):
    """Transpile the given model into C# source code.

    Parameters
    ----------
    model : object
        The model object that should be transpiled into code.
    namespace : string, optional
        The namespace for the generated code.
    class_name : string, optional
        The name of the generated class.
    indent : int, optional
        The size of indents in the generated code.
    function_name : string, optional
        Name of the function in the generated code.

    Returns
    -------
    code : string
    """
    return _export(model, interpreters.CSharpInterpreter(
        namespace=namespace,
        class_name=class_name,
        indent=indent,
        function_name=function_name,
    ))
2,521 | from m2cgen import interpreters
from m2cgen.assemblers import get_assembler_cls
def _export(model, interpreter):
    """Assemble *model* into an AST and render it with *interpreter*."""
    assembler = get_assembler_cls(model)(model)
    return interpreter.interpret(assembler.assemble())
The provided code snippet includes necessary dependencies for implementing the `export_to_powershell` function. Write a Python function `def export_to_powershell(model, indent=4, function_name="Score")` to solve the following problem:
Generates a PowerShell code representation of the given model. Parameters ---------- model : object The model object that should be transpiled into code. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string
Here is the function:
def export_to_powershell(model, indent=4, function_name="Score"):
    """Transpile the given model into PowerShell source code.

    Parameters
    ----------
    model : object
        The model object that should be transpiled into code.
    indent : int, optional
        The size of indents in the generated code.
    function_name : string, optional
        Name of the function in the generated code.

    Returns
    -------
    code : string
    """
    return _export(model, interpreters.PowershellInterpreter(
        indent=indent,
        function_name=function_name,
    ))
2,522 | from m2cgen import interpreters
from m2cgen.assemblers import get_assembler_cls
def _export(model, interpreter):
    """Assemble *model* into an AST and render it with *interpreter*."""
    assembler = get_assembler_cls(model)(model)
    return interpreter.interpret(assembler.assemble())
The provided code snippet includes necessary dependencies for implementing the `export_to_r` function. Write a Python function `def export_to_r(model, indent=4, function_name="score")` to solve the following problem:
Generates an R code representation of the given model. Parameters ---------- model : object The model object that should be transpiled into code. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string
Here is the function:
def export_to_r(model, indent=4, function_name="score"):
    """Transpile the given model into R source code.

    Parameters
    ----------
    model : object
        The model object that should be transpiled into code.
    indent : int, optional
        The size of indents in the generated code.
    function_name : string, optional
        Name of the function in the generated code.

    Returns
    -------
    code : string
    """
    return _export(model, interpreters.RInterpreter(
        indent=indent,
        function_name=function_name,
    ))
2,523 | from m2cgen import interpreters
from m2cgen.assemblers import get_assembler_cls
def _export(model, interpreter):
    """Assemble *model* into an AST and render it with *interpreter*."""
    assembler = get_assembler_cls(model)(model)
    return interpreter.interpret(assembler.assemble())
The provided code snippet includes necessary dependencies for implementing the `export_to_php` function. Write a Python function `def export_to_php(model, indent=4, function_name="score")` to solve the following problem:
Generates a PHP code representation of the given model. Parameters ---------- model : object The model object that should be transpiled into code. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string
Here is the function:
def export_to_php(model, indent=4, function_name="score"):
    """Transpile the given model into PHP source code.

    Parameters
    ----------
    model : object
        The model object that should be transpiled into code.
    indent : int, optional
        The size of indents in the generated code.
    function_name : string, optional
        Name of the function in the generated code.

    Returns
    -------
    code : string
    """
    return _export(model, interpreters.PhpInterpreter(
        indent=indent,
        function_name=function_name,
    ))
2,524 | from m2cgen import interpreters
from m2cgen.assemblers import get_assembler_cls
def _export(model, interpreter):
    """Assemble *model* into an AST and render it with *interpreter*."""
    assembler = get_assembler_cls(model)(model)
    return interpreter.interpret(assembler.assemble())
The provided code snippet includes necessary dependencies for implementing the `export_to_dart` function. Write a Python function `def export_to_dart(model, indent=4, function_name="score")` to solve the following problem:
Generates a Dart code representation of the given model. Parameters ---------- model : object The model object that should be transpiled into code. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string
Here is the function:
def export_to_dart(model, indent=4, function_name="score"):
    """Transpile the given model into Dart source code.

    Parameters
    ----------
    model : object
        The model object that should be transpiled into code.
    indent : int, optional
        The size of indents in the generated code.
    function_name : string, optional
        Name of the function in the generated code.

    Returns
    -------
    code : string
    """
    return _export(model, interpreters.DartInterpreter(
        indent=indent,
        function_name=function_name,
    ))
2,525 | from m2cgen import interpreters
from m2cgen.assemblers import get_assembler_cls
def _export(model, interpreter):
    """Assemble *model* into an AST and render it with *interpreter*."""
    assembler = get_assembler_cls(model)(model)
    return interpreter.interpret(assembler.assemble())
The provided code snippet includes necessary dependencies for implementing the `export_to_haskell` function. Write a Python function `def export_to_haskell(model, module_name="Model", indent=4, function_name="score")` to solve the following problem:
Generates a Haskell code representation of the given model. Parameters ---------- model : object The model object that should be transpiled into code. module_name : string, optional The name of the generated module. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string
Here is the function:
def export_to_haskell(model, module_name="Model", indent=4,
                      function_name="score"):
    """Transpile the given model into Haskell source code.

    Parameters
    ----------
    model : object
        The model object that should be transpiled into code.
    module_name : string, optional
        The name of the generated module.
    indent : int, optional
        The size of indents in the generated code.
    function_name : string, optional
        Name of the function in the generated code.

    Returns
    -------
    code : string
    """
    return _export(model, interpreters.HaskellInterpreter(
        module_name=module_name,
        indent=indent,
        function_name=function_name,
    ))
2,526 | from m2cgen import interpreters
from m2cgen.assemblers import get_assembler_cls
def _export(model, interpreter):
    """Assemble *model* into an AST and render it with *interpreter*."""
    assembler = get_assembler_cls(model)(model)
    return interpreter.interpret(assembler.assemble())
The provided code snippet includes necessary dependencies for implementing the `export_to_ruby` function. Write a Python function `def export_to_ruby(model, indent=4, function_name="score")` to solve the following problem:
Generates a Ruby code representation of the given model. Parameters ---------- model : object The model object that should be transpiled into code. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string
Here is the function:
def export_to_ruby(model, indent=4, function_name="score"):
    """Transpile *model* into Ruby source code.

    Parameters
    ----------
    model : object
        Trained model to convert.
    indent : int, optional
        Indentation width used in the generated code.
    function_name : string, optional
        Name given to the generated scoring function.

    Returns
    -------
    code : string
    """
    return _export(
        model,
        interpreters.RubyInterpreter(indent=indent, function_name=function_name),
    )
2,527 | from m2cgen import interpreters
from m2cgen.assemblers import get_assembler_cls
def _export(model, interpreter):
    # Model -> assembler -> intermediate AST -> target-language source.
    model_ast = get_assembler_cls(model)(model).assemble()
    return interpreter.interpret(model_ast)
The provided code snippet includes necessary dependencies for implementing the `export_to_f_sharp` function. Write a Python function `def export_to_f_sharp(model, indent=4, function_name="score")` to solve the following problem:
Generates an F# code representation of the given model. Parameters ---------- model : object The model object that should be transpiled into code. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string
Here is the function:
def export_to_f_sharp(model, indent=4, function_name="score"):
    """Transpile *model* into F# source code.

    Parameters
    ----------
    model : object
        Trained model to convert.
    indent : int, optional
        Indentation width used in the generated code.
    function_name : string, optional
        Name given to the generated scoring function.

    Returns
    -------
    code : string
    """
    return _export(
        model,
        interpreters.FSharpInterpreter(indent=indent, function_name=function_name),
    )
2,528 | from m2cgen import interpreters
from m2cgen.assemblers import get_assembler_cls
def _export(model, interpreter):
    """Render *model* via its matching assembler and the given interpreter."""
    assembler = get_assembler_cls(model)(model)
    return interpreter.interpret(assembler.assemble())
The provided code snippet includes necessary dependencies for implementing the `export_to_rust` function. Write a Python function `def export_to_rust(model, indent=4, function_name="score")` to solve the following problem:
Generates a Rust code representation of the given model. Parameters ---------- model : object The model object that should be transpiled into code. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string
Here is the function:
def export_to_rust(model, indent=4, function_name="score"):
    """Transpile *model* into Rust source code.

    Parameters
    ----------
    model : object
        Trained model to convert.
    indent : int, optional
        Indentation width used in the generated code.
    function_name : string, optional
        Name given to the generated scoring function.

    Returns
    -------
    code : string
    """
    return _export(
        model,
        interpreters.RustInterpreter(indent=indent, function_name=function_name),
    )
2,529 | from m2cgen import interpreters
from m2cgen.assemblers import get_assembler_cls
def _export(model, interpreter):
    # Build the language-agnostic AST first, then let the interpreter emit code.
    assembler_cls = get_assembler_cls(model)
    return interpreter.interpret(assembler_cls(model).assemble())
The provided code snippet includes necessary dependencies for implementing the `export_to_elixir` function. Write a Python function `def export_to_elixir(model, module_name="Model", indent=4, function_name="score")` to solve the following problem:
Generates an Elixir code representation of the given model. Parameters ---------- model : object The model object that should be transpiled into code. module_name : string, optional The name of the generated module. indent : int, optional The size of indents in the generated code. function_name : string, optional Name of the function in the generated code. Returns ------- code : string
Here is the function:
def export_to_elixir(model, module_name="Model", indent=4,
                     function_name="score"):
    """Transpile *model* into Elixir source code.

    Parameters
    ----------
    model : object
        Trained model to convert.
    module_name : string, optional
        Name of the generated Elixir module.
    indent : int, optional
        Indentation width used in the generated code.
    function_name : string, optional
        Name given to the generated scoring function.

    Returns
    -------
    code : string
    """
    return _export(
        model,
        interpreters.ElixirInterpreter(
            module_name=module_name,
            indent=indent,
            function_name=function_name,
        ),
    )
2,530 | import math
from m2cgen import ast
from m2cgen.assemblers import utils
def abs(expr):
    """Build an AST computing |expr| as ``x < 0 ? 0 - x : x``."""
    # Mark the operand reusable so generated code evaluates it only once.
    value = ast.IdExpr(expr, to_reuse=True)
    negated = utils.sub(ast.NumVal(0.0), value)
    return ast.IfExpr(utils.lt(value, ast.NumVal(0.0)), negated, value)
2,531 | import math
from m2cgen import ast
from m2cgen.assemblers import utils
def tanh(expr):
    """Build an AST for tanh(x) = 1 - 2 / (exp(2x) + 1), saturated at |x| > 44."""
    x = ast.IdExpr(expr, to_reuse=True)
    denominator = utils.add(
        ast.ExpExpr(utils.mul(ast.NumVal(2.0), x)),
        ast.NumVal(1.0))
    formula = utils.sub(
        ast.NumVal(1.0),
        utils.div(ast.NumVal(2.0), denominator))
    # Beyond |x| = 44, exp(2*x) would exceed 2^127, so clamp to +/-1.
    return ast.IfExpr(
        utils.gt(x, ast.NumVal(44.0)),
        ast.NumVal(1.0),
        ast.IfExpr(
            utils.lt(x, ast.NumVal(-44.0)),
            ast.NumVal(-1.0),
            formula))
2,532 | import math
from m2cgen import ast
from m2cgen.assemblers import utils
def sqrt(expr):
    # Expressed as x ** 0.5 so targets without a sqrt primitive still work.
    return ast.PowExpr(base_expr=expr, exp_expr=ast.NumVal(0.5))
2,533 | import math
from m2cgen import ast
from m2cgen.assemblers import utils
def exp(expr):
    # e ** x, expressed through the generic power node.
    return ast.PowExpr(base_expr=ast.NumVal(math.e), exp_expr=expr)
2,534 | import math
from m2cgen import ast
from m2cgen.assemblers import utils
def log1p(expr):
    """Build an AST computing log(1 + x) accurately for small x.

    Uses log1p(x) = x * log(1 + x) / ((1 + x) - 1); see
    https://www.johndcook.com/blog/2012/07/25/trick-for-computing-log1x/
    """
    x = ast.IdExpr(expr, to_reuse=True)
    one_plus_x = utils.add(ast.NumVal(1.0), x, to_reuse=True)
    # (1 + x) - 1 captures the rounding actually incurred by the addition.
    rounded_x = utils.sub(one_plus_x, ast.NumVal(1.0), to_reuse=True)
    return ast.IfExpr(
        utils.eq(rounded_x, ast.NumVal(0.0)),
        x,
        utils.div(utils.mul(x, ast.LogExpr(one_plus_x)), rounded_x))
2,535 | import math
from m2cgen import ast
from m2cgen.assemblers import utils
def atan(expr):
    """Build an AST approximating arctan(x).

    The argument is first reduced to a small interval using the identities
    atan(x) = pi/2 - atan(1/x) for |x| > tan(3*pi/8) and
    atan(x) = pi/4 + atan((x - 1) / (x + 1)) for |x| > 0.66, then a rational
    polynomial in x^2 (coefficients P0..P4 / Q0..Q4) is evaluated and the
    angle offset and original sign are restored.
    """
    expr = ast.IdExpr(expr, to_reuse=True)
    expr_abs = ast.AbsExpr(expr, to_reuse=True)
    # Range reduction: 2.414213... is tan(3*pi/8).
    expr_reduced = ast.IdExpr(
        ast.IfExpr(
            utils.gt(expr_abs, ast.NumVal(2.4142135623730950488)),
            utils.div(ast.NumVal(1.0), expr_abs),
            ast.IfExpr(
                utils.gt(expr_abs, ast.NumVal(0.66)),
                utils.div(
                    utils.sub(expr_abs, ast.NumVal(1.0)),
                    utils.add(expr_abs, ast.NumVal(1.0))),
                expr_abs)),
        to_reuse=True)
    # Numerator / denominator coefficients of the rational approximation.
    P0 = ast.NumVal(-8.750608600031904122785e-01)
    P1 = ast.NumVal(1.615753718733365076637e+01)
    P2 = ast.NumVal(7.500855792314704667340e+01)
    P3 = ast.NumVal(1.228866684490136173410e+02)
    P4 = ast.NumVal(6.485021904942025371773e+01)
    Q0 = ast.NumVal(2.485846490142306297962e+01)
    Q1 = ast.NumVal(1.650270098316988542046e+02)
    Q2 = ast.NumVal(4.328810604912902668951e+02)
    Q3 = ast.NumVal(4.853903996359136964868e+02)
    Q4 = ast.NumVal(1.945506571482613964425e+02)
    expr2 = utils.mul(expr_reduced, expr_reduced, to_reuse=True)
    # z = x^2 * P(x^2) / Q(x^2), both polynomials expanded via Horner nesting.
    z = utils.mul(
        expr2,
        utils.div(
            utils.sub(
                utils.mul(
                    expr2,
                    utils.sub(
                        utils.mul(
                            expr2,
                            utils.sub(
                                utils.mul(
                                    expr2,
                                    utils.sub(
                                        utils.mul(
                                            expr2,
                                            P0
                                        ),
                                        P1
                                    )
                                ),
                                P2
                            )
                        ),
                        P3
                    )
                ),
                P4
            ),
            utils.add(
                Q4,
                utils.mul(
                    expr2,
                    utils.add(
                        Q3,
                        utils.mul(
                            expr2,
                            utils.add(
                                Q2,
                                utils.mul(
                                    expr2,
                                    utils.add(
                                        Q1,
                                        utils.mul(
                                            expr2,
                                            utils.add(
                                                Q0,
                                                expr2
                                            )
                                        )
                                    )
                                )
                            )
                        )
                    )
                )
            )
        )
    )
    z = utils.add(utils.mul(expr_reduced, z), expr_reduced)
    # Undo the 1/x reduction (it flips the sign of the correction term).
    ret = utils.mul(
        z,
        ast.IfExpr(
            utils.gt(expr_abs, ast.NumVal(2.4142135623730950488)),
            ast.NumVal(-1.0),
            ast.NumVal(1.0)))
    # Add back the angle offset removed during reduction: pi/2 or pi/4.
    ret = utils.add(
        ret,
        ast.IfExpr(
            utils.lte(expr_abs, ast.NumVal(0.66)),
            ast.NumVal(0.0),
            ast.IfExpr(
                utils.gt(expr_abs, ast.NumVal(2.4142135623730950488)),
                ast.NumVal(1.570796326794896680463661649),
                ast.NumVal(0.7853981633974483402318308245))))
    # Restore the sign of the original argument (atan is odd).
    ret = utils.mul(
        ret,
        ast.IfExpr(
            utils.lt(expr, ast.NumVal(0.0)),
            ast.NumVal(-1.0),
            ast.NumVal(1.0)))
    return ret
2,536 | import math
from m2cgen import ast
from m2cgen.assemblers import utils
def sigmoid(expr, to_reuse=False):
    """Build an AST for the logistic function 1 / (1 + exp(0 - x))."""
    exp_of_neg = ast.ExpExpr(
        ast.BinNumExpr(ast.NumVal(0.0), expr, ast.BinNumOpType.SUB))
    denominator = ast.BinNumExpr(
        ast.NumVal(1.0), exp_of_neg, ast.BinNumOpType.ADD)
    return ast.BinNumExpr(
        ast.NumVal(1.0), denominator, ast.BinNumOpType.DIV, to_reuse=to_reuse)
2,537 | import math
from m2cgen import ast
from m2cgen.assemblers import utils
def softmax(exprs):
    """Build an AST computing softmax: exp(e_i) / sum_j exp(e_j)."""
    numerators = [ast.ExpExpr(e, to_reuse=True) for e in exprs]
    denominator = utils.apply_op_to_expressions(
        ast.BinNumOpType.ADD, *numerators, to_reuse=True)
    return ast.VectorVal([
        ast.BinNumExpr(num, denominator, ast.BinNumOpType.DIV)
        for num in numerators
    ])
2,538 | import numpy as np
from m2cgen import ast
def to_1d_array(var):
    """Return *var* flattened into a 1-D numpy array."""
    return np.asarray(var).ravel()
2,539 | import numpy as np
from m2cgen import ast
def to_2d_array(var):
    """Return *var* as a 2-D numpy array; non-2-D input becomes one row."""
    arr = np.asarray(var)
    if arr.ndim == 2:
        rows, cols = arr.shape
    else:
        # Scalars, vectors and higher-rank input all collapse to a single row.
        rows, cols = 1, arr.size
    return arr.reshape(rows, cols)
2,540 | import json
import math
import numpy as np
from m2cgen import ast
from m2cgen.assemblers import utils
from m2cgen.assemblers.base import ModelAssembler
from m2cgen.assemblers.linear import _linear_to_ast
def _split_estimator_params_by_classes(values, n_classes, params_seq_len):
# Splits are computed based on a comment
# https://github.com/dmlc/xgboost/issues/1746#issuecomment-267400592
# and the enhancement to support boosted forests in XGBoost.
values_len = len(values)
block_len = n_classes * params_seq_len
indices = list(range(values_len))
indices_by_class = np.array(
[[indices[i:i + params_seq_len]
for i in range(j, values_len, block_len)]
for j in range(0, block_len, params_seq_len)]
).reshape(n_classes, -1)
return [[values[idx] for idx in class_idxs] for class_idxs in indices_by_class] | null |
2,541 | import math
import numpy as np
from m2cgen import ast
from m2cgen.assemblers import utils
from m2cgen.assemblers.base import ModelAssembler
def _linear_to_ast(coef, intercept):
    """Build an AST for intercept + sum_i coef[i] * feature[i]."""
    weighted_terms = [
        utils.mul(ast.FeatureRef(i), ast.NumVal(weight))
        for i, weight in enumerate(coef)
    ]
    return utils.apply_op_to_expressions(
        ast.BinNumOpType.ADD,
        ast.NumVal(intercept),
        *weighted_terms)
2,542 | from enum import Enum
from inspect import getmembers, isclass
from sys import modules
import numpy as np
class FeatureRef(Expr):
    """AST node referencing an input feature by its positional index."""
    def __init__(self, index):
        self.index = index
    def __str__(self):
        return f"FeatureRef({self.index})"
    def __eq__(self, other):
        # Exact type match (not isinstance) so subclasses never compare equal.
        return type(other) is FeatureRef and self.index == other.index
    def __hash__(self):
        # Hash mirrors __eq__: same index means interchangeable nodes.
        return hash(self.index)
class NumVal(NumExpr):
    """AST node for a numeric literal, coerced to an explicit dtype."""
    def __init__(self, value, dtype=np.float64):
        # Convert eagerly so downstream formatting sees a concrete numpy scalar.
        self.value = dtype(value)
    def __str__(self):
        return f"NumVal({self.value})"
    def __eq__(self, other):
        # Exact type match; equal values wrapped in other node types stay distinct.
        return type(other) is NumVal and self.value == other.value
    def __hash__(self):
        return hash(self.value)
# Maps groups of expression types to accessors yielding their child expressions.
NESTED_EXPRS_MAPPINGS = [
    ((BinExpr, CompExpr), lambda e: [e.left, e.right]),
    ((PowExpr), lambda e: [e.base_expr, e.exp_expr]),
    ((VectorVal, SoftmaxExpr), lambda e: e.exprs),
    ((IfExpr), lambda e: [e.test, e.body, e.orelse]),
    ((AbsExpr, AtanExpr, ExpExpr, IdExpr, LogExpr, Log1pExpr,
      SigmoidExpr, SqrtExpr, TanhExpr),
     lambda e: [e.expr]),
]


def count_exprs(expr, exclude_list=None):
    """Recursively count AST nodes in *expr*, skipping types in *exclude_list*.

    Raises ValueError for expression types the traversal table doesn't know.
    """
    excluded = tuple(exclude_list) if exclude_list else ()
    expr_type = type(expr)
    own_count = 0 if issubclass(expr_type, excluded) else 1
    # Leaves carry no children, so stop here.
    if isinstance(expr, (NumVal, FeatureRef)):
        return own_count
    for node_types, get_children in NESTED_EXPRS_MAPPINGS:
        if issubclass(expr_type, node_types):
            return own_count + sum(
                count_exprs(child, exclude_list)
                for child in get_children(expr))
    raise ValueError(f"Unexpected expression type '{expr_type.__name__}'")
2,543 | from enum import Enum
from inspect import getmembers, isclass
from sys import modules
import numpy as np
def _eq_bin_exprs(expr_one, expr_two, expected_type):
return (type(expr_one) is expected_type and
type(expr_two) is expected_type and
expr_one.left == expr_two.left and
expr_one.right == expr_two.right and
expr_one.op == expr_two.op) | null |
2,544 | import math
import re
from collections import namedtuple
from functools import lru_cache
import numpy as np
from m2cgen.ast import TOTAL_NUMBER_OF_EXPRESSIONS
def get_file_content(path):
    """Return the text content of *path* (a ``pathlib.Path``), decoded as UTF-8."""
    return path.read_text(encoding="utf-8")
2,545 | import math
import re
from collections import namedtuple
from functools import lru_cache
import numpy as np
from m2cgen.ast import TOTAL_NUMBER_OF_EXPRESSIONS
def _normalize_expr_name(name):
return re.sub("(?!^)([A-Z]+)", r"_\1", name).lower()
def _get_handler_name(expr_tpe):
return f"interpret_{_normalize_expr_name(expr_tpe.__name__)}" | null |
2,546 | import math
import re
from collections import namedtuple
from functools import lru_cache
import numpy as np
from m2cgen.ast import TOTAL_NUMBER_OF_EXPRESSIONS
def format_float(value):
    """Format *value* positionally (never scientific notation), using the
    shortest representation that uniquely round-trips, with trailing zeros
    trimmed down to one digit after the point."""
    return np.format_float_positional(value, unique=True, trim="0")
2,547 |
def add_vectors(v1, v2):
    """Element-wise sum of two equal-length vectors."""
    return [a + b for a, b in zip(v1, v2)]
2,548 |
def mul_vector_number(v1, num):
    """Multiply every component of *v1* by the scalar *num*."""
    return [component * num for component in v1]
2,549 |
def softmax(x):
    """Numerically stable softmax: shift by the max before exponentiating."""
    shift = max(x)
    exps = [math.exp(v - shift) for v in x]
    total = sum(exps)
    return [e / total for e in exps]
2,550 |
def sigmoid(x):
    """Logistic function, using whichever branch keeps exp() from overflowing."""
    if x >= 0.0:
        return 1.0 / (1.0 + math.exp(-x))
    z = math.exp(x)
    return z / (1.0 + z)
2,551 |
def score(input):
    """Class-probability vector from a single (machine-generated) decision tree."""
    if input[2] <= 2.449999988079071:
        return [1.0, 0.0, 0.0]
    if input[3] > 1.75:
        return [0.0, 0.021739130434782608, 0.9782608695652174]
    if input[2] > 4.950000047683716:
        return [0.0, 0.3333333333333333, 0.6666666666666666]
    if input[3] <= 1.6500000357627869:
        return [0.0, 1.0, 0.0]
    return [0.0, 0.0, 1.0]
2,552 | def add_vectors(v1, v2):
return [sum(i) for i in zip(v1, v2)]
def mul_vector_number(v1, num):
    """Scale each component of *v1* by *num*."""
    return [component * num for component in v1]
def score(input):
    """Average the class-probability votes of two decision trees."""
    # Tree 0.
    if input[3] <= 0.75:
        var0 = [1.0, 0.0, 0.0]
    elif input[2] <= 4.75:
        var0 = [0.0, 1.0, 0.0]
    elif input[2] <= 5.049999952316284:
        if input[3] <= 1.75:
            var0 = [0.0, 0.8333333333333334, 0.16666666666666666]
        else:
            var0 = [0.0, 0.08333333333333333, 0.9166666666666666]
    else:
        var0 = [0.0, 0.0, 1.0]
    # Tree 1.
    if input[3] <= 0.800000011920929:
        var1 = [1.0, 0.0, 0.0]
    elif input[0] <= 6.25:
        if input[2] <= 4.8500001430511475:
            var1 = [0.0, 0.9487179487179487, 0.05128205128205128]
        else:
            var1 = [0.0, 0.0, 1.0]
    elif input[3] <= 1.550000011920929:
        var1 = [0.0, 0.8333333333333334, 0.16666666666666666]
    else:
        var1 = [0.0, 0.02564102564102564, 0.9743589743589743]
    return mul_vector_number(add_vectors(var0, var1), 0.5)
2,553 | import math
def score(input):
    """Machine-generated kernel-SVM decision function over a 4-feature input.

    Each var_i is exp(-gamma * squared distance) between *input* and one
    stored reference vector (gamma = 0.06389634699048878) — i.e. an RBF
    kernel evaluation.  The returned 3-element list holds one decision value
    per class: an intercept plus a signed, weighted sum of kernel values.
    NOTE(review): presumably emitted by m2cgen from a fitted SVC — do not
    hand-edit the constants.
    """
    var0 = math.exp(-0.06389634699048878 * (math.pow(5.1 - input[0], 2.0) + math.pow(2.5 - input[1], 2.0) + math.pow(3.0 - input[2], 2.0) + math.pow(1.1 - input[3], 2.0)))
    var1 = math.exp(-0.06389634699048878 * (math.pow(4.9 - input[0], 2.0) + math.pow(2.4 - input[1], 2.0) + math.pow(3.3 - input[2], 2.0) + math.pow(1.0 - input[3], 2.0)))
    var2 = math.exp(-0.06389634699048878 * (math.pow(6.3 - input[0], 2.0) + math.pow(2.5 - input[1], 2.0) + math.pow(4.9 - input[2], 2.0) + math.pow(1.5 - input[3], 2.0)))
    var3 = math.exp(-0.06389634699048878 * (math.pow(5.4 - input[0], 2.0) + math.pow(3.0 - input[1], 2.0) + math.pow(4.5 - input[2], 2.0) + math.pow(1.5 - input[3], 2.0)))
    var4 = math.exp(-0.06389634699048878 * (math.pow(6.2 - input[0], 2.0) + math.pow(2.2 - input[1], 2.0) + math.pow(4.5 - input[2], 2.0) + math.pow(1.5 - input[3], 2.0)))
    var5 = math.exp(-0.06389634699048878 * (math.pow(5.6 - input[0], 2.0) + math.pow(2.9 - input[1], 2.0) + math.pow(3.6 - input[2], 2.0) + math.pow(1.3 - input[3], 2.0)))
    var6 = math.exp(-0.06389634699048878 * (math.pow(6.7 - input[0], 2.0) + math.pow(3.0 - input[1], 2.0) + math.pow(5.0 - input[2], 2.0) + math.pow(1.7 - input[3], 2.0)))
    var7 = math.exp(-0.06389634699048878 * (math.pow(5.0 - input[0], 2.0) + math.pow(2.3 - input[1], 2.0) + math.pow(3.3 - input[2], 2.0) + math.pow(1.0 - input[3], 2.0)))
    var8 = math.exp(-0.06389634699048878 * (math.pow(6.0 - input[0], 2.0) + math.pow(2.7 - input[1], 2.0) + math.pow(5.1 - input[2], 2.0) + math.pow(1.6 - input[3], 2.0)))
    var9 = math.exp(-0.06389634699048878 * (math.pow(5.9 - input[0], 2.0) + math.pow(3.2 - input[1], 2.0) + math.pow(4.8 - input[2], 2.0) + math.pow(1.8 - input[3], 2.0)))
    var10 = math.exp(-0.06389634699048878 * (math.pow(5.7 - input[0], 2.0) + math.pow(2.6 - input[1], 2.0) + math.pow(3.5 - input[2], 2.0) + math.pow(1.0 - input[3], 2.0)))
    var11 = math.exp(-0.06389634699048878 * (math.pow(5.0 - input[0], 2.0) + math.pow(3.0 - input[1], 2.0) + math.pow(1.6 - input[2], 2.0) + math.pow(0.2 - input[3], 2.0)))
    var12 = math.exp(-0.06389634699048878 * (math.pow(5.4 - input[0], 2.0) + math.pow(3.4 - input[1], 2.0) + math.pow(1.7 - input[2], 2.0) + math.pow(0.2 - input[3], 2.0)))
    var13 = math.exp(-0.06389634699048878 * (math.pow(5.7 - input[0], 2.0) + math.pow(3.8 - input[1], 2.0) + math.pow(1.7 - input[2], 2.0) + math.pow(0.3 - input[3], 2.0)))
    var14 = math.exp(-0.06389634699048878 * (math.pow(4.8 - input[0], 2.0) + math.pow(3.4 - input[1], 2.0) + math.pow(1.9 - input[2], 2.0) + math.pow(0.2 - input[3], 2.0)))
    var15 = math.exp(-0.06389634699048878 * (math.pow(4.5 - input[0], 2.0) + math.pow(2.3 - input[1], 2.0) + math.pow(1.3 - input[2], 2.0) + math.pow(0.3 - input[3], 2.0)))
    var16 = math.exp(-0.06389634699048878 * (math.pow(5.7 - input[0], 2.0) + math.pow(4.4 - input[1], 2.0) + math.pow(1.5 - input[2], 2.0) + math.pow(0.4 - input[3], 2.0)))
    var17 = math.exp(-0.06389634699048878 * (math.pow(5.1 - input[0], 2.0) + math.pow(3.8 - input[1], 2.0) + math.pow(1.9 - input[2], 2.0) + math.pow(0.4 - input[3], 2.0)))
    var18 = math.exp(-0.06389634699048878 * (math.pow(5.1 - input[0], 2.0) + math.pow(3.3 - input[1], 2.0) + math.pow(1.7 - input[2], 2.0) + math.pow(0.5 - input[3], 2.0)))
    var19 = math.exp(-0.06389634699048878 * (math.pow(6.2 - input[0], 2.0) + math.pow(2.8 - input[1], 2.0) + math.pow(4.8 - input[2], 2.0) + math.pow(1.8 - input[3], 2.0)))
    var20 = math.exp(-0.06389634699048878 * (math.pow(7.2 - input[0], 2.0) + math.pow(3.0 - input[1], 2.0) + math.pow(5.8 - input[2], 2.0) + math.pow(1.6 - input[3], 2.0)))
    var21 = math.exp(-0.06389634699048878 * (math.pow(6.1 - input[0], 2.0) + math.pow(3.0 - input[1], 2.0) + math.pow(4.9 - input[2], 2.0) + math.pow(1.8 - input[3], 2.0)))
    var22 = math.exp(-0.06389634699048878 * (math.pow(6.0 - input[0], 2.0) + math.pow(3.0 - input[1], 2.0) + math.pow(4.8 - input[2], 2.0) + math.pow(1.8 - input[3], 2.0)))
    var23 = math.exp(-0.06389634699048878 * (math.pow(4.9 - input[0], 2.0) + math.pow(2.5 - input[1], 2.0) + math.pow(4.5 - input[2], 2.0) + math.pow(1.7 - input[3], 2.0)))
    var24 = math.exp(-0.06389634699048878 * (math.pow(7.9 - input[0], 2.0) + math.pow(3.8 - input[1], 2.0) + math.pow(6.4 - input[2], 2.0) + math.pow(2.0 - input[3], 2.0)))
    var25 = math.exp(-0.06389634699048878 * (math.pow(5.6 - input[0], 2.0) + math.pow(2.8 - input[1], 2.0) + math.pow(4.9 - input[2], 2.0) + math.pow(2.0 - input[3], 2.0)))
    var26 = math.exp(-0.06389634699048878 * (math.pow(6.0 - input[0], 2.0) + math.pow(2.2 - input[1], 2.0) + math.pow(5.0 - input[2], 2.0) + math.pow(1.5 - input[3], 2.0)))
    var27 = math.exp(-0.06389634699048878 * (math.pow(6.3 - input[0], 2.0) + math.pow(2.8 - input[1], 2.0) + math.pow(5.1 - input[2], 2.0) + math.pow(1.5 - input[3], 2.0)))
    # One intercept + weighted kernel sum per class decision value.
    return [0.11172510039290856 + var0 * -0.8898986041811555 + var1 * -0.8898986041811555 + var2 * -0.0 + var3 * -0.0 + var4 * -0.0 + var5 * -0.756413813553974 + var6 * -0.0 + var7 * -0.8898986041811555 + var8 * -0.0 + var9 * -0.0 + var10 * -0.8898986041811555 + var11 * 0.04218875216876044 + var12 * 0.7142250613852136 + var13 * 0.0 + var14 * 0.8898986041811555 + var15 * 0.8898986041811555 + var16 * 0.0 + var17 * 0.8898986041811555 + var18 * 0.8898986041811555, -0.04261957451303831 + var19 * -0.37953658977037247 + var20 * -0.0 + var21 * -0.0 + var22 * -0.37953658977037247 + var23 * -0.37953658977037247 + var24 * -0.26472396872040066 + var25 * -0.3745962010653211 + var26 * -0.10077618026650095 + var27 * -0.0 + var11 * 0.0 + var12 * 0.0 + var13 * 0.37953658977037247 + var14 * 0.37953658977037247 + var15 * 0.3044555865539922 + var16 * 0.05610417372785803 + var17 * 0.37953658977037247 + var18 * 0.37953658977037247, 1.8136162062461285 + var19 * -110.34516826676301 + var20 * -13.999391039896215 + var21 * -108.44329471899991 + var22 * -110.34516826676301 + var23 * -22.21095753342801 + var24 * -0.0 + var25 * -0.0 + var26 * -65.00217641452454 + var27 * -110.34516826676301 + var0 * 0.0 + var1 * 0.0 + var2 * 110.34516826676301 + var3 * 62.115561183470184 + var4 * 37.19509025661546 + var5 * 0.0 + var6 * 110.34516826676301 + var7 * 0.0 + var8 * 110.34516826676301 + var9 * 110.34516826676301 + var10 * 0.0]
2,554 | import math
def softmax(x):
    """Numerically stable softmax over a list of raw scores."""
    shift = max(x)
    exps = [math.exp(v - shift) for v in x]
    total = sum(exps)
    return [e / total for e in exps]


def score(input):
    """Boosted-trees classifier: sum two tree outputs per class, then softmax."""
    # Class 0 trees.
    var0 = -0.21995015 if input[2] >= 2.45 else 0.4302439
    var1 = -0.19691855 if input[2] >= 2.45 else 0.29493433
    # Class 1 trees.
    if input[2] >= 2.45:
        var2 = -0.20051816 if input[3] >= 1.75 else 0.36912444
        var3 = -0.14888482 if input[2] >= 4.8500004 else 0.2796613
    else:
        var2 = -0.21512198
        var3 = -0.19143805
    # Class 2 trees.
    if input[3] >= 1.6500001:
        var4 = 0.40298507
    elif input[2] >= 4.95:
        var4 = 0.21724138
    else:
        var4 = -0.21974029
    if input[2] >= 4.75:
        var5 = 0.28692952 if input[3] >= 1.75 else 0.06272897
    else:
        var5 = 0.009899145 if input[3] >= 1.55 else -0.19659369
    return softmax([0.5 + (var0 + var1), 0.5 + (var2 + var3), 0.5 + (var4 + var5)])
2,555 | import math
def softmax(x):
    """Numerically stable softmax: subtract the max before exponentiating."""
    shift = max(x)
    exps = [math.exp(v - shift) for v in x]
    total = sum(exps)
    return [e / total for e in exps]


def score(input):
    """Boosted-trees classifier: two trees per class, softmax over class sums."""
    f0, f1, f2, f3 = input[0], input[1], input[2], input[3]
    # Class 0 trees.
    if f2 > 3.1500000000000004:
        var0 = -1.1986122886681099
    else:
        var0 = -0.8986122886681098 if f1 > 3.35 else -0.9136122886681098
    if f2 > 3.1500000000000004:
        var1 = -0.09503010837903424 if f2 > 4.450000000000001 else -0.09563272415214283
    else:
        var1 = 0.16640323607832397 if f1 > 3.35 else 0.15374604217339707
    # Class 1 trees.
    if f2 > 1.8:
        var2 = -1.2055899476674514 if f3 > 1.6500000000000001 else -0.9500445227622534
    else:
        var2 = -1.2182214705715104
    if f3 > 0.45000000000000007:
        var3 = -0.08146437273923739 if f3 > 1.6500000000000001 else 0.14244886188108738
    else:
        var3 = -0.0950888159264695 if f2 > 1.4500000000000002 else -0.09438233722389686
    # Class 2 trees.
    if f3 > 1.6500000000000001:
        var4 = -0.8824095771015287 if f2 > 5.3500000000000005 else -0.9121126703829481
    else:
        var4 = -1.1277829563828181 if f2 > 4.450000000000001 else -1.1794405099157212
    if f2 > 4.750000000000001:
        var5 = 0.16625543464258166 if f2 > 5.150000000000001 else 0.09608601737074281
    else:
        var5 = -0.09644547407948921 if f0 > 4.950000000000001 else -0.08181864271444342
    return softmax([var0 + var1, var2 + var3, var4 + var5])
2,556 |
def score(input):
    """Linear-model decision values: intercept + weighted feature sum per class."""
    params = [
        (9.700311953536998,
         [-0.4128360473754751, 0.9680426131053453, -2.498310603183548, -1.0723230787022542]),
        (2.1575759475871163,
         [0.5400806228605453, -0.3245383349519669, -0.2034493200950831, -0.9338183426196143]),
        (-11.857887901124615,
         [-0.12724457548509432, -0.6435042781533917, 2.7017599232786216, 2.006141421321863]),
    ]
    decision_values = []
    for intercept, weights in params:
        # Accumulate left-to-right, matching the flat expression this replaces.
        acc = intercept
        for x, w in zip(input, weights):
            acc += x * w
        decision_values.append(acc)
    return decision_values
2,557 |
def score(input):
    """Prediction from a single machine-generated regression tree (13 features)."""
    if input[12] <= 9.724999904632568:
        if input[5] <= 7.437000036239624:
            return 50.0 if input[7] <= 1.4849499464035034 else 26.681034482758605
        return 44.96896551724139
    if input[12] <= 16.085000038146973:
        return 20.284353741496595
    return 14.187142857142863
2,558 |
def score(input):
    """Average of two machine-generated regression trees (13-feature input)."""
    # Tree 0.
    if input[12] <= 9.845000267028809:
        if input[5] <= 6.959500074386597:
            var0 = 25.093162393162395 if input[6] <= 96.20000076293945 else 50.0
        else:
            var0 = 38.074999999999996
    elif input[12] <= 15.074999809265137:
        var0 = 20.518439716312056
    else:
        var0 = 14.451282051282046
    # Tree 1.
    if input[12] <= 9.650000095367432:
        if input[5] <= 7.437000036239624:
            var1 = 50.0 if input[7] <= 1.47284996509552 else 26.7965317919075
        else:
            var1 = 44.21176470588236
    elif input[12] <= 17.980000495910645:
        var1 = 19.645652173913035
    else:
        var1 = 12.791919191919195
    return (var0 + var1) * 0.5
2,559 | import math
def score(input):
return 25.346480984077544 + math.exp(-0.0000036459736698188483 * (math.pow(16.8118 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.7 - input[4], 2.0) + math.pow(5.277 - input[5], 2.0) + math.pow(98.1 - input[6], 2.0) + math.pow(1.4261 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(396.9 - input[11], 2.0) + math.pow(30.81 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(38.3518 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.693 - input[4], 2.0) + math.pow(5.453 - input[5], 2.0) + math.pow(100.0 - input[6], 2.0) + math.pow(1.4896 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(396.9 - input[11], 2.0) + math.pow(30.59 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.84054 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(8.14 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.538 - input[4], 2.0) + math.pow(5.599 - input[5], 2.0) + math.pow(85.7 - input[6], 2.0) + math.pow(4.4546 - input[7], 2.0) + math.pow(4.0 - input[8], 2.0) + math.pow(307.0 - input[9], 2.0) + math.pow(21.0 - input[10], 2.0) + math.pow(303.42 - input[11], 2.0) + math.pow(16.51 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(1.15172 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(8.14 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.538 - input[4], 2.0) + math.pow(5.701 - input[5], 2.0) + math.pow(95.0 - input[6], 2.0) + math.pow(3.7872 - input[7], 2.0) + math.pow(4.0 - input[8], 2.0) + math.pow(307.0 - input[9], 2.0) + math.pow(21.0 - input[10], 2.0) + math.pow(358.77 - input[11], 2.0) + math.pow(18.35 - input[12], 2.0))) * -1.0 + 
math.exp(-0.0000036459736698188483 * (math.pow(24.8017 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.693 - input[4], 2.0) + math.pow(5.349 - input[5], 2.0) + math.pow(96.0 - input[6], 2.0) + math.pow(1.7028 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(396.9 - input[11], 2.0) + math.pow(19.77 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(41.5292 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.693 - input[4], 2.0) + math.pow(5.531 - input[5], 2.0) + math.pow(85.4 - input[6], 2.0) + math.pow(1.6074 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(329.46 - input[11], 2.0) + math.pow(27.38 - input[12], 2.0))) * -0.3490103966325617 + math.exp(-0.0000036459736698188483 * (math.pow(0.38735 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(25.65 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.581 - input[4], 2.0) + math.pow(5.613 - input[5], 2.0) + math.pow(95.6 - input[6], 2.0) + math.pow(1.7572 - input[7], 2.0) + math.pow(2.0 - input[8], 2.0) + math.pow(188.0 - input[9], 2.0) + math.pow(19.1 - input[10], 2.0) + math.pow(359.29 - input[11], 2.0) + math.pow(27.26 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.05602 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(2.46 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.488 - input[4], 2.0) + math.pow(7.831 - input[5], 2.0) + math.pow(53.6 - input[6], 2.0) + math.pow(3.1992 - input[7], 2.0) + math.pow(3.0 - input[8], 2.0) + math.pow(193.0 - input[9], 2.0) + math.pow(17.8 - input[10], 2.0) + math.pow(392.63 - input[11], 2.0) + math.pow(4.45 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * 
(math.pow(25.0461 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.693 - input[4], 2.0) + math.pow(5.987 - input[5], 2.0) + math.pow(100.0 - input[6], 2.0) + math.pow(1.5888 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(396.9 - input[11], 2.0) + math.pow(26.77 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(8.26725 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(1.0 - input[3], 2.0) + math.pow(0.668 - input[4], 2.0) + math.pow(5.875 - input[5], 2.0) + math.pow(89.6 - input[6], 2.0) + math.pow(1.1296 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(347.88 - input[11], 2.0) + math.pow(8.88 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(5.66998 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(1.0 - input[3], 2.0) + math.pow(0.631 - input[4], 2.0) + math.pow(6.683 - input[5], 2.0) + math.pow(96.8 - input[6], 2.0) + math.pow(1.3567 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(375.33 - input[11], 2.0) + math.pow(3.73 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(1.51902 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(19.58 - input[2], 2.0) + math.pow(1.0 - input[3], 2.0) + math.pow(0.605 - input[4], 2.0) + math.pow(8.375 - input[5], 2.0) + math.pow(93.9 - input[6], 2.0) + math.pow(2.162 - input[7], 2.0) + math.pow(5.0 - input[8], 2.0) + math.pow(403.0 - input[9], 2.0) + math.pow(14.7 - input[10], 2.0) + math.pow(388.45 - input[11], 2.0) + math.pow(3.32 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.29819 - input[0], 2.0) + math.pow(0.0 - 
input[1], 2.0) + math.pow(6.2 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.504 - input[4], 2.0) + math.pow(7.686 - input[5], 2.0) + math.pow(17.0 - input[6], 2.0) + math.pow(3.3751 - input[7], 2.0) + math.pow(8.0 - input[8], 2.0) + math.pow(307.0 - input[9], 2.0) + math.pow(17.4 - input[10], 2.0) + math.pow(377.51 - input[11], 2.0) + math.pow(3.92 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(3.32105 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(19.58 - input[2], 2.0) + math.pow(1.0 - input[3], 2.0) + math.pow(0.871 - input[4], 2.0) + math.pow(5.403 - input[5], 2.0) + math.pow(100.0 - input[6], 2.0) + math.pow(1.3216 - input[7], 2.0) + math.pow(5.0 - input[8], 2.0) + math.pow(403.0 - input[9], 2.0) + math.pow(14.7 - input[10], 2.0) + math.pow(396.9 - input[11], 2.0) + math.pow(26.82 - input[12], 2.0))) * -0.400989603367655 + math.exp(-0.0000036459736698188483 * (math.pow(0.61154 - input[0], 2.0) + math.pow(20.0 - input[1], 2.0) + math.pow(3.97 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.647 - input[4], 2.0) + math.pow(8.704 - input[5], 2.0) + math.pow(86.9 - input[6], 2.0) + math.pow(1.801 - input[7], 2.0) + math.pow(5.0 - input[8], 2.0) + math.pow(264.0 - input[9], 2.0) + math.pow(13.0 - input[10], 2.0) + math.pow(389.7 - input[11], 2.0) + math.pow(5.12 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.02009 - input[0], 2.0) + math.pow(95.0 - input[1], 2.0) + math.pow(2.68 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.4161 - input[4], 2.0) + math.pow(8.034 - input[5], 2.0) + math.pow(31.9 - input[6], 2.0) + math.pow(5.118 - input[7], 2.0) + math.pow(4.0 - input[8], 2.0) + math.pow(224.0 - input[9], 2.0) + math.pow(14.7 - input[10], 2.0) + math.pow(390.55 - input[11], 2.0) + math.pow(2.88 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.08187 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(2.89 - input[2], 
2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.445 - input[4], 2.0) + math.pow(7.82 - input[5], 2.0) + math.pow(36.9 - input[6], 2.0) + math.pow(3.4952 - input[7], 2.0) + math.pow(2.0 - input[8], 2.0) + math.pow(276.0 - input[9], 2.0) + math.pow(18.0 - input[10], 2.0) + math.pow(393.53 - input[11], 2.0) + math.pow(3.57 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.57834 - input[0], 2.0) + math.pow(20.0 - input[1], 2.0) + math.pow(3.97 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.575 - input[4], 2.0) + math.pow(8.297 - input[5], 2.0) + math.pow(67.0 - input[6], 2.0) + math.pow(2.4216 - input[7], 2.0) + math.pow(5.0 - input[8], 2.0) + math.pow(264.0 - input[9], 2.0) + math.pow(13.0 - input[10], 2.0) + math.pow(384.54 - input[11], 2.0) + math.pow(7.44 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(1.35472 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(8.14 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.538 - input[4], 2.0) + math.pow(6.072 - input[5], 2.0) + math.pow(100.0 - input[6], 2.0) + math.pow(4.175 - input[7], 2.0) + math.pow(4.0 - input[8], 2.0) + math.pow(307.0 - input[9], 2.0) + math.pow(21.0 - input[10], 2.0) + math.pow(376.73 - input[11], 2.0) + math.pow(13.04 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.52693 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(6.2 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.504 - input[4], 2.0) + math.pow(8.725 - input[5], 2.0) + math.pow(83.0 - input[6], 2.0) + math.pow(2.8944 - input[7], 2.0) + math.pow(8.0 - input[8], 2.0) + math.pow(307.0 - input[9], 2.0) + math.pow(17.4 - input[10], 2.0) + math.pow(382.0 - input[11], 2.0) + math.pow(4.63 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.33147 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(6.2 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.507 - 
input[4], 2.0) + math.pow(8.247 - input[5], 2.0) + math.pow(70.4 - input[6], 2.0) + math.pow(3.6519 - input[7], 2.0) + math.pow(8.0 - input[8], 2.0) + math.pow(307.0 - input[9], 2.0) + math.pow(17.4 - input[10], 2.0) + math.pow(378.95 - input[11], 2.0) + math.pow(3.95 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(1.13081 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(8.14 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.538 - input[4], 2.0) + math.pow(5.713 - input[5], 2.0) + math.pow(94.1 - input[6], 2.0) + math.pow(4.233 - input[7], 2.0) + math.pow(4.0 - input[8], 2.0) + math.pow(307.0 - input[9], 2.0) + math.pow(21.0 - input[10], 2.0) + math.pow(360.17 - input[11], 2.0) + math.pow(22.6 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(4.89822 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.631 - input[4], 2.0) + math.pow(4.97 - input[5], 2.0) + math.pow(100.0 - input[6], 2.0) + math.pow(1.3325 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(375.52 - input[11], 2.0) + math.pow(3.26 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(1.25179 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(8.14 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.538 - input[4], 2.0) + math.pow(5.57 - input[5], 2.0) + math.pow(98.1 - input[6], 2.0) + math.pow(3.7979 - input[7], 2.0) + math.pow(4.0 - input[8], 2.0) + math.pow(307.0 - input[9], 2.0) + math.pow(21.0 - input[10], 2.0) + math.pow(376.57 - input[11], 2.0) + math.pow(21.02 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.06129 - input[0], 2.0) + math.pow(20.0 - input[1], 2.0) + math.pow(3.33 - input[2], 2.0) + math.pow(1.0 - input[3], 2.0) + math.pow(0.4429 - input[4], 2.0) + math.pow(7.645 - input[5], 2.0) + 
math.pow(49.7 - input[6], 2.0) + math.pow(5.2119 - input[7], 2.0) + math.pow(5.0 - input[8], 2.0) + math.pow(216.0 - input[9], 2.0) + math.pow(14.9 - input[10], 2.0) + math.pow(377.07 - input[11], 2.0) + math.pow(3.01 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(9.2323 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.631 - input[4], 2.0) + math.pow(6.216 - input[5], 2.0) + math.pow(100.0 - input[6], 2.0) + math.pow(1.1691 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(366.15 - input[11], 2.0) + math.pow(9.53 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(2.77974 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(19.58 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.871 - input[4], 2.0) + math.pow(4.903 - input[5], 2.0) + math.pow(97.8 - input[6], 2.0) + math.pow(1.3459 - input[7], 2.0) + math.pow(5.0 - input[8], 2.0) + math.pow(403.0 - input[9], 2.0) + math.pow(14.7 - input[10], 2.0) + math.pow(396.9 - input[11], 2.0) + math.pow(29.29 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.01381 - input[0], 2.0) + math.pow(80.0 - input[1], 2.0) + math.pow(0.46 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.422 - input[4], 2.0) + math.pow(7.875 - input[5], 2.0) + math.pow(32.0 - input[6], 2.0) + math.pow(5.6484 - input[7], 2.0) + math.pow(4.0 - input[8], 2.0) + math.pow(255.0 - input[9], 2.0) + math.pow(14.4 - input[10], 2.0) + math.pow(394.23 - input[11], 2.0) + math.pow(2.97 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.01538 - input[0], 2.0) + math.pow(90.0 - input[1], 2.0) + math.pow(3.75 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.394 - input[4], 2.0) + math.pow(7.454 - input[5], 2.0) + math.pow(34.2 - input[6], 2.0) + math.pow(6.3361 - 
input[7], 2.0) + math.pow(3.0 - input[8], 2.0) + math.pow(244.0 - input[9], 2.0) + math.pow(15.9 - input[10], 2.0) + math.pow(386.34 - input[11], 2.0) + math.pow(3.11 - input[12], 2.0))) * 0.7500000000002167 + math.exp(-0.0000036459736698188483 * (math.pow(1.38799 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(8.14 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.538 - input[4], 2.0) + math.pow(5.95 - input[5], 2.0) + math.pow(82.0 - input[6], 2.0) + math.pow(3.99 - input[7], 2.0) + math.pow(4.0 - input[8], 2.0) + math.pow(307.0 - input[9], 2.0) + math.pow(21.0 - input[10], 2.0) + math.pow(232.6 - input[11], 2.0) + math.pow(27.71 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(1.83377 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(19.58 - input[2], 2.0) + math.pow(1.0 - input[3], 2.0) + math.pow(0.605 - input[4], 2.0) + math.pow(7.802 - input[5], 2.0) + math.pow(98.2 - input[6], 2.0) + math.pow(2.0407 - input[7], 2.0) + math.pow(5.0 - input[8], 2.0) + math.pow(403.0 - input[9], 2.0) + math.pow(14.7 - input[10], 2.0) + math.pow(389.61 - input[11], 2.0) + math.pow(1.92 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.31533 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(6.2 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.504 - input[4], 2.0) + math.pow(8.266 - input[5], 2.0) + math.pow(78.3 - input[6], 2.0) + math.pow(2.8944 - input[7], 2.0) + math.pow(8.0 - input[8], 2.0) + math.pow(307.0 - input[9], 2.0) + math.pow(17.4 - input[10], 2.0) + math.pow(385.05 - input[11], 2.0) + math.pow(4.14 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(9.91655 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.693 - input[4], 2.0) + math.pow(5.852 - input[5], 2.0) + math.pow(77.8 - input[6], 2.0) + math.pow(1.5004 - input[7], 2.0) + math.pow(24.0 - input[8], 
2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(338.16 - input[11], 2.0) + math.pow(29.97 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.01501 - input[0], 2.0) + math.pow(90.0 - input[1], 2.0) + math.pow(1.21 - input[2], 2.0) + math.pow(1.0 - input[3], 2.0) + math.pow(0.401 - input[4], 2.0) + math.pow(7.923 - input[5], 2.0) + math.pow(24.8 - input[6], 2.0) + math.pow(5.885 - input[7], 2.0) + math.pow(1.0 - input[8], 2.0) + math.pow(198.0 - input[9], 2.0) + math.pow(13.6 - input[10], 2.0) + math.pow(395.52 - input[11], 2.0) + math.pow(3.16 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.25387 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(6.91 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.448 - input[4], 2.0) + math.pow(5.399 - input[5], 2.0) + math.pow(95.3 - input[6], 2.0) + math.pow(5.87 - input[7], 2.0) + math.pow(3.0 - input[8], 2.0) + math.pow(233.0 - input[9], 2.0) + math.pow(17.9 - input[10], 2.0) + math.pow(396.9 - input[11], 2.0) + math.pow(30.81 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(14.2362 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.693 - input[4], 2.0) + math.pow(6.343 - input[5], 2.0) + math.pow(100.0 - input[6], 2.0) + math.pow(1.5741 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(396.9 - input[11], 2.0) + math.pow(20.32 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(22.5971 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.7 - input[4], 2.0) + math.pow(5.0 - input[5], 2.0) + math.pow(89.5 - input[6], 2.0) + math.pow(1.5184 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - 
input[10], 2.0) + math.pow(396.9 - input[11], 2.0) + math.pow(31.99 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(67.9208 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.693 - input[4], 2.0) + math.pow(5.683 - input[5], 2.0) + math.pow(100.0 - input[6], 2.0) + math.pow(1.4254 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(384.97 - input[11], 2.0) + math.pow(22.98 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(1.61282 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(8.14 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.538 - input[4], 2.0) + math.pow(6.096 - input[5], 2.0) + math.pow(96.9 - input[6], 2.0) + math.pow(3.7598 - input[7], 2.0) + math.pow(4.0 - input[8], 2.0) + math.pow(307.0 - input[9], 2.0) + math.pow(21.0 - input[10], 2.0) + math.pow(248.31 - input[11], 2.0) + math.pow(20.34 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(1.46336 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(19.58 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.605 - input[4], 2.0) + math.pow(7.489 - input[5], 2.0) + math.pow(90.8 - input[6], 2.0) + math.pow(1.9709 - input[7], 2.0) + math.pow(5.0 - input[8], 2.0) + math.pow(403.0 - input[9], 2.0) + math.pow(14.7 - input[10], 2.0) + math.pow(374.43 - input[11], 2.0) + math.pow(1.73 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(7.67202 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.693 - input[4], 2.0) + math.pow(5.747 - input[5], 2.0) + math.pow(98.9 - input[6], 2.0) + math.pow(1.6334 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(393.1 - input[11], 2.0) 
+ math.pow(19.92 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(2.01019 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(19.58 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.605 - input[4], 2.0) + math.pow(7.929 - input[5], 2.0) + math.pow(96.2 - input[6], 2.0) + math.pow(2.0459 - input[7], 2.0) + math.pow(5.0 - input[8], 2.0) + math.pow(403.0 - input[9], 2.0) + math.pow(14.7 - input[10], 2.0) + math.pow(369.3 - input[11], 2.0) + math.pow(3.7 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(45.7461 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.693 - input[4], 2.0) + math.pow(4.519 - input[5], 2.0) + math.pow(100.0 - input[6], 2.0) + math.pow(1.6582 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(88.27 - input[11], 2.0) + math.pow(36.98 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.03578 - input[0], 2.0) + math.pow(20.0 - input[1], 2.0) + math.pow(3.33 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.4429 - input[4], 2.0) + math.pow(7.82 - input[5], 2.0) + math.pow(64.5 - input[6], 2.0) + math.pow(4.6947 - input[7], 2.0) + math.pow(5.0 - input[8], 2.0) + math.pow(216.0 - input[9], 2.0) + math.pow(14.9 - input[10], 2.0) + math.pow(387.31 - input[11], 2.0) + math.pow(3.76 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.18337 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(27.74 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.609 - input[4], 2.0) + math.pow(5.414 - input[5], 2.0) + math.pow(98.3 - input[6], 2.0) + math.pow(1.7554 - input[7], 2.0) + math.pow(4.0 - input[8], 2.0) + math.pow(711.0 - input[9], 2.0) + math.pow(20.1 - input[10], 2.0) + math.pow(344.05 - input[11], 2.0) + math.pow(23.97 - input[12], 2.0))) * -1.0 + 
math.exp(-0.0000036459736698188483 * (math.pow(6.53876 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(1.0 - input[3], 2.0) + math.pow(0.631 - input[4], 2.0) + math.pow(7.016 - input[5], 2.0) + math.pow(97.5 - input[6], 2.0) + math.pow(1.2024 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(392.05 - input[11], 2.0) + math.pow(2.96 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(1.22358 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(19.58 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.605 - input[4], 2.0) + math.pow(6.943 - input[5], 2.0) + math.pow(97.4 - input[6], 2.0) + math.pow(1.8773 - input[7], 2.0) + math.pow(5.0 - input[8], 2.0) + math.pow(403.0 - input[9], 2.0) + math.pow(14.7 - input[10], 2.0) + math.pow(363.43 - input[11], 2.0) + math.pow(4.59 - input[12], 2.0))) * 1.0 + math.exp(-0.0000036459736698188483 * (math.pow(10.8342 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.679 - input[4], 2.0) + math.pow(6.782 - input[5], 2.0) + math.pow(90.8 - input[6], 2.0) + math.pow(1.8195 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(21.57 - input[11], 2.0) + math.pow(25.79 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.98843 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(8.14 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.538 - input[4], 2.0) + math.pow(5.813 - input[5], 2.0) + math.pow(100.0 - input[6], 2.0) + math.pow(4.0952 - input[7], 2.0) + math.pow(4.0 - input[8], 2.0) + math.pow(307.0 - input[9], 2.0) + math.pow(21.0 - input[10], 2.0) + math.pow(394.54 - input[11], 2.0) + math.pow(19.88 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * 
(math.pow(18.0846 - input[0], 2.0) + math.pow(0.0 - input[1], 2.0) + math.pow(18.1 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.679 - input[4], 2.0) + math.pow(6.434 - input[5], 2.0) + math.pow(100.0 - input[6], 2.0) + math.pow(1.8347 - input[7], 2.0) + math.pow(24.0 - input[8], 2.0) + math.pow(666.0 - input[9], 2.0) + math.pow(20.2 - input[10], 2.0) + math.pow(27.25 - input[11], 2.0) + math.pow(29.05 - input[12], 2.0))) * -1.0 + math.exp(-0.0000036459736698188483 * (math.pow(0.0351 - input[0], 2.0) + math.pow(95.0 - input[1], 2.0) + math.pow(2.68 - input[2], 2.0) + math.pow(0.0 - input[3], 2.0) + math.pow(0.4161 - input[4], 2.0) + math.pow(7.853 - input[5], 2.0) + math.pow(33.2 - input[6], 2.0) + math.pow(5.118 - input[7], 2.0) + math.pow(4.0 - input[8], 2.0) + math.pow(224.0 - input[9], 2.0) + math.pow(14.7 - input[10], 2.0) + math.pow(392.78 - input[11], 2.0) + math.pow(3.81 - input[12], 2.0))) * 1.0 | null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.