Dataset schema:

| column | dtype | range |
|---|---|---|
| id | int64 | 0 – 458k |
| file_name | string | lengths 4 – 119 |
| file_path | string | lengths 14 – 227 |
| content | string | lengths 24 – 9.96M |
| size | int64 | 24 – 9.96M |
| language | string | 1 class |
| extension | string | 14 classes |
| total_lines | int64 | 1 – 219k |
| avg_line_length | float64 | 2.52 – 4.63M |
| max_line_length | int64 | 5 – 9.91M |
| alphanum_fraction | float64 | 0 – 1 |
| repo_name | string | lengths 7 – 101 |
| repo_stars | int64 | 100 – 139k |
| repo_forks | int64 | 0 – 26.4k |
| repo_open_issues | int64 | 0 – 2.27k |
| repo_license | string | 12 classes |
| repo_extraction_date | string | 433 classes |
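The rows below follow this schema. As a minimal sketch, rows with this layout can be read through the Hugging Face `datasets` library; the dataset identifier used here is a placeholder, since the dump does not name the dataset.

```python
# Hypothetical loading sketch: "org/python-code-dump" is a placeholder
# identifier, not the real dataset path.
from datasets import load_dataset

ds = load_dataset("org/python-code-dump", split="train", streaming=True)
for row in ds.take(3):
    print(row["id"], row["file_path"], row["size"], row["repo_name"])
```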
id: 13,100 | file_name: executor.py | file_path: ansible_ansible/test/lib/ansible_test/_internal/executor.py
"""Execute Ansible tests."""
from __future__ import annotations
import typing as t
from .io import (
read_text_file,
)
from .util import (
ApplicationWarning,
display,
)
from .ci import (
get_ci_provider,
)
from .classification import (
categorize_changes,
)
from .config import (
TestConfig,
)
from .metadata import (
ChangeDescription,
)
from .provisioning import (
HostState,
)
def get_changes_filter(args: TestConfig) -> list[str]:
"""Return a list of targets which should be tested based on the changes made."""
paths = detect_changes(args)
if not args.metadata.change_description:
if paths:
changes = categorize_changes(args, paths, args.command)
else:
changes = ChangeDescription()
args.metadata.change_description = changes
if paths is None:
return [] # change detection not enabled, do not filter targets
if not paths:
raise NoChangesDetected()
if args.metadata.change_description.targets is None:
raise NoTestsForChanges()
return args.metadata.change_description.targets
def detect_changes(args: TestConfig) -> t.Optional[list[str]]:
"""Return a list of changed paths."""
if args.changed:
paths = get_ci_provider().detect_changes(args)
elif args.changed_from or args.changed_path:
paths = args.changed_path or []
if args.changed_from:
paths += read_text_file(args.changed_from).splitlines()
else:
return None # change detection not enabled
if paths is None:
return None # act as though change detection not enabled, do not filter targets
display.info('Detected changes in %d file(s).' % len(paths))
for path in paths:
display.info(path, verbosity=1)
return paths
class NoChangesDetected(ApplicationWarning):
"""Exception when change detection was performed, but no changes were found."""
def __init__(self) -> None:
super().__init__('No changes detected.')
class NoTestsForChanges(ApplicationWarning):
"""Exception when changes detected, but no tests trigger as a result."""
def __init__(self) -> None:
super().__init__('No tests found for detected changes.')
class Delegate(Exception):
"""Trigger command delegation."""
def __init__(self, host_state: HostState, exclude: t.Optional[list[str]] = None, require: t.Optional[list[str]] = None) -> None:
super().__init__()
self.host_state = host_state
self.exclude = exclude or []
self.require = require or []
class ListTargets(Exception):
"""List integration test targets instead of executing them."""
def __init__(self, target_names: list[str]) -> None:
super().__init__()
self.target_names = target_names
class AllTargetsSkipped(ApplicationWarning):
"""All targets skipped."""
def __init__(self) -> None:
super().__init__('All targets skipped.')
size: 2,959 | language: Python | extension: .py | total_lines: 81 | avg_line_length: 30.901235 | max_line_length: 108 | alphanum_fraction: 0.674181 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
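The control flow of `get_changes_filter` in executor.py above reduces to three outcomes: detection disabled means no filtering, no changed paths stops with a warning, and changed paths without matching targets likewise stops. A self-contained sketch, with plain exceptions standing in for the ansible-test internals (`NoChangesDetected`, `NoTestsForChanges`):

```python
# Standalone sketch of the get_changes_filter decision flow; the stubs
# below replace ansible-test internals and are not part of the real API.
import typing as t


def changes_filter(paths: t.Optional[list[str]], targets: t.Optional[list[str]]) -> list[str]:
    """Mirror of get_changes_filter: paths=None means change detection is disabled."""
    if paths is None:
        return []  # change detection not enabled, do not filter targets
    if not paths:
        raise RuntimeError('No changes detected.')  # stands in for NoChangesDetected
    if targets is None:
        raise RuntimeError('No tests found for detected changes.')  # stands in for NoTestsForChanges
    return targets


print(changes_filter(None, None))                     # [] -> run everything
print(changes_filter(['lib/x.py'], ['unit/test_x']))  # ['unit/test_x']
```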
id: 13,101 | file_name: junit_xml.py | file_path: ansible_ansible/test/lib/ansible_test/_internal/junit_xml.py
"""
Dataclasses for creating JUnit XML files.
See: https://github.com/junit-team/junit5/blob/main/platform-tests/src/test/resources/jenkins-junit.xsd
"""
from __future__ import annotations
import abc
import dataclasses
import datetime
import decimal
from xml.dom import minidom
# noinspection PyPep8Naming
from xml.etree import ElementTree as ET
@dataclasses.dataclass
class TestResult(metaclass=abc.ABCMeta):
"""Base class for the result of a test case."""
output: str | None = None
message: str | None = None
type: str | None = None
def __post_init__(self):
if self.type is None:
self.type = self.tag
@property
@abc.abstractmethod
def tag(self) -> str:
"""Tag name for the XML element created by this result type."""
def get_attributes(self) -> dict[str, str]:
"""Return a dictionary of attributes for this instance."""
return _attributes(
message=self.message,
type=self.type,
)
def get_xml_element(self) -> ET.Element:
"""Return an XML element representing this instance."""
element = ET.Element(self.tag, self.get_attributes())
element.text = self.output
return element
@dataclasses.dataclass
class TestFailure(TestResult):
"""Failure info for a test case."""
@property
def tag(self) -> str:
"""Tag name for the XML element created by this result type."""
return 'failure'
@dataclasses.dataclass
class TestError(TestResult):
"""Error info for a test case."""
@property
def tag(self) -> str:
"""Tag name for the XML element created by this result type."""
return 'error'
@dataclasses.dataclass
class TestCase:
"""An individual test case."""
name: str
assertions: int | None = None
classname: str | None = None
status: str | None = None
time: decimal.Decimal | None = None
errors: list[TestError] = dataclasses.field(default_factory=list)
failures: list[TestFailure] = dataclasses.field(default_factory=list)
skipped: str | None = None
system_out: str | None = None
system_err: str | None = None
is_disabled: bool = False
@property
def is_failure(self) -> bool:
"""True if the test case contains failure info."""
return bool(self.failures)
@property
def is_error(self) -> bool:
"""True if the test case contains error info."""
return bool(self.errors)
@property
def is_skipped(self) -> bool:
"""True if the test case was skipped."""
return bool(self.skipped)
def get_attributes(self) -> dict[str, str]:
"""Return a dictionary of attributes for this instance."""
return _attributes(
assertions=self.assertions,
classname=self.classname,
name=self.name,
status=self.status,
time=self.time,
)
def get_xml_element(self) -> ET.Element:
"""Return an XML element representing this instance."""
element = ET.Element('testcase', self.get_attributes())
if self.skipped:
ET.SubElement(element, 'skipped').text = self.skipped
element.extend([error.get_xml_element() for error in self.errors])
element.extend([failure.get_xml_element() for failure in self.failures])
if self.system_out:
ET.SubElement(element, 'system-out').text = self.system_out
if self.system_err:
ET.SubElement(element, 'system-err').text = self.system_err
return element
@dataclasses.dataclass
class TestSuite:
"""A collection of test cases."""
name: str
hostname: str | None = None
id: str | None = None
package: str | None = None
timestamp: datetime.datetime | None = None
properties: dict[str, str] = dataclasses.field(default_factory=dict)
cases: list[TestCase] = dataclasses.field(default_factory=list)
system_out: str | None = None
system_err: str | None = None
def __post_init__(self):
if self.timestamp and self.timestamp.tzinfo != datetime.timezone.utc:
raise ValueError(f'timestamp.tzinfo must be {datetime.timezone.utc!r}')
@property
def disabled(self) -> int:
"""The number of disabled test cases."""
return sum(case.is_disabled for case in self.cases)
@property
def errors(self) -> int:
"""The number of test cases containing error info."""
return sum(case.is_error for case in self.cases)
@property
def failures(self) -> int:
"""The number of test cases containing failure info."""
return sum(case.is_failure for case in self.cases)
@property
def skipped(self) -> int:
"""The number of test cases containing skipped info."""
return sum(case.is_skipped for case in self.cases)
@property
def tests(self) -> int:
"""The number of test cases."""
return len(self.cases)
@property
def time(self) -> decimal.Decimal:
"""The total time from all test cases."""
return decimal.Decimal(sum(case.time for case in self.cases if case.time))
def get_attributes(self) -> dict[str, str]:
"""Return a dictionary of attributes for this instance."""
return _attributes(
disabled=self.disabled,
errors=self.errors,
failures=self.failures,
hostname=self.hostname,
id=self.id,
name=self.name,
package=self.package,
skipped=self.skipped,
tests=self.tests,
time=self.time,
timestamp=self.timestamp.replace(tzinfo=None).isoformat(timespec='seconds') if self.timestamp else None,
)
def get_xml_element(self) -> ET.Element:
"""Return an XML element representing this instance."""
element = ET.Element('testsuite', self.get_attributes())
if self.properties:
ET.SubElement(element, 'properties').extend([ET.Element('property', dict(name=name, value=value)) for name, value in self.properties.items()])
element.extend([test_case.get_xml_element() for test_case in self.cases])
if self.system_out:
ET.SubElement(element, 'system-out').text = self.system_out
if self.system_err:
ET.SubElement(element, 'system-err').text = self.system_err
return element
@dataclasses.dataclass
class TestSuites:
"""A collection of test suites."""
name: str | None = None
suites: list[TestSuite] = dataclasses.field(default_factory=list)
@property
def disabled(self) -> int:
"""The number of disabled test cases."""
return sum(suite.disabled for suite in self.suites)
@property
def errors(self) -> int:
"""The number of test cases containing error info."""
return sum(suite.errors for suite in self.suites)
@property
def failures(self) -> int:
"""The number of test cases containing failure info."""
return sum(suite.failures for suite in self.suites)
@property
def tests(self) -> int:
"""The number of test cases."""
return sum(suite.tests for suite in self.suites)
@property
def time(self) -> decimal.Decimal:
"""The total time from all test cases."""
return decimal.Decimal(sum(suite.time for suite in self.suites))
def get_attributes(self) -> dict[str, str]:
"""Return a dictionary of attributes for this instance."""
return _attributes(
disabled=self.disabled,
errors=self.errors,
failures=self.failures,
name=self.name,
tests=self.tests,
time=self.time,
)
def get_xml_element(self) -> ET.Element:
"""Return an XML element representing this instance."""
element = ET.Element('testsuites', self.get_attributes())
element.extend([suite.get_xml_element() for suite in self.suites])
return element
def to_pretty_xml(self) -> str:
"""Return a pretty formatted XML string representing this instance."""
return _pretty_xml(self.get_xml_element())
def _attributes(**kwargs) -> dict[str, str]:
"""Return the given kwargs as a dictionary with values converted to strings. Items with a value of None will be omitted."""
return {key: str(value) for key, value in kwargs.items() if value is not None}
def _pretty_xml(element: ET.Element) -> str:
"""Return a pretty formatted XML string representing the given element."""
return minidom.parseString(ET.tostring(element, encoding='unicode')).toprettyxml()
size: 8,671 | language: Python | extension: .py | total_lines: 211 | avg_line_length: 33.691943 | max_line_length: 154 | alphanum_fraction: 0.65078 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
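The dataclasses in junit_xml.py above serialize to a conventional JUnit XML tree. This standard-library-only sketch builds the same shape directly, mirroring what `get_xml_element` and `to_pretty_xml` emit for a single failing case:

```python
# Sketch of the XML shape produced by TestSuites.to_pretty_xml, built
# directly with ElementTree/minidom instead of the dataclasses above.
from xml.dom import minidom
from xml.etree import ElementTree as ET

failure = ET.Element('failure', {'message': 'assertion failed', 'type': 'failure'})
failure.text = 'expected 1, got 2'

case = ET.Element('testcase', {'classname': 'demo', 'name': 'test_math'})
case.append(failure)

suite = ET.Element('testsuite', {'name': 'ansible-test', 'tests': '1', 'failures': '1'})
suite.append(case)

suites = ET.Element('testsuites', {'tests': '1', 'failures': '1'})
suites.append(suite)

print(minidom.parseString(ET.tostring(suites, encoding='unicode')).toprettyxml())
```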
id: 13,102 | file_name: ssh.py | file_path: ansible_ansible/test/lib/ansible_test/_internal/ssh.py
"""High level functions for working with SSH."""
from __future__ import annotations
import dataclasses
import itertools
import json
import os
import random
import re
import subprocess
import shlex
import typing as t
from .encoding import (
to_bytes,
to_text,
)
from .util import (
ApplicationError,
common_environment,
display,
exclude_none_values,
sanitize_host_name,
)
from .config import (
EnvironmentConfig,
)
@dataclasses.dataclass
class SshConnectionDetail:
"""Information needed to establish an SSH connection to a host."""
name: str
host: str
port: t.Optional[int]
user: str
identity_file: str
python_interpreter: t.Optional[str] = None
shell_type: t.Optional[str] = None
enable_rsa_sha1: bool = False
def __post_init__(self):
self.name = sanitize_host_name(self.name)
@property
def options(self) -> dict[str, str]:
"""OpenSSH config options, which can be passed to the `ssh` CLI with the `-o` argument."""
options: dict[str, str] = {}
if self.enable_rsa_sha1:
# Newer OpenSSH clients connecting to older SSH servers must explicitly enable ssh-rsa support.
# OpenSSH 8.8, released on 2021-09-26, deprecated using RSA with the SHA-1 hash algorithm (ssh-rsa).
# OpenSSH 7.2, released on 2016-02-29, added support for using RSA with SHA-256/512 hash algorithms.
# See: https://www.openssh.com/txt/release-8.8
algorithms = '+ssh-rsa' # append the algorithm to the default list, requires OpenSSH 7.0 or later
options.update(
# Host key signature algorithms that the client wants to use.
# Available options can be found with `ssh -Q HostKeyAlgorithms` or `ssh -Q key` on older clients.
# This option was updated in OpenSSH 7.0, released on 2015-08-11, to support the "+" prefix.
# See: https://www.openssh.com/txt/release-7.0
HostKeyAlgorithms=algorithms,
# Signature algorithms that will be used for public key authentication.
# Available options can be found with `ssh -Q PubkeyAcceptedAlgorithms` or `ssh -Q key` on older clients.
# This option was added in OpenSSH 7.0, released on 2015-08-11.
# See: https://www.openssh.com/txt/release-7.0
# This option is an alias for PubkeyAcceptedAlgorithms, which was added in OpenSSH 8.5.
# See: https://www.openssh.com/txt/release-8.5
PubkeyAcceptedKeyTypes=algorithms,
)
return options
class SshProcess:
"""Wrapper around an SSH process."""
def __init__(self, process: t.Optional[subprocess.Popen]) -> None:
self._process = process
self.pending_forwards: t.Optional[list[tuple[str, int]]] = None
self.forwards: dict[tuple[str, int], int] = {}
def terminate(self) -> None:
"""Terminate the SSH process."""
if not self._process:
return # explain mode
# noinspection PyBroadException
try:
self._process.terminate()
except Exception: # pylint: disable=broad-except
pass
def wait(self) -> None:
"""Wait for the SSH process to terminate."""
if not self._process:
return # explain mode
self._process.wait()
def collect_port_forwards(self) -> dict[tuple[str, int], int]:
"""Collect port assignments for dynamic SSH port forwards."""
errors: list[str] = []
display.info('Collecting %d SSH port forward(s).' % len(self.pending_forwards), verbosity=2)
while self.pending_forwards:
if self._process:
line_bytes = self._process.stderr.readline()
if not line_bytes:
if errors:
details = ':\n%s' % '\n'.join(errors)
else:
details = '.'
raise ApplicationError('SSH port forwarding failed%s' % details)
line = to_text(line_bytes).strip()
match = re.search(r'^Allocated port (?P<src_port>[0-9]+) for remote forward to (?P<dst_host>[^:]+):(?P<dst_port>[0-9]+)$', line)
if not match:
if re.search(r'^Warning: Permanently added .* to the list of known hosts\.$', line):
continue
display.warning('Unexpected SSH port forwarding output: %s' % line, verbosity=2)
errors.append(line)
continue
src_port = int(match.group('src_port'))
dst_host = str(match.group('dst_host'))
dst_port = int(match.group('dst_port'))
dst = (dst_host, dst_port)
else:
# explain mode
dst = self.pending_forwards[0]
src_port = random.randint(40000, 50000)
self.pending_forwards.remove(dst)
self.forwards[dst] = src_port
display.info('Collected %d SSH port forward(s):\n%s' % (
len(self.forwards), '\n'.join('%s -> %s:%s' % (src_port, dst[0], dst[1]) for dst, src_port in sorted(self.forwards.items()))), verbosity=2)
return self.forwards
def create_ssh_command(
ssh: SshConnectionDetail,
options: t.Optional[dict[str, t.Union[str, int]]] = None,
cli_args: t.Optional[list[str]] = None,
command: t.Optional[str] = None,
) -> list[str]:
"""Create an SSH command using the specified options."""
cmd = [
'ssh',
'-n', # prevent reading from stdin
'-i', ssh.identity_file, # file from which the identity for public key authentication is read
] # fmt: skip
if not command:
cmd.append('-N') # do not execute a remote command
if ssh.port:
cmd.extend(['-p', str(ssh.port)]) # port to connect to on the remote host
if ssh.user:
cmd.extend(['-l', ssh.user]) # user to log in as on the remote machine
ssh_options: dict[str, t.Union[int, str]] = dict(
BatchMode='yes',
ExitOnForwardFailure='yes',
LogLevel='ERROR',
ServerAliveCountMax=4,
ServerAliveInterval=15,
StrictHostKeyChecking='no',
UserKnownHostsFile='/dev/null',
)
ssh_options.update(options or {})
cmd.extend(ssh_options_to_list(ssh_options))
cmd.extend(cli_args or [])
cmd.append(ssh.host)
if command:
cmd.append(command)
return cmd
def ssh_options_to_list(options: t.Union[dict[str, t.Union[int, str]], dict[str, str]]) -> list[str]:
"""Format a dictionary of SSH options as a list suitable for passing to the `ssh` command."""
return list(itertools.chain.from_iterable(
('-o', f'{key}={value}') for key, value in sorted(options.items())
))
def ssh_options_to_str(options: t.Union[dict[str, t.Union[int, str]], dict[str, str]]) -> str:
"""Format a dictionary of SSH options as a string suitable for passing as `ansible_ssh_extra_args` in inventory."""
return shlex.join(ssh_options_to_list(options))
def run_ssh_command(
args: EnvironmentConfig,
ssh: SshConnectionDetail,
options: t.Optional[dict[str, t.Union[str, int]]] = None,
cli_args: t.Optional[list[str]] = None,
command: t.Optional[str] = None,
) -> SshProcess:
"""Run the specified SSH command, returning the created SshProcess instance created."""
cmd = create_ssh_command(ssh, options, cli_args, command)
env = common_environment()
cmd_show = shlex.join(cmd)
display.info('Run background command: %s' % cmd_show, verbosity=1, truncate=True)
cmd_bytes = [to_bytes(arg) for arg in cmd]
env_bytes = dict((to_bytes(k), to_bytes(v)) for k, v in env.items())
if args.explain:
process = SshProcess(None)
else:
process = SshProcess(subprocess.Popen(cmd_bytes, env=env_bytes, bufsize=-1, # pylint: disable=consider-using-with
stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE))
return process
def create_ssh_port_forwards(
args: EnvironmentConfig,
ssh: SshConnectionDetail,
forwards: list[tuple[str, int]],
) -> SshProcess:
"""
Create SSH port forwards using the provided list of tuples (target_host, target_port).
Port bindings will be automatically assigned by SSH and must be collected with a subsequent call to collect_port_forwards.
"""
options: dict[str, t.Union[str, int]] = dict(
LogLevel='INFO', # info level required to get messages on stderr indicating the ports assigned to each forward
ControlPath='none', # if the user has ControlPath set up for every host, it will prevent creation of forwards
)
cli_args = []
for forward_host, forward_port in forwards:
cli_args.extend(['-R', ':'.join([str(0), forward_host, str(forward_port)])])
process = run_ssh_command(args, ssh, options, cli_args)
process.pending_forwards = forwards
return process
def create_ssh_port_redirects(
args: EnvironmentConfig,
ssh: SshConnectionDetail,
redirects: list[tuple[int, str, int]],
) -> SshProcess:
"""Create SSH port redirections using the provided list of tuples (bind_port, target_host, target_port)."""
options: dict[str, t.Union[str, int]] = {}
cli_args = []
for bind_port, target_host, target_port in redirects:
cli_args.extend(['-R', ':'.join([str(bind_port), target_host, str(target_port)])])
process = run_ssh_command(args, ssh, options, cli_args)
return process
def generate_ssh_inventory(ssh_connections: list[SshConnectionDetail]) -> str:
"""Return an inventory file in JSON format, created from the provided SSH connection details."""
inventory = dict(
all=dict(
hosts=dict((ssh.name, exclude_none_values(dict(
ansible_host=ssh.host,
ansible_port=ssh.port,
ansible_user=ssh.user,
ansible_ssh_private_key_file=os.path.abspath(ssh.identity_file),
ansible_connection='ssh',
ansible_pipelining='yes',
ansible_python_interpreter=ssh.python_interpreter,
ansible_shell_type=ssh.shell_type,
ansible_ssh_extra_args=ssh_options_to_str(dict(UserKnownHostsFile='/dev/null', **ssh.options)), # avoid changing the test environment
ansible_ssh_host_key_checking='no',
))) for ssh in ssh_connections),
),
)
inventory_text = json.dumps(inventory, indent=4, sort_keys=True)
display.info('>>> SSH Inventory\n%s' % inventory_text, verbosity=3)
return inventory_text
size: 10,781 | language: Python | extension: .py | total_lines: 231 | avg_line_length: 37.805195 | max_line_length: 151 | alphanum_fraction: 0.628972 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
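The `-o` option formatting in ssh.py above composes into a full command line as follows; in this sketch the host, port, user, and key path are placeholder values:

```python
# Sketch of ssh_options_to_list plus create_ssh_command's assembly.
# The host, port, user and key path below are placeholders.
import itertools
import shlex

options = dict(
    BatchMode='yes',
    LogLevel='ERROR',
    StrictHostKeyChecking='no',
    UserKnownHostsFile='/dev/null',
)

option_args = list(itertools.chain.from_iterable(
    ('-o', f'{key}={value}') for key, value in sorted(options.items())
))

cmd = ['ssh', '-n', '-i', '/tmp/id_test', '-p', '2222', '-l', 'root', *option_args, 'example.invalid']
print(shlex.join(cmd))
```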
id: 13,103 | file_name: locale_util.py | file_path: ansible_ansible/test/lib/ansible_test/_internal/locale_util.py
"""Initialize locale settings. This must be imported very early in ansible-test startup."""
from __future__ import annotations
import locale
import sys
import typing as t
STANDARD_LOCALE = 'en_US.UTF-8'
"""
The standard locale used by ansible-test and its subprocesses and delegated instances.
"""
FALLBACK_LOCALE = 'C.UTF-8'
"""
The fallback locale to use when the standard locale is not available.
This was added in ansible-core 2.14 to allow testing in environments without the standard locale.
It was not needed in previous ansible-core releases since they do not verify the locale during startup.
"""
class LocaleError(SystemExit):
"""Exception to raise when locale related errors occur."""
def __init__(self, message: str) -> None:
super().__init__(f'ERROR: {message}')
def configure_locale() -> tuple[str, t.Optional[str]]:
"""Configure the locale, returning the selected locale and an optional warning."""
if (fs_encoding := sys.getfilesystemencoding()).lower() != 'utf-8':
raise LocaleError(f'ansible-test requires the filesystem encoding to be UTF-8, but "{fs_encoding}" was detected.')
candidate_locales = STANDARD_LOCALE, FALLBACK_LOCALE
errors: dict[str, str] = {}
warning: t.Optional[str] = None
configured_locale: t.Optional[str] = None
for candidate_locale in candidate_locales:
try:
locale.setlocale(locale.LC_ALL, candidate_locale)
locale.getlocale()
except (locale.Error, ValueError) as ex:
errors[candidate_locale] = str(ex)
else:
configured_locale = candidate_locale
break
if not configured_locale:
raise LocaleError('ansible-test could not initialize a supported locale:\n' +
'\n'.join(f'{key}: {value}' for key, value in errors.items()))
if configured_locale != STANDARD_LOCALE:
warning = (f'Using locale "{configured_locale}" instead of "{STANDARD_LOCALE}". '
'Tests which depend on the locale may behave unexpectedly.')
return configured_locale, warning
CONFIGURED_LOCALE, LOCALE_WARNING = configure_locale()
size: 2,161 | language: Python | extension: .py | total_lines: 44 | avg_line_length: 42.954545 | max_line_length: 122 | alphanum_fraction: 0.696522 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
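The candidate fallback in `configure_locale` from locale_util.py above can be exercised standalone; this sketch walks the same two candidate locales and reports the first one the running system accepts:

```python
# Standalone sketch of the locale fallback in configure_locale above.
import locale

candidates = ('en_US.UTF-8', 'C.UTF-8')
errors: dict[str, str] = {}

for candidate in candidates:
    try:
        locale.setlocale(locale.LC_ALL, candidate)
    except locale.Error as ex:
        errors[candidate] = str(ex)
    else:
        print(f'configured locale: {candidate}')
        break
else:
    print('no supported locale found:', errors)
```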
id: 13,104 | file_name: test.py | file_path: ansible_ansible/test/lib/ansible_test/_internal/test.py
"""Classes for storing and processing test results."""
from __future__ import annotations
import collections.abc as c
import datetime
import typing as t
from .util import (
display,
)
from .util_common import (
get_docs_url,
write_text_test_results,
write_json_test_results,
ResultType,
)
from .metadata import (
Metadata,
)
from .config import (
TestConfig,
)
from . import junit_xml
def calculate_best_confidence(choices: tuple[tuple[str, int], ...], metadata: Metadata) -> int:
"""Return the best confidence value available from the given choices and metadata."""
best_confidence = 0
for path, line in choices:
confidence = calculate_confidence(path, line, metadata)
best_confidence = max(confidence, best_confidence)
return best_confidence
def calculate_confidence(path: str, line: int, metadata: Metadata) -> int:
"""Return the confidence level for a test result associated with the given file path and line number."""
ranges = metadata.changes.get(path)
# no changes were made to the file
if not ranges:
return 0
# changes were made to the same file and line
if any(r[0] <= line <= r[1] for r in ranges):
return 100
# changes were made to the same file and the line number is unknown
if line == 0:
return 75
# changes were made to the same file and the line number is different
return 50
class TestResult:
"""Base class for test results."""
def __init__(self, command: str, test: str, python_version: t.Optional[str] = None) -> None:
self.command = command
self.test = test
self.python_version = python_version
self.name = self.test or self.command
if self.python_version:
self.name += '-python-%s' % self.python_version
def write(self, args: TestConfig) -> None:
"""Write the test results to various locations."""
self.write_console()
self.write_bot(args)
if args.lint:
self.write_lint()
if args.junit:
self.write_junit(args)
def write_console(self) -> None:
"""Write results to console."""
def write_lint(self) -> None:
"""Write lint results to stdout."""
def write_bot(self, args: TestConfig) -> None:
"""Write results to a file for ansibullbot to consume."""
def write_junit(self, args: TestConfig) -> None:
"""Write results to a junit XML file."""
def create_result_name(self, extension: str) -> str:
"""Return the name of the result file using the given extension."""
name = 'ansible-test-%s' % self.command
if self.test:
name += '-%s' % self.test
if self.python_version:
name += '-python-%s' % self.python_version
name += extension
return name
def save_junit(self, args: TestConfig, test_case: junit_xml.TestCase) -> None:
"""Save the given test case results to disk as JUnit XML."""
suites = junit_xml.TestSuites(
suites=[
junit_xml.TestSuite(
name='ansible-test',
cases=[test_case],
timestamp=datetime.datetime.now(tz=datetime.timezone.utc),
),
],
)
report = suites.to_pretty_xml()
if args.explain:
return
write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), report)
class TestTimeout(TestResult):
"""Test timeout."""
def __init__(self, timeout_duration: int | float) -> None:
super().__init__(command='timeout', test='')
self.timeout_duration = timeout_duration
def write(self, args: TestConfig) -> None:
"""Write the test results to various locations."""
message = 'Tests were aborted after exceeding the %d minute time limit.' % self.timeout_duration
# Include a leading newline to improve readability on Shippable "Tests" tab.
# Without this, the first line becomes indented.
output = '''
One or more of the following situations may be responsible:
- Code changes have resulted in tests that hang or run for an excessive amount of time.
- Tests have been added which exceed the time limit when combined with existing tests.
- Test infrastructure and/or external dependencies are operating slower than normal.'''
if args.coverage:
output += '\n- Additional overhead from collecting code coverage has resulted in tests exceeding the time limit.'
output += '\n\nConsult the console log for additional details on where the timeout occurred.'
suites = junit_xml.TestSuites(
suites=[
junit_xml.TestSuite(
name='ansible-test',
timestamp=datetime.datetime.now(tz=datetime.timezone.utc),
cases=[
junit_xml.TestCase(
name='timeout',
classname='timeout',
errors=[
junit_xml.TestError(
message=message,
),
],
),
],
)
],
)
report = suites.to_pretty_xml()
write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), report)
class TestSuccess(TestResult):
"""Test success."""
def write_junit(self, args: TestConfig) -> None:
"""Write results to a junit XML file."""
test_case = junit_xml.TestCase(classname=self.command, name=self.name)
self.save_junit(args, test_case)
class TestSkipped(TestResult):
"""Test skipped."""
def __init__(self, command: str, test: str, python_version: t.Optional[str] = None) -> None:
super().__init__(command, test, python_version)
self.reason: t.Optional[str] = None
def write_console(self) -> None:
"""Write results to console."""
if self.reason:
display.warning(self.reason)
else:
display.info('No tests applicable.', verbosity=1)
def write_junit(self, args: TestConfig) -> None:
"""Write results to a junit XML file."""
test_case = junit_xml.TestCase(
classname=self.command,
name=self.name,
skipped=self.reason or 'No tests applicable.',
)
self.save_junit(args, test_case)
class TestFailure(TestResult):
"""Test failure."""
def __init__(
self,
command: str,
test: str,
python_version: t.Optional[str] = None,
messages: t.Optional[c.Sequence[TestMessage]] = None,
summary: t.Optional[str] = None,
):
super().__init__(command, test, python_version)
if messages:
messages = sorted(messages)
else:
messages = []
self.messages = messages
self.summary = summary
def write(self, args: TestConfig) -> None:
"""Write the test results to various locations."""
if args.metadata.changes:
self.populate_confidence(args.metadata)
super().write(args)
def write_console(self) -> None:
"""Write results to console."""
if self.summary:
display.error(self.summary)
else:
if self.python_version:
specifier = ' on python %s' % self.python_version
else:
specifier = ''
display.error('Found %d %s issue(s)%s which need to be resolved:' % (len(self.messages), self.test or self.command, specifier))
for message in self.messages:
display.error(message.format(show_confidence=True))
doc_url = self.find_docs()
if doc_url:
display.info('See documentation for help: %s' % doc_url)
def write_lint(self) -> None:
"""Write lint results to stdout."""
if self.summary:
command = self.format_command()
message = 'The test `%s` failed. See stderr output for details.' % command
path = ''
message = TestMessage(message, path)
print(message) # display goes to stderr, this should be on stdout
else:
for message in self.messages:
print(message) # display goes to stderr, this should be on stdout
def write_junit(self, args: TestConfig) -> None:
"""Write results to a junit XML file."""
title = self.format_title()
output = self.format_block()
test_case = junit_xml.TestCase(
classname=self.command,
name=self.name,
failures=[
junit_xml.TestFailure(
message=title,
output=output,
),
],
)
self.save_junit(args, test_case)
def write_bot(self, args: TestConfig) -> None:
"""Write results to a file for ansibullbot to consume."""
docs = self.find_docs()
message = self.format_title(help_link=docs)
output = self.format_block()
if self.messages:
verified = all((m.confidence or 0) >= 50 for m in self.messages)
else:
verified = False
bot_data = dict(
verified=verified,
docs=docs,
results=[
dict(
message=message,
output=output,
),
],
)
if args.explain:
return
write_json_test_results(ResultType.BOT, self.create_result_name('.json'), bot_data)
def populate_confidence(self, metadata: Metadata) -> None:
"""Populate test result confidence using the provided metadata."""
for message in self.messages:
if message.confidence is None:
message.confidence = calculate_confidence(message.path, message.line, metadata)
def format_command(self) -> str:
"""Return a string representing the CLI command associated with the test failure."""
command = 'ansible-test %s' % self.command
if self.test:
command += ' --test %s' % self.test
if self.python_version:
command += ' --python %s' % self.python_version
return command
def find_docs(self) -> t.Optional[str]:
"""Return the docs URL for this test or None if there is no docs URL."""
if self.command != 'sanity':
return None # only sanity tests have docs links
filename = f'{self.test}.html' if self.test else ''
url = get_docs_url(f'https://docs.ansible.com/ansible-core/devel/dev_guide/testing/{self.command}/{filename}')
return url
def format_title(self, help_link: t.Optional[str] = None) -> str:
"""Return a string containing a title/heading for this test failure, including an optional help link to explain the test."""
command = self.format_command()
if self.summary:
reason = 'the error'
else:
reason = '1 error' if len(self.messages) == 1 else '%d errors' % len(self.messages)
if help_link:
help_link_markup = ' [[explain](%s)]' % help_link
else:
help_link_markup = ''
title = 'The test `%s`%s failed with %s:' % (command, help_link_markup, reason)
return title
def format_block(self) -> str:
"""Format the test summary or messages as a block of text and return the result."""
if self.summary:
block = self.summary
else:
block = '\n'.join(m.format() for m in self.messages)
message = block.strip()
# Hack to remove ANSI color reset code from SubprocessError messages.
message = message.replace(display.clear, '')
return message
class TestMessage:
"""Single test message for one file."""
def __init__(
self,
message: str,
path: str,
line: int = 0,
column: int = 0,
level: str = 'error',
code: t.Optional[str] = None,
confidence: t.Optional[int] = None,
):
self.__path = path
self.__line = line
self.__column = column
self.__level = level
self.__code = code
self.__message = message
self.confidence = confidence
@property
def path(self) -> str:
"""Return the path."""
return self.__path
@property
def line(self) -> int:
"""Return the line number, or 0 if none is available."""
return self.__line
@property
def column(self) -> int:
"""Return the column number, or 0 if none is available."""
return self.__column
@property
def level(self) -> str:
"""Return the level."""
return self.__level
@property
def code(self) -> t.Optional[str]:
"""Return the code, if any."""
return self.__code
@property
def message(self) -> str:
"""Return the message."""
return self.__message
@property
def tuple(self) -> tuple[str, int, int, str, t.Optional[str], str]:
"""Return a tuple with all the immutable values of this test message."""
return self.__path, self.__line, self.__column, self.__level, self.__code, self.__message
def __lt__(self, other):
return self.tuple < other.tuple
def __le__(self, other):
return self.tuple <= other.tuple
def __eq__(self, other):
return self.tuple == other.tuple
def __ne__(self, other):
return self.tuple != other.tuple
def __gt__(self, other):
return self.tuple > other.tuple
def __ge__(self, other):
return self.tuple >= other.tuple
def __hash__(self):
return hash(self.tuple)
def __str__(self):
return self.format()
def format(self, show_confidence: bool = False) -> str:
"""Return a string representation of this message, optionally including the confidence level."""
if self.__code:
msg = '%s: %s' % (self.__code, self.__message)
else:
msg = self.__message
if show_confidence and self.confidence is not None:
msg += ' (%d%%)' % self.confidence
return '%s:%s:%s: %s' % (self.__path, self.__line, self.__column, msg)
size: 14,515 | language: Python | extension: .py | total_lines: 355 | avg_line_length: 31.171831 | max_line_length: 139 | alphanum_fraction: 0.587666 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
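The scoring in `calculate_confidence` from test.py above maps onto four cases (0, 50, 75, 100); this standalone sketch reimplements the rules with a plain dict standing in for `Metadata.changes`:

```python
# Standalone sketch of calculate_confidence, with a plain dict standing in
# for Metadata.changes (path -> list of (first_line, last_line) ranges).
def confidence(path: str, line: int, changes: dict[str, list[tuple[int, int]]]) -> int:
    ranges = changes.get(path)
    if not ranges:
        return 0     # file was not changed
    if any(first <= line <= last for first, last in ranges):
        return 100   # change on the same line
    if line == 0:
        return 75    # file changed, line number unknown
    return 50        # file changed, different line


changes = {'lib/module.py': [(10, 20)]}
print(confidence('lib/module.py', 15, changes))  # 100
print(confidence('lib/module.py', 0, changes))   # 75
print(confidence('lib/module.py', 99, changes))  # 50
print(confidence('lib/other.py', 15, changes))   # 0
```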
id: 13,105 | file_name: docker_util.py | file_path: ansible_ansible/test/lib/ansible_test/_internal/docker_util.py
"""Functions for accessing docker via the docker cli."""
from __future__ import annotations
import dataclasses
import enum
import json
import os
import pathlib
import re
import socket
import time
import urllib.parse
import typing as t
from .util import (
ApplicationError,
common_environment,
display,
find_executable,
SubprocessError,
cache,
OutputStream,
InternalError,
format_command_output,
)
from .util_common import (
run_command,
raw_command,
)
from .config import (
CommonConfig,
)
from .thread import (
mutex,
named_lock,
)
from .cgroup import (
CGroupEntry,
MountEntry,
MountType,
)
DOCKER_COMMANDS = [
'docker',
'podman',
]
UTILITY_IMAGE = 'quay.io/ansible/ansible-test-utility-container:3.1.0'
# Max number of open files in a docker container.
# Passed with --ulimit option to the docker run command.
MAX_NUM_OPEN_FILES = 10240
# The value of /proc/*/loginuid when it is not set.
# It is a reserved UID, which is the maximum 32-bit unsigned integer value.
# See: https://access.redhat.com/solutions/25404
LOGINUID_NOT_SET = 4294967295
class DockerInfo:
"""The results of `docker info` and `docker version` for the container runtime."""
@classmethod
def init(cls, args: CommonConfig) -> DockerInfo:
"""Initialize and return a DockerInfo instance."""
command = require_docker().command
info_stdout = docker_command(args, ['info', '--format', '{{ json . }}'], capture=True, always=True)[0]
info = json.loads(info_stdout)
if server_errors := info.get('ServerErrors'):
# This can occur when a remote docker instance is in use and the instance is not responding, such as when the system is still starting up.
# In that case an error such as the following may be returned:
# error during connect: Get "http://{hostname}:2375/v1.24/info": dial tcp {ip_address}:2375: connect: no route to host
raise ApplicationError('Unable to get container host information: ' + '\n'.join(server_errors))
version_stdout = docker_command(args, ['version', '--format', '{{ json . }}'], capture=True, always=True)[0]
version = json.loads(version_stdout)
info = DockerInfo(args, command, info, version)
return info
def __init__(self, args: CommonConfig, engine: str, info: dict[str, t.Any], version: dict[str, t.Any]) -> None:
self.args = args
self.engine = engine
self.info = info
self.version = version
@property
def client(self) -> dict[str, t.Any]:
"""The client version details."""
client = self.version.get('Client')
if not client:
raise ApplicationError('Unable to get container host client information.')
return client
@property
def server(self) -> dict[str, t.Any]:
"""The server version details."""
server = self.version.get('Server')
if not server:
if self.engine == 'podman':
# Some Podman versions always report server version info (verified with 1.8.0 and 1.9.3).
# Others do not unless Podman remote is being used.
# To provide consistency, use the client version if the server version isn't provided.
# See: https://github.com/containers/podman/issues/2671#issuecomment-804382934
return self.client
raise ApplicationError('Unable to get container host server information.')
return server
@property
def client_version(self) -> str:
"""The client version."""
return self.client['Version']
@property
def server_version(self) -> str:
"""The server version."""
return self.server['Version']
@property
def client_major_minor_version(self) -> tuple[int, int]:
"""The client major and minor version."""
major, minor = self.client_version.split('.')[:2]
return int(major), int(minor)
@property
def server_major_minor_version(self) -> tuple[int, int]:
"""The server major and minor version."""
major, minor = self.server_version.split('.')[:2]
return int(major), int(minor)
@property
def cgroupns_option_supported(self) -> bool:
"""Return True if the `--cgroupns` option is supported, otherwise return False."""
if self.engine == 'docker':
# Docker added support for the `--cgroupns` option in version 20.10.
# Both the client and server must support the option to use it.
# See: https://docs.docker.com/engine/release-notes/#20100
return self.client_major_minor_version >= (20, 10) and self.server_major_minor_version >= (20, 10)
raise NotImplementedError(self.engine)
@property
def cgroup_version(self) -> int:
"""The cgroup version of the container host."""
info = self.info
host = info.get('host')
# When the container host reports cgroup v1 it is running either cgroup v1 legacy mode or cgroup v2 hybrid mode.
# When the container host reports cgroup v2 it is running under cgroup v2 unified mode.
# See: https://github.com/containers/podman/blob/8356621249e36ed62fc7f35f12d17db9027ff076/libpod/info_linux.go#L52-L56
# See: https://github.com/moby/moby/blob/d082bbcc0557ec667faca81b8b33bec380b75dac/daemon/info_unix.go#L24-L27
if host:
return int(host['cgroupVersion'].lstrip('v')) # podman
try:
return int(info['CgroupVersion']) # docker
except KeyError:
pass
# Docker 20.10 (API version 1.41) added support for cgroup v2.
# Unfortunately the client or server is too old to report the cgroup version.
# If the server is old, we can infer the cgroup version.
# Otherwise, we'll need to fall back to detection.
# See: https://docs.docker.com/engine/release-notes/#20100
# See: https://docs.docker.com/engine/api/version-history/#v141-api-changes
if self.server_major_minor_version < (20, 10):
return 1 # old docker server with only cgroup v1 support
# Tell the user what versions they have and recommend they upgrade the client.
# Downgrading the server should also work, but we won't mention that.
message = (
f'The Docker client version is {self.client_version}. '
f'The Docker server version is {self.server_version}. '
'Upgrade your Docker client to version 20.10 or later.'
)
if detect_host_properties(self.args).cgroup_v2:
# Unfortunately cgroup v2 was detected on the Docker server.
# A newer client is needed to support the `--cgroupns` option for use with cgroup v2.
raise ApplicationError(f'Unsupported Docker client and server combination using cgroup v2. {message}')
display.warning(f'Detected Docker server cgroup v1 using probing. {message}', unique=True)
return 1 # docker server is using cgroup v1 (or cgroup v2 hybrid)
@property
def docker_desktop_wsl2(self) -> bool:
"""Return True if Docker Desktop integrated with WSL2 is detected, otherwise False."""
info = self.info
kernel_version = info.get('KernelVersion')
operating_system = info.get('OperatingSystem')
dd_wsl2 = bool(kernel_version and kernel_version.endswith('-WSL2') and operating_system == 'Docker Desktop')
return dd_wsl2
@property
def description(self) -> str:
"""Describe the container runtime."""
tags = dict(
client=self.client_version,
server=self.server_version,
cgroup=f'v{self.cgroup_version}',
)
labels = [self.engine] + [f'{key}={value}' for key, value in tags.items()]
if self.docker_desktop_wsl2:
labels.append('DD+WSL2')
return f'Container runtime: {" ".join(labels)}'
@mutex
def get_docker_info(args: CommonConfig) -> DockerInfo:
"""Return info for the current container runtime. The results are cached."""
try:
return get_docker_info.info # type: ignore[attr-defined]
except AttributeError:
pass
info = DockerInfo.init(args)
display.info(info.description, verbosity=1)
get_docker_info.info = info # type: ignore[attr-defined]
return info
class SystemdControlGroupV1Status(enum.Enum):
"""The state of the cgroup v1 systemd hierarchy on the container host."""
SUBSYSTEM_MISSING = 'The systemd cgroup subsystem was not found.'
FILESYSTEM_NOT_MOUNTED = 'The "/sys/fs/cgroup/systemd" filesystem is not mounted.'
MOUNT_TYPE_NOT_CORRECT = 'The "/sys/fs/cgroup/systemd" mount type is not correct.'
VALID = 'The "/sys/fs/cgroup/systemd" mount is valid.'
@dataclasses.dataclass(frozen=True)
class ContainerHostProperties:
"""Container host properties detected at run time."""
audit_code: str
max_open_files: int
loginuid: t.Optional[int]
cgroup_v1: SystemdControlGroupV1Status
cgroup_v2: bool
@mutex
def detect_host_properties(args: CommonConfig) -> ContainerHostProperties:
"""
Detect and return properties of the container host.
The information collected is:
- The errno result from attempting to query the container host's audit status.
- The max number of open files supported by the container host to run containers.
This value may be capped to the maximum value used by ansible-test.
If the value is below the desired limit, a warning is displayed.
- The loginuid used by the container host to run containers, or None if the audit subsystem is unavailable.
- The cgroup subsystems registered with the Linux kernel.
- The mounts visible within a container.
- The status of the systemd cgroup v1 hierarchy.
This information is collected together to reduce the number of container runs to probe the container host.
"""
try:
return detect_host_properties.properties # type: ignore[attr-defined]
except AttributeError:
pass
single_line_commands = (
'audit-status',
'cat /proc/sys/fs/nr_open',
'ulimit -Hn',
'(cat /proc/1/loginuid; echo)',
)
multi_line_commands = (
' && '.join(single_line_commands),
'cat /proc/1/cgroup',
'cat /proc/1/mountinfo',
)
options = ['--volume', '/sys/fs/cgroup:/probe:ro']
cmd = ['sh', '-c', ' && echo "-" && '.join(multi_line_commands)]
stdout, stderr = run_utility_container(args, 'ansible-test-probe', cmd, options)
if args.explain:
return ContainerHostProperties(
audit_code='???',
max_open_files=MAX_NUM_OPEN_FILES,
loginuid=LOGINUID_NOT_SET,
cgroup_v1=SystemdControlGroupV1Status.VALID,
cgroup_v2=False,
)
blocks = stdout.split('\n-\n')
if len(blocks) != len(multi_line_commands):
message = f'Unexpected probe output. Expected {len(multi_line_commands)} blocks but found {len(blocks)}.\n'
message += format_command_output(stdout, stderr)
raise InternalError(message.strip())
values = blocks[0].split('\n')
audit_parts = values[0].split(' ', 1)
audit_status = int(audit_parts[0])
audit_code = audit_parts[1]
system_limit = int(values[1])
hard_limit = int(values[2])
loginuid = int(values[3]) if values[3] else None
cgroups = CGroupEntry.loads(blocks[1])
mounts = MountEntry.loads(blocks[2])
if hard_limit < MAX_NUM_OPEN_FILES and hard_limit < system_limit and require_docker().command == 'docker':
# Podman will use the highest possible limits, up to its default of 1M.
# See: https://github.com/containers/podman/blob/009afb50b308548eb129bc68e654db6c6ad82e7a/pkg/specgen/generate/oci.go#L39-L58
# Docker limits are less predictable. They could be the system limit or the user's soft limit.
# If Docker is running as root it should be able to use the system limit.
# When Docker reports a limit below the preferred value and the system limit, attempt to use the preferred value, up to the system limit.
options = ['--ulimit', f'nofile={min(system_limit, MAX_NUM_OPEN_FILES)}']
cmd = ['sh', '-c', 'ulimit -Hn']
try:
stdout = run_utility_container(args, 'ansible-test-ulimit', cmd, options)[0]
except SubprocessError as ex:
display.warning(str(ex))
else:
hard_limit = int(stdout)
# Check the audit error code from attempting to query the container host's audit status.
#
# The following error codes are known to occur:
#
# EPERM - Operation not permitted
# This occurs when the root user runs a container but lacks the AUDIT_WRITE capability.
# This will cause patched versions of OpenSSH to disconnect after a login succeeds.
# See: https://src.fedoraproject.org/rpms/openssh/blob/f36/f/openssh-7.6p1-audit.patch
#
# EBADF - Bad file number
# This occurs when the host doesn't support the audit system (the open_audit call fails).
# This allows SSH logins to succeed despite the failure.
# See: https://github.com/Distrotech/libaudit/blob/4fc64f79c2a7f36e3ab7b943ce33ab5b013a7782/lib/netlink.c#L204-L209
#
# ECONNREFUSED - Connection refused
# This occurs when a non-root user runs a container without the AUDIT_WRITE capability.
# When sending an audit message, libaudit ignores this error condition.
# This allows SSH logins to succeed despite the failure.
# See: https://github.com/Distrotech/libaudit/blob/4fc64f79c2a7f36e3ab7b943ce33ab5b013a7782/lib/deprecated.c#L48-L52
subsystems = set(cgroup.subsystem for cgroup in cgroups)
mount_types = {mount.path: mount.type for mount in mounts}
if 'systemd' not in subsystems:
cgroup_v1 = SystemdControlGroupV1Status.SUBSYSTEM_MISSING
elif not (mount_type := mount_types.get(pathlib.PurePosixPath('/probe/systemd'))):
cgroup_v1 = SystemdControlGroupV1Status.FILESYSTEM_NOT_MOUNTED
elif mount_type != MountType.CGROUP_V1:
cgroup_v1 = SystemdControlGroupV1Status.MOUNT_TYPE_NOT_CORRECT
else:
cgroup_v1 = SystemdControlGroupV1Status.VALID
cgroup_v2 = mount_types.get(pathlib.PurePosixPath('/probe')) == MountType.CGROUP_V2
display.info(f'Container host audit status: {audit_code} ({audit_status})', verbosity=1)
display.info(f'Container host max open files: {hard_limit}', verbosity=1)
display.info(f'Container loginuid: {loginuid if loginuid is not None else "unavailable"}'
f'{" (not set)" if loginuid == LOGINUID_NOT_SET else ""}', verbosity=1)
if hard_limit < MAX_NUM_OPEN_FILES:
display.warning(f'Unable to set container max open files to {MAX_NUM_OPEN_FILES}. Using container host limit of {hard_limit} instead.')
else:
hard_limit = MAX_NUM_OPEN_FILES
properties = ContainerHostProperties(
# The errno (audit_status) is intentionally not exposed here, as it can vary across systems and architectures.
# Instead, the symbolic name (audit_code) is used, which is resolved inside the container which generated the error.
# See: https://man7.org/linux/man-pages/man3/errno.3.html
audit_code=audit_code,
max_open_files=hard_limit,
loginuid=loginuid,
cgroup_v1=cgroup_v1,
cgroup_v2=cgroup_v2,
)
detect_host_properties.properties = properties # type: ignore[attr-defined]
return properties
def get_session_container_name(args: CommonConfig, name: str) -> str:
"""Return the given container name with the current test session name applied to it."""
return f'{name}-{args.session_name}'
def run_utility_container(
args: CommonConfig,
name: str,
cmd: list[str],
options: list[str],
data: t.Optional[str] = None,
) -> tuple[t.Optional[str], t.Optional[str]]:
"""Run the specified command using the ansible-test utility container, returning stdout and stderr."""
name = get_session_container_name(args, name)
options = options + [
'--name', name,
'--rm',
] # fmt: skip
if data:
options.append('-i')
docker_pull(args, UTILITY_IMAGE)
return docker_run(args, UTILITY_IMAGE, options, cmd, data)
class DockerCommand:
"""Details about the available docker command."""
def __init__(self, command: str, executable: str, version: str) -> None:
self.command = command
self.executable = executable
self.version = version
@staticmethod
def detect() -> t.Optional[DockerCommand]:
"""Detect and return the available docker command, or None."""
if os.environ.get('ANSIBLE_TEST_PREFER_PODMAN'):
commands = list(reversed(DOCKER_COMMANDS))
else:
commands = DOCKER_COMMANDS
for command in commands:
executable = find_executable(command, required=False)
if executable:
version = raw_command([command, '-v'], env=docker_environment(), capture=True)[0].strip()
if command == 'docker' and 'podman' in version:
continue # avoid detecting podman as docker
display.info('Detected "%s" container runtime version: %s' % (command, version), verbosity=1)
return DockerCommand(command, executable, version)
return None
def require_docker() -> DockerCommand:
"""Return the docker command to invoke. Raises an exception if docker is not available."""
if command := get_docker_command():
return command
raise ApplicationError(f'No container runtime detected. Supported commands: {", ".join(DOCKER_COMMANDS)}')
@cache
def get_docker_command() -> t.Optional[DockerCommand]:
"""Return the docker command to invoke, or None if docker is not available."""
return DockerCommand.detect()
def docker_available() -> bool:
"""Return True if docker is available, otherwise return False."""
return bool(get_docker_command())
@cache
def get_docker_host_ip() -> str:
"""Return the IP of the Docker host."""
docker_host_ip = socket.gethostbyname(get_docker_hostname())
display.info('Detected docker host IP: %s' % docker_host_ip, verbosity=1)
return docker_host_ip
@cache
def get_docker_hostname() -> str:
"""Return the hostname of the Docker service."""
docker_host = os.environ.get('DOCKER_HOST')
if docker_host and docker_host.startswith(('tcp://', 'ssh://')):
try:
hostname = urllib.parse.urlparse(docker_host)[1].split(':')[0]
display.info('Detected Docker host: %s' % hostname, verbosity=1)
except ValueError:
hostname = 'localhost'
display.warning('Could not parse DOCKER_HOST environment variable "%s", falling back to localhost.' % docker_host)
else:
hostname = 'localhost'
display.info('Assuming Docker is available on localhost.', verbosity=1)
return hostname
@cache
def get_podman_host_ip() -> str:
"""Return the IP of the Podman host."""
podman_host_ip = socket.gethostbyname(get_podman_hostname())
display.info('Detected Podman host IP: %s' % podman_host_ip, verbosity=1)
return podman_host_ip
@cache
def get_podman_default_hostname() -> t.Optional[str]:
"""
Return the default hostname of the Podman service.
The --format option was added in podman 3.3.0; this functionality depends on its availability.
"""
hostname: t.Optional[str] = None
try:
stdout = raw_command(['podman', 'system', 'connection', 'list', '--format=json'], env=docker_environment(), capture=True)[0]
except SubprocessError:
stdout = '[]'
try:
connections = json.loads(stdout)
except json.decoder.JSONDecodeError:
return hostname
for connection in connections:
# A trailing '*' indicates the default connection.
if connection['Name'][-1] == '*':
hostname = connection['URI']
break
return hostname
@cache
def get_podman_remote() -> t.Optional[str]:
"""Return the remote podman hostname, if any, otherwise return None."""
# URL value resolution precedence:
# - command line value
# - environment variable CONTAINER_HOST
# - containers.conf
# - unix://run/podman/podman.sock
hostname = None
podman_host = os.environ.get('CONTAINER_HOST')
if not podman_host:
podman_host = get_podman_default_hostname()
if podman_host and podman_host.startswith('ssh://'):
try:
hostname = urllib.parse.urlparse(podman_host).hostname
except ValueError:
display.warning('Could not parse podman URI "%s"' % podman_host)
else:
display.info('Detected Podman remote: %s' % hostname, verbosity=1)
return hostname
@cache
def get_podman_hostname() -> str:
"""Return the hostname of the Podman service."""
hostname = get_podman_remote()
if not hostname:
hostname = 'localhost'
display.info('Assuming Podman is available on localhost.', verbosity=1)
return hostname
@cache
def get_docker_container_id() -> t.Optional[str]:
"""Return the current container ID if running in a container, otherwise return None."""
mountinfo_path = pathlib.Path('/proc/self/mountinfo')
container_id = None
engine = None
if mountinfo_path.is_file():
# NOTE: This method of detecting the container engine and container ID relies on implementation details of each container engine.
# Although the implementation details have remained unchanged for some time, there is no guarantee they will continue to work.
# There have been proposals to create a standard mechanism for this, but none is currently available.
# See: https://github.com/opencontainers/runtime-spec/issues/1105
mounts = MountEntry.loads(mountinfo_path.read_text())
for mount in mounts:
if str(mount.path) == '/etc/hostname':
# Podman generates /etc/hostname in the makePlatformBindMounts function.
# That function ends up using ContainerRunDirectory to generate a path like: {prefix}/{container_id}/userdata/hostname
# NOTE: The {prefix} portion of the path can vary, so should not be relied upon.
# See: https://github.com/containers/podman/blob/480c7fbf5361f3bd8c1ed81fe4b9910c5c73b186/libpod/container_internal_linux.go#L660-L664
# See: https://github.com/containers/podman/blob/480c7fbf5361f3bd8c1ed81fe4b9910c5c73b186/vendor/github.com/containers/storage/store.go#L3133
# This behavior has existed for ~5 years and was present in Podman version 0.2.
# See: https://github.com/containers/podman/pull/248
if match := re.search('/(?P<id>[0-9a-f]{64})/userdata/hostname$', str(mount.root)):
container_id = match.group('id')
engine = 'Podman'
break
# Docker generates /etc/hostname in the BuildHostnameFile function.
# That function ends up using the containerRoot function to generate a path like: {prefix}/{container_id}/hostname
# NOTE: The {prefix} portion of the path can vary, so should not be relied upon.
# See: https://github.com/moby/moby/blob/cd8a090e6755bee0bdd54ac8a894b15881787097/container/container_unix.go#L58
# See: https://github.com/moby/moby/blob/92e954a2f05998dc05773b6c64bbe23b188cb3a0/daemon/container.go#L86
# This behavior has existed for at least ~7 years and was present in Docker version 1.0.1.
# See: https://github.com/moby/moby/blob/v1.0.1/daemon/container.go#L351
# See: https://github.com/moby/moby/blob/v1.0.1/daemon/daemon.go#L133
if match := re.search('/(?P<id>[0-9a-f]{64})/hostname$', str(mount.root)):
container_id = match.group('id')
engine = 'Docker'
break
if container_id:
display.info(f'Detected execution in {engine} container ID: {container_id}', verbosity=1)
return container_id
def docker_pull(args: CommonConfig, image: str) -> None:
"""
Pull the specified image if it is not available.
Images without a tag or digest will not be pulled.
Attempts the pull up to nine times before giving up.
A warning will be shown for any image with volumes defined.
Images will be pulled only once.
Concurrent pulls for the same image will block until the first completes.
"""
with named_lock(f'docker_pull:{image}') as first:
if first:
__docker_pull(args, image)
def __docker_pull(args: CommonConfig, image: str) -> None:
"""Internal implementation for docker_pull. Do not call directly."""
if '@' not in image and ':' not in image:
display.info('Skipping pull of image without tag or digest: %s' % image, verbosity=2)
inspect = docker_image_inspect(args, image)
elif inspect := docker_image_inspect(args, image, always=True):
display.info('Skipping pull of existing image: %s' % image, verbosity=2)
else:
for _iteration in range(1, 10):
try:
docker_command(args, ['pull', image], capture=False)
if (inspect := docker_image_inspect(args, image)) or args.explain:
break
display.warning(f'Image "{image}" not found after pull completed. Waiting a few seconds before trying again.')
except SubprocessError:
display.warning(f'Failed to pull container image "{image}". Waiting a few seconds before trying again.')
time.sleep(3)
else:
raise ApplicationError(f'Failed to pull container image "{image}".')
if inspect and inspect.volumes:
display.warning(f'Image "{image}" contains {len(inspect.volumes)} volume(s): {", ".join(sorted(inspect.volumes))}\n'
'This may result in leaking anonymous volumes. It may also prevent the image from working on some hosts or container engines.\n'
'The image should be rebuilt without the use of the VOLUME instruction.',
unique=True)
def docker_cp_to(args: CommonConfig, container_id: str, src: str, dst: str) -> None:
"""Copy a file to the specified container."""
docker_command(args, ['cp', src, '%s:%s' % (container_id, dst)], capture=True)
def docker_create(
args: CommonConfig,
image: str,
options: list[str],
    cmd: t.Optional[list[str]] = None,
) -> tuple[t.Optional[str], t.Optional[str]]:
    """Create a container using the given docker image."""
    return docker_command(args, ['create'] + options + [image] + (cmd or []), capture=True)
def docker_run(
args: CommonConfig,
image: str,
options: list[str],
    cmd: t.Optional[list[str]] = None,
    data: t.Optional[str] = None,
) -> tuple[t.Optional[str], t.Optional[str]]:
    """Run a container using the given docker image."""
    return docker_command(args, ['run'] + options + [image] + (cmd or []), data=data, capture=True)
def docker_start(
args: CommonConfig,
container_id: str,
options: list[str],
) -> tuple[t.Optional[str], t.Optional[str]]:
"""Start a container by name or ID."""
return docker_command(args, ['start'] + options + [container_id], capture=True)
def docker_rm(args: CommonConfig, container_id: str) -> None:
"""Remove the specified container."""
try:
# Stop the container with SIGKILL immediately, then remove the container.
# Podman supports the `--time` option on `rm`, but only since version 4.0.0.
# Docker does not support the `--time` option on `rm`.
docker_command(args, ['stop', '--time', '0', container_id], capture=True)
docker_command(args, ['rm', container_id], capture=True)
except SubprocessError as ex:
# Both Podman and Docker report an error if the container does not exist.
# The error messages contain the same "no such container" string, differing only in capitalization.
if 'no such container' not in ex.stderr.lower():
raise ex
class DockerError(Exception):
"""General Docker error."""
class ContainerNotFoundError(DockerError):
"""The container identified by `identifier` was not found."""
def __init__(self, identifier: str) -> None:
super().__init__('The container "%s" was not found.' % identifier)
self.identifier = identifier
class DockerInspect:
"""The results of `docker inspect` for a single container."""
def __init__(self, args: CommonConfig, inspection: dict[str, t.Any]) -> None:
self.args = args
self.inspection = inspection
# primary properties
@property
def id(self) -> str:
"""Return the ID of the container."""
return self.inspection['Id']
@property
def network_settings(self) -> dict[str, t.Any]:
"""Return a dictionary of the container network settings."""
return self.inspection['NetworkSettings']
@property
def state(self) -> dict[str, t.Any]:
"""Return a dictionary of the container state."""
return self.inspection['State']
@property
def config(self) -> dict[str, t.Any]:
"""Return a dictionary of the container configuration."""
return self.inspection['Config']
# nested properties
@property
def ports(self) -> dict[str, list[dict[str, str]]]:
"""Return a dictionary of ports the container has published."""
return self.network_settings['Ports']
@property
def networks(self) -> t.Optional[dict[str, dict[str, t.Any]]]:
"""Return a dictionary of the networks the container is attached to, or None if running under podman, which does not support networks."""
return self.network_settings.get('Networks')
@property
def running(self) -> bool:
"""Return True if the container is running, otherwise False."""
return self.state['Running']
@property
def pid(self) -> int:
"""Return the PID of the init process."""
if self.args.explain:
return 0
return self.state['Pid']
@property
def env(self) -> list[str]:
"""Return a list of the environment variables used to create the container."""
return self.config['Env']
@property
def image(self) -> str:
"""Return the image used to create the container."""
return self.config['Image']
# functions
def env_dict(self) -> dict[str, str]:
"""Return a dictionary of the environment variables used to create the container."""
        return dict(e.split('=', 1) for e in self.env)
def get_tcp_port(self, port: int) -> t.Optional[list[dict[str, str]]]:
"""Return a list of the endpoints published by the container for the specified TCP port, or None if it is not published."""
return self.ports.get('%d/tcp' % port)
def get_network_names(self) -> t.Optional[list[str]]:
"""Return a list of the network names the container is attached to."""
if self.networks is None:
return None
return sorted(self.networks)
def get_network_name(self) -> str:
"""Return the network name the container is attached to. Raises an exception if no network, or more than one, is attached."""
networks = self.get_network_names()
if not networks:
raise ApplicationError('No network found for Docker container: %s.' % self.id)
if len(networks) > 1:
raise ApplicationError('Found multiple networks for Docker container %s instead of only one: %s' % (self.id, ', '.join(networks)))
return networks[0]
def docker_inspect(args: CommonConfig, identifier: str, always: bool = False) -> DockerInspect:
"""
Return the results of `docker container inspect` for the specified container.
Raises a ContainerNotFoundError if the container was not found.
"""
try:
stdout = docker_command(args, ['container', 'inspect', identifier], capture=True, always=always)[0]
except SubprocessError as ex:
stdout = ex.stdout
if args.explain and not always:
items = []
else:
items = json.loads(stdout)
if len(items) == 1:
return DockerInspect(args, items[0])
raise ContainerNotFoundError(identifier)
def docker_network_disconnect(args: CommonConfig, container_id: str, network: str) -> None:
"""Disconnect the specified docker container from the given network."""
docker_command(args, ['network', 'disconnect', network, container_id], capture=True)
class DockerImageInspect:
"""The results of `docker image inspect` for a single image."""
def __init__(self, args: CommonConfig, inspection: dict[str, t.Any]) -> None:
self.args = args
self.inspection = inspection
# primary properties
@property
def config(self) -> dict[str, t.Any]:
"""Return a dictionary of the image config."""
return self.inspection['Config']
# nested properties
@property
def volumes(self) -> dict[str, t.Any]:
"""Return a dictionary of the image volumes."""
return self.config.get('Volumes') or {}
@property
def cmd(self) -> list[str]:
"""The command to run when the container starts."""
return self.config['Cmd']
@mutex
def docker_image_inspect(args: CommonConfig, image: str, always: bool = False) -> t.Optional[DockerImageInspect]:
"""
Return the results of `docker image inspect` for the specified image or None if the image does not exist.
"""
inspect_cache: dict[str, DockerImageInspect]
try:
inspect_cache = docker_image_inspect.cache # type: ignore[attr-defined]
except AttributeError:
inspect_cache = docker_image_inspect.cache = {} # type: ignore[attr-defined]
if inspect_result := inspect_cache.get(image):
return inspect_result
try:
stdout = docker_command(args, ['image', 'inspect', image], capture=True, always=always)[0]
except SubprocessError:
stdout = '[]'
if args.explain and not always:
items = []
else:
items = json.loads(stdout)
if len(items) > 1:
raise ApplicationError(f'Inspection of image "{image}" resulted in {len(items)} items:\n{json.dumps(items, indent=4)}')
if len(items) == 1:
inspect_result = DockerImageInspect(args, items[0])
inspect_cache[image] = inspect_result
return inspect_result
return None
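# A minimal illustrative sketch (editor's addition, not part of the upstream module):
# the function above memoizes results in an attribute on the function object itself,
# avoiding a module-level global. The lookup helper below is hypothetical and stands
# in for the expensive inspect call:
#
#     >>> def lookup(key):
#     ...     try:
#     ...         cache = lookup.cache
#     ...     except AttributeError:
#     ...         cache = lookup.cache = {}
#     ...     if key not in cache:
#     ...         cache[key] = key.upper()  # stand-in for the expensive operation
#     ...     return cache[key]
#     >>> lookup('ubi9')
#     'UBI9'
#     >>> lookup.cache
#     {'ubi9': 'UBI9'}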
class DockerNetworkInspect:
"""The results of `docker network inspect` for a single network."""
def __init__(self, args: CommonConfig, inspection: dict[str, t.Any]) -> None:
self.args = args
self.inspection = inspection
def docker_network_inspect(args: CommonConfig, network: str, always: bool = False) -> t.Optional[DockerNetworkInspect]:
"""
Return the results of `docker network inspect` for the specified network or None if the network does not exist.
"""
try:
stdout = docker_command(args, ['network', 'inspect', network], capture=True, always=always)[0]
except SubprocessError:
stdout = '[]'
if args.explain and not always:
items = []
else:
items = json.loads(stdout)
if len(items) == 1:
return DockerNetworkInspect(args, items[0])
return None
def docker_logs(args: CommonConfig, container_id: str) -> None:
"""Display logs for the specified container. If an error occurs, it is displayed rather than raising an exception."""
try:
docker_command(args, ['logs', container_id], capture=False)
except SubprocessError as ex:
display.error(str(ex))
def docker_exec(
args: CommonConfig,
container_id: str,
cmd: list[str],
capture: bool,
options: t.Optional[list[str]] = None,
stdin: t.Optional[t.IO[bytes]] = None,
stdout: t.Optional[t.IO[bytes]] = None,
interactive: bool = False,
output_stream: t.Optional[OutputStream] = None,
data: t.Optional[str] = None,
) -> tuple[t.Optional[str], t.Optional[str]]:
"""Execute the given command in the specified container."""
if not options:
options = []
if data or stdin or stdout:
options.append('-i')
return docker_command(
args,
['exec'] + options + [container_id] + cmd,
capture=capture,
stdin=stdin,
stdout=stdout,
interactive=interactive,
output_stream=output_stream,
data=data,
)
def docker_command(
args: CommonConfig,
cmd: list[str],
capture: bool,
stdin: t.Optional[t.IO[bytes]] = None,
stdout: t.Optional[t.IO[bytes]] = None,
interactive: bool = False,
output_stream: t.Optional[OutputStream] = None,
always: bool = False,
data: t.Optional[str] = None,
) -> tuple[t.Optional[str], t.Optional[str]]:
"""Run the specified docker command."""
env = docker_environment()
command = [require_docker().command]
if command[0] == 'podman' and get_podman_remote():
command.append('--remote')
return run_command(
args,
command + cmd,
env=env,
capture=capture,
stdin=stdin,
stdout=stdout,
interactive=interactive,
always=always,
output_stream=output_stream,
data=data,
)
def docker_environment() -> dict[str, str]:
"""Return a dictionary of docker related environment variables found in the current environment."""
env = common_environment()
var_names = {
'XDG_RUNTIME_DIR', # podman
}
var_prefixes = {
'CONTAINER_', # podman remote
'DOCKER_', # docker
}
env.update({name: value for name, value in os.environ.items() if name in var_names or any(name.startswith(prefix) for prefix in var_prefixes)})
return env
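# A minimal illustrative sketch (editor's addition, not part of the upstream module):
# the update above keeps a variable when its name matches exactly or carries a known
# prefix. Since str.startswith also accepts a tuple, the filter behaves like this on
# hypothetical input:
#
#     >>> names = ['DOCKER_HOST', 'CONTAINER_SSHKEY', 'PATH', 'XDG_RUNTIME_DIR']
#     >>> sorted(n for n in names if n == 'XDG_RUNTIME_DIR' or n.startswith(('CONTAINER_', 'DOCKER_')))
#     ['CONTAINER_SSHKEY', 'DOCKER_HOST', 'XDG_RUNTIME_DIR']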
| 38,220
|
Python
|
.py
| 800
| 40.3375
| 157
| 0.663806
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,106
|
host_configs.py
|
ansible_ansible/test/lib/ansible_test/_internal/host_configs.py
|
"""Configuration for the test hosts requested by the user."""
from __future__ import annotations
import abc
import dataclasses
import enum
import os
import pickle
import sys
import typing as t
from .constants import (
SUPPORTED_PYTHON_VERSIONS,
)
from .io import (
open_binary_file,
)
from .completion import (
AuditMode,
CGroupVersion,
CompletionConfig,
docker_completion,
DockerCompletionConfig,
InventoryCompletionConfig,
network_completion,
NetworkRemoteCompletionConfig,
PosixCompletionConfig,
PosixRemoteCompletionConfig,
PosixSshCompletionConfig,
remote_completion,
RemoteCompletionConfig,
windows_completion,
WindowsRemoteCompletionConfig,
filter_completion,
)
from .util import (
find_python,
get_available_python_versions,
str_to_version,
version_to_str,
Architecture,
)
@dataclasses.dataclass(frozen=True)
class OriginCompletionConfig(PosixCompletionConfig):
"""Pseudo completion config for the origin."""
def __init__(self) -> None:
super().__init__(name='origin')
@property
def supported_pythons(self) -> list[str]:
"""Return a list of the supported Python versions."""
current_version = version_to_str(sys.version_info[:2])
versions = [version for version in SUPPORTED_PYTHON_VERSIONS if version == current_version] + \
[version for version in SUPPORTED_PYTHON_VERSIONS if version != current_version]
return versions
def get_python_path(self, version: str) -> str:
"""Return the path of the requested Python version."""
version = find_python(version)
return version
@property
def is_default(self) -> bool:
"""True if the completion entry is only used for defaults, otherwise False."""
return False
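# A minimal illustrative sketch (editor's addition, not part of the upstream module):
# supported_pythons above moves the interpreter's own version to the front of the list
# while preserving the relative order of the rest. With hypothetical versions:
#
#     >>> supported = ['3.8', '3.9', '3.10']
#     >>> current = '3.9'
#     >>> [v for v in supported if v == current] + [v for v in supported if v != current]
#     ['3.9', '3.8', '3.10']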
@dataclasses.dataclass(frozen=True)
class HostContext:
"""Context used when getting and applying defaults for host configurations."""
controller_config: t.Optional['PosixConfig']
@property
def controller(self) -> bool:
"""True if the context is for the controller, otherwise False."""
return not self.controller_config
@dataclasses.dataclass
class HostConfig(metaclass=abc.ABCMeta):
"""Base class for host configuration."""
@abc.abstractmethod
def get_defaults(self, context: HostContext) -> CompletionConfig:
"""Return the default settings."""
@abc.abstractmethod
def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
"""Apply default settings."""
@property
def is_managed(self) -> bool:
"""
True if the host is a managed instance, otherwise False.
Managed instances are used exclusively by ansible-test and can safely have destructive operations performed without explicit permission from the user.
"""
return False
@dataclasses.dataclass
class PythonConfig(metaclass=abc.ABCMeta):
"""Configuration for Python."""
version: t.Optional[str] = None
path: t.Optional[str] = None
@property
def tuple(self) -> tuple[int, ...]:
"""Return the Python version as a tuple."""
return str_to_version(self.version)
@property
def major_version(self) -> int:
"""Return the Python major version."""
return self.tuple[0]
def apply_defaults(self, context: HostContext, defaults: PosixCompletionConfig) -> None:
"""Apply default settings."""
if self.version in (None, 'default'):
self.version = defaults.get_default_python(context.controller)
if self.path:
if self.path.endswith('/'):
self.path = os.path.join(self.path, f'python{self.version}')
# FUTURE: If the host is origin, the python path could be validated here.
else:
self.path = defaults.get_python_path(self.version)
@property
@abc.abstractmethod
def is_managed(self) -> bool:
"""
True if this Python is a managed instance, otherwise False.
Managed instances are used exclusively by ansible-test and can safely have requirements installed without explicit permission from the user.
"""
@dataclasses.dataclass
class NativePythonConfig(PythonConfig):
"""Configuration for native Python."""
@property
def is_managed(self) -> bool:
"""
True if this Python is a managed instance, otherwise False.
Managed instances are used exclusively by ansible-test and can safely have requirements installed without explicit permission from the user.
"""
return False
@dataclasses.dataclass
class VirtualPythonConfig(PythonConfig):
"""Configuration for Python in a virtual environment."""
system_site_packages: t.Optional[bool] = None
def apply_defaults(self, context: HostContext, defaults: PosixCompletionConfig) -> None:
"""Apply default settings."""
super().apply_defaults(context, defaults)
if self.system_site_packages is None:
self.system_site_packages = False
@property
def is_managed(self) -> bool:
"""
True if this Python is a managed instance, otherwise False.
Managed instances are used exclusively by ansible-test and can safely have requirements installed without explicit permission from the user.
"""
return True
@dataclasses.dataclass
class PosixConfig(HostConfig, metaclass=abc.ABCMeta):
"""Base class for POSIX host configuration."""
python: t.Optional[PythonConfig] = None
@property
@abc.abstractmethod
def have_root(self) -> bool:
"""True if root is available, otherwise False."""
@abc.abstractmethod
def get_defaults(self, context: HostContext) -> PosixCompletionConfig:
"""Return the default settings."""
def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
"""Apply default settings."""
assert isinstance(defaults, PosixCompletionConfig)
super().apply_defaults(context, defaults)
self.python = self.python or NativePythonConfig()
self.python.apply_defaults(context, defaults)
@dataclasses.dataclass
class ControllerHostConfig(PosixConfig, metaclass=abc.ABCMeta):
"""Base class for host configurations which support the controller."""
@abc.abstractmethod
def get_default_targets(self, context: HostContext) -> list[ControllerConfig]:
"""Return the default targets for this host config."""
@dataclasses.dataclass
class RemoteConfig(HostConfig, metaclass=abc.ABCMeta):
"""Base class for remote host configuration."""
name: t.Optional[str] = None
provider: t.Optional[str] = None
arch: t.Optional[str] = None
@property
def platform(self) -> str:
"""The name of the platform."""
return self.name.partition('/')[0]
@property
def version(self) -> str:
"""The version of the platform."""
return self.name.partition('/')[2]
def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
"""Apply default settings."""
assert isinstance(defaults, RemoteCompletionConfig)
super().apply_defaults(context, defaults)
if self.provider == 'default':
self.provider = None
self.provider = self.provider or defaults.provider or 'aws'
self.arch = self.arch or defaults.arch or Architecture.X86_64
@property
def is_managed(self) -> bool:
"""
True if this host is a managed instance, otherwise False.
Managed instances are used exclusively by ansible-test and can safely have destructive operations performed without explicit permission from the user.
"""
return True
@dataclasses.dataclass
class PosixSshConfig(PosixConfig):
"""Configuration for a POSIX SSH host."""
user: t.Optional[str] = None
host: t.Optional[str] = None
port: t.Optional[int] = None
def get_defaults(self, context: HostContext) -> PosixSshCompletionConfig:
"""Return the default settings."""
return PosixSshCompletionConfig(
user=self.user,
host=self.host,
)
@property
def have_root(self) -> bool:
"""True if root is available, otherwise False."""
return self.user == 'root'
@dataclasses.dataclass
class InventoryConfig(HostConfig):
"""Configuration using inventory."""
path: t.Optional[str] = None
def get_defaults(self, context: HostContext) -> InventoryCompletionConfig:
"""Return the default settings."""
return InventoryCompletionConfig()
def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
"""Apply default settings."""
assert isinstance(defaults, InventoryCompletionConfig)
@dataclasses.dataclass
class DockerConfig(ControllerHostConfig, PosixConfig):
"""Configuration for a docker host."""
name: t.Optional[str] = None
image: t.Optional[str] = None
memory: t.Optional[int] = None
privileged: t.Optional[bool] = None
seccomp: t.Optional[str] = None
cgroup: t.Optional[CGroupVersion] = None
audit: t.Optional[AuditMode] = None
def get_defaults(self, context: HostContext) -> DockerCompletionConfig:
"""Return the default settings."""
return filter_completion(docker_completion()).get(self.name) or DockerCompletionConfig(
name=self.name,
image=self.name,
placeholder=True,
)
def get_default_targets(self, context: HostContext) -> list[ControllerConfig]:
"""Return the default targets for this host config."""
if self.name in filter_completion(docker_completion()):
defaults = self.get_defaults(context)
pythons = {version: defaults.get_python_path(version) for version in defaults.supported_pythons}
else:
pythons = {context.controller_config.python.version: context.controller_config.python.path}
return [ControllerConfig(python=NativePythonConfig(version=version, path=path)) for version, path in pythons.items()]
def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
"""Apply default settings."""
assert isinstance(defaults, DockerCompletionConfig)
super().apply_defaults(context, defaults)
self.name = defaults.name
self.image = defaults.image
if self.seccomp is None:
self.seccomp = defaults.seccomp
if self.cgroup is None:
self.cgroup = defaults.cgroup_enum
if self.audit is None:
self.audit = defaults.audit_enum
if self.privileged is None:
self.privileged = False
@property
def is_managed(self) -> bool:
"""
True if this host is a managed instance, otherwise False.
Managed instances are used exclusively by ansible-test and can safely have destructive operations performed without explicit permission from the user.
"""
return True
@property
def have_root(self) -> bool:
"""True if root is available, otherwise False."""
return True
@dataclasses.dataclass
class PosixRemoteConfig(RemoteConfig, ControllerHostConfig, PosixConfig):
"""Configuration for a POSIX remote host."""
become: t.Optional[str] = None
def get_defaults(self, context: HostContext) -> PosixRemoteCompletionConfig:
"""Return the default settings."""
# pylint: disable=unexpected-keyword-arg # see: https://github.com/PyCQA/pylint/issues/7434
return filter_completion(remote_completion()).get(self.name) or remote_completion().get(self.platform) or PosixRemoteCompletionConfig(
name=self.name,
placeholder=True,
)
def get_default_targets(self, context: HostContext) -> list[ControllerConfig]:
"""Return the default targets for this host config."""
if self.name in filter_completion(remote_completion()):
defaults = self.get_defaults(context)
pythons = {version: defaults.get_python_path(version) for version in defaults.supported_pythons}
else:
pythons = {context.controller_config.python.version: context.controller_config.python.path}
return [ControllerConfig(python=NativePythonConfig(version=version, path=path)) for version, path in pythons.items()]
def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
"""Apply default settings."""
assert isinstance(defaults, PosixRemoteCompletionConfig)
super().apply_defaults(context, defaults)
self.become = self.become or defaults.become
@property
def have_root(self) -> bool:
"""True if root is available, otherwise False."""
return True
@dataclasses.dataclass
class WindowsConfig(HostConfig, metaclass=abc.ABCMeta):
"""Base class for Windows host configuration."""
@dataclasses.dataclass
class WindowsRemoteConfig(RemoteConfig, WindowsConfig):
"""Configuration for a remote Windows host."""
connection: t.Optional[str] = None
def get_defaults(self, context: HostContext) -> WindowsRemoteCompletionConfig:
"""Return the default settings."""
return filter_completion(windows_completion()).get(self.name) or windows_completion().get(self.platform)
def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
"""Apply default settings."""
assert isinstance(defaults, WindowsRemoteCompletionConfig)
super().apply_defaults(context, defaults)
self.connection = self.connection or defaults.connection
@dataclasses.dataclass
class WindowsInventoryConfig(InventoryConfig, WindowsConfig):
"""Configuration for Windows hosts using inventory."""
@dataclasses.dataclass
class NetworkConfig(HostConfig, metaclass=abc.ABCMeta):
"""Base class for network host configuration."""
@dataclasses.dataclass
class NetworkRemoteConfig(RemoteConfig, NetworkConfig):
"""Configuration for a remote network host."""
collection: t.Optional[str] = None
connection: t.Optional[str] = None
def get_defaults(self, context: HostContext) -> NetworkRemoteCompletionConfig:
"""Return the default settings."""
return filter_completion(network_completion()).get(self.name) or NetworkRemoteCompletionConfig(
name=self.name,
placeholder=True,
)
def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
"""Apply default settings."""
assert isinstance(defaults, NetworkRemoteCompletionConfig)
super().apply_defaults(context, defaults)
self.collection = self.collection or defaults.collection
self.connection = self.connection or defaults.connection
@dataclasses.dataclass
class NetworkInventoryConfig(InventoryConfig, NetworkConfig):
"""Configuration for network hosts using inventory."""
@dataclasses.dataclass
class OriginConfig(ControllerHostConfig, PosixConfig):
"""Configuration for the origin host."""
def get_defaults(self, context: HostContext) -> OriginCompletionConfig:
"""Return the default settings."""
return OriginCompletionConfig()
def get_default_targets(self, context: HostContext) -> list[ControllerConfig]:
"""Return the default targets for this host config."""
return [ControllerConfig(python=NativePythonConfig(version=version, path=path)) for version, path in get_available_python_versions().items()]
@property
def have_root(self) -> bool:
"""True if root is available, otherwise False."""
return os.getuid() == 0
@dataclasses.dataclass
class ControllerConfig(PosixConfig):
"""Configuration for the controller host."""
controller: t.Optional[PosixConfig] = None
def get_defaults(self, context: HostContext) -> PosixCompletionConfig:
"""Return the default settings."""
return context.controller_config.get_defaults(context)
def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
"""Apply default settings."""
assert isinstance(defaults, PosixCompletionConfig)
self.controller = context.controller_config
if not self.python and not defaults.supported_pythons:
# The user did not specify a target Python and supported Pythons are unknown, so use the controller Python specified by the user instead.
self.python = context.controller_config.python
super().apply_defaults(context, defaults)
@property
def is_managed(self) -> bool:
"""
True if the host is a managed instance, otherwise False.
Managed instances are used exclusively by ansible-test and can safely have destructive operations performed without explicit permission from the user.
"""
return self.controller.is_managed
@property
def have_root(self) -> bool:
"""True if root is available, otherwise False."""
return self.controller.have_root
class FallbackReason(enum.Enum):
"""Reason fallback was performed."""
ENVIRONMENT = enum.auto()
PYTHON = enum.auto()
@dataclasses.dataclass(frozen=True)
class FallbackDetail:
"""Details about controller fallback behavior."""
reason: FallbackReason
message: str
@dataclasses.dataclass(frozen=True)
class HostSettings:
"""Host settings for the controller and targets."""
controller: ControllerHostConfig
targets: list[HostConfig]
skipped_python_versions: list[str]
filtered_args: list[str]
controller_fallback: t.Optional[FallbackDetail]
def serialize(self, path: str) -> None:
"""Serialize the host settings to the given path."""
with open_binary_file(path, 'wb') as settings_file:
pickle.dump(self, settings_file)
@staticmethod
def deserialize(path: str) -> HostSettings:
"""Deserialize host settings from the path."""
with open_binary_file(path) as settings_file:
return pickle.load(settings_file)
def apply_defaults(self) -> None:
"""Apply defaults to the host settings."""
context = HostContext(controller_config=None)
self.controller.apply_defaults(context, self.controller.get_defaults(context))
for target in self.targets:
context = HostContext(controller_config=self.controller)
target.apply_defaults(context, target.get_defaults(context))
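# A minimal illustrative sketch (editor's addition, not part of the upstream module):
# serialize() and deserialize() above are a plain pickle round-trip. The same mechanics
# with a hypothetical stand-in dataclass (the real HostSettings pickles its nested host
# configs too), written as a top-level script so the class is importable by pickle:
#
#     import dataclasses
#     import pickle
#
#     @dataclasses.dataclass(frozen=True)
#     class SettingsSketch:
#         targets: list[str]
#
#     blob = pickle.dumps(SettingsSketch(targets=['docker']))
#     assert pickle.loads(blob) == SettingsSketch(targets=['docker'])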
| 18,634
|
Python
|
.py
| 409
| 38.645477
| 158
| 0.702671
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,107
|
target.py
|
ansible_ansible/test/lib/ansible_test/_internal/target.py
|
"""Test target identification, iteration and inclusion/exclusion."""
from __future__ import annotations
import collections
import collections.abc as c
import enum
import os
import re
import itertools
import abc
import typing as t
from .encoding import (
to_bytes,
to_text,
)
from .io import (
read_text_file,
)
from .util import (
ApplicationError,
display,
read_lines_without_comments,
is_subdir,
)
from .data import (
data_context,
content_plugins,
)
MODULE_EXTENSIONS = '.py', '.ps1'
def find_target_completion(target_func: c.Callable[[], c.Iterable[CompletionTarget]], prefix: str, short: bool) -> list[str]:
"""Return a list of targets from the given target function which match the given prefix."""
try:
targets = target_func()
matches = list(walk_completion_targets(targets, prefix, short))
return matches
except Exception as ex: # pylint: disable=locally-disabled, broad-except
return ['%s' % ex]
def walk_completion_targets(targets: c.Iterable[CompletionTarget], prefix: str, short: bool = False) -> tuple[str, ...]:
"""Return a tuple of targets from the given target iterable which match the given prefix."""
aliases = set(alias for target in targets for alias in target.aliases)
if prefix.endswith('/') and prefix in aliases:
aliases.remove(prefix)
matches = [alias for alias in aliases if alias.startswith(prefix) and '/' not in alias[len(prefix):-1]]
if short:
offset = len(os.path.dirname(prefix))
if offset:
offset += 1
relative_matches = [match[offset:] for match in matches if len(match) > offset]
if len(relative_matches) > 1:
matches = relative_matches
return tuple(sorted(matches))
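# A minimal illustrative sketch (editor's addition, not part of the upstream module):
# when short=True, the offset computed above trims the directory portion of the prefix
# from each match. With hypothetical aliases:
#
#     >>> import os
#     >>> prefix = 'shippable/posix/'
#     >>> matches = ['shippable/posix/group1/', 'shippable/posix/group2/']
#     >>> offset = len(os.path.dirname(prefix)) + 1
#     >>> [m[offset:] for m in matches]
#     ['group1/', 'group2/']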
def walk_internal_targets(
targets: c.Iterable[TCompletionTarget],
includes: t.Optional[list[str]] = None,
excludes: t.Optional[list[str]] = None,
requires: t.Optional[list[str]] = None,
) -> tuple[TCompletionTarget, ...]:
"""Return a tuple of matching completion targets."""
targets = tuple(targets)
include_targets = sorted(filter_targets(targets, includes), key=lambda include_target: include_target.name)
if requires:
require_targets = set(filter_targets(targets, requires))
include_targets = [require_target for require_target in include_targets if require_target in require_targets]
if excludes:
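        # Evaluate the exclude patterns against the full target list purely for the side effect:
        # filter_targets() raises TargetPatternsNotMatched for any exclude pattern that matches nothing.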
list(filter_targets(targets, excludes, include=False))
internal_targets = set(filter_targets(include_targets, excludes, errors=False, include=False))
return tuple(sorted(internal_targets, key=lambda sort_target: sort_target.name))
def filter_targets(
targets: c.Iterable[TCompletionTarget],
patterns: list[str],
include: bool = True,
errors: bool = True,
) -> c.Iterable[TCompletionTarget]:
"""Iterate over the given targets and filter them based on the supplied arguments."""
unmatched = set(patterns or ())
compiled_patterns = dict((p, re.compile('^%s$' % p)) for p in patterns) if patterns else None
for target in targets:
matched_directories = set()
match = False
if patterns:
for alias in target.aliases:
for pattern in patterns:
if compiled_patterns[pattern].match(alias):
match = True
try:
unmatched.remove(pattern)
except KeyError:
pass
if alias.endswith('/'):
if target.base_path and len(target.base_path) > len(alias):
matched_directories.add(target.base_path)
else:
matched_directories.add(alias)
elif include:
match = True
if not target.base_path:
matched_directories.add('.')
for alias in target.aliases:
if alias.endswith('/'):
if target.base_path and len(target.base_path) > len(alias):
matched_directories.add(target.base_path)
else:
matched_directories.add(alias)
if match != include:
continue
yield target
if errors:
if unmatched:
raise TargetPatternsNotMatched(unmatched)
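# A minimal illustrative sketch (editor's addition, not part of the upstream module):
# each pattern is compiled anchored at both ends, so a bare name matches exactly and
# explicit wildcards are required for prefix matching:
#
#     >>> import re
#     >>> bool(re.compile('^%s$' % 'ping').match('ping'))
#     True
#     >>> bool(re.compile('^%s$' % 'ping').match('ping_extra'))
#     False
#     >>> bool(re.compile('^%s$' % 'ping.*').match('ping_extra'))
#     True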
def walk_module_targets() -> c.Iterable[TestTarget]:
"""Iterate through the module test targets."""
for target in walk_test_targets(path=data_context().content.module_path, module_path=data_context().content.module_path, extensions=MODULE_EXTENSIONS):
if not target.module:
continue
yield target
def walk_units_targets() -> c.Iterable[TestTarget]:
"""Return an iterable of units targets."""
return walk_test_targets(path=data_context().content.unit_path, module_path=data_context().content.unit_module_path, extensions=('.py',), prefix='test_')
def walk_compile_targets(include_symlinks: bool = True) -> c.Iterable[TestTarget]:
"""Return an iterable of compile targets."""
return walk_test_targets(module_path=data_context().content.module_path, extensions=('.py',), extra_dirs=('bin',), include_symlinks=include_symlinks)
def walk_powershell_targets(include_symlinks: bool = True) -> c.Iterable[TestTarget]:
"""Return an iterable of PowerShell targets."""
return walk_test_targets(module_path=data_context().content.module_path, extensions=('.ps1', '.psm1'), include_symlinks=include_symlinks)
def walk_sanity_targets() -> c.Iterable[TestTarget]:
"""Return an iterable of sanity targets."""
return walk_test_targets(module_path=data_context().content.module_path, include_symlinks=True, include_symlinked_directories=True)
def walk_posix_integration_targets(include_hidden: bool = False) -> c.Iterable[IntegrationTarget]:
"""Return an iterable of POSIX integration targets."""
for target in walk_integration_targets():
if 'posix/' in target.aliases or (include_hidden and 'hidden/posix/' in target.aliases):
yield target
def walk_network_integration_targets(include_hidden: bool = False) -> c.Iterable[IntegrationTarget]:
"""Return an iterable of network integration targets."""
for target in walk_integration_targets():
if 'network/' in target.aliases or (include_hidden and 'hidden/network/' in target.aliases):
yield target
def walk_windows_integration_targets(include_hidden: bool = False) -> c.Iterable[IntegrationTarget]:
"""Return an iterable of windows integration targets."""
for target in walk_integration_targets():
if 'windows/' in target.aliases or (include_hidden and 'hidden/windows/' in target.aliases):
yield target
def walk_integration_targets() -> c.Iterable[IntegrationTarget]:
"""Return an iterable of integration targets."""
path = data_context().content.integration_targets_path
modules = frozenset(target.module for target in walk_module_targets())
paths = data_context().content.walk_files(path)
prefixes = load_integration_prefixes()
targets_path_tuple = tuple(path.split(os.path.sep))
entry_dirs = (
'defaults',
'files',
'handlers',
'meta',
'tasks',
'templates',
'vars',
)
entry_files = (
'main.yml',
'main.yaml',
)
entry_points = []
for entry_dir in entry_dirs:
for entry_file in entry_files:
entry_points.append(os.path.join(os.path.sep, entry_dir, entry_file))
# any directory with at least one file is a target
path_tuples = set(tuple(os.path.dirname(p).split(os.path.sep))
for p in paths)
# also detect targets which are ansible roles, looking for standard entry points
path_tuples.update(tuple(os.path.dirname(os.path.dirname(p)).split(os.path.sep))
for p in paths if any(p.endswith(entry_point) for entry_point in entry_points))
# remove the top-level directory if it was included
if targets_path_tuple in path_tuples:
path_tuples.remove(targets_path_tuple)
previous_path_tuple = None
paths = []
for path_tuple in sorted(path_tuples):
if previous_path_tuple and previous_path_tuple == path_tuple[:len(previous_path_tuple)]:
# ignore nested directories
continue
previous_path_tuple = path_tuple
paths.append(os.path.sep.join(path_tuple))
for path in paths:
yield IntegrationTarget(to_text(path), modules, prefixes)
def load_integration_prefixes() -> dict[str, str]:
"""Load and return the integration test prefixes."""
path = data_context().content.integration_path
file_paths = sorted(f for f in data_context().content.get_files(path) if os.path.splitext(os.path.basename(f))[0] == 'target-prefixes')
prefixes = {}
for file_path in file_paths:
prefix = os.path.splitext(file_path)[1][1:]
prefixes.update(dict((k, prefix) for k in read_text_file(file_path).splitlines()))
return prefixes
def walk_test_targets(
path: t.Optional[str] = None,
module_path: t.Optional[str] = None,
extensions: t.Optional[tuple[str, ...]] = None,
prefix: t.Optional[str] = None,
extra_dirs: t.Optional[tuple[str, ...]] = None,
include_symlinks: bool = False,
include_symlinked_directories: bool = False,
) -> c.Iterable[TestTarget]:
"""Iterate over available test targets."""
if path:
file_paths = data_context().content.walk_files(path, include_symlinked_directories=include_symlinked_directories)
else:
file_paths = data_context().content.all_files(include_symlinked_directories=include_symlinked_directories)
for file_path in file_paths:
name, ext = os.path.splitext(os.path.basename(file_path))
if extensions and ext not in extensions:
continue
if prefix and not name.startswith(prefix):
continue
symlink = os.path.islink(to_bytes(file_path.rstrip(os.path.sep)))
if symlink and not include_symlinks:
continue
yield TestTarget(to_text(file_path), module_path, prefix, path, symlink)
file_paths = []
if extra_dirs:
for extra_dir in extra_dirs:
for file_path in data_context().content.get_files(extra_dir):
file_paths.append(file_path)
for file_path in file_paths:
symlink = os.path.islink(to_bytes(file_path.rstrip(os.path.sep)))
if symlink and not include_symlinks:
continue
yield TestTarget(file_path, module_path, prefix, path, symlink)
def analyze_integration_target_dependencies(integration_targets: list[IntegrationTarget]) -> dict[str, set[str]]:
"""Analyze the given list of integration test targets and return a dictionary expressing target names and the target names which depend on them."""
real_target_root = os.path.realpath(data_context().content.integration_targets_path) + '/'
role_targets = [target for target in integration_targets if target.type == 'role']
hidden_role_target_names = set(target.name for target in role_targets if 'hidden/' in target.aliases)
dependencies: collections.defaultdict[str, set[str]] = collections.defaultdict(set)
# handle setup dependencies
for target in integration_targets:
for setup_target_name in target.setup_always + target.setup_once:
dependencies[setup_target_name].add(target.name)
# handle target dependencies
for target in integration_targets:
for need_target in target.needs_target:
dependencies[need_target].add(target.name)
# handle symlink dependencies between targets
# this use case is supported, but discouraged
for target in integration_targets:
for path in data_context().content.walk_files(target.path):
if not os.path.islink(to_bytes(path.rstrip(os.path.sep))):
continue
real_link_path = os.path.realpath(path)
if not real_link_path.startswith(real_target_root):
continue
link_target = real_link_path[len(real_target_root):].split('/')[0]
if link_target == target.name:
continue
dependencies[link_target].add(target.name)
# intentionally primitive analysis of role meta to avoid a dependency on pyyaml
# script based targets are scanned as they may execute a playbook with role dependencies
for target in integration_targets:
meta_dir = os.path.join(target.path, 'meta')
if not os.path.isdir(meta_dir):
continue
meta_paths = data_context().content.get_files(meta_dir)
for meta_path in meta_paths:
if os.path.exists(meta_path):
# try and decode the file as a utf-8 string, skip if it contains invalid chars (binary file)
try:
meta_lines = read_text_file(meta_path).splitlines()
except UnicodeDecodeError:
continue
for meta_line in meta_lines:
if re.search(r'^ *#.*$', meta_line):
continue
if not meta_line.strip():
continue
for hidden_target_name in hidden_role_target_names:
if hidden_target_name in meta_line:
dependencies[hidden_target_name].add(target.name)
while True:
changes = 0
for dummy, dependent_target_names in dependencies.items():
for dependent_target_name in list(dependent_target_names):
new_target_names = dependencies.get(dependent_target_name)
if new_target_names:
for new_target_name in new_target_names:
if new_target_name not in dependent_target_names:
dependent_target_names.add(new_target_name)
changes += 1
if not changes:
break
for target_name in sorted(dependencies):
consumers = dependencies[target_name]
if not consumers:
continue
display.info('%s:' % target_name, verbosity=4)
for consumer in sorted(consumers):
display.info(' %s' % consumer, verbosity=4)
return dependencies
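# A minimal illustrative sketch (editor's addition, not part of the upstream module):
# the while loop above computes a transitive closure by fixed-point iteration, looping
# until a full pass adds no new dependents. On hypothetical data:
#
#     >>> deps = {'setup_a': {'uses_a'}, 'uses_a': {'uses_a_indirectly'}}
#     >>> while True:
#     ...     changes = 0
#     ...     for consumers in deps.values():
#     ...         for name in list(consumers):
#     ...             for extra in deps.get(name, ()):
#     ...                 if extra not in consumers:
#     ...                     consumers.add(extra)
#     ...                     changes += 1
#     ...     if not changes:
#     ...         break
#     >>> sorted(deps['setup_a'])
#     ['uses_a', 'uses_a_indirectly']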
class CompletionTarget(metaclass=abc.ABCMeta):
"""Command-line argument completion target base class."""
def __init__(self) -> None:
self.name = ''
self.path = ''
self.base_path: t.Optional[str] = None
self.modules: tuple[str, ...] = tuple()
self.aliases: tuple[str, ...] = tuple()
def __eq__(self, other):
if isinstance(other, CompletionTarget):
return self.__repr__() == other.__repr__()
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return self.name.__lt__(other.name)
def __gt__(self, other):
return self.name.__gt__(other.name)
def __hash__(self):
return hash(self.__repr__())
def __repr__(self):
if self.modules:
return '%s (%s)' % (self.name, ', '.join(self.modules))
return self.name
class TestTarget(CompletionTarget):
"""Generic test target."""
def __init__(
self,
path: str,
module_path: t.Optional[str],
module_prefix: t.Optional[str],
base_path: str,
symlink: t.Optional[bool] = None,
) -> None:
super().__init__()
if symlink is None:
symlink = os.path.islink(to_bytes(path.rstrip(os.path.sep)))
self.name = path
self.path = path
self.base_path = base_path + '/' if base_path else None
self.symlink = symlink
name, ext = os.path.splitext(os.path.basename(self.path))
if module_path and is_subdir(path, module_path) and name != '__init__' and ext in MODULE_EXTENSIONS:
self.module = name[len(module_prefix or ''):].lstrip('_')
self.modules = (self.module,)
else:
self.module = None
self.modules = tuple()
aliases = [self.path, self.module]
parts = self.path.split('/')
for i in range(1, len(parts)):
alias = '%s/' % '/'.join(parts[:i])
aliases.append(alias)
aliases = [a for a in aliases if a]
self.aliases = tuple(sorted(aliases))
class IntegrationTargetType(enum.Enum):
"""Type of integration test target."""
CONTROLLER = enum.auto()
TARGET = enum.auto()
UNKNOWN = enum.auto()
CONFLICT = enum.auto()
def extract_plugin_references(name: str, aliases: list[str]) -> list[tuple[str, str]]:
"""Return a list of plugin references found in the given integration test target name and aliases."""
plugins = content_plugins()
found: list[tuple[str, str]] = []
for alias in [name] + aliases:
plugin_type = 'modules'
plugin_name = alias
if plugin_name in plugins.get(plugin_type, {}):
found.append((plugin_type, plugin_name))
parts = alias.split('_')
for type_length in (1, 2):
if len(parts) > type_length:
plugin_type = '_'.join(parts[:type_length])
plugin_name = '_'.join(parts[type_length:])
if plugin_name in plugins.get(plugin_type, {}):
found.append((plugin_type, plugin_name))
return found
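# A minimal illustrative sketch (editor's addition, not part of the upstream module):
# the split above tries one- and two-word plugin type prefixes against each alias.
# For a hypothetical alias the candidate (type, name) pairs are:
#
#     >>> parts = 'connection_paramiko_ssh'.split('_')
#     >>> [('_'.join(parts[:n]), '_'.join(parts[n:])) for n in (1, 2)]
#     [('connection', 'paramiko_ssh'), ('connection_paramiko', 'ssh')]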
def categorize_integration_test(name: str, aliases: list[str], force_target: bool) -> tuple[IntegrationTargetType, IntegrationTargetType]:
"""Return the integration test target types (used and actual) based on the given target name and aliases."""
context_controller = f'context/{IntegrationTargetType.CONTROLLER.name.lower()}' in aliases
context_target = f'context/{IntegrationTargetType.TARGET.name.lower()}' in aliases or force_target
actual_type = None
strict_mode = data_context().content.is_ansible
if context_controller and context_target:
target_type = IntegrationTargetType.CONFLICT
elif context_controller and not context_target:
target_type = IntegrationTargetType.CONTROLLER
elif context_target and not context_controller:
target_type = IntegrationTargetType.TARGET
else:
target_types = {IntegrationTargetType.TARGET if plugin_type in ('modules', 'module_utils') else IntegrationTargetType.CONTROLLER
for plugin_type, plugin_name in extract_plugin_references(name, aliases)}
if len(target_types) == 1:
target_type = target_types.pop()
elif not target_types:
actual_type = IntegrationTargetType.UNKNOWN
target_type = actual_type if strict_mode else IntegrationTargetType.TARGET
else:
target_type = IntegrationTargetType.CONFLICT
return target_type, actual_type or target_type
class IntegrationTarget(CompletionTarget):
"""Integration test target."""
non_posix = frozenset((
'network',
'windows',
))
categories = frozenset(non_posix | frozenset((
'posix',
'module',
'needs',
'skip',
)))
def __init__(self, path: str, modules: frozenset[str], prefixes: dict[str, str]) -> None:
super().__init__()
self.relative_path = os.path.relpath(path, data_context().content.integration_targets_path)
self.name = self.relative_path.replace(os.path.sep, '.')
self.path = path
# script_path and type
file_paths = data_context().content.get_files(path)
runme_path = os.path.join(path, 'runme.sh')
if runme_path in file_paths:
self.type = 'script'
self.script_path = runme_path
else:
self.type = 'role' # ansible will consider these empty roles, so ansible-test should as well
self.script_path = None
# static_aliases
aliases_path = os.path.join(path, 'aliases')
if aliases_path in file_paths:
static_aliases = tuple(read_lines_without_comments(aliases_path, remove_blank_lines=True))
else:
static_aliases = tuple()
# modules
if self.name in modules:
module_name = self.name
elif self.name.startswith('win_') and self.name[4:] in modules:
module_name = self.name[4:]
else:
module_name = None
self.modules = tuple(sorted(a for a in static_aliases + tuple([module_name]) if a in modules))
# groups
groups = [self.type]
groups += [a for a in static_aliases if a not in modules]
groups += ['module/%s' % m for m in self.modules]
if data_context().content.is_ansible and (self.name == 'ansible-test' or self.name.startswith('ansible-test-')):
groups.append('ansible-test')
if not self.modules:
groups.append('non_module')
if 'destructive' not in groups:
groups.append('non_destructive')
if 'needs/httptester' in groups:
groups.append('cloud/httptester') # backwards compatibility for when it was not a cloud plugin
for prefix, group in prefixes.items():
if not self.name.startswith(f'{prefix}_'):
continue
if group != prefix:
group = '%s/%s' % (group, prefix)
groups.append(group)
if self.name.startswith('win_'):
groups.append('windows')
if self.name.startswith('connection_'):
groups.append('connection')
if self.name.startswith('setup_') or self.name.startswith('prepare_'):
groups.append('hidden')
if self.type not in ('script', 'role'):
groups.append('hidden')
targets_relative_path = data_context().content.integration_targets_path
# Collect skip entries before group expansion to avoid registering more specific skip entries as less specific versions.
self.skips = tuple(g for g in groups if g.startswith('skip/'))
# Collect file paths before group expansion to avoid including the directories.
# Ignore references to test targets, as those must be defined using `needs/target/*` or other target references.
self.needs_file = tuple(sorted(set('/'.join(g.split('/')[2:]) for g in groups if
g.startswith('needs/file/') and not g.startswith('needs/file/%s/' % targets_relative_path))))
# network platform
networks = [g.split('/')[1] for g in groups if g.startswith('network/')]
self.network_platform = networks[0] if networks else None
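        # Iterate over a fixed-size snapshot of the group list, since the loop body appends
        # parent groups to the same list and those additions must not be expanded again.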
for group in itertools.islice(groups, 0, len(groups)):
if '/' in group:
parts = group.split('/')
for i in range(1, len(parts)):
groups.append('/'.join(parts[:i]))
if not any(g in self.non_posix for g in groups):
groups.append('posix')
# target type
# targets which are non-posix test against the target, even if they also support posix
force_target = any(group in self.non_posix for group in groups)
target_type, actual_type = categorize_integration_test(self.name, list(static_aliases), force_target)
groups.extend(['context/', f'context/{target_type.name.lower()}'])
if target_type != actual_type:
# allow users to query for the actual type
groups.extend(['context/', f'context/{actual_type.name.lower()}'])
self.target_type = target_type
self.actual_type = actual_type
# aliases
aliases = [self.name] + \
['%s/' % g for g in groups] + \
['%s/%s' % (g, self.name) for g in groups if g not in self.categories]
if 'hidden/' in aliases:
aliases = ['hidden/'] + ['hidden/%s' % a for a in aliases if not a.startswith('hidden/')]
self.aliases = tuple(sorted(set(aliases)))
# configuration
self.retry_never = 'retry/never/' in self.aliases
self.setup_once = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/once/'))))
self.setup_always = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/always/'))))
self.needs_target = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('needs/target/'))))
class TargetPatternsNotMatched(ApplicationError):
"""One or more targets were not matched when a match was required."""
def __init__(self, patterns: set[str]) -> None:
self.patterns = sorted(patterns)
if len(patterns) > 1:
message = 'Target patterns not matched:\n%s' % '\n'.join(self.patterns)
else:
message = 'Target pattern not matched: %s' % self.patterns[0]
super().__init__(message)
TCompletionTarget = t.TypeVar('TCompletionTarget', bound=CompletionTarget)
TIntegrationTarget = t.TypeVar('TIntegrationTarget', bound=IntegrationTarget)
| 25,320
|
Python
|
.py
| 513
| 39.844055
| 157
| 0.636356
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,108
|
provisioning.py
|
ansible_ansible/test/lib/ansible_test/_internal/provisioning.py
|
"""Provision hosts for running tests."""
from __future__ import annotations
import collections.abc as c
import dataclasses
import functools
import itertools
import os
import pickle
import sys
import time
import traceback
import typing as t
from .config import (
EnvironmentConfig,
)
from .util import (
ApplicationError,
HostConnectionError,
display,
open_binary_file,
verify_sys_executable,
version_to_str,
type_guard,
)
from .util_common import (
ExitHandler,
)
from .thread import (
WrappedThread,
)
from .host_profiles import (
ControllerHostProfile,
DockerProfile,
HostProfile,
SshConnection,
SshTargetHostProfile,
create_host_profile,
)
from .pypi_proxy import (
run_pypi_proxy,
)
THostProfile = t.TypeVar('THostProfile', bound=HostProfile)
TEnvironmentConfig = t.TypeVar('TEnvironmentConfig', bound=EnvironmentConfig)
class PrimeContainers(ApplicationError):
"""Exception raised to end execution early after priming containers."""
@dataclasses.dataclass(frozen=True)
class HostState:
"""State of hosts and profiles to be passed to ansible-test during delegation."""
controller_profile: ControllerHostProfile
target_profiles: list[HostProfile]
@property
def profiles(self) -> list[HostProfile]:
"""Return all the profiles as a list."""
return [t.cast(HostProfile, self.controller_profile)] + self.target_profiles
def serialize(self, path: str) -> None:
"""Serialize the host state to the given path."""
with open_binary_file(path, 'wb') as state_file:
pickle.dump(self, state_file)
@staticmethod
def deserialize(args: EnvironmentConfig, path: str) -> HostState:
"""Deserialize host state from the given args and path."""
with open_binary_file(path) as state_file:
host_state: HostState = pickle.load(state_file)
host_state.controller_profile.args = args
for target in host_state.target_profiles:
target.args = args
return host_state
def get_controller_target_connections(self) -> list[SshConnection]:
"""Return SSH connection(s) for accessing all target hosts from the controller."""
return list(itertools.chain.from_iterable([target.get_controller_target_connections() for
target in self.target_profiles if isinstance(target, SshTargetHostProfile)]))
def targets(self, profile_type: t.Type[THostProfile]) -> list[THostProfile]:
"""The list of target(s), verified to be of the specified type."""
if not self.target_profiles:
raise Exception('No target profiles found.')
assert type_guard(self.target_profiles, profile_type)
return t.cast(list[THostProfile], self.target_profiles)
def prepare_profiles(
args: TEnvironmentConfig,
targets_use_pypi: bool = False,
skip_setup: bool = False,
requirements: t.Optional[c.Callable[[HostProfile], None]] = None,
) -> HostState:
"""
Create new profiles, or load existing ones, and return them.
If a requirements callback was provided, it will be used before configuring hosts if delegation has already been performed.
"""
if args.host_path:
host_state = HostState.deserialize(args, os.path.join(args.host_path, 'state.dat'))
else:
run_pypi_proxy(args, targets_use_pypi)
host_state = HostState(
controller_profile=t.cast(ControllerHostProfile, create_host_profile(args, args.controller, True)),
target_profiles=[create_host_profile(args, target, False) for target in args.targets],
)
if args.prime_containers:
for host_profile in host_state.profiles:
if isinstance(host_profile, DockerProfile):
host_profile.provision()
raise PrimeContainers()
ExitHandler.register(functools.partial(cleanup_profiles, host_state))
def provision(profile: HostProfile) -> None:
"""Provision the given profile."""
profile.provision()
if not skip_setup:
profile.setup()
dispatch_jobs([(profile, WrappedThread(functools.partial(provision, profile))) for profile in host_state.profiles])
host_state.controller_profile.configure()
if not args.delegate:
check_controller_python(args, host_state)
if requirements:
requirements(host_state.controller_profile)
def configure(profile: HostProfile) -> None:
"""Configure the given profile."""
profile.wait()
if not skip_setup:
profile.configure()
if requirements:
requirements(profile)
dispatch_jobs([(profile, WrappedThread(functools.partial(configure, profile))) for profile in host_state.target_profiles])
return host_state
def check_controller_python(args: EnvironmentConfig, host_state: HostState) -> None:
"""Check the running environment to make sure it is what we expected."""
sys_version = version_to_str(sys.version_info[:2])
controller_python = host_state.controller_profile.python
if expected_executable := verify_sys_executable(controller_python.path):
raise ApplicationError(f'Running under Python interpreter "{sys.executable}" instead of "{expected_executable}".')
expected_version = controller_python.version
if expected_version != sys_version:
raise ApplicationError(f'Running under Python version {sys_version} instead of {expected_version}.')
args.controller_python = controller_python
def cleanup_profiles(host_state: HostState) -> None:
"""Cleanup provisioned hosts when exiting."""
for profile in host_state.profiles:
profile.deprovision()
def dispatch_jobs(jobs: list[tuple[HostProfile, WrappedThread]]) -> None:
"""Run the given profile job threads and wait for them to complete."""
for profile, thread in jobs:
thread.daemon = True
thread.start()
while any(thread.is_alive() for profile, thread in jobs):
time.sleep(1)
failed = False
connection_failures = 0
for profile, thread in jobs:
try:
thread.wait_for_result()
except HostConnectionError as ex:
display.error(f'Host {profile.config} connection failed:\n{ex}')
failed = True
connection_failures += 1
except ApplicationError as ex:
display.error(f'Host {profile.config} job failed:\n{ex}')
failed = True
except Exception as ex: # pylint: disable=broad-except
name = f'{"" if ex.__class__.__module__ == "builtins" else ex.__class__.__module__ + "."}{ex.__class__.__qualname__}'
display.error(f'Host {profile.config} job failed:\nTraceback (most recent call last):\n'
f'{"".join(traceback.format_tb(ex.__traceback__)).rstrip()}\n{name}: {ex}')
failed = True
if connection_failures:
raise HostConnectionError(f'Host job(s) failed, including {connection_failures} connection failure(s). See previous error(s) for details.')
if failed:
raise ApplicationError('Host job(s) failed. See previous error(s) for details.')
| 7,320
|
Python
|
.py
| 163
| 37.380368
| 147
| 0.68178
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,109
|
cgroup.py
|
ansible_ansible/test/lib/ansible_test/_internal/cgroup.py
|
"""Linux control group constants, classes and utilities."""
from __future__ import annotations
import codecs
import dataclasses
import pathlib
import re
class CGroupPath:
"""Linux cgroup path constants."""
ROOT = '/sys/fs/cgroup'
SYSTEMD = '/sys/fs/cgroup/systemd'
SYSTEMD_RELEASE_AGENT = '/sys/fs/cgroup/systemd/release_agent'
class MountType:
"""Linux filesystem mount type constants."""
TMPFS = 'tmpfs'
CGROUP_V1 = 'cgroup'
CGROUP_V2 = 'cgroup2'
@dataclasses.dataclass(frozen=True)
class CGroupEntry:
"""A single cgroup entry parsed from '/proc/{pid}/cgroup' in the proc filesystem."""
id: int
subsystem: str
path: pathlib.PurePosixPath
@property
def root_path(self) -> pathlib.PurePosixPath:
"""The root path for this cgroup subsystem."""
return pathlib.PurePosixPath(CGroupPath.ROOT, self.subsystem)
@property
def full_path(self) -> pathlib.PurePosixPath:
"""The full path for this cgroup subsystem."""
return pathlib.PurePosixPath(self.root_path, str(self.path).lstrip('/'))
@classmethod
def parse(cls, value: str) -> CGroupEntry:
"""Parse the given cgroup line from the proc filesystem and return a cgroup entry."""
cid, subsystem, path = value.split(':', maxsplit=2)
return cls(
id=int(cid),
subsystem=subsystem.removeprefix('name='),
path=pathlib.PurePosixPath(path),
)
@classmethod
def loads(cls, value: str) -> tuple[CGroupEntry, ...]:
"""Parse the given output from the proc filesystem and return a tuple of cgroup entries."""
return tuple(cls.parse(line) for line in value.splitlines())
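# A minimal illustrative sketch (editor's addition, not part of the upstream module):
# parsing a hypothetical '/proc/{pid}/cgroup' line, including the 'name=' prefix used
# by the systemd hierarchy:
#
#     >>> entry = CGroupEntry.parse('12:cpu,cpuacct:/docker/abc123')
#     >>> entry.subsystem, str(entry.full_path)
#     ('cpu,cpuacct', '/sys/fs/cgroup/cpu,cpuacct/docker/abc123')
#     >>> CGroupEntry.parse('1:name=systemd:/init.scope').subsystem
#     'systemd'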
@dataclasses.dataclass(frozen=True)
class MountEntry:
"""A single mount info entry parsed from '/proc/{pid}/mountinfo' in the proc filesystem."""
mount_id: int
parent_id: int
device_major: int
device_minor: int
root: pathlib.PurePosixPath
path: pathlib.PurePosixPath
options: tuple[str, ...]
fields: tuple[str, ...]
type: str
source: pathlib.PurePosixPath
super_options: tuple[str, ...]
@classmethod
def parse(cls, value: str) -> MountEntry:
"""Parse the given mount info line from the proc filesystem and return a mount entry."""
# See: https://man7.org/linux/man-pages/man5/proc.5.html
# See: https://github.com/torvalds/linux/blob/aea23e7c464bfdec04b52cf61edb62030e9e0d0a/fs/proc_namespace.c#L135
mount_id, parent_id, device_major_minor, root, path, options, *remainder = value.split(' ')
fields = remainder[:-4]
separator, mtype, source, super_options = remainder[-4:]
assert separator == '-'
device_major, device_minor = device_major_minor.split(':')
return cls(
mount_id=int(mount_id),
parent_id=int(parent_id),
device_major=int(device_major),
device_minor=int(device_minor),
root=_decode_path(root),
path=_decode_path(path),
options=tuple(options.split(',')),
fields=tuple(fields),
type=mtype,
source=_decode_path(source),
super_options=tuple(super_options.split(',')),
)
@classmethod
def loads(cls, value: str) -> tuple[MountEntry, ...]:
"""Parse the given output from the proc filesystem and return a tuple of mount info entries."""
return tuple(cls.parse(line) for line in value.splitlines())
def _decode_path(value: str) -> pathlib.PurePosixPath:
"""Decode and return a path which may contain octal escape sequences."""
# See: https://github.com/torvalds/linux/blob/aea23e7c464bfdec04b52cf61edb62030e9e0d0a/fs/proc_namespace.c#L150
path = re.sub(r'(\\[0-7]{3})', lambda m: codecs.decode(m.group(0).encode('ascii'), 'unicode_escape'), value)
return pathlib.PurePosixPath(path)
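# Illustrative usage (editor's sketch, not part of the original module): parsing this
# process's own cgroup membership. The sample '/proc/self/cgroup' content below is a
# typical example, not a guaranteed format.
#
#     text = '12:cpu,cpuacct:/user.slice\n1:name=systemd:/init.scope'
#     for entry in CGroupEntry.loads(text):
#         print(entry.subsystem, entry.full_path)
#     # cpu,cpuacct /sys/fs/cgroup/cpu,cpuacct/user.slice
#     # systemd /sys/fs/cgroup/systemd/init.scope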
| 3,916 | Python | .py | 89 | 37.05618 | 119 | 0.665176 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,110 | constants.py | ansible_ansible/test/lib/ansible_test/_internal/constants.py |
"""Constants used by ansible-test. Imports should not be used in this file (other than to import the target common constants)."""
from __future__ import annotations
from .._util.target.common.constants import (
CONTROLLER_PYTHON_VERSIONS,
REMOTE_ONLY_PYTHON_VERSIONS,
)
STATUS_HOST_CONNECTION_ERROR = 4
# Setting a low soft RLIMIT_NOFILE value will improve the performance of subprocess.Popen on Python 2.x when close_fds=True.
# This will affect all Python subprocesses. It will also affect the current Python process if set before subprocess is imported for the first time.
SOFT_RLIMIT_NOFILE = 1024
# File used to track the ansible-test test execution timeout.
TIMEOUT_PATH = '.ansible-test-timeout.json'
CONTROLLER_MIN_PYTHON_VERSION = CONTROLLER_PYTHON_VERSIONS[0]
SUPPORTED_PYTHON_VERSIONS = REMOTE_ONLY_PYTHON_VERSIONS + CONTROLLER_PYTHON_VERSIONS
REMOTE_PROVIDERS = [
'default',
'aws',
'azure',
'parallels',
]
SECCOMP_CHOICES = [
'default',
'unconfined',
]
# This bin symlink map must exactly match the contents of the bin directory.
# It is necessary for payload creation to reconstruct the bin directory when running ansible-test from an installed version of ansible.
# It is also used to construct the injector directory at runtime.
# It is also used to construct entry points when not running ansible-test from source.
ANSIBLE_BIN_SYMLINK_MAP = {
'ansible': '../lib/ansible/cli/adhoc.py',
'ansible-config': '../lib/ansible/cli/config.py',
'ansible-console': '../lib/ansible/cli/console.py',
'ansible-doc': '../lib/ansible/cli/doc.py',
'ansible-galaxy': '../lib/ansible/cli/galaxy.py',
'ansible-inventory': '../lib/ansible/cli/inventory.py',
'ansible-playbook': '../lib/ansible/cli/playbook.py',
'ansible-pull': '../lib/ansible/cli/pull.py',
'ansible-test': '../test/lib/ansible_test/_util/target/cli/ansible_test_cli_stub.py',
'ansible-vault': '../lib/ansible/cli/vault.py',
}
| 1,969 | Python | .py | 40 | 46.225 | 147 | 0.741281 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,111 | util_common.py | ansible_ansible/test/lib/ansible_test/_internal/util_common.py |
"""Common utility code that depends on CommonConfig."""
from __future__ import annotations
import collections.abc as c
import contextlib
import json
import os
import re
import shlex
import sys
import tempfile
import textwrap
import typing as t
from .constants import (
ANSIBLE_BIN_SYMLINK_MAP,
)
from .encoding import (
to_bytes,
)
from .util import (
cache,
display,
get_ansible_version,
remove_tree,
MODE_DIRECTORY,
MODE_FILE_EXECUTE,
MODE_FILE,
OutputStream,
PYTHON_PATHS,
raw_command,
ANSIBLE_TEST_DATA_ROOT,
ANSIBLE_TEST_TARGET_ROOT,
ANSIBLE_TEST_TARGET_TOOLS_ROOT,
ApplicationError,
SubprocessError,
generate_name,
verified_chmod,
)
from .io import (
make_dirs,
read_text_file,
write_text_file,
write_json_file,
)
from .data import (
data_context,
)
from .provider.layout import (
LayoutMessages,
)
from .host_configs import (
PythonConfig,
VirtualPythonConfig,
)
CHECK_YAML_VERSIONS: dict[str, t.Any] = {}
class ExitHandler:
"""Simple exit handler implementation."""
_callbacks: list[tuple[t.Callable, tuple[t.Any, ...], dict[str, t.Any]]] = []
@staticmethod
def register(func: t.Callable, *args, **kwargs) -> None:
"""Register the given function and args as a callback to execute during program termination."""
ExitHandler._callbacks.append((func, args, kwargs))
@staticmethod
@contextlib.contextmanager
def context() -> t.Generator[None, None, None]:
"""Run all registered handlers when the context is exited."""
last_exception: BaseException | None = None
try:
yield
finally:
queue = list(ExitHandler._callbacks)
while queue:
func, args, kwargs = queue.pop()
try:
func(*args, **kwargs)
except BaseException as ex: # pylint: disable=broad-exception-caught
last_exception = ex
display.fatal(f'Exit handler failed: {ex}')
if last_exception:
raise last_exception
class ShellScriptTemplate:
"""A simple substitution template for shell scripts."""
def __init__(self, template: str) -> None:
self.template = template
def substitute(self, **kwargs: t.Union[str, list[str]]) -> str:
"""Return a string templated with the given arguments."""
kvp = dict((k, self.quote(v)) for k, v in kwargs.items())
pattern = re.compile(r'#{(?P<name>[^}]+)}')
value = pattern.sub(lambda match: kvp[match.group('name')], self.template)
return value
@staticmethod
def quote(value: t.Union[str, list[str]]) -> str:
"""Return a shell quoted version of the given value."""
if isinstance(value, list):
return shlex.quote(' '.join(value))
return shlex.quote(value)
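# Illustrative behavior (editor's sketch, not part of the original module):
#
#     template = ShellScriptTemplate('echo #{message}')
#     template.substitute(message='hello world')  # -> "echo 'hello world'"
#     template.substitute(message=['a', 'b c'])   # -> "echo 'a b c'"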
class ResultType:
"""Test result type."""
BOT: ResultType = None
COVERAGE: ResultType = None
DATA: ResultType = None
JUNIT: ResultType = None
LOGS: ResultType = None
REPORTS: ResultType = None
TMP: ResultType = None
@staticmethod
def _populate() -> None:
ResultType.BOT = ResultType('bot')
ResultType.COVERAGE = ResultType('coverage')
ResultType.DATA = ResultType('data')
ResultType.JUNIT = ResultType('junit')
ResultType.LOGS = ResultType('logs')
ResultType.REPORTS = ResultType('reports')
ResultType.TMP = ResultType('.tmp')
def __init__(self, name: str) -> None:
self.name = name
@property
def relative_path(self) -> str:
"""The content relative path to the results."""
return os.path.join(data_context().content.results_path, self.name)
@property
def path(self) -> str:
"""The absolute path to the results."""
return os.path.join(data_context().content.root, self.relative_path)
def __str__(self) -> str:
return self.name
# noinspection PyProtectedMember
ResultType._populate() # pylint: disable=protected-access
class CommonConfig:
"""Configuration common to all commands."""
def __init__(self, args: t.Any, command: str) -> None:
self.command = command
self.interactive = False
self.check_layout = True
self.success: t.Optional[bool] = None
self.color: bool = args.color
self.explain: bool = args.explain
self.verbosity: int = args.verbosity
self.debug: bool = args.debug
self.truncate: int = args.truncate
self.redact: bool = args.redact
self.display_stderr: bool = False
self.session_name = generate_name()
self.cache: dict[str, t.Any] = {}
def get_ansible_config(self) -> str:
"""Return the path to the Ansible config for the given config."""
return os.path.join(ANSIBLE_TEST_DATA_ROOT, 'ansible.cfg')
def get_docs_url(url: str) -> str:
"""
Return the given docs.ansible.com URL updated to match the running ansible-test version, if it is not a pre-release version.
The URL should be in the form: https://docs.ansible.com/ansible-core/devel/path/to/doc.html
Where 'devel' will be replaced with the current version, unless it is a pre-release version.
When run under a pre-release version, the URL will remain unchanged.
This serves to provide a fallback URL for pre-release versions.
It also makes searching the source for docs links easier, since a full URL is provided to this function.
"""
url_prefix = 'https://docs.ansible.com/ansible-core/devel/'
if not url.startswith(url_prefix):
raise ValueError(f'URL "{url}" does not start with: {url_prefix}')
ansible_version = get_ansible_version()
if re.search(r'^[0-9.]+$', ansible_version):
url_version = '.'.join(ansible_version.split('.')[:2])
new_prefix = f'https://docs.ansible.com/ansible-core/{url_version}/'
url = url.replace(url_prefix, new_prefix)
return url
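# For example (editor's note): under a released ansible-core 2.17.1, the URL
# https://docs.ansible.com/ansible-core/devel/dev_guide/testing.html
# becomes https://docs.ansible.com/ansible-core/2.17/dev_guide/testing.html,
# while a pre-release version such as 2.18.0.dev0 leaves it unchanged.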
def create_result_directories(args: CommonConfig) -> None:
"""Create result directories."""
if args.explain:
return
make_dirs(ResultType.COVERAGE.path)
make_dirs(ResultType.DATA.path)
def handle_layout_messages(messages: t.Optional[LayoutMessages]) -> None:
"""Display the given layout messages."""
if not messages:
return
for message in messages.info:
display.info(message, verbosity=1)
for message in messages.warning:
display.warning(message)
if messages.error:
raise ApplicationError('\n'.join(messages.error))
def process_scoped_temporary_file(args: CommonConfig, prefix: t.Optional[str] = 'ansible-test-', suffix: t.Optional[str] = None) -> str:
"""Return the path to a temporary file that will be automatically removed when the process exits."""
if args.explain:
path = os.path.join(tempfile.gettempdir(), f'{prefix or tempfile.gettempprefix()}{generate_name()}{suffix or ""}')
else:
temp_fd, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
os.close(temp_fd)
ExitHandler.register(lambda: os.remove(path))
return path
def process_scoped_temporary_directory(args: CommonConfig, prefix: t.Optional[str] = 'ansible-test-', suffix: t.Optional[str] = None) -> str:
"""Return the path to a temporary directory that will be automatically removed when the process exits."""
if args.explain:
path = os.path.join(tempfile.gettempdir(), f'{prefix or tempfile.gettempprefix()}{generate_name()}{suffix or ""}')
else:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix)
ExitHandler.register(lambda: remove_tree(path))
return path
@contextlib.contextmanager
def named_temporary_file(args: CommonConfig, prefix: str, suffix: str, directory: t.Optional[str], content: str) -> c.Iterator[str]:
"""Context manager for a named temporary file."""
if args.explain:
yield os.path.join(directory or '/tmp', '%stemp%s' % (prefix, suffix))
else:
with tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, dir=directory) as tempfile_fd:
tempfile_fd.write(to_bytes(content))
tempfile_fd.flush()
try:
yield tempfile_fd.name
finally:
pass
def write_json_test_results(
category: ResultType,
name: str,
content: t.Union[list[t.Any], dict[str, t.Any]],
formatted: bool = True,
encoder: t.Optional[t.Type[json.JSONEncoder]] = None,
) -> None:
"""Write the given json content to the specified test results path, creating directories as needed."""
path = os.path.join(category.path, name)
write_json_file(path, content, create_directories=True, formatted=formatted, encoder=encoder)
def write_text_test_results(category: ResultType, name: str, content: str) -> None:
"""Write the given text content to the specified test results path, creating directories as needed."""
path = os.path.join(category.path, name)
write_text_file(path, content, create_directories=True)
@cache
def get_injector_path() -> str:
"""Return the path to a directory which contains a `python.py` executable and associated injector scripts."""
injector_path = tempfile.mkdtemp(prefix='ansible-test-', suffix='-injector', dir='/tmp')
display.info(f'Initializing "{injector_path}" as the temporary injector directory.', verbosity=1)
injector_names = sorted(list(ANSIBLE_BIN_SYMLINK_MAP) + [
'importer.py',
'pytest',
'ansible_connection_cli_stub.py',
])
scripts = (
('python.py', '/usr/bin/env python', MODE_FILE_EXECUTE),
('virtualenv.sh', '/usr/bin/env bash', MODE_FILE),
)
source_path = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'injector')
for name in injector_names:
os.symlink('python.py', os.path.join(injector_path, name))
for name, shebang, mode in scripts:
src = os.path.join(source_path, name)
dst = os.path.join(injector_path, name)
script = read_text_file(src)
script = set_shebang(script, shebang)
write_text_file(dst, script)
verified_chmod(dst, mode)
verified_chmod(injector_path, MODE_DIRECTORY)
def cleanup_injector() -> None:
"""Remove the temporary injector directory."""
remove_tree(injector_path)
ExitHandler.register(cleanup_injector)
return injector_path
def set_shebang(script: str, executable: str) -> str:
"""Return the given script with the specified executable used for the shebang."""
prefix = '#!'
shebang = prefix + executable
overwrite = (
prefix,
'# auto-shebang',
'# shellcheck shell=',
)
lines = script.splitlines()
if any(lines[0].startswith(value) for value in overwrite):
lines[0] = shebang
else:
lines.insert(0, shebang)
script = '\n'.join(lines)
return script
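# Illustrative behavior (editor's sketch):
#
#     set_shebang('#!/bin/sh\necho hi', '/usr/bin/env bash')  # -> '#!/usr/bin/env bash\necho hi'
#     set_shebang('echo hi', '/usr/bin/env bash')             # -> '#!/usr/bin/env bash\necho hi'
#
# An existing shebang (or marker comment) is replaced; otherwise one is inserted.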
def get_python_path(interpreter: str) -> str:
"""Return the path to a directory which contains a `python` executable that runs the specified interpreter."""
python_path = PYTHON_PATHS.get(interpreter)
if python_path:
return python_path
prefix = 'python-'
suffix = '-ansible'
root_temp_dir = '/tmp'
python_path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)
injected_interpreter = os.path.join(python_path, 'python')
# A symlink is faster than the execv wrapper, but isn't guaranteed to provide the correct result.
# There are several scenarios known not to work with symlinks:
#
# - A virtual environment where the target is a symlink to another directory.
# - A pyenv environment where the target is a shell script that changes behavior based on the program name.
#
# To avoid issues for these and other scenarios, only an exec wrapper is used.
display.info('Injecting "%s" as an execv wrapper for the "%s" interpreter.' % (injected_interpreter, interpreter), verbosity=1)
create_interpreter_wrapper(interpreter, injected_interpreter)
verified_chmod(python_path, MODE_DIRECTORY)
if not PYTHON_PATHS:
ExitHandler.register(cleanup_python_paths)
PYTHON_PATHS[interpreter] = python_path
return python_path
def create_temp_dir(prefix: t.Optional[str] = None, suffix: t.Optional[str] = None, base_dir: t.Optional[str] = None) -> str:
"""Create a temporary directory that persists until the current process exits."""
temp_path = tempfile.mkdtemp(prefix=prefix or 'tmp', suffix=suffix or '', dir=base_dir)
ExitHandler.register(remove_tree, temp_path)
return temp_path
def create_interpreter_wrapper(interpreter: str, injected_interpreter: str) -> None:
"""Create a wrapper for the given Python interpreter at the specified path."""
# sys.executable is used for the shebang to guarantee it is a binary instead of a script
# injected_interpreter could be a script from the system or our own wrapper created for the --venv option
shebang_interpreter = sys.executable
code = textwrap.dedent('''
#!%s
from __future__ import annotations
from os import execv
from sys import argv
python = '%s'
execv(python, [python] + argv[1:])
''' % (shebang_interpreter, interpreter)).lstrip()
write_text_file(injected_interpreter, code)
verified_chmod(injected_interpreter, MODE_FILE_EXECUTE)
def cleanup_python_paths() -> None:
"""Clean up all temporary python directories."""
for path in sorted(PYTHON_PATHS.values()):
display.info('Cleaning up temporary python directory: %s' % path, verbosity=2)
remove_tree(path)
def intercept_python(
args: CommonConfig,
python: PythonConfig,
cmd: list[str],
env: dict[str, str],
capture: bool,
data: t.Optional[str] = None,
cwd: t.Optional[str] = None,
always: bool = False,
) -> tuple[t.Optional[str], t.Optional[str]]:
"""
Run a command while intercepting invocations of Python to control the version used.
If the specified Python is an ansible-test managed virtual environment, it will be added to PATH to activate it.
Otherwise a temporary directory will be created to ensure the correct Python can be found in PATH.
"""
env = env.copy()
cmd = list(cmd)
inject_path = get_injector_path()
# make sure scripts (including injector.py) find the correct Python interpreter
if isinstance(python, VirtualPythonConfig):
python_path = os.path.dirname(python.path)
else:
python_path = get_python_path(python.path)
env['PATH'] = os.path.pathsep.join([inject_path, python_path, env['PATH']])
env['ANSIBLE_TEST_PYTHON_VERSION'] = python.version
env['ANSIBLE_TEST_PYTHON_INTERPRETER'] = python.path
return run_command(args, cmd, capture=capture, env=env, data=data, cwd=cwd, always=always)
def run_command(
args: CommonConfig,
cmd: c.Iterable[str],
capture: bool,
env: t.Optional[dict[str, str]] = None,
data: t.Optional[str] = None,
cwd: t.Optional[str] = None,
always: bool = False,
stdin: t.Optional[t.IO[bytes]] = None,
stdout: t.Optional[t.IO[bytes]] = None,
interactive: bool = False,
output_stream: t.Optional[OutputStream] = None,
cmd_verbosity: int = 1,
str_errors: str = 'strict',
error_callback: t.Optional[c.Callable[[SubprocessError], None]] = None,
) -> tuple[t.Optional[str], t.Optional[str]]:
"""Run the specified command and return stdout and stderr as a tuple."""
explain = args.explain and not always
return raw_command(
cmd,
capture=capture,
env=env,
data=data,
cwd=cwd,
explain=explain,
stdin=stdin,
stdout=stdout,
interactive=interactive,
output_stream=output_stream,
cmd_verbosity=cmd_verbosity,
str_errors=str_errors,
error_callback=error_callback,
)
def yamlcheck(python: PythonConfig, explain: bool = False) -> t.Optional[bool]:
"""Return True if PyYAML has libyaml support, False if it does not and None if it was not found."""
stdout = raw_command([python.path, os.path.join(ANSIBLE_TEST_TARGET_TOOLS_ROOT, 'yamlcheck.py')], capture=True, explain=explain)[0]
if explain:
return None
result = json.loads(stdout)
if not result['yaml']:
return None
return result['cloader']
def check_pyyaml(python: PythonConfig, required: bool = True, quiet: bool = False) -> t.Optional[bool]:
"""
Return True if PyYAML has libyaml support, False if it does not and None if it was not found.
The result is cached if True or required.
"""
try:
return CHECK_YAML_VERSIONS[python.path]
except KeyError:
pass
state = yamlcheck(python)
if state is not None or required:
# results are cached only if pyyaml is required or present
# it is assumed that tests will not uninstall/re-install pyyaml -- if they do, those changes will go undetected
CHECK_YAML_VERSIONS[python.path] = state
if not quiet:
if state is None:
if required:
display.warning('PyYAML is not installed for interpreter: %s' % python.path)
elif not state:
display.warning('PyYAML will be slow due to installation without libyaml support for interpreter: %s' % python.path)
return state
| 17,490 | Python | .py | 409 | 36.386308 | 141 | 0.676148 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,112 | thread.py | ansible_ansible/test/lib/ansible_test/_internal/thread.py |
"""Python threading tools."""
from __future__ import annotations
import collections.abc as c
import contextlib
import functools
import sys
import threading
import queue
import typing as t
TCallable = t.TypeVar('TCallable', bound=t.Callable[..., t.Any])
class WrappedThread(threading.Thread):
"""Wrapper around Thread which captures results and exceptions."""
def __init__(self, action: c.Callable[[], t.Any]) -> None:
super().__init__()
self._result: queue.Queue[t.Any] = queue.Queue()
self.action = action
self.result = None
def run(self) -> None:
"""
Run action and capture results or exception.
Do not override. Do not call directly. Executed by the start() method.
"""
# We truly want to catch anything that the worker thread might do, including calls to sys.exit().
# Therefore, we catch *everything* (including BaseException subclasses such as SystemExit).
# noinspection PyBroadException
try:
self._result.put((self.action(), None))
# pylint: disable=locally-disabled, bare-except
except: # noqa
self._result.put((None, sys.exc_info()))
def wait_for_result(self) -> t.Any:
"""Wait for thread to exit and return the result or raise an exception."""
result, exception = self._result.get()
if exception:
raise exception[1].with_traceback(exception[2])
self.result = result
return result
def mutex(func: TCallable) -> TCallable:
"""Enforce exclusive access on a decorated function."""
lock = threading.Lock()
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""Wrapper around `func` which uses a lock to provide exclusive access to the function."""
with lock:
return func(*args, **kwargs)
return wrapper # type: ignore[return-value] # requires https://www.python.org/dev/peps/pep-0612/ support
__named_lock = threading.Lock()
__named_locks: dict[str, threading.Lock] = {}
@contextlib.contextmanager
def named_lock(name: str) -> c.Iterator[bool]:
"""
Context manager that provides named locks using threading.Lock instances.
Once named lock instances are created they are not deleted.
Returns True if this is the first instance of the named lock, otherwise False.
"""
with __named_lock:
if lock_instance := __named_locks.get(name):
first = False
else:
first = True
lock_instance = __named_locks[name] = threading.Lock()
with lock_instance:
yield first
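# Illustrative usage (editor's sketch, not part of the original module):
#
#     thread = WrappedThread(lambda: 1 + 1)
#     thread.start()
#     assert thread.wait_for_result() == 2
#
#     with named_lock('example-lock') as first:
#         pass  # first is True only for the first use of this lock name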
| 2,596 | Python | .py | 63 | 34.492063 | 110 | 0.659371 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,113 | pypi_proxy.py | ansible_ansible/test/lib/ansible_test/_internal/pypi_proxy.py |
"""PyPI proxy management."""
from __future__ import annotations
import os
import urllib.parse
from .io import (
write_text_file,
)
from .config import (
EnvironmentConfig,
)
from .host_configs import (
PosixConfig,
)
from .util import (
ApplicationError,
display,
)
from .util_common import (
ExitHandler,
process_scoped_temporary_file,
)
from .docker_util import (
docker_available,
)
from .containers import (
HostType,
get_container_database,
run_support_container,
)
from .ansible_util import (
run_playbook,
)
from .host_profiles import (
HostProfile,
)
from .inventory import (
create_posix_inventory,
)
def run_pypi_proxy(args: EnvironmentConfig, targets_use_pypi: bool) -> None:
"""Run a PyPI proxy support container."""
if args.pypi_endpoint:
return # user has overridden the proxy endpoint, there is nothing to provision
versions_needing_proxy: tuple[str, ...] = tuple() # preserved for future use, no versions currently require this
posix_targets = [target for target in args.targets if isinstance(target, PosixConfig)]
need_proxy = targets_use_pypi and any(target.python.version in versions_needing_proxy for target in posix_targets)
use_proxy = args.pypi_proxy or need_proxy
if not use_proxy:
return
if not docker_available():
if args.pypi_proxy:
raise ApplicationError('Use of the PyPI proxy was requested, but Docker is not available.')
display.warning('Unable to use the PyPI proxy because Docker is not available. Installation of packages using `pip` may fail.')
return
image = 'quay.io/ansible/pypi-test-container:3.2.0'
port = 3141
run_support_container(
args=args,
context='__pypi_proxy__',
image=image,
name='pypi-test-container',
ports=[port],
)
def configure_pypi_proxy(args: EnvironmentConfig, profile: HostProfile) -> None:
"""Configure the environment to use a PyPI proxy, if present."""
if args.pypi_endpoint:
pypi_endpoint = args.pypi_endpoint
else:
containers = get_container_database(args)
context = containers.data.get(HostType.control if profile.controller else HostType.managed, {}).get('__pypi_proxy__')
if not context:
return # proxy not configured
access = list(context.values())[0]
host = access.host_ip
port = dict(access.port_map())[3141]
pypi_endpoint = f'http://{host}:{port}/root/pypi/+simple/'
pypi_hostname = urllib.parse.urlparse(pypi_endpoint)[1].split(':')[0]
if profile.controller:
configure_controller_pypi_proxy(args, profile, pypi_endpoint, pypi_hostname)
else:
configure_target_pypi_proxy(args, profile, pypi_endpoint, pypi_hostname)
def configure_controller_pypi_proxy(args: EnvironmentConfig, profile: HostProfile, pypi_endpoint: str, pypi_hostname: str) -> None:
"""Configure the controller environment to use a PyPI proxy."""
configure_pypi_proxy_pip(args, profile, pypi_endpoint, pypi_hostname)
configure_pypi_proxy_easy_install(args, profile, pypi_endpoint)
def configure_target_pypi_proxy(args: EnvironmentConfig, profile: HostProfile, pypi_endpoint: str, pypi_hostname: str) -> None:
"""Configure the target environment to use a PyPI proxy."""
inventory_path = process_scoped_temporary_file(args)
create_posix_inventory(args, inventory_path, [profile])
def cleanup_pypi_proxy() -> None:
"""Undo changes made to configure the PyPI proxy."""
run_playbook(args, inventory_path, 'pypi_proxy_restore.yml', capture=True)
force = 'yes' if profile.config.is_managed else 'no'
run_playbook(args, inventory_path, 'pypi_proxy_prepare.yml', capture=True, variables=dict(
pypi_endpoint=pypi_endpoint, pypi_hostname=pypi_hostname, force=force))
ExitHandler.register(cleanup_pypi_proxy)
def configure_pypi_proxy_pip(args: EnvironmentConfig, profile: HostProfile, pypi_endpoint: str, pypi_hostname: str) -> None:
"""Configure a custom index for pip based installs."""
pip_conf_path = os.path.expanduser('~/.pip/pip.conf')
pip_conf = '''
[global]
index-url = {0}
trusted-host = {1}
'''.format(pypi_endpoint, pypi_hostname).strip()
def pip_conf_cleanup() -> None:
"""Remove custom pip PyPI config."""
display.info('Removing custom PyPI config: %s' % pip_conf_path, verbosity=1)
os.remove(pip_conf_path)
if os.path.exists(pip_conf_path) and not profile.config.is_managed:
raise ApplicationError('Refusing to overwrite existing file: %s' % pip_conf_path)
display.info('Injecting custom PyPI config: %s' % pip_conf_path, verbosity=1)
display.info('Config: %s\n%s' % (pip_conf_path, pip_conf), verbosity=3)
if not args.explain:
write_text_file(pip_conf_path, pip_conf, True)
ExitHandler.register(pip_conf_cleanup)
def configure_pypi_proxy_easy_install(args: EnvironmentConfig, profile: HostProfile, pypi_endpoint: str) -> None:
"""Configure a custom index for easy_install based installs."""
pydistutils_cfg_path = os.path.expanduser('~/.pydistutils.cfg')
pydistutils_cfg = '''
[easy_install]
index_url = {0}
'''.format(pypi_endpoint).strip()
if os.path.exists(pydistutils_cfg_path) and not profile.config.is_managed:
raise ApplicationError('Refusing to overwrite existing file: %s' % pydistutils_cfg_path)
def pydistutils_cfg_cleanup() -> None:
"""Remove custom PyPI config."""
display.info('Removing custom PyPI config: %s' % pydistutils_cfg_path, verbosity=1)
os.remove(pydistutils_cfg_path)
display.info('Injecting custom PyPI config: %s' % pydistutils_cfg_path, verbosity=1)
display.info('Config: %s\n%s' % (pydistutils_cfg_path, pydistutils_cfg), verbosity=3)
if not args.explain:
write_text_file(pydistutils_cfg_path, pydistutils_cfg, True)
ExitHandler.register(pydistutils_cfg_cleanup)
| 6,019 | Python | .py | 132 | 40.174242 | 135 | 0.7056 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,114 | git.py | ansible_ansible/test/lib/ansible_test/_internal/git.py |
"""Wrapper around git command-line tools."""
from __future__ import annotations
import re
import typing as t
from .util import (
SubprocessError,
raw_command,
)
class Git:
"""Wrapper around git command-line tools."""
def __init__(self, root: t.Optional[str] = None) -> None:
self.git = 'git'
self.root = root
def get_diff(self, args: list[str], git_options: t.Optional[list[str]] = None) -> list[str]:
"""Run `git diff` and return the result as a list."""
cmd = ['diff'] + args
if git_options is None:
git_options = ['-c', 'core.quotePath=']
return self.run_git_split(git_options + cmd, '\n', str_errors='replace')
def get_diff_names(self, args: list[str]) -> list[str]:
"""Return a list of file names from the `git diff` command."""
cmd = ['diff', '--name-only', '--no-renames', '-z'] + args
return self.run_git_split(cmd, '\0')
def get_submodule_paths(self) -> list[str]:
"""Return a list of submodule paths recursively."""
cmd = ['submodule', 'status', '--recursive']
output = self.run_git_split(cmd, '\n')
submodule_paths = [re.search(r'^.[0-9a-f]+ (?P<path>[^ ]+)', line).group('path') for line in output]
# status is returned for all submodules in the current git repository relative to the current directory
# when the current directory is not the root of the git repository this can yield relative paths which are not below the current directory
# this can occur when multiple collections are in a git repo and some collections are submodules when others are not
# specifying "." as the path to enumerate would limit results to the current directory, but can cause the git command to fail with the error:
# error: pathspec '.' did not match any file(s) known to git
# this can occur when the current directory contains no files tracked by git
# instead we'll filter out the relative paths, since we're only interested in those at or below the current directory
submodule_paths = [path for path in submodule_paths if not path.startswith('../')]
return submodule_paths
def get_file_names(self, args: list[str]) -> list[str]:
"""Return a list of file names from the `git ls-files` command."""
cmd = ['ls-files', '-z'] + args
return self.run_git_split(cmd, '\0')
def get_branches(self) -> list[str]:
"""Return the list of branches."""
cmd = ['for-each-ref', 'refs/heads/', '--format', '%(refname:strip=2)']
return self.run_git_split(cmd)
def get_branch(self) -> str:
"""Return the current branch name."""
cmd = ['symbolic-ref', '--short', 'HEAD']
return self.run_git(cmd).strip()
def get_rev_list(self, commits: t.Optional[list[str]] = None, max_count: t.Optional[int] = None) -> list[str]:
"""Return the list of results from the `git rev-list` command."""
cmd = ['rev-list']
if commits:
cmd += commits
else:
cmd += ['HEAD']
if max_count:
cmd += ['--max-count', '%s' % max_count]
return self.run_git_split(cmd)
def get_branch_fork_point(self, branch: str) -> str:
"""Return a reference to the point at which the given branch was forked."""
cmd = ['merge-base', branch, 'HEAD']
return self.run_git(cmd).strip()
def is_valid_ref(self, ref: str) -> bool:
"""Return True if the given reference is valid, otherwise return False."""
cmd = ['show', ref]
try:
self.run_git(cmd, str_errors='replace')
return True
except SubprocessError:
return False
def run_git_split(self, cmd: list[str], separator: t.Optional[str] = None, str_errors: str = 'strict') -> list[str]:
"""Run the given `git` command and return the results as a list."""
output = self.run_git(cmd, str_errors=str_errors).strip(separator)
if not output:
return []
return output.split(separator)
def run_git(self, cmd: list[str], str_errors: str = 'strict') -> str:
"""Run the given `git` command and return the results as a string."""
return raw_command([self.git] + cmd, cwd=self.root, capture=True, str_errors=str_errors)[0]
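# Illustrative usage (editor's sketch, not part of the original module):
#
#     git = Git()
#     current = git.get_branch()                # e.g. 'devel'
#     changed = git.get_diff_names(['HEAD~1'])  # files changed relative to the previous commit
#     valid = git.is_valid_ref('v2.17.0')       # True if the ref exists ('v2.17.0' is a hypothetical ref)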
| 4,366 | Python | .py | 80 | 46.3375 | 149 | 0.620221 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,115 | connections.py | ansible_ansible/test/lib/ansible_test/_internal/connections.py |
"""Connection abstraction for interacting with test hosts."""
from __future__ import annotations
import abc
import shlex
import tempfile
import typing as t
from .io import (
read_text_file,
)
from .config import (
EnvironmentConfig,
)
from .util import (
Display,
OutputStream,
SubprocessError,
retry,
)
from .util_common import (
run_command,
)
from .docker_util import (
DockerInspect,
docker_exec,
docker_inspect,
docker_network_disconnect,
)
from .ssh import (
SshConnectionDetail,
ssh_options_to_list,
)
from .become import (
Become,
)
class Connection(metaclass=abc.ABCMeta):
"""Base class for connecting to a host."""
@abc.abstractmethod
def run(
self,
command: list[str],
capture: bool,
interactive: bool = False,
data: t.Optional[str] = None,
stdin: t.Optional[t.IO[bytes]] = None,
stdout: t.Optional[t.IO[bytes]] = None,
output_stream: t.Optional[OutputStream] = None,
) -> tuple[t.Optional[str], t.Optional[str]]:
"""Run the specified command and return the result."""
def extract_archive(
self,
chdir: str,
src: t.IO[bytes],
):
"""Extract the given archive file stream in the specified directory."""
tar_cmd = ['tar', 'oxzf', '-', '-C', chdir]
retry(lambda: self.run(tar_cmd, stdin=src, capture=True))
def create_archive(
self,
chdir: str,
name: str,
dst: t.IO[bytes],
exclude: t.Optional[str] = None,
):
"""Create the specified archive file stream from the specified directory, including the given name and optionally excluding the given name."""
tar_cmd = ['tar', 'cf', '-', '-C', chdir]
gzip_cmd = ['gzip']
if exclude:
tar_cmd += ['--exclude', exclude]
tar_cmd.append(name)
# Using gzip to compress the archive allows this to work on all POSIX systems we support.
commands = [tar_cmd, gzip_cmd]
sh_cmd = ['sh', '-c', ' | '.join(shlex.join(command) for command in commands)]
retry(lambda: self.run(sh_cmd, stdout=dst, capture=True))
class LocalConnection(Connection):
"""Connect to localhost."""
def __init__(self, args: EnvironmentConfig) -> None:
self.args = args
def run(
self,
command: list[str],
capture: bool,
interactive: bool = False,
data: t.Optional[str] = None,
stdin: t.Optional[t.IO[bytes]] = None,
stdout: t.Optional[t.IO[bytes]] = None,
output_stream: t.Optional[OutputStream] = None,
) -> tuple[t.Optional[str], t.Optional[str]]:
"""Run the specified command and return the result."""
return run_command(
args=self.args,
cmd=command,
capture=capture,
data=data,
stdin=stdin,
stdout=stdout,
interactive=interactive,
output_stream=output_stream,
)
class SshConnection(Connection):
"""Connect to a host using SSH."""
def __init__(self, args: EnvironmentConfig, settings: SshConnectionDetail, become: t.Optional[Become] = None) -> None:
self.args = args
self.settings = settings
self.become = become
self.options = ['-i', settings.identity_file]
ssh_options: dict[str, t.Union[int, str]] = dict(
BatchMode='yes',
StrictHostKeyChecking='no',
UserKnownHostsFile='/dev/null',
ServerAliveInterval=15,
ServerAliveCountMax=4,
)
ssh_options.update(settings.options)
self.options.extend(ssh_options_to_list(ssh_options))
def run(
self,
command: list[str],
capture: bool,
interactive: bool = False,
data: t.Optional[str] = None,
stdin: t.Optional[t.IO[bytes]] = None,
stdout: t.Optional[t.IO[bytes]] = None,
output_stream: t.Optional[OutputStream] = None,
) -> tuple[t.Optional[str], t.Optional[str]]:
"""Run the specified command and return the result."""
options = list(self.options)
if self.become:
command = self.become.prepare_command(command)
options.append('-q')
if interactive:
options.append('-tt')
with tempfile.NamedTemporaryFile(prefix='ansible-test-ssh-debug-', suffix='.log') as ssh_logfile:
options.extend(['-vvv', '-E', ssh_logfile.name])
if self.settings.port:
options.extend(['-p', str(self.settings.port)])
options.append(f'{self.settings.user}@{self.settings.host}')
options.append(shlex.join(command))
def error_callback(ex: SubprocessError) -> None:
"""Error handler."""
self.capture_log_details(ssh_logfile.name, ex)
return run_command(
args=self.args,
cmd=['ssh'] + options,
capture=capture,
data=data,
stdin=stdin,
stdout=stdout,
interactive=interactive,
output_stream=output_stream,
error_callback=error_callback,
)
@staticmethod
def capture_log_details(path: str, ex: SubprocessError) -> None:
"""Read the specified SSH debug log and add relevant details to the provided exception."""
if ex.status != 255:
return
markers = [
'debug1: Connection Established',
'debug1: Authentication successful',
'debug1: Entering interactive session',
'debug1: Sending command',
'debug2: PTY allocation request accepted',
'debug2: exec request accepted',
]
file_contents = read_text_file(path)
messages = []
for line in reversed(file_contents.splitlines()):
messages.append(line)
if any(line.startswith(marker) for marker in markers):
break
message = '\n'.join(reversed(messages))
ex.message += '>>> SSH Debug Output\n'
ex.message += '%s%s\n' % (message.strip(), Display.clear)
class DockerConnection(Connection):
"""Connect to a host using Docker."""
def __init__(self, args: EnvironmentConfig, container_id: str, user: t.Optional[str] = None) -> None:
self.args = args
self.container_id = container_id
self.user: t.Optional[str] = user
def run(
self,
command: list[str],
capture: bool,
interactive: bool = False,
data: t.Optional[str] = None,
stdin: t.Optional[t.IO[bytes]] = None,
stdout: t.Optional[t.IO[bytes]] = None,
output_stream: t.Optional[OutputStream] = None,
) -> tuple[t.Optional[str], t.Optional[str]]:
"""Run the specified command and return the result."""
options = []
if self.user:
options.extend(['--user', self.user])
if interactive:
options.append('-it')
return docker_exec(
args=self.args,
container_id=self.container_id,
cmd=command,
options=options,
capture=capture,
data=data,
stdin=stdin,
stdout=stdout,
interactive=interactive,
output_stream=output_stream,
)
def inspect(self) -> DockerInspect:
"""Inspect the container and return a DockerInspect instance with the results."""
return docker_inspect(self.args, self.container_id)
def disconnect_network(self, network: str) -> None:
"""Disconnect the container from the specified network."""
docker_network_disconnect(self.args, self.container_id, network)
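# Illustrative usage (editor's sketch, not part of the original module): copying a
# directory tree between hosts using the archive helpers. 'local' and 'remote' are
# hypothetical Connection instances and the paths are examples.
#
#     with tempfile.TemporaryFile() as payload:
#         local.create_archive(chdir='/tmp/src', name='content', dst=payload)
#         payload.seek(0)
#         remote.extract_archive(chdir='/tmp/dst', src=payload)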
| 7,855 | Python | .py | 213 | 27.920188 | 150 | 0.594965 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,116 | init.py | ansible_ansible/test/lib/ansible_test/_internal/init.py |
"""Early initialization for ansible-test before most other imports have been performed."""
from __future__ import annotations
import resource
from .constants import (
SOFT_RLIMIT_NOFILE,
)
CURRENT_RLIMIT_NOFILE = resource.getrlimit(resource.RLIMIT_NOFILE)
DESIRED_RLIMIT_NOFILE = (SOFT_RLIMIT_NOFILE, CURRENT_RLIMIT_NOFILE[1])
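# Tuples compare element-wise, so with identical hard limits this lowers only the soft limit, and only when it exceeds SOFT_RLIMIT_NOFILE.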
if DESIRED_RLIMIT_NOFILE < CURRENT_RLIMIT_NOFILE:
resource.setrlimit(resource.RLIMIT_NOFILE, DESIRED_RLIMIT_NOFILE)
CURRENT_RLIMIT_NOFILE = DESIRED_RLIMIT_NOFILE
| 505 | Python | .py | 11 | 43.454545 | 90 | 0.802041 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,117 | encoding.py | ansible_ansible/test/lib/ansible_test/_internal/encoding.py |
"""Functions for encoding and decoding strings."""
from __future__ import annotations
import typing as t
ENCODING = 'utf-8'
def to_optional_bytes(value: t.Optional[str | bytes], errors: str = 'strict') -> t.Optional[bytes]:
"""Return the given value as bytes encoded using UTF-8 if not already bytes, or None if the value is None."""
return None if value is None else to_bytes(value, errors)
def to_optional_text(value: t.Optional[str | bytes], errors: str = 'strict') -> t.Optional[str]:
"""Return the given value as text decoded using UTF-8 if not already text, or None if the value is None."""
return None if value is None else to_text(value, errors)
def to_bytes(value: str | bytes, errors: str = 'strict') -> bytes:
"""Return the given value as bytes encoded using UTF-8 if not already bytes."""
if isinstance(value, bytes):
return value
if isinstance(value, str):
return value.encode(ENCODING, errors)
raise Exception('value is not bytes or text: %s' % type(value))
def to_text(value: str | bytes, errors: str = 'strict') -> str:
"""Return the given value as text decoded using UTF-8 if not already text."""
if isinstance(value, bytes):
return value.decode(ENCODING, errors)
if isinstance(value, str):
return value
raise Exception('value is not bytes or text: %s' % type(value))
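# Illustrative behavior (editor's sketch):
#
#     to_bytes('héllo')          # -> b'h\xc3\xa9llo'
#     to_text(b'h\xc3\xa9llo')   # -> 'héllo'
#     to_optional_bytes(None)    # -> None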
| 1,379 | Python | .py | 24 | 52.541667 | 113 | 0.695749 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,118 | cache.py | ansible_ansible/test/lib/ansible_test/_internal/cache.py |
"""Cache for commonly shared data that is intended to be immutable."""
from __future__ import annotations
import collections.abc as c
import typing as t
from .config import (
CommonConfig,
)
TValue = t.TypeVar('TValue')
class CommonCache:
"""Common cache."""
def __init__(self, args: CommonConfig) -> None:
self.args = args
def get(self, key: str, factory: c.Callable[[], TValue]) -> TValue:
"""Return the value from the cache identified by the given key, using the specified factory method if it is not found."""
if key not in self.args.cache:
self.args.cache[key] = factory()
return self.args.cache[key]
def get_with_args(self, key: str, factory: c.Callable[[CommonConfig], TValue]) -> TValue:
"""Return the value from the cache identified by the given key, using the specified factory method (which accepts args) if it is not found."""
if key not in self.args.cache:
self.args.cache[key] = factory(self.args)
return self.args.cache[key]
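# Illustrative usage (editor's sketch, not part of the original module); the cache
# key and factory shown are hypothetical:
#
#     cache = CommonCache(args)
#     branches = cache.get('git_branches', lambda: Git().get_branches())
#
# The factory runs only on the first call for a given key; later calls return the
# value already stored in args.cache.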
| 1,050 | Python | .py | 22 | 41.727273 | 150 | 0.6778 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,119 | venv.py | ansible_ansible/test/lib/ansible_test/_internal/venv.py |
"""Virtual environment management."""
from __future__ import annotations
import collections.abc as c
import json
import os
import pathlib
import sys
import typing as t
from .config import (
EnvironmentConfig,
)
from .util import (
find_python,
SubprocessError,
ANSIBLE_TEST_TARGET_TOOLS_ROOT,
display,
remove_tree,
ApplicationError,
str_to_version,
raw_command,
)
from .util_common import (
run_command,
ResultType,
)
from .host_configs import (
VirtualPythonConfig,
PythonConfig,
)
from .python_requirements import (
collect_bootstrap,
run_pip,
)
def get_virtual_python(
args: EnvironmentConfig,
python: VirtualPythonConfig,
) -> VirtualPythonConfig:
"""Create a virtual environment for the given Python and return the path to its root."""
if python.system_site_packages:
suffix = '-ssp'
else:
suffix = ''
virtual_environment_path = os.path.join(ResultType.TMP.path, 'delegation', f'python{python.version}{suffix}')
virtual_environment_marker = os.path.join(virtual_environment_path, 'marker.txt')
virtual_environment_python = VirtualPythonConfig(
version=python.version,
path=os.path.join(virtual_environment_path, 'bin', 'python'),
system_site_packages=python.system_site_packages,
)
if os.path.exists(virtual_environment_marker):
display.info('Using existing Python %s virtual environment: %s' % (python.version, virtual_environment_path), verbosity=1)
else:
# a virtualenv without a marker is assumed to have been partially created
remove_tree(virtual_environment_path)
if not create_virtual_environment(args, python, virtual_environment_path, python.system_site_packages):
raise ApplicationError(f'Python {python.version} does not provide virtual environment support.')
commands = collect_bootstrap(virtual_environment_python)
run_pip(args, virtual_environment_python, commands, None) # get_virtual_python()
# touch the marker to keep track of when the virtualenv was last used
pathlib.Path(virtual_environment_marker).touch()
return virtual_environment_python
def create_virtual_environment(
args: EnvironmentConfig,
python: PythonConfig,
path: str,
system_site_packages: bool = False,
pip: bool = False,
) -> bool:
"""Create a virtual environment using venv for the requested Python version."""
if not os.path.exists(python.path):
# the requested python version could not be found
return False
# creating a virtual environment using 'venv' when running in a virtual environment created by 'virtualenv' results
# in a copy of the original virtual environment instead of creation of a new one
# avoid this issue by only using "real" python interpreters to invoke 'venv'
for real_python in iterate_real_pythons(python.version):
if run_venv(args, real_python, system_site_packages, pip, path):
display.info('Created Python %s virtual environment using "venv": %s' % (python.version, path), verbosity=1)
return True
# something went wrong, most likely the package maintainer for the Python installation removed ensurepip
# which will prevent creation of a virtual environment without installation of other OS packages
return False
def iterate_real_pythons(version: str) -> c.Iterable[str]:
"""
Iterate through available real python interpreters of the requested version.
The current interpreter will be checked and then the path will be searched.
"""
version_info = str_to_version(version)
current_python = None
if version_info == sys.version_info[:len(version_info)]:
current_python = sys.executable
real_prefix = get_python_real_prefix(current_python)
if real_prefix:
current_python = find_python(version, os.path.join(real_prefix, 'bin'))
if current_python:
yield current_python
path = os.environ.get('PATH', os.path.defpath)
if not path:
return
found_python = find_python(version, path)
if not found_python:
return
if found_python == current_python:
return
real_prefix = get_python_real_prefix(found_python)
if real_prefix:
found_python = find_python(version, os.path.join(real_prefix, 'bin'))
if found_python:
yield found_python
def get_python_real_prefix(python_path: str) -> t.Optional[str]:
"""
Return the real prefix of the specified interpreter or None if the interpreter is not a virtual environment created by 'virtualenv'.
"""
cmd = [python_path, os.path.join(ANSIBLE_TEST_TARGET_TOOLS_ROOT, 'virtualenvcheck.py')]
check_result = json.loads(raw_command(cmd, capture=True)[0])
real_prefix = check_result['real_prefix']
return real_prefix
def run_venv(
args: EnvironmentConfig,
run_python: str,
system_site_packages: bool,
pip: bool,
path: str,
) -> bool:
"""Create a virtual environment using the 'venv' module."""
cmd = [run_python, '-m', 'venv']
if system_site_packages:
cmd.append('--system-site-packages')
if not pip:
cmd.append('--without-pip')
cmd.append(path)
try:
run_command(args, cmd, capture=True)
except SubprocessError as ex:
remove_tree(path)
if args.verbosity > 1:
display.error(ex.message)
return False
return True
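# For reference (editor's note): run_venv() effectively executes a command such as
#
#     /usr/bin/python3.12 -m venv --without-pip /path/to/venv
#
# adding --system-site-packages when requested (the interpreter and paths shown here
# are hypothetical examples).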
| 5,521 | Python | .py | 139 | 33.971223 | 136 | 0.704946 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,120 | python_requirements.py | ansible_ansible/test/lib/ansible_test/_internal/python_requirements.py |
"""Python requirements management"""
from __future__ import annotations
import base64
import dataclasses
import json
import os
import typing as t
from .encoding import (
to_text,
to_bytes,
)
from .io import (
read_text_file,
)
from .util import (
ANSIBLE_TEST_DATA_ROOT,
ANSIBLE_TEST_TARGET_ROOT,
ApplicationError,
SubprocessError,
display,
)
from .util_common import (
check_pyyaml,
create_result_directories,
)
from .config import (
EnvironmentConfig,
IntegrationConfig,
UnitsConfig,
)
from .data import (
data_context,
)
from .host_configs import (
PosixConfig,
PythonConfig,
VirtualPythonConfig,
)
from .connections import (
LocalConnection,
Connection,
)
from .coverage_util import (
get_coverage_version,
)
QUIET_PIP_SCRIPT_PATH = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'quiet_pip.py')
REQUIREMENTS_SCRIPT_PATH = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'requirements.py')
# Pip Abstraction
class PipUnavailableError(ApplicationError):
"""Exception raised when pip is not available."""
def __init__(self, python: PythonConfig) -> None:
super().__init__(f'Python {python.version} at "{python.path}" does not have pip available.')
@dataclasses.dataclass(frozen=True)
class PipCommand:
"""Base class for pip commands."""
def serialize(self) -> tuple[str, dict[str, t.Any]]:
"""Return a serialized representation of this command."""
name = type(self).__name__[3:].lower()
return name, self.__dict__
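# Illustrative behavior (editor's sketch): serialize() derives the command name from
# the class name, e.g.
#
#     PipUninstall(packages=['coverage'], ignore_errors=True).serialize()
#     # -> ('uninstall', {'packages': ['coverage'], 'ignore_errors': True})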
@dataclasses.dataclass(frozen=True)
class PipInstall(PipCommand):
"""Details required to perform a pip install."""
requirements: list[tuple[str, str]]
constraints: list[tuple[str, str]]
packages: list[str]
def has_package(self, name: str) -> bool:
"""Return True if the specified package will be installed, otherwise False."""
name = name.lower()
return (any(name in package.lower() for package in self.packages) or
any(name in contents.lower() for path, contents in self.requirements))
@dataclasses.dataclass(frozen=True)
class PipUninstall(PipCommand):
"""Details required to perform a pip uninstall."""
packages: list[str]
ignore_errors: bool
@dataclasses.dataclass(frozen=True)
class PipVersion(PipCommand):
"""Details required to get the pip version."""
@dataclasses.dataclass(frozen=True)
class PipBootstrap(PipCommand):
"""Details required to bootstrap pip."""
pip_version: str
packages: list[str]
setuptools: bool
wheel: bool
# Entry Points
def install_requirements(
args: EnvironmentConfig,
python: PythonConfig,
ansible: bool = False,
command: bool = False,
coverage: bool = False,
controller: bool = True,
connection: t.Optional[Connection] = None,
) -> None:
"""Install requirements for the given Python using the specified arguments."""
create_result_directories(args)
if not requirements_allowed(args, controller):
return
if command and isinstance(args, (UnitsConfig, IntegrationConfig)) and args.coverage:
coverage = True
if ansible:
try:
ansible_cache = install_requirements.ansible_cache # type: ignore[attr-defined]
except AttributeError:
ansible_cache = install_requirements.ansible_cache = {} # type: ignore[attr-defined]
ansible_installed = ansible_cache.get(python.path)
if ansible_installed:
ansible = False
else:
ansible_cache[python.path] = True
commands = collect_requirements(
python=python,
controller=controller,
ansible=ansible,
command=args.command if command else None,
coverage=coverage,
minimize=False,
sanity=None,
)
if not commands:
return
run_pip(args, python, commands, connection)
# false positive: pylint: disable=no-member
if any(isinstance(command, PipInstall) and command.has_package('pyyaml') for command in commands):
check_pyyaml(python)
def collect_bootstrap(python: PythonConfig) -> list[PipCommand]:
"""Return the details necessary to bootstrap pip into an empty virtual environment."""
infrastructure_packages = get_venv_packages(python)
pip_version = infrastructure_packages['pip']
packages = [f'{name}=={version}' for name, version in infrastructure_packages.items()]
bootstrap = PipBootstrap(
pip_version=pip_version,
packages=packages,
setuptools=False,
wheel=False,
)
return [bootstrap]
def collect_requirements(
python: PythonConfig,
controller: bool,
ansible: bool,
coverage: bool,
minimize: bool,
command: t.Optional[str],
sanity: t.Optional[str],
) -> list[PipCommand]:
"""Collect requirements for the given Python using the specified arguments."""
commands: list[PipCommand] = []
if coverage:
commands.extend(collect_package_install(packages=[f'coverage=={get_coverage_version(python.version).coverage_version}'], constraints=False))
if ansible or command:
commands.extend(collect_general_install(command, ansible))
if sanity:
commands.extend(collect_sanity_install(sanity))
if command == 'units':
commands.extend(collect_units_install())
if command in ('integration', 'windows-integration', 'network-integration'):
commands.extend(collect_integration_install(command, controller))
if (sanity or minimize) and any(isinstance(command, PipInstall) for command in commands):
# bootstrap the managed virtual environment, which will have been created without any installed packages
# sanity tests which install no packages skip this step
commands = collect_bootstrap(python) + commands
# most infrastructure packages can be removed from sanity test virtual environments after they've been created
# removing them reduces the size of environments cached in containers
uninstall_packages = list(get_venv_packages(python))
commands.extend(collect_uninstall(packages=uninstall_packages))
return commands
def run_pip(
args: EnvironmentConfig,
python: PythonConfig,
commands: list[PipCommand],
connection: t.Optional[Connection],
) -> None:
"""Run the specified pip commands for the given Python, and optionally the specified host."""
connection = connection or LocalConnection(args)
script = prepare_pip_script(commands)
if isinstance(args, IntegrationConfig):
# Integration tests can involve two hosts (controller and target).
# The connection type can be used to disambiguate between the two.
context = " (controller)" if isinstance(connection, LocalConnection) else " (target)"
else:
context = ""
if isinstance(python, VirtualPythonConfig):
context += " [venv]"
# The interpreter path is not included below.
# It can be seen by running ansible-test with increased verbosity (showing all commands executed).
display.info(f'Installing requirements for Python {python.version}{context}')
if not args.explain:
try:
connection.run([python.path], data=script, capture=False)
except SubprocessError:
script = prepare_pip_script([PipVersion()])
try:
connection.run([python.path], data=script, capture=True)
except SubprocessError as ex:
if 'pip is unavailable:' in ex.stdout + ex.stderr:
raise PipUnavailableError(python) from None
raise
# Collect
def collect_general_install(
command: t.Optional[str] = None,
ansible: bool = False,
) -> list[PipInstall]:
"""Return details necessary for the specified general-purpose pip install(s)."""
requirements_paths: list[tuple[str, str]] = []
constraints_paths: list[tuple[str, str]] = []
if ansible:
path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'ansible.txt')
requirements_paths.append((ANSIBLE_TEST_DATA_ROOT, path))
if command:
path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', f'{command}.txt')
requirements_paths.append((ANSIBLE_TEST_DATA_ROOT, path))
return collect_install(requirements_paths, constraints_paths)
def collect_package_install(packages: list[str], constraints: bool = True) -> list[PipInstall]:
"""Return the details necessary to install the specified packages."""
return collect_install([], [], packages, constraints=constraints)
def collect_sanity_install(sanity: str) -> list[PipInstall]:
"""Return the details necessary for the specified sanity pip install(s)."""
requirements_paths: list[tuple[str, str]] = []
constraints_paths: list[tuple[str, str]] = []
path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', f'sanity.{sanity}.txt')
requirements_paths.append((ANSIBLE_TEST_DATA_ROOT, path))
if data_context().content.is_ansible:
path = os.path.join(data_context().content.sanity_path, 'code-smell', f'{sanity}.requirements.txt')
requirements_paths.append((data_context().content.root, path))
return collect_install(requirements_paths, constraints_paths, constraints=False)
def collect_units_install() -> list[PipInstall]:
"""Return details necessary for the specified units pip install(s)."""
requirements_paths: list[tuple[str, str]] = []
constraints_paths: list[tuple[str, str]] = []
path = os.path.join(data_context().content.unit_path, 'requirements.txt')
requirements_paths.append((data_context().content.root, path))
path = os.path.join(data_context().content.unit_path, 'constraints.txt')
constraints_paths.append((data_context().content.root, path))
return collect_install(requirements_paths, constraints_paths)
def collect_integration_install(command: str, controller: bool) -> list[PipInstall]:
"""Return details necessary for the specified integration pip install(s)."""
requirements_paths: list[tuple[str, str]] = []
constraints_paths: list[tuple[str, str]] = []
# Support for prefixed files was added to ansible-test in ansible-core 2.12 when split controller/target testing was implemented.
# Previous versions of ansible-test only recognize non-prefixed files.
# If a prefixed file exists (even if empty), it takes precedence over the non-prefixed file.
prefixes = ('controller.' if controller else 'target.', '')
for prefix in prefixes:
path = os.path.join(data_context().content.integration_path, f'{prefix}requirements.txt')
if os.path.exists(path):
requirements_paths.append((data_context().content.root, path))
break
for prefix in prefixes:
path = os.path.join(data_context().content.integration_path, f'{command}.{prefix}requirements.txt')
if os.path.exists(path):
requirements_paths.append((data_context().content.root, path))
break
for prefix in prefixes:
path = os.path.join(data_context().content.integration_path, f'{prefix}constraints.txt')
if os.path.exists(path):
constraints_paths.append((data_context().content.root, path))
break
return collect_install(requirements_paths, constraints_paths)
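# For example (editor's note): with command='integration' and controller=True, the
# lookup order above is controller.requirements.txt then requirements.txt,
# integration.controller.requirements.txt then integration.requirements.txt, and
# controller.constraints.txt then constraints.txt, each resolved relative to the
# content's integration path; the first existing file in each pair wins.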
def collect_install(
requirements_paths: list[tuple[str, str]],
constraints_paths: list[tuple[str, str]],
packages: t.Optional[list[str]] = None,
constraints: bool = True,
) -> list[PipInstall]:
"""Build a pip install list from the given requirements, constraints and packages."""
# listing content constraints first gives them priority over constraints provided by ansible-test
constraints_paths = list(constraints_paths)
if constraints:
constraints_paths.append((ANSIBLE_TEST_DATA_ROOT, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'constraints.txt')))
requirements = [(os.path.relpath(path, root), read_text_file(path)) for root, path in requirements_paths if usable_pip_file(path)]
constraints = [(os.path.relpath(path, root), read_text_file(path)) for root, path in constraints_paths if usable_pip_file(path)]
packages = packages or []
if requirements or packages:
installs = [PipInstall(
requirements=requirements,
constraints=constraints,
packages=packages,
)]
else:
installs = []
return installs
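# Sketch of the result (values hypothetical): a non-empty install collapses into a
# single PipInstall whose requirements and constraints are (relative_path, content) pairs:
#   [PipInstall(requirements=[('requirements.txt', 'jinja2\n')], constraints=[], packages=[])]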
def collect_uninstall(packages: list[str], ignore_errors: bool = False) -> list[PipUninstall]:
"""Return the details necessary for the specified pip uninstall."""
uninstall = PipUninstall(
packages=packages,
ignore_errors=ignore_errors,
)
return [uninstall]
# Support
def get_venv_packages(python: PythonConfig) -> dict[str, str]:
"""Return a dictionary of Python packages needed for a consistent virtual environment specific to the given Python version."""
# NOTE: This same information is needed for building the base-test-container image.
# See: https://github.com/ansible/base-test-container/blob/main/files/installer.py
default_packages = dict(
pip='24.2',
)
override_packages: dict[str, dict[str, str]] = {
}
packages = {name: version or default_packages[name] for name, version in override_packages.get(python.version, default_packages).items()}
return packages
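# Example override shape (version pins hypothetical): an entry such as
#   override_packages = {'3.8': dict(pip='21.3.1')}
# replaces the default pip pin for Python 3.8 only; a None (or any falsy) value
# falls back to the corresponding entry in default_packages.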
def requirements_allowed(args: EnvironmentConfig, controller: bool) -> bool:
"""
Return True if requirements can be installed, otherwise return False.
Requirements are only allowed if one of the following conditions is met:
The user specified --requirements manually.
The install will occur on the controller and the controller or controller Python is managed by ansible-test.
The install will occur on the target and the target or target Python is managed by ansible-test.
"""
if args.requirements:
return True
if controller:
return args.controller.is_managed or args.controller.python.is_managed
target = args.only_targets(PosixConfig)[0]
return target.is_managed or target.python.is_managed
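# For example (hosts hypothetical): with the default docker controller, which is
# managed by ansible-test, controller installs are allowed; a user-provided remote
# target with its own Python returns False here unless --requirements was given.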
def prepare_pip_script(commands: list[PipCommand]) -> str:
"""Generate a Python script to perform the requested pip commands."""
data = [command.serialize() for command in commands]
display.info(f'>>> Requirements Commands\n{json.dumps(data, indent=4)}', verbosity=3)
args = dict(
script=read_text_file(QUIET_PIP_SCRIPT_PATH),
verbosity=display.verbosity,
commands=data,
)
payload = to_text(base64.b64encode(to_bytes(json.dumps(args))))
path = REQUIREMENTS_SCRIPT_PATH
template = read_text_file(path)
script = template.format(payload=payload)
display.info(f'>>> Python Script from Template ({path})\n{script.strip()}', verbosity=4)
return script
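# Minimal sketch of how the generated script can recover its arguments (assuming the
# template substitutes the base64 payload into a string literal named 'payload'):
#   import base64, json
#   args = json.loads(base64.b64decode(payload))
#   # args['commands'] holds the serialized PipCommand list logged above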
def usable_pip_file(path: t.Optional[str]) -> bool:
"""Return True if the specified pip file is usable, otherwise False."""
return bool(path) and os.path.exists(path) and bool(os.path.getsize(path))
| 15,205
|
Python
|
.py
| 334
| 39.452096
| 148
| 0.705447
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,121
|
payload.py
|
ansible_ansible/test/lib/ansible_test/_internal/payload.py
|
"""Payload management for sending Ansible files and test content to other systems (VMs, containers)."""
from __future__ import annotations
import os
import stat
import tarfile
import tempfile
import time
import typing as t
from .constants import (
ANSIBLE_BIN_SYMLINK_MAP,
)
from .config import (
IntegrationConfig,
ShellConfig,
)
from .util import (
display,
ANSIBLE_SOURCE_ROOT,
remove_tree,
is_subdir,
)
from .data import (
data_context,
PayloadConfig,
)
from .util_common import (
CommonConfig,
ExitHandler,
)
# improve performance by disabling uid/gid lookups
tarfile.pwd = None # type: ignore[attr-defined] # undocumented attribute
tarfile.grp = None # type: ignore[attr-defined] # undocumented attribute
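# Setting these module references to None short-circuits tarfile's name lookups in
# gettarinfo(), so archive members record numeric uid/gid values only; this is a
# small but measurable speedup when adding many files to the payload.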
def create_payload(args: CommonConfig, dst_path: str) -> None:
"""Create a payload for delegation."""
if args.explain:
return
files = list(data_context().ansible_source)
permissions: dict[str, int] = {}
filters: dict[str, t.Callable[[tarfile.TarInfo], t.Optional[tarfile.TarInfo]]] = {}
# Exclude vendored files from the payload.
# They may not be compatible with the delegated environment.
files = [
(abs_path, rel_path) for abs_path, rel_path in files
if not rel_path.startswith('lib/ansible/_vendor/')
or rel_path == 'lib/ansible/_vendor/__init__.py'
]
def apply_permissions(tar_info: tarfile.TarInfo, mode: int) -> t.Optional[tarfile.TarInfo]:
"""
Apply the specified permissions to the given file.
Existing file type bits are preserved.
"""
tar_info.mode &= ~(stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
tar_info.mode |= mode
return tar_info
def make_executable(tar_info: tarfile.TarInfo) -> t.Optional[tarfile.TarInfo]:
"""
Make the given file executable and readable by all, and writeable by the owner.
Existing file type bits are preserved.
This ensures consistency of test results when using unprivileged users.
"""
return apply_permissions(
tar_info,
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
stat.S_IWUSR
) # fmt: skip
def make_non_executable(tar_info: tarfile.TarInfo) -> t.Optional[tarfile.TarInfo]:
"""
Make the given file readable by all, and writeable by the owner.
Existing file type bits are preserved.
This ensures consistency of test results when using unprivileged users.
"""
return apply_permissions(
tar_info,
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
stat.S_IWUSR
) # fmt: skip
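# Worked example: make_executable() yields permission bits 0o755 (rwxr-xr-x) and
# make_non_executable() yields 0o644 (rw-r--r--); any file type bits already present
# in tar_info.mode are preserved by apply_permissions().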
def detect_permissions(tar_info: tarfile.TarInfo) -> t.Optional[tarfile.TarInfo]:
"""
Detect and apply the appropriate permissions for a file.
Existing file type bits are preserved.
This ensures consistency of test results when using unprivileged users.
"""
if tar_info.path.startswith('ansible/'):
mode = permissions.get(os.path.relpath(tar_info.path, 'ansible'))
elif data_context().content.collection and is_subdir(tar_info.path, data_context().content.collection.directory):
mode = permissions.get(os.path.relpath(tar_info.path, data_context().content.collection.directory))
else:
mode = None
if mode:
tar_info = apply_permissions(tar_info, mode)
elif tar_info.mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
# If any execute bit is set, treat the file as executable.
# This ensures that sanity tests which check execute bits behave correctly.
tar_info = make_executable(tar_info)
else:
tar_info = make_non_executable(tar_info)
return tar_info
if not ANSIBLE_SOURCE_ROOT:
# reconstruct the bin directory which is not available when running from an ansible install
files.extend(create_temporary_bin_files(args))
filters.update(dict((os.path.join('ansible', path[3:]), make_executable) for path in ANSIBLE_BIN_SYMLINK_MAP.values() if path.startswith('../')))
if not data_context().content.is_ansible:
# exclude unnecessary files when not testing ansible itself
files = [f for f in files if
is_subdir(f[1], 'bin/') or
is_subdir(f[1], 'lib/ansible/') or
is_subdir(f[1], 'test/lib/ansible_test/')]
if not isinstance(args, (ShellConfig, IntegrationConfig)):
# exclude built-in ansible modules when they are not needed
files = [f for f in files if not is_subdir(f[1], 'lib/ansible/modules/') or f[1] == 'lib/ansible/modules/__init__.py']
collection_layouts = data_context().create_collection_layouts()
content_files: list[tuple[str, str]] = []
extra_files: list[tuple[str, str]] = []
for layout in collection_layouts:
if layout == data_context().content:
# include files from the current collection (layout.collection.directory will be added later)
content_files.extend((os.path.join(layout.root, path), path) for path in data_context().content.all_files())
else:
# include files from each collection in the same collection root as the content being tested
extra_files.extend((os.path.join(layout.root, path), os.path.join(layout.collection.directory, path)) for path in layout.all_files())
else:
# when testing ansible itself the ansible source is the content
content_files = files
# there are no extra files when testing ansible itself
extra_files = []
payload_config = PayloadConfig(
files=content_files,
permissions=permissions,
)
for callback in data_context().payload_callbacks:
# execute callbacks only on the content paths
# this is done before placing them in the appropriate subdirectory (see below)
callback(payload_config)
# place ansible source files under the 'ansible' directory on the delegated host
files = [(src, os.path.join('ansible', dst)) for src, dst in files]
if data_context().content.collection:
# place collection files under the 'ansible_collections/{namespace}/{collection}' directory on the delegated host
files.extend((src, os.path.join(data_context().content.collection.directory, dst)) for src, dst in content_files)
# extra files already have the correct destination path
files.extend(extra_files)
# maintain predictable file order
files = sorted(set(files))
display.info('Creating a payload archive containing %d files...' % len(files), verbosity=1)
start = time.time()
with tarfile.open(dst_path, mode='w:gz', compresslevel=4, format=tarfile.GNU_FORMAT) as tar:
for src, dst in files:
display.info('%s -> %s' % (src, dst), verbosity=4)
tar.add(src, dst, filter=filters.get(dst, detect_permissions))
duration = time.time() - start
payload_size_bytes = os.path.getsize(dst_path)
display.info('Created a %d byte payload archive containing %d files in %d seconds.' % (payload_size_bytes, len(files), duration), verbosity=1)
def create_temporary_bin_files(args: CommonConfig) -> tuple[tuple[str, str], ...]:
"""Create a temporary ansible bin directory populated using the symlink map."""
if args.explain:
temp_path = '/tmp/ansible-tmp-bin'
else:
temp_path = tempfile.mkdtemp(prefix='ansible', suffix='bin')
ExitHandler.register(remove_tree, temp_path)
for name, dest in ANSIBLE_BIN_SYMLINK_MAP.items():
path = os.path.join(temp_path, name)
os.symlink(dest, path)
return tuple((os.path.join(temp_path, name), os.path.join('bin', name)) for name in sorted(ANSIBLE_BIN_SYMLINK_MAP))
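# Illustrative result (map entry hypothetical): if ANSIBLE_BIN_SYMLINK_MAP contained
# {'ansible': '../lib/ansible/cli/adhoc.py'}, the temporary directory would gain a
# matching symlink and the return value would include ('<temp_path>/ansible', 'bin/ansible').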
| 8,012
|
Python
|
.py
| 163
| 41.226994
| 153
| 0.663167
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,122
|
metadata.py
|
ansible_ansible/test/lib/ansible_test/_internal/metadata.py
|
"""Test metadata for passing data to delegated tests."""
from __future__ import annotations
import typing as t
from .util import (
display,
generate_name,
)
from .io import (
write_json_file,
read_json_file,
)
from .diff import (
parse_diff,
FileDiff,
)
class Metadata:
"""Metadata object for passing data to delegated tests."""
def __init__(self) -> None:
"""Initialize metadata."""
self.changes: dict[str, tuple[tuple[int, int], ...]] = {}
self.cloud_config: t.Optional[dict[str, dict[str, t.Union[int, str, bool]]]] = None
self.change_description: t.Optional[ChangeDescription] = None
self.ci_provider: t.Optional[str] = None
self.session_id = generate_name()
def populate_changes(self, diff: t.Optional[list[str]]) -> None:
"""Populate the changeset using the given diff."""
patches = parse_diff(diff)
patches: list[FileDiff] = sorted(patches, key=lambda k: k.new.path)
self.changes = dict((patch.new.path, tuple(patch.new.ranges)) for patch in patches)
renames = [patch.old.path for patch in patches if patch.old.path != patch.new.path and patch.old.exists and patch.new.exists]
deletes = [patch.old.path for patch in patches if not patch.new.exists]
# make sure old paths which were renamed or deleted are registered in changes
for path in renames + deletes:
if path in self.changes:
# old path was replaced with another file
continue
# failed tests involving deleted files should be using line 0 since there is no content remaining
self.changes[path] = ((0, 0),)
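# Example (diff hypothetical): a patch touching lines 10-12 and 30 of 'lib/mod.py'
# plus a deletion of 'old.py' produces:
#   self.changes == {'lib/mod.py': ((10, 12), (30, 30)), 'old.py': ((0, 0),)}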
def to_dict(self) -> dict[str, t.Any]:
"""Return a dictionary representation of the metadata."""
return dict(
changes=self.changes,
cloud_config=self.cloud_config,
ci_provider=self.ci_provider,
change_description=self.change_description.to_dict(),
session_id=self.session_id,
)
def to_file(self, path: str) -> None:
"""Write the metadata to the specified file."""
data = self.to_dict()
display.info('>>> Metadata: %s\n%s' % (path, data), verbosity=3)
write_json_file(path, data)
@staticmethod
def from_file(path: str) -> Metadata:
"""Return metadata loaded from the specified file."""
data = read_json_file(path)
return Metadata.from_dict(data)
@staticmethod
def from_dict(data: dict[str, t.Any]) -> Metadata:
"""Return metadata loaded from the specified dictionary."""
metadata = Metadata()
metadata.changes = data['changes']
metadata.cloud_config = data['cloud_config']
metadata.ci_provider = data['ci_provider']
metadata.change_description = ChangeDescription.from_dict(data['change_description'])
metadata.session_id = data['session_id']
return metadata
class ChangeDescription:
"""Description of changes."""
def __init__(self) -> None:
self.command: str = ''
self.changed_paths: list[str] = []
self.deleted_paths: list[str] = []
self.regular_command_targets: dict[str, list[str]] = {}
self.focused_command_targets: dict[str, list[str]] = {}
self.no_integration_paths: list[str] = []
@property
def targets(self) -> t.Optional[list[str]]:
"""Optional list of target names."""
return self.regular_command_targets.get(self.command)
@property
def focused_targets(self) -> t.Optional[list[str]]:
"""Optional list of focused target names."""
return self.focused_command_targets.get(self.command)
def to_dict(self) -> dict[str, t.Any]:
"""Return a dictionary representation of the change description."""
return dict(
command=self.command,
changed_paths=self.changed_paths,
deleted_paths=self.deleted_paths,
regular_command_targets=self.regular_command_targets,
focused_command_targets=self.focused_command_targets,
no_integration_paths=self.no_integration_paths,
)
@staticmethod
def from_dict(data: dict[str, t.Any]) -> ChangeDescription:
"""Return a change description loaded from the given dictionary."""
changes = ChangeDescription()
changes.command = data['command']
changes.changed_paths = data['changed_paths']
changes.deleted_paths = data['deleted_paths']
changes.regular_command_targets = data['regular_command_targets']
changes.focused_command_targets = data['focused_command_targets']
changes.no_integration_paths = data['no_integration_paths']
return changes
| 4,791
|
Python
|
.py
| 105
| 37.447619
| 133
| 0.645064
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,123
|
__init__.py
|
ansible_ansible/test/lib/ansible_test/_internal/__init__.py
|
"""Test runner for all Ansible tests."""
from __future__ import annotations
import os
import sys
import typing as t
# This import should occur as early as possible.
# It must occur before subprocess has been imported anywhere in the current process.
from .init import (
CURRENT_RLIMIT_NOFILE,
)
from .constants import (
STATUS_HOST_CONNECTION_ERROR,
)
from .util import (
ApplicationError,
HostConnectionError,
TimeoutExpiredError,
display,
report_locale,
)
from .delegation import (
delegate,
)
from .executor import (
ApplicationWarning,
Delegate,
ListTargets,
)
from .timeout import (
configure_timeout,
)
from .data import (
data_context,
)
from .util_common import (
CommonConfig,
ExitHandler,
)
from .cli import (
parse_args,
)
from .provisioning import (
PrimeContainers,
)
from .config import (
TestConfig,
)
def main(cli_args: t.Optional[list[str]] = None) -> None:
"""Wrapper around the main program function to invoke cleanup functions at exit."""
with ExitHandler.context():
main_internal(cli_args)
def main_internal(cli_args: t.Optional[list[str]] = None) -> None:
"""Main program function."""
try:
os.chdir(data_context().content.root)
args = parse_args(cli_args)
config: CommonConfig = args.config(args)
display.verbosity = config.verbosity
display.truncate = config.truncate
display.redact = config.redact
display.color = config.color
display.fd = sys.stderr if config.display_stderr else sys.stdout
configure_timeout(config)
report_locale(isinstance(config, TestConfig) and not config.delegate)
display.info('RLIMIT_NOFILE: %s' % (CURRENT_RLIMIT_NOFILE,), verbosity=2)
delegate_args = None
target_names = None
try:
if config.check_layout:
data_context().check_layout()
args.func(config)
except PrimeContainers:
pass
except ListTargets as ex:
# save target_names for use once we exit the exception handler
target_names = ex.target_names
except Delegate as ex:
# save delegation args for use once we exit the exception handler
delegate_args = (ex.host_state, ex.exclude, ex.require)
if delegate_args:
delegate(config, *delegate_args)
if target_names:
for target_name in target_names:
print(target_name) # display goes to stderr, this should be on stdout
display.review_warnings()
config.success = True
except HostConnectionError as ex:
display.fatal(str(ex))
ex.run_callback()
sys.exit(STATUS_HOST_CONNECTION_ERROR)
except ApplicationWarning as ex:
display.warning('%s' % ex)
sys.exit(0)
except ApplicationError as ex:
display.fatal('%s' % ex)
sys.exit(1)
except TimeoutExpiredError as ex:
display.fatal('%s' % ex)
sys.exit(1)
except KeyboardInterrupt:
sys.exit(2)
except BrokenPipeError:
sys.exit(3)
| 3,156
|
Python
|
.py
| 103
| 24.407767
| 87
| 0.663366
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,124
|
become.py
|
ansible_ansible/test/lib/ansible_test/_internal/become.py
|
"""Become abstraction for interacting with test hosts."""
from __future__ import annotations
import abc
import shlex
from .util import (
get_subclasses,
)
class Become(metaclass=abc.ABCMeta):
"""Base class for become implementations."""
@classmethod
def name(cls) -> str:
"""The name of this plugin."""
return cls.__name__.lower()
@property
@abc.abstractmethod
def method(self) -> str:
"""The name of the Ansible become plugin that is equivalent to this."""
@abc.abstractmethod
def prepare_command(self, command: list[str]) -> list[str]:
"""Return the given command, if any, with privilege escalation."""
class Doas(Become):
"""Become using 'doas'."""
@property
def method(self) -> str:
"""The name of the Ansible become plugin that is equivalent to this."""
raise NotImplementedError('Ansible has no built-in doas become plugin.')
def prepare_command(self, command: list[str]) -> list[str]:
"""Return the given command, if any, with privilege escalation."""
become = ['doas', '-n']
if command:
become.extend(['sh', '-c', shlex.join(command)])
else:
become.extend(['-s'])
return become
class DoasSudo(Doas):
"""Become using 'doas' in ansible-test and then after bootstrapping use 'sudo' for other ansible commands."""
@classmethod
def name(cls) -> str:
"""The name of this plugin."""
return 'doas_sudo'
@property
def method(self) -> str:
"""The name of the Ansible become plugin that is equivalent to this."""
return 'sudo'
class Su(Become):
"""Become using 'su'."""
@property
def method(self) -> str:
"""The name of the Ansible become plugin that is equivalent to this."""
return 'su'
def prepare_command(self, command: list[str]) -> list[str]:
"""Return the given command, if any, with privilege escalation."""
become = ['su', '-l', 'root']
if command:
become.extend(['-c', shlex.join(command)])
return become
class SuSudo(Su):
"""Become using 'su' in ansible-test and then after bootstrapping use 'sudo' for other ansible commands."""
@classmethod
def name(cls) -> str:
"""The name of this plugin."""
return 'su_sudo'
@property
def method(self) -> str:
"""The name of the Ansible become plugin that is equivalent to this."""
return 'sudo'
class Sudo(Become):
"""Become using 'sudo'."""
@property
def method(self) -> str:
"""The name of the Ansible become plugin that is equivalent to this."""
return 'sudo'
def prepare_command(self, command: list[str]) -> list[str]:
"""Return the given command, if any, with privilege escalation."""
become = ['sudo', '-in']
if command:
become.extend(['sh', '-c', shlex.join(command)])
return become
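# Example: prepare_command(['ls', '-la']) returns ['sudo', '-in', 'sh', '-c', 'ls -la'],
# while prepare_command([]) returns just ['sudo', '-in'] for an interactive root login shell.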
SUPPORTED_BECOME_METHODS = {cls.name(): cls for cls in get_subclasses(Become)}
| 3,071
|
Python
|
.py
| 79
| 32.113924
| 113
| 0.628001
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,125
|
content_config.py
|
ansible_ansible/test/lib/ansible_test/_internal/content_config.py
|
"""Content configuration."""
from __future__ import annotations
import os
import pickle
import typing as t
from .constants import (
CONTROLLER_PYTHON_VERSIONS,
SUPPORTED_PYTHON_VERSIONS,
)
from .compat.packaging import (
PACKAGING_IMPORT_ERROR,
SpecifierSet,
Version,
)
from .compat.yaml import (
YAML_IMPORT_ERROR,
yaml_load,
)
from .io import (
open_binary_file,
read_text_file,
)
from .util import (
ApplicationError,
display,
)
from .data import (
data_context,
)
from .config import (
EnvironmentConfig,
ContentConfig,
ModulesConfig,
)
MISSING = object()
def parse_modules_config(data: t.Any) -> ModulesConfig:
"""Parse the given dictionary as module config and return it."""
if not isinstance(data, dict):
raise Exception('config must be of type `dict`, not `%s`' % type(data))
python_requires = data.get('python_requires', MISSING)
if python_requires == MISSING:
raise KeyError('python_requires is required')
return ModulesConfig(
python_requires=python_requires,
python_versions=parse_python_requires(python_requires),
controller_only=python_requires == 'controller',
)
def parse_content_config(data: t.Any) -> ContentConfig:
"""Parse the given dictionary as content config and return it."""
if not isinstance(data, dict):
raise Exception('config must be of type `dict`, not `%s`' % type(data))
# Configuration specific to modules/module_utils.
modules = parse_modules_config(data.get('modules', {}))
# Python versions supported by the controller, combined with Python versions supported by modules/module_utils.
# Mainly used for display purposes and to limit the Python versions used for sanity tests.
python_versions = tuple(version for version in SUPPORTED_PYTHON_VERSIONS
if version in CONTROLLER_PYTHON_VERSIONS or version in modules.python_versions)
return ContentConfig(
modules=modules,
python_versions=python_versions,
)
def load_config(path: str) -> t.Optional[ContentConfig]:
"""Load and parse the specified config file and return the result or None if loading/parsing failed."""
if YAML_IMPORT_ERROR:
raise ApplicationError('The "PyYAML" module is required to parse config: %s' % YAML_IMPORT_ERROR)
if PACKAGING_IMPORT_ERROR:
raise ApplicationError('The "packaging" module is required to parse config: %s' % PACKAGING_IMPORT_ERROR)
value = read_text_file(path)
try:
yaml_value = yaml_load(value)
except Exception as ex: # pylint: disable=broad-except
display.warning('Ignoring config "%s" due to a YAML parsing error: %s' % (path, ex))
return None
try:
config = parse_content_config(yaml_value)
except Exception as ex: # pylint: disable=broad-except
display.warning('Ignoring config "%s" due a config parsing error: %s' % (path, ex))
return None
display.info('Loaded configuration: %s' % path, verbosity=1)
return config
def get_content_config(args: EnvironmentConfig) -> ContentConfig:
"""
Parse and return the content configuration (if any) for the current collection.
For ansible-core, a default configuration is used.
Results are cached.
"""
if args.host_path:
args.content_config = deserialize_content_config(os.path.join(args.host_path, 'config.dat'))
if args.content_config:
return args.content_config
collection_config_path = 'tests/config.yml'
config = None
if data_context().content.collection and os.path.exists(collection_config_path):
config = load_config(collection_config_path)
if not config:
config = parse_content_config(dict(
modules=dict(
python_requires='default',
),
))
if not config.modules.python_versions:
raise ApplicationError('This collection does not declare support for modules/module_utils on any known Python version.\n'
'Ansible supports modules/module_utils on Python versions: %s\n'
'This collection provides the Python requirement: %s' % (
', '.join(SUPPORTED_PYTHON_VERSIONS), config.modules.python_requires))
args.content_config = config
return config
def parse_python_requires(value: t.Any) -> tuple[str, ...]:
"""Parse the given 'python_requires' version specifier and return the matching Python versions."""
if not isinstance(value, str):
raise ValueError('python_requires must be of type `str`, not type `%s`' % type(value))
versions: tuple[str, ...]
if value == 'default':
versions = SUPPORTED_PYTHON_VERSIONS
elif value == 'controller':
versions = CONTROLLER_PYTHON_VERSIONS
else:
specifier_set = SpecifierSet(value)
versions = tuple(version for version in SUPPORTED_PYTHON_VERSIONS if specifier_set.contains(Version(version)))
return versions
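# Example (matching versions depend on SUPPORTED_PYTHON_VERSIONS at the time):
#   parse_python_requires('>=3.10') -> the supported versions from 3.10 upward
#   parse_python_requires('controller') -> CONTROLLER_PYTHON_VERSIONS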
def serialize_content_config(args: EnvironmentConfig, path: str) -> None:
"""Serialize the content config to the given path. If the config has not been loaded, an empty config will be serialized."""
with open_binary_file(path, 'wb') as config_file:
pickle.dump(args.content_config, config_file)
def deserialize_content_config(path: str) -> ContentConfig:
"""Deserialize content config from the path."""
with open_binary_file(path) as config_file:
return pickle.load(config_file)
| 5,588
|
Python
|
.py
| 128
| 37.164063
| 129
| 0.692833
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,126
|
host_profiles.py
|
ansible_ansible/test/lib/ansible_test/_internal/host_profiles.py
|
"""Profiles to represent individual test hosts or a user-provided inventory file."""
from __future__ import annotations
import abc
import dataclasses
import os
import shlex
import tempfile
import time
import typing as t
from .io import (
read_text_file,
write_text_file,
)
from .config import (
CommonConfig,
EnvironmentConfig,
IntegrationConfig,
TerminateMode,
)
from .host_configs import (
ControllerConfig,
ControllerHostConfig,
DockerConfig,
HostConfig,
NetworkInventoryConfig,
NetworkRemoteConfig,
OriginConfig,
PosixConfig,
PosixRemoteConfig,
PosixSshConfig,
PythonConfig,
RemoteConfig,
VirtualPythonConfig,
WindowsInventoryConfig,
WindowsRemoteConfig,
)
from .core_ci import (
AnsibleCoreCI,
SshKey,
VmResource,
)
from .util import (
ApplicationError,
SubprocessError,
cache,
display,
get_type_map,
sanitize_host_name,
sorted_versions,
InternalError,
HostConnectionError,
ANSIBLE_TEST_TARGET_ROOT,
WINDOWS_CONNECTION_VARIABLES,
)
from .util_common import (
get_docs_url,
intercept_python,
)
from .docker_util import (
docker_exec,
docker_image_inspect,
docker_logs,
docker_pull,
docker_rm,
get_docker_hostname,
require_docker,
get_docker_info,
detect_host_properties,
run_utility_container,
SystemdControlGroupV1Status,
LOGINUID_NOT_SET,
UTILITY_IMAGE,
)
from .bootstrap import (
BootstrapDocker,
BootstrapRemote,
)
from .venv import (
get_virtual_python,
)
from .ssh import (
SshConnectionDetail,
)
from .ansible_util import (
ansible_environment,
get_hosts,
parse_inventory,
)
from .containers import (
HostType,
get_container_database,
run_support_container,
)
from .connections import (
Connection,
DockerConnection,
LocalConnection,
SshConnection,
)
from .become import (
Become,
SUPPORTED_BECOME_METHODS,
Sudo,
)
from .completion import (
AuditMode,
CGroupVersion,
)
from .dev.container_probe import (
CGroupMount,
CGroupPath,
CGroupState,
MountType,
check_container_cgroup_status,
)
TControllerHostConfig = t.TypeVar('TControllerHostConfig', bound=ControllerHostConfig)
THostConfig = t.TypeVar('THostConfig', bound=HostConfig)
TPosixConfig = t.TypeVar('TPosixConfig', bound=PosixConfig)
TRemoteConfig = t.TypeVar('TRemoteConfig', bound=RemoteConfig)
class ControlGroupError(ApplicationError):
"""Raised when the container host does not have the necessary cgroup support to run a container."""
def __init__(self, args: CommonConfig, reason: str) -> None:
engine = require_docker().command
dd_wsl2 = get_docker_info(args).docker_desktop_wsl2
message = f'''
{reason}
Run the following commands as root on the container host to resolve this issue:
mkdir /sys/fs/cgroup/systemd
mount cgroup -t cgroup /sys/fs/cgroup/systemd -o none,name=systemd,xattr
chown -R {{user}}:{{group}} /sys/fs/cgroup/systemd # only when rootless
NOTE: These changes must be applied each time the container host is rebooted.
'''.strip()
podman_message = '''
If rootless Podman is already running [1], you may need to stop it before
containers are able to use the new mount point.
[1] Check for 'podman' and 'catatonit' processes.
'''
dd_wsl_message = f'''
When using Docker Desktop with WSL2, additional configuration [1] is required.
[1] {get_docs_url("https://docs.ansible.com/ansible-core/devel/dev_guide/testing_running_locally.html#docker-desktop-with-wsl2")}
'''
if engine == 'podman':
message += podman_message
elif dd_wsl2:
message += dd_wsl_message
message = message.strip()
super().__init__(message)
@dataclasses.dataclass(frozen=True)
class Inventory:
"""Simple representation of an Ansible inventory."""
host_groups: dict[str, dict[str, dict[str, t.Union[str, int]]]]
extra_groups: t.Optional[dict[str, list[str]]] = None
@staticmethod
def create_single_host(name: str, variables: dict[str, t.Union[str, int]]) -> Inventory:
"""Return an inventory instance created from the given hostname and variables."""
return Inventory(host_groups=dict(all={name: variables}))
def write(self, args: CommonConfig, path: str) -> None:
"""Write the given inventory to the specified path on disk."""
# NOTE: Switching the inventory generation to write JSON would be nice, but is currently not possible due to the use of hard-coded inventory filenames.
# The name `inventory` works for the POSIX integration tests, but `inventory.winrm` and `inventory.networking` will only parse in INI format.
# If tests are updated to use the `INVENTORY_PATH` environment variable, then this could be changed.
# Also, some tests detect the test type by inspecting the suffix on the inventory filename, which would break if it were changed.
inventory_text = ''
for group, hosts in self.host_groups.items():
inventory_text += f'[{group}]\n'
for host, variables in hosts.items():
kvp = ' '.join(f'{key}="{value}"' for key, value in variables.items())
inventory_text += f'{host} {kvp}\n'
inventory_text += '\n'
for group, children in (self.extra_groups or {}).items():
inventory_text += f'[{group}]\n'
for child in children:
inventory_text += f'{child}\n'
inventory_text += '\n'
inventory_text = inventory_text.strip()
if not args.explain:
write_text_file(path, inventory_text + '\n')
display.info(f'>>> Inventory\n{inventory_text}', verbosity=3)
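# Example (host and variables hypothetical):
#   Inventory.create_single_host('web1', dict(ansible_host='10.0.0.5')).write(args, path)
# writes:
#   [all]
#   web1 ansible_host="10.0.0.5"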
class HostProfile(t.Generic[THostConfig], metaclass=abc.ABCMeta):
"""Base class for host profiles."""
def __init__(
self,
*,
args: EnvironmentConfig,
config: THostConfig,
targets: t.Optional[list[HostConfig]],
) -> None:
self.args = args
self.config = config
self.controller = bool(targets)
self.targets = targets or []
self.state: dict[str, t.Any] = {}
"""State that must be persisted across delegation."""
self.cache: dict[str, t.Any] = {}
"""Cache that must not be persisted across delegation."""
def provision(self) -> None:
"""Provision the host before delegation."""
def setup(self) -> None:
"""Perform out-of-band setup before delegation."""
def on_target_failure(self) -> None:
"""Executed during failure handling if this profile is a target."""
def deprovision(self) -> None:
"""Deprovision the host after delegation has completed."""
def wait(self) -> None:
"""Wait for the instance to be ready. Executed before delegation for the controller and after delegation for targets."""
def configure(self) -> None:
"""Perform in-band configuration. Executed before delegation for the controller and after delegation for targets."""
def __getstate__(self):
return {key: value for key, value in self.__dict__.items() if key not in ('args', 'cache')}
def __setstate__(self, state):
self.__dict__.update(state)
# args will be populated after the instances are restored
self.cache = {}
class PosixProfile(HostProfile[TPosixConfig], metaclass=abc.ABCMeta):
"""Base class for POSIX host profiles."""
@property
def python(self) -> PythonConfig:
"""
The Python to use for this profile.
If it is a virtual python, it will be created the first time it is requested.
"""
python = self.state.get('python')
if not python:
python = self.config.python
if isinstance(python, VirtualPythonConfig):
python = get_virtual_python(self.args, python)
self.state['python'] = python
return python
class ControllerHostProfile(PosixProfile[TControllerHostConfig], metaclass=abc.ABCMeta):
"""Base class for profiles usable as a controller."""
@abc.abstractmethod
def get_origin_controller_connection(self) -> Connection:
"""Return a connection for accessing the host as a controller from the origin."""
@abc.abstractmethod
def get_working_directory(self) -> str:
"""Return the working directory for the host."""
class SshTargetHostProfile(HostProfile[THostConfig], metaclass=abc.ABCMeta):
"""Base class for profiles offering SSH connectivity."""
@abc.abstractmethod
def get_controller_target_connections(self) -> list[SshConnection]:
"""Return SSH connection(s) for accessing the host as a target from the controller."""
class RemoteProfile(SshTargetHostProfile[TRemoteConfig], metaclass=abc.ABCMeta):
"""Base class for remote instance profiles."""
@property
def core_ci_state(self) -> t.Optional[dict[str, str]]:
"""The saved Ansible Core CI state."""
return self.state.get('core_ci')
@core_ci_state.setter
def core_ci_state(self, value: dict[str, str]) -> None:
"""The saved Ansible Core CI state."""
self.state['core_ci'] = value
def provision(self) -> None:
"""Provision the host before delegation."""
self.core_ci = self.create_core_ci(load=True)
self.core_ci.start()
self.core_ci_state = self.core_ci.save()
def deprovision(self) -> None:
"""Deprovision the host after delegation has completed."""
if self.args.remote_terminate == TerminateMode.ALWAYS or (self.args.remote_terminate == TerminateMode.SUCCESS and self.args.success):
self.delete_instance()
@property
def core_ci(self) -> t.Optional[AnsibleCoreCI]:
"""Return the cached AnsibleCoreCI instance, if any, otherwise None."""
return self.cache.get('core_ci')
@core_ci.setter
def core_ci(self, value: AnsibleCoreCI) -> None:
"""Cache the given AnsibleCoreCI instance."""
self.cache['core_ci'] = value
def get_instance(self) -> t.Optional[AnsibleCoreCI]:
"""Return the current AnsibleCoreCI instance, loading it if not already loaded."""
if not self.core_ci and self.core_ci_state:
self.core_ci = self.create_core_ci(load=False)
self.core_ci.load(self.core_ci_state)
return self.core_ci
def delete_instance(self) -> None:
"""Delete the AnsibleCoreCI VM instance."""
core_ci = self.get_instance()
if not core_ci:
return # instance has not been provisioned
core_ci.stop()
def wait_for_instance(self) -> AnsibleCoreCI:
"""Wait for an AnsibleCoreCI VM instance to become ready."""
core_ci = self.get_instance()
core_ci.wait()
return core_ci
def create_core_ci(self, load: bool) -> AnsibleCoreCI:
"""Create and return an AnsibleCoreCI instance."""
if not self.config.arch:
raise InternalError(f'No arch specified for config: {self.config}')
return AnsibleCoreCI(
args=self.args,
resource=VmResource(
platform=self.config.platform,
version=self.config.version,
architecture=self.config.arch,
provider=self.config.provider,
tag='controller' if self.controller else 'target',
),
load=load,
)
class ControllerProfile(SshTargetHostProfile[ControllerConfig], PosixProfile[ControllerConfig]):
"""Host profile for the controller as a target."""
def get_controller_target_connections(self) -> list[SshConnection]:
"""Return SSH connection(s) for accessing the host as a target from the controller."""
settings = SshConnectionDetail(
name='localhost',
host='localhost',
port=None,
user='root',
identity_file=SshKey(self.args).key,
python_interpreter=self.args.controller_python.path,
)
return [SshConnection(self.args, settings)]
class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[DockerConfig]):
"""Host profile for a docker instance."""
MARKER = 'ansible-test-marker'
@dataclasses.dataclass(frozen=True)
class InitConfig:
"""Configuration details required to run the container init."""
options: list[str]
command: str
command_privileged: bool
expected_mounts: tuple[CGroupMount, ...]
@property
def container_name(self) -> t.Optional[str]:
"""Return the stored container name, if any, otherwise None."""
return self.state.get('container_name')
@container_name.setter
def container_name(self, value: str) -> None:
"""Store the given container name."""
self.state['container_name'] = value
@property
def cgroup_path(self) -> t.Optional[str]:
"""Return the path to the cgroup v1 systemd hierarchy, if any, otherwise None."""
return self.state.get('cgroup_path')
@cgroup_path.setter
def cgroup_path(self, value: str) -> None:
"""Store the path to the cgroup v1 systemd hierarchy."""
self.state['cgroup_path'] = value
@property
def label(self) -> str:
"""Label to apply to resources related to this profile."""
return 'controller' if self.controller else 'target'
def provision(self) -> None:
"""Provision the host before delegation."""
init_probe = self.args.dev_probe_cgroups is not None
init_config = self.get_init_config()
container = run_support_container(
args=self.args,
context='__test_hosts__',
image=self.config.image,
name=f'ansible-test-{self.label}',
ports=[22],
publish_ports=not self.controller, # connections to the controller over SSH are not required
options=init_config.options,
cleanup=False,
cmd=self.build_init_command(init_config, init_probe),
)
if not container:
if self.args.prime_containers:
if init_config.command_privileged or init_probe:
docker_pull(self.args, UTILITY_IMAGE)
return
self.container_name = container.name
try:
options = ['--pid', 'host', '--privileged']
if init_config.command and init_config.command_privileged:
init_command = init_config.command
if not init_probe:
init_command += f' && {shlex.join(self.wake_command)}'
cmd = ['nsenter', '-t', str(container.details.container.pid), '-m', '-p', 'sh', '-c', init_command]
run_utility_container(self.args, f'ansible-test-init-{self.label}', cmd, options)
if init_probe:
check_container_cgroup_status(self.args, self.config, self.container_name, init_config.expected_mounts)
cmd = ['nsenter', '-t', str(container.details.container.pid), '-m', '-p'] + self.wake_command
run_utility_container(self.args, f'ansible-test-wake-{self.label}', cmd, options)
except SubprocessError:
display.info(f'Checking container "{self.container_name}" logs...')
docker_logs(self.args, self.container_name)
raise
def get_init_config(self) -> InitConfig:
"""Return init config for running under the current container engine."""
self.check_cgroup_requirements()
engine = require_docker().command
init_config = getattr(self, f'get_{engine}_init_config')()
return init_config
def get_podman_init_config(self) -> InitConfig:
"""Return init config for running under Podman."""
options = self.get_common_run_options()
command: t.Optional[str] = None
command_privileged = False
expected_mounts: tuple[CGroupMount, ...]
cgroup_version = get_docker_info(self.args).cgroup_version
# Podman 4.4.0 updated containers/common to 0.51.0, which removed the SYS_CHROOT capability from the default list.
# This capability is needed by services such as sshd, so is unconditionally added here.
# See: https://github.com/containers/podman/releases/tag/v4.4.0
# See: https://github.com/containers/common/releases/tag/v0.51.0
# See: https://github.com/containers/common/pull/1240
options.extend(('--cap-add', 'SYS_CHROOT'))
# Without AUDIT_WRITE the following errors may appear in the system logs of a container after attempting to log in using SSH:
#
# fatal: linux_audit_write_entry failed: Operation not permitted
#
# This occurs when running containers as root when the container host provides audit support, but the user lacks the AUDIT_WRITE capability.
# The AUDIT_WRITE capability is provided by docker by default, but not podman.
# See: https://github.com/moby/moby/pull/7179
#
# OpenSSH Portable requires AUDIT_WRITE when logging in with a TTY if the Linux audit feature was compiled in.
# Containers with the feature enabled will require the AUDIT_WRITE capability when EPERM is returned while accessing the audit system.
# See: https://github.com/openssh/openssh-portable/blob/2dc328023f60212cd29504fc05d849133ae47355/audit-linux.c#L90
# See: https://github.com/openssh/openssh-portable/blob/715c892f0a5295b391ae92c26ef4d6a86ea96e8e/loginrec.c#L476-L478
#
# Some containers will be running a patched version of OpenSSH which blocks logins when EPERM is received while using the audit system.
# These containers will require the AUDIT_WRITE capability when EPERM is returned while accessing the audit system.
# See: https://src.fedoraproject.org/rpms/openssh/blob/f36/f/openssh-7.6p1-audit.patch
#
# Since only some containers carry the patch or enable the Linux audit feature in OpenSSH, this capability is enabled on a per-container basis.
# No warning is provided when adding this capability, since there's not really anything the user can do about it.
if self.config.audit == AuditMode.REQUIRED and detect_host_properties(self.args).audit_code == 'EPERM':
options.extend(('--cap-add', 'AUDIT_WRITE'))
# Without AUDIT_CONTROL the following errors may appear in the system logs of a container after attempting to log in using SSH:
#
# pam_loginuid(sshd:session): Error writing /proc/self/loginuid: Operation not permitted
# pam_loginuid(sshd:session): set_loginuid failed
#
# Containers configured to use the pam_loginuid module will encounter this error. If the module is required, logins will fail.
# Since most containers will have this configuration, the code to handle this issue is applied to all containers.
#
# This occurs when the loginuid is set on the container host and doesn't match the user on the container host which is running the container.
# Container hosts which do not use systemd are likely to leave the loginuid unset and thus be unaffected.
# The most common source of a mismatch is the use of sudo to run ansible-test, which changes the uid but cannot change the loginuid.
# This condition typically occurs only under podman, since the loginuid is inherited from the current user.
# See: https://github.com/containers/podman/issues/13012#issuecomment-1034049725
#
# This condition is detected by querying the loginuid of a container running on the container host.
# When it occurs, a warning is displayed and the AUDIT_CONTROL capability is added to containers to work around the issue.
# The warning serves as notice to the user that their usage of ansible-test is responsible for the additional capability requirement.
if (loginuid := detect_host_properties(self.args).loginuid) not in (0, LOGINUID_NOT_SET, None):
display.warning(f'Running containers with capability AUDIT_CONTROL since the container loginuid ({loginuid}) is incorrect. '
'This is most likely due to use of sudo to run ansible-test when loginuid is already set.', unique=True)
options.extend(('--cap-add', 'AUDIT_CONTROL'))
if self.config.cgroup == CGroupVersion.NONE:
# Containers which do not require cgroup do not use systemd.
options.extend((
# Disabling systemd support in Podman will allow these containers to work on hosts without systemd.
# Without this, running a container on a host without systemd results in errors such as (from crun):
# Error: crun: error stat'ing file `/sys/fs/cgroup/systemd`: No such file or directory:
# A similar error occurs when using runc:
# OCI runtime attempted to invoke a command that was not found
'--systemd', 'false',
# A private cgroup namespace limits what is visible in /proc/*/cgroup.
'--cgroupns', 'private',
# Mounting a tmpfs overrides the cgroup mount(s) that would otherwise be provided by Podman.
# This helps provide a consistent container environment across various container host configurations.
'--tmpfs', '/sys/fs/cgroup',
))
expected_mounts = (
CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None),
)
elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V1_ONLY) and cgroup_version == 1:
# Podman hosts providing cgroup v1 will automatically bind mount the systemd hierarchy read-write in the container.
# They will also create a dedicated cgroup v1 systemd hierarchy for the container.
# On hosts with systemd this path is: /sys/fs/cgroup/systemd/libpod_parent/libpod-{container_id}/
# On hosts without systemd this path is: /sys/fs/cgroup/systemd/{container_id}/
options.extend((
# Force Podman to enable systemd support since a command may be used later (to support pre-init diagnostics).
'--systemd', 'always',
# The host namespace must be used to permit the container to access the cgroup v1 systemd hierarchy created by Podman.
'--cgroupns', 'host',
# Mask the host cgroup tmpfs mount to avoid exposing the host cgroup v1 hierarchies (or cgroup v2 hybrid) to the container.
# Podman will provide a cgroup v1 systemd hierarchy on top of this.
'--tmpfs', '/sys/fs/cgroup',
))
self.check_systemd_cgroup_v1(options) # podman
expected_mounts = (
CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None),
# The mount point can be writable or not.
# The reason for the variation is not known.
CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.CGROUP_V1, writable=None, state=CGroupState.HOST),
# The filesystem type can be tmpfs or devtmpfs.
# The reason for the variation is not known.
CGroupMount(path=CGroupPath.SYSTEMD_RELEASE_AGENT, type=None, writable=False, state=None),
)
elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V2_ONLY) and cgroup_version == 2:
# Podman hosts providing cgroup v2 will give each container a read-write cgroup mount.
options.extend((
# Force Podman to enable systemd support since a command may be used later (to support pre-init diagnostics).
'--systemd', 'always',
# A private cgroup namespace is used to avoid exposing the host cgroup to the container.
'--cgroupns', 'private',
))
expected_mounts = (
CGroupMount(path=CGroupPath.ROOT, type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE),
)
elif self.config.cgroup == CGroupVersion.V1_ONLY and cgroup_version == 2:
# Containers which require cgroup v1 need explicit volume mounts on container hosts not providing that version.
# We must put the container PID 1 into the cgroup v1 systemd hierarchy we create.
cgroup_path = self.create_systemd_cgroup_v1() # podman
command = f'echo 1 > {cgroup_path}/cgroup.procs'
options.extend((
# Force Podman to enable systemd support since a command is being provided.
'--systemd', 'always',
# A private cgroup namespace is required. Using the host cgroup namespace results in errors such as the following (from crun):
# Error: OCI runtime error: mount `/sys/fs/cgroup` to '/sys/fs/cgroup': Invalid argument
# A similar error occurs when using runc:
# Error: OCI runtime error: runc create failed: unable to start container process: error during container init:
# error mounting "/sys/fs/cgroup" to rootfs at "/sys/fs/cgroup": mount /sys/fs/cgroup:/sys/fs/cgroup (via /proc/self/fd/7), flags: 0x1000:
# invalid argument
'--cgroupns', 'private',
# Unlike Docker, Podman ignores a /sys/fs/cgroup tmpfs mount, instead exposing a cgroup v2 mount.
# The exposed volume will be read-write, but the container will have its own private namespace.
# Provide a read-only cgroup v1 systemd hierarchy under which the dedicated ansible-test cgroup will be mounted read-write.
# Without this systemd will fail while attempting to mount the cgroup v1 systemd hierarchy.
# Podman doesn't support using a tmpfs for this. Attempting to do so results in an error (from crun):
# Error: OCI runtime error: read: Invalid argument
# A similar error occurs when using runc:
# Error: OCI runtime error: runc create failed: unable to start container process: error during container init:
# error mounting "tmpfs" to rootfs at "/sys/fs/cgroup/systemd": tmpcopyup: failed to copy /sys/fs/cgroup/systemd to /proc/self/fd/7
# (/tmp/runctop3876247619/runctmpdir1460907418): read /proc/self/fd/7/cgroup.kill: invalid argument
'--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:ro',
# Provide the container access to the cgroup v1 systemd hierarchy created by ansible-test.
'--volume', f'{cgroup_path}:{cgroup_path}:rw',
))
expected_mounts = (
CGroupMount(path=CGroupPath.ROOT, type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE),
CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.CGROUP_V1, writable=False, state=CGroupState.SHADOWED),
CGroupMount(path=cgroup_path, type=MountType.CGROUP_V1, writable=True, state=CGroupState.HOST),
)
else:
raise InternalError(f'Unhandled cgroup configuration: {self.config.cgroup} on cgroup v{cgroup_version}.')
return self.InitConfig(
options=options,
command=command,
command_privileged=command_privileged,
expected_mounts=expected_mounts,
)
def get_docker_init_config(self) -> InitConfig:
"""Return init config for running under Docker."""
options = self.get_common_run_options()
command: t.Optional[str] = None
command_privileged = False
expected_mounts: tuple[CGroupMount, ...]
cgroup_version = get_docker_info(self.args).cgroup_version
if self.config.cgroup == CGroupVersion.NONE:
# Containers which do not require cgroup do not use systemd.
if get_docker_info(self.args).cgroupns_option_supported:
# Use the `--cgroupns` option if it is supported.
# Older servers which do not support the option use the host cgroup namespace.
# Older clients which do not support the option cause newer servers to use the host cgroup namespace (cgroup v1 only).
# See: https://github.com/moby/moby/blob/master/api/server/router/container/container_routes.go#L512-L517
# If the host cgroup namespace is used, cgroup information will be visible, but the cgroup mounts will be unavailable due to the tmpfs below.
options.extend((
# A private cgroup namespace limits what is visible in /proc/*/cgroup.
'--cgroupns', 'private',
))
options.extend((
# Mounting a tmpfs overrides the cgroup mount(s) that would otherwise be provided by Docker.
# This helps provide a consistent container environment across various container host configurations.
'--tmpfs', '/sys/fs/cgroup',
))
expected_mounts = (
CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None),
)
elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V1_ONLY) and cgroup_version == 1:
# Docker hosts providing cgroup v1 will automatically bind mount the systemd hierarchy read-only in the container.
# They will also create a dedicated cgroup v1 systemd hierarchy for the container.
# The cgroup v1 system hierarchy path is: /sys/fs/cgroup/systemd/{container_id}/
if get_docker_info(self.args).cgroupns_option_supported:
# Use the `--cgroupns` option if it is supported.
# Older servers which do not support the option use the host cgroup namespace.
# Older clients which do not support the option cause newer servers to use the host cgroup namespace (cgroup v1 only).
# See: https://github.com/moby/moby/blob/master/api/server/router/container/container_routes.go#L512-L517
options.extend((
# The host cgroup namespace must be used.
# Otherwise, /proc/1/cgroup will report "/" for the cgroup path, which is incorrect.
# See: https://github.com/systemd/systemd/issues/19245#issuecomment-815954506
# It is set here to avoid relying on the current Docker configuration.
'--cgroupns', 'host',
))
options.extend((
# Mask the host cgroup tmpfs mount to avoid exposing the host cgroup v1 hierarchies (or cgroup v2 hybrid) to the container.
'--tmpfs', '/sys/fs/cgroup',
# A cgroup v1 systemd hierarchy needs to be mounted read-write over the read-only one provided by Docker.
# Alternatives were tested, but were unusable due to various issues:
# - Attempting to remount the existing mount point read-write will result in a "mount point is busy" error.
# - Adding the entire "/sys/fs/cgroup" mount will expose hierarchies other than systemd.
# If the host is a cgroup v2 hybrid host it would also expose the /sys/fs/cgroup/unified/ hierarchy read-write.
# On older systems, such as an Ubuntu 18.04 host, a dedicated v2 cgroup would not be used, exposing the host cgroups to the container.
'--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw',
))
self.check_systemd_cgroup_v1(options) # docker
expected_mounts = (
CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None),
CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.CGROUP_V1, writable=True, state=CGroupState.HOST),
)
elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V2_ONLY) and cgroup_version == 2:
# Docker hosts providing cgroup v2 will give each container a read-only cgroup mount.
# It must be remounted read-write before systemd starts.
# This must be done in a privileged container, otherwise a "permission denied" error can occur.
command = 'mount -o remount,rw /sys/fs/cgroup/'
command_privileged = True
options.extend((
# A private cgroup namespace is used to avoid exposing the host cgroup to the container.
# This matches the behavior in Podman 1.7.0 and later, which select cgroupns 'host' mode for cgroup v1 and 'private' mode for cgroup v2.
# See: https://github.com/containers/podman/pull/4374
# See: https://github.com/containers/podman/blob/main/RELEASE_NOTES.md#170
'--cgroupns', 'private',
))
expected_mounts = (
CGroupMount(path=CGroupPath.ROOT, type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE),
)
elif self.config.cgroup == CGroupVersion.V1_ONLY and cgroup_version == 2:
# Containers which require cgroup v1 need explicit volume mounts on container hosts not providing that version.
# We must put the container PID 1 into the cgroup v1 systemd hierarchy we create.
cgroup_path = self.create_systemd_cgroup_v1() # docker
command = f'echo 1 > {cgroup_path}/cgroup.procs'
options.extend((
# A private cgroup namespace is used since no access to the host cgroup namespace is required.
# This matches the configuration used for running cgroup v1 containers under Podman.
'--cgroupns', 'private',
# Provide a read-write tmpfs filesystem to support additional cgroup mount points.
# Without this Docker will provide a read-only cgroup2 mount instead.
'--tmpfs', '/sys/fs/cgroup',
# Provide a read-write tmpfs filesystem to simulate a systemd cgroup v1 hierarchy.
# Without this systemd will fail while attempting to mount the cgroup v1 systemd hierarchy.
'--tmpfs', '/sys/fs/cgroup/systemd',
# Provide the container access to the cgroup v1 systemd hierarchy created by ansible-test.
'--volume', f'{cgroup_path}:{cgroup_path}:rw',
))
expected_mounts = (
CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None),
CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.TMPFS, writable=True, state=None),
CGroupMount(path=cgroup_path, type=MountType.CGROUP_V1, writable=True, state=CGroupState.HOST),
)
else:
raise InternalError(f'Unhandled cgroup configuration: {self.config.cgroup} on cgroup v{cgroup_version}.')
return self.InitConfig(
options=options,
command=command,
command_privileged=command_privileged,
expected_mounts=expected_mounts,
)
def build_init_command(self, init_config: InitConfig, sleep: bool) -> t.Optional[list[str]]:
"""
Build and return the command to start in the container.
Returns None if the default command for the container should be used.
The sleep duration below was selected to:
- Allow enough time to perform necessary operations in the container before waking it.
- Make the delay obvious if the wake command doesn't run or succeed.
- Avoid hanging indefinitely or for an unreasonably long time.
NOTE: The container must have a POSIX-compliant default shell "sh" with a non-builtin "sleep" command.
The "sleep" command is invoked through "env" to avoid using a shell builtin "sleep" (if present).
"""
command = ''
if init_config.command and not init_config.command_privileged:
command += f'{init_config.command} && '
if sleep or init_config.command_privileged:
command += 'env sleep 60 ; '
if not command:
return None
docker_pull(self.args, self.config.image)
inspect = docker_image_inspect(self.args, self.config.image)
command += f'exec {shlex.join(inspect.cmd)}'
return ['sh', '-c', command]
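# Illustrative result (image CMD hypothetical): for an image whose CMD is ['/sbin/init'],
# a pending privileged init command (run later via nsenter) yields:
#   ['sh', '-c', 'env sleep 60 ; exec /sbin/init']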
@property
def wake_command(self) -> list[str]:
"""
The command used to wake the container from sleep.
This will be run inside our utility container, so the command used does not need to be present in the container being woken up.
"""
return ['pkill', 'sleep']
def check_systemd_cgroup_v1(self, options: list[str]) -> None:
"""Check the cgroup v1 systemd hierarchy to verify it is writeable for our container."""
probe_script = (read_text_file(os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'check_systemd_cgroup_v1.sh'))
.replace('@MARKER@', self.MARKER)
.replace('@LABEL@', f'{self.label}-{self.args.session_name}'))
cmd = ['sh']
try:
run_utility_container(self.args, f'ansible-test-cgroup-check-{self.label}', cmd, options, data=probe_script)
except SubprocessError as ex:
if error := self.extract_error(ex.stderr):
raise ControlGroupError(self.args, 'Unable to create a v1 cgroup within the systemd hierarchy.\n'
f'Reason: {error}') from ex # cgroup probe failed
raise
def create_systemd_cgroup_v1(self) -> str:
"""Create a unique ansible-test cgroup in the v1 systemd hierarchy and return its path."""
self.cgroup_path = f'/sys/fs/cgroup/systemd/ansible-test-{self.label}-{self.args.session_name}'
# Privileged mode is required to create the cgroup directories on some hosts, such as Fedora 36 and RHEL 9.0.
# The mkdir command will fail with "Permission denied" otherwise.
options = ['--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw', '--privileged']
cmd = ['sh', '-c', f'>&2 echo {shlex.quote(self.MARKER)} && mkdir {shlex.quote(self.cgroup_path)}']
try:
run_utility_container(self.args, f'ansible-test-cgroup-create-{self.label}', cmd, options)
except SubprocessError as ex:
if error := self.extract_error(ex.stderr):
raise ControlGroupError(self.args, f'Unable to create a v1 cgroup within the systemd hierarchy.\n'
f'Reason: {error}') from ex # cgroup create permission denied
raise
return self.cgroup_path
@property
def delete_systemd_cgroup_v1_command(self) -> list[str]:
"""The command used to remove the previously created ansible-test cgroup in the v1 systemd hierarchy."""
return ['find', self.cgroup_path, '-type', 'd', '-delete']
def delete_systemd_cgroup_v1(self) -> None:
"""Delete a previously created ansible-test cgroup in the v1 systemd hierarchy."""
# Privileged mode is required to remove the cgroup directories on some hosts, such as Fedora 36 and RHEL 9.0.
# The BusyBox find utility will report "Permission denied" otherwise, although it still exits with a status code of 0.
options = ['--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw', '--privileged']
cmd = ['sh', '-c', f'>&2 echo {shlex.quote(self.MARKER)} && {shlex.join(self.delete_systemd_cgroup_v1_command)}']
try:
run_utility_container(self.args, f'ansible-test-cgroup-delete-{self.label}', cmd, options)
except SubprocessError as ex:
if error := self.extract_error(ex.stderr):
if error.endswith(': No such file or directory'):
return
display.error(str(ex))
def extract_error(self, value: str) -> t.Optional[str]:
"""
Extract the ansible-test portion of the error message from the given value and return it.
Returns None if no ansible-test marker was found.
"""
lines = value.strip().splitlines()
try:
idx = lines.index(self.MARKER)
except ValueError:
return None
lines = lines[idx + 1:]
message = '\n'.join(lines)
return message
def check_cgroup_requirements(self) -> None:
"""Check cgroup requirements for the container."""
cgroup_version = get_docker_info(self.args).cgroup_version
if cgroup_version not in (1, 2):
raise ApplicationError(f'The container host provides cgroup v{cgroup_version}, but only version v1 and v2 are supported.')
# Stop early for containers which require cgroup v2 when the container host does not provide it.
# None of the containers included with ansible-test currently use this configuration.
# Support for v2-only was added in preparation for the eventual removal of cgroup v1 support from systemd after EOY 2023.
# See: https://github.com/systemd/systemd/pull/24086
if self.config.cgroup == CGroupVersion.V2_ONLY and cgroup_version != 2:
raise ApplicationError(f'Container {self.config.name} requires cgroup v2 but the container host provides cgroup v{cgroup_version}.')
# Containers which use old versions of systemd (earlier than version 226) require cgroup v1 support.
# If the host is a cgroup v2 (unified) host, changes must be made to how the container is run.
#
# See: https://github.com/systemd/systemd/blob/main/NEWS
# Under the "CHANGES WITH 226" section:
# > systemd now optionally supports the new Linux kernel "unified" control group hierarchy.
#
# NOTE: The container host must have the cgroup v1 mount already present.
# If the container is run rootless, the user it runs under must have permissions to the mount.
#
# The following commands can be used to make the mount available:
#
# mkdir /sys/fs/cgroup/systemd
# mount cgroup -t cgroup /sys/fs/cgroup/systemd -o none,name=systemd,xattr
# chown -R {user}:{group} /sys/fs/cgroup/systemd # only when rootless
#
# See: https://github.com/containers/crun/blob/main/crun.1.md#runocisystemdforce_cgroup_v1path
if self.config.cgroup == CGroupVersion.V1_ONLY or (self.config.cgroup != CGroupVersion.NONE and get_docker_info(self.args).cgroup_version == 1):
if (cgroup_v1 := detect_host_properties(self.args).cgroup_v1) != SystemdControlGroupV1Status.VALID:
if self.config.cgroup == CGroupVersion.V1_ONLY:
if get_docker_info(self.args).cgroup_version == 2:
reason = f'Container {self.config.name} requires cgroup v1, but the container host only provides cgroup v2.'
else:
reason = f'Container {self.config.name} requires cgroup v1, but the container host does not appear to be running systemd.'
else:
reason = 'The container host provides cgroup v1, but does not appear to be running systemd.'
reason += f'\n{cgroup_v1.value}'
raise ControlGroupError(self.args, reason) # cgroup probe reported invalid state
def setup(self) -> None:
"""Perform out-of-band setup before delegation."""
bootstrapper = BootstrapDocker(
controller=self.controller,
python_interpreters={self.python.version: self.python.path},
ssh_key=SshKey(self.args),
)
setup_sh = bootstrapper.get_script()
        shell = setup_sh.splitlines()[0][2:]  # extract the shell from the script's shebang line
try:
docker_exec(self.args, self.container_name, [shell], data=setup_sh, capture=False)
except SubprocessError:
display.info(f'Checking container "{self.container_name}" logs...')
docker_logs(self.args, self.container_name)
raise
def deprovision(self) -> None:
"""Deprovision the host after delegation has completed."""
container_exists = False
if self.container_name:
if self.args.docker_terminate == TerminateMode.ALWAYS or (self.args.docker_terminate == TerminateMode.SUCCESS and self.args.success):
docker_rm(self.args, self.container_name)
else:
container_exists = True
if self.cgroup_path:
if container_exists:
display.notice(f'Remember to run `{require_docker().command} rm -f {self.container_name}` when finished testing. '
f'Then run `{shlex.join(self.delete_systemd_cgroup_v1_command)}` on the container host.')
else:
self.delete_systemd_cgroup_v1()
elif container_exists:
display.notice(f'Remember to run `{require_docker().command} rm -f {self.container_name}` when finished testing.')
def wait(self) -> None:
"""Wait for the instance to be ready. Executed before delegation for the controller and after delegation for targets."""
if not self.controller:
con = self.get_controller_target_connections()[0]
last_error = ''
for dummy in range(1, 10):
try:
con.run(['id'], capture=True)
except SubprocessError as ex:
if 'Permission denied' in ex.message:
raise
last_error = str(ex)
time.sleep(1)
else:
return
display.info('Checking SSH debug output...')
display.info(last_error)
if not self.args.delegate and not self.args.host_path:
def callback() -> None:
"""Callback to run during error display."""
self.on_target_failure() # when the controller is not delegated, report failures immediately
else:
callback = None
raise HostConnectionError(f'Timeout waiting for {self.config.name} container {self.container_name}.', callback)
def get_controller_target_connections(self) -> list[SshConnection]:
"""Return SSH connection(s) for accessing the host as a target from the controller."""
containers = get_container_database(self.args)
access = containers.data[HostType.control]['__test_hosts__'][self.container_name]
host = access.host_ip
port = dict(access.port_map())[22]
settings = SshConnectionDetail(
name=self.config.name,
user='root',
host=host,
port=port,
identity_file=SshKey(self.args).key,
python_interpreter=self.python.path,
# CentOS 6 uses OpenSSH 5.3, making it incompatible with the default configuration of OpenSSH 8.8 and later clients.
# Since only CentOS 6 is affected, and it is only supported by ansible-core 2.12, support for RSA SHA-1 is simply hard-coded here.
# A substring is used to allow custom containers to work, not just the one provided with ansible-test.
enable_rsa_sha1='centos6' in self.config.image,
)
return [SshConnection(self.args, settings)]
def get_origin_controller_connection(self) -> DockerConnection:
"""Return a connection for accessing the host as a controller from the origin."""
return DockerConnection(self.args, self.container_name)
def get_working_directory(self) -> str:
"""Return the working directory for the host."""
return '/root'
def on_target_failure(self) -> None:
"""Executed during failure handling if this profile is a target."""
display.info(f'Checking container "{self.container_name}" logs...')
try:
docker_logs(self.args, self.container_name)
except SubprocessError as ex:
display.error(str(ex))
if self.config.cgroup != CGroupVersion.NONE:
# Containers with cgroup support are assumed to be running systemd.
display.info(f'Checking container "{self.container_name}" systemd logs...')
try:
docker_exec(self.args, self.container_name, ['journalctl'], capture=False)
except SubprocessError as ex:
display.error(str(ex))
display.error(f'Connection to container "{self.container_name}" failed. See logs and original error above.')
def get_common_run_options(self) -> list[str]:
"""Return a list of options needed to run the container."""
options = [
# These temporary mount points need to be created at run time when using Docker.
# They are automatically provided by Podman, but will be overridden by VOLUME instructions for the container, if they exist.
# If supporting containers with VOLUME instructions is not desired, these options could be limited to use with Docker.
# See: https://github.com/containers/podman/pull/1318
# Previously they were handled by the VOLUME instruction during container image creation.
# However, that approach creates anonymous volumes when running the container, which are then left behind after the container is deleted.
# These options eliminate the need for the VOLUME instruction, and override it if they are present.
# The mount options used are those typically found on Linux systems.
# Of special note is the "exec" option for "/tmp", which is required by ansible-test for path injection of executables using temporary directories.
'--tmpfs', '/tmp:exec',
'--tmpfs', '/run:exec',
'--tmpfs', '/run/lock', # some systemd containers require a separate tmpfs here, such as Ubuntu 20.04 and Ubuntu 22.04
]
if self.config.privileged:
options.append('--privileged')
if self.config.memory:
options.extend([
f'--memory={self.config.memory}',
f'--memory-swap={self.config.memory}',
])
if self.config.seccomp != 'default':
options.extend(['--security-opt', f'seccomp={self.config.seccomp}'])
docker_socket = '/var/run/docker.sock'
if get_docker_hostname() != 'localhost' or os.path.exists(docker_socket):
options.extend(['--volume', f'{docker_socket}:{docker_socket}'])
return options
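# Illustrative sketch (not part of the original module): the marker technique used by
# extract_error above. A unique marker is echoed to stderr before the real command runs,
# so everything after the marker in captured stderr belongs to the command, while anything
# before it is container runtime noise. The sample strings below are hypothetical.
def _example_extract_after_marker(stderr: str, marker: str) -> t.Optional[str]:
    """Return the stderr content following the marker line, or None if the marker is absent."""
    lines = stderr.strip().splitlines()
    try:
        idx = lines.index(marker)
    except ValueError:
        return None
    return '\n'.join(lines[idx + 1:])
# Example: _example_extract_after_marker('pull noise\nMARKER\nmkdir: Permission denied', 'MARKER')
# returns 'mkdir: Permission denied'.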
class NetworkInventoryProfile(HostProfile[NetworkInventoryConfig]):
"""Host profile for a network inventory."""
class NetworkRemoteProfile(RemoteProfile[NetworkRemoteConfig]):
"""Host profile for a network remote instance."""
def wait(self) -> None:
"""Wait for the instance to be ready. Executed before delegation for the controller and after delegation for targets."""
self.wait_until_ready()
def get_inventory_variables(self) -> dict[str, t.Optional[t.Union[str, int]]]:
"""Return inventory variables for accessing this host."""
core_ci = self.wait_for_instance()
connection = core_ci.connection
variables: dict[str, t.Optional[t.Union[str, int]]] = dict(
ansible_connection=self.config.connection,
ansible_pipelining='yes',
ansible_host=connection.hostname,
ansible_port=connection.port,
ansible_user=connection.username,
ansible_ssh_private_key_file=core_ci.ssh_key.key,
# VyOS 1.1.8 uses OpenSSH 5.5, making it incompatible with RSA SHA-256/512 used by Paramiko 2.9 and later.
# IOS CSR 1000V uses an ancient SSH server, making it incompatible with RSA SHA-256/512 used by Paramiko 2.9 and later.
# That means all network platforms currently offered by ansible-core-ci require support for RSA SHA-1, so it is simply hard-coded here.
# NOTE: This option only exists in ansible-core 2.14 and later. For older ansible-core versions, use of Paramiko 2.8.x or earlier is required.
# See: https://github.com/ansible/ansible/pull/78789
# See: https://github.com/ansible/ansible/pull/78842
ansible_paramiko_use_rsa_sha2_algorithms='no',
ansible_network_os=f'{self.config.collection}.{self.config.platform}' if self.config.collection else self.config.platform,
)
return variables
def wait_until_ready(self) -> None:
"""Wait for the host to respond to an Ansible module request."""
core_ci = self.wait_for_instance()
if not isinstance(self.args, IntegrationConfig):
return # skip extended checks unless we're running integration tests
inventory = Inventory.create_single_host(sanitize_host_name(self.config.name), self.get_inventory_variables())
env = ansible_environment(self.args)
module_name = f'{self.config.collection + "." if self.config.collection else ""}{self.config.platform}_command'
with tempfile.NamedTemporaryFile() as inventory_file:
inventory.write(self.args, inventory_file.name)
cmd = ['ansible', '-m', module_name, '-a', 'commands=?', '-i', inventory_file.name, 'all']
for dummy in range(1, 90):
try:
intercept_python(self.args, self.args.controller_python, cmd, env, capture=True)
except SubprocessError as ex:
display.warning(str(ex))
time.sleep(10)
else:
return
raise HostConnectionError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.')
def get_controller_target_connections(self) -> list[SshConnection]:
"""Return SSH connection(s) for accessing the host as a target from the controller."""
core_ci = self.wait_for_instance()
settings = SshConnectionDetail(
name=core_ci.name,
host=core_ci.connection.hostname,
port=core_ci.connection.port,
user=core_ci.connection.username,
identity_file=core_ci.ssh_key.key,
# VyOS 1.1.8 uses OpenSSH 5.5, making it incompatible with the default configuration of OpenSSH 8.8 and later clients.
# IOS CSR 1000V uses an ancient SSH server, making it incompatible with the default configuration of OpenSSH 8.8 and later clients.
# That means all network platforms currently offered by ansible-core-ci require support for RSA SHA-1, so it is simply hard-coded here.
enable_rsa_sha1=True,
)
return [SshConnection(self.args, settings)]
class OriginProfile(ControllerHostProfile[OriginConfig]):
"""Host profile for origin."""
def get_origin_controller_connection(self) -> LocalConnection:
"""Return a connection for accessing the host as a controller from the origin."""
return LocalConnection(self.args)
def get_working_directory(self) -> str:
"""Return the working directory for the host."""
return os.getcwd()
class PosixRemoteProfile(ControllerHostProfile[PosixRemoteConfig], RemoteProfile[PosixRemoteConfig]):
"""Host profile for a POSIX remote instance."""
def wait(self) -> None:
"""Wait for the instance to be ready. Executed before delegation for the controller and after delegation for targets."""
self.wait_until_ready()
def configure(self) -> None:
"""Perform in-band configuration. Executed before delegation for the controller and after delegation for targets."""
# a target uses a single python version, but a controller may include additional versions for targets running on the controller
python_interpreters = {self.python.version: self.python.path}
python_interpreters.update({target.python.version: target.python.path for target in self.targets if isinstance(target, ControllerConfig)})
python_interpreters = {version: python_interpreters[version] for version in sorted_versions(list(python_interpreters.keys()))}
core_ci = self.wait_for_instance()
pwd = self.wait_until_ready()
display.info(f'Remote working directory: {pwd}', verbosity=1)
bootstrapper = BootstrapRemote(
controller=self.controller,
platform=self.config.platform,
platform_version=self.config.version,
python_interpreters=python_interpreters,
ssh_key=core_ci.ssh_key,
)
setup_sh = bootstrapper.get_script()
        shell = setup_sh.splitlines()[0][2:]  # extract the shell from the script's shebang line
ssh = self.get_origin_controller_connection()
ssh.run([shell], data=setup_sh, capture=False)
def get_ssh_connection(self) -> SshConnection:
"""Return an SSH connection for accessing the host."""
core_ci = self.wait_for_instance()
settings = SshConnectionDetail(
name=core_ci.name,
user=core_ci.connection.username,
host=core_ci.connection.hostname,
port=core_ci.connection.port,
identity_file=core_ci.ssh_key.key,
python_interpreter=self.python.path,
)
if settings.user == 'root':
become: t.Optional[Become] = None
elif self.config.become:
become = SUPPORTED_BECOME_METHODS[self.config.become]()
else:
display.warning(f'Defaulting to "sudo" for platform "{self.config.platform}" become support.', unique=True)
become = Sudo()
return SshConnection(self.args, settings, become)
def wait_until_ready(self) -> str:
"""Wait for instance to respond to SSH, returning the current working directory once connected."""
core_ci = self.wait_for_instance()
for dummy in range(1, 90):
try:
return self.get_working_directory()
except SubprocessError as ex:
# No "Permission denied" check is performed here.
# Unlike containers, with remote instances, user configuration isn't guaranteed to have been completed before SSH connections are attempted.
display.warning(str(ex))
time.sleep(10)
raise HostConnectionError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.')
def get_controller_target_connections(self) -> list[SshConnection]:
"""Return SSH connection(s) for accessing the host as a target from the controller."""
return [self.get_ssh_connection()]
def get_origin_controller_connection(self) -> SshConnection:
"""Return a connection for accessing the host as a controller from the origin."""
return self.get_ssh_connection()
def get_working_directory(self) -> str:
"""Return the working directory for the host."""
if not self.pwd:
ssh = self.get_origin_controller_connection()
stdout = ssh.run(['pwd'], capture=True)[0]
if self.args.explain:
return '/pwd'
pwd = stdout.strip().splitlines()[-1]
if not pwd.startswith('/'):
raise Exception(f'Unexpected current working directory "{pwd}" from "pwd" command output:\n{stdout.strip()}')
self.pwd = pwd
return self.pwd
@property
def pwd(self) -> t.Optional[str]:
"""Return the cached pwd, if any, otherwise None."""
return self.cache.get('pwd')
@pwd.setter
def pwd(self, value: str) -> None:
"""Cache the given pwd."""
self.cache['pwd'] = value
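# Illustrative sketch (not part of the original module): the property-over-a-dict caching
# pattern used by PosixRemoteProfile.pwd above, shown in isolation.
class _ExamplePwdCache:
    """Minimal stand-in demonstrating a property backed by a plain dict cache."""
    def __init__(self) -> None:
        self.cache: dict[str, str] = {}
    @property
    def pwd(self) -> t.Optional[str]:
        """Return the cached pwd, if any, otherwise None."""
        return self.cache.get('pwd')
    @pwd.setter
    def pwd(self, value: str) -> None:
        """Cache the given pwd."""
        self.cache['pwd'] = value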
class PosixSshProfile(SshTargetHostProfile[PosixSshConfig], PosixProfile[PosixSshConfig]):
"""Host profile for a POSIX SSH instance."""
def get_controller_target_connections(self) -> list[SshConnection]:
"""Return SSH connection(s) for accessing the host as a target from the controller."""
settings = SshConnectionDetail(
name='target',
user=self.config.user,
host=self.config.host,
port=self.config.port,
identity_file=SshKey(self.args).key,
python_interpreter=self.python.path,
)
return [SshConnection(self.args, settings)]
class WindowsInventoryProfile(SshTargetHostProfile[WindowsInventoryConfig]):
"""Host profile for a Windows inventory."""
def get_controller_target_connections(self) -> list[SshConnection]:
"""Return SSH connection(s) for accessing the host as a target from the controller."""
inventory = parse_inventory(self.args, self.config.path)
hosts = get_hosts(inventory, 'windows')
identity_file = SshKey(self.args).key
settings = [SshConnectionDetail(
name=name,
host=config['ansible_host'],
port=22,
user=config['ansible_user'],
identity_file=identity_file,
shell_type='powershell',
) for name, config in hosts.items()]
if settings:
details = '\n'.join(f'{ssh.name} {ssh.user}@{ssh.host}:{ssh.port}' for ssh in settings)
display.info(f'Generated SSH connection details from inventory:\n{details}', verbosity=1)
return [SshConnection(self.args, setting) for setting in settings]
class WindowsRemoteProfile(RemoteProfile[WindowsRemoteConfig]):
"""Host profile for a Windows remote instance."""
def wait(self) -> None:
"""Wait for the instance to be ready. Executed before delegation for the controller and after delegation for targets."""
self.wait_until_ready()
def get_inventory_variables(self) -> dict[str, t.Optional[t.Union[str, int]]]:
"""Return inventory variables for accessing this host."""
core_ci = self.wait_for_instance()
connection = core_ci.connection
variables: dict[str, t.Optional[t.Union[str, int]]] = dict(
ansible_host=connection.hostname,
# ansible_port is intentionally not set using connection.port -- connection-specific variables can set this instead
ansible_user=connection.username,
ansible_ssh_private_key_file=core_ci.ssh_key.key, # required for scenarios which change the connection plugin to SSH
ansible_test_connection_password=connection.password, # required for scenarios which change the connection plugin to require a password
)
variables.update(ansible_connection=self.config.connection.split('+')[0])
variables.update(WINDOWS_CONNECTION_VARIABLES[self.config.connection])
if variables.pop('use_password'):
variables.update(ansible_password=connection.password)
return variables
def wait_until_ready(self) -> None:
"""Wait for the host to respond to an Ansible module request."""
core_ci = self.wait_for_instance()
if not isinstance(self.args, IntegrationConfig):
return # skip extended checks unless we're running integration tests
inventory = Inventory.create_single_host(sanitize_host_name(self.config.name), self.get_inventory_variables())
env = ansible_environment(self.args)
module_name = 'ansible.windows.win_ping'
with tempfile.NamedTemporaryFile() as inventory_file:
inventory.write(self.args, inventory_file.name)
cmd = ['ansible', '-m', module_name, '-i', inventory_file.name, 'all']
for dummy in range(1, 120):
try:
intercept_python(self.args, self.args.controller_python, cmd, env, capture=True)
except SubprocessError as ex:
display.warning(str(ex))
time.sleep(10)
else:
return
raise HostConnectionError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.')
def get_controller_target_connections(self) -> list[SshConnection]:
"""Return SSH connection(s) for accessing the host as a target from the controller."""
core_ci = self.wait_for_instance()
settings = SshConnectionDetail(
name=core_ci.name,
host=core_ci.connection.hostname,
port=22,
user=core_ci.connection.username,
identity_file=core_ci.ssh_key.key,
shell_type='powershell',
)
return [SshConnection(self.args, settings)]
@cache
def get_config_profile_type_map() -> dict[t.Type[HostConfig], t.Type[HostProfile]]:
"""Create and return a mapping of HostConfig types to HostProfile types."""
return get_type_map(HostProfile, HostConfig)
def create_host_profile(
args: EnvironmentConfig,
config: HostConfig,
controller: bool,
) -> HostProfile:
"""Create and return a host profile from the given host configuration."""
profile_type = get_config_profile_type_map()[type(config)]
profile = profile_type(args=args, config=config, targets=args.targets if controller else None)
return profile
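# Illustrative sketch (not part of the original module): the dispatch pattern behind
# create_host_profile. get_type_map() derives the mapping from generic type parameters;
# a minimal hand-rolled equivalent keyed on explicit config types could look like this
# (the two entries shown are examples, not the full mapping).
_EXAMPLE_PROFILE_TYPES: dict[t.Type[HostConfig], t.Type[HostProfile]] = {
    DockerConfig: DockerProfile,
    PosixSshConfig: PosixSshProfile,
}
def _example_create_profile(args: EnvironmentConfig, config: HostConfig) -> HostProfile:
    """Look up the profile class for the concrete config type and instantiate it as a target profile."""
    return _EXAMPLE_PROFILE_TYPES[type(config)](args=args, config=config, targets=None)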
| 65,629 | Python | .py | 1,143 | 46.881015 | 159 | 0.652183 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,127 | bootstrap.py | ansible_ansible/test/lib/ansible_test/_internal/bootstrap.py |
"""Bootstrapping for test hosts."""
from __future__ import annotations
import dataclasses
import os
import typing as t
from .io import (
read_text_file,
)
from .util import (
ANSIBLE_TEST_TARGET_ROOT,
)
from .util_common import (
ShellScriptTemplate,
set_shebang,
)
from .core_ci import (
SshKey,
)
@dataclasses.dataclass
class Bootstrap:
"""Base class for bootstrapping systems."""
controller: bool
python_interpreters: dict[str, str]
ssh_key: SshKey
@property
def bootstrap_type(self) -> str:
"""The bootstrap type to pass to the bootstrapping script."""
return self.__class__.__name__.replace('Bootstrap', '').lower()
def get_variables(self) -> dict[str, t.Union[str, list[str]]]:
"""The variables to template in the bootstrapping script."""
return dict(
bootstrap_type=self.bootstrap_type,
controller='yes' if self.controller else '',
python_interpreters=[f'{key}:{value}' for key, value in self.python_interpreters.items()],
ssh_key_type=self.ssh_key.KEY_TYPE,
ssh_private_key=self.ssh_key.key_contents,
ssh_public_key=self.ssh_key.pub_contents,
)
def get_script(self) -> str:
"""Return a shell script to bootstrap the specified host."""
path = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'bootstrap.sh')
content = read_text_file(path)
content = set_shebang(content, '/bin/sh')
template = ShellScriptTemplate(content)
variables = self.get_variables()
script = template.substitute(**variables)
return script
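# Illustrative sketch (not part of the original module): ShellScriptTemplate is internal to
# ansible-test and its substitution rules are not shown here; string.Template offers a
# roughly comparable stand-in for rendering a bootstrap script from the variables above.
def _example_render(template_text: str, variables: dict[str, str]) -> str:
    """Substitute ${name} placeholders in a shell script template (a simplified analogue)."""
    import string
    return string.Template(template_text).substitute(variables)
# Example: _example_render('bootstrap_type="${bootstrap_type}"', dict(bootstrap_type='docker'))
# returns 'bootstrap_type="docker"'.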
@dataclasses.dataclass
class BootstrapDocker(Bootstrap):
"""Bootstrap docker instances."""
def get_variables(self) -> dict[str, t.Union[str, list[str]]]:
"""The variables to template in the bootstrapping script."""
variables = super().get_variables()
variables.update(
platform='',
platform_version='',
)
return variables
@dataclasses.dataclass
class BootstrapRemote(Bootstrap):
"""Bootstrap remote instances."""
platform: str
platform_version: str
def get_variables(self) -> dict[str, t.Union[str, list[str]]]:
"""The variables to template in the bootstrapping script."""
variables = super().get_variables()
variables.update(
platform=self.platform,
platform_version=self.platform_version,
)
return variables
| 2,533 | Python | .py | 71 | 28.943662 | 102 | 0.653799 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,128 | completion.py | ansible_ansible/test/lib/ansible_test/_internal/completion.py |
"""Loading, parsing and storing of completion configurations."""
from __future__ import annotations
import abc
import dataclasses
import enum
import os
import typing as t
from .constants import (
CONTROLLER_PYTHON_VERSIONS,
SUPPORTED_PYTHON_VERSIONS,
)
from .util import (
ANSIBLE_TEST_DATA_ROOT,
cache,
read_lines_without_comments,
)
from .data import (
data_context,
)
from .become import (
SUPPORTED_BECOME_METHODS,
)
class CGroupVersion(enum.Enum):
"""The control group version(s) required by a container."""
NONE = 'none'
V1_ONLY = 'v1-only'
V2_ONLY = 'v2-only'
V1_V2 = 'v1-v2'
def __repr__(self) -> str:
return f'{self.__class__.__name__}.{self.name}'
class AuditMode(enum.Enum):
"""The audit requirements of a container."""
NONE = 'none'
REQUIRED = 'required'
def __repr__(self) -> str:
return f'{self.__class__.__name__}.{self.name}'
@dataclasses.dataclass(frozen=True)
class CompletionConfig(metaclass=abc.ABCMeta):
"""Base class for completion configuration."""
name: str
@property
@abc.abstractmethod
def is_default(self) -> bool:
"""True if the completion entry is only used for defaults, otherwise False."""
@dataclasses.dataclass(frozen=True)
class PosixCompletionConfig(CompletionConfig, metaclass=abc.ABCMeta):
"""Base class for completion configuration of POSIX environments."""
@property
@abc.abstractmethod
def supported_pythons(self) -> list[str]:
"""Return a list of the supported Python versions."""
@abc.abstractmethod
def get_python_path(self, version: str) -> str:
"""Return the path of the requested Python version."""
def get_default_python(self, controller: bool) -> str:
"""Return the default Python version for a controller or target as specified."""
context_pythons = CONTROLLER_PYTHON_VERSIONS if controller else SUPPORTED_PYTHON_VERSIONS
version = [python for python in self.supported_pythons if python in context_pythons][0]
return version
@property
def controller_supported(self) -> bool:
"""True if at least one Python version is provided which supports the controller, otherwise False."""
return any(version in CONTROLLER_PYTHON_VERSIONS for version in self.supported_pythons)
@dataclasses.dataclass(frozen=True)
class PythonCompletionConfig(PosixCompletionConfig, metaclass=abc.ABCMeta):
"""Base class for completion configuration of Python environments."""
python: str = ''
python_dir: str = '/usr/bin'
@property
def supported_pythons(self) -> list[str]:
"""Return a list of the supported Python versions."""
versions = self.python.split(',') if self.python else []
versions = [version for version in versions if version in SUPPORTED_PYTHON_VERSIONS]
return versions
def get_python_path(self, version: str) -> str:
"""Return the path of the requested Python version."""
return os.path.join(self.python_dir, f'python{version}')
@dataclasses.dataclass(frozen=True)
class RemoteCompletionConfig(CompletionConfig):
"""Base class for completion configuration of remote environments provisioned through Ansible Core CI."""
provider: t.Optional[str] = None
arch: t.Optional[str] = None
@property
def platform(self) -> str:
"""The name of the platform."""
return self.name.partition('/')[0]
@property
def version(self) -> str:
"""The version of the platform."""
return self.name.partition('/')[2]
@property
def is_default(self) -> bool:
"""True if the completion entry is only used for defaults, otherwise False."""
return not self.version
def __post_init__(self):
if not self.provider:
raise Exception(f'Remote completion entry "{self.name}" must provide a "provider" setting.')
if not self.arch:
            raise Exception(f'Remote completion entry "{self.name}" must provide an "arch" setting.')
@dataclasses.dataclass(frozen=True)
class InventoryCompletionConfig(CompletionConfig):
"""Configuration for inventory files."""
def __init__(self) -> None:
super().__init__(name='inventory')
@property
def is_default(self) -> bool:
"""True if the completion entry is only used for defaults, otherwise False."""
return False
@dataclasses.dataclass(frozen=True)
class PosixSshCompletionConfig(PythonCompletionConfig):
"""Configuration for a POSIX host reachable over SSH."""
def __init__(self, user: str, host: str) -> None:
super().__init__(
name=f'{user}@{host}',
python=','.join(SUPPORTED_PYTHON_VERSIONS),
)
@property
def is_default(self) -> bool:
"""True if the completion entry is only used for defaults, otherwise False."""
return False
@dataclasses.dataclass(frozen=True)
class DockerCompletionConfig(PythonCompletionConfig):
"""Configuration for Docker containers."""
image: str = ''
seccomp: str = 'default'
cgroup: str = CGroupVersion.V1_V2.value
audit: str = AuditMode.REQUIRED.value # most containers need this, so the default is required, leaving it to be opt-out for containers which don't need it
placeholder: bool = False
@property
def is_default(self) -> bool:
"""True if the completion entry is only used for defaults, otherwise False."""
return False
@property
def audit_enum(self) -> AuditMode:
"""The audit requirements for the container. Raises an exception if the value is invalid."""
try:
return AuditMode(self.audit)
except ValueError:
raise ValueError(f'Docker completion entry "{self.name}" has an invalid value "{self.audit}" for the "audit" setting.') from None
@property
def cgroup_enum(self) -> CGroupVersion:
"""The control group version(s) required by the container. Raises an exception if the value is invalid."""
try:
return CGroupVersion(self.cgroup)
except ValueError:
raise ValueError(f'Docker completion entry "{self.name}" has an invalid value "{self.cgroup}" for the "cgroup" setting.') from None
def __post_init__(self):
if not self.image:
raise Exception(f'Docker completion entry "{self.name}" must provide an "image" setting.')
if not self.supported_pythons and not self.placeholder:
raise Exception(f'Docker completion entry "{self.name}" must provide a "python" setting.')
# verify properties can be correctly parsed to enums
assert self.audit_enum
assert self.cgroup_enum
@dataclasses.dataclass(frozen=True)
class NetworkRemoteCompletionConfig(RemoteCompletionConfig):
"""Configuration for remote network platforms."""
collection: str = ''
connection: str = ''
placeholder: bool = False
def __post_init__(self):
if not self.placeholder:
super().__post_init__()
@dataclasses.dataclass(frozen=True)
class PosixRemoteCompletionConfig(RemoteCompletionConfig, PythonCompletionConfig):
"""Configuration for remote POSIX platforms."""
become: t.Optional[str] = None
placeholder: bool = False
def __post_init__(self):
if not self.placeholder:
super().__post_init__()
if self.become and self.become not in SUPPORTED_BECOME_METHODS:
raise Exception(f'POSIX remote completion entry "{self.name}" setting "become" must be omitted or one of: {", ".join(SUPPORTED_BECOME_METHODS)}')
if not self.supported_pythons:
if self.version and not self.placeholder:
raise Exception(f'POSIX remote completion entry "{self.name}" must provide a "python" setting.')
else:
if not self.version:
raise Exception(f'POSIX remote completion entry "{self.name}" is a platform default and cannot provide a "python" setting.')
@dataclasses.dataclass(frozen=True)
class WindowsRemoteCompletionConfig(RemoteCompletionConfig):
"""Configuration for remote Windows platforms."""
connection: str = ''
TCompletionConfig = t.TypeVar('TCompletionConfig', bound=CompletionConfig)
def load_completion(name: str, completion_type: t.Type[TCompletionConfig]) -> dict[str, TCompletionConfig]:
"""Load the named completion entries, returning them in dictionary form using the specified completion type."""
lines = read_lines_without_comments(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', '%s.txt' % name), remove_blank_lines=True)
if data_context().content.collection:
context = 'collection'
else:
context = 'ansible-core'
items = {name: data for name, data in [parse_completion_entry(line) for line in lines] if data.get('context', context) == context}
for item in items.values():
item.pop('context', None)
item.pop('placeholder', None)
completion = {name: completion_type(name=name, **data) for name, data in items.items()}
return completion
def parse_completion_entry(value: str) -> tuple[str, dict[str, str]]:
"""Parse the given completion entry, returning the entry name and a dictionary of key/value settings."""
values = value.split()
name = values[0]
data = {kvp[0]: kvp[1] if len(kvp) > 1 else '' for kvp in [item.split('=', 1) for item in values[1:]]}
return name, data
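# Illustrative example (not part of the original module) of the 'name key=value ...' entry
# format handled by parse_completion_entry above. The sample entry is hypothetical, not
# taken from a real completion file; settings given without '=value' parse as empty strings.
def _example_parse_completion_entry() -> None:
    """Demonstrate parsing a single completion entry line."""
    name, data = parse_completion_entry('alpine/3.18 python=3.11 provider=aws arch=x86_64')
    assert name == 'alpine/3.18'
    assert data == {'python': '3.11', 'provider': 'aws', 'arch': 'x86_64'}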
def filter_completion(
completion: dict[str, TCompletionConfig],
controller_only: bool = False,
include_defaults: bool = False,
) -> dict[str, TCompletionConfig]:
"""Return the given completion dictionary, filtering out configs which do not support the controller if controller_only is specified."""
if controller_only:
# The cast is needed because mypy gets confused here and forgets that completion values are TCompletionConfig.
completion = {name: t.cast(TCompletionConfig, config) for name, config in completion.items() if
isinstance(config, PosixCompletionConfig) and config.controller_supported}
if not include_defaults:
completion = {name: config for name, config in completion.items() if not config.is_default}
return completion
@cache
def docker_completion() -> dict[str, DockerCompletionConfig]:
"""Return docker completion entries."""
return load_completion('docker', DockerCompletionConfig)
@cache
def remote_completion() -> dict[str, PosixRemoteCompletionConfig]:
"""Return remote completion entries."""
return load_completion('remote', PosixRemoteCompletionConfig)
@cache
def windows_completion() -> dict[str, WindowsRemoteCompletionConfig]:
"""Return windows completion entries."""
return load_completion('windows', WindowsRemoteCompletionConfig)
@cache
def network_completion() -> dict[str, NetworkRemoteCompletionConfig]:
"""Return network completion entries."""
return load_completion('network', NetworkRemoteCompletionConfig)
| 11,052 | Python | .py | 232 | 41.426724 | 159 | 0.698388 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,129 | diff.py | ansible_ansible/test/lib/ansible_test/_internal/diff.py |
"""Diff parsing functions and classes."""
from __future__ import annotations
import re
import textwrap
import traceback
import typing as t
from .util import (
ApplicationError,
)
def parse_diff(lines: list[str]) -> list[FileDiff]:
"""Parse the given diff lines and return a list of FileDiff objects representing the changes of each file."""
return DiffParser(lines).files
class FileDiff:
"""Parsed diff for a single file."""
def __init__(self, old_path: str, new_path: str) -> None:
self.old = DiffSide(old_path, new=False)
self.new = DiffSide(new_path, new=True)
self.headers: list[str] = []
self.binary = False
def append_header(self, line: str) -> None:
"""Append the given line to the list of headers for this file."""
self.headers.append(line)
@property
def is_complete(self) -> bool:
"""True if the diff is complete, otherwise False."""
return self.old.is_complete and self.new.is_complete
class DiffSide:
"""Parsed diff for a single 'side' of a single file."""
def __init__(self, path: str, new: bool) -> None:
self.path = path
self.new = new
self.prefix = '+' if self.new else '-'
self.eof_newline = True
self.exists = True
self.lines: list[tuple[int, str]] = []
self.lines_and_context: list[tuple[int, str]] = []
self.ranges: list[tuple[int, int]] = []
self._next_line_number = 0
self._lines_remaining = 0
self._range_start = 0
def set_start(self, line_start: int, line_count: int) -> None:
"""Set the starting line and line count."""
self._next_line_number = line_start
self._lines_remaining = line_count
self._range_start = 0
def append(self, line: str) -> None:
"""Append the given line."""
if self._lines_remaining <= 0:
raise Exception('Diff range overflow.')
entry = self._next_line_number, line
if line.startswith(' '):
pass
elif line.startswith(self.prefix):
self.lines.append(entry)
if not self._range_start:
self._range_start = self._next_line_number
else:
raise Exception('Unexpected diff content prefix.')
self.lines_and_context.append(entry)
self._lines_remaining -= 1
if self._range_start:
if self.is_complete:
range_end = self._next_line_number
elif line.startswith(' '):
range_end = self._next_line_number - 1
else:
range_end = 0
if range_end:
self.ranges.append((self._range_start, range_end))
self._range_start = 0
self._next_line_number += 1
@property
def is_complete(self) -> bool:
"""True if the diff is complete, otherwise False."""
return self._lines_remaining == 0
def format_lines(self, context: bool = True) -> list[str]:
"""Format the diff and return a list of lines, optionally including context."""
if context:
lines = self.lines_and_context
else:
lines = self.lines
return ['%s:%4d %s' % (self.path, line[0], line[1]) for line in lines]
class DiffParser:
"""Parse diff lines."""
def __init__(self, lines: list[str]) -> None:
self.lines = lines
self.files: list[FileDiff] = []
self.action = self.process_start
self.line_number = 0
self.previous_line: t.Optional[str] = None
self.line: t.Optional[str] = None
self.file: t.Optional[FileDiff] = None
for self.line in self.lines:
self.line_number += 1
try:
self.action()
except Exception as ex:
message = textwrap.dedent('''
%s
Line: %d
Previous: %s
Current: %s
%s
''').strip() % (
ex,
self.line_number,
self.previous_line or '',
self.line or '',
traceback.format_exc(),
)
raise ApplicationError(message.strip()) from None
self.previous_line = self.line
self.complete_file()
def process_start(self) -> None:
"""Process a diff start line."""
self.complete_file()
match = re.search(r'^diff --git "?(?:a/)?(?P<old_path>.*)"? "?(?:b/)?(?P<new_path>.*)"?$', self.line)
if not match:
raise Exception('Unexpected diff start line.')
self.file = FileDiff(match.group('old_path'), match.group('new_path'))
self.action = self.process_continue
def process_range(self) -> None:
"""Process a diff range line."""
match = re.search(r'^@@ -((?P<old_start>[0-9]+),)?(?P<old_count>[0-9]+) \+((?P<new_start>[0-9]+),)?(?P<new_count>[0-9]+) @@', self.line)
if not match:
raise Exception('Unexpected diff range line.')
self.file.old.set_start(int(match.group('old_start') or 1), int(match.group('old_count')))
self.file.new.set_start(int(match.group('new_start') or 1), int(match.group('new_count')))
self.action = self.process_content
def process_continue(self) -> None:
"""Process a diff start, range or header line."""
if self.line.startswith('diff '):
self.process_start()
elif self.line.startswith('@@ '):
self.process_range()
else:
self.process_header()
def process_header(self) -> None:
"""Process a diff header line."""
if self.line.startswith('Binary files '):
self.file.binary = True
elif self.line == '--- /dev/null':
self.file.old.exists = False
elif self.line == '+++ /dev/null':
self.file.new.exists = False
else:
self.file.append_header(self.line)
def process_content(self) -> None:
"""Process a diff content line."""
if self.line == r'\ No newline at end of file':
if self.previous_line.startswith(' '):
self.file.old.eof_newline = False
self.file.new.eof_newline = False
elif self.previous_line.startswith('-'):
self.file.old.eof_newline = False
elif self.previous_line.startswith('+'):
self.file.new.eof_newline = False
else:
raise Exception('Unexpected previous diff content line.')
return
if self.file.is_complete:
self.process_continue()
return
if self.line.startswith(' '):
self.file.old.append(self.line)
self.file.new.append(self.line)
elif self.line.startswith('-'):
self.file.old.append(self.line)
elif self.line.startswith('+'):
self.file.new.append(self.line)
else:
raise Exception('Unexpected diff content line.')
def complete_file(self) -> None:
"""Complete processing of the current file, if any."""
if not self.file:
return
self.files.append(self.file)
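# Illustrative sketch (not part of the original module): how the '@@' range line handled by
# DiffParser.process_range is parsed. The sample hunk header below is hypothetical.
def _example_parse_hunk_header() -> tuple[int, int, int, int]:
    """Parse old/new start and count from a unified diff hunk header."""
    match = re.search(r'^@@ -((?P<old_start>[0-9]+),)?(?P<old_count>[0-9]+) \+((?P<new_start>[0-9]+),)?(?P<new_count>[0-9]+) @@', '@@ -10,3 +10,4 @@')
    assert match
    return (int(match.group('old_start') or 1), int(match.group('old_count')),
            int(match.group('new_start') or 1), int(match.group('new_count')))
# Returns (10, 3, 10, 4) for the sample header.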
| 7,310 | Python | .py | 177 | 30.898305 | 144 | 0.562632 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,130 | timeout.py | ansible_ansible/test/lib/ansible_test/_internal/timeout.py |
"""Timeout management for tests."""
from __future__ import annotations
import dataclasses
import datetime
import functools
import os
import signal
import time
import typing as t
from .io import (
read_json_file,
)
from .config import (
CommonConfig,
TestConfig,
)
from .util import (
display,
TimeoutExpiredError,
)
from .thread import (
WrappedThread,
)
from .constants import (
TIMEOUT_PATH,
)
from .test import (
TestTimeout,
)
@dataclasses.dataclass(frozen=True)
class TimeoutDetail:
"""Details required to enforce a timeout on test execution."""
_DEADLINE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' # format used to maintain backwards compatibility with previous versions of ansible-test
deadline: datetime.datetime
duration: int | float # minutes
@property
def remaining(self) -> datetime.timedelta:
"""The amount of time remaining before the timeout occurs. If the timeout has passed, this will be a negative duration."""
return self.deadline - datetime.datetime.now(tz=datetime.timezone.utc).replace(microsecond=0)
def to_dict(self) -> dict[str, t.Any]:
"""Return timeout details as a dictionary suitable for JSON serialization."""
return dict(
deadline=self.deadline.strftime(self._DEADLINE_FORMAT),
duration=self.duration,
)
@staticmethod
def from_dict(value: dict[str, t.Any]) -> TimeoutDetail:
"""Return a TimeoutDetail instance using the value previously returned by to_dict."""
return TimeoutDetail(
deadline=datetime.datetime.strptime(value['deadline'], TimeoutDetail._DEADLINE_FORMAT).replace(tzinfo=datetime.timezone.utc),
duration=value['duration'],
)
@staticmethod
def create(duration: int | float) -> TimeoutDetail | None:
"""Return a new TimeoutDetail instance for the specified duration (in minutes), or None if the duration is zero."""
if not duration:
return None
if duration == int(duration):
duration = int(duration)
return TimeoutDetail(
deadline=datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0) + datetime.timedelta(seconds=int(duration * 60)),
duration=duration,
)
def get_timeout() -> TimeoutDetail | None:
"""Return details about the currently set timeout, if any, otherwise return None."""
try:
return TimeoutDetail.from_dict(read_json_file(TIMEOUT_PATH))
except FileNotFoundError:
return None
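# Illustrative example (not part of the original module): a TimeoutDetail survives a
# to_dict/from_dict round trip, which is how the deadline is communicated across processes
# through the JSON file at TIMEOUT_PATH.
def _example_timeout_round_trip() -> None:
    """Show that serializing and deserializing a TimeoutDetail preserves it."""
    original = TimeoutDetail.create(90)
    assert original is not None
    restored = TimeoutDetail.from_dict(original.to_dict())
    assert restored == original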
def configure_timeout(args: CommonConfig) -> None:
"""Configure the timeout."""
if isinstance(args, TestConfig):
configure_test_timeout(args) # only tests are subject to the timeout
def configure_test_timeout(args: TestConfig) -> None:
"""Configure the test timeout."""
timeout = get_timeout()
if not timeout:
return
timeout_remaining = timeout.remaining
test_timeout = TestTimeout(timeout.duration)
if timeout_remaining <= datetime.timedelta():
test_timeout.write(args)
raise TimeoutExpiredError(f'The {timeout.duration} minute test timeout expired {timeout_remaining * -1} ago at {timeout.deadline}.')
display.info(f'The {timeout.duration} minute test timeout expires in {timeout_remaining} at {timeout.deadline}.', verbosity=1)
def timeout_handler(_dummy1: t.Any, _dummy2: t.Any) -> None:
"""Runs when SIGUSR1 is received."""
test_timeout.write(args)
raise TimeoutExpiredError(f'Tests aborted after exceeding the {timeout.duration} minute time limit.')
def timeout_waiter(timeout_seconds: float) -> None:
"""Background thread which will kill the current process if the timeout elapses."""
time.sleep(timeout_seconds)
os.kill(os.getpid(), signal.SIGUSR1)
signal.signal(signal.SIGUSR1, timeout_handler)
instance = WrappedThread(functools.partial(timeout_waiter, timeout_remaining.total_seconds()))
instance.daemon = True
instance.start()
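# Illustrative sketch (not part of the original module) of the watchdog pattern used by
# configure_test_timeout above: a daemon thread sleeps for the remaining time, then raises
# SIGUSR1 in the current process, whose handler converts it into a Python exception.
# POSIX-only; the timeout value passed in is an arbitrary example.
def _example_watchdog(timeout_seconds: float) -> None:
    """Arm a daemon thread which signals the current process when the timeout elapses."""
    def handler(_signum: t.Any, _frame: t.Any) -> None:
        raise TimeoutExpiredError(f'Example timeout of {timeout_seconds} second(s) expired.')
    def waiter() -> None:
        time.sleep(timeout_seconds)
        os.kill(os.getpid(), signal.SIGUSR1)
    signal.signal(signal.SIGUSR1, handler)
    watcher = WrappedThread(waiter)
    watcher.daemon = True
    watcher.start()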
| 4,052 | Python | .py | 96 | 36.229167 | 140 | 0.704488 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,131 | data.py | ansible_ansible/test/lib/ansible_test/_internal/data.py |
"""Context information for the current invocation of ansible-test."""
from __future__ import annotations
import collections.abc as c
import dataclasses
import os
import typing as t
from .util import (
ApplicationError,
import_plugins,
is_subdir,
ANSIBLE_LIB_ROOT,
ANSIBLE_TEST_ROOT,
ANSIBLE_SOURCE_ROOT,
display,
cache,
)
from .provider import (
find_path_provider,
get_path_provider_classes,
ProviderNotFoundForPath,
)
from .provider.source import (
SourceProvider,
)
from .provider.source.unversioned import (
UnversionedSource,
)
from .provider.source.installed import (
InstalledSource,
)
from .provider.source.unsupported import (
UnsupportedSource,
)
from .provider.layout import (
ContentLayout,
LayoutProvider,
)
from .provider.layout.unsupported import (
UnsupportedLayout,
)
@dataclasses.dataclass(frozen=True)
class PayloadConfig:
"""Configuration required to build a source tree payload for delegation."""
files: list[tuple[str, str]]
permissions: dict[str, int]
class DataContext:
"""Data context providing details about the current execution environment for ansible-test."""
def __init__(self) -> None:
content_path = os.environ.get('ANSIBLE_TEST_CONTENT_ROOT')
current_path = os.getcwd()
layout_providers = get_path_provider_classes(LayoutProvider)
source_providers = get_path_provider_classes(SourceProvider)
self.__layout_providers = layout_providers
self.__source_providers = source_providers
self.__ansible_source: t.Optional[tuple[tuple[str, str], ...]] = None
self.payload_callbacks: list[c.Callable[[PayloadConfig], None]] = []
if content_path:
content, source_provider = self.__create_content_layout(layout_providers, source_providers, content_path, False)
elif ANSIBLE_SOURCE_ROOT and is_subdir(current_path, ANSIBLE_SOURCE_ROOT):
content, source_provider = self.__create_content_layout(layout_providers, source_providers, ANSIBLE_SOURCE_ROOT, False)
else:
content, source_provider = self.__create_content_layout(layout_providers, source_providers, current_path, True)
self.content: ContentLayout = content
self.source_provider = source_provider
def create_collection_layouts(self) -> list[ContentLayout]:
"""
Return a list of collection layouts, one for each collection in the same collection root as the current collection layout.
An empty list is returned if the current content layout is not a collection layout.
"""
layout = self.content
collection = layout.collection
if not collection:
return []
root_path = os.path.join(collection.root, 'ansible_collections')
display.info('Scanning collection root: %s' % root_path, verbosity=1)
namespace_names = sorted(name for name in os.listdir(root_path) if os.path.isdir(os.path.join(root_path, name)))
collections = []
for namespace_name in namespace_names:
namespace_path = os.path.join(root_path, namespace_name)
collection_names = sorted(name for name in os.listdir(namespace_path) if os.path.isdir(os.path.join(namespace_path, name)))
for collection_name in collection_names:
collection_path = os.path.join(namespace_path, collection_name)
if collection_path == os.path.join(collection.root, collection.directory):
collection_layout = layout
else:
collection_layout = self.__create_content_layout(self.__layout_providers, self.__source_providers, collection_path, False)[0]
file_count = len(collection_layout.all_files())
if not file_count:
continue
display.info('Including collection: %s (%d files)' % (collection_layout.collection.full_name, file_count), verbosity=1)
collections.append(collection_layout)
return collections
@staticmethod
def __create_content_layout(
layout_providers: list[t.Type[LayoutProvider]],
source_providers: list[t.Type[SourceProvider]],
root: str,
walk: bool,
) -> t.Tuple[ContentLayout, SourceProvider]:
"""Create a content layout using the given providers and root path."""
try:
layout_provider = find_path_provider(LayoutProvider, layout_providers, root, walk)
except ProviderNotFoundForPath:
layout_provider = UnsupportedLayout(root)
try:
# Begin the search for the source provider at the layout provider root.
# This intentionally ignores version control within subdirectories of the layout root, a condition which was previously an error.
# Doing so allows support for older git versions for which it is difficult to distinguish between a super project and a sub project.
# It also provides a better user experience, since the solution for the user would effectively be the same -- to remove the nested version control.
if isinstance(layout_provider, UnsupportedLayout):
source_provider: SourceProvider = UnsupportedSource(layout_provider.root)
else:
source_provider = find_path_provider(SourceProvider, source_providers, layout_provider.root, walk)
except ProviderNotFoundForPath:
source_provider = UnversionedSource(layout_provider.root)
layout = layout_provider.create(layout_provider.root, source_provider.get_paths(layout_provider.root))
return layout, source_provider
def __create_ansible_source(self):
"""Return a tuple of Ansible source files with both absolute and relative paths."""
if not ANSIBLE_SOURCE_ROOT:
sources = []
source_provider = InstalledSource(ANSIBLE_LIB_ROOT)
sources.extend((os.path.join(source_provider.root, path), os.path.join('lib', 'ansible', path))
for path in source_provider.get_paths(source_provider.root))
source_provider = InstalledSource(ANSIBLE_TEST_ROOT)
sources.extend((os.path.join(source_provider.root, path), os.path.join('test', 'lib', 'ansible_test', path))
for path in source_provider.get_paths(source_provider.root))
return tuple(sources)
if self.content.is_ansible:
return tuple((os.path.join(self.content.root, path), path) for path in self.content.all_files())
try:
source_provider = find_path_provider(SourceProvider, self.__source_providers, ANSIBLE_SOURCE_ROOT, False)
except ProviderNotFoundForPath:
source_provider = UnversionedSource(ANSIBLE_SOURCE_ROOT)
return tuple((os.path.join(source_provider.root, path), path) for path in source_provider.get_paths(source_provider.root))
@property
def ansible_source(self) -> tuple[tuple[str, str], ...]:
"""Return a tuple of Ansible source files with both absolute and relative paths."""
if not self.__ansible_source:
self.__ansible_source = self.__create_ansible_source()
return self.__ansible_source
def register_payload_callback(self, callback: c.Callable[[PayloadConfig], None]) -> None:
"""Register the given payload callback."""
self.payload_callbacks.append(callback)
def check_layout(self) -> None:
"""Report an error if the layout is unsupported."""
if self.content.unsupported:
raise ApplicationError(self.explain_working_directory())
def explain_working_directory(self) -> str:
"""Return a message explaining the working directory requirements."""
blocks = [
'The current working directory must be within the source tree being tested.',
'',
]
if ANSIBLE_SOURCE_ROOT:
blocks.append(f'Testing Ansible: {ANSIBLE_SOURCE_ROOT}/')
blocks.append('')
cwd = os.getcwd()
blocks.append('Testing an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/')
blocks.append('Example #1: community.general -> ~/code/ansible_collections/community/general/')
blocks.append('Example #2: ansible.util -> ~/.ansible/collections/ansible_collections/ansible/util/')
blocks.append('')
blocks.append(f'Current working directory: {cwd}/')
if os.path.basename(os.path.dirname(cwd)) == 'ansible_collections':
blocks.append(f'Expected parent directory: {os.path.dirname(cwd)}/{{namespace}}/{{collection}}/')
elif os.path.basename(cwd) == 'ansible_collections':
blocks.append(f'Expected parent directory: {cwd}/{{namespace}}/{{collection}}/')
elif 'ansible_collections' not in cwd.split(os.path.sep):
blocks.append('No "ansible_collections" parent directory was found.')
if isinstance(self.content.unsupported, list):
blocks.extend(self.content.unsupported)
message = '\n'.join(blocks)
return message
@cache
def data_context() -> DataContext:
"""Initialize provider plugins."""
provider_types = (
'layout',
'source',
)
for provider_type in provider_types:
import_plugins('provider/%s' % provider_type)
context = DataContext()
return context
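# Illustrative sketch (not part of the original module): the @cache-on-a-factory idiom used
# by data_context() above yields a lazily constructed, process-wide singleton; the first
# call does the work and every later call returns the identical object.
@cache
def _example_singleton() -> dict[str, str]:
    """Build a result once; subsequent calls return the same cached object."""
    return {'created': 'once'}
# _example_singleton() is _example_singleton() evaluates to True.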
@dataclasses.dataclass(frozen=True)
class PluginInfo:
"""Information about an Ansible plugin."""
plugin_type: str
name: str
paths: list[str]
@cache
def content_plugins() -> dict[str, dict[str, PluginInfo]]:
"""
Analyze content.
The primary purpose of this analysis is to facilitate mapping of integration tests to the plugin(s) they are intended to test.
"""
plugins: dict[str, dict[str, PluginInfo]] = {}
for plugin_type, plugin_directory in data_context().content.plugin_paths.items():
plugin_paths = sorted(data_context().content.walk_files(plugin_directory))
plugin_directory_offset = len(plugin_directory.split(os.path.sep))
plugin_files: dict[str, list[str]] = {}
for plugin_path in plugin_paths:
plugin_filename = os.path.basename(plugin_path)
plugin_parts = plugin_path.split(os.path.sep)[plugin_directory_offset:-1]
if plugin_filename == '__init__.py':
if plugin_type != 'module_utils':
continue
else:
plugin_name = os.path.splitext(plugin_filename)[0]
if data_context().content.is_ansible and plugin_type == 'modules':
plugin_name = plugin_name.lstrip('_')
plugin_parts.append(plugin_name)
plugin_name = '.'.join(plugin_parts)
plugin_files.setdefault(plugin_name, []).append(plugin_filename)
plugins[plugin_type] = {plugin_name: PluginInfo(
plugin_type=plugin_type,
name=plugin_name,
paths=paths,
) for plugin_name, paths in plugin_files.items()}
return plugins
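# Illustrative sketch (not part of the original module): the path-to-dotted-name mapping
# performed by content_plugins() above. The directory layout shown is a hypothetical example.
def _example_plugin_name(plugin_directory: str, plugin_path: str) -> str:
    """Convert a plugin path such as 'plugins/modules/cloud/misc/foo.py' into 'cloud.misc.foo'."""
    offset = len(plugin_directory.split(os.path.sep))
    parts = plugin_path.split(os.path.sep)[offset:-1]
    parts.append(os.path.splitext(os.path.basename(plugin_path))[0])
    return '.'.join(parts)
# Example: _example_plugin_name('plugins/modules', 'plugins/modules/cloud/misc/foo.py')
# returns 'cloud.misc.foo'.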
| 11,184 | Python | .py | 220 | 41.822727 | 159 | 0.66835 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,132 | containers.py | ansible_ansible/test/lib/ansible_test/_internal/containers.py |
"""High level functions for working with containers."""
from __future__ import annotations
import collections.abc as c
import contextlib
import json
import random
import time
import uuid
import threading
import typing as t
from .util import (
ApplicationError,
SubprocessError,
display,
sanitize_host_name,
)
from .util_common import (
ExitHandler,
named_temporary_file,
)
from .config import (
EnvironmentConfig,
IntegrationConfig,
SanityConfig,
ShellConfig,
UnitsConfig,
WindowsIntegrationConfig,
)
from .docker_util import (
ContainerNotFoundError,
DockerInspect,
docker_create,
docker_exec,
docker_inspect,
docker_network_inspect,
docker_pull,
docker_rm,
docker_run,
docker_start,
get_docker_container_id,
get_docker_host_ip,
get_podman_host_ip,
get_session_container_name,
require_docker,
detect_host_properties,
)
from .ansible_util import (
run_playbook,
)
from .core_ci import (
SshKey,
)
from .target import (
IntegrationTarget,
)
from .ssh import (
SshConnectionDetail,
SshProcess,
create_ssh_port_forwards,
create_ssh_port_redirects,
generate_ssh_inventory,
)
from .host_configs import (
ControllerConfig,
DockerConfig,
OriginConfig,
PosixSshConfig,
PythonConfig,
RemoteConfig,
WindowsInventoryConfig,
)
from .connections import (
SshConnection,
)
from .thread import (
mutex,
)
# information about support containers provisioned by the current ansible-test instance
support_containers: dict[str, ContainerDescriptor] = {}
support_containers_mutex = threading.Lock()
class HostType:
"""Enum representing the types of hosts involved in running tests."""
origin = 'origin'
control = 'control'
managed = 'managed'
def run_support_container(
args: EnvironmentConfig,
context: str,
image: str,
name: str,
ports: list[int],
aliases: t.Optional[list[str]] = None,
start: bool = True,
cleanup: bool = True,
cmd: t.Optional[list[str]] = None,
env: t.Optional[dict[str, str]] = None,
options: t.Optional[list[str]] = None,
publish_ports: bool = True,
) -> t.Optional[ContainerDescriptor]:
"""
Start a container used to support tests, but not run them.
Containers created this way will be accessible from tests.
"""
name = get_session_container_name(args, name)
if args.prime_containers:
docker_pull(args, image)
return None
# SSH is required for publishing ports, as well as modifying the hosts file.
# Initializing the SSH key here makes sure it is available for use after delegation.
SshKey(args)
aliases = aliases or [sanitize_host_name(name)]
docker_command = require_docker().command
current_container_id = get_docker_container_id()
if docker_command == 'docker':
if isinstance(args.controller, DockerConfig) and all(isinstance(target, (ControllerConfig, DockerConfig)) for target in args.targets):
publish_ports = False # publishing ports is not needed when test hosts are on the docker network
if current_container_id:
publish_ports = False # publishing ports is pointless if already running in a docker container
options = options or []
if start:
options.append('-dt') # the -t option is required to cause systemd in the container to log output to the console
if publish_ports:
for port in ports:
options.extend(['-p', str(port)])
if env:
for key, value in env.items():
options.extend(['--env', '%s=%s' % (key, value)])
max_open_files = detect_host_properties(args).max_open_files
options.extend(['--ulimit', 'nofile=%s' % max_open_files])
if args.dev_systemd_debug:
options.extend(('--env', 'SYSTEMD_LOG_LEVEL=debug'))
display.info('Starting new "%s" container.' % name)
docker_pull(args, image)
support_container_id = run_container(args, image, name, options, create_only=not start, cmd=cmd)
running = start
descriptor = ContainerDescriptor(
image,
context,
name,
support_container_id,
ports,
aliases,
publish_ports,
running,
cleanup,
env,
)
with support_containers_mutex:
if name in support_containers:
raise Exception(f'Container already defined: {name}')
if not support_containers:
ExitHandler.register(cleanup_containers, args)
support_containers[name] = descriptor
display.info(f'Adding "{name}" to container database.')
if start:
descriptor.register(args)
return descriptor
def run_container(
args: EnvironmentConfig,
image: str,
name: str,
options: t.Optional[list[str]],
cmd: t.Optional[list[str]] = None,
create_only: bool = False,
) -> str:
"""Run a container using the given docker image."""
options = list(options or [])
cmd = list(cmd or [])
options.extend(['--name', name])
network = get_docker_preferred_network_name(args)
if is_docker_user_defined_network(network):
# Only when the network is not the default bridge network.
options.extend(['--network', network])
for _iteration in range(1, 3):
try:
if create_only:
stdout = docker_create(args, image, options, cmd)[0]
else:
stdout = docker_run(args, image, options, cmd)[0]
except SubprocessError as ex:
display.error(ex.message)
display.warning(f'Failed to run docker image "{image}". Waiting a few seconds before trying again.')
docker_rm(args, name) # podman doesn't remove containers after create if run fails
time.sleep(3)
else:
if args.explain:
stdout = ''.join(random.choice('0123456789abcdef') for _iteration in range(64))
return stdout.strip()
raise ApplicationError(f'Failed to run docker image "{image}".')
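# Generic retry sketch (hypothetical helper, not part of this module): the
# loop above retries a failed container start after a short delay, which
# generalizes to the pattern below. All names here are illustrative.
import time
import typing as t
def _retry(func: t.Callable[[], str], attempts: int = 2, delay: float = 3.0) -> str:
    """Call func until it succeeds, sleeping between failed attempts."""
    for attempt in range(attempts):
        try:
            return func()
        except Exception:  # deliberately broad for the sketch
            if attempt + 1 == attempts:
                raise
            time.sleep(delay)
    raise RuntimeError('unreachable')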
def start_container(args: EnvironmentConfig, container_id: str) -> tuple[t.Optional[str], t.Optional[str]]:
"""Start a docker container by name or ID."""
options: list[str] = []
for _iteration in range(1, 3):
try:
return docker_start(args, container_id, options)
except SubprocessError as ex:
display.error(ex.message)
display.warning(f'Failed to start docker container "{container_id}". Waiting a few seconds before trying again.')
time.sleep(3)
raise ApplicationError(f'Failed to start docker container "{container_id}".')
def get_container_ip_address(args: EnvironmentConfig, container: DockerInspect) -> t.Optional[str]:
"""Return the IP address of the container for the preferred docker network."""
if container.networks:
network_name = get_docker_preferred_network_name(args)
if not network_name:
# Sort networks and use the first available.
# This assumes all containers will have access to the same networks.
network_name = sorted(container.networks.keys()).pop(0)
ipaddress = container.networks[network_name]['IPAddress']
else:
ipaddress = container.network_settings['IPAddress']
if not ipaddress:
return None
return ipaddress
@mutex
def get_docker_preferred_network_name(args: EnvironmentConfig) -> t.Optional[str]:
"""
Return the preferred network name for use with Docker. The selection logic is:
- the network selected by the user with `--docker-network`
- the network of the currently running docker container (if any)
- the default docker network (returns None)
"""
try:
return get_docker_preferred_network_name.network # type: ignore[attr-defined]
except AttributeError:
pass
network = None
if args.docker_network:
network = args.docker_network
else:
current_container_id = get_docker_container_id()
if current_container_id:
# Make sure any additional containers we launch use the same network as the current container we're running in.
# This is needed when ansible-test is running in a container that is not connected to Docker's default network.
container = docker_inspect(args, current_container_id, always=True)
network = container.get_network_name()
# The default docker behavior puts containers on the same network.
# The default podman behavior puts containers on isolated networks which allow neither communication between containers nor network disconnect.
# Starting with podman version 2.1.0 rootless containers are able to join networks.
# Starting with podman version 2.2.0 containers can be disconnected from networks.
# To maintain feature parity with docker, detect and use the default "podman" network when running under podman.
if network is None and require_docker().command == 'podman' and docker_network_inspect(args, 'podman', always=True):
network = 'podman'
get_docker_preferred_network_name.network = network # type: ignore[attr-defined]
return network
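# The lookup above memoizes its result on a function attribute so the probe
# only runs once per process. A standalone sketch of that pattern follows
# (hypothetical names; the computed value is a stand-in):
def _cached_lookup() -> str:
    try:
        return _cached_lookup.value  # type: ignore[attr-defined]
    except AttributeError:
        pass
    value = 'computed-once'  # stand-in for the real network detection
    _cached_lookup.value = value  # type: ignore[attr-defined]
    return value
assert _cached_lookup() is _cached_lookup()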
def is_docker_user_defined_network(network: str) -> bool:
"""Return True if the network being used is a user-defined network."""
return bool(network) and network != 'bridge'
@mutex
def get_container_database(args: EnvironmentConfig) -> ContainerDatabase:
"""Return the current container database, creating it as needed, or returning the one provided on the command line through delegation."""
try:
return get_container_database.database # type: ignore[attr-defined]
except AttributeError:
pass
if args.containers:
display.info('Parsing container database.', verbosity=1)
database = ContainerDatabase.from_dict(json.loads(args.containers))
else:
display.info('Creating container database.', verbosity=1)
database = create_container_database(args)
display.info('>>> Container Database\n%s' % json.dumps(database.to_dict(), indent=4, sort_keys=True), verbosity=3)
get_container_database.database = database # type: ignore[attr-defined]
return database
class ContainerAccess:
"""Information needed for one test host to access a single container supporting tests."""
def __init__(self, host_ip: str, names: list[str], ports: t.Optional[list[int]], forwards: t.Optional[dict[int, int]]) -> None:
# if forwards is set
# this is where forwards are sent (it is the host that provides an indirect connection to the containers on alternate ports)
# /etc/hosts uses 127.0.0.1 (since port redirection will be used)
# else
# this is what goes into /etc/hosts (it is the container's direct IP)
self.host_ip = host_ip
# primary name + any aliases -- these go into the hosts file and reference the appropriate ip for the origin/control/managed host
self.names = names
# ports available (set if forwards is not set)
self.ports = ports
# port redirections to create through host_ip -- if not set, no port redirections will be used
self.forwards = forwards
def port_map(self) -> list[tuple[int, int]]:
"""Return a port map for accessing this container."""
if self.forwards:
ports = list(self.forwards.items())
else:
ports = [(port, port) for port in self.ports]
return ports
@staticmethod
def from_dict(data: dict[str, t.Any]) -> ContainerAccess:
"""Return a ContainerAccess instance from the given dict."""
forwards = data.get('forwards')
if forwards:
forwards = dict((int(key), value) for key, value in forwards.items())
return ContainerAccess(
host_ip=data['host_ip'],
names=data['names'],
ports=data.get('ports'),
forwards=forwards,
)
def to_dict(self) -> dict[str, t.Any]:
"""Return a dict of the current instance."""
value: dict[str, t.Any] = dict(
host_ip=self.host_ip,
names=self.names,
)
if self.ports:
value.update(ports=self.ports)
if self.forwards:
value.update(forwards=self.forwards)
return value
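# Round-trip sketch: ContainerAccess serializes to a plain, JSON-friendly dict
# and back without loss. The values below are illustrative only.
_example_access = ContainerAccess('10.0.0.5', ['db', 'db-alias'], [5432], None)
assert ContainerAccess.from_dict(_example_access.to_dict()).to_dict() == _example_access.to_dict()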
class ContainerDatabase:
"""Database of running containers used to support tests."""
def __init__(self, data: dict[str, dict[str, dict[str, ContainerAccess]]]) -> None:
self.data = data
@staticmethod
def from_dict(data: dict[str, t.Any]) -> ContainerDatabase:
"""Return a ContainerDatabase instance from the given dict."""
return ContainerDatabase(dict((access_name,
dict((context_name,
dict((container_name, ContainerAccess.from_dict(container))
for container_name, container in containers.items()))
for context_name, containers in contexts.items()))
for access_name, contexts in data.items()))
def to_dict(self) -> dict[str, t.Any]:
"""Return a dict of the current instance."""
return dict((access_name,
dict((context_name,
dict((container_name, container.to_dict())
for container_name, container in containers.items()))
for context_name, containers in contexts.items()))
for access_name, contexts in self.data.items())
def local_ssh(args: EnvironmentConfig, python: PythonConfig) -> SshConnectionDetail:
"""Return SSH connection details for localhost, connecting as root to the default SSH port."""
return SshConnectionDetail('localhost', 'localhost', None, 'root', SshKey(args).key, python.path)
def root_ssh(ssh: SshConnection) -> SshConnectionDetail:
"""Return the SSH connection details from the given SSH connection. If become was specified, the user will be changed to `root`."""
settings = ssh.settings.__dict__.copy()
if ssh.become:
settings.update(
user='root',
)
return SshConnectionDetail(**settings)
def create_container_database(args: EnvironmentConfig) -> ContainerDatabase:
"""Create and return a container database with information necessary for all test hosts to make use of relevant support containers."""
origin: dict[str, dict[str, ContainerAccess]] = {}
control: dict[str, dict[str, ContainerAccess]] = {}
managed: dict[str, dict[str, ContainerAccess]] = {}
for name, container in support_containers.items():
if container.details.published_ports:
if require_docker().command == 'podman':
host_ip_func = get_podman_host_ip
else:
host_ip_func = get_docker_host_ip
published_access = ContainerAccess(
host_ip=host_ip_func(),
names=container.aliases,
ports=None,
forwards=dict((port, published_port) for port, published_port in container.details.published_ports.items()),
)
else:
published_access = None # no published access without published ports (ports are only published if needed)
if container.details.container_ip:
# docker containers and rootful podman containers should have a container IP address
container_access = ContainerAccess(
host_ip=container.details.container_ip,
names=container.aliases,
ports=container.ports,
forwards=None,
)
elif require_docker().command == 'podman':
# published ports for rootless podman containers should be accessible from the host's IP
container_access = ContainerAccess(
host_ip=get_podman_host_ip(),
names=container.aliases,
ports=None,
forwards=dict((port, published_port) for port, published_port in container.details.published_ports.items()),
)
else:
container_access = None # no container access without an IP address
if get_docker_container_id():
if not container_access:
raise Exception('Missing IP address for container: %s' % name)
origin_context = origin.setdefault(container.context, {})
origin_context[name] = container_access
elif not published_access:
pass # origin does not have network access to the containers
else:
origin_context = origin.setdefault(container.context, {})
origin_context[name] = published_access
if isinstance(args.controller, RemoteConfig):
pass # SSH forwarding required
elif '-controller-' in name:
pass # hack to avoid exposing the controller container to the controller
elif isinstance(args.controller, DockerConfig) or (isinstance(args.controller, OriginConfig) and get_docker_container_id()):
if container_access:
control_context = control.setdefault(container.context, {})
control_context[name] = container_access
else:
raise Exception('Missing IP address for container: %s' % name)
else:
if not published_access:
raise Exception('Missing published ports for container: %s' % name)
control_context = control.setdefault(container.context, {})
control_context[name] = published_access
if issubclass(args.target_type, (RemoteConfig, WindowsInventoryConfig, PosixSshConfig)):
pass # SSH forwarding required
elif '-controller-' in name or '-target-' in name:
pass # hack to avoid exposing the controller and target containers to the target
elif issubclass(args.target_type, DockerConfig) or (issubclass(args.target_type, OriginConfig) and get_docker_container_id()):
if container_access:
managed_context = managed.setdefault(container.context, {})
managed_context[name] = container_access
else:
raise Exception('Missing IP address for container: %s' % name)
else:
if not published_access:
raise Exception('Missing published ports for container: %s' % name)
managed_context = managed.setdefault(container.context, {})
managed_context[name] = published_access
data = {
HostType.origin: origin,
HostType.control: control,
HostType.managed: managed,
}
data = dict((key, value) for key, value in data.items() if value)
return ContainerDatabase(data)
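# Shape of the resulting database, sketched with illustrative values:
# {
#     'origin':  {'some-context': {'some-container': ContainerAccess(...)}},
#     'control': {'some-context': {'some-container': ContainerAccess(...)}},
#     'managed': {'some-context': {'some-container': ContainerAccess(...)}},
# }
# Host types with no entries are dropped, so consumers can test membership directly.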
class SupportContainerContext:
"""Context object for tracking information relating to access of support containers."""
def __init__(self, containers: ContainerDatabase, process: t.Optional[SshProcess]) -> None:
self.containers = containers
self.process = process
def close(self) -> None:
"""Close the process maintaining the port forwards."""
if not self.process:
return # forwarding not in use
self.process.terminate()
display.info('Waiting for the session SSH port forwarding process to terminate.', verbosity=1)
self.process.wait()
@contextlib.contextmanager
def support_container_context(
args: EnvironmentConfig,
ssh: t.Optional[SshConnectionDetail],
) -> c.Iterator[t.Optional[ContainerDatabase]]:
"""Create a context manager for integration tests that use support containers."""
if not isinstance(args, (IntegrationConfig, UnitsConfig, SanityConfig, ShellConfig)):
yield None # containers are only needed for commands that have targets (hosts or pythons)
return
containers = get_container_database(args)
if not containers.data:
yield ContainerDatabase({}) # no containers are being used, return an empty database
return
context = create_support_container_context(args, ssh, containers)
try:
yield context.containers
finally:
context.close()
def create_support_container_context(
args: EnvironmentConfig,
ssh: t.Optional[SshConnectionDetail],
containers: ContainerDatabase,
) -> SupportContainerContext:
"""Context manager that provides SSH port forwards. Returns updated container metadata."""
host_type = HostType.control
revised = ContainerDatabase(containers.data.copy())
source = revised.data.pop(HostType.origin, None)
container_map: dict[tuple[str, int], tuple[str, str, int]] = {}
if host_type not in revised.data:
if not source:
raise Exception('Missing origin container details.')
for context_name, context in source.items():
for container_name, container in context.items():
if '-controller-' in container_name:
continue # hack to avoid exposing the controller container to the controller
for port, access_port in container.port_map():
container_map[(container.host_ip, access_port)] = (context_name, container_name, port)
if not container_map:
return SupportContainerContext(revised, None)
if not ssh:
raise Exception('The %s host was not pre-configured for container access and SSH forwarding is not available.' % host_type)
forwards = list(container_map.keys())
process = create_ssh_port_forwards(args, ssh, forwards)
result = SupportContainerContext(revised, process)
try:
port_forwards = process.collect_port_forwards()
contexts: dict[str, dict[str, ContainerAccess]] = {}
for forward, forwarded_port in port_forwards.items():
access_host, access_port = forward
context_name, container_name, container_port = container_map[(access_host, access_port)]
container = source[context_name][container_name]
context = contexts.setdefault(context_name, {})
forwarded_container = context.setdefault(container_name, ContainerAccess('127.0.0.1', container.names, None, {}))
forwarded_container.forwards[container_port] = forwarded_port
display.info('Container "%s" port %d available at %s:%d is forwarded over SSH as port %d.' % (
container_name, container_port, access_host, access_port, forwarded_port,
), verbosity=1)
revised.data[host_type] = contexts
return result
except Exception:
result.close()
raise
class ContainerDescriptor:
"""Information about a support container."""
def __init__(
self,
image: str,
context: str,
name: str,
container_id: str,
ports: list[int],
aliases: list[str],
publish_ports: bool,
running: bool,
cleanup: bool,
env: t.Optional[dict[str, str]],
) -> None:
self.image = image
self.context = context
self.name = name
self.container_id = container_id
self.ports = ports
self.aliases = aliases
self.publish_ports = publish_ports
self.running = running
self.cleanup = cleanup
self.env = env
self.details: t.Optional[SupportContainer] = None
def start(self, args: EnvironmentConfig) -> None:
"""Start the container. Used for containers which are created, but not started."""
start_container(args, self.name)
self.register(args)
def register(self, args: EnvironmentConfig) -> SupportContainer:
"""Record the container's runtime details. Must be used after the container has been started."""
if self.details:
raise Exception('Container already registered: %s' % self.name)
try:
container = docker_inspect(args, self.name)
except ContainerNotFoundError:
if not args.explain:
raise
# provide enough mock data to keep --explain working
container = DockerInspect(args, dict(
Id=self.container_id,
NetworkSettings=dict(
IPAddress='127.0.0.1',
Ports=dict(('%d/tcp' % port, [dict(HostPort=random.randint(30000, 40000) if self.publish_ports else port)]) for port in self.ports),
),
Config=dict(
Env=['%s=%s' % (key, value) for key, value in self.env.items()] if self.env else [],
),
))
support_container_ip = get_container_ip_address(args, container)
if self.publish_ports:
# inspect the support container to locate the published ports
tcp_ports = dict((port, container.get_tcp_port(port)) for port in self.ports)
if any(not config or len(set(conf['HostPort'] for conf in config)) != 1 for config in tcp_ports.values()):
raise ApplicationError('Unexpected `docker inspect` results for published TCP ports:\n%s' % json.dumps(tcp_ports, indent=4, sort_keys=True))
published_ports = dict((port, int(config[0]['HostPort'])) for port, config in tcp_ports.items())
else:
published_ports = {}
self.details = SupportContainer(
container,
support_container_ip,
published_ports,
)
return self.details
class SupportContainer:
"""Information about a running support container available for use by tests."""
def __init__(
self,
container: DockerInspect,
container_ip: str,
published_ports: dict[int, int],
) -> None:
self.container = container
self.container_ip = container_ip
self.published_ports = published_ports
def wait_for_file(
args: EnvironmentConfig,
container_name: str,
path: str,
sleep: int,
tries: int,
check: t.Optional[c.Callable[[str], bool]] = None,
) -> str:
"""Wait for the specified file to become available in the requested container and return its contents."""
display.info('Waiting for container "%s" to provide file: %s' % (container_name, path))
for _iteration in range(1, tries):
if _iteration > 1:
time.sleep(sleep)
try:
stdout = docker_exec(args, container_name, ['dd', 'if=%s' % path], capture=True)[0]
except SubprocessError:
continue
if not check or check(stdout):
return stdout
raise ApplicationError('Timeout waiting for container "%s" to provide file: %s' % (container_name, path))
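# Generic poll-until sketch following the same shape as wait_for_file above
# (hypothetical helper; reuses this module's `time` and `typing as t` imports):
def _poll_until(probe: t.Callable[[], t.Optional[str]], tries: int, sleep: float) -> str:
    """Invoke probe until it returns a truthy value or the attempts run out."""
    for iteration in range(tries):
        if iteration:
            time.sleep(sleep)
        result = probe()
        if result:
            return result
    raise TimeoutError(f'No result after {tries} tries.')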
def cleanup_containers(args: EnvironmentConfig) -> None:
"""Clean up containers."""
for container in support_containers.values():
if container.cleanup:
docker_rm(args, container.name)
def create_hosts_entries(context: dict[str, ContainerAccess]) -> list[str]:
"""Return hosts entries for the specified context."""
entries = []
unique_id = uuid.uuid4()
for container in context.values():
# forwards require port redirection through localhost
if container.forwards:
host_ip = '127.0.0.1'
else:
host_ip = container.host_ip
entries.append('%s %s # ansible-test %s' % (host_ip, ' '.join(container.names), unique_id))
return entries
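# Sketch of an entry produced above, with illustrative values:
#   10.0.0.5 db db-alias # ansible-test 6f1b2c3d-0000-0000-0000-000000000000
# The trailing UUID comment marks the lines added by this run, so tooling can
# later identify and remove exactly these entries.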
def create_container_hooks(
args: IntegrationConfig,
control_connections: list[SshConnectionDetail],
managed_connections: t.Optional[list[SshConnectionDetail]],
) -> tuple[t.Optional[c.Callable[[IntegrationTarget], None]], t.Optional[c.Callable[[IntegrationTarget], None]]]:
"""Return pre and post target callbacks for enabling and disabling container access for each test target."""
containers = get_container_database(args)
control_contexts = containers.data.get(HostType.control)
if control_contexts:
managed_contexts = containers.data.get(HostType.managed)
if not managed_contexts:
managed_contexts = create_managed_contexts(control_contexts)
control_type = 'posix'
if isinstance(args, WindowsIntegrationConfig):
managed_type = 'windows'
else:
managed_type = 'posix'
control_state: dict[str, tuple[list[str], list[SshProcess]]] = {}
managed_state: dict[str, tuple[list[str], list[SshProcess]]] = {}
def pre_target(target: IntegrationTarget) -> None:
"""Configure hosts for SSH port forwarding required by the specified target."""
forward_ssh_ports(args, control_connections, '%s_hosts_prepare.yml' % control_type, control_state, target, HostType.control, control_contexts)
forward_ssh_ports(args, managed_connections, '%s_hosts_prepare.yml' % managed_type, managed_state, target, HostType.managed, managed_contexts)
def post_target(target: IntegrationTarget) -> None:
"""Clean up previously configured SSH port forwarding which was required by the specified target."""
cleanup_ssh_ports(args, control_connections, '%s_hosts_restore.yml' % control_type, control_state, target, HostType.control)
cleanup_ssh_ports(args, managed_connections, '%s_hosts_restore.yml' % managed_type, managed_state, target, HostType.managed)
else:
pre_target, post_target = None, None
return pre_target, post_target
def create_managed_contexts(control_contexts: dict[str, dict[str, ContainerAccess]]) -> dict[str, dict[str, ContainerAccess]]:
"""Create managed contexts from the given control contexts."""
managed_contexts: dict[str, dict[str, ContainerAccess]] = {}
for context_name, control_context in control_contexts.items():
managed_context = managed_contexts[context_name] = {}
for container_name, control_container in control_context.items():
managed_context[container_name] = ContainerAccess(control_container.host_ip, control_container.names, None, dict(control_container.port_map()))
return managed_contexts
def forward_ssh_ports(
args: IntegrationConfig,
ssh_connections: t.Optional[list[SshConnectionDetail]],
playbook: str,
target_state: dict[str, tuple[list[str], list[SshProcess]]],
target: IntegrationTarget,
host_type: str,
contexts: dict[str, dict[str, ContainerAccess]],
) -> None:
"""Configure port forwarding using SSH and write hosts file entries."""
if ssh_connections is None:
return
test_context = None
for context_name, context in contexts.items():
context_alias = 'cloud/%s/' % context_name
if context_alias in target.aliases:
test_context = context
break
if not test_context:
return
if not ssh_connections:
if args.explain:
return
raise Exception('The %s host was not pre-configured for container access and SSH forwarding is not available.' % host_type)
redirects: list[tuple[int, str, int]] = []
messages = []
for container_name, container in test_context.items():
explain = []
for container_port, access_port in container.port_map():
if container.forwards:
redirects.append((container_port, container.host_ip, access_port))
explain.append('%d -> %s:%d' % (container_port, container.host_ip, access_port))
else:
explain.append('%s:%d' % (container.host_ip, container_port))
if explain:
if container.forwards:
message = 'Port forwards for the "%s" container have been established on the %s host' % (container_name, host_type)
else:
message = 'Ports for the "%s" container are available on the %s host as' % (container_name, host_type)
messages.append('%s:\n%s' % (message, '\n'.join(explain)))
hosts_entries = create_hosts_entries(test_context)
inventory = generate_ssh_inventory(ssh_connections)
with named_temporary_file(args, 'ssh-inventory-', '.json', None, inventory) as inventory_path: # type: str
run_playbook(args, inventory_path, playbook, capture=False, variables=dict(hosts_entries=hosts_entries))
ssh_processes: list[SshProcess] = []
if redirects:
for ssh in ssh_connections:
ssh_processes.append(create_ssh_port_redirects(args, ssh, redirects))
target_state[target.name] = (hosts_entries, ssh_processes)
for message in messages:
display.info(message, verbosity=1)
def cleanup_ssh_ports(
args: IntegrationConfig,
ssh_connections: list[SshConnectionDetail],
playbook: str,
target_state: dict[str, tuple[list[str], list[SshProcess]]],
target: IntegrationTarget,
host_type: str,
) -> None:
"""Stop previously configured SSH port forwarding and remove previously written hosts file entries."""
state = target_state.pop(target.name, None)
if not state:
return
(hosts_entries, ssh_processes) = state
inventory = generate_ssh_inventory(ssh_connections)
with named_temporary_file(args, 'ssh-inventory-', '.json', None, inventory) as inventory_path: # type: str
run_playbook(args, inventory_path, playbook, capture=False, variables=dict(hosts_entries=hosts_entries))
if ssh_processes:
for process in ssh_processes:
process.terminate()
display.info('Waiting for the %s host SSH port forwarding process(es) to terminate.' % host_type, verbosity=1)
for process in ssh_processes:
process.wait()
| 33,905
|
Python
|
.py
| 723
| 38.305671
| 156
| 0.659093
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,133
|
inventory.py
|
ansible_ansible/test/lib/ansible_test/_internal/inventory.py
|
"""Inventory creation from host profiles."""
from __future__ import annotations
import shutil
import typing as t
from .config import (
EnvironmentConfig,
)
from .util import (
sanitize_host_name,
exclude_none_values,
)
from .host_profiles import (
ControllerHostProfile,
ControllerProfile,
HostProfile,
Inventory,
NetworkInventoryProfile,
NetworkRemoteProfile,
SshTargetHostProfile,
WindowsInventoryProfile,
WindowsRemoteProfile,
)
from .ssh import (
ssh_options_to_str,
)
def create_controller_inventory(args: EnvironmentConfig, path: str, controller_host: ControllerHostProfile) -> None:
"""Create and return inventory for use in controller-only integration tests."""
inventory = Inventory(
host_groups=dict(
testgroup=dict(
testhost=dict(
ansible_connection='local',
ansible_pipelining='yes',
ansible_python_interpreter=controller_host.python.path,
),
),
),
)
inventory.write(args, path)
def create_windows_inventory(args: EnvironmentConfig, path: str, target_hosts: list[HostProfile]) -> None:
"""Create and return inventory for use in target Windows integration tests."""
first = target_hosts[0]
if isinstance(first, WindowsInventoryProfile):
if args.explain:
return
try:
shutil.copyfile(first.config.path, path)
except shutil.SameFileError:
pass
return
target_hosts = t.cast(list[WindowsRemoteProfile], target_hosts)
hosts = [(target_host, target_host.wait_for_instance().connection) for target_host in target_hosts]
windows_hosts = {sanitize_host_name(host.config.name): host.get_inventory_variables() for host, connection in hosts}
inventory = Inventory(
host_groups=dict(
windows=windows_hosts,
),
# The `testhost` group is needed to support the `binary_modules_winrm` integration test.
# The test should be updated to remove the need for this.
extra_groups={
'testhost:children': [
'windows',
],
},
)
inventory.write(args, path)
def create_network_inventory(args: EnvironmentConfig, path: str, target_hosts: list[HostProfile]) -> None:
"""Create and return inventory for use in target network integration tests."""
first = target_hosts[0]
if isinstance(first, NetworkInventoryProfile):
if args.explain:
return
try:
shutil.copyfile(first.config.path, path)
except shutil.SameFileError:
pass
return
target_hosts = t.cast(list[NetworkRemoteProfile], target_hosts)
host_groups: dict[str, dict[str, dict[str, t.Union[str, int]]]] = {target_host.config.platform: {} for target_host in target_hosts}
for target_host in target_hosts:
host_groups[target_host.config.platform][sanitize_host_name(target_host.config.name)] = target_host.get_inventory_variables()
inventory = Inventory(
host_groups=host_groups,
# The `net` group was added to support platform agnostic testing. It may no longer be needed.
# see: https://github.com/ansible/ansible/pull/34661
# see: https://github.com/ansible/ansible/pull/34707
extra_groups={
'net:children': sorted(host_groups),
},
)
inventory.write(args, path)
def create_posix_inventory(args: EnvironmentConfig, path: str, target_hosts: list[HostProfile], needs_ssh: bool = False) -> None:
"""Create and return inventory for use in POSIX integration tests."""
target_hosts = t.cast(list[SshTargetHostProfile], target_hosts)
if len(target_hosts) != 1:
raise Exception('Exactly one target host is required.')
target_host = target_hosts[0]
if isinstance(target_host, ControllerProfile) and not needs_ssh:
inventory = Inventory(
host_groups=dict(
testgroup=dict(
testhost=dict(
ansible_connection='local',
ansible_pipelining='yes',
ansible_python_interpreter=target_host.python.path,
),
),
),
)
else:
connections = target_host.get_controller_target_connections()
if len(connections) != 1:
raise Exception('Exactly one controller-to-target connection is required.')
ssh = connections[0]
testhost: dict[str, t.Optional[t.Union[str, int]]] = dict(
ansible_connection='ssh',
ansible_pipelining='yes',
ansible_python_interpreter=ssh.settings.python_interpreter,
ansible_host=ssh.settings.host,
ansible_port=ssh.settings.port,
ansible_user=ssh.settings.user,
ansible_ssh_private_key_file=ssh.settings.identity_file,
ansible_ssh_extra_args=ssh_options_to_str(ssh.settings.options),
)
if ssh.become:
testhost.update(
ansible_become='yes',
ansible_become_method=ssh.become.method,
)
testhost = exclude_none_values(testhost)
inventory = Inventory(
host_groups=dict(
testgroup=dict(
testhost=testhost,
),
),
)
inventory.write(args, path)
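# Standalone stand-in for exclude_none_values (an internal helper used above),
# showing the behavior with illustrative data:
def _drop_none(data: dict) -> dict:
    return {key: value for key, value in data.items() if value is not None}
assert _drop_none({'ansible_port': None, 'ansible_user': 'root'}) == {'ansible_user': 'root'}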
| 5,408
|
Python
|
.py
| 138
| 29.891304
| 135
| 0.630805
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,134
|
http.py
|
ansible_ansible/test/lib/ansible_test/_internal/http.py
|
"""
Primitive replacement for requests to avoid extra dependency.
Avoids use of urllib2 due to lack of SNI support.
"""
from __future__ import annotations
import json
import time
import typing as t
from .util import (
ApplicationError,
SubprocessError,
display,
)
from .util_common import (
CommonConfig,
run_command,
)
class HttpClient:
"""Make HTTP requests via curl."""
def __init__(self, args: CommonConfig, always: bool = False, insecure: bool = False, proxy: t.Optional[str] = None) -> None:
self.args = args
self.always = always
self.insecure = insecure
self.proxy = proxy
self.username = None
self.password = None
def get(self, url: str) -> HttpResponse:
"""Perform an HTTP GET and return the response."""
return self.request('GET', url)
def delete(self, url: str) -> HttpResponse:
"""Perform an HTTP DELETE and return the response."""
return self.request('DELETE', url)
def put(self, url: str, data: t.Optional[str] = None, headers: t.Optional[dict[str, str]] = None) -> HttpResponse:
"""Perform an HTTP PUT and return the response."""
return self.request('PUT', url, data, headers)
def request(self, method: str, url: str, data: t.Optional[str] = None, headers: t.Optional[dict[str, str]] = None) -> HttpResponse:
"""Perform an HTTP request and return the response."""
cmd = ['curl', '-s', '-S', '-i', '-X', method]
if self.insecure:
cmd += ['--insecure']
if headers is None:
headers = {}
headers['Expect'] = '' # don't send expect continue header
if self.username:
if self.password:
display.sensitive.add(self.password)
cmd += ['-u', '%s:%s' % (self.username, self.password)]
else:
cmd += ['-u', self.username]
for header in headers.keys():
cmd += ['-H', '%s: %s' % (header, headers[header])]
if data is not None:
cmd += ['-d', data]
if self.proxy:
cmd += ['-x', self.proxy]
cmd += [url]
attempts = 0
max_attempts = 3
sleep_seconds = 3
# curl error codes which are safe to retry (request never sent to server)
retry_on_status = (
6, # CURLE_COULDNT_RESOLVE_HOST
)
stdout = ''
while True:
attempts += 1
try:
stdout = run_command(self.args, cmd, capture=True, always=self.always, cmd_verbosity=2)[0]
break
except SubprocessError as ex:
if ex.status in retry_on_status and attempts < max_attempts:
display.warning('%s' % ex)
time.sleep(sleep_seconds)
continue
raise
if self.args.explain and not self.always:
return HttpResponse(method, url, 200, '')
header, body = stdout.split('\r\n\r\n', 1)
response_headers = header.split('\r\n')
first_line = response_headers[0]
http_response = first_line.split(' ')
status_code = int(http_response[1])
return HttpResponse(method, url, status_code, body)
class HttpResponse:
"""HTTP response from curl."""
def __init__(self, method: str, url: str, status_code: int, response: str) -> None:
self.method = method
self.url = url
self.status_code = status_code
self.response = response
def json(self) -> t.Any:
"""Return the response parsed as JSON, raising an exception if parsing fails."""
try:
return json.loads(self.response)
except ValueError:
raise HttpError(self.status_code, 'Cannot parse response to %s %s as JSON:\n%s' % (self.method, self.url, self.response)) from None
class HttpError(ApplicationError):
"""HTTP response as an error."""
def __init__(self, status: int, message: str) -> None:
super().__init__('%s: %s' % (status, message))
self.status = status
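# Standalone sketch of the header/body split performed in request() above,
# using a canned `curl -i` style response (illustrative only):
_raw = 'HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n\r\n{"ok": true}'
_header, _body = _raw.split('\r\n\r\n', 1)
_status_code = int(_header.split('\r\n')[0].split(' ')[1])
assert _status_code == 200 and _body == '{"ok": true}'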
| 4,123
|
Python
|
.py
| 101
| 31.821782
| 143
| 0.584546
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,135
|
container_probe.py
|
ansible_ansible/test/lib/ansible_test/_internal/dev/container_probe.py
|
"""Diagnostic utilities to probe container cgroup behavior during development and testing (both manual and integration)."""
from __future__ import annotations
import dataclasses
import enum
import json
import os
import pathlib
import pwd
import typing as t
from ..io import (
read_text_file,
write_text_file,
)
from ..util import (
display,
ANSIBLE_TEST_TARGET_ROOT,
)
from ..config import (
EnvironmentConfig,
)
from ..docker_util import (
LOGINUID_NOT_SET,
docker_exec,
get_docker_info,
get_podman_remote,
require_docker,
)
from ..host_configs import (
DockerConfig,
)
from ..cgroup import (
CGroupEntry,
CGroupPath,
MountEntry,
MountType,
)
class CGroupState(enum.Enum):
"""The expected state of a cgroup related mount point."""
HOST = enum.auto()
PRIVATE = enum.auto()
SHADOWED = enum.auto()
@dataclasses.dataclass(frozen=True)
class CGroupMount:
"""Details on a cgroup mount point that is expected to be present in the container."""
path: str
type: t.Optional[str]
writable: t.Optional[bool]
state: t.Optional[CGroupState]
def __post_init__(self):
assert pathlib.PurePosixPath(self.path).is_relative_to(CGroupPath.ROOT)
if self.type is None:
assert self.state is None
elif self.type == MountType.TMPFS:
assert self.writable is True
assert self.state is None
else:
assert self.type in (MountType.CGROUP_V1, MountType.CGROUP_V2)
assert self.state is not None
def check_container_cgroup_status(args: EnvironmentConfig, config: DockerConfig, container_name: str, expected_mounts: tuple[CGroupMount, ...]) -> None:
"""Check the running container to examine the state of the cgroup hierarchies."""
cmd = ['sh', '-c', 'cat /proc/1/cgroup && echo && cat /proc/1/mountinfo']
stdout = docker_exec(args, container_name, cmd, capture=True)[0]
cgroups_stdout, mounts_stdout = stdout.split('\n\n')
cgroups = CGroupEntry.loads(cgroups_stdout)
mounts = MountEntry.loads(mounts_stdout)
mounts = tuple(mount for mount in mounts if mount.path.is_relative_to(CGroupPath.ROOT))
mount_cgroups: dict[MountEntry, CGroupEntry] = {}
probe_paths: dict[pathlib.PurePosixPath, t.Optional[str]] = {}
for cgroup in cgroups:
if cgroup.subsystem:
mount = ([mount for mount in mounts if
mount.type == MountType.CGROUP_V1 and
mount.path.is_relative_to(cgroup.root_path) and
cgroup.full_path.is_relative_to(mount.path)
] or [None])[-1]
else:
mount = ([mount for mount in mounts if
mount.type == MountType.CGROUP_V2 and
mount.path == cgroup.root_path
] or [None])[-1]
if mount:
mount_cgroups[mount] = cgroup
for mount in mounts:
probe_paths[mount.path] = None
if (cgroup := mount_cgroups.get(mount)) and cgroup.full_path != mount.path: # child of mount.path
probe_paths[cgroup.full_path] = None
probe_script = read_text_file(os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'probe_cgroups.py'))
probe_command = [config.python.path, '-', f'{container_name}-probe'] + [str(path) for path in probe_paths]
probe_results = json.loads(docker_exec(args, container_name, probe_command, capture=True, data=probe_script)[0])
for path in probe_paths:
probe_paths[path] = probe_results[str(path)]
remaining_mounts: dict[pathlib.PurePosixPath, MountEntry] = {mount.path: mount for mount in mounts}
results: dict[pathlib.PurePosixPath, tuple[bool, str]] = {}
for expected_mount in expected_mounts:
expected_path = pathlib.PurePosixPath(expected_mount.path)
if not (actual_mount := remaining_mounts.pop(expected_path, None)):
results[expected_path] = (False, 'not mounted')
continue
actual_mount_write_error = probe_paths[actual_mount.path]
actual_mount_errors = []
if cgroup := mount_cgroups.get(actual_mount):
if expected_mount.state == CGroupState.SHADOWED:
actual_mount_errors.append('unexpected cgroup association')
if cgroup.root_path == cgroup.full_path and expected_mount.state == CGroupState.HOST:
results[cgroup.root_path.joinpath('???')] = (False, 'missing cgroup')
if cgroup.full_path == actual_mount.path:
if cgroup.root_path != cgroup.full_path and expected_mount.state == CGroupState.PRIVATE:
actual_mount_errors.append('unexpected mount')
else:
cgroup_write_error = probe_paths[cgroup.full_path]
cgroup_errors = []
if expected_mount.state == CGroupState.SHADOWED:
cgroup_errors.append('unexpected cgroup association')
if cgroup.root_path != cgroup.full_path and expected_mount.state == CGroupState.PRIVATE:
cgroup_errors.append('unexpected cgroup')
if cgroup_write_error:
cgroup_errors.append(cgroup_write_error)
if cgroup_errors:
results[cgroup.full_path] = (False, f'directory errors: {", ".join(cgroup_errors)}')
else:
results[cgroup.full_path] = (True, 'directory (writable)')
elif expected_mount.state not in (None, CGroupState.SHADOWED):
actual_mount_errors.append('missing cgroup association')
if actual_mount.type != expected_mount.type and expected_mount.type is not None:
actual_mount_errors.append(f'type not {expected_mount.type}')
if bool(actual_mount_write_error) == expected_mount.writable:
actual_mount_errors.append(f'{actual_mount_write_error or "writable"}')
if actual_mount_errors:
results[actual_mount.path] = (False, f'{actual_mount.type} errors: {", ".join(actual_mount_errors)}')
else:
results[actual_mount.path] = (True, f'{actual_mount.type} ({actual_mount_write_error or "writable"})')
for remaining_mount in remaining_mounts.values():
remaining_mount_write_error = probe_paths[remaining_mount.path]
results[remaining_mount.path] = (False, f'unexpected {remaining_mount.type} mount ({remaining_mount_write_error or "writable"})')
identity = get_identity(args, config, container_name)
messages: list[tuple[pathlib.PurePosixPath, bool, str]] = [(path, result[0], result[1]) for path, result in sorted(results.items())]
message = '\n'.join(f'{"PASS" if result else "FAIL"}: {path} -> {message}' for path, result, message in messages)
display.info(f'>>> Container: {identity}\n{message.rstrip()}')
if args.dev_probe_cgroups:
write_text_file(os.path.join(args.dev_probe_cgroups, f'{identity}.log'), message)
def get_identity(args: EnvironmentConfig, config: DockerConfig, container_name: str) -> str:
"""Generate and return an identity string to use when logging test results."""
engine = require_docker().command
try:
loginuid = int(read_text_file('/proc/self/loginuid'))
except FileNotFoundError:
loginuid = LOGINUID_NOT_SET
user = pwd.getpwuid(os.getuid()).pw_name
login_user = user if loginuid == LOGINUID_NOT_SET else pwd.getpwuid(loginuid).pw_name
remote = engine == 'podman' and get_podman_remote()
tags = (
config.name,
engine,
f'cgroup={config.cgroup.value}@{get_docker_info(args).cgroup_version}',
f'remote={remote}',
f'user={user}',
f'loginuid={login_user}',
container_name,
)
return '|'.join(tags)
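# The identity string above joins its tags with '|'; with illustrative values
# it reads roughly like:
#   fedora39|podman|cgroup=v2@2|remote=False|user=dev|loginuid=dev|test-container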
| 7,819
|
Python
|
.py
| 160
| 40.25625
| 152
| 0.651242
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,136
|
__init__.py
|
ansible_ansible/test/lib/ansible_test/_internal/dev/__init__.py
|
"""Development and testing support code. Enabled through the use of `--dev-*` command line options."""
from __future__ import annotations
| 138
|
Python
|
.py
| 2
| 68
| 102
| 0.75
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,137
|
local.py
|
ansible_ansible/test/lib/ansible_test/_internal/ci/local.py
|
"""Support code for working without a supported CI provider."""
from __future__ import annotations
import os
import platform
import random
import re
import typing as t
from ..config import (
CommonConfig,
TestConfig,
)
from ..io import (
read_text_file,
)
from ..git import (
Git,
)
from ..util import (
ApplicationError,
display,
is_binary_file,
SubprocessError,
)
from . import (
CIProvider,
)
CODE = '' # not really a CI provider, so use an empty string for the code
class Local(CIProvider):
"""CI provider implementation when not using CI."""
priority = 1000
@staticmethod
def is_supported() -> bool:
"""Return True if this provider is supported in the current running environment."""
return True
@property
def code(self) -> str:
"""Return a unique code representing this provider."""
return CODE
@property
def name(self) -> str:
"""Return descriptive name for this provider."""
return 'Local'
def generate_resource_prefix(self) -> str:
"""Return a resource prefix specific to this CI provider."""
prefix = 'ansible-test-%d-%s' % (
random.randint(10000000, 99999999),
platform.node().split('.')[0],
)
return prefix
def get_base_commit(self, args: CommonConfig) -> str:
"""Return the base commit or an empty string."""
return ''
def detect_changes(self, args: TestConfig) -> t.Optional[list[str]]:
"""Initialize change detection."""
result = LocalChanges(args)
display.info('Detected branch %s forked from %s at commit %s' % (
result.current_branch, result.fork_branch, result.fork_point))
if result.untracked and not args.untracked:
display.warning('Ignored %s untracked file(s). Use --untracked to include them.' %
len(result.untracked))
if result.committed and not args.committed:
display.warning('Ignored %s committed change(s). Omit --ignore-committed to include them.' %
len(result.committed))
if result.staged and not args.staged:
display.warning('Ignored %s staged change(s). Omit --ignore-staged to include them.' %
len(result.staged))
if result.unstaged and not args.unstaged:
display.warning('Ignored %s unstaged change(s). Omit --ignore-unstaged to include them.' %
len(result.unstaged))
names = set()
if args.tracked:
names |= set(result.tracked)
if args.untracked:
names |= set(result.untracked)
if args.committed:
names |= set(result.committed)
if args.staged:
names |= set(result.staged)
if args.unstaged:
names |= set(result.unstaged)
if not args.metadata.changes:
args.metadata.populate_changes(result.diff)
for path in result.untracked:
if is_binary_file(path):
args.metadata.changes[path] = ((0, 0),)
continue
line_count = len(read_text_file(path).splitlines())
args.metadata.changes[path] = ((1, line_count),)
return sorted(names)
def supports_core_ci_auth(self) -> bool:
"""Return True if Ansible Core CI is supported."""
path = self._get_aci_key_path()
return os.path.exists(path)
def prepare_core_ci_auth(self) -> dict[str, t.Any]:
"""Return authentication details for Ansible Core CI."""
path = self._get_aci_key_path()
auth_key = read_text_file(path).strip()
request = dict(
key=auth_key,
nonce=None,
)
auth = dict(
remote=request,
)
return auth
def get_git_details(self, args: CommonConfig) -> t.Optional[dict[str, t.Any]]:
"""Return details about git in the current environment."""
return None # not yet implemented for local
@staticmethod
def _get_aci_key_path() -> str:
path = os.path.expanduser('~/.ansible-core-ci.key')
return path
class InvalidBranch(ApplicationError):
"""Exception for invalid branch specification."""
def __init__(self, branch: str, reason: str) -> None:
message = 'Invalid branch: %s\n%s' % (branch, reason)
super().__init__(message)
self.branch = branch
class LocalChanges:
"""Change information for local work."""
def __init__(self, args: TestConfig) -> None:
self.args = args
self.git = Git()
self.current_branch = self.git.get_branch()
if self.is_official_branch(self.current_branch):
raise InvalidBranch(branch=self.current_branch,
reason='Current branch is not a feature branch.')
self.fork_branch = None
self.fork_point = None
self.local_branches = sorted(self.git.get_branches())
self.official_branches = sorted([b for b in self.local_branches if self.is_official_branch(b)])
for self.fork_branch in self.official_branches:
try:
self.fork_point = self.git.get_branch_fork_point(self.fork_branch)
break
except SubprocessError:
pass
if self.fork_point is None:
raise ApplicationError('Unable to auto-detect fork branch and fork point.')
# tracked files (including unchanged)
self.tracked = sorted(self.git.get_file_names(['--cached']))
# untracked files (except ignored)
self.untracked = sorted(self.git.get_file_names(['--others', '--exclude-standard']))
# tracked changes (including deletions) committed since the branch was forked
self.committed = sorted(self.git.get_diff_names([self.fork_point, 'HEAD']))
# tracked changes (including deletions) which are staged
self.staged = sorted(self.git.get_diff_names(['--cached']))
# tracked changes (including deletions) which are not staged
self.unstaged = sorted(self.git.get_diff_names([]))
# diff of all tracked files from fork point to working copy
self.diff = self.git.get_diff([self.fork_point])
def is_official_branch(self, name: str) -> bool:
"""Return True if the given branch name an official branch for development or releases."""
if self.args.base_branch:
return name == self.args.base_branch
if name == 'devel':
return True
if re.match(r'^stable-[0-9]+\.[0-9]+$', name):
return True
return False
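# Standalone check of the stable-branch regex above with sample names
# (illustrative inputs; `re` is already imported by this module):
assert re.match(r'^stable-[0-9]+\.[0-9]+$', 'stable-2.17')
assert not re.match(r'^stable-[0-9]+\.[0-9]+$', 'feature/stable-2.17')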
| 6,739
|
Python
|
.py
| 160
| 32.875
| 104
| 0.613121
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,138
|
__init__.py
|
ansible_ansible/test/lib/ansible_test/_internal/ci/__init__.py
|
"""Support code for CI environments."""
from __future__ import annotations
import abc
import base64
import json
import os
import tempfile
import typing as t
from ..encoding import (
to_bytes,
to_text,
)
from ..io import (
read_text_file,
write_text_file,
)
from ..config import (
CommonConfig,
TestConfig,
)
from ..util import (
ApplicationError,
display,
get_subclasses,
import_plugins,
raw_command,
cache,
)
class ChangeDetectionNotSupported(ApplicationError):
"""Exception for cases where change detection is not supported."""
class CIProvider(metaclass=abc.ABCMeta):
"""Base class for CI provider plugins."""
priority = 500
@staticmethod
@abc.abstractmethod
def is_supported() -> bool:
"""Return True if this provider is supported in the current running environment."""
@property
@abc.abstractmethod
def code(self) -> str:
"""Return a unique code representing this provider."""
@property
@abc.abstractmethod
def name(self) -> str:
"""Return descriptive name for this provider."""
@abc.abstractmethod
def generate_resource_prefix(self) -> str:
"""Return a resource prefix specific to this CI provider."""
@abc.abstractmethod
def get_base_commit(self, args: CommonConfig) -> str:
"""Return the base commit or an empty string."""
@abc.abstractmethod
def detect_changes(self, args: TestConfig) -> t.Optional[list[str]]:
"""Initialize change detection."""
@abc.abstractmethod
def supports_core_ci_auth(self) -> bool:
"""Return True if Ansible Core CI is supported."""
@abc.abstractmethod
def prepare_core_ci_auth(self) -> dict[str, t.Any]:
"""Return authentication details for Ansible Core CI."""
@abc.abstractmethod
def get_git_details(self, args: CommonConfig) -> t.Optional[dict[str, t.Any]]:
"""Return details about git in the current environment."""
@cache
def get_ci_provider() -> CIProvider:
"""Return a CI provider instance for the current environment."""
provider = None
import_plugins('ci')
candidates = sorted(get_subclasses(CIProvider), key=lambda subclass: (subclass.priority, subclass.__name__))
for candidate in candidates:
if candidate.is_supported():
provider = candidate()
break
if provider.code:
display.info('Detected CI provider: %s' % provider.name)
return provider
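# Priority-ordered selection sketch (hypothetical classes, standalone): lower
# priority values sort first, and the first supported candidate wins.
class _FallbackProvider:
    priority = 1000
    @staticmethod
    def is_supported() -> bool:
        return True
class _UnavailableProvider:
    priority = 500
    @staticmethod
    def is_supported() -> bool:
        return False
_ordered = sorted([_FallbackProvider, _UnavailableProvider], key=lambda cls: (cls.priority, cls.__name__))
assert next(cls for cls in _ordered if cls.is_supported()) is _FallbackProvider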
class AuthHelper(metaclass=abc.ABCMeta):
"""Public key based authentication helper for Ansible Core CI."""
def sign_request(self, request: dict[str, t.Any]) -> None:
"""Sign the given auth request and make the public key available."""
payload_bytes = to_bytes(json.dumps(request, sort_keys=True))
signature_raw_bytes = self.sign_bytes(payload_bytes)
signature = to_text(base64.b64encode(signature_raw_bytes))
request.update(signature=signature)
def initialize_private_key(self) -> str:
"""
Initialize and publish a new key pair (if needed) and return the private key.
The private key is cached across ansible-test invocations, so it is only generated and published once per CI job.
"""
path = os.path.expanduser('~/.ansible-core-ci-private.key')
if os.path.exists(to_bytes(path)):
private_key_pem = read_text_file(path)
else:
private_key_pem = self.generate_private_key()
write_text_file(path, private_key_pem)
return private_key_pem
@abc.abstractmethod
def sign_bytes(self, payload_bytes: bytes) -> bytes:
"""Sign the given payload and return the signature, initializing a new key pair if required."""
@abc.abstractmethod
def publish_public_key(self, public_key_pem: str) -> None:
"""Publish the given public key."""
@abc.abstractmethod
def generate_private_key(self) -> str:
"""Generate a new key pair, publishing the public key and returning the private key."""
class CryptographyAuthHelper(AuthHelper, metaclass=abc.ABCMeta):
"""Cryptography based public key based authentication helper for Ansible Core CI."""
def sign_bytes(self, payload_bytes: bytes) -> bytes:
"""Sign the given payload and return the signature, initializing a new key pair if required."""
# import cryptography here to avoid overhead and failures in environments which do not use/provide it
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import load_pem_private_key
private_key_pem = self.initialize_private_key()
private_key = load_pem_private_key(to_bytes(private_key_pem), None, default_backend())
assert isinstance(private_key, ec.EllipticCurvePrivateKey)
signature_raw_bytes = private_key.sign(payload_bytes, ec.ECDSA(hashes.SHA256()))
return signature_raw_bytes
def generate_private_key(self) -> str:
"""Generate a new key pair, publishing the public key and returning the private key."""
# import cryptography here to avoid overhead and failures in environments which do not use/provide it
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec
private_key = ec.generate_private_key(ec.SECP384R1(), default_backend())
public_key = private_key.public_key()
private_key_pem = to_text(private_key.private_bytes( # type: ignore[attr-defined] # documented method, but missing from type stubs
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
))
public_key_pem = to_text(public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
))
self.publish_public_key(public_key_pem)
return private_key_pem
class OpenSSLAuthHelper(AuthHelper, metaclass=abc.ABCMeta):
"""OpenSSL based public key based authentication helper for Ansible Core CI."""
def sign_bytes(self, payload_bytes: bytes) -> bytes:
"""Sign the given payload and return the signature, initializing a new key pair if required."""
private_key_pem = self.initialize_private_key()
with tempfile.NamedTemporaryFile() as private_key_file:
private_key_file.write(to_bytes(private_key_pem))
private_key_file.flush()
with tempfile.NamedTemporaryFile() as payload_file:
payload_file.write(payload_bytes)
payload_file.flush()
with tempfile.NamedTemporaryFile() as signature_file:
raw_command(['openssl', 'dgst', '-sha256', '-sign', private_key_file.name, '-out', signature_file.name, payload_file.name], capture=True)
signature_raw_bytes = signature_file.read()
return signature_raw_bytes
def generate_private_key(self) -> str:
"""Generate a new key pair, publishing the public key and returning the private key."""
private_key_pem = raw_command(['openssl', 'ecparam', '-genkey', '-name', 'secp384r1', '-noout'], capture=True)[0]
public_key_pem = raw_command(['openssl', 'ec', '-pubout'], data=private_key_pem, capture=True)[0]
self.publish_public_key(public_key_pem)
return private_key_pem
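# Standalone sketch of the sign/verify round trip implied by the helpers above,
# assuming the `cryptography` package is available (illustrative only):
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
_key = ec.generate_private_key(ec.SECP384R1())
_signature = _key.sign(b'payload', ec.ECDSA(hashes.SHA256()))
_key.public_key().verify(_signature, b'payload', ec.ECDSA(hashes.SHA256()))  # raises InvalidSignature on mismatch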
| 7,738
|
Python
|
.py
| 159
| 41.283019
| 157
| 0.688431
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,139
|
azp.py
|
ansible_ansible/test/lib/ansible_test/_internal/ci/azp.py
|
"""Support code for working with Azure Pipelines."""
from __future__ import annotations
import os
import tempfile
import uuid
import typing as t
import urllib.parse
from ..encoding import (
to_bytes,
)
from ..config import (
CommonConfig,
TestConfig,
)
from ..git import (
Git,
)
from ..http import (
HttpClient,
)
from ..util import (
display,
MissingEnvironmentVariable,
)
from . import (
ChangeDetectionNotSupported,
CIProvider,
CryptographyAuthHelper,
)
CODE = 'azp'
class AzurePipelines(CIProvider):
"""CI provider implementation for Azure Pipelines."""
def __init__(self) -> None:
self.auth = AzurePipelinesAuthHelper()
self._changes: AzurePipelinesChanges | None = None
@staticmethod
def is_supported() -> bool:
"""Return True if this provider is supported in the current running environment."""
return os.environ.get('SYSTEM_COLLECTIONURI', '').startswith('https://dev.azure.com/')
@property
def code(self) -> str:
"""Return a unique code representing this provider."""
return CODE
@property
def name(self) -> str:
"""Return descriptive name for this provider."""
return 'Azure Pipelines'
def generate_resource_prefix(self) -> str:
"""Return a resource prefix specific to this CI provider."""
try:
prefix = 'azp-%s-%s-%s' % (
os.environ['BUILD_BUILDID'],
os.environ['SYSTEM_JOBATTEMPT'],
os.environ['SYSTEM_JOBIDENTIFIER'],
)
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0]) from None
return prefix
def get_base_commit(self, args: CommonConfig) -> str:
"""Return the base commit or an empty string."""
return self._get_changes(args).base_commit or ''
def _get_changes(self, args: CommonConfig) -> AzurePipelinesChanges:
"""Return an AzurePipelinesChanges instance, which will be created on first use."""
if not self._changes:
self._changes = AzurePipelinesChanges(args)
return self._changes
def detect_changes(self, args: TestConfig) -> t.Optional[list[str]]:
"""Initialize change detection."""
result = self._get_changes(args)
if result.is_pr:
job_type = 'pull request'
else:
job_type = 'merge commit'
display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))
if not args.metadata.changes:
args.metadata.populate_changes(result.diff)
if result.paths is None:
# There are several likely causes of this:
# - First run on a new branch.
# - Too many pull requests passed since the last merge run passed.
display.warning('No successful commit found. All tests will be executed.')
return result.paths
def supports_core_ci_auth(self) -> bool:
"""Return True if Ansible Core CI is supported."""
return True
def prepare_core_ci_auth(self) -> dict[str, t.Any]:
"""Return authentication details for Ansible Core CI."""
try:
request = dict(
org_name=os.environ['SYSTEM_COLLECTIONURI'].strip('/').split('/')[-1],
project_name=os.environ['SYSTEM_TEAMPROJECT'],
build_id=int(os.environ['BUILD_BUILDID']),
task_id=str(uuid.UUID(os.environ['SYSTEM_TASKINSTANCEID'])),
)
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0]) from None
self.auth.sign_request(request)
auth = dict(
azp=request,
)
return auth
def get_git_details(self, args: CommonConfig) -> t.Optional[dict[str, t.Any]]:
"""Return details about git in the current environment."""
changes = self._get_changes(args)
details = dict(
base_commit=changes.base_commit,
commit=changes.commit,
)
return details
class AzurePipelinesAuthHelper(CryptographyAuthHelper):
"""
Authentication helper for Azure Pipelines.
    Based on the cryptography package, since it is provided by the default Azure Pipelines environment.
"""
def publish_public_key(self, public_key_pem: str) -> None:
"""Publish the given public key."""
try:
agent_temp_directory = os.environ['AGENT_TEMPDIRECTORY']
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0]) from None
# the temporary file cannot be deleted because we do not know when the agent has processed it
# placing the file in the agent's temp directory allows it to be picked up when the job is running in a container
with tempfile.NamedTemporaryFile(prefix='public-key-', suffix='.pem', delete=False, dir=agent_temp_directory) as public_key_file:
public_key_file.write(to_bytes(public_key_pem))
public_key_file.flush()
# make the agent aware of the public key by declaring it as an attachment
vso_add_attachment('ansible-core-ci', 'public-key.pem', public_key_file.name)
class AzurePipelinesChanges:
"""Change information for an Azure Pipelines build."""
def __init__(self, args: CommonConfig) -> None:
self.args = args
self.git = Git()
try:
self.org_uri = os.environ['SYSTEM_COLLECTIONURI'] # ex: https://dev.azure.com/{org}/
self.project = os.environ['SYSTEM_TEAMPROJECT']
self.repo_type = os.environ['BUILD_REPOSITORY_PROVIDER'] # ex: GitHub
self.source_branch = os.environ['BUILD_SOURCEBRANCH']
self.source_branch_name = os.environ['BUILD_SOURCEBRANCHNAME']
self.pr_branch_name = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH')
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0]) from None
if self.source_branch.startswith('refs/tags/'):
raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
self.org = self.org_uri.strip('/').split('/')[-1]
self.is_pr = self.pr_branch_name is not None
if self.is_pr:
# HEAD is a merge commit of the PR branch into the target branch
# HEAD^1 is HEAD of the target branch (first parent of merge commit)
# HEAD^2 is HEAD of the PR branch (second parent of merge commit)
# see: https://git-scm.com/docs/gitrevisions
self.branch = self.pr_branch_name
self.base_commit = 'HEAD^1'
self.commit = 'HEAD^2'
else:
commits = self.get_successful_merge_run_commits()
self.branch = self.source_branch_name
self.base_commit = self.get_last_successful_commit(commits)
self.commit = 'HEAD'
self.commit = self.git.run_git(['rev-parse', self.commit]).strip()
if self.base_commit:
self.base_commit = self.git.run_git(['rev-parse', self.base_commit]).strip()
# <commit>...<commit>
# This form is to view the changes on the branch containing and up to the second <commit>, starting at a common ancestor of both <commit>.
# see: https://git-scm.com/docs/git-diff
dot_range = '%s...%s' % (self.base_commit, self.commit)
self.paths = sorted(self.git.get_diff_names([dot_range]))
self.diff = self.git.get_diff([dot_range])
else:
self.paths = None # act as though change detection not enabled, do not filter targets
self.diff = []
def get_successful_merge_run_commits(self) -> set[str]:
"""Return a set of recent successful merge commits from Azure Pipelines."""
parameters = dict(
maxBuildsPerDefinition=100, # max 5000
queryOrder='queueTimeDescending', # assumes under normal circumstances that later queued jobs are for later commits
resultFilter='succeeded',
            reasonFilter='batchedCI',  # may miss some non-PR reasons; the alternative is to filter the list after receiving it
repositoryType=self.repo_type,
repositoryId='%s/%s' % (self.org, self.project),
)
url = '%s%s/_apis/build/builds?api-version=6.0&%s' % (self.org_uri, self.project, urllib.parse.urlencode(parameters))
http = HttpClient(self.args, always=True)
response = http.get(url)
# noinspection PyBroadException
try:
result = response.json()
except Exception: # pylint: disable=broad-except
# most likely due to a private project, which returns an HTTP 203 response with HTML
display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
return set()
commits = set(build['sourceVersion'] for build in result['value'])
return commits
def get_last_successful_commit(self, commits: set[str]) -> t.Optional[str]:
"""Return the last successful commit from git history that is found in the given commit list, or None."""
commit_history = self.git.get_rev_list(max_count=100)
ordered_successful_commits = [commit for commit in commit_history if commit in commits]
last_successful_commit = ordered_successful_commits[0] if ordered_successful_commits else None
return last_successful_commit
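# --- illustrative note, not part of the original module ------------------------
# get_last_successful_commit() preserves git-history order (newest first), so
# with a hypothetical history ['c3', 'c2', 'c1'] and successful build commits
# {'c1', 'c2'} it returns 'c2', the most recent commit that passed.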
def vso_add_attachment(file_type: str, file_name: str, path: str) -> None:
"""Upload and attach a file to the current timeline record."""
vso('task.addattachment', dict(type=file_type, name=file_name), path)
def vso(name: str, data: dict[str, str], message: str) -> None:
"""
Write a logging command for the Azure Pipelines agent to process.
See: https://docs.microsoft.com/en-us/azure/devops/pipelines/scripts/logging-commands?view=azure-devops&tabs=bash
"""
display.info('##vso[%s %s]%s' % (name, ';'.join('='.join((key, value)) for key, value in data.items()), message))
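# --- illustrative note, not part of the original module ------------------------
# A hypothetical call showing the logging-command format produced by vso():
#
#   vso_add_attachment('ansible-core-ci', 'public-key.pem', '/tmp/public-key.pem')
#
# writes the following line for the Azure Pipelines agent to interpret:
#
#   ##vso[task.addattachment type=ansible-core-ci;name=public-key.pem]/tmp/public-key.pem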
| 10,136 | Python | .py | 207 | 40.014493 | 150 | 0.644877 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,140 | epilog.py | ansible_ansible/test/lib/ansible_test/_internal/cli/epilog.py |
"""Argument parsing epilog generation."""
from __future__ import annotations
from .argparsing import (
CompositeActionCompletionFinder,
)
from ..data import (
data_context,
)
def get_epilog(completer: CompositeActionCompletionFinder) -> str:
"""Generate and return the epilog to use for help output."""
if completer.enabled:
epilog = 'Tab completion available using the "argcomplete" python package.'
else:
epilog = 'Install the "argcomplete" python package to enable tab completion.'
if data_context().content.unsupported:
epilog += '\n\n' + data_context().explain_working_directory()
return epilog
| 658 | Python | .py | 17 | 34.294118 | 85 | 0.725984 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,141 | converters.py | ansible_ansible/test/lib/ansible_test/_internal/cli/converters.py |
"""Converters for use as the type argument for arparse's add_argument method."""
from __future__ import annotations
import argparse
def key_value_type(value: str) -> tuple[str, str]:
"""Wrapper around key_value."""
return key_value(value)
def key_value(value: str) -> tuple[str, str]:
"""Type parsing and validation for argparse key/value pairs separated by an '=' character."""
parts = value.split('=')
if len(parts) != 2:
raise argparse.ArgumentTypeError('"%s" must be in the format "key=value"' % value)
return parts[0], parts[1]
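# --- illustrative note, not part of the original module ------------------------
# key_value() accepts exactly one '=' separator; hypothetical inputs:
#
#   key_value('vyos=vyos.vyos')  ->  ('vyos', 'vyos.vyos')
#   key_value('no-separator')    ->  raises argparse.ArgumentTypeError
#   key_value('a=b=c')           ->  raises argparse.ArgumentTypeError (splits into three parts)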
| 572 | Python | .py | 12 | 43.416667 | 97 | 0.688969 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,142 | completers.py | ansible_ansible/test/lib/ansible_test/_internal/cli/completers.py |
"""Completers for use with argcomplete."""
from __future__ import annotations
import argparse
from ..target import (
find_target_completion,
)
from .argparsing.argcompletion import (
OptionCompletionFinder,
)
def complete_target(completer: OptionCompletionFinder, prefix: str, parsed_args: argparse.Namespace, **_) -> list[str]:
"""Perform completion for the targets configured for the command being parsed."""
matches = find_target_completion(parsed_args.targets_func, prefix, completer.list_mode)
completer.disable_completion_mangling = completer.list_mode and len(matches) > 1
return matches
def complete_choices(choices: list[str], prefix: str, **_) -> list[str]:
"""Perform completion using the provided choices."""
matches = [choice for choice in choices if choice.startswith(prefix)]
return matches
def register_completer(action: argparse.Action, completer) -> None:
"""Register the given completer with the specified action."""
action.completer = completer # type: ignore[attr-defined] # intentionally using an attribute that does not exist
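# --- illustrative usage sketch, not part of the original module ----------------
# Wiring a fixed-choices completer onto an argparse action, mirroring how
# complete_choices is used elsewhere in ansible-test; the option name and
# choices below are hypothetical.
import functools

_example_parser = argparse.ArgumentParser()
_example_action = _example_parser.add_argument('--flavor', metavar='NAME')
register_completer(_example_action, functools.partial(complete_choices, ['plain', 'spicy']))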
| 1,104 | Python | .py | 21 | 49.047619 | 119 | 0.756983 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,143 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/cli/__init__.py |
"""Command line parsing."""
from __future__ import annotations
import argparse
import os
import sys
import typing as t
from .argparsing import (
CompositeActionCompletionFinder,
)
from .commands import (
do_commands,
)
from .epilog import (
get_epilog,
)
from .compat import (
HostSettings,
convert_legacy_args,
)
from ..util import (
get_ansible_version,
)
def parse_args(argv: t.Optional[list[str]] = None) -> argparse.Namespace:
"""Parse command line arguments."""
completer = CompositeActionCompletionFinder()
parser = argparse.ArgumentParser(prog='ansible-test', epilog=get_epilog(completer), formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version=f'%(prog)s version {get_ansible_version()}')
do_commands(parser, completer)
completer(
parser,
always_complete_options=False,
)
if argv is None:
argv = sys.argv[1:]
else:
argv = argv[1:]
args = parser.parse_args(argv)
if args.explain and not args.verbosity:
args.verbosity = 1
if args.no_environment:
pass
elif args.host_path:
args.host_settings = HostSettings.deserialize(os.path.join(args.host_path, 'settings.dat'))
else:
args.host_settings = convert_legacy_args(argv, args, args.target_mode)
args.host_settings.apply_defaults()
return args
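# --- illustrative note, not part of the original module ------------------------
# parse_args() drops argv[0] to mirror sys.argv handling, so an explicit call
# must include a program-name placeholder; the arguments shown are hypothetical:
#
#   args = parse_args(['ansible-test', 'sanity', '--python', 'default'])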
| 1,427 | Python | .py | 47 | 25.702128 | 141 | 0.703812 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,144 | compat.py | ansible_ansible/test/lib/ansible_test/_internal/cli/compat.py |
"""Provides compatibility with first-generation host delegation options in ansible-test."""
from __future__ import annotations
import argparse
import collections.abc as c
import dataclasses
import enum
import os
import types
import typing as t
from ..constants import (
CONTROLLER_PYTHON_VERSIONS,
SUPPORTED_PYTHON_VERSIONS,
)
from ..util import (
ApplicationError,
display,
filter_args,
sorted_versions,
str_to_version,
)
from ..docker_util import (
docker_available,
)
from ..completion import (
docker_completion,
remote_completion,
filter_completion,
)
from ..host_configs import (
ControllerConfig,
ControllerHostConfig,
DockerConfig,
FallbackDetail,
FallbackReason,
HostConfig,
HostContext,
HostSettings,
NativePythonConfig,
NetworkInventoryConfig,
NetworkRemoteConfig,
OriginConfig,
PosixRemoteConfig,
VirtualPythonConfig,
WindowsInventoryConfig,
WindowsRemoteConfig,
)
from ..data import (
data_context,
)
def filter_python(version: t.Optional[str], versions: t.Optional[c.Sequence[str]]) -> t.Optional[str]:
"""If a Python version is given and is in the given version list, return that Python version, otherwise return None."""
return version if version in versions else None
def controller_python(version: t.Optional[str]) -> t.Optional[str]:
"""If a Python version is given and is supported by the controller, return that Python version, otherwise return None."""
return filter_python(version, CONTROLLER_PYTHON_VERSIONS)
def get_fallback_remote_controller() -> str:
"""Return the remote fallback platform for the controller."""
platform = 'freebsd' # lower cost than RHEL and macOS
candidates = [item for item in filter_completion(remote_completion()).values() if item.controller_supported and item.platform == platform]
fallback = sorted(candidates, key=lambda value: str_to_version(value.version), reverse=True)[0]
return fallback.name
def get_option_name(name: str) -> str:
"""Return a command-line option name from the given option name."""
if name == 'targets':
name = 'target'
return f'--{name.replace("_", "-")}'
class PythonVersionUnsupportedError(ApplicationError):
"""A Python version was requested for a context which does not support that version."""
def __init__(self, context: str, version: str, versions: c.Iterable[str]) -> None:
super().__init__(f'Python {version} is not supported by environment `{context}`. Supported Python version(s) are: {", ".join(versions)}')
class PythonVersionUnspecifiedError(ApplicationError):
"""A Python version was not specified for a context which is unknown, thus the Python version is unknown."""
def __init__(self, context: str) -> None:
super().__init__(
f'Environment `{context}` is unknown. Use a predefined environment instead. '
f'Alternatively, to use an unknown environment, use the `--python` option to specify a Python version.'
)
class ControllerNotSupportedError(ApplicationError):
"""Option(s) were specified which do not provide support for the controller and would be ignored because they are irrelevant for the target."""
def __init__(self, context: str) -> None:
super().__init__(f'Environment `{context}` does not provide a Python version supported by the controller.')
class OptionsConflictError(ApplicationError):
"""Option(s) were specified which conflict with other options."""
def __init__(self, first: c.Iterable[str], second: c.Iterable[str]) -> None:
super().__init__(f'Options `{" ".join(first)}` cannot be combined with options `{" ".join(second)}`.')
@dataclasses.dataclass(frozen=True)
class LegacyHostOptions:
"""Legacy host options used prior to the availability of separate controller and target host configuration."""
python: t.Optional[str] = None
python_interpreter: t.Optional[str] = None
local: t.Optional[bool] = None
venv: t.Optional[bool] = None
venv_system_site_packages: t.Optional[bool] = None
remote: t.Optional[str] = None
remote_provider: t.Optional[str] = None
remote_arch: t.Optional[str] = None
docker: t.Optional[str] = None
docker_privileged: t.Optional[bool] = None
docker_seccomp: t.Optional[str] = None
docker_memory: t.Optional[int] = None
windows: t.Optional[list[str]] = None
platform: t.Optional[list[str]] = None
platform_collection: t.Optional[list[tuple[str, str]]] = None
platform_connection: t.Optional[list[tuple[str, str]]] = None
inventory: t.Optional[str] = None
@staticmethod
def create(namespace: t.Union[argparse.Namespace, types.SimpleNamespace]) -> LegacyHostOptions:
"""Create legacy host options from the given namespace."""
kwargs = {field.name: getattr(namespace, field.name, None) for field in dataclasses.fields(LegacyHostOptions)}
if kwargs['python'] == 'default':
kwargs['python'] = None
return LegacyHostOptions(**kwargs)
@staticmethod
def purge_namespace(namespace: t.Union[argparse.Namespace, types.SimpleNamespace]) -> None:
"""Purge legacy host options fields from the given namespace."""
for field in dataclasses.fields(LegacyHostOptions):
if hasattr(namespace, field.name):
delattr(namespace, field.name)
@staticmethod
def purge_args(args: list[str]) -> list[str]:
"""Purge legacy host options from the given command line arguments."""
fields: tuple[dataclasses.Field, ...] = dataclasses.fields(LegacyHostOptions)
filters: dict[str, int] = {get_option_name(field.name): 0 if field.type is t.Optional[bool] else 1 for field in fields}
return filter_args(args, filters)
def get_options_used(self) -> tuple[str, ...]:
"""Return a tuple of the command line options used."""
fields: tuple[dataclasses.Field, ...] = dataclasses.fields(self)
options = tuple(sorted(get_option_name(field.name) for field in fields if getattr(self, field.name)))
return options
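# --- illustrative note, not part of the original module ------------------------
# purge_args() strips legacy host options from a raw argv list: boolean options
# consume no trailing value, all other options consume one. A hypothetical
# example (assuming filter_args drops each option plus that many values):
#
#   LegacyHostOptions.purge_args(['units', '--docker', 'default', '--venv', '--color'])
#   ->  ['units', '--color']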
class TargetMode(enum.Enum):
"""Type of provisioning to use for the targets."""
WINDOWS_INTEGRATION = enum.auto() # windows-integration
NETWORK_INTEGRATION = enum.auto() # network-integration
POSIX_INTEGRATION = enum.auto() # integration
SANITY = enum.auto() # sanity
UNITS = enum.auto() # units
SHELL = enum.auto() # shell
NO_TARGETS = enum.auto() # coverage
@property
def one_host(self) -> bool:
"""Return True if only one host (the controller) should be used, otherwise return False."""
return self in (TargetMode.SANITY, TargetMode.UNITS, TargetMode.NO_TARGETS)
@property
def no_fallback(self) -> bool:
"""Return True if no fallback is acceptable for the controller (due to options not applying to the target), otherwise return False."""
return self in (TargetMode.WINDOWS_INTEGRATION, TargetMode.NETWORK_INTEGRATION, TargetMode.NO_TARGETS)
@property
def multiple_pythons(self) -> bool:
"""Return True if multiple Python versions are allowed, otherwise False."""
return self in (TargetMode.SANITY, TargetMode.UNITS)
@property
def has_python(self) -> bool:
"""Return True if this mode uses Python, otherwise False."""
return self in (TargetMode.POSIX_INTEGRATION, TargetMode.SANITY, TargetMode.UNITS, TargetMode.SHELL)
def convert_legacy_args(
argv: list[str],
args: t.Union[argparse.Namespace, types.SimpleNamespace],
mode: TargetMode,
) -> HostSettings:
"""Convert pre-split host arguments in the given namespace to their split counterparts."""
old_options = LegacyHostOptions.create(args)
old_options.purge_namespace(args)
new_options = [
'--controller',
'--target',
'--target-python',
'--target-posix',
'--target-windows',
'--target-network',
]
used_old_options = old_options.get_options_used()
used_new_options = [name for name in new_options if name in argv]
if used_old_options:
if used_new_options:
raise OptionsConflictError(used_old_options, used_new_options)
controller, targets, controller_fallback = get_legacy_host_config(mode, old_options)
if controller_fallback:
if mode.one_host:
display.info(controller_fallback.message, verbosity=1)
else:
display.warning(controller_fallback.message)
used_default_pythons = mode in (TargetMode.SANITY, TargetMode.UNITS) and not native_python(old_options)
else:
controller = args.controller or OriginConfig()
controller_fallback = None
if mode == TargetMode.NO_TARGETS:
targets = []
used_default_pythons = False
elif args.targets:
targets = args.targets
used_default_pythons = False
else:
targets = default_targets(mode, controller)
used_default_pythons = mode in (TargetMode.SANITY, TargetMode.UNITS)
args.controller = controller
args.targets = targets
if used_default_pythons:
control_targets = t.cast(list[ControllerConfig], targets)
skipped_python_versions = sorted_versions(list(set(SUPPORTED_PYTHON_VERSIONS) - {target.python.version for target in control_targets}))
else:
skipped_python_versions = []
filtered_args = old_options.purge_args(argv)
filtered_args = filter_args(filtered_args, {name: 1 for name in new_options})
host_settings = HostSettings(
controller=controller,
targets=targets,
skipped_python_versions=skipped_python_versions,
filtered_args=filtered_args,
controller_fallback=controller_fallback,
)
return host_settings
def controller_targets(
mode: TargetMode,
options: LegacyHostOptions,
controller: ControllerHostConfig,
) -> list[HostConfig]:
"""Return the configuration for controller targets."""
python = native_python(options)
targets: list[HostConfig]
if python:
targets = [ControllerConfig(python=python)]
else:
targets = default_targets(mode, controller)
return targets
def native_python(options: LegacyHostOptions) -> t.Optional[NativePythonConfig]:
"""Return a NativePythonConfig for the given version if it is not None, otherwise return None."""
if not options.python and not options.python_interpreter:
return None
return NativePythonConfig(version=options.python, path=options.python_interpreter)
def get_legacy_host_config(
mode: TargetMode,
options: LegacyHostOptions,
) -> tuple[ControllerHostConfig, list[HostConfig], t.Optional[FallbackDetail]]:
"""
Returns controller and target host configs derived from the provided legacy host options.
    The goal is to match the original behavior by using non-split testing whenever possible.
When the options support the controller, use the options for the controller and use ControllerConfig for the targets.
When the options do not support the controller, use the options for the targets and use a default controller config influenced by the options.
"""
venv_fallback = 'venv/default'
docker_fallback = 'default'
remote_fallback = get_fallback_remote_controller()
controller_fallback: t.Optional[tuple[str, str, FallbackReason]] = None
controller: t.Optional[ControllerHostConfig]
targets: list[HostConfig]
if options.venv:
if controller_python(options.python) or not options.python:
controller = OriginConfig(python=VirtualPythonConfig(version=options.python or 'default', system_site_packages=options.venv_system_site_packages))
else:
controller_fallback = f'origin:python={venv_fallback}', f'--venv --python {options.python}', FallbackReason.PYTHON
controller = OriginConfig(python=VirtualPythonConfig(version='default', system_site_packages=options.venv_system_site_packages))
if mode in (TargetMode.SANITY, TargetMode.UNITS):
python = native_python(options)
if python:
control_targets = [ControllerConfig(python=python)]
else:
control_targets = controller.get_default_targets(HostContext(controller_config=controller))
# Target sanity tests either have no Python requirements or manage their own virtual environments.
# Thus, there is no point in setting up virtual environments ahead of time for them.
if mode == TargetMode.UNITS:
targets = [ControllerConfig(python=VirtualPythonConfig(version=target.python.version, path=target.python.path,
system_site_packages=options.venv_system_site_packages)) for target in control_targets]
else:
targets = t.cast(list[HostConfig], control_targets)
else:
targets = [ControllerConfig(python=VirtualPythonConfig(version=options.python or 'default',
system_site_packages=options.venv_system_site_packages))]
elif options.docker:
docker_config = filter_completion(docker_completion()).get(options.docker)
if docker_config:
if options.python and options.python not in docker_config.supported_pythons:
raise PythonVersionUnsupportedError(f'--docker {options.docker}', options.python, docker_config.supported_pythons)
if docker_config.controller_supported:
if controller_python(options.python) or not options.python:
controller = DockerConfig(name=options.docker, python=native_python(options),
privileged=options.docker_privileged, seccomp=options.docker_seccomp, memory=options.docker_memory)
targets = controller_targets(mode, options, controller)
else:
controller_fallback = f'docker:{options.docker}', f'--docker {options.docker} --python {options.python}', FallbackReason.PYTHON
controller = DockerConfig(name=options.docker)
targets = controller_targets(mode, options, controller)
else:
controller_fallback = f'docker:{docker_fallback}', f'--docker {options.docker}', FallbackReason.ENVIRONMENT
controller = DockerConfig(name=docker_fallback)
targets = [DockerConfig(name=options.docker, python=native_python(options),
privileged=options.docker_privileged, seccomp=options.docker_seccomp, memory=options.docker_memory)]
else:
if not options.python:
raise PythonVersionUnspecifiedError(f'--docker {options.docker}')
if controller_python(options.python):
controller = DockerConfig(name=options.docker, python=native_python(options),
privileged=options.docker_privileged, seccomp=options.docker_seccomp, memory=options.docker_memory)
targets = controller_targets(mode, options, controller)
else:
controller_fallback = f'docker:{docker_fallback}', f'--docker {options.docker} --python {options.python}', FallbackReason.PYTHON
controller = DockerConfig(name=docker_fallback)
targets = [DockerConfig(name=options.docker, python=native_python(options),
privileged=options.docker_privileged, seccomp=options.docker_seccomp, memory=options.docker_memory)]
elif options.remote:
remote_config = filter_completion(remote_completion()).get(options.remote)
context, reason = None, None
if remote_config:
if options.python and options.python not in remote_config.supported_pythons:
raise PythonVersionUnsupportedError(f'--remote {options.remote}', options.python, remote_config.supported_pythons)
if remote_config.controller_supported:
if controller_python(options.python) or not options.python:
controller = PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider,
arch=options.remote_arch)
targets = controller_targets(mode, options, controller)
else:
controller_fallback = f'remote:{options.remote}', f'--remote {options.remote} --python {options.python}', FallbackReason.PYTHON
controller = PosixRemoteConfig(name=options.remote, provider=options.remote_provider, arch=options.remote_arch)
targets = controller_targets(mode, options, controller)
else:
context, reason = f'--remote {options.remote}', FallbackReason.ENVIRONMENT
controller = None
targets = [PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider, arch=options.remote_arch)]
elif mode == TargetMode.SHELL and options.remote.startswith('windows/'):
if options.python and options.python not in CONTROLLER_PYTHON_VERSIONS:
raise ControllerNotSupportedError(f'--python {options.python}')
controller = OriginConfig(python=native_python(options))
targets = [WindowsRemoteConfig(name=options.remote, provider=options.remote_provider, arch=options.remote_arch)]
else:
if not options.python:
raise PythonVersionUnspecifiedError(f'--remote {options.remote}')
if controller_python(options.python):
controller = PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider, arch=options.remote_arch)
targets = controller_targets(mode, options, controller)
else:
context, reason = f'--remote {options.remote} --python {options.python}', FallbackReason.PYTHON
controller = None
targets = [PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider, arch=options.remote_arch)]
if not controller:
if docker_available():
controller_fallback = f'docker:{docker_fallback}', context, reason
controller = DockerConfig(name=docker_fallback)
else:
controller_fallback = f'remote:{remote_fallback}', context, reason
controller = PosixRemoteConfig(name=remote_fallback)
else: # local/unspecified
# There are several changes in behavior from the legacy implementation when using no delegation (or the `--local` option).
# These changes are due to ansible-test now maintaining consistency between its own Python and that of controller Python subprocesses.
#
# 1) The `--python-interpreter` option (if different from sys.executable) now affects controller subprocesses and triggers re-execution of ansible-test.
# Previously this option was completely ignored except when used with the `--docker` or `--remote` options.
# 2) The `--python` option now triggers re-execution of ansible-test if it differs from sys.version_info.
# Previously it affected Python subprocesses, but not ansible-test itself.
if controller_python(options.python) or not options.python:
controller = OriginConfig(python=native_python(options))
targets = controller_targets(mode, options, controller)
else:
controller_fallback = 'origin:python=default', f'--python {options.python}', FallbackReason.PYTHON
controller = OriginConfig()
targets = controller_targets(mode, options, controller)
if controller_fallback:
controller_option, context, reason = controller_fallback
if mode.no_fallback:
raise ControllerNotSupportedError(context)
fallback_detail = FallbackDetail(
reason=reason,
message=f'Using `--controller {controller_option}` since `{context}` does not support the controller.',
)
else:
fallback_detail = None
if mode.one_host and any(not isinstance(target, ControllerConfig) for target in targets):
raise ControllerNotSupportedError(controller_fallback[1])
if mode == TargetMode.NO_TARGETS:
targets = []
else:
targets = handle_non_posix_targets(mode, options, targets)
return controller, targets, fallback_detail
def handle_non_posix_targets(
mode: TargetMode,
options: LegacyHostOptions,
targets: list[HostConfig],
) -> list[HostConfig]:
"""Return a list of non-POSIX targets if the target mode is non-POSIX."""
if mode == TargetMode.WINDOWS_INTEGRATION:
if options.windows:
targets = [WindowsRemoteConfig(name=f'windows/{version}', provider=options.remote_provider, arch=options.remote_arch)
for version in options.windows]
else:
targets = [WindowsInventoryConfig(path=options.inventory)]
elif mode == TargetMode.NETWORK_INTEGRATION:
if options.platform:
network_targets = [NetworkRemoteConfig(name=platform, provider=options.remote_provider, arch=options.remote_arch) for platform in options.platform]
for platform, collection in options.platform_collection or []:
for entry in network_targets:
if entry.platform == platform:
entry.collection = collection
for platform, connection in options.platform_connection or []:
for entry in network_targets:
if entry.platform == platform:
entry.connection = connection
targets = t.cast(list[HostConfig], network_targets)
else:
targets = [NetworkInventoryConfig(path=options.inventory)]
return targets
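# --- illustrative note, not part of the original module ------------------------
# A hypothetical network-integration example: given `--platform vyos/1.1.8
# --platform-collection vyos=vyos.vyos`, handle_non_posix_targets() builds one
# NetworkRemoteConfig named 'vyos/1.1.8' and, assuming its platform attribute is
# the 'vyos' prefix of that name, sets its .collection to 'vyos.vyos'.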
def default_targets(
mode: TargetMode,
controller: ControllerHostConfig,
) -> list[HostConfig]:
"""Return a list of default targets for the given target mode."""
targets: list[HostConfig]
if mode == TargetMode.WINDOWS_INTEGRATION:
targets = [WindowsInventoryConfig(path=os.path.abspath(os.path.join(data_context().content.integration_path, 'inventory.winrm')))]
elif mode == TargetMode.NETWORK_INTEGRATION:
targets = [NetworkInventoryConfig(path=os.path.abspath(os.path.join(data_context().content.integration_path, 'inventory.networking')))]
elif mode.multiple_pythons:
targets = t.cast(list[HostConfig], controller.get_default_targets(HostContext(controller_config=controller)))
else:
targets = [ControllerConfig()]
return targets
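# --- illustrative sketch, not part of the original module ----------------------
# An end-to-end pass through get_legacy_host_config(), assuming '3.12' is a
# controller-supported Python in this checkout (otherwise hypothetical):
def _demo_legacy_mapping() -> None:
    """Demonstrate the venv mapping for a controller-supported Python."""
    options = LegacyHostOptions(python='3.12', venv=True)
    controller, targets, fallback = get_legacy_host_config(TargetMode.SHELL, options)
    assert isinstance(controller, OriginConfig)  # --venv with a supported python keeps the origin controller
    assert fallback is None  # no controller fallback was needed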
| 23,101 | Python | .py | 413 | 46.622276 | 160 | 0.683535 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,145 | environments.py | ansible_ansible/test/lib/ansible_test/_internal/cli/environments.py |
"""Command line parsing for test environments."""
from __future__ import annotations
import argparse
import enum
import functools
import typing as t
from ..constants import (
CONTROLLER_PYTHON_VERSIONS,
REMOTE_PROVIDERS,
SECCOMP_CHOICES,
SUPPORTED_PYTHON_VERSIONS,
)
from ..util import (
REMOTE_ARCHITECTURES,
)
from ..completion import (
docker_completion,
network_completion,
remote_completion,
windows_completion,
filter_completion,
)
from ..cli.argparsing import (
CompositeAction,
CompositeActionCompletionFinder,
)
from ..cli.argparsing.actions import (
EnumAction,
)
from ..cli.actions import (
DelegatedControllerAction,
NetworkSshTargetAction,
NetworkTargetAction,
OriginControllerAction,
PosixSshTargetAction,
PosixTargetAction,
SanityPythonTargetAction,
UnitsPythonTargetAction,
WindowsSshTargetAction,
WindowsTargetAction,
)
from ..cli.compat import (
TargetMode,
)
from ..config import (
TerminateMode,
)
from .completers import (
complete_choices,
register_completer,
)
from .converters import (
key_value_type,
)
from .epilog import (
get_epilog,
)
from ..ci import (
get_ci_provider,
)
class ControllerMode(enum.Enum):
"""Type of provisioning to use for the controller."""
NO_DELEGATION = enum.auto()
ORIGIN = enum.auto()
DELEGATED = enum.auto()
def add_environments(
parser: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
controller_mode: ControllerMode,
target_mode: TargetMode,
) -> None:
"""Add arguments for the environments used to run ansible-test and commands it invokes."""
no_environment = controller_mode == ControllerMode.NO_DELEGATION and target_mode == TargetMode.NO_TARGETS
parser.set_defaults(no_environment=no_environment)
if no_environment:
return
parser.set_defaults(target_mode=target_mode)
add_global_options(parser, controller_mode)
add_legacy_environment_options(parser, controller_mode, target_mode)
action_types = add_composite_environment_options(parser, completer, controller_mode, target_mode)
sections = [f'{heading}\n{content}'
for action_type, documentation_state in CompositeAction.documentation_state.items() if action_type in action_types
for heading, content in documentation_state.sections.items()]
if not get_ci_provider().supports_core_ci_auth():
sections.append('Remote provisioning options have been hidden since no Ansible Core CI API key was found.')
sections.append(get_epilog(completer))
parser.formatter_class = argparse.RawDescriptionHelpFormatter
parser.epilog = '\n\n'.join(sections)
def add_global_options(
parser: argparse.ArgumentParser,
controller_mode: ControllerMode,
):
"""Add global options for controlling the test environment that work with both the legacy and composite options."""
global_parser = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='global environment arguments'))
global_parser.add_argument(
'--containers',
metavar='JSON',
help=argparse.SUPPRESS,
)
global_parser.add_argument(
'--pypi-proxy',
action='store_true',
help=argparse.SUPPRESS,
)
global_parser.add_argument(
'--pypi-endpoint',
metavar='URI',
help=argparse.SUPPRESS,
)
global_parser.add_argument(
'--requirements',
action='store_true',
default=False,
help='install command requirements',
)
add_global_remote(global_parser, controller_mode)
add_global_docker(global_parser, controller_mode)
def add_composite_environment_options(
parser: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
controller_mode: ControllerMode,
target_mode: TargetMode,
) -> list[t.Type[CompositeAction]]:
"""Add composite options for controlling the test environment."""
composite_parser = t.cast(argparse.ArgumentParser, parser.add_argument_group(
title='composite environment arguments (mutually exclusive with "environment arguments" above)'))
composite_parser.add_argument(
'--host-path',
help=argparse.SUPPRESS,
)
action_types: list[t.Type[CompositeAction]] = []
def register_action_type(action_type: t.Type[CompositeAction]) -> t.Type[CompositeAction]:
"""Register the provided composite action type and return it."""
action_types.append(action_type)
return action_type
if controller_mode == ControllerMode.NO_DELEGATION:
composite_parser.set_defaults(controller=None)
else:
register_completer(composite_parser.add_argument(
'--controller',
metavar='OPT',
action=register_action_type(DelegatedControllerAction if controller_mode == ControllerMode.DELEGATED else OriginControllerAction),
help='configuration for the controller',
), completer.completer)
if target_mode == TargetMode.NO_TARGETS:
composite_parser.set_defaults(targets=[])
elif target_mode == TargetMode.SHELL:
group = composite_parser.add_mutually_exclusive_group()
register_completer(group.add_argument(
'--target-posix',
metavar='OPT',
action=register_action_type(PosixSshTargetAction),
help='configuration for the target',
), completer.completer)
suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
register_completer(group.add_argument(
'--target-windows',
metavar='OPT',
action=WindowsSshTargetAction if suppress else register_action_type(WindowsSshTargetAction),
help=suppress or 'configuration for the target',
), completer.completer)
register_completer(group.add_argument(
'--target-network',
metavar='OPT',
action=NetworkSshTargetAction if suppress else register_action_type(NetworkSshTargetAction),
help=suppress or 'configuration for the target',
), completer.completer)
else:
if target_mode.multiple_pythons:
target_option = '--target-python'
target_help = 'configuration for the target python interpreter(s)'
elif target_mode == TargetMode.POSIX_INTEGRATION:
target_option = '--target'
target_help = 'configuration for the target'
else:
target_option = '--target'
target_help = 'configuration for the target(s)'
target_actions = {
TargetMode.POSIX_INTEGRATION: PosixTargetAction,
TargetMode.WINDOWS_INTEGRATION: WindowsTargetAction,
TargetMode.NETWORK_INTEGRATION: NetworkTargetAction,
TargetMode.SANITY: SanityPythonTargetAction,
TargetMode.UNITS: UnitsPythonTargetAction,
}
target_action = target_actions[target_mode]
register_completer(composite_parser.add_argument(
target_option,
metavar='OPT',
action=register_action_type(target_action),
help=target_help,
), completer.completer)
return action_types
def add_legacy_environment_options(
parser: argparse.ArgumentParser,
controller_mode: ControllerMode,
target_mode: TargetMode,
):
"""Add legacy options for controlling the test environment."""
environment: argparse.ArgumentParser = parser.add_argument_group( # type: ignore[assignment] # real type private
title='environment arguments (mutually exclusive with "composite environment arguments" below)',
)
add_environments_python(environment, target_mode)
add_environments_host(environment, controller_mode, target_mode)
def add_environments_python(
environments_parser: argparse.ArgumentParser,
target_mode: TargetMode,
) -> None:
"""Add environment arguments to control the Python version(s) used."""
python_versions: tuple[str, ...]
if target_mode.has_python:
python_versions = SUPPORTED_PYTHON_VERSIONS
else:
python_versions = CONTROLLER_PYTHON_VERSIONS
environments_parser.add_argument(
'--python',
metavar='X.Y',
choices=python_versions + ('default',),
help='python version: %s' % ', '.join(python_versions),
)
environments_parser.add_argument(
'--python-interpreter',
metavar='PATH',
help='path to the python interpreter',
)
def add_environments_host(
environments_parser: argparse.ArgumentParser,
controller_mode: ControllerMode,
target_mode: TargetMode,
) -> None:
"""Add environment arguments for the given host and argument modes."""
environments_exclusive_group: argparse.ArgumentParser = environments_parser.add_mutually_exclusive_group() # type: ignore[assignment] # real type private
add_environment_local(environments_exclusive_group)
add_environment_venv(environments_exclusive_group, environments_parser)
if controller_mode == ControllerMode.DELEGATED:
add_environment_remote(environments_exclusive_group, environments_parser, target_mode)
add_environment_docker(environments_exclusive_group, environments_parser, target_mode)
if target_mode == TargetMode.WINDOWS_INTEGRATION:
add_environment_windows(environments_parser)
if target_mode == TargetMode.NETWORK_INTEGRATION:
add_environment_network(environments_parser)
def add_environment_network(
environments_parser: argparse.ArgumentParser,
) -> None:
"""Add environment arguments for running on a windows host."""
register_completer(environments_parser.add_argument(
'--platform',
metavar='PLATFORM',
action='append',
help='network platform/version',
), complete_network_platform)
register_completer(environments_parser.add_argument(
'--platform-collection',
type=key_value_type,
metavar='PLATFORM=COLLECTION',
action='append',
help='collection used to test platform',
), complete_network_platform_collection)
register_completer(environments_parser.add_argument(
'--platform-connection',
type=key_value_type,
metavar='PLATFORM=CONNECTION',
action='append',
help='connection used to test platform',
), complete_network_platform_connection)
environments_parser.add_argument(
'--inventory',
metavar='PATH',
help='path to inventory used for tests',
)
def add_environment_windows(
environments_parser: argparse.ArgumentParser,
) -> None:
"""Add environment arguments for running on a windows host."""
register_completer(environments_parser.add_argument(
'--windows',
metavar='VERSION',
action='append',
help='windows version',
), complete_windows)
environments_parser.add_argument(
'--inventory',
metavar='PATH',
help='path to inventory used for tests',
)
def add_environment_local(
exclusive_parser: argparse.ArgumentParser,
) -> None:
"""Add environment arguments for running on the local (origin) host."""
exclusive_parser.add_argument(
'--local',
action='store_true',
help='run from the local environment',
)
def add_environment_venv(
exclusive_parser: argparse.ArgumentParser,
environments_parser: argparse.ArgumentParser,
) -> None:
"""Add environment arguments for running in ansible-test managed virtual environments."""
exclusive_parser.add_argument(
'--venv',
action='store_true',
help='run from a virtual environment',
)
environments_parser.add_argument(
'--venv-system-site-packages',
action='store_true',
help='enable system site packages',
)
def add_global_docker(
parser: argparse.ArgumentParser,
controller_mode: ControllerMode,
) -> None:
"""Add global options for Docker."""
if controller_mode != ControllerMode.DELEGATED:
parser.set_defaults(
docker_network=None,
docker_terminate=None,
prime_containers=False,
dev_systemd_debug=False,
dev_probe_cgroups=None,
)
return
parser.add_argument(
'--docker-network',
metavar='NET',
help='run using the specified network',
)
parser.add_argument(
'--docker-terminate',
metavar='T',
default=TerminateMode.ALWAYS,
type=TerminateMode,
action=EnumAction,
help='terminate the container: %(choices)s (default: %(default)s)',
)
parser.add_argument(
'--prime-containers',
action='store_true',
help='download containers without running tests',
)
# Docker support isn't related to ansible-core-ci.
# However, ansible-core-ci support is a reasonable indicator that the user may need the `--dev-*` options.
suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
parser.add_argument(
'--dev-systemd-debug',
action='store_true',
help=suppress or 'enable systemd debugging in containers',
)
parser.add_argument(
'--dev-probe-cgroups',
metavar='DIR',
nargs='?',
const='',
help=suppress or 'probe container cgroups, with optional log dir',
)
def add_environment_docker(
exclusive_parser: argparse.ArgumentParser,
environments_parser: argparse.ArgumentParser,
target_mode: TargetMode,
) -> None:
"""Add environment arguments for running in docker containers."""
if target_mode in (TargetMode.POSIX_INTEGRATION, TargetMode.SHELL):
docker_images = sorted(filter_completion(docker_completion()))
else:
docker_images = sorted(filter_completion(docker_completion(), controller_only=True))
register_completer(exclusive_parser.add_argument(
'--docker',
metavar='IMAGE',
nargs='?',
const='default',
help='run from a docker container',
), functools.partial(complete_choices, docker_images))
environments_parser.add_argument(
'--docker-privileged',
action='store_true',
help='run docker container in privileged mode',
)
environments_parser.add_argument(
'--docker-seccomp',
metavar='SC',
choices=SECCOMP_CHOICES,
help='set seccomp confinement for the test container: %(choices)s',
)
environments_parser.add_argument(
'--docker-memory',
metavar='INT',
type=int,
help='memory limit for docker in bytes',
)
def add_global_remote(
parser: argparse.ArgumentParser,
controller_mode: ControllerMode,
) -> None:
"""Add global options for remote instances."""
if controller_mode != ControllerMode.DELEGATED:
parser.set_defaults(
remote_stage=None,
remote_endpoint=None,
remote_terminate=None,
)
return
suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
register_completer(parser.add_argument(
'--remote-stage',
metavar='STAGE',
default='prod',
help=suppress or 'remote stage to use: prod, dev',
), complete_remote_stage)
parser.add_argument(
'--remote-endpoint',
metavar='EP',
help=suppress or 'remote provisioning endpoint to use',
)
parser.add_argument(
'--remote-terminate',
metavar='T',
default=TerminateMode.NEVER,
type=TerminateMode,
action=EnumAction,
help=suppress or 'terminate the remote instance: %(choices)s (default: %(default)s)',
)
def add_environment_remote(
exclusive_parser: argparse.ArgumentParser,
environments_parser: argparse.ArgumentParser,
target_mode: TargetMode,
) -> None:
"""Add environment arguments for running in ansible-core-ci provisioned remote virtual machines."""
if target_mode == TargetMode.POSIX_INTEGRATION:
remote_platforms = get_remote_platform_choices()
elif target_mode == TargetMode.SHELL:
remote_platforms = sorted(set(get_remote_platform_choices()) | set(get_windows_platform_choices()))
else:
remote_platforms = get_remote_platform_choices(True)
suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
register_completer(exclusive_parser.add_argument(
'--remote',
metavar='NAME',
help=suppress or 'run from a remote instance',
), functools.partial(complete_choices, remote_platforms))
environments_parser.add_argument(
'--remote-provider',
metavar='PR',
choices=REMOTE_PROVIDERS,
help=suppress or 'remote provider to use: %(choices)s',
)
environments_parser.add_argument(
'--remote-arch',
metavar='ARCH',
choices=REMOTE_ARCHITECTURES,
help=suppress or 'remote arch to use: %(choices)s',
)
def complete_remote_stage(prefix: str, **_) -> list[str]:
"""Return a list of supported stages matching the given prefix."""
return [stage for stage in ('prod', 'dev') if stage.startswith(prefix)]
def complete_windows(prefix: str, parsed_args: argparse.Namespace, **_) -> list[str]:
"""Return a list of supported Windows versions matching the given prefix, excluding versions already parsed from the command line."""
return [i for i in get_windows_version_choices() if i.startswith(prefix) and (not parsed_args.windows or i not in parsed_args.windows)]
def complete_network_platform(prefix: str, parsed_args: argparse.Namespace, **_) -> list[str]:
"""Return a list of supported network platforms matching the given prefix, excluding platforms already parsed from the command line."""
images = sorted(filter_completion(network_completion()))
return [i for i in images if i.startswith(prefix) and (not parsed_args.platform or i not in parsed_args.platform)]
def complete_network_platform_collection(prefix: str, parsed_args: argparse.Namespace, **_) -> list[str]:
"""Return a list of supported network platforms matching the given prefix, excluding collection platforms already parsed from the command line."""
left = prefix.split('=')[0]
images = sorted(set(image.platform for image in filter_completion(network_completion()).values()))
return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_collection or i not in [x[0] for x in parsed_args.platform_collection])]
def complete_network_platform_connection(prefix: str, parsed_args: argparse.Namespace, **_) -> list[str]:
"""Return a list of supported network platforms matching the given prefix, excluding connection platforms already parsed from the command line."""
left = prefix.split('=')[0]
images = sorted(set(image.platform for image in filter_completion(network_completion()).values()))
return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_connection or i not in [x[0] for x in parsed_args.platform_connection])]
def get_remote_platform_choices(controller: bool = False) -> list[str]:
"""Return a list of supported remote platforms matching the given prefix."""
return sorted(filter_completion(remote_completion(), controller_only=controller))
def get_windows_platform_choices() -> list[str]:
"""Return a list of supported Windows versions matching the given prefix."""
return sorted(f'windows/{windows.version}' for windows in filter_completion(windows_completion()).values())
def get_windows_version_choices() -> list[str]:
"""Return a list of supported Windows versions."""
return sorted(windows.version for windows in filter_completion(windows_completion()).values())
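# --- illustrative note, not part of the original module ------------------------
# The choice helpers above reduce completion metadata to plain name lists, e.g.:
#
#   complete_remote_stage('d')  ->  ['dev']
#   complete_remote_stage('')   ->  ['prod', 'dev']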
| 19,956 | Python | .py | 480 | 34.854167 | 160 | 0.693862 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,146 | actions.py | ansible_ansible/test/lib/ansible_test/_internal/cli/actions.py |
"""Actions for handling composite arguments with argparse."""
from __future__ import annotations
from .argparsing import (
CompositeAction,
NamespaceParser,
)
from .parsers import (
DelegatedControllerParser,
NetworkSshTargetParser,
NetworkTargetParser,
OriginControllerParser,
PosixSshTargetParser,
PosixTargetParser,
SanityPythonTargetParser,
UnitsPythonTargetParser,
WindowsSshTargetParser,
WindowsTargetParser,
)
class OriginControllerAction(CompositeAction):
"""Composite action parser for the controller when the only option is `origin`."""
def create_parser(self) -> NamespaceParser:
"""Return a namespace parser to parse the argument associated with this action."""
return OriginControllerParser()
class DelegatedControllerAction(CompositeAction):
"""Composite action parser for the controller when delegation is supported."""
def create_parser(self) -> NamespaceParser:
"""Return a namespace parser to parse the argument associated with this action."""
return DelegatedControllerParser()
class PosixTargetAction(CompositeAction):
"""Composite action parser for a POSIX target."""
def create_parser(self) -> NamespaceParser:
"""Return a namespace parser to parse the argument associated with this action."""
return PosixTargetParser()
class WindowsTargetAction(CompositeAction):
"""Composite action parser for a Windows target."""
def create_parser(self) -> NamespaceParser:
"""Return a namespace parser to parse the argument associated with this action."""
return WindowsTargetParser()
class NetworkTargetAction(CompositeAction):
"""Composite action parser for a network target."""
def create_parser(self) -> NamespaceParser:
"""Return a namespace parser to parse the argument associated with this action."""
return NetworkTargetParser()
class SanityPythonTargetAction(CompositeAction):
"""Composite action parser for a sanity target."""
def create_parser(self) -> NamespaceParser:
"""Return a namespace parser to parse the argument associated with this action."""
return SanityPythonTargetParser()
class UnitsPythonTargetAction(CompositeAction):
"""Composite action parser for a units target."""
def create_parser(self) -> NamespaceParser:
"""Return a namespace parser to parse the argument associated with this action."""
return UnitsPythonTargetParser()
class PosixSshTargetAction(CompositeAction):
"""Composite action parser for a POSIX SSH target."""
def create_parser(self) -> NamespaceParser:
"""Return a namespace parser to parse the argument associated with this action."""
return PosixSshTargetParser()
class WindowsSshTargetAction(CompositeAction):
"""Composite action parser for a Windows SSH target."""
def create_parser(self) -> NamespaceParser:
"""Return a namespace parser to parse the argument associated with this action."""
return WindowsSshTargetParser()
class NetworkSshTargetAction(CompositeAction):
"""Composite action parser for a network SSH target."""
def create_parser(self) -> NamespaceParser:
"""Return a namespace parser to parse the argument associated with this action."""
return NetworkSshTargetParser()
| 3,366 | Python | .py | 68 | 43.794118 | 90 | 0.750459 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,147 | argcompletion.py | ansible_ansible/test/lib/ansible_test/_internal/cli/argparsing/argcompletion.py |
"""Wrapper around argcomplete providing bug fixes and additional features."""
from __future__ import annotations
import argparse
import enum
import os
import typing as t
class Substitute:
"""Substitute for missing class which accepts all arguments."""
def __init__(self, *args, **kwargs) -> None:
pass
try:
import argcomplete
try:
# argcomplete 3+
# see: https://github.com/kislyuk/argcomplete/commit/bd781cb08512b94966312377186ebc5550f46ae0
from argcomplete.finders import (
CompletionFinder,
default_validator,
)
except ImportError:
# argcomplete <3
from argcomplete import (
CompletionFinder,
default_validator,
)
warn = argcomplete.warn # pylint: disable=invalid-name
except ImportError:
argcomplete = None
CompletionFinder = Substitute
default_validator = Substitute # pylint: disable=invalid-name
warn = Substitute # pylint: disable=invalid-name
class CompType(enum.Enum):
"""
Bash COMP_TYPE argument completion types.
For documentation, see: https://www.gnu.org/software/bash/manual/html_node/Bash-Variables.html#index-COMP_005fTYPE
"""
COMPLETION = '\t'
"""
Standard completion, typically triggered by a single tab.
"""
MENU_COMPLETION = '%'
"""
Menu completion, which cycles through each completion instead of showing a list.
For help using this feature, see: https://stackoverflow.com/questions/12044574/getting-complete-and-menu-complete-to-work-together
"""
LIST = '?'
"""
Standard list, typically triggered by a double tab.
"""
LIST_AMBIGUOUS = '!'
"""
Listing with `show-all-if-ambiguous` set.
For documentation, see https://www.gnu.org/software/bash/manual/html_node/Readline-Init-File-Syntax.html#index-show_002dall_002dif_002dambiguous
For additional details, see: https://unix.stackexchange.com/questions/614123/explanation-of-bash-completion-comp-type
"""
LIST_UNMODIFIED = '@'
"""
Listing with `show-all-if-unmodified` set.
For documentation, see https://www.gnu.org/software/bash/manual/html_node/Readline-Init-File-Syntax.html#index-show_002dall_002dif_002dunmodified
    For additional details, see: https://unix.stackexchange.com/questions/614123/explanation-of-bash-completion-comp-type
"""
@property
def list_mode(self) -> bool:
"""True if completion is running in list mode, otherwise False."""
return self in (CompType.LIST, CompType.LIST_AMBIGUOUS, CompType.LIST_UNMODIFIED)
def register_safe_action(action_type: t.Type[argparse.Action]) -> None:
"""Register the given action as a safe action for argcomplete to use during completion if it is not already registered."""
if argcomplete and action_type not in argcomplete.safe_actions:
if isinstance(argcomplete.safe_actions, set):
# argcomplete 3+
# see: https://github.com/kislyuk/argcomplete/commit/bd781cb08512b94966312377186ebc5550f46ae0
argcomplete.safe_actions.add(action_type)
else:
# argcomplete <3
argcomplete.safe_actions += (action_type,)
def get_comp_type() -> t.Optional[CompType]:
"""Parse the COMP_TYPE environment variable (if present) and return the associated CompType enum value."""
value = os.environ.get('COMP_TYPE')
comp_type = CompType(chr(int(value))) if value else None
return comp_type
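# --- illustrative sketch, not part of the original module ----------------------
# Bash exports COMP_TYPE as the decimal codepoint of the character describing
# the completion, which get_comp_type() decodes; chr(63) == '?'.
def _demo_comp_type() -> None:
    """Hypothetical demonstration of COMP_TYPE decoding."""
    os.environ['COMP_TYPE'] = '63'  # double-tab listing
    assert get_comp_type() is CompType.LIST
    assert get_comp_type().list_mode is True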
class OptionCompletionFinder(CompletionFinder):
"""
Custom completion finder for argcomplete.
It provides support for running completion in list mode, which argcomplete natively handles the same as standard completion.
"""
enabled = bool(argcomplete)
def __init__(self, *args, validator=None, **kwargs) -> None:
if validator:
raise ValueError()
self.comp_type = get_comp_type()
self.list_mode = self.comp_type.list_mode if self.comp_type else False
self.disable_completion_mangling = False
finder = self
def custom_validator(completion, prefix):
"""Completion validator used to optionally bypass validation."""
if finder.disable_completion_mangling:
return True
return default_validator(completion, prefix)
super().__init__(
*args,
validator=custom_validator,
**kwargs,
)
def __call__(self, *args, **kwargs):
if self.enabled:
super().__call__(*args, **kwargs)
def quote_completions(self, completions, cword_prequote, last_wordbreak_pos):
"""Intercept default quoting behavior to optionally block mangling of completion entries."""
if self.disable_completion_mangling:
# Word breaks have already been handled when generating completions, don't mangle them further.
# This is needed in many cases when returning completion lists which lack the existing completion prefix.
last_wordbreak_pos = None
return super().quote_completions(completions, cword_prequote, last_wordbreak_pos)
| 5,166 | Python | .py | 113 | 38.548673 | 149 | 0.692078 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,148 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/cli/argparsing/__init__.py |
"""Completion finder which brings together custom options and completion logic."""
from __future__ import annotations
import abc
import argparse
import os
import re
import typing as t
from .argcompletion import (
OptionCompletionFinder,
get_comp_type,
register_safe_action,
warn,
)
from .parsers import (
Completion,
CompletionError,
CompletionSuccess,
CompletionUnavailable,
DocumentationState,
NamespaceParser,
Parser,
ParserError,
ParserMode,
ParserState,
)
class RegisteredCompletionFinder(OptionCompletionFinder):
"""
Custom option completion finder for argcomplete which allows completion results to be registered.
These registered completions, if provided, are used to filter the final completion results.
This works around a known bug: https://github.com/kislyuk/argcomplete/issues/221
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.registered_completions: t.Optional[list[str]] = None
def completer(
self,
prefix: str,
action: argparse.Action,
parsed_args: argparse.Namespace,
**kwargs,
) -> list[str]:
"""
Return a list of completions for the specified prefix and action.
Use this as the completer function for argcomplete.
"""
kwargs.clear()
del kwargs
completions = self.get_completions(prefix, action, parsed_args)
if action.nargs and not isinstance(action.nargs, int):
# prevent argcomplete from including unrelated arguments in the completion results
self.registered_completions = completions
return completions
@abc.abstractmethod
def get_completions(
self,
prefix: str,
action: argparse.Action,
parsed_args: argparse.Namespace,
) -> list[str]:
"""
Return a list of completions for the specified prefix and action.
Called by the complete function.
"""
def quote_completions(self, completions, cword_prequote, last_wordbreak_pos):
"""Modify completion results before returning them."""
if self.registered_completions is not None:
# If one of the completion handlers registered their results, only allow those exact results to be returned.
# This prevents argcomplete from adding results from other completers when they are known to be invalid.
allowed_completions = set(self.registered_completions)
completions = [completion for completion in completions if completion in allowed_completions]
return super().quote_completions(completions, cword_prequote, last_wordbreak_pos)
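# Illustrative sketch (hypothetical values): if get_completions() registered
# ['alpha', 'beta'] and argcomplete later proposes ['alpha', 'beta', 'stray'],
# the filtering above trims the results to ['alpha', 'beta'] before quoting.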
class CompositeAction(argparse.Action, metaclass=abc.ABCMeta):
"""Base class for actions that parse composite arguments."""
documentation_state: dict[t.Type[CompositeAction], DocumentationState] = {}
def __init__(
self,
*args,
**kwargs,
):
self.definition = self.create_parser()
self.documentation_state[type(self)] = documentation_state = DocumentationState()
self.definition.document(documentation_state)
kwargs.update(dest=self.definition.dest)
super().__init__(*args, **kwargs)
register_safe_action(type(self))
@abc.abstractmethod
def create_parser(self) -> NamespaceParser:
"""Return a namespace parser to parse the argument associated with this action."""
def __call__(
self,
parser,
namespace,
values,
option_string=None,
):
state = ParserState(mode=ParserMode.PARSE, namespaces=[namespace], remainder=values)
try:
self.definition.parse(state)
except ParserError as ex:
error = str(ex)
except CompletionError as ex:
error = ex.message
else:
return
if get_comp_type():
# FUTURE: It may be possible to enhance error handling by surfacing this error message during downstream completion.
return # ignore parse errors during completion to avoid breaking downstream completion
raise argparse.ArgumentError(self, error)
class CompositeActionCompletionFinder(RegisteredCompletionFinder):
"""Completion finder with support for composite argument parsing."""
def get_completions(
self,
prefix: str,
action: argparse.Action,
parsed_args: argparse.Namespace,
) -> list[str]:
"""Return a list of completions appropriate for the given prefix and action, taking into account the arguments that have already been parsed."""
assert isinstance(action, CompositeAction)
state = ParserState(
mode=ParserMode.LIST if self.list_mode else ParserMode.COMPLETE,
remainder=prefix,
namespaces=[parsed_args],
)
answer = complete(action.definition, state)
completions = []
if isinstance(answer, CompletionSuccess):
self.disable_completion_mangling = answer.preserve
completions = answer.completions
if isinstance(answer, CompletionError):
warn(answer.message)
return completions
def detect_file_listing(value: str, mode: ParserMode) -> bool:
"""
Return True if Bash will show a file listing and redraw the prompt, otherwise return False.
If there are no list results, a file listing will be shown if the value after the last `=` or `:` character:
- is empty
- matches a full path
- matches a partial path
Otherwise Bash will play the bell sound and display nothing.
see: https://github.com/kislyuk/argcomplete/issues/328
see: https://github.com/kislyuk/argcomplete/pull/284
"""
listing = False
if mode == ParserMode.LIST:
right = re.split('[=:]', value)[-1]
listing = not right or os.path.exists(right)
if not listing:
directory = os.path.dirname(right)
# noinspection PyBroadException
try:
filenames = os.listdir(directory or '.')
except Exception: # pylint: disable=broad-except
pass
else:
listing = any(filename.startswith(right) for filename in filenames)
return listing
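# Illustrative sketch (hypothetical value): for 'docker=cent' in LIST mode,
# re.split('[=:]', value)[-1] yields 'cent', so this returns True only when
# './cent' exists or some entry in '.' starts with 'cent':
#
#   detect_file_listing('docker=cent', ParserMode.LIST)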
def detect_false_file_completion(value: str, mode: ParserMode) -> bool:
"""
Return True if Bash will provide an incorrect file completion, otherwise return False.
If there are no completion results, a filename will be automatically completed if the value after the last `=` or `:` character:
- matches exactly one partial path
Otherwise Bash will play the bell sound and display nothing.
see: https://github.com/kislyuk/argcomplete/issues/328
see: https://github.com/kislyuk/argcomplete/pull/284
"""
completion = False
if mode == ParserMode.COMPLETE:
completion = True
right = re.split('[=:]', value)[-1]
directory, prefix = os.path.split(right)
# noinspection PyBroadException
try:
filenames = os.listdir(directory or '.')
except Exception: # pylint: disable=broad-except
pass
else:
matches = [filename for filename in filenames if filename.startswith(prefix)]
completion = len(matches) == 1
return completion
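# Illustrative sketch (hypothetical value): in COMPLETE mode, if '.' contains
# exactly one file starting with 'inven' (say 'inventory.ini'), then
# detect_false_file_completion('docker=inven', ParserMode.COMPLETE) returns
# True, since Bash would otherwise "complete" to that unrelated filename.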
def complete(
completer: Parser,
state: ParserState,
) -> Completion:
"""Perform argument completion using the given completer and return the completion result."""
value = state.remainder
answer: Completion
try:
completer.parse(state)
raise ParserError('completion expected')
except CompletionUnavailable as ex:
if detect_file_listing(value, state.mode):
# Displaying a warning before the file listing informs the user it is invalid. Bash will redraw the prompt after the list.
# If the file listing is not shown, a warning could be helpful, but would introduce noise on the terminal since the prompt is not redrawn.
answer = CompletionError(ex.message)
elif detect_false_file_completion(value, state.mode):
            # When the current prefix provides no matches, but matches exactly one file on disk, Bash will perform an incorrect completion.
# Returning multiple invalid matches instead of no matches will prevent Bash from using its own completion logic in this case.
answer = CompletionSuccess(
list_mode=True, # abuse list mode to enable preservation of the literal results
consumed='',
continuation='',
matches=['completion', 'invalid'],
)
else:
answer = ex
except Completion as ex:
answer = ex
return answer
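# Illustrative sketch (assumes ChoicesParser is imported from .parsers):
# completing the prefix 'ye' against the choices ['yes', 'no'] produces a
# CompletionSuccess whose completions list is ['yes']:
#
#   answer = complete(ChoicesParser(['yes', 'no']),
#                     ParserState(mode=ParserMode.COMPLETE, remainder='ye'))
#   assert isinstance(answer, CompletionSuccess)
#   assert answer.completions == ['yes']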
| 8,922
|
Python
|
.py
| 208
| 34.625
| 152
| 0.671019
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,149
|
parsers.py
|
ansible_ansible/test/lib/ansible_test/_internal/cli/argparsing/parsers.py
|
"""General purpose composite argument parsing and completion."""
from __future__ import annotations
import abc
import collections.abc as c
import contextlib
import dataclasses
import enum
import os
import re
import typing as t
# NOTE: When choosing delimiters, take into account Bash and argcomplete behavior.
#
# Recommended characters for assignment and/or continuation: `/` `:` `=`
#
# The recommended assignment_character list is due to how argcomplete handles continuation characters.
# see: https://github.com/kislyuk/argcomplete/blob/5a20d6165fbb4d4d58559378919b05964870cc16/argcomplete/__init__.py#L557-L558
PAIR_DELIMITER = ','
ASSIGNMENT_DELIMITER = '='
PATH_DELIMITER = '/'
# This class was originally frozen. However, that causes issues when running under Python 3.11.
# See: https://github.com/python/cpython/issues/99856
@dataclasses.dataclass
class Completion(Exception):
"""Base class for argument completion results."""
@dataclasses.dataclass
class CompletionUnavailable(Completion):
"""Argument completion unavailable."""
message: str = 'No completions available.'
@dataclasses.dataclass
class CompletionError(Completion):
"""Argument completion error."""
message: t.Optional[str] = None
@dataclasses.dataclass
class CompletionSuccess(Completion):
"""Successful argument completion result."""
list_mode: bool
consumed: str
continuation: str
matches: list[str] = dataclasses.field(default_factory=list)
@property
def preserve(self) -> bool:
"""
True if argcomplete should not mangle completion values, otherwise False.
Only used when more than one completion exists to avoid overwriting the word undergoing completion.
"""
return len(self.matches) > 1 and self.list_mode
@property
def completions(self) -> list[str]:
"""List of completion values to return to argcomplete."""
completions = self.matches
continuation = '' if self.list_mode else self.continuation
if not self.preserve:
# include the existing prefix to avoid rewriting the word undergoing completion
completions = [f'{self.consumed}{completion}{continuation}' for completion in completions]
return completions
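# Illustrative sketch (hypothetical values): with consumed='docker=' and
# continuation=',', a single-match result such as matches=['centos8'] becomes
# ['docker=centos8,'], preserving the prefix of the word being completed.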
class ParserMode(enum.Enum):
"""Mode the parser is operating in."""
PARSE = enum.auto()
COMPLETE = enum.auto()
LIST = enum.auto()
class ParserError(Exception):
"""Base class for all parsing exceptions."""
@dataclasses.dataclass
class ParserBoundary:
"""Boundary details for parsing composite input."""
delimiters: str
required: bool
match: t.Optional[str] = None
ready: bool = True
@dataclasses.dataclass
class ParserState:
"""State of the composite argument parser."""
mode: ParserMode
remainder: str = ''
consumed: str = ''
boundaries: list[ParserBoundary] = dataclasses.field(default_factory=list)
namespaces: list[t.Any] = dataclasses.field(default_factory=list)
parts: list[str] = dataclasses.field(default_factory=list)
@property
def incomplete(self) -> bool:
"""True if parsing is incomplete (unparsed input remains), otherwise False."""
return self.remainder is not None
def match(self, value: str, choices: list[str]) -> bool:
"""Return True if the given value matches the provided choices, taking into account parsing boundaries, otherwise return False."""
if self.current_boundary:
delimiters, delimiter = self.current_boundary.delimiters, self.current_boundary.match
else:
delimiters, delimiter = '', None
for choice in choices:
if choice.rstrip(delimiters) == choice:
# choice is not delimited
if value == choice:
return True # value matched
else:
# choice is delimited
if f'{value}{delimiter}' == choice:
return True # value and delimiter matched
return False
def read(self) -> str:
"""Read and return the next input segment, taking into account parsing boundaries."""
delimiters = "".join(boundary.delimiters for boundary in self.boundaries)
if delimiters:
pattern = '([' + re.escape(delimiters) + '])'
regex = re.compile(pattern)
parts = regex.split(self.remainder, 1)
else:
parts = [self.remainder]
if len(parts) > 1:
value, delimiter, remainder = parts
else:
value, delimiter, remainder = parts[0], None, None
for boundary in reversed(self.boundaries):
if delimiter and delimiter in boundary.delimiters:
boundary.match = delimiter
self.consumed += value + delimiter
break
boundary.match = None
boundary.ready = False
if boundary.required:
break
self.remainder = remainder
return value
@property
def root_namespace(self) -> t.Any:
"""THe root namespace."""
return self.namespaces[0]
@property
def current_namespace(self) -> t.Any:
"""The current namespace."""
return self.namespaces[-1]
@property
def current_boundary(self) -> t.Optional[ParserBoundary]:
"""The current parser boundary, if any, otherwise None."""
return self.boundaries[-1] if self.boundaries else None
def set_namespace(self, namespace: t.Any) -> None:
"""Set the current namespace."""
self.namespaces.append(namespace)
@contextlib.contextmanager
def delimit(self, delimiters: str, required: bool = True) -> c.Iterator[ParserBoundary]:
"""Context manager for delimiting parsing of input."""
boundary = ParserBoundary(delimiters=delimiters, required=required)
self.boundaries.append(boundary)
try:
yield boundary
finally:
self.boundaries.pop()
if boundary.required and not boundary.match:
raise ParserError('required delimiter not found, hit up-level delimiter or end of input instead')
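# Illustrative sketch: combining delimit() and read() to split 'user@host'
# at a required '@' boundary:
#
#   state = ParserState(mode=ParserMode.PARSE, remainder='user@host')
#   with state.delimit('@') as boundary:
#       user = state.read()  # -> 'user', with boundary.match == '@'
#   host = state.read()      # -> 'host'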
@dataclasses.dataclass
class DocumentationState:
"""State of the composite argument parser's generated documentation."""
sections: dict[str, str] = dataclasses.field(default_factory=dict)
class Parser(metaclass=abc.ABCMeta):
"""Base class for all composite argument parsers."""
@abc.abstractmethod
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
raise Exception(f'Undocumented parser: {type(self)}')
class MatchConditions(enum.Flag):
"""Acceptable condition(s) for matching user input to available choices."""
CHOICE = enum.auto()
"""Match any choice."""
ANY = enum.auto()
"""Match any non-empty string."""
NOTHING = enum.auto()
"""Match an empty string which is not followed by a boundary match."""
class DynamicChoicesParser(Parser, metaclass=abc.ABCMeta):
"""Base class for composite argument parsers which use a list of choices that can be generated during completion."""
def __init__(self, conditions: MatchConditions = MatchConditions.CHOICE) -> None:
self.conditions = conditions
@abc.abstractmethod
def get_choices(self, value: str) -> list[str]:
"""Return a list of valid choices based on the given input value."""
def no_completion_match(self, value: str) -> CompletionUnavailable: # pylint: disable=unused-argument
"""Return an instance of CompletionUnavailable when no match was found for the given value."""
return CompletionUnavailable()
def no_choices_available(self, value: str) -> ParserError: # pylint: disable=unused-argument
"""Return an instance of ParserError when parsing fails and no choices are available."""
return ParserError('No choices available.')
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
value = state.read()
choices = self.get_choices(value)
if state.mode == ParserMode.PARSE or state.incomplete:
if self.conditions & MatchConditions.CHOICE and state.match(value, choices):
return value
if self.conditions & MatchConditions.ANY and value:
return value
if self.conditions & MatchConditions.NOTHING and not value and state.current_boundary and not state.current_boundary.match:
return value
if state.mode == ParserMode.PARSE:
if choices:
raise ParserError(f'"{value}" not in: {", ".join(choices)}')
raise self.no_choices_available(value)
raise CompletionUnavailable()
matches = [choice for choice in choices if choice.startswith(value)]
if not matches:
raise self.no_completion_match(value)
continuation = state.current_boundary.delimiters if state.current_boundary and state.current_boundary.required else ''
raise CompletionSuccess(
list_mode=state.mode == ParserMode.LIST,
consumed=state.consumed,
continuation=continuation,
matches=matches,
)
class ChoicesParser(DynamicChoicesParser):
"""Composite argument parser which relies on a static list of choices."""
def __init__(self, choices: list[str], conditions: MatchConditions = MatchConditions.CHOICE) -> None:
self.choices = choices
super().__init__(conditions=conditions)
def get_choices(self, value: str) -> list[str]:
"""Return a list of valid choices based on the given input value."""
return self.choices
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
return '|'.join(self.choices)
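# Illustrative sketch: parsing a static choice in PARSE mode:
#
#   state = ParserState(mode=ParserMode.PARSE, remainder='no')
#   assert ChoicesParser(['yes', 'no']).parse(state) == 'no'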
class EnumValueChoicesParser(ChoicesParser):
"""Composite argument parser which relies on a static list of choices derived from the values of an enum."""
def __init__(self, enum_type: t.Type[enum.Enum], conditions: MatchConditions = MatchConditions.CHOICE) -> None:
self.enum_type = enum_type
super().__init__(choices=[str(item.value) for item in enum_type], conditions=conditions)
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
value = super().parse(state)
return self.enum_type(value)
class IntegerParser(DynamicChoicesParser):
"""Composite argument parser for integers."""
PATTERN = re.compile('^[1-9][0-9]*$')
def __init__(self, maximum: t.Optional[int] = None) -> None:
self.maximum = maximum
super().__init__()
def get_choices(self, value: str) -> list[str]:
"""Return a list of valid choices based on the given input value."""
if not value:
numbers = list(range(1, 10))
elif self.PATTERN.search(value):
int_prefix = int(value)
base = int_prefix * 10
numbers = [int_prefix] + [base + i for i in range(0, 10)]
else:
numbers = []
# NOTE: the minimum is currently fixed at 1
if self.maximum is not None:
numbers = [n for n in numbers if n <= self.maximum]
return [str(n) for n in numbers]
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
value = super().parse(state)
return int(value)
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
return '{integer}'
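# Illustrative sketch: completion choices expand one digit at a time, so
# IntegerParser(maximum=25).get_choices('2') yields
# ['2', '20', '21', '22', '23', '24', '25'].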
class BooleanParser(ChoicesParser):
"""Composite argument parser for boolean (yes/no) values."""
def __init__(self) -> None:
super().__init__(['yes', 'no'])
def parse(self, state: ParserState) -> bool:
"""Parse the input from the given state and return the result."""
value = super().parse(state)
return value == 'yes'
class AnyParser(ChoicesParser):
"""Composite argument parser which accepts any input value."""
def __init__(self, nothing: bool = False, no_match_message: t.Optional[str] = None) -> None:
self.no_match_message = no_match_message
conditions = MatchConditions.ANY
if nothing:
conditions |= MatchConditions.NOTHING
super().__init__([], conditions=conditions)
def no_completion_match(self, value: str) -> CompletionUnavailable:
"""Return an instance of CompletionUnavailable when no match was found for the given value."""
if self.no_match_message:
return CompletionUnavailable(message=self.no_match_message)
return super().no_completion_match(value)
def no_choices_available(self, value: str) -> ParserError:
"""Return an instance of ParserError when parsing fails and no choices are available."""
if self.no_match_message:
return ParserError(self.no_match_message)
return super().no_choices_available(value)
class RelativePathNameParser(DynamicChoicesParser):
"""Composite argument parser for relative path names."""
RELATIVE_NAMES = ['.', '..']
def __init__(self, choices: list[str]) -> None:
self.choices = choices
super().__init__()
def get_choices(self, value: str) -> list[str]:
"""Return a list of valid choices based on the given input value."""
choices = list(self.choices)
if value in self.RELATIVE_NAMES:
# complete relative names, but avoid suggesting them unless the current name is relative
# unfortunately this will be sorted in reverse of what bash presents ("../ ./" instead of "./ ../")
choices.extend(f'{item}{PATH_DELIMITER}' for item in self.RELATIVE_NAMES)
return choices
class FileParser(Parser):
"""Composite argument parser for absolute or relative file paths."""
def parse(self, state: ParserState) -> str:
"""Parse the input from the given state and return the result."""
if state.mode == ParserMode.PARSE:
path = AnyParser().parse(state)
if not os.path.isfile(path):
raise ParserError(f'Not a file: {path}')
else:
path = ''
with state.delimit(PATH_DELIMITER, required=False) as boundary: # type: ParserBoundary
while boundary.ready:
directory = path or '.'
try:
with os.scandir(directory) as scan: # type: c.Iterator[os.DirEntry]
choices = [f'{item.name}{PATH_DELIMITER}' if item.is_dir() else item.name for item in scan]
except OSError:
choices = []
if not path:
choices.append(PATH_DELIMITER) # allow absolute paths
choices.append('../') # suggest relative paths
part = RelativePathNameParser(choices).parse(state)
path += f'{part}{boundary.match or ""}'
return path
class AbsolutePathParser(Parser):
"""Composite argument parser for absolute file paths. Paths are only verified for proper syntax, not for existence."""
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
path = ''
with state.delimit(PATH_DELIMITER, required=False) as boundary: # type: ParserBoundary
while boundary.ready:
if path:
path += AnyParser(nothing=True).parse(state)
else:
path += ChoicesParser([PATH_DELIMITER]).parse(state)
path += boundary.match or ''
return path
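# Illustrative sketch: parsing '/tmp/results' consumes the leading '/' via
# ChoicesParser, then each path segment via AnyParser:
#
#   state = ParserState(mode=ParserMode.PARSE, remainder='/tmp/results')
#   assert AbsolutePathParser().parse(state) == '/tmp/results'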
class NamespaceParser(Parser, metaclass=abc.ABCMeta):
"""Base class for composite argument parsers that store their results in a namespace."""
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
namespace = state.current_namespace
current = getattr(namespace, self.dest)
if current and self.limit_one:
if state.mode == ParserMode.PARSE:
raise ParserError('Option cannot be specified more than once.')
raise CompletionError('Option cannot be specified more than once.')
value = self.get_value(state)
if self.use_list:
if not current:
current = []
setattr(namespace, self.dest, current)
current.append(value)
else:
setattr(namespace, self.dest, value)
return value
def get_value(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result, without storing the result in the namespace."""
return super().parse(state)
@property
def use_list(self) -> bool:
"""True if the destination is a list, otherwise False."""
return False
@property
def limit_one(self) -> bool:
"""True if only one target is allowed, otherwise False."""
return not self.use_list
@property
@abc.abstractmethod
def dest(self) -> str:
"""The name of the attribute where the value should be stored."""
class NamespaceWrappedParser(NamespaceParser):
"""Composite argument parser that wraps a non-namespace parser and stores the result in a namespace."""
def __init__(self, dest: str, parser: Parser) -> None:
self._dest = dest
self.parser = parser
def get_value(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result, without storing the result in the namespace."""
return self.parser.parse(state)
@property
def dest(self) -> str:
"""The name of the attribute where the value should be stored."""
return self._dest
class KeyValueParser(Parser, metaclass=abc.ABCMeta):
"""Base class for key/value composite argument parsers."""
@abc.abstractmethod
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of key names and value parsers."""
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
namespace = state.current_namespace
parsers = self.get_parsers(state)
keys = list(parsers)
with state.delimit(PAIR_DELIMITER, required=False) as pair: # type: ParserBoundary
while pair.ready:
with state.delimit(ASSIGNMENT_DELIMITER):
key = ChoicesParser(keys).parse(state)
value = parsers[key].parse(state)
setattr(namespace, key, value)
keys.remove(key)
return namespace
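# Illustrative sketch (hypothetical subclass): a KeyValueParser whose
# get_parsers() returns {'python': ..., 'memory': ...} parses input such as
# 'python=3.12,memory=512', storing each value on the current namespace and
# rejecting repeated keys (each key is removed from the choices once parsed).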
class PairParser(Parser, metaclass=abc.ABCMeta):
"""Base class for composite argument parsers consisting of a left and right argument parser, with input separated by a delimiter."""
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
namespace = self.create_namespace()
state.set_namespace(namespace)
with state.delimit(self.delimiter, self.required) as boundary: # type: ParserBoundary
choice = self.get_left_parser(state).parse(state)
if boundary.match:
self.get_right_parser(choice).parse(state)
return namespace
@property
def required(self) -> bool:
"""True if the delimiter (and thus right parser) is required, otherwise False."""
return False
@property
def delimiter(self) -> str:
"""The delimiter to use between the left and right parser."""
return PAIR_DELIMITER
@abc.abstractmethod
def create_namespace(self) -> t.Any:
"""Create and return a namespace."""
@abc.abstractmethod
def get_left_parser(self, state: ParserState) -> Parser:
"""Return the parser for the left side."""
@abc.abstractmethod
def get_right_parser(self, choice: t.Any) -> Parser:
"""Return the parser for the right side."""
class TypeParser(Parser, metaclass=abc.ABCMeta):
"""Base class for composite argument parsers which parse a type name, a colon and then parse results based on the type given by the type name."""
def get_parsers(self, state: ParserState) -> dict[str, Parser]: # pylint: disable=unused-argument
"""Return a dictionary of type names and type parsers."""
return self.get_stateless_parsers()
@abc.abstractmethod
def get_stateless_parsers(self) -> dict[str, Parser]:
"""Return a dictionary of type names and type parsers."""
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
parsers = self.get_parsers(state)
with state.delimit(':'):
key = ChoicesParser(list(parsers)).parse(state)
value = parsers[key].parse(state)
return value
| 21,490
|
Python
|
.py
| 438
| 40.305936
| 149
| 0.655822
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,150
|
actions.py
|
ansible_ansible/test/lib/ansible_test/_internal/cli/argparsing/actions.py
|
"""Actions for argparse."""
from __future__ import annotations
import argparse
import enum
import typing as t
class EnumAction(argparse.Action):
"""Parse an enum using the lowercase enum names."""
def __init__(self, **kwargs: t.Any) -> None:
self.enum_type: t.Type[enum.Enum] = kwargs.pop('type', None)
kwargs.setdefault('choices', tuple(e.name.lower() for e in self.enum_type))
super().__init__(**kwargs)
def __call__(self, parser, namespace, values, option_string=None):
value = self.enum_type[values.upper()]
setattr(namespace, self.dest, value)
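# Illustrative sketch (hypothetical enum): lowercase enum names map to members:
#
#   class Color(enum.Enum):
#       RED = 1
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--color', action=EnumAction, type=Color)
#   assert parser.parse_args(['--color', 'red']).color is Color.RED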
| 606
|
Python
|
.py
| 14
| 38.214286
| 83
| 0.669506
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,151
|
base_argument_parsers.py
|
ansible_ansible/test/lib/ansible_test/_internal/cli/parsers/base_argument_parsers.py
|
"""Base classes for the primary parsers for composite command line arguments."""
from __future__ import annotations
import abc
import typing as t
from ..argparsing.parsers import (
CompletionError,
NamespaceParser,
ParserState,
)
class ControllerNamespaceParser(NamespaceParser, metaclass=abc.ABCMeta):
"""Base class for controller namespace parsers."""
@property
def dest(self) -> str:
"""The name of the attribute where the value should be stored."""
return 'controller'
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
if state.root_namespace.targets:
raise ControllerRequiredFirstError()
return super().parse(state)
class TargetNamespaceParser(NamespaceParser, metaclass=abc.ABCMeta):
"""Base class for target namespace parsers involving a single target."""
@property
def option_name(self) -> str:
"""The option name used for this parser."""
return '--target'
@property
def dest(self) -> str:
"""The name of the attribute where the value should be stored."""
return 'targets'
@property
def use_list(self) -> bool:
"""True if the destination is a list, otherwise False."""
return True
@property
def limit_one(self) -> bool:
"""True if only one target is allowed, otherwise False."""
return True
class TargetsNamespaceParser(NamespaceParser, metaclass=abc.ABCMeta):
"""Base class for controller namespace parsers involving multiple targets."""
@property
def option_name(self) -> str:
"""The option name used for this parser."""
return '--target'
@property
def dest(self) -> str:
"""The name of the attribute where the value should be stored."""
return 'targets'
@property
def use_list(self) -> bool:
"""True if the destination is a list, otherwise False."""
return True
class ControllerRequiredFirstError(CompletionError):
"""Exception raised when controller and target options are specified out-of-order."""
def __init__(self) -> None:
super().__init__('The `--controller` option must be specified before `--target` option(s).')
| 2,288
|
Python
|
.py
| 56
| 34.625
| 100
| 0.678878
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,152
|
__init__.py
|
ansible_ansible/test/lib/ansible_test/_internal/cli/parsers/__init__.py
|
"""Composite argument parsers for ansible-test specific command-line arguments."""
from __future__ import annotations
import typing as t
from ...constants import (
SUPPORTED_PYTHON_VERSIONS,
)
from ...ci import (
get_ci_provider,
)
from ...host_configs import (
ControllerConfig,
NetworkConfig,
NetworkInventoryConfig,
PosixConfig,
WindowsConfig,
WindowsInventoryConfig,
)
from ..argparsing.parsers import (
DocumentationState,
Parser,
ParserState,
TypeParser,
)
from .value_parsers import (
PythonParser,
)
from .host_config_parsers import (
ControllerParser,
DockerParser,
NetworkInventoryParser,
NetworkRemoteParser,
OriginParser,
PosixRemoteParser,
PosixSshParser,
WindowsInventoryParser,
WindowsRemoteParser,
)
from .base_argument_parsers import (
ControllerNamespaceParser,
TargetNamespaceParser,
TargetsNamespaceParser,
)
class OriginControllerParser(ControllerNamespaceParser, TypeParser):
"""Composite argument parser for the controller when delegation is not supported."""
def get_stateless_parsers(self) -> dict[str, Parser]:
"""Return a dictionary of type names and type parsers."""
return dict(
origin=OriginParser(),
)
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
section = '--controller options:'
state.sections[section] = '' # place this section before the sections created by the parsers below
state.sections[section] = '\n'.join([f' {name}:{parser.document(state)}' for name, parser in self.get_stateless_parsers().items()])
return None
class DelegatedControllerParser(ControllerNamespaceParser, TypeParser):
"""Composite argument parser for the controller when delegation is supported."""
def get_stateless_parsers(self) -> dict[str, Parser]:
"""Return a dictionary of type names and type parsers."""
parsers: dict[str, Parser] = dict(
origin=OriginParser(),
docker=DockerParser(controller=True),
)
if get_ci_provider().supports_core_ci_auth():
parsers.update(
remote=PosixRemoteParser(controller=True),
)
return parsers
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
section = '--controller options:'
state.sections[section] = '' # place this section before the sections created by the parsers below
state.sections[section] = '\n'.join([f' {name}:{parser.document(state)}' for name, parser in self.get_stateless_parsers().items()])
return None
class PosixTargetParser(TargetNamespaceParser, TypeParser):
"""Composite argument parser for a POSIX target."""
def get_stateless_parsers(self) -> dict[str, Parser]:
"""Return a dictionary of type names and type parsers."""
parsers: dict[str, Parser] = dict(
controller=ControllerParser(),
docker=DockerParser(controller=False),
)
if get_ci_provider().supports_core_ci_auth():
parsers.update(
remote=PosixRemoteParser(controller=False),
)
parsers.update(
ssh=PosixSshParser(),
)
return parsers
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
section = f'{self.option_name} options (choose one):'
state.sections[section] = '' # place this section before the sections created by the parsers below
state.sections[section] = '\n'.join([f' {name}:{parser.document(state)}' for name, parser in self.get_stateless_parsers().items()])
return None
class WindowsTargetParser(TargetsNamespaceParser, TypeParser):
"""Composite argument parser for a Windows target."""
@property
def allow_inventory(self) -> bool:
"""True if inventory is allowed, otherwise False."""
return True
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of type names and type parsers."""
return self.get_internal_parsers(state.root_namespace.targets)
def get_stateless_parsers(self) -> dict[str, Parser]:
"""Return a dictionary of type names and type parsers."""
return self.get_internal_parsers([])
def get_internal_parsers(self, targets: list[WindowsConfig]) -> dict[str, Parser]:
"""Return a dictionary of type names and type parsers."""
parsers: dict[str, Parser] = {}
if self.allow_inventory and not targets:
parsers.update(
inventory=WindowsInventoryParser(),
)
if not targets or not any(isinstance(target, WindowsInventoryConfig) for target in targets):
if get_ci_provider().supports_core_ci_auth():
parsers.update(
remote=WindowsRemoteParser(),
)
return parsers
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
section = f'{self.option_name} options (choose one):'
state.sections[section] = '' # place this section before the sections created by the parsers below
state.sections[section] = '\n'.join([f' {name}:{parser.document(state)}' for name, parser in self.get_stateless_parsers().items()])
return None
class NetworkTargetParser(TargetsNamespaceParser, TypeParser):
"""Composite argument parser for a network target."""
@property
def allow_inventory(self) -> bool:
"""True if inventory is allowed, otherwise False."""
return True
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of type names and type parsers."""
return self.get_internal_parsers(state.root_namespace.targets)
def get_stateless_parsers(self) -> dict[str, Parser]:
"""Return a dictionary of type names and type parsers."""
return self.get_internal_parsers([])
def get_internal_parsers(self, targets: list[NetworkConfig]) -> dict[str, Parser]:
"""Return a dictionary of type names and type parsers."""
parsers: dict[str, Parser] = {}
if self.allow_inventory and not targets:
parsers.update(
inventory=NetworkInventoryParser(),
)
if not targets or not any(isinstance(target, NetworkInventoryConfig) for target in targets):
if get_ci_provider().supports_core_ci_auth():
parsers.update(
remote=NetworkRemoteParser(),
)
return parsers
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
section = f'{self.option_name} options (choose one):'
state.sections[section] = '' # place this section before the sections created by the parsers below
state.sections[section] = '\n'.join([f' {name}:{parser.document(state)}' for name, parser in self.get_stateless_parsers().items()])
return None
class PythonTargetParser(TargetsNamespaceParser, Parser):
"""Composite argument parser for a Python target."""
def __init__(self, allow_venv: bool) -> None:
super().__init__()
self.allow_venv = allow_venv
@property
def option_name(self) -> str:
"""The option name used for this parser."""
return '--target-python'
def get_value(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result, without storing the result in the namespace."""
versions = list(SUPPORTED_PYTHON_VERSIONS)
for target in state.root_namespace.targets or []: # type: PosixConfig
versions.remove(target.python.version)
parser = PythonParser(versions, allow_venv=self.allow_venv, allow_default=True)
python = parser.parse(state)
value = ControllerConfig(python=python)
return value
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
section = f'{self.option_name} options (choose one):'
state.sections[section] = '\n'.join([
f' {PythonParser(SUPPORTED_PYTHON_VERSIONS, allow_venv=False, allow_default=True).document(state)} # non-origin controller',
f' {PythonParser(SUPPORTED_PYTHON_VERSIONS, allow_venv=True, allow_default=True).document(state)} # origin controller',
])
return None
class SanityPythonTargetParser(PythonTargetParser):
"""Composite argument parser for a sanity Python target."""
def __init__(self) -> None:
super().__init__(allow_venv=False)
class UnitsPythonTargetParser(PythonTargetParser):
"""Composite argument parser for a units Python target."""
def __init__(self) -> None:
super().__init__(allow_venv=True)
class PosixSshTargetParser(PosixTargetParser):
"""Composite argument parser for a POSIX SSH target."""
@property
def option_name(self) -> str:
"""The option name used for this parser."""
return '--target-posix'
class WindowsSshTargetParser(WindowsTargetParser):
"""Composite argument parser for a Windows SSH target."""
@property
def option_name(self) -> str:
"""The option name used for this parser."""
return '--target-windows'
@property
def allow_inventory(self) -> bool:
"""True if inventory is allowed, otherwise False."""
return False
@property
def limit_one(self) -> bool:
"""True if only one target is allowed, otherwise False."""
return True
class NetworkSshTargetParser(NetworkTargetParser):
"""Composite argument parser for a network SSH target."""
@property
def option_name(self) -> str:
"""The option name used for this parser."""
return '--target-network'
@property
def allow_inventory(self) -> bool:
"""True if inventory is allowed, otherwise False."""
return False
@property
def limit_one(self) -> bool:
"""True if only one target is allowed, otherwise False."""
return True
| 10,487
|
Python
|
.py
| 226
| 38.606195
| 140
| 0.668141
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,153
|
host_config_parsers.py
|
ansible_ansible/test/lib/ansible_test/_internal/cli/parsers/host_config_parsers.py
|
"""Composite parsers for the various types of hosts."""
from __future__ import annotations
import typing as t
from ...completion import (
docker_completion,
network_completion,
remote_completion,
windows_completion,
filter_completion,
)
from ...host_configs import (
ControllerConfig,
DockerConfig,
NetworkInventoryConfig,
NetworkRemoteConfig,
OriginConfig,
PosixRemoteConfig,
PosixSshConfig,
WindowsInventoryConfig,
WindowsRemoteConfig,
)
from ..compat import (
get_fallback_remote_controller,
)
from ..argparsing.parsers import (
ChoicesParser,
DocumentationState,
FileParser,
MatchConditions,
NamespaceWrappedParser,
PairParser,
Parser,
ParserError,
ParserState,
)
from .value_parsers import (
PlatformParser,
SshConnectionParser,
)
from .key_value_parsers import (
ControllerKeyValueParser,
DockerKeyValueParser,
EmptyKeyValueParser,
NetworkRemoteKeyValueParser,
OriginKeyValueParser,
PosixRemoteKeyValueParser,
PosixSshKeyValueParser,
WindowsRemoteKeyValueParser,
)
from .helpers import (
get_docker_pythons,
get_remote_pythons,
)
class OriginParser(Parser):
"""Composite argument parser for the origin."""
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
namespace = OriginConfig()
state.set_namespace(namespace)
parser = OriginKeyValueParser()
parser.parse(state)
return namespace
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
return OriginKeyValueParser().document(state)
class ControllerParser(Parser):
"""Composite argument parser for the controller."""
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
namespace = ControllerConfig()
state.set_namespace(namespace)
parser = ControllerKeyValueParser()
parser.parse(state)
return namespace
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
return ControllerKeyValueParser().document(state)
class DockerParser(PairParser):
"""Composite argument parser for a docker host."""
def __init__(self, controller: bool) -> None:
self.controller = controller
def create_namespace(self) -> t.Any:
"""Create and return a namespace."""
return DockerConfig()
def get_left_parser(self, state: ParserState) -> Parser:
"""Return the parser for the left side."""
return NamespaceWrappedParser('name', ChoicesParser(list(filter_completion(docker_completion(), controller_only=self.controller)),
conditions=MatchConditions.CHOICE | MatchConditions.ANY))
def get_right_parser(self, choice: t.Any) -> Parser:
"""Return the parser for the right side."""
return DockerKeyValueParser(choice, self.controller)
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
value: DockerConfig = super().parse(state)
if not value.python and not get_docker_pythons(value.name, self.controller, True):
raise ParserError(f'Python version required for docker image: {value.name}')
return value
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
default = 'default'
content = '\n'.join([f' {image} ({", ".join(get_docker_pythons(image, self.controller, False))})'
for image, item in filter_completion(docker_completion(), controller_only=self.controller).items()])
content += '\n'.join([
'',
' {image} # python must be specified for custom images',
])
state.sections[f'{"controller" if self.controller else "target"} docker images and supported python version (choose one):'] = content
return f'{{image}}[,{DockerKeyValueParser(default, self.controller).document(state)}]'
class PosixRemoteParser(PairParser):
"""Composite argument parser for a POSIX remote host."""
def __init__(self, controller: bool) -> None:
self.controller = controller
def create_namespace(self) -> t.Any:
"""Create and return a namespace."""
return PosixRemoteConfig()
def get_left_parser(self, state: ParserState) -> Parser:
"""Return the parser for the left side."""
return NamespaceWrappedParser('name', PlatformParser(list(filter_completion(remote_completion(), controller_only=self.controller))))
def get_right_parser(self, choice: t.Any) -> Parser:
"""Return the parser for the right side."""
return PosixRemoteKeyValueParser(choice, self.controller)
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
value: PosixRemoteConfig = super().parse(state)
if not value.python and not get_remote_pythons(value.name, self.controller, True):
raise ParserError(f'Python version required for remote: {value.name}')
return value
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
default = get_fallback_remote_controller()
content = '\n'.join([f' {name} ({", ".join(get_remote_pythons(name, self.controller, False))})'
for name, item in filter_completion(remote_completion(), controller_only=self.controller).items()])
content += '\n'.join([
'',
' {platform}/{version} # python must be specified for unknown systems',
])
state.sections[f'{"controller" if self.controller else "target"} remote systems and supported python versions (choose one):'] = content
return f'{{system}}[,{PosixRemoteKeyValueParser(default, self.controller).document(state)}]'
class WindowsRemoteParser(PairParser):
"""Composite argument parser for a Windows remote host."""
def create_namespace(self) -> t.Any:
"""Create and return a namespace."""
return WindowsRemoteConfig()
def get_left_parser(self, state: ParserState) -> Parser:
"""Return the parser for the left side."""
names = list(filter_completion(windows_completion()))
for target in state.root_namespace.targets or []: # type: WindowsRemoteConfig
names.remove(target.name)
return NamespaceWrappedParser('name', PlatformParser(names))
def get_right_parser(self, choice: t.Any) -> Parser:
"""Return the parser for the right side."""
return WindowsRemoteKeyValueParser()
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
content = '\n'.join([f' {name}' for name, item in filter_completion(windows_completion()).items()])
content += '\n'.join([
'',
' windows/{version} # use an unknown windows version',
])
state.sections['target remote systems (choose one):'] = content
return f'{{system}}[,{WindowsRemoteKeyValueParser().document(state)}]'
class NetworkRemoteParser(PairParser):
"""Composite argument parser for a network remote host."""
def create_namespace(self) -> t.Any:
"""Create and return a namespace."""
return NetworkRemoteConfig()
def get_left_parser(self, state: ParserState) -> Parser:
"""Return the parser for the left side."""
names = list(filter_completion(network_completion()))
for target in state.root_namespace.targets or []: # type: NetworkRemoteConfig
names.remove(target.name)
return NamespaceWrappedParser('name', PlatformParser(names))
def get_right_parser(self, choice: t.Any) -> Parser:
"""Return the parser for the right side."""
return NetworkRemoteKeyValueParser()
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
content = '\n'.join([f' {name}' for name, item in filter_completion(network_completion()).items()])
content += '\n'.join([
'',
' {platform}/{version} # use an unknown platform and version',
])
state.sections['target remote systems (choose one):'] = content
return f'{{system}}[,{NetworkRemoteKeyValueParser().document(state)}]'
class WindowsInventoryParser(PairParser):
"""Composite argument parser for a Windows inventory."""
def create_namespace(self) -> t.Any:
"""Create and return a namespace."""
return WindowsInventoryConfig()
def get_left_parser(self, state: ParserState) -> Parser:
"""Return the parser for the left side."""
return NamespaceWrappedParser('path', FileParser())
def get_right_parser(self, choice: t.Any) -> Parser:
"""Return the parser for the right side."""
return EmptyKeyValueParser()
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
return '{path} # INI format inventory file'
class NetworkInventoryParser(PairParser):
"""Composite argument parser for a network inventory."""
def create_namespace(self) -> t.Any:
"""Create and return a namespace."""
return NetworkInventoryConfig()
def get_left_parser(self, state: ParserState) -> Parser:
"""Return the parser for the left side."""
return NamespaceWrappedParser('path', FileParser())
def get_right_parser(self, choice: t.Any) -> Parser:
"""Return the parser for the right side."""
return EmptyKeyValueParser()
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
return '{path} # INI format inventory file'
class PosixSshParser(PairParser):
"""Composite argument parser for a POSIX SSH host."""
def create_namespace(self) -> t.Any:
"""Create and return a namespace."""
return PosixSshConfig()
def get_left_parser(self, state: ParserState) -> Parser:
"""Return the parser for the left side."""
return SshConnectionParser()
def get_right_parser(self, choice: t.Any) -> Parser:
"""Return the parser for the right side."""
return PosixSshKeyValueParser()
@property
def required(self) -> bool:
"""True if the delimiter (and thus right parser) is required, otherwise False."""
return True
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
return f'{SshConnectionParser().document(state)}[,{PosixSshKeyValueParser().document(state)}]'
| 11,216
|
Python
|
.py
| 230
| 41.108696
| 143
| 0.673029
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,154
|
value_parsers.py
|
ansible_ansible/test/lib/ansible_test/_internal/cli/parsers/value_parsers.py
|
"""Composite argument value parsers used by other parsers."""
from __future__ import annotations
import collections.abc as c
import typing as t
from ...host_configs import (
NativePythonConfig,
PythonConfig,
VirtualPythonConfig,
)
from ..argparsing.parsers import (
AbsolutePathParser,
AnyParser,
ChoicesParser,
DocumentationState,
IntegerParser,
MatchConditions,
Parser,
ParserError,
ParserState,
ParserBoundary,
)
class PythonParser(Parser):
"""
Composite argument parser for Python versions, with support for specifying paths and using virtual environments.
Allowed formats:
{version}
venv/{version}
venv/system-site-packages/{version}
The `{version}` has two possible formats:
X.Y
X.Y@{path}
Where `X.Y` is the Python major and minor version number and `{path}` is an absolute path with one of the following formats:
/path/to/python
/path/to/python/directory/
When a trailing slash is present, it is considered a directory, and `python{version}` will be appended to it automatically.
The default path depends on the context:
- Known docker/remote environments can declare their own path.
- The origin host uses `sys.executable` if `{version}` matches the current version in `sys.version_info`.
    - The origin host (as a controller or target) uses the `$PATH` environment variable to find `python{version}`.
- As a fallback/default, the path `/usr/bin/python{version}` is used.
NOTE: The Python path determines where to find the Python interpreter.
In the case of an ansible-test managed virtual environment, that Python interpreter will be used to create the virtual environment.
So the path given will not be the one actually used for the controller or target.
Known docker/remote environments limit the available Python versions to configured values known to be valid.
The origin host and unknown environments assume all relevant Python versions are available.
"""
def __init__(
self,
versions: c.Sequence[str],
*,
allow_default: bool,
allow_venv: bool,
):
version_choices = list(versions)
if allow_default:
version_choices.append('default')
first_choices = list(version_choices)
if allow_venv:
first_choices.append('venv/')
venv_choices = list(version_choices) + ['system-site-packages/']
self.versions = versions
self.allow_default = allow_default
self.allow_venv = allow_venv
self.version_choices = version_choices
self.first_choices = first_choices
        self.venv_choices = venv_choices
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
boundary: ParserBoundary
with state.delimit('@/', required=False) as boundary:
version = ChoicesParser(self.first_choices).parse(state)
python: PythonConfig
if version == 'venv':
with state.delimit('@/', required=False) as boundary:
version = ChoicesParser(self.venv_choices).parse(state)
if version == 'system-site-packages':
system_site_packages = True
with state.delimit('@', required=False) as boundary:
version = ChoicesParser(self.version_choices).parse(state)
else:
system_site_packages = False
python = VirtualPythonConfig(version=version, system_site_packages=system_site_packages)
else:
python = NativePythonConfig(version=version)
if boundary.match == '@':
# FUTURE: For OriginConfig or ControllerConfig->OriginConfig the path could be validated with an absolute path parser (file or directory).
python.path = AbsolutePathParser().parse(state)
return python
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
docs = '[venv/[system-site-packages/]]' if self.allow_venv else ''
if self.versions:
docs += '|'.join(self.version_choices)
else:
docs += '{X.Y}'
docs += '[@{path|dir/}]'
return docs
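# Illustrative sketch: PythonParser(['3.12'], allow_venv=True, allow_default=True)
# accepts, among others:
#
#   3.12
#   default
#   venv/3.12
#   venv/system-site-packages/3.12
#   3.12@/usr/bin/python3.12  # explicit interpreter path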
class PlatformParser(ChoicesParser):
"""Composite argument parser for "{platform}/{version}" formatted choices."""
def __init__(self, choices: list[str]) -> None:
super().__init__(choices, conditions=MatchConditions.CHOICE | MatchConditions.ANY)
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
value = super().parse(state)
if len(value.split('/')) != 2:
raise ParserError(f'invalid platform format: {value}')
return value
class SshConnectionParser(Parser):
"""
Composite argument parser for connecting to a host using SSH.
Format: user@host[:port]
"""
EXPECTED_FORMAT = '{user}@{host}[:{port}]'
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
namespace = state.current_namespace
with state.delimit('@'):
user = AnyParser(no_match_message=f'Expected {{user}} from: {self.EXPECTED_FORMAT}').parse(state)
setattr(namespace, 'user', user)
with state.delimit(':', required=False) as colon: # type: ParserBoundary
host = AnyParser(no_match_message=f'Expected {{host}} from: {self.EXPECTED_FORMAT}').parse(state)
setattr(namespace, 'host', host)
if colon.match:
port = IntegerParser(65535).parse(state)
setattr(namespace, 'port', port)
return namespace
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
return self.EXPECTED_FORMAT
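# Illustrative sketch (hypothetical values): parsing 'admin@example.com:2222'
# stores user='admin', host='example.com' and port=2222 on the current
# namespace, typically a config object supplied by a surrounding PairParser.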
| 6,060
|
Python
|
.py
| 130
| 38.5
| 150
| 0.666497
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,155
|
helpers.py
|
ansible_ansible/test/lib/ansible_test/_internal/cli/parsers/helpers.py
|
"""Helper functions for composite parsers."""
from __future__ import annotations
from ...constants import (
CONTROLLER_PYTHON_VERSIONS,
SUPPORTED_PYTHON_VERSIONS,
)
from ...completion import (
docker_completion,
remote_completion,
filter_completion,
)
from ...host_configs import (
DockerConfig,
HostConfig,
PosixRemoteConfig,
)
def get_docker_pythons(name: str, controller: bool, strict: bool) -> list[str]:
"""Return a list of docker instance Python versions supported by the specified host config."""
image_config = filter_completion(docker_completion()).get(name)
available_pythons = CONTROLLER_PYTHON_VERSIONS if controller else SUPPORTED_PYTHON_VERSIONS
if not image_config:
return [] if strict else list(available_pythons)
supported_pythons = [python for python in image_config.supported_pythons if python in available_pythons]
return supported_pythons
def get_remote_pythons(name: str, controller: bool, strict: bool) -> list[str]:
"""Return a list of remote instance Python versions supported by the specified host config."""
platform_config = filter_completion(remote_completion()).get(name)
available_pythons = CONTROLLER_PYTHON_VERSIONS if controller else SUPPORTED_PYTHON_VERSIONS
if not platform_config:
return [] if strict else list(available_pythons)
supported_pythons = [python for python in platform_config.supported_pythons if python in available_pythons]
return supported_pythons
def get_controller_pythons(controller_config: HostConfig, strict: bool) -> list[str]:
"""Return a list of controller Python versions supported by the specified host config."""
if isinstance(controller_config, DockerConfig):
pythons = get_docker_pythons(controller_config.name, False, strict)
elif isinstance(controller_config, PosixRemoteConfig):
pythons = get_remote_pythons(controller_config.name, False, strict)
else:
pythons = list(SUPPORTED_PYTHON_VERSIONS)
return pythons
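# Illustrative sketch (hypothetical image name): strict mode returns no
# versions for unknown hosts, while non-strict mode falls back to all
# available versions:
#
#   get_docker_pythons('custom-image', controller=False, strict=True)   # -> []
#   get_docker_pythons('custom-image', controller=False, strict=False)  # -> list(SUPPORTED_PYTHON_VERSIONS)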
| 2,034
|
Python
|
.py
| 41
| 44.804878
| 111
| 0.753667
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,156
|
key_value_parsers.py
|
ansible_ansible/test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py
|
"""Composite argument key-value parsers used by other parsers."""
from __future__ import annotations
import typing as t
from ...constants import (
CONTROLLER_PYTHON_VERSIONS,
REMOTE_PROVIDERS,
SECCOMP_CHOICES,
SUPPORTED_PYTHON_VERSIONS,
)
from ...completion import (
AuditMode,
CGroupVersion,
)
from ...util import (
REMOTE_ARCHITECTURES,
WINDOWS_CONNECTIONS,
)
from ...host_configs import (
OriginConfig,
)
from ...become import (
SUPPORTED_BECOME_METHODS,
)
from ..argparsing.parsers import (
AnyParser,
BooleanParser,
ChoicesParser,
DocumentationState,
EnumValueChoicesParser,
IntegerParser,
KeyValueParser,
Parser,
ParserState,
)
from .value_parsers import (
PythonParser,
)
from .helpers import (
get_controller_pythons,
get_remote_pythons,
get_docker_pythons,
)
class OriginKeyValueParser(KeyValueParser):
"""Composite argument parser for origin key/value pairs."""
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of key names and value parsers."""
versions = CONTROLLER_PYTHON_VERSIONS
return dict(
python=PythonParser(versions=versions, allow_venv=True, allow_default=True),
)
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
python_parser = PythonParser(versions=CONTROLLER_PYTHON_VERSIONS, allow_venv=True, allow_default=True)
section_name = 'origin options'
state.sections[f'controller {section_name} (comma separated):'] = '\n'.join([
f' python={python_parser.document(state)}',
])
return f'{{{section_name}}} # default'
class ControllerKeyValueParser(KeyValueParser):
"""Composite argument parser for controller key/value pairs."""
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of key names and value parsers."""
versions = get_controller_pythons(state.root_namespace.controller, False)
allow_default = bool(get_controller_pythons(state.root_namespace.controller, True))
allow_venv = isinstance(state.root_namespace.controller, OriginConfig) or not state.root_namespace.controller
return dict(
python=PythonParser(versions=versions, allow_venv=allow_venv, allow_default=allow_default),
)
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
section_name = 'controller options'
state.sections[f'target {section_name} (comma separated):'] = '\n'.join([
f' python={PythonParser(SUPPORTED_PYTHON_VERSIONS, allow_venv=False, allow_default=True).document(state)} # non-origin controller',
f' python={PythonParser(SUPPORTED_PYTHON_VERSIONS, allow_venv=True, allow_default=True).document(state)} # origin controller',
])
return f'{{{section_name}}} # default'
class DockerKeyValueParser(KeyValueParser):
"""Composite argument parser for docker key/value pairs."""
def __init__(self, image: str, controller: bool) -> None:
self.controller = controller
self.versions = get_docker_pythons(image, controller, False)
self.allow_default = bool(get_docker_pythons(image, controller, True))
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of key names and value parsers."""
return dict(
python=PythonParser(versions=self.versions, allow_venv=False, allow_default=self.allow_default),
seccomp=ChoicesParser(SECCOMP_CHOICES),
cgroup=EnumValueChoicesParser(CGroupVersion),
audit=EnumValueChoicesParser(AuditMode),
privileged=BooleanParser(),
memory=IntegerParser(),
)
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
python_parser = PythonParser(versions=[], allow_venv=False, allow_default=self.allow_default)
section_name = 'docker options'
state.sections[f'{"controller" if self.controller else "target"} {section_name} (comma separated):'] = '\n'.join([
f' python={python_parser.document(state)}',
f' seccomp={ChoicesParser(SECCOMP_CHOICES).document(state)}',
f' cgroup={EnumValueChoicesParser(CGroupVersion).document(state)}',
f' audit={EnumValueChoicesParser(AuditMode).document(state)}',
f' privileged={BooleanParser().document(state)}',
f' memory={IntegerParser().document(state)} # bytes',
])
return f'{{{section_name}}}'
class PosixRemoteKeyValueParser(KeyValueParser):
"""Composite argument parser for POSIX remote key/value pairs."""
def __init__(self, name: str, controller: bool) -> None:
self.controller = controller
self.versions = get_remote_pythons(name, controller, False)
self.allow_default = bool(get_remote_pythons(name, controller, True))
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of key names and value parsers."""
return dict(
become=ChoicesParser(list(SUPPORTED_BECOME_METHODS)),
provider=ChoicesParser(REMOTE_PROVIDERS),
arch=ChoicesParser(REMOTE_ARCHITECTURES),
python=PythonParser(versions=self.versions, allow_venv=False, allow_default=self.allow_default),
)
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
python_parser = PythonParser(versions=[], allow_venv=False, allow_default=self.allow_default)
section_name = 'remote options'
state.sections[f'{"controller" if self.controller else "target"} {section_name} (comma separated):'] = '\n'.join([
f' become={ChoicesParser(list(SUPPORTED_BECOME_METHODS)).document(state)}',
f' provider={ChoicesParser(REMOTE_PROVIDERS).document(state)}',
f' arch={ChoicesParser(REMOTE_ARCHITECTURES).document(state)}',
f' python={python_parser.document(state)}',
])
return f'{{{section_name}}}'
class WindowsRemoteKeyValueParser(KeyValueParser):
"""Composite argument parser for Windows remote key/value pairs."""
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of key names and value parsers."""
return dict(
provider=ChoicesParser(REMOTE_PROVIDERS),
arch=ChoicesParser(REMOTE_ARCHITECTURES),
connection=ChoicesParser(WINDOWS_CONNECTIONS),
)
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
section_name = 'remote options'
state.sections[f'target {section_name} (comma separated):'] = '\n'.join([
f' provider={ChoicesParser(REMOTE_PROVIDERS).document(state)}',
f' arch={ChoicesParser(REMOTE_ARCHITECTURES).document(state)}',
f' connection={ChoicesParser(WINDOWS_CONNECTIONS).document(state)}',
])
return f'{{{section_name}}}'
class NetworkRemoteKeyValueParser(KeyValueParser):
"""Composite argument parser for network remote key/value pairs."""
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of key names and value parsers."""
return dict(
provider=ChoicesParser(REMOTE_PROVIDERS),
arch=ChoicesParser(REMOTE_ARCHITECTURES),
collection=AnyParser(),
connection=AnyParser(),
)
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
section_name = 'remote options'
state.sections[f'target {section_name} (comma separated):'] = '\n'.join([
f' provider={ChoicesParser(REMOTE_PROVIDERS).document(state)}',
f' arch={ChoicesParser(REMOTE_ARCHITECTURES).document(state)}',
' collection={collection}',
' connection={connection}',
])
return f'{{{section_name}}}'
class PosixSshKeyValueParser(KeyValueParser):
"""Composite argument parser for POSIX SSH host key/value pairs."""
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of key names and value parsers."""
return dict(
python=PythonParser(versions=list(SUPPORTED_PYTHON_VERSIONS), allow_venv=False, allow_default=False),
)
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
python_parser = PythonParser(versions=SUPPORTED_PYTHON_VERSIONS, allow_venv=False, allow_default=False)
section_name = 'ssh options'
state.sections[f'target {section_name} (comma separated):'] = '\n'.join([
f' python={python_parser.document(state)}',
])
return f'{{{section_name}}}'
class EmptyKeyValueParser(KeyValueParser):
"""Composite argument parser when a key/value parser is required but there are no keys available."""
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of key names and value parsers."""
return {}
| 9,570 | Python | .py | 188 | 42.87234 | 145 | 0.675858 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,157 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/__init__.py |
"""Command line parsing for all commands."""
from __future__ import annotations
import argparse
import functools
import sys
from ...util import (
display,
)
from ..completers import (
complete_target,
register_completer,
)
from ..environments import (
CompositeActionCompletionFinder,
)
from .coverage import (
do_coverage,
)
from .env import (
do_env,
)
from .integration import (
do_integration,
)
from .sanity import (
do_sanity,
)
from .shell import (
do_shell,
)
from .units import (
do_units,
)
def do_commands(
parent: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
) -> None:
"""Command line parsing for all commands."""
common = argparse.ArgumentParser(add_help=False)
common.add_argument(
'-e',
'--explain',
action='store_true',
help='explain commands that would be executed',
)
common.add_argument(
'-v',
'--verbose',
dest='verbosity',
action='count',
default=0,
help='display more output',
)
common.add_argument(
'--color',
metavar='COLOR',
nargs='?',
help='generate color output: yes, no, auto',
const='yes',
default='auto',
type=color,
)
common.add_argument(
'--debug',
action='store_true',
help='run ansible commands in debug mode',
)
common.add_argument(
'--truncate',
dest='truncate',
metavar='COLUMNS',
type=int,
default=display.columns,
help='truncate some long output (0=disabled) (default: auto)',
)
common.add_argument(
'--redact',
dest='redact',
action='store_true',
default=True,
help=argparse.SUPPRESS, # kept for backwards compatibility, but no point in advertising since it's the default
)
common.add_argument(
'--no-redact',
dest='redact',
action='store_false',
        default=True,
help='show sensitive values in output',
)
test = argparse.ArgumentParser(add_help=False, parents=[common])
testing = test.add_argument_group(title='common testing arguments')
register_completer(testing.add_argument(
'include',
metavar='TARGET',
nargs='*',
help='test the specified target',
), functools.partial(complete_target, completer))
register_completer(testing.add_argument(
'--include',
metavar='TARGET',
action='append',
help='include the specified target',
), functools.partial(complete_target, completer))
register_completer(testing.add_argument(
'--exclude',
metavar='TARGET',
action='append',
help='exclude the specified target',
), functools.partial(complete_target, completer))
register_completer(testing.add_argument(
'--require',
metavar='TARGET',
action='append',
help='require the specified target',
), functools.partial(complete_target, completer))
testing.add_argument(
'--coverage',
action='store_true',
help='analyze code coverage when running tests',
)
testing.add_argument(
'--coverage-check',
action='store_true',
help='only verify code coverage can be enabled',
)
testing.add_argument(
'--metadata',
help=argparse.SUPPRESS,
)
testing.add_argument(
'--base-branch',
metavar='BRANCH',
help='base branch used for change detection',
)
testing.add_argument(
'--changed',
action='store_true',
help='limit targets based on changes',
)
changes = test.add_argument_group(title='change detection arguments')
changes.add_argument(
'--tracked',
action='store_true',
help=argparse.SUPPRESS,
)
changes.add_argument(
'--untracked',
action='store_true',
help='include untracked files',
)
changes.add_argument(
'--ignore-committed',
dest='committed',
action='store_false',
help='exclude committed files',
)
changes.add_argument(
'--ignore-staged',
dest='staged',
action='store_false',
help='exclude staged files',
)
changes.add_argument(
'--ignore-unstaged',
dest='unstaged',
action='store_false',
help='exclude unstaged files',
)
changes.add_argument(
'--changed-from',
metavar='PATH',
help=argparse.SUPPRESS,
)
changes.add_argument(
'--changed-path',
metavar='PATH',
action='append',
help=argparse.SUPPRESS,
)
subparsers = parent.add_subparsers(metavar='COMMAND', required=True)
do_coverage(subparsers, common, completer)
do_env(subparsers, common, completer)
do_shell(subparsers, common, completer)
do_integration(subparsers, test, completer)
do_sanity(subparsers, test, completer)
do_units(subparsers, test, completer)
def color(value: str) -> bool:
"""Strict converter for color option."""
if value == 'yes':
return True
if value == 'no':
return False
if value == 'auto':
return sys.stdout.isatty()
raise argparse.ArgumentTypeError(f"invalid choice: '{value}' (choose from 'yes', 'no', 'auto')")
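# Illustrative check, not part of the original module: the strict converter
# above yields booleans for 'yes'/'no' and defers to the TTY state of stdout
# for 'auto'; any other value raises an argparse error at parse time.
def _demo_color_converter() -> None:
    assert color('yes') is True
    assert color('no') is False
    assert isinstance(color('auto'), bool)  # depends on sys.stdout.isatty()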
| 5,436 | Python | .py | 195 | 21.246154 | 119 | 0.618672 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,158 | env.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/env.py |
"""Command line parsing for the `env` command."""
from __future__ import annotations
import argparse
from ...commands.env import (
EnvConfig,
command_env,
)
from ..environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
def do_env(
subparsers,
parent: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
):
"""Command line parsing for the `env` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'env',
parents=[parent],
help='show information about the test environment',
)
parser.set_defaults(
func=command_env,
config=EnvConfig,
)
env = parser.add_argument_group(title='env arguments')
env.add_argument(
'--show',
action='store_true',
help='show environment on stdout',
)
env.add_argument(
'--dump',
action='store_true',
help='dump environment to disk',
)
env.add_argument(
'--list-files',
action='store_true',
help='list files on stdout',
)
env.add_argument(
'--timeout',
type=float,
metavar='MINUTES',
help='timeout for future ansible-test commands (0 clears)',
)
add_environments(parser, completer, ControllerMode.NO_DELEGATION, TargetMode.NO_TARGETS) # env
| 1,399 | Python | .py | 51 | 21.490196 | 99 | 0.647455 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,159 | shell.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/shell.py |
"""Command line parsing for the `shell` command."""
from __future__ import annotations
import argparse
from ...commands.shell import (
command_shell,
)
from ...config import (
ShellConfig,
)
from ..environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
def do_shell(
subparsers,
parent: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
):
"""Command line parsing for the `shell` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'shell',
parents=[parent],
help='open an interactive shell',
)
parser.set_defaults(
func=command_shell,
config=ShellConfig,
)
shell = parser.add_argument_group(title='shell arguments')
shell.add_argument(
'cmd',
nargs='*',
help='run the specified command',
)
shell.add_argument(
'--raw',
action='store_true',
help='direct to shell with no setup',
)
shell.add_argument(
'--export',
metavar='PATH',
help='export inventory instead of opening a shell',
)
add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.SHELL) # shell
| 1,268 | Python | .py | 47 | 21.468085 | 92 | 0.662531 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,160 | units.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/units.py |
"""Command line parsing for the `units` command."""
from __future__ import annotations
import argparse
from ...config import (
UnitsConfig,
)
from ...commands.units import (
command_units,
)
from ...target import (
walk_units_targets,
)
from ..environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
def do_units(
subparsers,
parent: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
):
"""Command line parsing for the `units` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'units',
parents=[parent],
help='unit tests',
)
parser.set_defaults(
func=command_units,
targets_func=walk_units_targets,
config=UnitsConfig,
)
units = parser.add_argument_group(title='unit test arguments')
units.add_argument(
'--collect-only',
action='store_true',
help='collect tests but do not execute them',
)
units.add_argument(
'--num-workers',
metavar='INT',
type=int,
help='number of workers to use (default: auto)',
)
units.add_argument(
'--requirements-mode',
choices=('only', 'skip'),
help=argparse.SUPPRESS,
)
add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.UNITS) # units
| 1,406 | Python | .py | 52 | 21.557692 | 92 | 0.660701 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,161 | sanity.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/sanity.py |
"""Command line parsing for the `sanity` command."""
from __future__ import annotations
import argparse
from ...config import (
SanityConfig,
)
from ...commands.sanity import (
command_sanity,
sanity_get_tests,
)
from ...target import (
walk_sanity_targets,
)
from ..environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
def do_sanity(
subparsers,
parent: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
):
"""Command line parsing for the `sanity` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'sanity',
parents=[parent],
help='sanity tests',
)
parser.set_defaults(
func=command_sanity,
targets_func=walk_sanity_targets,
config=SanityConfig,
)
sanity = parser.add_argument_group(title='sanity test arguments')
sanity.add_argument(
'--test',
metavar='TEST',
action='append',
choices=[test.name for test in sanity_get_tests()],
help='tests to run',
)
sanity.add_argument(
'--skip-test',
metavar='TEST',
action='append',
choices=[test.name for test in sanity_get_tests()],
help='tests to skip',
)
sanity.add_argument(
'--allow-disabled',
action='store_true',
help='allow tests to run which are disabled by default',
)
sanity.add_argument(
'--list-tests',
action='store_true',
help='list available tests',
)
sanity.add_argument(
'--enable-optional-errors',
action='store_true',
help='enable optional errors',
)
sanity.add_argument(
'--lint',
action='store_true',
help='write lint output to stdout, everything else stderr',
)
sanity.add_argument(
'--junit',
action='store_true',
help='write test failures to junit xml files',
)
sanity.add_argument(
'--failure-ok',
action='store_true',
help='exit successfully on failed tests after saving results',
)
sanity.add_argument(
'--prime-venvs',
action='store_true',
help='prepare virtual environments without running tests',
)
add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.SANITY) # sanity
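# Hypothetical invocations, not taken from the original source, showing how the
# arguments registered above combine on the command line:
#
#   ansible-test sanity --test pep8 --test pylint lib/ansible/modules/ping.py
#   ansible-test sanity --list-tests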
| 2,409 | Python | .py | 86 | 21.674419 | 94 | 0.633681 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,162 | xml.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/coverage/xml.py |
"""Command line parsing for the `coverage xml` command."""
from __future__ import annotations
import argparse
import collections.abc as c
import typing as t
from ....commands.coverage.xml import (
command_coverage_xml,
CoverageXmlConfig,
)
from ...environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
def do_xml(
subparsers,
parent: argparse.ArgumentParser,
add_coverage_common: c.Callable[[argparse.ArgumentParser], None],
completer: CompositeActionCompletionFinder,
) -> None:
"""Command line parsing for the `coverage xml` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'xml',
parents=[parent],
help='generate xml coverage report',
)
parser.set_defaults(
func=command_coverage_xml,
config=CoverageXmlConfig,
)
coverage_combine = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='coverage arguments'))
add_coverage_common(coverage_combine)
add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.NO_TARGETS) # coverage xml
| 1,151 | Python | .py | 34 | 29.294118 | 109 | 0.736462 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,163 | report.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/coverage/report.py |
"""Command line parsing for the `coverage report` command."""
from __future__ import annotations
import argparse
import collections.abc as c
import typing as t
from ....commands.coverage.report import (
command_coverage_report,
CoverageReportConfig,
)
from ...environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
def do_report(
subparsers,
parent: argparse.ArgumentParser,
add_coverage_common: c.Callable[[argparse.ArgumentParser], None],
completer: CompositeActionCompletionFinder,
) -> None:
"""Command line parsing for the `coverage report` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'report',
parents=[parent],
help='generate console coverage report',
)
parser.set_defaults(
func=command_coverage_report,
config=CoverageReportConfig,
)
coverage_report = t.cast(argparse.ArgumentParser, parser.add_argument_group('coverage arguments'))
add_coverage_common(coverage_report)
coverage_report.add_argument(
'--show-missing',
action='store_true',
help='show line numbers of statements not executed',
)
coverage_report.add_argument(
'--include',
metavar='PAT[,...]',
help='only include paths that match a pattern (accepts quoted shell wildcards)',
)
coverage_report.add_argument(
'--omit',
metavar='PAT[,...]',
help='omit paths that match a pattern (accepts quoted shell wildcards)',
)
add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.NO_TARGETS) # coverage report
| 1,683 | Python | .py | 49 | 28.857143 | 107 | 0.701603 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,164 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/coverage/__init__.py |
"""Command line parsing for all `coverage` commands."""
from __future__ import annotations
import argparse
from ....commands.coverage import (
COVERAGE_GROUPS,
)
from ...environments import (
CompositeActionCompletionFinder,
)
from .analyze import (
do_analyze,
)
from .combine import (
do_combine,
)
from .erase import (
do_erase,
)
from .html import (
do_html,
)
from .report import (
do_report,
)
from .xml import (
do_xml,
)
def do_coverage(
subparsers,
parent: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
) -> None:
"""Command line parsing for all `coverage` commands."""
coverage_common = argparse.ArgumentParser(add_help=False, parents=[parent])
parser = subparsers.add_parser(
'coverage',
help='code coverage management and reporting',
)
coverage_subparsers = parser.add_subparsers(metavar='COMMAND', required=True)
do_analyze(coverage_subparsers, coverage_common, completer)
do_erase(coverage_subparsers, coverage_common, completer)
do_combine(coverage_subparsers, parent, add_coverage_common, completer)
do_report(coverage_subparsers, parent, add_coverage_common, completer)
do_html(coverage_subparsers, parent, add_coverage_common, completer)
do_xml(coverage_subparsers, parent, add_coverage_common, completer)
def add_coverage_common(
parser: argparse.ArgumentParser,
):
"""Add common coverage arguments."""
parser.add_argument(
'--group-by',
metavar='GROUP',
action='append',
choices=COVERAGE_GROUPS,
help='group output by: %s' % ', '.join(COVERAGE_GROUPS),
)
parser.add_argument(
'--all',
action='store_true',
help='include all python/powershell source files',
)
parser.add_argument(
'--stub',
action='store_true',
help='generate empty report of all python/powershell source files',
)
| 1,967 | Python | .py | 66 | 25.121212 | 81 | 0.697131 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,165 | html.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/coverage/html.py |
"""Command line parsing for the `coverage html` command."""
from __future__ import annotations
import argparse
import collections.abc as c
import typing as t
from ....commands.coverage.html import (
command_coverage_html,
CoverageHtmlConfig,
)
from ...environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
def do_html(
subparsers,
parent: argparse.ArgumentParser,
add_coverage_common: c.Callable[[argparse.ArgumentParser], None],
completer: CompositeActionCompletionFinder,
) -> None:
"""Command line parsing for the `coverage html` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'html',
parents=[parent],
help='generate html coverage report',
)
parser.set_defaults(
func=command_coverage_html,
config=CoverageHtmlConfig,
)
coverage_combine = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='coverage arguments'))
add_coverage_common(coverage_combine)
add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.NO_TARGETS) # coverage html
| 1,162 | Python | .py | 34 | 29.617647 | 109 | 0.739053 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,166 | erase.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/coverage/erase.py |
"""Command line parsing for the `coverage erase` command."""
from __future__ import annotations
import argparse
from ....commands.coverage.erase import (
command_coverage_erase,
CoverageEraseConfig,
)
from ...environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
def do_erase(
subparsers,
parent: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
) -> None:
"""Command line parsing for the `coverage erase` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'erase',
parents=[parent],
help='erase coverage data files',
)
parser.set_defaults(
func=command_coverage_erase,
config=CoverageEraseConfig,
)
add_environments(parser, completer, ControllerMode.ORIGIN, TargetMode.NO_TARGETS) # coverage erase
| 894 | Python | .py | 29 | 26.137931 | 103 | 0.724942 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,167 | combine.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/coverage/combine.py |
"""Command line parsing for the `coverage combine` command."""
from __future__ import annotations
import argparse
import collections.abc as c
import typing as t
from ....commands.coverage.combine import (
command_coverage_combine,
CoverageCombineConfig,
)
from ...environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
def do_combine(
subparsers,
parent: argparse.ArgumentParser,
add_coverage_common: c.Callable[[argparse.ArgumentParser], None],
completer: CompositeActionCompletionFinder,
) -> None:
"""Command line parsing for the `coverage combine` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'combine',
parents=[parent],
help='combine coverage data and rewrite remote paths',
)
parser.set_defaults(
func=command_coverage_combine,
config=CoverageCombineConfig,
)
coverage_combine = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='coverage arguments'))
add_coverage_common(coverage_combine)
coverage_combine.add_argument(
'--export',
metavar='DIR',
help='directory to export combined coverage files to',
)
add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.NO_TARGETS) # coverage combine
| 1,357 | Python | .py | 39 | 29.846154 | 109 | 0.731651 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,168 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/__init__.py |
"""Command line parsing for all `coverage analyze` commands."""
from __future__ import annotations
import argparse
from .targets import (
do_targets,
)
from ....environments import (
CompositeActionCompletionFinder,
)
def do_analyze(
subparsers,
parent: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
) -> None:
"""Command line parsing for all `coverage analyze` commands."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'analyze',
help='analyze collected coverage data',
)
analyze_subparsers = parser.add_subparsers(metavar='COMMAND', required=True)
do_targets(analyze_subparsers, parent, completer)
| 698 | Python | .py | 21 | 29.238095 | 80 | 0.741791 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,169 | generate.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/generate.py |
"""Command line parsing for the `coverage analyze targets generate` command."""
from __future__ import annotations
import argparse
from ......commands.coverage.analyze.targets.generate import (
command_coverage_analyze_targets_generate,
CoverageAnalyzeTargetsGenerateConfig,
)
from .....environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
def do_generate(
subparsers,
parent: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
):
"""Command line parsing for the `coverage analyze targets generate` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'generate',
parents=[parent],
help='aggregate coverage by integration test target',
)
parser.set_defaults(
func=command_coverage_analyze_targets_generate,
config=CoverageAnalyzeTargetsGenerateConfig,
)
targets_generate = parser.add_argument_group(title='coverage arguments')
targets_generate.add_argument(
'input_dir',
nargs='?',
help='directory to read coverage from',
)
targets_generate.add_argument(
'output_file',
help='output file for aggregated coverage',
)
add_environments(parser, completer, ControllerMode.ORIGIN, TargetMode.NO_TARGETS) # coverage analyze targets generate
| 1,389 | Python | .py | 39 | 30.25641 | 122 | 0.729851 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,170 | filter.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/filter.py |
"""Command line parsing for the `coverage analyze targets filter` command."""
from __future__ import annotations
import argparse
from ......commands.coverage.analyze.targets.filter import (
command_coverage_analyze_targets_filter,
CoverageAnalyzeTargetsFilterConfig,
)
from .....environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
def do_filter(
subparsers,
parent: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
):
"""Command line parsing for the `coverage analyze targets filter` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'filter',
parents=[parent],
help='filter aggregated coverage data',
)
parser.set_defaults(
func=command_coverage_analyze_targets_filter,
config=CoverageAnalyzeTargetsFilterConfig,
)
targets_filter = parser.add_argument_group(title='coverage arguments')
targets_filter.add_argument(
'input_file',
help='input file to read aggregated coverage from',
)
targets_filter.add_argument(
'output_file',
help='output file to write expanded coverage to',
)
targets_filter.add_argument(
'--include-target',
metavar='TGT',
dest='include_targets',
action='append',
help='include the specified targets',
)
targets_filter.add_argument(
'--exclude-target',
metavar='TGT',
dest='exclude_targets',
action='append',
help='exclude the specified targets',
)
targets_filter.add_argument(
'--include-path',
metavar='REGEX',
help='include paths matching the given regex',
)
targets_filter.add_argument(
'--exclude-path',
metavar='REGEX',
help='exclude paths matching the given regex',
)
add_environments(parser, completer, ControllerMode.ORIGIN, TargetMode.NO_TARGETS) # coverage analyze targets filter
| 2,029 | Python | .py | 62 | 26.467742 | 120 | 0.682028 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,171 | missing.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/missing.py |
"""Command line parsing for the `coverage analyze targets missing` command."""
from __future__ import annotations
import argparse
from ......commands.coverage.analyze.targets.missing import (
command_coverage_analyze_targets_missing,
CoverageAnalyzeTargetsMissingConfig,
)
from .....environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
def do_missing(
subparsers,
parent: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
):
"""Command line parsing for the `coverage analyze targets missing` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'missing',
parents=[parent],
help='identify coverage in one file missing in another',
)
parser.set_defaults(
func=command_coverage_analyze_targets_missing,
config=CoverageAnalyzeTargetsMissingConfig,
)
targets_missing = parser.add_argument_group(title='coverage arguments')
targets_missing.add_argument(
'from_file',
help='input file containing aggregated coverage',
)
targets_missing.add_argument(
'to_file',
help='input file containing aggregated coverage',
)
targets_missing.add_argument(
'output_file',
help='output file to write aggregated coverage to',
)
targets_missing.add_argument(
'--only-gaps',
action='store_true',
help='report only arcs/lines not hit by any target',
)
targets_missing.add_argument(
'--only-exists',
action='store_true',
help='limit results to files that exist',
)
add_environments(parser, completer, ControllerMode.ORIGIN, TargetMode.NO_TARGETS) # coverage analyze targets missing
| 1,795 | Python | .py | 52 | 28.653846 | 121 | 0.704624 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,172 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/__init__.py |
"""Command line parsing for all `coverage analyze targets` commands."""
from __future__ import annotations
import argparse
from .....environments import (
CompositeActionCompletionFinder,
)
from .combine import (
do_combine,
)
from .expand import (
do_expand,
)
from .filter import (
do_filter,
)
from .generate import (
do_generate,
)
from .missing import (
do_missing,
)
def do_targets(
subparsers,
parent: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
) -> None:
"""Command line parsing for all `coverage analyze targets` commands."""
targets = subparsers.add_parser(
'targets',
help='analyze integration test target coverage',
)
targets_subparsers = targets.add_subparsers(metavar='COMMAND', required=True)
do_generate(targets_subparsers, parent, completer)
do_expand(targets_subparsers, parent, completer)
do_filter(targets_subparsers, parent, completer)
do_combine(targets_subparsers, parent, completer)
do_missing(targets_subparsers, parent, completer)
| 1,082 | Python | .py | 37 | 25.567568 | 81 | 0.738878 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,173 | combine.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/combine.py |
"""Command line parsing for the `coverage analyze targets combine` command."""
from __future__ import annotations
import argparse
from ......commands.coverage.analyze.targets.combine import (
command_coverage_analyze_targets_combine,
CoverageAnalyzeTargetsCombineConfig,
)
from .....environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
def do_combine(
subparsers,
parent: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
):
"""Command line parsing for the `coverage analyze targets combine` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'combine',
parents=[parent],
help='combine multiple aggregated coverage files',
)
parser.set_defaults(
func=command_coverage_analyze_targets_combine,
config=CoverageAnalyzeTargetsCombineConfig,
)
targets_combine = parser.add_argument_group('coverage arguments')
targets_combine.add_argument(
'input_file',
nargs='+',
help='input file to read aggregated coverage from',
)
targets_combine.add_argument(
'output_file',
help='output file to write aggregated coverage to',
)
add_environments(parser, completer, ControllerMode.ORIGIN, TargetMode.NO_TARGETS) # coverage analyze targets combine
| 1,388 | Python | .py | 39 | 30.230769 | 121 | 0.728155 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,174 | expand.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/expand.py |
"""Command line parsing for the `coverage analyze targets expand` command."""
from __future__ import annotations
import argparse
from ......commands.coverage.analyze.targets.expand import (
command_coverage_analyze_targets_expand,
CoverageAnalyzeTargetsExpandConfig,
)
from .....environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
def do_expand(
subparsers,
parent: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
):
"""Command line parsing for the `coverage analyze targets expand` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'expand',
parents=[parent],
help='expand target names from integers in aggregated coverage',
)
parser.set_defaults(
func=command_coverage_analyze_targets_expand,
config=CoverageAnalyzeTargetsExpandConfig,
)
targets_expand = parser.add_argument_group(title='coverage arguments')
targets_expand.add_argument(
'input_file',
help='input file to read aggregated coverage from',
)
targets_expand.add_argument(
'output_file',
help='output file to write expanded coverage to',
)
add_environments(parser, completer, ControllerMode.ORIGIN, TargetMode.NO_TARGETS) # coverage analyze targets expand
| 1,374 | Python | .py | 38 | 30.894737 | 120 | 0.732278 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,175 | windows.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/integration/windows.py |
"""Command line parsing for the `windows-integration` command."""
from __future__ import annotations
import argparse
import collections.abc as c
import typing as t
from ....commands.integration.windows import (
command_windows_integration,
)
from ....config import (
WindowsIntegrationConfig,
)
from ....target import (
walk_windows_integration_targets,
)
from ...environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
def do_windows_integration(
subparsers,
parent: argparse.ArgumentParser,
add_integration_common: c.Callable[[argparse.ArgumentParser], None],
completer: CompositeActionCompletionFinder,
):
"""Command line parsing for the `windows-integration` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'windows-integration',
parents=[parent],
help='windows integration tests',
)
parser.set_defaults(
func=command_windows_integration,
targets_func=walk_windows_integration_targets,
config=WindowsIntegrationConfig,
)
windows_integration = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='windows integration test arguments'))
add_integration_common(windows_integration)
add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.WINDOWS_INTEGRATION) # windows-integration
| 1,414 | Python | .py | 40 | 30.975 | 128 | 0.757153 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,176 | network.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/integration/network.py |
"""Command line parsing for the `network-integration` command."""
from __future__ import annotations
import argparse
import collections.abc as c
import os
import typing as t
from ....commands.integration.network import (
command_network_integration,
)
from ....config import (
NetworkIntegrationConfig,
)
from ....target import (
walk_network_integration_targets,
)
from ....data import (
data_context,
)
from ...environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
from ...completers import (
register_completer,
)
def do_network_integration(
subparsers,
parent: argparse.ArgumentParser,
add_integration_common: c.Callable[[argparse.ArgumentParser], None],
completer: CompositeActionCompletionFinder,
):
"""Command line parsing for the `network-integration` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'network-integration',
parents=[parent],
help='network integration tests',
)
parser.set_defaults(
func=command_network_integration,
targets_func=walk_network_integration_targets,
config=NetworkIntegrationConfig,
)
network_integration = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='network integration test arguments'))
add_integration_common(network_integration)
register_completer(network_integration.add_argument(
'--testcase',
metavar='TESTCASE',
help='limit a test to a specified testcase',
), complete_network_testcase)
add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.NETWORK_INTEGRATION) # network-integration
def complete_network_testcase(prefix: str, parsed_args: argparse.Namespace, **_) -> list[str]:
"""Return a list of test cases matching the given prefix if only one target was parsed from the command line, otherwise return an empty list."""
testcases = []
# since testcases are module specific, don't autocomplete if more than one
# module is specified
if len(parsed_args.include) != 1:
return []
target = parsed_args.include[0]
test_dir = os.path.join(data_context().content.integration_targets_path, target, 'tests')
connection_dirs = data_context().content.get_dirs(test_dir)
for connection_dir in connection_dirs:
for testcase in [os.path.basename(path) for path in data_context().content.get_files(connection_dir)]:
if testcase.startswith(prefix):
testcases.append(testcase.split('.', 1)[0])
return testcases
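# Illustrative note, not part of the original module: completion keeps only the
# part of a test case file name before the first dot, so a case stored as
# "cli_command.yaml" is offered as "cli_command".
def _demo_testcase_completion_name() -> None:
    assert 'cli_command.yaml'.split('.', 1)[0] == 'cli_command'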
| 2,621 | Python | .py | 67 | 34.119403 | 148 | 0.727703 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,177 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/integration/__init__.py |
"""Command line parsing for all integration commands."""
from __future__ import annotations
import argparse
from ...completers import (
complete_target,
register_completer,
)
from ...environments import (
CompositeActionCompletionFinder,
)
from .network import (
do_network_integration,
)
from .posix import (
do_posix_integration,
)
from .windows import (
do_windows_integration,
)
def do_integration(
subparsers,
parent: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
):
"""Command line parsing for all integration commands."""
parser = argparse.ArgumentParser(
add_help=False,
parents=[parent],
)
do_posix_integration(subparsers, parser, add_integration_common, completer)
do_network_integration(subparsers, parser, add_integration_common, completer)
do_windows_integration(subparsers, parser, add_integration_common, completer)
def add_integration_common(
parser: argparse.ArgumentParser,
):
"""Add common integration arguments."""
register_completer(parser.add_argument(
'--start-at',
metavar='TARGET',
help='start at the specified target',
), complete_target)
parser.add_argument(
'--start-at-task',
metavar='TASK',
help='start at the specified task',
)
parser.add_argument(
'--tags',
metavar='TAGS',
help='only run plays and tasks tagged with these values',
)
parser.add_argument(
'--skip-tags',
metavar='TAGS',
help='only run plays and tasks whose tags do not match these values',
)
parser.add_argument(
'--diff',
action='store_true',
help='show diff output',
)
parser.add_argument(
'--allow-destructive',
action='store_true',
help='allow destructive tests',
)
parser.add_argument(
'--allow-root',
action='store_true',
help='allow tests requiring root when not root',
)
parser.add_argument(
'--allow-disabled',
action='store_true',
help='allow tests which have been marked as disabled',
)
parser.add_argument(
'--allow-unstable',
action='store_true',
help='allow tests which have been marked as unstable',
)
parser.add_argument(
'--allow-unstable-changed',
action='store_true',
help='allow tests which have been marked as unstable when focused changes are detected',
)
parser.add_argument(
'--allow-unsupported',
action='store_true',
help='allow tests which have been marked as unsupported',
)
parser.add_argument(
'--retry-on-error',
action='store_true',
help='retry failed test with increased verbosity',
)
parser.add_argument(
'--continue-on-error',
action='store_true',
help='continue after failed test',
)
parser.add_argument(
'--debug-strategy',
action='store_true',
help='run test playbooks using the debug strategy',
)
parser.add_argument(
'--changed-all-target',
metavar='TARGET',
default='all',
help='target to run when all tests are needed',
)
parser.add_argument(
'--changed-all-mode',
metavar='MODE',
choices=('default', 'include', 'exclude'),
help='include/exclude behavior with --changed-all-target: %(choices)s',
)
parser.add_argument(
'--list-targets',
action='store_true',
help='list matching targets instead of running tests',
)
parser.add_argument(
'--no-temp-workdir',
action='store_true',
help='do not run tests from a temporary directory (use only for verifying broken tests)',
)
parser.add_argument(
'--no-temp-unicode',
action='store_true',
help='avoid unicode characters in temporary directory (use only for verifying broken tests)',
)
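# Hypothetical invocation, not taken from the original source, combining
# several of the common integration arguments registered above:
#
#   ansible-test integration --diff --retry-on-error --start-at my_target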
| 4,029 | Python | .py | 133 | 23.75188 | 101 | 0.639255 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,178 | posix.py | ansible_ansible/test/lib/ansible_test/_internal/cli/commands/integration/posix.py |
"""Command line parsing for the `integration` command."""
from __future__ import annotations
import argparse
import collections.abc as c
import typing as t
from ....commands.integration.posix import (
command_posix_integration,
)
from ....config import (
PosixIntegrationConfig,
)
from ....target import (
walk_posix_integration_targets,
)
from ...environments import (
CompositeActionCompletionFinder,
ControllerMode,
TargetMode,
add_environments,
)
def do_posix_integration(
subparsers,
parent: argparse.ArgumentParser,
add_integration_common: c.Callable[[argparse.ArgumentParser], None],
completer: CompositeActionCompletionFinder,
):
"""Command line parsing for the `integration` command."""
parser: argparse.ArgumentParser = subparsers.add_parser(
'integration',
parents=[parent],
help='posix integration tests',
)
parser.set_defaults(
func=command_posix_integration,
targets_func=walk_posix_integration_targets,
config=PosixIntegrationConfig,
)
posix_integration = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='integration test arguments'))
add_integration_common(posix_integration)
add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.POSIX_INTEGRATION) # integration
| 1,350 | Python | .py | 40 | 29.375 | 118 | 0.749038 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,179 | packaging.py | ansible_ansible/test/lib/ansible_test/_internal/compat/packaging.py |
"""Packaging compatibility."""
from __future__ import annotations
import typing as t
try:
from packaging import (
specifiers,
version,
)
SpecifierSet: t.Optional[t.Type[specifiers.SpecifierSet]] = specifiers.SpecifierSet
Version: t.Optional[t.Type[version.Version]] = version.Version
PACKAGING_IMPORT_ERROR = None
except ImportError as ex:
SpecifierSet = None # pylint: disable=invalid-name
Version = None # pylint: disable=invalid-name
PACKAGING_IMPORT_ERROR = ex
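# Minimal usage sketch, not part of the original module, assuming the packaging
# library imported successfully above: Version and SpecifierSet compose to
# answer "does this version satisfy the given constraint?".
def _demo_packaging() -> None:
    if SpecifierSet is not None and Version is not None:
        assert Version('2.15.0') in SpecifierSet('>=2.14,<2.17')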
| 518 | Python | .py | 15 | 30.133333 | 87 | 0.728 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,180 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/compat/__init__.py |
"""Nearly empty __init__.py to keep pylint happy."""
from __future__ import annotations
| 88 | Python | .py | 2 | 43 | 52 | 0.709302 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,181 | yaml.py | ansible_ansible/test/lib/ansible_test/_internal/compat/yaml.py |
"""PyYAML compatibility."""
from __future__ import annotations
import typing as t
from functools import (
partial,
)
try:
import yaml as _yaml
YAML_IMPORT_ERROR = None
except ImportError as ex:
yaml_load = None # pylint: disable=invalid-name
YAML_IMPORT_ERROR = ex
else:
try:
_SafeLoader: t.Union[t.Type[_yaml.CSafeLoader], t.Type[_yaml.SafeLoader]] = _yaml.CSafeLoader
except AttributeError:
_SafeLoader = _yaml.SafeLoader
yaml_load = partial(_yaml.load, Loader=_SafeLoader)
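# Minimal usage sketch, not part of the original module, assuming PyYAML
# imported successfully above: yaml_load is bound to a safe loader, so plain
# mappings parse without executing arbitrary object tags.
def _demo_yaml_load() -> None:
    if yaml_load is not None:
        assert yaml_load('key: [1, 2]') == {'key': [1, 2]}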
| 530 | Python | .py | 18 | 25.5 | 101 | 0.708087 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,182 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/commands/__init__.py |
"""Nearly empty __init__.py to keep pylint happy."""
from __future__ import annotations
| 88 | Python | .py | 2 | 43 | 52 | 0.709302 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,183 | shellcheck.py | ansible_ansible/test/lib/ansible_test/_internal/commands/sanity/shellcheck.py |
"""Sanity test using shellcheck."""
from __future__ import annotations
import os
import typing as t
from xml.etree.ElementTree import (
fromstring,
Element,
)
from . import (
SanityVersionNeutral,
SanityMessage,
SanityFailure,
SanitySuccess,
SanitySkipped,
SanityTargets,
SANITY_ROOT,
)
from ...test import (
TestResult,
)
from ...target import (
TestTarget,
)
from ...util import (
SubprocessError,
read_lines_without_comments,
find_executable,
)
from ...util_common import (
run_command,
)
from ...config import (
SanityConfig,
)
class ShellcheckTest(SanityVersionNeutral):
"""Sanity test using shellcheck."""
@property
def error_code(self) -> t.Optional[str]:
"""Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
return 'AT1000'
def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test."""
return [target for target in targets if os.path.splitext(target.path)[1] == '.sh']
def test(self, args: SanityConfig, targets: SanityTargets) -> TestResult:
exclude_file = os.path.join(SANITY_ROOT, 'shellcheck', 'exclude.txt')
exclude = set(read_lines_without_comments(exclude_file, remove_blank_lines=True, optional=True))
settings = self.load_processor(args)
paths = [target.path for target in targets.include]
if not find_executable('shellcheck', required='warning'):
return SanitySkipped(self.name)
cmd = [
'shellcheck',
'-e', ','.join(sorted(exclude)),
'--format', 'checkstyle',
] + paths # fmt: skip
try:
stdout, stderr = run_command(args, cmd, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr or status > 1:
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
if args.explain:
return SanitySuccess(self.name)
# json output is missing file paths in older versions of shellcheck, so we'll use xml instead
root: Element = fromstring(stdout)
results = []
for item in root:
for entry in item:
results.append(SanityMessage(
message=entry.attrib['message'],
path=item.attrib['name'],
line=int(entry.attrib['line']),
column=int(entry.attrib['column']),
level=entry.attrib['severity'],
code=entry.attrib['source'].replace('ShellCheck.', ''),
))
results = settings.process_errors(results, paths)
if results:
return SanityFailure(self.name, messages=results)
return SanitySuccess(self.name)
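# Hypothetical sample, not taken from real shellcheck output, of the checkstyle
# XML shape parsed above: each <file> element contributes its 'name' attribute
# and each nested <error> supplies message, line, column, severity and source.
_EXAMPLE_CHECKSTYLE = """<?xml version='1.0' encoding='UTF-8'?>
<checkstyle version='4.3'>
  <file name='test.sh'>
    <error line='3' column='8' severity='warning' message='foo is unused' source='ShellCheck.SC2034'/>
  </file>
</checkstyle>"""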
| 3,070 | Python | .py | 83 | 28.831325 | 147 | 0.628842 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,184 | bin_symlinks.py | ansible_ansible/test/lib/ansible_test/_internal/commands/sanity/bin_symlinks.py |
"""Sanity test for symlinks in the bin directory."""
from __future__ import annotations
import os
from . import (
SanityVersionNeutral,
SanityMessage,
SanityFailure,
SanitySuccess,
SanityTargets,
)
from ...constants import (
__file__ as symlink_map_full_path,
)
from ...test import (
TestResult,
)
from ...config import (
SanityConfig,
)
from ...data import (
data_context,
)
from ...payload import (
ANSIBLE_BIN_SYMLINK_MAP,
)
from ...util import (
ANSIBLE_SOURCE_ROOT,
)
class BinSymlinksTest(SanityVersionNeutral):
"""Sanity test for symlinks in the bin directory."""
ansible_only = True
@property
def can_ignore(self) -> bool:
"""True if the test supports ignore entries."""
return False
@property
def no_targets(self) -> bool:
"""True if the test does not use test targets. Mutually exclusive with all_targets."""
return True
def test(self, args: SanityConfig, targets: SanityTargets) -> TestResult:
bin_root = os.path.join(ANSIBLE_SOURCE_ROOT, 'bin')
bin_names = os.listdir(bin_root)
bin_paths = sorted(os.path.join(bin_root, path) for path in bin_names)
errors: list[tuple[str, str]] = []
symlink_map_path = os.path.relpath(symlink_map_full_path, data_context().content.root)
for bin_path in bin_paths:
if not os.path.islink(bin_path):
errors.append((bin_path, 'not a symbolic link'))
continue
dest = os.readlink(bin_path)
if not os.path.exists(bin_path):
errors.append((bin_path, 'points to non-existent path "%s"' % dest))
continue
if not os.path.isfile(bin_path):
errors.append((bin_path, 'points to non-file "%s"' % dest))
continue
map_dest = ANSIBLE_BIN_SYMLINK_MAP.get(os.path.basename(bin_path))
if not map_dest:
errors.append((bin_path, 'missing from ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % symlink_map_path))
continue
if dest != map_dest:
errors.append((bin_path, 'points to "%s" instead of "%s" from ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % (dest, map_dest, symlink_map_path)))
continue
if not os.access(bin_path, os.X_OK):
errors.append((bin_path, 'points to non-executable file "%s"' % dest))
continue
for bin_name, dest in ANSIBLE_BIN_SYMLINK_MAP.items():
if bin_name not in bin_names:
bin_path = os.path.join(bin_root, bin_name)
errors.append((bin_path, 'missing symlink to "%s" defined in ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % (dest, symlink_map_path)))
messages = [SanityMessage(message=message, path=os.path.relpath(path, data_context().content.root), confidence=100) for path, message in errors]
if errors:
return SanityFailure(self.name, messages=messages)
return SanitySuccess(self.name)
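# Illustrative sketch, not part of the original test: os.readlink() returns the
# raw link target while os.path.exists() follows the link, which is why the
# checks above can distinguish a dangling symlink from a valid one.
def _demo_dangling_symlink() -> None:
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        link = os.path.join(tmp, 'ansible-demo')
        os.symlink('missing-target', link)  # relative target that does not exist
        assert os.path.islink(link) and not os.path.exists(link)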
| 3,072 | Python | .py | 74 | 33 | 154 | 0.620202 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,185 | compile.py | ansible_ansible/test/lib/ansible_test/_internal/commands/sanity/compile.py |
"""Sanity test for proper python syntax."""
from __future__ import annotations
import os
from . import (
SanityMultipleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanityTargets,
SanitySkipped,
TARGET_SANITY_ROOT,
)
from ...test import (
TestResult,
)
from ...target import (
TestTarget,
)
from ...util import (
SubprocessError,
display,
parse_to_list_of_dict,
is_subdir,
)
from ...util_common import (
run_command,
)
from ...config import (
SanityConfig,
)
from ...host_configs import (
PythonConfig,
)
class CompileTest(SanityMultipleVersion):
"""Sanity test for proper python syntax."""
def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test."""
return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
if args.prime_venvs:
return SanitySkipped(self.name, python_version=python.version)
settings = self.load_processor(args, python.version)
paths = [target.path for target in targets.include]
cmd = [python.path, os.path.join(TARGET_SANITY_ROOT, 'compile', 'compile.py')]
data = '\n'.join(paths)
display.info(data, verbosity=4)
try:
stdout, stderr = run_command(args, cmd, data=data, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr:
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
if args.explain:
return SanitySuccess(self.name, python_version=python.version)
pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
results = parse_to_list_of_dict(pattern, stdout)
results = [SanityMessage(
message=r['message'],
path=r['path'].replace('./', ''),
line=int(r['line']),
column=int(r['column']),
) for r in results]
results = settings.process_errors(results, paths)
if results:
return SanityFailure(self.name, messages=results, python_version=python.version)
return SanitySuccess(self.name, python_version=python.version)
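# Illustrative check, not part of the original test: a sample line in the
# "path:line:column: message" format produced by the compile helper matches
# the pattern used above.
def _demo_compile_pattern() -> None:
    import re
    pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
    match = re.search(pattern, 'lib/module.py:10:4: invalid syntax')
    assert match is not None and match.group('line') == '10'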
| 2,539 | Python | .py | 69 | 29.855072 | 123 | 0.645254 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,186 | ansible_doc.py | ansible_ansible/test/lib/ansible_test/_internal/commands/sanity/ansible_doc.py |
"""Sanity test for ansible-doc."""
from __future__ import annotations
import collections
import json
import os
import re
from . import (
DOCUMENTABLE_PLUGINS,
MULTI_FILE_PLUGINS,
SanitySingleVersion,
SanityFailure,
SanitySuccess,
SanityTargets,
SanityMessage,
)
from ...test import (
TestResult,
)
from ...target import (
TestTarget,
)
from ...util import (
SubprocessError,
display,
is_subdir,
)
from ...ansible_util import (
ansible_environment,
intercept_python,
)
from ...config import (
SanityConfig,
)
from ...data import (
data_context,
)
from ...host_configs import (
PythonConfig,
)
class AnsibleDocTest(SanitySingleVersion):
"""Sanity test for ansible-doc."""
def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test."""
plugin_paths = [plugin_path for plugin_type, plugin_path in data_context().content.plugin_paths.items() if plugin_type in DOCUMENTABLE_PLUGINS]
return [target for target in targets
if os.path.splitext(target.path)[1] == '.py'
and os.path.basename(target.path) != '__init__.py'
and any(is_subdir(target.path, path) for path in plugin_paths)
]
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
settings = self.load_processor(args)
paths = [target.path for target in targets.include]
doc_targets: dict[str, list[str]] = collections.defaultdict(list)
remap_types = dict(
modules='module',
)
for plugin_type, plugin_path in data_context().content.plugin_paths.items():
plugin_type = remap_types.get(plugin_type, plugin_type)
for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
plugin_parts = os.path.relpath(plugin_file_path, plugin_path).split(os.path.sep)
plugin_name = os.path.splitext(plugin_parts[-1])[0]
if plugin_name.startswith('_') and not data_context().content.collection:
plugin_name = plugin_name[1:]
plugin_fqcn = data_context().content.prefix + '.'.join(plugin_parts[:-1] + [plugin_name])
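                # e.g. a collection plugin file at sub/dir/foo.py yields '<namespace>.<collection>.sub.dir.foo' (illustrative path)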
doc_targets[plugin_type].append(plugin_fqcn)
env = ansible_environment(args, color=False)
for doc_type in MULTI_FILE_PLUGINS:
if doc_targets.get(doc_type):
# List plugins
cmd = ['ansible-doc', '-l', '--json', '-t', doc_type]
prefix = data_context().content.prefix if data_context().content.collection else 'ansible.builtin.'
cmd.append(prefix[:-1])
try:
stdout, stderr = intercept_python(args, python, cmd, env, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if status:
summary = '%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr)
return SanityFailure(self.name, summary=summary)
if stdout:
display.info(stdout.strip(), verbosity=3)
if stderr:
summary = 'Output on stderr from ansible-doc is considered an error.\n\n%s' % SubprocessError(cmd, stderr=stderr)
return SanityFailure(self.name, summary=summary)
if args.explain:
continue
plugin_list_json = json.loads(stdout)
doc_targets[doc_type] = []
for plugin_name, plugin_value in sorted(plugin_list_json.items()):
if plugin_value != 'UNDOCUMENTED':
doc_targets[doc_type].append(plugin_name)
if not doc_targets[doc_type]:
del doc_targets[doc_type]
error_messages: list[SanityMessage] = []
for doc_type in sorted(doc_targets):
for format_option in [None, '--json']:
cmd = ['ansible-doc', '-t', doc_type]
if format_option is not None:
cmd.append(format_option)
cmd.extend(sorted(doc_targets[doc_type]))
try:
stdout, stderr = intercept_python(args, python, cmd, env, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if status:
summary = '%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr)
return SanityFailure(self.name, summary=summary)
if stdout:
display.info(stdout.strip(), verbosity=3)
if stderr:
# ignore removed module/plugin warnings
stderr = re.sub(r'\[WARNING]: [^ ]+ [^ ]+ has been removed\n', '', stderr).strip()
if stderr:
summary = 'Output on stderr from ansible-doc is considered an error.\n\n%s' % SubprocessError(cmd, stderr=stderr)
return SanityFailure(self.name, summary=summary)
if args.explain:
return SanitySuccess(self.name)
error_messages = settings.process_errors(error_messages, paths)
if error_messages:
return SanityFailure(self.name, messages=error_messages)
return SanitySuccess(self.name)
| 5,776 | Python | .py | 127 | 33.322835 | 151 | 0.582382 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,187 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/commands/sanity/__init__.py |
"""Execute Ansible sanity tests."""
from __future__ import annotations
import abc
import glob
import hashlib
import json
import os
import pathlib
import re
import collections
import collections.abc as c
import typing as t
from ...constants import (
CONTROLLER_PYTHON_VERSIONS,
REMOTE_ONLY_PYTHON_VERSIONS,
SUPPORTED_PYTHON_VERSIONS,
)
from ...encoding import (
to_bytes,
)
from ...io import (
read_json_file,
write_json_file,
write_text_file,
)
from ...util import (
ApplicationError,
SubprocessError,
display,
import_plugins,
load_plugins,
parse_to_list_of_dict,
ANSIBLE_TEST_CONTROLLER_ROOT,
ANSIBLE_TEST_TARGET_ROOT,
is_binary_file,
read_lines_without_comments,
is_subdir,
paths_to_dirs,
get_ansible_version,
str_to_version,
cache,
remove_tree,
)
from ...util_common import (
intercept_python,
handle_layout_messages,
yamlcheck,
create_result_directories,
)
from ...ansible_util import (
ansible_environment,
)
from ...target import (
walk_internal_targets,
walk_sanity_targets,
TestTarget,
)
from ...executor import (
get_changes_filter,
AllTargetsSkipped,
Delegate,
)
from ...python_requirements import (
PipCommand,
PipInstall,
collect_requirements,
run_pip,
)
from ...config import (
SanityConfig,
)
from ...test import (
TestSuccess,
TestFailure,
TestSkipped,
TestMessage,
TestResult,
calculate_best_confidence,
)
from ...data import (
data_context,
)
from ...content_config import (
get_content_config,
)
from ...host_configs import (
DockerConfig,
PosixConfig,
PythonConfig,
VirtualPythonConfig,
)
from ...host_profiles import (
PosixProfile,
)
from ...provisioning import (
prepare_profiles,
)
from ...pypi_proxy import (
configure_pypi_proxy,
)
from ...venv import (
create_virtual_environment,
)
COMMAND = 'sanity'
SANITY_ROOT = os.path.join(ANSIBLE_TEST_CONTROLLER_ROOT, 'sanity')
TARGET_SANITY_ROOT = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'sanity')
# NOTE: must match ansible.constants.DOCUMENTABLE_PLUGINS, but with 'module' replaced by 'modules'!
DOCUMENTABLE_PLUGINS = (
'become', 'cache', 'callback', 'cliconf', 'connection', 'filter', 'httpapi', 'inventory',
'lookup', 'netconf', 'modules', 'shell', 'strategy', 'test', 'vars',
)
# Plugin types that can have multiple plugins per file, and where filenames do not always correspond to plugin names
MULTI_FILE_PLUGINS = ('filter', 'test', )
created_venvs: list[str] = []
def command_sanity(args: SanityConfig) -> None:
"""Run sanity tests."""
create_result_directories(args)
target_configs = t.cast(list[PosixConfig], args.targets)
target_versions: dict[str, PosixConfig] = {target.python.version: target for target in target_configs}
handle_layout_messages(data_context().content.sanity_messages)
changes = get_changes_filter(args)
require = args.require + changes
targets = SanityTargets.create(args.include, args.exclude, require)
if not targets.include:
raise AllTargetsSkipped()
tests = list(sanity_get_tests())
if args.test:
disabled = []
tests = [target for target in tests if target.name in args.test]
else:
disabled = [target.name for target in tests if not target.enabled and not args.allow_disabled]
tests = [target for target in tests if target.enabled or args.allow_disabled]
if args.skip_test:
tests = [target for target in tests if target.name not in args.skip_test]
if not args.host_path:
for test in tests:
test.origin_hook(args)
targets_use_pypi = any(isinstance(test, SanityMultipleVersion) and test.needs_pypi for test in tests) and not args.list_tests
host_state = prepare_profiles(args, targets_use_pypi=targets_use_pypi) # sanity
get_content_config(args) # make sure content config has been parsed prior to delegation
if args.delegate:
raise Delegate(host_state=host_state, require=changes, exclude=args.exclude)
configure_pypi_proxy(args, host_state.controller_profile) # sanity
if disabled:
display.warning('Skipping tests disabled by default without --allow-disabled: %s' % ', '.join(sorted(disabled)))
target_profiles: dict[str, PosixProfile] = {profile.config.python.version: profile for profile in host_state.targets(PosixProfile)}
total = 0
failed = []
result: t.Optional[TestResult]
for test in tests:
if args.list_tests:
print(test.name) # display goes to stderr, this should be on stdout
continue
for version in SUPPORTED_PYTHON_VERSIONS:
options = ''
if isinstance(test, SanityMultipleVersion):
if version not in target_versions and version not in args.host_settings.skipped_python_versions:
continue # version was not requested, skip it silently
else:
if version != args.controller_python.version:
continue # only multi-version sanity tests use target versions, the rest use the controller version
if test.supported_python_versions and version not in test.supported_python_versions:
result = SanitySkipped(test.name, version)
result.reason = f'Skipping sanity test "{test.name}" on Python {version} because it is unsupported.' \
f' Supported Python versions: {", ".join(test.supported_python_versions)}'
else:
if isinstance(test, SanityMultipleVersion):
settings = test.load_processor(args, version)
elif isinstance(test, SanitySingleVersion):
settings = test.load_processor(args)
elif isinstance(test, SanityVersionNeutral):
settings = test.load_processor(args)
else:
raise Exception('Unsupported test type: %s' % type(test))
all_targets = list(targets.targets)
if test.all_targets:
usable_targets = list(targets.targets)
elif test.no_targets:
usable_targets = []
else:
usable_targets = list(targets.include)
all_targets = SanityTargets.filter_and_inject_targets(test, all_targets)
usable_targets = SanityTargets.filter_and_inject_targets(test, usable_targets)
usable_targets = sorted(test.filter_targets_by_version(args, list(usable_targets), version))
usable_targets = settings.filter_skipped_targets(usable_targets)
sanity_targets = SanityTargets(tuple(all_targets), tuple(usable_targets))
test_needed = bool(usable_targets or test.no_targets or args.prime_venvs)
result = None
if test_needed and version in args.host_settings.skipped_python_versions:
# Deferred checking of Python availability. Done here since it is now known to be required for running the test.
# Earlier checking could cause a spurious warning to be generated for a collection which does not support the Python version.
# If the user specified a Python version, an error will be generated before reaching this point when the Python interpreter is not found.
result = SanitySkipped(test.name, version)
result.reason = f'Skipping sanity test "{test.name}" on Python {version} because it could not be found.'
if not result:
if isinstance(test, SanityMultipleVersion):
display.info(f'Running sanity test "{test.name}" on Python {version}')
else:
display.info(f'Running sanity test "{test.name}"')
if test_needed and not result:
if isinstance(test, SanityMultipleVersion):
# multi-version sanity tests handle their own requirements (if any) and use the target python
test_profile = target_profiles[version]
result = test.test(args, sanity_targets, test_profile.python)
options = ' --python %s' % version
elif isinstance(test, SanitySingleVersion):
# single version sanity tests use the controller python
test_profile = host_state.controller_profile
virtualenv_python = create_sanity_virtualenv(args, test_profile.python, test.name)
if virtualenv_python:
virtualenv_yaml = args.explain or check_sanity_virtualenv_yaml(virtualenv_python)
if test.require_libyaml and not virtualenv_yaml:
result = SanitySkipped(test.name)
result.reason = f'Skipping sanity test "{test.name}" on Python {version} due to missing libyaml support in PyYAML.'
else:
if virtualenv_yaml is False:
display.warning(f'Sanity test "{test.name}" on Python {version} may be slow due to missing libyaml support in PyYAML.')
if args.prime_venvs:
result = SanitySkipped(test.name)
else:
result = test.test(args, sanity_targets, virtualenv_python)
else:
result = SanitySkipped(test.name, version)
result.reason = f'Skipping sanity test "{test.name}" on Python {version} due to missing virtual environment support.'
elif isinstance(test, SanityVersionNeutral):
if args.prime_venvs:
result = SanitySkipped(test.name)
else:
# version neutral sanity tests handle their own requirements (if any)
result = test.test(args, sanity_targets)
else:
raise Exception('Unsupported test type: %s' % type(test))
elif result:
pass
else:
result = SanitySkipped(test.name, version)
result.write(args)
total += 1
if isinstance(result, SanityFailure):
failed.append(result.test + options)
controller = args.controller
if created_venvs and isinstance(controller, DockerConfig) and controller.name == 'default' and not args.prime_venvs:
names = ', '.join(created_venvs)
        display.warning(f'The following sanity test virtual environments are out-of-date in the "default" container: {names}')
if failed:
message = 'The %d sanity test(s) listed below (out of %d) failed. See error output above for details.\n%s' % (
len(failed), total, '\n'.join(failed))
if args.failure_ok:
display.error(message)
else:
raise ApplicationError(message)
@cache
def collect_code_smell_tests() -> tuple[SanityTest, ...]:
"""Return a tuple of available code smell sanity tests."""
paths = glob.glob(os.path.join(SANITY_ROOT, 'code-smell', '*.py'))
if data_context().content.is_ansible:
# include Ansible specific code-smell tests which are not configured to be skipped
ansible_code_smell_root = os.path.join(data_context().content.root, 'test', 'sanity', 'code-smell')
skip_tests = read_lines_without_comments(os.path.join(ansible_code_smell_root, 'skip.txt'), remove_blank_lines=True, optional=True)
paths.extend(path for path in glob.glob(os.path.join(ansible_code_smell_root, '*.py')) if os.path.basename(path) not in skip_tests)
tests = tuple(SanityScript.create(p) for p in paths)
return tests
class SanityIgnoreParser:
"""Parser for the consolidated sanity test ignore file."""
NO_CODE = '_'
def __init__(self, args: SanityConfig) -> None:
if data_context().content.collection:
ansible_version = '%s.%s' % tuple(get_ansible_version().split('.')[:2])
ansible_label = 'Ansible %s' % ansible_version
file_name = 'ignore-%s.txt' % ansible_version
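            # e.g. 'ignore-2.19.txt' when testing a collection against ansible-core 2.19 (illustrative version)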
else:
ansible_label = 'Ansible'
file_name = 'ignore.txt'
self.args = args
self.relative_path = os.path.join(data_context().content.sanity_path, file_name)
self.path = os.path.join(data_context().content.root, self.relative_path)
self.ignores: dict[str, dict[str, dict[str, int]]] = collections.defaultdict(lambda: collections.defaultdict(dict))
self.skips: dict[str, dict[str, int]] = collections.defaultdict(lambda: collections.defaultdict(int))
self.parse_errors: list[tuple[int, int, str]] = []
self.file_not_found_errors: list[tuple[int, str]] = []
lines = read_lines_without_comments(self.path, optional=True)
targets = SanityTargets.get_targets()
paths = set(target.path for target in targets)
tests_by_name: dict[str, SanityTest] = {}
versioned_test_names: set[str] = set()
unversioned_test_names: dict[str, str] = {}
directories = paths_to_dirs(list(paths))
paths_by_test: dict[str, set[str]] = {}
display.info('Read %d sanity test ignore line(s) for %s from: %s' % (len(lines), ansible_label, self.relative_path), verbosity=1)
for test in sanity_get_tests():
test_targets = SanityTargets.filter_and_inject_targets(test, targets)
if isinstance(test, SanityMultipleVersion):
versioned_test_names.add(test.name)
for python_version in test.supported_python_versions:
test_name = '%s-%s' % (test.name, python_version)
paths_by_test[test_name] = set(target.path for target in test.filter_targets_by_version(args, test_targets, python_version))
tests_by_name[test_name] = test
else:
unversioned_test_names.update(dict(('%s-%s' % (test.name, python_version), test.name) for python_version in SUPPORTED_PYTHON_VERSIONS))
paths_by_test[test.name] = set(target.path for target in test.filter_targets_by_version(args, test_targets, ''))
tests_by_name[test.name] = test
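        # Each non-comment line takes the form 'path test-name[:error-code]' or 'path test-name!skip'.
        # Illustrative entries (not from a real ignore file):
        #   plugins/modules/foo.py validate-modules:missing-gplv3-license
        #   plugins/modules/foo.py import-3.12!skip
        # Multi-version test names embed the Python version, e.g. 'import-3.12'.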
for line_no, line in enumerate(lines, start=1):
if not line:
self.parse_errors.append((line_no, 1, "Line cannot be empty or contain only a comment"))
continue
parts = line.split(' ')
path = parts[0]
codes = parts[1:]
if not path:
self.parse_errors.append((line_no, 1, "Line cannot start with a space"))
continue
if path.endswith(os.path.sep):
if path not in directories:
self.file_not_found_errors.append((line_no, path))
continue
else:
if path not in paths:
self.file_not_found_errors.append((line_no, path))
continue
if not codes:
self.parse_errors.append((line_no, len(path), "Error code required after path"))
continue
code = codes[0]
if not code:
self.parse_errors.append((line_no, len(path) + 1, "Error code after path cannot be empty"))
continue
if len(codes) > 1:
self.parse_errors.append((line_no, len(path) + len(code) + 2, "Error code cannot contain spaces"))
continue
parts = code.split('!')
code = parts[0]
commands = parts[1:]
parts = code.split(':')
test_name = parts[0]
error_codes = parts[1:]
test = tests_by_name.get(test_name)
if not test:
unversioned_name = unversioned_test_names.get(test_name)
if unversioned_name:
self.parse_errors.append((line_no, len(path) + len(unversioned_name) + 2, "Sanity test '%s' cannot use a Python version like '%s'" % (
unversioned_name, test_name)))
elif test_name in versioned_test_names:
self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires a Python version like '%s-%s'" % (
test_name, test_name, args.controller_python.version)))
else:
self.parse_errors.append((line_no, len(path) + 2, "Sanity test '%s' does not exist" % test_name))
continue
if path.endswith(os.path.sep) and not test.include_directories:
self.parse_errors.append((line_no, 1, "Sanity test '%s' does not support directory paths" % test_name))
continue
if path not in paths_by_test[test_name] and not test.no_targets:
self.parse_errors.append((line_no, 1, "Sanity test '%s' does not test path '%s'" % (test_name, path)))
continue
if commands and error_codes:
self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Error code cannot contain both '!' and ':' characters"))
continue
if commands:
command = commands[0]
if len(commands) > 1:
self.parse_errors.append((line_no, len(path) + len(test_name) + len(command) + 3, "Error code cannot contain multiple '!' characters"))
continue
if command == 'skip':
if not test.can_skip:
self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' cannot be skipped" % test_name))
continue
existing_line_no = self.skips.get(test_name, {}).get(path)
if existing_line_no:
self.parse_errors.append((line_no, 1, "Duplicate '%s' skip for path '%s' first found on line %d" % (test_name, path, existing_line_no)))
continue
self.skips[test_name][path] = line_no
continue
self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Command '!%s' not recognized" % command))
continue
if not test.can_ignore:
self.parse_errors.append((line_no, len(path) + 1, "Sanity test '%s' cannot be ignored" % test_name))
continue
if test.error_code:
if not error_codes:
self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires an error code" % test_name))
continue
error_code = error_codes[0]
if len(error_codes) > 1:
self.parse_errors.append((line_no, len(path) + len(test_name) + len(error_code) + 3, "Error code cannot contain multiple ':' characters"))
continue
if error_code in test.optional_error_codes:
self.parse_errors.append((line_no, len(path) + len(test_name) + 3, "Optional error code '%s' cannot be ignored" % (
error_code)))
continue
else:
if error_codes:
self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' does not support error codes" % test_name))
continue
error_code = self.NO_CODE
existing = self.ignores.get(test_name, {}).get(path, {}).get(error_code)
if existing:
if test.error_code:
self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for error code '%s' for path '%s' first found on line %d" % (
test_name, error_code, path, existing)))
else:
self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for path '%s' first found on line %d" % (
test_name, path, existing)))
continue
self.ignores[test_name][path][error_code] = line_no
@staticmethod
def load(args: SanityConfig) -> SanityIgnoreParser:
"""Return the current SanityIgnore instance, initializing it if needed."""
try:
return SanityIgnoreParser.instance # type: ignore[attr-defined]
except AttributeError:
pass
instance = SanityIgnoreParser(args)
SanityIgnoreParser.instance = instance # type: ignore[attr-defined]
return instance
class SanityIgnoreProcessor:
"""Processor for sanity test ignores for a single run of one sanity test."""
def __init__(
self,
args: SanityConfig,
test: SanityTest,
python_version: t.Optional[str],
) -> None:
name = test.name
code = test.error_code
if python_version:
full_name = '%s-%s' % (name, python_version)
else:
full_name = name
self.args = args
self.test = test
self.code = code
self.parser = SanityIgnoreParser.load(args)
self.ignore_entries = self.parser.ignores.get(full_name, {})
self.skip_entries = self.parser.skips.get(full_name, {})
self.used_line_numbers: set[int] = set()
def filter_skipped_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
"""Return the given targets, with any skipped paths filtered out."""
return sorted(target for target in targets if target.path not in self.skip_entries)
def process_errors(self, errors: list[SanityMessage], paths: list[str]) -> list[SanityMessage]:
"""Return the given errors filtered for ignores and with any settings related errors included."""
errors = self.filter_messages(errors)
errors.extend(self.get_errors(paths))
errors = sorted(set(errors))
return errors
def filter_messages(self, messages: list[SanityMessage]) -> list[SanityMessage]:
"""Return a filtered list of the given messages using the entries that have been loaded."""
filtered = []
for message in messages:
if message.code in self.test.optional_error_codes and not self.args.enable_optional_errors:
continue
path_entry = self.ignore_entries.get(message.path)
if path_entry:
code = message.code if self.code else SanityIgnoreParser.NO_CODE
line_no = path_entry.get(code)
if line_no:
self.used_line_numbers.add(line_no)
continue
filtered.append(message)
return filtered
def get_errors(self, paths: list[str]) -> list[SanityMessage]:
"""Return error messages related to issues with the file."""
messages: list[SanityMessage] = []
# unused errors
unused: list[tuple[int, str, str]] = []
if self.test.no_targets or self.test.all_targets:
# tests which do not accept a target list, or which use all targets, always return all possible errors, so all ignores can be checked
targets = SanityTargets.get_targets()
test_targets = SanityTargets.filter_and_inject_targets(self.test, targets)
paths = [target.path for target in test_targets]
for path in paths:
path_entry = self.ignore_entries.get(path)
if not path_entry:
continue
unused.extend((line_no, path, code) for code, line_no in path_entry.items() if line_no not in self.used_line_numbers)
messages.extend(SanityMessage(
code=self.code,
message="Ignoring '%s' on '%s' is unnecessary" % (code, path) if self.code else "Ignoring '%s' is unnecessary" % path,
path=self.parser.relative_path,
line=line,
column=1,
confidence=calculate_best_confidence(((self.parser.path, line), (path, 0)), self.args.metadata) if self.args.metadata.changes else None,
) for line, path, code in unused)
return messages
class SanitySuccess(TestSuccess):
"""Sanity test success."""
def __init__(self, test: str, python_version: t.Optional[str] = None) -> None:
super().__init__(COMMAND, test, python_version)
class SanitySkipped(TestSkipped):
"""Sanity test skipped."""
def __init__(self, test: str, python_version: t.Optional[str] = None) -> None:
super().__init__(COMMAND, test, python_version)
class SanityFailure(TestFailure):
"""Sanity test failure."""
def __init__(
self,
test: str,
python_version: t.Optional[str] = None,
messages: t.Optional[c.Sequence[SanityMessage]] = None,
summary: t.Optional[str] = None,
) -> None:
super().__init__(COMMAND, test, python_version, messages, summary)
class SanityMessage(TestMessage):
"""Single sanity test message for one file."""
class SanityTargets:
"""Sanity test target information."""
def __init__(self, targets: tuple[TestTarget, ...], include: tuple[TestTarget, ...]) -> None:
self.targets = targets
self.include = include
@staticmethod
def create(include: list[str], exclude: list[str], require: list[str]) -> SanityTargets:
"""Create a SanityTargets instance from the given include, exclude and require lists."""
_targets = SanityTargets.get_targets()
_include = walk_internal_targets(_targets, include, exclude, require)
return SanityTargets(_targets, _include)
@staticmethod
def filter_and_inject_targets(test: SanityTest, targets: c.Iterable[TestTarget]) -> list[TestTarget]:
"""Filter and inject targets based on test requirements and the given target list."""
test_targets = list(targets)
if not test.include_symlinks:
# remove all symlinks unless supported by the test
test_targets = [target for target in test_targets if not target.symlink]
if not test.include_directories or not test.include_symlinks:
# exclude symlinked directories unless supported by the test
test_targets = [target for target in test_targets if not target.path.endswith(os.path.sep)]
if test.include_directories:
# include directories containing any of the included files
test_targets += tuple(TestTarget(path, None, None, '') for path in paths_to_dirs([target.path for target in test_targets]))
if not test.include_symlinks:
# remove all directory symlinks unless supported by the test
test_targets = [target for target in test_targets if not target.symlink]
return test_targets
@staticmethod
def get_targets() -> tuple[TestTarget, ...]:
"""Return a tuple of sanity test targets. Uses a cached version when available."""
try:
return SanityTargets.get_targets.targets # type: ignore[attr-defined]
except AttributeError:
targets = tuple(sorted(walk_sanity_targets()))
SanityTargets.get_targets.targets = targets # type: ignore[attr-defined]
return targets
class SanityTest(metaclass=abc.ABCMeta):
"""Sanity test base class."""
ansible_only = False
def __init__(self, name: t.Optional[str] = None) -> None:
if not name:
name = self.__class__.__name__
name = re.sub(r'Test$', '', name) # drop Test suffix
name = re.sub(r'(.)([A-Z][a-z]+)', r'\1-\2', name).lower() # use dashes instead of capitalization
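            # e.g. the class name 'AnsibleDocTest' becomes the test name 'ansible-doc'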
self.name = name
self.enabled = True
# Optional error codes represent errors which spontaneously occur without changes to the content under test, such as those based on the current date.
# Because these errors can be unpredictable they behave differently than normal error codes:
# * They are not reported by default. The `--enable-optional-errors` option must be used to display these errors.
# * They cannot be ignored. This is done to maintain the integrity of the ignore system.
self.optional_error_codes: set[str] = set()
@property
def error_code(self) -> t.Optional[str]:
"""Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
return None
@property
def can_ignore(self) -> bool:
"""True if the test supports ignore entries."""
return True
@property
def can_skip(self) -> bool:
"""True if the test supports skip entries."""
return not self.all_targets and not self.no_targets
@property
def all_targets(self) -> bool:
"""True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets."""
return False
@property
def no_targets(self) -> bool:
"""True if the test does not use test targets. Mutually exclusive with all_targets."""
return False
@property
def include_directories(self) -> bool:
"""True if the test targets should include directories."""
return False
@property
def include_symlinks(self) -> bool:
"""True if the test targets should include symlinks."""
return False
@property
def supported_python_versions(self) -> t.Optional[tuple[str, ...]]:
"""A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
return CONTROLLER_PYTHON_VERSIONS
def origin_hook(self, args: SanityConfig) -> None:
"""This method is called on the origin, before the test runs or delegation occurs."""
def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]: # pylint: disable=unused-argument
"""Return the given list of test targets, filtered to include only those relevant for the test."""
if self.no_targets:
return []
raise NotImplementedError('Sanity test "%s" must implement "filter_targets" or set "no_targets" to True.' % self.name)
def filter_targets_by_version(self, args: SanityConfig, targets: list[TestTarget], python_version: str) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test, taking into account the Python version."""
del args # args is not used here, but derived classes may make use of it
del python_version # python_version is not used here, but derived classes may make use of it
targets = self.filter_targets(targets)
return targets
@staticmethod
def filter_remote_targets(targets: list[TestTarget]) -> list[TestTarget]:
"""Return a filtered list of the given targets, including only those that require support for remote-only Python versions."""
targets = [target for target in targets if (
is_subdir(target.path, data_context().content.module_path) or
is_subdir(target.path, data_context().content.module_utils_path) or
is_subdir(target.path, data_context().content.unit_module_path) or
is_subdir(target.path, data_context().content.unit_module_utils_path) or
# include modules/module_utils within integration test library directories
re.search('^%s/.*/library/' % re.escape(data_context().content.integration_targets_path), target.path) or
# special handling for content in ansible-core
(data_context().content.is_ansible and (
# utility code that runs in target environments and requires support for remote-only Python versions
is_subdir(target.path, 'test/lib/ansible_test/_util/target/') or
# integration test support modules/module_utils continue to require support for remote-only Python versions
re.search('^test/support/integration/.*/(modules|module_utils)/', target.path) or
# collection loader requires support for remote-only Python versions
re.search('^lib/ansible/utils/collection_loader/', target.path)
))
)]
return targets
class SanitySingleVersion(SanityTest, metaclass=abc.ABCMeta):
"""Base class for sanity test plugins which should run on a single python version."""
@property
def require_libyaml(self) -> bool:
"""True if the test requires PyYAML to have libyaml support."""
return False
@abc.abstractmethod
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
"""Run the sanity test and return the result."""
def load_processor(self, args: SanityConfig) -> SanityIgnoreProcessor:
"""Load the ignore processor for this sanity test."""
return SanityIgnoreProcessor(args, self, None)
class SanityScript(SanityTest, metaclass=abc.ABCMeta):
"""Base class for sanity test scripts."""
@classmethod
def create(cls, path: str) -> SanityScript:
"""Create and return a SanityScript instance from the given path."""
name = os.path.splitext(os.path.basename(path))[0]
config_path = os.path.splitext(path)[0] + '.json'
if os.path.exists(config_path):
config = read_json_file(config_path)
else:
config = None
instance: SanityScript
        if config and config.get('multi_version'):  # config is None when no adjacent .json config file exists
instance = SanityScriptMultipleVersion(name=name, path=path, config=config)
else:
instance = SanityScriptSingleVersion(name=name, path=path, config=config)
return instance
def __init__(self, name: str, path: str, config: dict[str, t.Any] | None) -> None:
super().__init__(name=name)
self.path = path
self.config = config
if self.config:
self.enabled = not self.config.get('disabled')
self.output: t.Optional[str] = self.config.get('output')
self.extensions: list[str] = self.config.get('extensions')
self.prefixes: list[str] = self.config.get('prefixes')
self.files: list[str] = self.config.get('files')
self.text: t.Optional[bool] = self.config.get('text')
self.ignore_self: bool = self.config.get('ignore_self')
self.controller_only: bool = self.config.get('controller_only')
self.min_max_python_only: bool = self.config.get('min_max_python_only')
self.minimum_python_version: t.Optional[str] = self.config.get('minimum_python_version')
self.maximum_python_version: t.Optional[str] = self.config.get('maximum_python_version')
self.__all_targets: bool = self.config.get('all_targets')
self.__no_targets: bool = self.config.get('no_targets')
self.__include_directories: bool = self.config.get('include_directories')
self.__include_symlinks: bool = self.config.get('include_symlinks')
self.__error_code: str | None = self.config.get('error_code', None)
else:
self.output = None
self.extensions = []
self.prefixes = []
self.files = []
self.text = None
self.ignore_self = False
self.controller_only = False
self.min_max_python_only = False
self.minimum_python_version = None
self.maximum_python_version = None
self.__all_targets = False
self.__no_targets = True
self.__include_directories = False
self.__include_symlinks = False
self.__error_code = None
if self.no_targets:
mutually_exclusive = (
'extensions',
'prefixes',
'files',
'text',
'ignore_self',
'all_targets',
'include_directories',
'include_symlinks',
)
problems = sorted(name for name in mutually_exclusive if getattr(self, name))
if problems:
raise ApplicationError('Sanity test "%s" option "no_targets" is mutually exclusive with options: %s' % (self.name, ', '.join(problems)))
@property
def error_code(self) -> t.Optional[str]:
"""Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
return self.__error_code
@property
def all_targets(self) -> bool:
"""True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets."""
return self.__all_targets
@property
def no_targets(self) -> bool:
"""True if the test does not use test targets. Mutually exclusive with all_targets."""
return self.__no_targets
@property
def include_directories(self) -> bool:
"""True if the test targets should include directories."""
return self.__include_directories
@property
def include_symlinks(self) -> bool:
"""True if the test targets should include symlinks."""
return self.__include_symlinks
@property
def supported_python_versions(self) -> t.Optional[tuple[str, ...]]:
"""A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
versions = super().supported_python_versions
if self.controller_only:
versions = tuple(version for version in versions if version in CONTROLLER_PYTHON_VERSIONS)
if self.minimum_python_version:
versions = tuple(version for version in versions if str_to_version(version) >= str_to_version(self.minimum_python_version))
if self.maximum_python_version:
versions = tuple(version for version in versions if str_to_version(version) <= str_to_version(self.maximum_python_version))
if self.min_max_python_only:
versions = versions[0], versions[-1]
return versions
def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test."""
if self.no_targets:
return []
if self.text is not None:
if self.text:
targets = [target for target in targets if not is_binary_file(target.path)]
else:
targets = [target for target in targets if is_binary_file(target.path)]
if self.extensions:
targets = [target for target in targets if os.path.splitext(target.path)[1] in self.extensions
or (is_subdir(target.path, 'bin') and '.py' in self.extensions)]
if self.prefixes:
targets = [target for target in targets if any(target.path.startswith(pre) for pre in self.prefixes)]
if self.files:
targets = [target for target in targets if os.path.basename(target.path) in self.files]
if self.ignore_self and data_context().content.is_ansible:
relative_self_path = os.path.relpath(self.path, data_context().content.root)
targets = [target for target in targets if target.path != relative_self_path]
return targets
def test_script(self, args: SanityConfig, targets: SanityTargets, virtualenv_python: PythonConfig, python: PythonConfig) -> TestResult:
"""Run the sanity test and return the result."""
cmd = [virtualenv_python.path, self.path]
env = ansible_environment(args, color=False)
env.update(
PYTHONUTF8='1', # force all code-smell sanity tests to run with Python UTF-8 Mode enabled
ANSIBLE_TEST_TARGET_PYTHON_VERSION=python.version,
ANSIBLE_TEST_CONTROLLER_PYTHON_VERSIONS=','.join(CONTROLLER_PYTHON_VERSIONS),
ANSIBLE_TEST_REMOTE_ONLY_PYTHON_VERSIONS=','.join(REMOTE_ONLY_PYTHON_VERSIONS),
)
if self.min_max_python_only:
min_python, max_python = self.supported_python_versions
env.update(ANSIBLE_TEST_MIN_PYTHON=min_python)
env.update(ANSIBLE_TEST_MAX_PYTHON=max_python)
pattern = None
data = None
settings = self.conditionally_load_processor(args, python.version)
paths = [target.path for target in targets.include]
if self.config:
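            # Map the configured 'output' style to a parsing regex; illustrative lines each style matches:
            #   path-line-column-message:      'lib/foo.py:10:1: message text'
            #   path-message:                  'lib/foo.py: message text'
            #   path-line-column-code-message: 'lib/foo.py:10:1: E101: message text'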
if self.output == 'path-line-column-message':
pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
elif self.output == 'path-message':
pattern = '^(?P<path>[^:]*): (?P<message>.*)$'
elif self.output == 'path-line-column-code-message':
pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<code>[^:]*): (?P<message>.*)$'
else:
raise ApplicationError('Unsupported output type: %s' % self.output)
if not self.no_targets:
data = '\n'.join(paths)
if data:
display.info(data, verbosity=4)
try:
stdout, stderr = intercept_python(args, virtualenv_python, cmd, data=data, env=env, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if args.explain:
return SanitySuccess(self.name)
if stdout and not stderr:
if pattern:
matches = parse_to_list_of_dict(pattern, stdout)
messages = [SanityMessage(
message=m['message'],
path=m['path'],
line=int(m.get('line', 0)),
column=int(m.get('column', 0)),
code=m.get('code'),
) for m in matches]
messages = settings.process_errors(messages, paths)
if not messages:
return SanitySuccess(self.name)
return SanityFailure(self.name, messages=messages)
if stderr or status:
summary = '%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
return SanityFailure(self.name, summary=summary)
messages = settings.process_errors([], paths)
if messages:
return SanityFailure(self.name, messages=messages)
return SanitySuccess(self.name)
@abc.abstractmethod
def conditionally_load_processor(self, args: SanityConfig, python_version: str) -> SanityIgnoreProcessor:
"""Load the ignore processor for this sanity test."""
class SanityVersionNeutral(SanityTest, metaclass=abc.ABCMeta):
"""Base class for sanity test plugins which are idependent of the python version being used."""
@abc.abstractmethod
def test(self, args: SanityConfig, targets: SanityTargets) -> TestResult:
"""Run the sanity test and return the result."""
def load_processor(self, args: SanityConfig) -> SanityIgnoreProcessor:
"""Load the ignore processor for this sanity test."""
return SanityIgnoreProcessor(args, self, None)
@property
def supported_python_versions(self) -> t.Optional[tuple[str, ...]]:
"""A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
return None
class SanityMultipleVersion(SanityTest, metaclass=abc.ABCMeta):
"""Base class for sanity test plugins which should run on multiple python versions."""
@abc.abstractmethod
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
"""Run the sanity test and return the result."""
def load_processor(self, args: SanityConfig, python_version: str) -> SanityIgnoreProcessor:
"""Load the ignore processor for this sanity test."""
return SanityIgnoreProcessor(args, self, python_version)
@property
def needs_pypi(self) -> bool:
"""True if the test requires PyPI, otherwise False."""
return False
@property
def supported_python_versions(self) -> t.Optional[tuple[str, ...]]:
"""A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
return SUPPORTED_PYTHON_VERSIONS
def filter_targets_by_version(self, args: SanityConfig, targets: list[TestTarget], python_version: str) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test, taking into account the Python version."""
if not python_version:
raise Exception('python_version is required to filter multi-version tests')
targets = super().filter_targets_by_version(args, targets, python_version)
if python_version in REMOTE_ONLY_PYTHON_VERSIONS:
content_config = get_content_config(args)
if python_version not in content_config.modules.python_versions:
# when a remote-only python version is not supported there are no paths to test
return []
# when a remote-only python version is supported, tests must be applied only to targets that support remote-only Python versions
targets = self.filter_remote_targets(targets)
return targets
class SanityScriptSingleVersion(SanityScript, SanitySingleVersion):
"""External sanity test script which should run on a single python version."""
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
"""Run the sanity test and return the result."""
return super().test_script(args, targets, python, python)
def conditionally_load_processor(self, args: SanityConfig, python_version: str) -> SanityIgnoreProcessor:
"""Load the ignore processor for this sanity test."""
return SanityIgnoreProcessor(args, self, None)
class SanityScriptMultipleVersion(SanityScript, SanityMultipleVersion):
"""External sanity test script which should run on multiple python versions."""
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
"""Run the sanity test and return the result."""
multi_version = self.config['multi_version']
if multi_version == 'controller':
virtualenv_python_config = args.controller_python
elif multi_version == 'target':
virtualenv_python_config = python
else:
raise NotImplementedError(f'{multi_version=}')
virtualenv_python = create_sanity_virtualenv(args, virtualenv_python_config, self.name)
if not virtualenv_python:
result = SanitySkipped(self.name, python.version)
result.reason = f'Skipping sanity test "{self.name}" due to missing virtual environment support on Python {virtualenv_python_config.version}.'
return result
if args.prime_venvs:
return SanitySkipped(self.name, python.version)
return super().test_script(args, targets, virtualenv_python, python)
def conditionally_load_processor(self, args: SanityConfig, python_version: str) -> SanityIgnoreProcessor:
"""Load the ignore processor for this sanity test."""
return SanityIgnoreProcessor(args, self, python_version)
@cache
def sanity_get_tests() -> tuple[SanityTest, ...]:
"""Return a tuple of the available sanity tests."""
import_plugins('commands/sanity')
sanity_plugins: dict[str, t.Type[SanityTest]] = {}
load_plugins(SanityTest, sanity_plugins)
sanity_plugins.pop('sanity') # SanityCodeSmellTest
sanity_tests = tuple(plugin() for plugin in sanity_plugins.values() if data_context().content.is_ansible or not plugin.ansible_only)
sanity_tests = tuple(sorted(sanity_tests + collect_code_smell_tests(), key=lambda k: k.name))
return sanity_tests
def create_sanity_virtualenv(
args: SanityConfig,
python: PythonConfig,
name: str,
coverage: bool = False,
minimize: bool = False,
) -> t.Optional[VirtualPythonConfig]:
"""Return an existing sanity virtual environment matching the requested parameters or create a new one."""
commands = collect_requirements( # create_sanity_virtualenv()
python=python,
controller=True,
command=None,
ansible=False,
coverage=coverage,
minimize=minimize,
sanity=name,
)
if commands:
label = f'sanity.{name}'
else:
label = 'sanity' # use a single virtualenv name for tests which have no requirements
# The path to the virtual environment must be kept short to avoid the 127 character shebang length limit on Linux.
# If the limit is exceeded, generated entry point scripts from pip installed packages will fail with syntax errors.
virtualenv_install = json.dumps([command.serialize() for command in commands], indent=4)
virtualenv_hash = hash_pip_commands(commands)
virtualenv_cache = os.path.join(os.path.expanduser('~/.ansible/test/venv'))
virtualenv_path = os.path.join(virtualenv_cache, label, f'{python.version}', virtualenv_hash)
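    # e.g. ~/.ansible/test/venv/sanity.pylint/3.12/1a2b3c4d (illustrative label, Python version and hash)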
virtualenv_marker = os.path.join(virtualenv_path, 'marker.txt')
meta_install = os.path.join(virtualenv_path, 'meta.install.json')
meta_yaml = os.path.join(virtualenv_path, 'meta.yaml.json')
virtualenv_python = VirtualPythonConfig(
version=python.version,
path=os.path.join(virtualenv_path, 'bin', 'python'),
)
if not os.path.exists(virtualenv_marker):
# a virtualenv without a marker is assumed to have been partially created
remove_tree(virtualenv_path)
if not create_virtual_environment(args, python, virtualenv_path):
return None
run_pip(args, virtualenv_python, commands, None) # create_sanity_virtualenv()
if not args.explain:
write_text_file(meta_install, virtualenv_install)
# false positive: pylint: disable=no-member
if any(isinstance(command, PipInstall) and command.has_package('pyyaml') for command in commands):
virtualenv_yaml = yamlcheck(virtualenv_python, args.explain)
else:
virtualenv_yaml = None
if not args.explain:
write_json_file(meta_yaml, virtualenv_yaml)
created_venvs.append(f'{label}-{python.version}')
if not args.explain:
# touch the marker to keep track of when the virtualenv was last used
pathlib.Path(virtualenv_marker).touch()
return virtualenv_python
def hash_pip_commands(commands: list[PipCommand]) -> str:
"""Return a short hash unique to the given list of pip commands, suitable for identifying the resulting sanity test environment."""
serialized_commands = json.dumps([make_pip_command_hashable(command) for command in commands], indent=4)
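    # The first 8 hex digits of the SHA-256 are enough to distinguish environments, e.g. '1a2b3c4d' (illustrative).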
return hashlib.sha256(to_bytes(serialized_commands)).hexdigest()[:8]
def make_pip_command_hashable(command: PipCommand) -> tuple[str, dict[str, t.Any]]:
"""Return a serialized version of the given pip command that is suitable for hashing."""
if isinstance(command, PipInstall):
# The pre-build instructions for pip installs must be omitted, so they do not affect the hash.
        # This allows the pre-build commands to be added without breaking sanity venv caching.
# It is safe to omit these from the hash since they only affect packages used during builds, not what is installed in the venv.
command = PipInstall(
requirements=[omit_pre_build_from_requirement(*req) for req in command.requirements],
constraints=list(command.constraints),
packages=list(command.packages),
)
return command.serialize()
def omit_pre_build_from_requirement(path: str, requirements: str) -> tuple[str, str]:
"""Return the given requirements with pre-build instructions omitted."""
lines = requirements.splitlines(keepends=True)
# CAUTION: This code must be kept in sync with the code which processes pre-build instructions in:
# test/lib/ansible_test/_util/target/setup/requirements.py
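    # Matching lines start with '# pre-build ', e.g. '# pre-build requirement: pyyaml' (illustrative example).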
lines = [line for line in lines if not line.startswith('# pre-build ')]
return path, ''.join(lines)
def check_sanity_virtualenv_yaml(python: VirtualPythonConfig) -> t.Optional[bool]:
"""Return True if PyYAML has libyaml support for the given sanity virtual environment, False if it does not and None if it was not found."""
virtualenv_path = os.path.dirname(os.path.dirname(python.path))
meta_yaml = os.path.join(virtualenv_path, 'meta.yaml.json')
virtualenv_yaml = read_json_file(meta_yaml)
return virtualenv_yaml
| 53,388 | Python | .py | 981 | 43.627931 | 160 | 0.635927 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,188 | integration_aliases.py | ansible_ansible/test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py |
"""Sanity test to check integration test aliases."""
from __future__ import annotations
import dataclasses
import json
import textwrap
import os
import re
import typing as t
from . import (
SanitySingleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanityTargets,
SANITY_ROOT,
)
from ...test import (
TestResult,
)
from ...config import (
SanityConfig,
)
from ...target import (
filter_targets,
walk_posix_integration_targets,
walk_windows_integration_targets,
walk_integration_targets,
walk_module_targets,
CompletionTarget,
IntegrationTargetType,
)
from ..integration.cloud import (
get_cloud_platforms,
)
from ...io import (
read_text_file,
)
from ...util import (
display,
raw_command,
)
from ...util_common import (
get_docs_url,
write_json_test_results,
ResultType,
)
from ...host_configs import (
PythonConfig,
)
class IntegrationAliasesTest(SanitySingleVersion):
"""Sanity test to evaluate integration test aliases."""
CI_YML = '.azure-pipelines/azure-pipelines.yml'
TEST_ALIAS_PREFIX = 'shippable' # this will be changed at some point in the future
DISABLED = 'disabled/'
UNSTABLE = 'unstable/'
UNSUPPORTED = 'unsupported/'
EXPLAIN_URL = get_docs_url('https://docs.ansible.com/ansible-core/devel/dev_guide/testing/sanity/integration-aliases.html')
TEMPLATE_DISABLED = """
The following integration tests are **disabled** [[explain]({explain_url}#disabled)]:
{tests}
Consider fixing the integration tests before or alongside changes.
"""
TEMPLATE_UNSTABLE = """
The following integration tests are **unstable** [[explain]({explain_url}#unstable)]:
{tests}
Tests may need to be restarted due to failures unrelated to changes.
"""
TEMPLATE_UNSUPPORTED = """
The following integration tests are **unsupported** [[explain]({explain_url}#unsupported)]:
{tests}
Consider running the tests manually or extending test infrastructure to add support.
"""
TEMPLATE_UNTESTED = """
The following modules have **no integration tests** [[explain]({explain_url}#untested)]:
{tests}
Consider adding integration tests before or alongside changes.
"""
ansible_only = True
def __init__(self) -> None:
super().__init__()
self._ci_config: dict[str, t.Any] = {}
self._ci_test_groups: dict[str, list[int]] = {}
@property
def can_ignore(self) -> bool:
"""True if the test supports ignore entries."""
return False
@property
def no_targets(self) -> bool:
"""True if the test does not use test targets. Mutually exclusive with all_targets."""
return True
def load_ci_config(self, python: PythonConfig) -> dict[str, t.Any]:
"""Load and return the CI YAML configuration."""
if not self._ci_config:
self._ci_config = self.load_yaml(python, self.CI_YML)
return self._ci_config
@property
def ci_test_groups(self) -> dict[str, list[int]]:
"""Return a dictionary of CI test names and their group(s)."""
if not self._ci_test_groups:
test_groups: dict[str, set[int]] = {}
for stage in self._ci_config['stages']:
for job in stage['jobs']:
if job.get('template') != 'templates/matrix.yml':
continue
parameters = job['parameters']
groups = parameters.get('groups', [])
test_format = parameters.get('testFormat', '{0}')
test_group_format = parameters.get('groupFormat', '{0}/{{1}}')
for target in parameters['targets']:
test = target.get('test') or target.get('name')
if groups:
tests_formatted = [test_group_format.format(test_format).format(test, group) for group in groups]
else:
tests_formatted = [test_format.format(test)]
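                        # e.g. a formatted test like 'linux/fedora39/3' (illustrative) splits into key 'linux' and group 3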
for test_formatted in tests_formatted:
parts = test_formatted.split('/')
key = parts[0]
if key in ('sanity', 'units'):
continue
try:
group = int(parts[-1])
except ValueError:
continue
if group < 1 or group > 99:
continue
group_set = test_groups.setdefault(key, set())
group_set.add(group)
self._ci_test_groups = dict((key, sorted(value)) for key, value in test_groups.items())
return self._ci_test_groups
def format_test_group_alias(self, name: str, fallback: str = '') -> str:
"""Return a test group alias using the given name and fallback."""
group_numbers = self.ci_test_groups.get(name, None)
if group_numbers:
if min(group_numbers) != 1:
display.warning('Min test group "%s" in %s is %d instead of 1.' % (name, self.CI_YML, min(group_numbers)), unique=True)
if max(group_numbers) != len(group_numbers):
display.warning('Max test group "%s" in %s is %d instead of %d.' % (name, self.CI_YML, max(group_numbers), len(group_numbers)), unique=True)
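            # Three alias shapes (illustrative): 'group(1|2|...|12)/' for more than 9 groups,
            # 'group[1-5]/' for 2-9 groups, and 'group1/' for a single group.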
if max(group_numbers) > 9:
alias = '%s/%s/group(%s)/' % (self.TEST_ALIAS_PREFIX, name, '|'.join(str(i) for i in range(min(group_numbers), max(group_numbers) + 1)))
elif len(group_numbers) > 1:
alias = '%s/%s/group[%d-%d]/' % (self.TEST_ALIAS_PREFIX, name, min(group_numbers), max(group_numbers))
else:
alias = '%s/%s/group%d/' % (self.TEST_ALIAS_PREFIX, name, min(group_numbers))
elif fallback:
alias = '%s/%s/group%d/' % (self.TEST_ALIAS_PREFIX, fallback, 1)
else:
raise Exception('cannot find test group "%s" in %s' % (name, self.CI_YML))
return alias
def load_yaml(self, python: PythonConfig, path: str) -> dict[str, t.Any]:
"""Load the specified YAML file and return the contents."""
yaml_to_json_path = os.path.join(SANITY_ROOT, self.name, 'yaml_to_json.py')
return json.loads(raw_command([python.path, yaml_to_json_path], data=read_text_file(path), capture=True)[0])
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
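        """Run the sanity test and return the result."""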
if args.explain:
return SanitySuccess(self.name)
if not os.path.isfile(self.CI_YML):
return SanityFailure(self.name, messages=[SanityMessage(
message='file missing',
path=self.CI_YML,
)])
results = Results(
comments=[],
labels={},
)
self.load_ci_config(python)
self.check_changes(args, results)
write_json_test_results(ResultType.BOT, 'data-sanity-ci.json', results.__dict__)
messages = []
messages += self.check_posix_targets(args)
messages += self.check_windows_targets()
if messages:
return SanityFailure(self.name, messages=messages)
return SanitySuccess(self.name)
def check_posix_targets(self, args: SanityConfig) -> list[SanityMessage]:
"""Check POSIX integration test targets and return messages with any issues found."""
posix_targets = tuple(walk_posix_integration_targets())
clouds = get_cloud_platforms(args, posix_targets)
cloud_targets = ['cloud/%s/' % cloud for cloud in clouds]
all_cloud_targets = tuple(filter_targets(posix_targets, ['cloud/'], errors=False))
invalid_cloud_targets = tuple(filter_targets(all_cloud_targets, cloud_targets, include=False, errors=False))
messages = []
for target in invalid_cloud_targets:
for alias in target.aliases:
if alias.startswith('cloud/') and alias != 'cloud/':
if any(alias.startswith(cloud_target) for cloud_target in cloud_targets):
continue
messages.append(SanityMessage('invalid alias `%s`' % alias, '%s/aliases' % target.path))
messages += self.check_ci_group(
targets=tuple(filter_targets(posix_targets, ['cloud/', '%s/generic/' % self.TEST_ALIAS_PREFIX], include=False, errors=False)),
find=self.format_test_group_alias('linux').replace('linux', 'posix'),
find_incidental=['%s/posix/incidental/' % self.TEST_ALIAS_PREFIX],
)
messages += self.check_ci_group(
targets=tuple(filter_targets(posix_targets, ['%s/generic/' % self.TEST_ALIAS_PREFIX], errors=False)),
find=self.format_test_group_alias('generic'),
)
for cloud in clouds:
if cloud == 'httptester':
find = self.format_test_group_alias('linux').replace('linux', 'posix')
find_incidental = ['%s/posix/incidental/' % self.TEST_ALIAS_PREFIX]
else:
find = self.format_test_group_alias(cloud, 'generic')
find_incidental = ['%s/%s/incidental/' % (self.TEST_ALIAS_PREFIX, cloud), '%s/cloud/incidental/' % self.TEST_ALIAS_PREFIX]
messages += self.check_ci_group(
targets=tuple(filter_targets(posix_targets, ['cloud/%s/' % cloud], errors=False)),
find=find,
find_incidental=find_incidental,
)
target_type_groups = {
IntegrationTargetType.TARGET: (1, 2),
IntegrationTargetType.CONTROLLER: (3, 4, 5),
IntegrationTargetType.CONFLICT: (),
IntegrationTargetType.UNKNOWN: (),
}
for target in posix_targets:
if target.name == 'ansible-test-container':
continue # special test target which uses group 6 -- nothing else should be in that group
if f'{self.TEST_ALIAS_PREFIX}/posix/' not in target.aliases:
continue
found_groups = [alias for alias in target.aliases if re.search(f'^{self.TEST_ALIAS_PREFIX}/posix/group[0-9]+/$', alias)]
expected_groups = [f'{self.TEST_ALIAS_PREFIX}/posix/group{group}/' for group in target_type_groups[target.target_type]]
valid_groups = [group for group in found_groups if group in expected_groups]
invalid_groups = [group for group in found_groups if not any(group.startswith(expected_group) for expected_group in expected_groups)]
if not valid_groups:
messages.append(SanityMessage(f'Target of type {target.target_type.name} must be in at least one of these groups: {", ".join(expected_groups)}',
f'{target.path}/aliases'))
if invalid_groups:
messages.append(SanityMessage(f'Target of type {target.target_type.name} cannot be in these groups: {", ".join(invalid_groups)}',
f'{target.path}/aliases'))
return messages
def check_windows_targets(self) -> list[SanityMessage]:
"""Check Windows integration test targets and return messages with any issues found."""
windows_targets = tuple(walk_windows_integration_targets())
messages = []
messages += self.check_ci_group(
targets=windows_targets,
find=self.format_test_group_alias('windows'),
find_incidental=['%s/windows/incidental/' % self.TEST_ALIAS_PREFIX],
)
return messages
def check_ci_group(
self,
targets: tuple[CompletionTarget, ...],
find: str,
find_incidental: t.Optional[list[str]] = None,
) -> list[SanityMessage]:
"""Check the CI groups set in the provided targets and return a list of messages with any issues found."""
all_paths = set(target.path for target in targets)
supported_paths = set(target.path for target in filter_targets(targets, [find], errors=False))
unsupported_paths = set(target.path for target in filter_targets(targets, [self.UNSUPPORTED], errors=False))
if find_incidental:
incidental_paths = set(target.path for target in filter_targets(targets, find_incidental, errors=False))
else:
incidental_paths = set()
unassigned_paths = all_paths - supported_paths - unsupported_paths - incidental_paths
conflicting_paths = supported_paths & unsupported_paths
unassigned_message = 'missing alias `%s` or `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/'))
conflicting_message = 'conflicting alias `%s` and `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/'))
messages = []
for path in unassigned_paths:
if path == 'test/integration/targets/ansible-test-container':
continue # special test target which uses group 6 -- nothing else should be in that group
messages.append(SanityMessage(unassigned_message, '%s/aliases' % path))
for path in conflicting_paths:
messages.append(SanityMessage(conflicting_message, '%s/aliases' % path))
return messages
def check_changes(self, args: SanityConfig, results: Results) -> None:
"""Check changes and store results in the provided result dictionary."""
integration_targets = list(walk_integration_targets())
module_targets = list(walk_module_targets())
integration_targets_by_name = dict((target.name, target) for target in integration_targets)
module_names_by_path = dict((target.path, target.module) for target in module_targets)
disabled_targets = []
unstable_targets = []
unsupported_targets = []
for command in [command for command in args.metadata.change_description.focused_command_targets if 'integration' in command]:
for target in args.metadata.change_description.focused_command_targets[command]:
if self.DISABLED in integration_targets_by_name[target].aliases:
disabled_targets.append(target)
elif self.UNSTABLE in integration_targets_by_name[target].aliases:
unstable_targets.append(target)
elif self.UNSUPPORTED in integration_targets_by_name[target].aliases:
unsupported_targets.append(target)
untested_modules = []
for path in args.metadata.change_description.no_integration_paths:
module = module_names_by_path.get(path)
if module:
untested_modules.append(module)
comments = [
self.format_comment(self.TEMPLATE_DISABLED, disabled_targets),
self.format_comment(self.TEMPLATE_UNSTABLE, unstable_targets),
self.format_comment(self.TEMPLATE_UNSUPPORTED, unsupported_targets),
self.format_comment(self.TEMPLATE_UNTESTED, untested_modules),
]
comments = [comment for comment in comments if comment]
labels = dict(
needs_tests=bool(untested_modules),
disabled_tests=bool(disabled_targets),
unstable_tests=bool(unstable_targets),
unsupported_tests=bool(unsupported_targets),
)
results.comments += comments
results.labels.update(labels)
def format_comment(self, template: str, targets: list[str]) -> t.Optional[str]:
"""Format and return a comment based on the given template and targets, or None if there are no targets."""
if not targets:
return None
tests = '\n'.join('- %s' % target for target in targets)
data = dict(
explain_url=self.EXPLAIN_URL,
tests=tests,
)
message = textwrap.dedent(template).strip().format(**data)
return message
@dataclasses.dataclass
class Results:
"""Check results."""
comments: list[str]
labels: dict[str, bool]
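# A minimal, self-contained sketch of the set arithmetic used by check_ci_group
# above; the target paths in the usage check are invented for illustration and
# are not real test targets.
def partition_paths(all_paths: set[str], supported: set[str], unsupported: set[str], incidental: set[str]) -> tuple[set[str], set[str]]:
    """Return (unassigned, conflicting) path sets, mirroring check_ci_group."""
    unassigned = all_paths - supported - unsupported - incidental
    conflicting = supported & unsupported
    return unassigned, conflicting
unassigned, conflicting = partition_paths({'a', 'b', 'c'}, {'a'}, {'a', 'b'}, set())
assert unassigned == {'c'} and conflicting == {'a'}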
| 16,221 | Python | .py | 319 | 39.846395 | 160 | 0.613957 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,189 | yamllint.py | ansible_ansible/test/lib/ansible_test/_internal/commands/sanity/yamllint.py |
"""Sanity test using yamllint."""
from __future__ import annotations
import json
import os
import typing as t
from . import (
SanitySingleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanityTargets,
SANITY_ROOT,
)
from ...test import (
TestResult,
)
from ...target import (
TestTarget,
)
from ...util import (
SubprocessError,
display,
is_subdir,
)
from ...util_common import (
run_command,
)
from ...config import (
SanityConfig,
)
from ...data import (
data_context,
)
from ...host_configs import (
PythonConfig,
)
class YamllintTest(SanitySingleVersion):
"""Sanity test using yamllint."""
@property
def error_code(self) -> t.Optional[str]:
"""Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
return 'ansible-test'
@property
def require_libyaml(self) -> bool:
"""True if the test requires PyYAML to have libyaml support."""
return True
def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test."""
yaml_targets = [target for target in targets if os.path.splitext(target.path)[1] in ('.yml', '.yaml')]
for plugin_type, plugin_path in sorted(data_context().content.plugin_paths.items()):
if plugin_type == 'module_utils':
continue
yaml_targets.extend([target for target in targets if
os.path.splitext(target.path)[1] == '.py' and
os.path.basename(target.path) != '__init__.py' and
is_subdir(target.path, plugin_path)])
return yaml_targets
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
settings = self.load_processor(args)
paths = [target.path for target in targets.include]
results = self.test_paths(args, paths, python)
results = settings.process_errors(results, paths)
if results:
return SanityFailure(self.name, messages=results)
return SanitySuccess(self.name)
@staticmethod
def test_paths(args: SanityConfig, paths: list[str], python: PythonConfig) -> list[SanityMessage]:
"""Test the specified paths using the given Python and return the results."""
cmd = [
python.path,
os.path.join(SANITY_ROOT, 'yamllint', 'yamllinter.py'),
]
data = '\n'.join(paths)
display.info(data, verbosity=4)
try:
stdout, stderr = run_command(args, cmd, data=data, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr:
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
if args.explain:
return []
results = json.loads(stdout)['messages']
results = [SanityMessage(
code=r['code'],
message=r['message'],
path=r['path'],
line=int(r['line']),
column=int(r['column']),
level=r['level'],
) for r in results]
return results
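# A hedged demonstration of the stdout contract assumed by test_paths above.
# The {"messages": [...]} shape comes from the parsing code; the sample message
# values below are invented.
import json
sample_stdout = json.dumps({'messages': [
    {'code': 'trailing-spaces', 'message': 'trailing spaces', 'path': 'x.yml', 'line': 3, 'column': 10, 'level': 'error'},
]})
for r in json.loads(sample_stdout)['messages']:
    print('%s:%s:%s: %s %s (%s)' % (r['path'], r['line'], r['column'], r['code'], r['message'], r['level']))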
| 3,423 | Python | .py | 95 | 27.936842 | 147 | 0.618441 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,190 | validate_modules.py | ansible_ansible/test/lib/ansible_test/_internal/commands/sanity/validate_modules.py |
"""Sanity test using validate-modules."""
from __future__ import annotations
import collections
import contextlib
import json
import os
import tarfile
import typing as t
from . import (
DOCUMENTABLE_PLUGINS,
MULTI_FILE_PLUGINS,
SanitySingleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanityTargets,
SANITY_ROOT,
)
from ...io import (
make_dirs,
)
from ...test import (
TestResult,
)
from ...target import (
TestTarget,
)
from ...util import (
SubprocessError,
display,
)
from ...util_common import (
ExitHandler,
process_scoped_temporary_directory,
run_command,
ResultType,
)
from ...ansible_util import (
ansible_environment,
get_collection_detail,
CollectionDetailError,
)
from ...config import (
SanityConfig,
)
from ...ci import (
get_ci_provider,
)
from ...data import (
data_context,
PayloadConfig,
)
from ...host_configs import (
PythonConfig,
)
from ...git import (
Git,
)
from ...provider.source import (
SourceProvider as GitSourceProvider,
)
class ValidateModulesTest(SanitySingleVersion):
"""Sanity test using validate-modules."""
def __init__(self) -> None:
super().__init__()
self.optional_error_codes.update([
'deprecated-date',
])
self._prefixes = {
plugin_type: plugin_path + '/'
for plugin_type, plugin_path in data_context().content.plugin_paths.items()
if plugin_type in DOCUMENTABLE_PLUGINS
}
self._exclusions = set()
if not data_context().content.collection:
self._exclusions.add('lib/ansible/plugins/cache/base.py')
@property
def error_code(self) -> t.Optional[str]:
"""Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
return 'A100'
def get_plugin_type(self, target: TestTarget) -> t.Optional[str]:
"""Return the plugin type of the given target, or None if it is not a plugin or module."""
if target.path.endswith('/__init__.py'):
return None
if target.path in self._exclusions:
return None
for plugin_type, prefix in self._prefixes.items():
if target.path.startswith(prefix):
return plugin_type
return None
def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test."""
return [target for target in targets if self.get_plugin_type(target) is not None]
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
env = ansible_environment(args, color=False)
settings = self.load_processor(args)
target_per_type = collections.defaultdict(list)
for target in targets.include:
target_per_type[self.get_plugin_type(target)].append(target)
# Remove plugins that cannot be associated to a single file (test and filter plugins).
for plugin_type in MULTI_FILE_PLUGINS:
target_per_type.pop(plugin_type, None)
cmd = [
python.path,
os.path.join(SANITY_ROOT, 'validate-modules', 'validate.py'),
'--format', 'json',
'--arg-spec',
] # fmt: skip
if data_context().content.collection:
cmd.extend(['--collection', data_context().content.collection.directory])
try:
collection_detail = get_collection_detail(python)
if collection_detail.version:
cmd.extend(['--collection-version', collection_detail.version])
else:
display.warning('Skipping validate-modules collection version checks since no collection version was found.')
except CollectionDetailError as ex:
display.warning('Skipping validate-modules collection version checks since collection detail loading failed: %s' % ex.reason)
else:
path = self.get_archive_path(args)
if os.path.exists(path):
temp_dir = process_scoped_temporary_directory(args)
with tarfile.open(path) as file:
# deprecated: description='extractall fallback without filter' python_version='3.11'
if hasattr(tarfile, 'data_filter'):
file.extractall(temp_dir, filter='data') # type: ignore[call-arg]
else:
file.extractall(temp_dir)
cmd.extend([
'--original-plugins', temp_dir,
])
errors = []
for plugin_type, plugin_targets in sorted(target_per_type.items()):
paths = [target.path for target in plugin_targets]
plugin_cmd = list(cmd)
if plugin_type != 'modules':
plugin_cmd += ['--plugin-type', plugin_type]
plugin_cmd += paths
try:
stdout, stderr = run_command(args, plugin_cmd, env=env, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr or status not in (0, 3):
raise SubprocessError(cmd=plugin_cmd, status=status, stderr=stderr, stdout=stdout)
if args.explain:
continue
messages = json.loads(stdout)
for filename in messages:
output = messages[filename]
for item in output['errors']:
errors.append(SanityMessage(
path=filename,
line=int(item['line']) if 'line' in item else 0,
column=int(item['column']) if 'column' in item else 0,
code='%s' % item['code'],
message=item['msg'],
))
all_paths = [target.path for target in targets.include]
all_errors = settings.process_errors(errors, all_paths)
if args.explain:
return SanitySuccess(self.name)
if all_errors:
return SanityFailure(self.name, messages=all_errors)
return SanitySuccess(self.name)
def origin_hook(self, args: SanityConfig) -> None:
"""This method is called on the origin, before the test runs or delegation occurs."""
if not data_context().content.is_ansible:
return
if not isinstance(data_context().source_provider, GitSourceProvider):
display.warning('The validate-modules sanity test cannot compare against the base commit because git is not being used.')
return
base_commit = args.base_branch or get_ci_provider().get_base_commit(args)
if not base_commit:
display.warning('The validate-modules sanity test cannot compare against the base commit because it was not detected.')
return
path = self.get_archive_path(args)
def cleanup() -> None:
"""Cleanup callback called when the process exits."""
with contextlib.suppress(FileNotFoundError):
os.unlink(path)
def git_callback(payload_config: PayloadConfig) -> None:
"""Include the previous plugin content archive in the payload."""
files = payload_config.files
files.append((path, os.path.relpath(path, data_context().content.root)))
ExitHandler.register(cleanup)
data_context().register_payload_callback(git_callback)
make_dirs(os.path.dirname(path))
git = Git()
git.run_git(['archive', '--output', path, base_commit, 'lib/ansible/modules/', 'lib/ansible/plugins/'])
@staticmethod
def get_archive_path(args: SanityConfig) -> str:
"""Return the path to the original plugin content archive."""
return os.path.join(ResultType.TMP.path, f'validate-modules-{args.metadata.session_id}.tar')
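# A standalone sketch of the extractall fallback used in ValidateModulesTest.test
# above. The archive path a caller would pass is hypothetical; the hasattr()
# feature check matches the code.
import tarfile
def safe_extract(archive_path: str, dest: str) -> None:
    """Extract a tar archive, preferring the 'data' filter where tarfile provides it."""
    with tarfile.open(archive_path) as file:
        if hasattr(tarfile, 'data_filter'):  # extraction filters available on newer Python releases
            file.extractall(dest, filter='data')
        else:  # older Python without extraction filters
            file.extractall(dest)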
| 8,186 | Python | .py | 196 | 31.908163 | 147 | 0.616074 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,191 | pep8.py | ansible_ansible/test/lib/ansible_test/_internal/commands/sanity/pep8.py |
"""Sanity test for PEP 8 style guidelines using pycodestyle."""
from __future__ import annotations
import os
import typing as t
from . import (
SanitySingleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanityTargets,
SANITY_ROOT,
)
from ...test import (
TestResult,
)
from ...target import (
TestTarget,
)
from ...util import (
SubprocessError,
read_lines_without_comments,
parse_to_list_of_dict,
is_subdir,
)
from ...util_common import (
run_command,
)
from ...config import (
SanityConfig,
)
from ...host_configs import (
PythonConfig,
)
class Pep8Test(SanitySingleVersion):
"""Sanity test for PEP 8 style guidelines using pycodestyle."""
@property
def error_code(self) -> t.Optional[str]:
"""Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
return 'A100'
def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test."""
return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
current_ignore_file = os.path.join(SANITY_ROOT, 'pep8', 'current-ignore.txt')
current_ignore = sorted(read_lines_without_comments(current_ignore_file, remove_blank_lines=True))
settings = self.load_processor(args)
paths = [target.path for target in targets.include]
cmd = [
python.path,
'-m', 'pycodestyle',
'--max-line-length', '160',
'--config', '/dev/null',
'--ignore', ','.join(sorted(current_ignore)),
] + paths # fmt: skip
if paths:
try:
stdout, stderr = run_command(args, cmd, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr:
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
else:
stdout = None
if args.explain:
return SanitySuccess(self.name)
if stdout:
pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<code>[WE][0-9]{3}) (?P<message>.*)$'
results = parse_to_list_of_dict(pattern, stdout)
else:
results = []
messages = [SanityMessage(
message=r['message'],
path=r['path'],
line=int(r['line']),
column=int(r['column']),
level='warning' if r['code'].startswith('W') else 'error',
code=r['code'],
) for r in results]
errors = settings.process_errors(messages, paths)
if errors:
return SanityFailure(self.name, messages=errors)
return SanitySuccess(self.name)
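# A small demonstration of the pycodestyle output pattern used in Pep8Test.test
# above, applied to an invented output line.
import re
pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<code>[WE][0-9]{3}) (?P<message>.*)$'
sample = 'lib/example.py:12:80: E501 line too long (161 > 160 characters)'
match = re.search(pattern, sample)
assert match and match.group('code') == 'E501' and match.group('line') == '12'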
| 3,125 | Python | .py | 85 | 28.694118 | 147 | 0.606965 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,192 | ignores.py | ansible_ansible/test/lib/ansible_test/_internal/commands/sanity/ignores.py |
"""Sanity test for the sanity ignore file."""
from __future__ import annotations
import os
from . import (
SanityFailure,
SanityIgnoreParser,
SanityVersionNeutral,
SanitySuccess,
SanityMessage,
SanityTargets,
)
from ...test import (
calculate_confidence,
calculate_best_confidence,
TestResult,
)
from ...config import (
SanityConfig,
)
class IgnoresTest(SanityVersionNeutral):
"""Sanity test for sanity test ignore entries."""
@property
def can_ignore(self) -> bool:
"""True if the test supports ignore entries."""
return False
@property
def no_targets(self) -> bool:
"""True if the test does not use test targets. Mutually exclusive with all_targets."""
return True
def test(self, args: SanityConfig, targets: SanityTargets) -> TestResult:
sanity_ignore = SanityIgnoreParser.load(args)
messages: list[SanityMessage] = []
# parse errors
messages.extend(SanityMessage(
message=message,
path=sanity_ignore.relative_path,
line=line,
column=column,
confidence=calculate_confidence(sanity_ignore.path, line, args.metadata) if args.metadata.changes else None,
) for line, column, message in sanity_ignore.parse_errors)
# file not found errors
messages.extend(SanityMessage(
message="%s '%s' does not exist" % ("Directory" if path.endswith(os.path.sep) else "File", path),
path=sanity_ignore.relative_path,
line=line,
column=1,
confidence=calculate_best_confidence(((sanity_ignore.path, line), (path, 0)), args.metadata) if args.metadata.changes else None,
) for line, path in sanity_ignore.file_not_found_errors)
# conflicting ignores and skips
for test_name, ignores in sanity_ignore.ignores.items():
for ignore_path, ignore_entry in ignores.items():
skip_line_no = sanity_ignore.skips.get(test_name, {}).get(ignore_path)
if not skip_line_no:
continue
for ignore_line_no in ignore_entry.values():
messages.append(SanityMessage(
message="Ignoring '%s' is unnecessary due to skip entry on line %d" % (ignore_path, skip_line_no),
path=sanity_ignore.relative_path,
line=ignore_line_no,
column=1,
confidence=calculate_confidence(sanity_ignore.path, ignore_line_no, args.metadata) if args.metadata.changes else None,
))
if messages:
return SanityFailure(self.name, messages=messages)
return SanitySuccess(self.name)
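# A hypothetical sketch of the ignore-vs-skip conflict scan in IgnoresTest.test
# above. The nested dict shapes (test -> path -> code -> line for ignores,
# test -> path -> line for skips) are inferred from the loop; the sample
# entries are invented.
ignores = {'pep8': {'lib/x.py': {'E501': 7}}}
skips = {'pep8': {'lib/x.py': 3}}
for test_name, by_path in ignores.items():
    for path, by_code in by_path.items():
        skip_line = skips.get(test_name, {}).get(path)
        if not skip_line:
            continue
        for ignore_line in by_code.values():
            print("line %d: ignoring '%s' is unnecessary due to skip entry on line %d" % (ignore_line, path, skip_line))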
| 2,789 | Python | .py | 65 | 32.923077 | 142 | 0.629068 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,193 | pslint.py | ansible_ansible/test/lib/ansible_test/_internal/commands/sanity/pslint.py |
"""Sanity test using PSScriptAnalyzer."""
from __future__ import annotations
import json
import os
import re
import typing as t
from . import (
SanityVersionNeutral,
SanityMessage,
SanityFailure,
SanitySuccess,
SanitySkipped,
SanityTargets,
SANITY_ROOT,
)
from ...test import (
TestResult,
)
from ...target import (
TestTarget,
)
from ...util import (
SubprocessError,
find_executable,
ANSIBLE_TEST_DATA_ROOT,
)
from ...util_common import (
run_command,
)
from ...config import (
SanityConfig,
)
from ...data import (
data_context,
)
class PslintTest(SanityVersionNeutral):
"""Sanity test using PSScriptAnalyzer."""
@property
def error_code(self) -> t.Optional[str]:
"""Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
return 'AnsibleTest'
def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test."""
return [target for target in targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1', '.psd1')]
def test(self, args: SanityConfig, targets: SanityTargets) -> TestResult:
settings = self.load_processor(args)
paths = [target.path for target in targets.include]
if not find_executable('pwsh', required='warning'):
return SanitySkipped(self.name)
cmds = []
if args.controller.is_managed or args.requirements:
cmds.append(['pwsh', os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'sanity.pslint.ps1')])
cmds.append(['pwsh', os.path.join(SANITY_ROOT, 'pslint', 'pslint.ps1')] + paths)
stdout = ''
for cmd in cmds:
try:
stdout, stderr = run_command(args, cmd, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr:
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
if args.explain:
return SanitySuccess(self.name)
severity = [
'Information',
'Warning',
'Error',
'ParseError',
]
cwd = data_context().content.root + '/'
# replace unicode smart quotes and ellipsis with ascii versions
stdout = re.sub('[\u2018\u2019]', "'", stdout)
stdout = re.sub('[\u201c\u201d]', '"', stdout)
stdout = re.sub('[\u2026]', '...', stdout)
messages = json.loads(stdout)
errors = [SanityMessage(
code=m['RuleName'],
message=m['Message'],
path=m['ScriptPath'].replace(cwd, ''),
line=m['Line'] or 0,
column=m['Column'] or 0,
level=severity[m['Severity']],
) for m in messages]
errors = settings.process_errors(errors, paths)
if errors:
return SanityFailure(self.name, messages=errors)
return SanitySuccess(self.name)
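# A quick standalone check of the unicode normalization applied to
# PSScriptAnalyzer output above, using an invented message string.
import re
stdout = '\u2018Rule\u2019 said \u201chello\u201d\u2026'
stdout = re.sub('[\u2018\u2019]', "'", stdout)
stdout = re.sub('[\u201c\u201d]', '"', stdout)
stdout = re.sub('[\u2026]', '...', stdout)
assert stdout == '\'Rule\' said "hello"...'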
| 3,210 | Python | .py | 90 | 27.8 | 147 | 0.61521 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,194 | import.py | ansible_ansible/test/lib/ansible_test/_internal/commands/sanity/import.py |
"""Sanity test for proper import exception handling."""
from __future__ import annotations
import collections.abc as c
import os
from . import (
SanityMultipleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanitySkipped,
TARGET_SANITY_ROOT,
SanityTargets,
create_sanity_virtualenv,
check_sanity_virtualenv_yaml,
)
from ...constants import (
CONTROLLER_MIN_PYTHON_VERSION,
REMOTE_ONLY_PYTHON_VERSIONS,
)
from ...test import (
TestResult,
)
from ...target import (
TestTarget,
)
from ...util import (
cache,
SubprocessError,
display,
parse_to_list_of_dict,
is_subdir,
ANSIBLE_TEST_TOOLS_ROOT,
)
from ...util_common import (
ResultType,
create_temp_dir,
)
from ...ansible_util import (
ansible_environment,
)
from ...config import (
SanityConfig,
)
from ...coverage_util import (
cover_python,
)
from ...data import (
data_context,
)
from ...host_configs import (
PythonConfig,
)
def _get_module_test(module_restrictions: bool) -> c.Callable[[str], bool]:
"""Create a predicate which tests whether a path can be used by modules or not."""
module_path = data_context().content.module_path
module_utils_path = data_context().content.module_utils_path
if module_restrictions:
return lambda path: is_subdir(path, module_path) or is_subdir(path, module_utils_path)
return lambda path: not (is_subdir(path, module_path) or is_subdir(path, module_utils_path))
class ImportTest(SanityMultipleVersion):
"""Sanity test for proper import exception handling."""
def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test."""
if data_context().content.is_ansible:
# all of ansible-core must pass the import test, not just plugins/modules
# modules/module_utils will be tested using the module context
# everything else will be tested using the plugin context
paths = ['lib/ansible']
else:
# only plugins/modules must pass the import test for collections
paths = list(data_context().content.plugin_paths.values())
return [target for target in targets if os.path.splitext(target.path)[1] == '.py' and
any(is_subdir(target.path, path) for path in paths)]
@property
def needs_pypi(self) -> bool:
"""True if the test requires PyPI, otherwise False."""
return True
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
settings = self.load_processor(args, python.version)
paths = [target.path for target in targets.include]
temp_root = os.path.join(ResultType.TMP.path, 'sanity', 'import')
messages = []
for import_type, test in (
('module', _get_module_test(True)),
('plugin', _get_module_test(False)),
):
if import_type == 'plugin' and python.version in REMOTE_ONLY_PYTHON_VERSIONS:
# Plugins are not supported on remote-only Python versions.
# However, the collection loader is used by the import sanity test and unit tests on remote-only Python versions.
# To support this, it is tested as a plugin, but using a venv which installs no requirements.
# Filtering of paths relevant to the Python version tested has already been performed by filter_remote_targets.
venv_type = 'empty'
else:
venv_type = import_type
data = '\n'.join([path for path in paths if test(path)])
if not data and not args.prime_venvs:
continue
virtualenv_python = create_sanity_virtualenv(args, python, f'{self.name}.{venv_type}', coverage=args.coverage, minimize=True)
if not virtualenv_python:
display.warning(f'Skipping sanity test "{self.name}" on Python {python.version} due to missing virtual environment support.')
return SanitySkipped(self.name, python.version)
virtualenv_yaml = args.explain or check_sanity_virtualenv_yaml(virtualenv_python)
if virtualenv_yaml is False:
display.warning(f'Sanity test "{self.name}" ({import_type}) on Python {python.version} may be slow due to missing libyaml support in PyYAML.')
env = ansible_environment(args, color=False)
env.update(
SANITY_TEMP_PATH=ResultType.TMP.path,
SANITY_IMPORTER_TYPE=import_type,
)
if data_context().content.collection:
external_python = create_sanity_virtualenv(args, args.controller_python, self.name)
env.update(
SANITY_COLLECTION_FULL_NAME=data_context().content.collection.full_name,
SANITY_EXTERNAL_PYTHON=external_python.path,
SANITY_YAML_TO_JSON=os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'yaml_to_json.py'),
ANSIBLE_CONTROLLER_MIN_PYTHON_VERSION=CONTROLLER_MIN_PYTHON_VERSION,
PYTHONPATH=':'.join((get_ansible_test_python_path(), env["PYTHONPATH"])),
)
if args.prime_venvs:
continue
display.info(import_type + ': ' + data, verbosity=4)
cmd = ['importer.py']
# add the importer to the path so it can be accessed through the coverage injector
env.update(
PATH=os.pathsep.join([os.path.join(TARGET_SANITY_ROOT, 'import'), env['PATH']]),
)
try:
stdout, stderr = cover_python(args, virtualenv_python, cmd, self.name, env, capture=True, data=data)
if stdout or stderr:
raise SubprocessError(cmd, stdout=stdout, stderr=stderr)
except SubprocessError as ex:
if ex.status != 10 or ex.stderr or not ex.stdout:
raise
pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
parsed = parse_to_list_of_dict(pattern, ex.stdout)
relative_temp_root = os.path.relpath(temp_root, data_context().content.root) + os.path.sep
messages += [SanityMessage(
message=r['message'],
path=os.path.relpath(r['path'], relative_temp_root) if r['path'].startswith(relative_temp_root) else r['path'],
line=int(r['line']),
column=int(r['column']),
) for r in parsed]
if args.prime_venvs:
return SanitySkipped(self.name, python_version=python.version)
results = settings.process_errors(messages, paths)
if results:
return SanityFailure(self.name, messages=results, python_version=python.version)
return SanitySuccess(self.name, python_version=python.version)
@cache
def get_ansible_test_python_path() -> str:
"""
Return a directory usable for PYTHONPATH, containing only the ansible-test collection loader.
The temporary directory created will be cached for the lifetime of the process and cleaned up at exit.
"""
python_path = create_temp_dir(prefix='ansible-test-')
return python_path
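# A hedged illustration of the predicate pattern behind _get_module_test above,
# with hard-coded paths standing in for data_context() and a simple
# startswith() check standing in for is_subdir().
def make_module_test(module_restrictions: bool, module_path: str = 'plugins/modules', module_utils_path: str = 'plugins/module_utils'):
    """Return a predicate selecting module/module_utils paths, or their complement."""
    def in_module_tree(path: str) -> bool:
        return path.startswith(module_path + '/') or path.startswith(module_utils_path + '/')
    return in_module_tree if module_restrictions else (lambda path: not in_module_tree(path))
is_module = make_module_test(True)
assert is_module('plugins/modules/ping.py')
assert not is_module('plugins/inventory/host_list.py')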
| 7,416 | Python | .py | 156 | 37.884615 | 158 | 0.637171 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,195 | pylint.py | ansible_ansible/test/lib/ansible_test/_internal/commands/sanity/pylint.py |
"""Sanity test using pylint."""
from __future__ import annotations
import collections.abc as c
import itertools
import json
import os
import datetime
import typing as t
from . import (
SanitySingleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanityTargets,
SANITY_ROOT,
)
from ...constants import (
CONTROLLER_PYTHON_VERSIONS,
REMOTE_ONLY_PYTHON_VERSIONS,
)
from ...io import (
make_dirs,
)
from ...test import (
TestResult,
)
from ...target import (
TestTarget,
)
from ...util import (
SubprocessError,
display,
is_subdir,
)
from ...util_common import (
run_command,
)
from ...ansible_util import (
ansible_environment,
get_collection_detail,
CollectionDetail,
CollectionDetailError,
ResultType,
)
from ...config import (
SanityConfig,
)
from ...data import (
data_context,
)
from ...host_configs import (
PythonConfig,
)
class PylintTest(SanitySingleVersion):
"""Sanity test using pylint."""
def __init__(self) -> None:
super().__init__()
self.optional_error_codes.update([
'ansible-deprecated-date',
'too-complex',
])
@property
def error_code(self) -> t.Optional[str]:
"""Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
return 'ansible-test'
def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test."""
return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
target_paths = set(target.path for target in self.filter_remote_targets(list(targets.targets)))
plugin_dir = os.path.join(SANITY_ROOT, 'pylint', 'plugins')
plugin_names = sorted(p[0] for p in [
os.path.splitext(p) for p in os.listdir(plugin_dir)] if p[1] == '.py' and p[0] != '__init__')
settings = self.load_processor(args)
paths = [target.path for target in targets.include]
module_paths = [os.path.relpath(p, data_context().content.module_path).split(os.path.sep) for p in
paths if is_subdir(p, data_context().content.module_path)]
module_dirs = sorted({p[0] for p in module_paths if len(p) > 1})
large_module_group_threshold = 500
large_module_groups = [key for key, value in
itertools.groupby(module_paths, lambda p: p[0] if len(p) > 1 else '') if len(list(value)) > large_module_group_threshold]
large_module_group_paths = [os.path.relpath(p, data_context().content.module_path).split(os.path.sep) for p in paths
if any(is_subdir(p, os.path.join(data_context().content.module_path, g)) for g in large_module_groups)]
large_module_group_dirs = sorted({os.path.sep.join(p[:2]) for p in large_module_group_paths if len(p) > 2})
contexts = []
remaining_paths = set(paths)
def add_context(available_paths: set[str], context_name: str, context_filter: c.Callable[[str], bool]) -> None:
"""Add the specified context to the context list, consuming available paths that match the given context filter."""
filtered_paths = set(p for p in available_paths if context_filter(p))
if selected_paths := sorted(path for path in filtered_paths if path in target_paths):
contexts.append((context_name, True, selected_paths))
if selected_paths := sorted(path for path in filtered_paths if path not in target_paths):
contexts.append((context_name, False, selected_paths))
available_paths -= filtered_paths
def filter_path(path_filter: str = None) -> c.Callable[[str], bool]:
"""Return a function that filters out paths which are not a subdirectory of the given path."""
def context_filter(path_to_filter: str) -> bool:
"""Return true if the given path matches, otherwise return False."""
return is_subdir(path_to_filter, path_filter)
return context_filter
for large_module_dir in large_module_group_dirs:
add_context(remaining_paths, 'modules/%s' % large_module_dir, filter_path(os.path.join(data_context().content.module_path, large_module_dir)))
for module_dir in module_dirs:
add_context(remaining_paths, 'modules/%s' % module_dir, filter_path(os.path.join(data_context().content.module_path, module_dir)))
add_context(remaining_paths, 'modules', filter_path(data_context().content.module_path))
add_context(remaining_paths, 'module_utils', filter_path(data_context().content.module_utils_path))
add_context(remaining_paths, 'units', filter_path(data_context().content.unit_path))
if data_context().content.collection:
add_context(remaining_paths, 'collection', lambda p: True)
else:
add_context(remaining_paths, 'validate-modules', filter_path('test/lib/ansible_test/_util/controller/sanity/validate-modules/'))
add_context(remaining_paths, 'validate-modules-unit', filter_path('test/lib/ansible_test/tests/validate-modules-unit/'))
add_context(remaining_paths, 'code-smell', filter_path('test/lib/ansible_test/_util/controller/sanity/code-smell/'))
add_context(remaining_paths, 'ansible-test-target', filter_path('test/lib/ansible_test/_util/target/'))
add_context(remaining_paths, 'ansible-test', filter_path('test/lib/'))
add_context(remaining_paths, 'test', filter_path('test/'))
add_context(remaining_paths, 'hacking', filter_path('hacking/'))
add_context(remaining_paths, 'ansible', lambda p: True)
messages = []
context_times = []
collection_detail = None
if data_context().content.collection:
try:
collection_detail = get_collection_detail(python)
if not collection_detail.version:
display.warning('Skipping pylint collection version checks since no collection version was found.')
except CollectionDetailError as ex:
display.warning('Skipping pylint collection version checks since collection detail loading failed: %s' % ex.reason)
test_start = datetime.datetime.now(tz=datetime.timezone.utc)
for context, is_target, context_paths in sorted(contexts):
if not context_paths:
continue
context_start = datetime.datetime.now(tz=datetime.timezone.utc)
messages += self.pylint(args, context, is_target, context_paths, plugin_dir, plugin_names, python, collection_detail)
context_end = datetime.datetime.now(tz=datetime.timezone.utc)
context_times.append('%s: %d (%s)' % (context, len(context_paths), context_end - context_start))
test_end = datetime.datetime.now(tz=datetime.timezone.utc)
for context_time in context_times:
display.info(context_time, verbosity=4)
display.info('total: %d (%s)' % (len(paths), test_end - test_start), verbosity=4)
errors = [SanityMessage(
message=m['message'].replace('\n', ' '),
path=m['path'],
line=int(m['line']),
column=int(m['column']),
level=m['type'],
code=m['symbol'],
) for m in messages]
if args.explain:
return SanitySuccess(self.name)
errors = settings.process_errors(errors, paths)
if errors:
return SanityFailure(self.name, messages=errors)
return SanitySuccess(self.name)
@staticmethod
def pylint(
args: SanityConfig,
context: str,
is_target: bool,
paths: list[str],
plugin_dir: str,
plugin_names: list[str],
python: PythonConfig,
collection_detail: CollectionDetail,
) -> list[dict[str, str]]:
"""Run pylint using the config specified by the context on the specified paths."""
rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', context.split('/')[0] + '.cfg')
if not os.path.exists(rcfile):
if data_context().content.collection:
rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', 'collection.cfg')
else:
rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', 'default.cfg')
if is_target:
context_label = 'target'
min_python_version = REMOTE_ONLY_PYTHON_VERSIONS[0]
else:
context_label = 'controller'
min_python_version = CONTROLLER_PYTHON_VERSIONS[0]
load_plugins = set(plugin_names)
plugin_options: dict[str, str] = {}
# plugin: deprecated (ansible-test)
if data_context().content.collection:
plugin_options.update({'--collection-name': data_context().content.collection.full_name})
if collection_detail and collection_detail.version:
plugin_options.update({'--collection-version': collection_detail.version})
# plugin: pylint.extensions.mccabe
if args.enable_optional_errors:
load_plugins.add('pylint.extensions.mccabe')
plugin_options.update({'--max-complexity': '20'})
options = {
'--py-version': min_python_version,
'--load-plugins': ','.join(sorted(load_plugins)),
'--rcfile': rcfile,
'--jobs': '0',
'--reports': 'n',
'--output-format': 'json',
}
cmd = [python.path, '-m', 'pylint']
cmd.extend(itertools.chain.from_iterable((options | plugin_options).items()))
cmd.extend(paths)
append_python_path = [plugin_dir]
if data_context().content.collection:
append_python_path.append(data_context().content.collection.root)
env = ansible_environment(args)
env['PYTHONPATH'] += os.path.pathsep + os.path.pathsep.join(append_python_path)
# expose plugin paths for use in custom plugins
env.update(dict(('ANSIBLE_TEST_%s_PATH' % k.upper(), os.path.abspath(v) + os.path.sep) for k, v in data_context().content.plugin_paths.items()))
# Set PYLINTHOME to prevent pylint from checking for an obsolete directory, which can result in a test failure due to stderr output.
# See: https://github.com/PyCQA/pylint/blob/e6c6bf5dfd61511d64779f54264b27a368c43100/pylint/constants.py#L148
pylint_home = os.path.join(ResultType.TMP.path, 'pylint')
make_dirs(pylint_home)
env.update(PYLINTHOME=pylint_home)
if paths:
display.info(f'Checking {len(paths)} file(s) in context {context!r} ({context_label}) with config: {rcfile}', verbosity=1)
try:
stdout, stderr = run_command(args, cmd, env=env, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr or status >= 32:
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
else:
stdout = None
if not args.explain and stdout:
messages = json.loads(stdout)
else:
messages = []
return messages
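# A minimal reproduction of how the pylint command line is assembled above from
# option dictionaries; the option values are invented.
import itertools
options = {'--py-version': '3.8', '--jobs': '0', '--output-format': 'json'}
plugin_options = {'--max-complexity': '20'}
cmd = ['python', '-m', 'pylint']
cmd.extend(itertools.chain.from_iterable((options | plugin_options).items()))
assert cmd[-2:] == ['--max-complexity', '20']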
| 11,682 | Python | .py | 230 | 41.1 | 154 | 0.635149 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,196 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/commands/env/__init__.py |
"""Show information about the test environment."""
from __future__ import annotations
import datetime
import os
import platform
import sys
import typing as t
from ...config import (
CommonConfig,
)
from ...io import (
write_json_file,
)
from ...util import (
display,
get_ansible_version,
get_available_python_versions,
ApplicationError,
)
from ...util_common import (
data_context,
write_json_test_results,
ResultType,
)
from ...docker_util import (
get_docker_command,
get_docker_info,
get_docker_container_id,
)
from ...constants import (
TIMEOUT_PATH,
)
from ...ci import (
get_ci_provider,
)
from ...timeout import (
TimeoutDetail,
)
class EnvConfig(CommonConfig):
"""Configuration for the `env` command."""
def __init__(self, args: t.Any) -> None:
super().__init__(args, 'env')
self.show: bool = args.show
self.dump: bool = args.dump
self.timeout: int | float | None = args.timeout
self.list_files: bool = args.list_files
if not self.show and not self.dump and self.timeout is None and not self.list_files:
# default to --show if no options were given
self.show = True
def command_env(args: EnvConfig) -> None:
"""Entry point for the `env` command."""
show_dump_env(args)
list_files_env(args)
set_timeout(args)
def show_dump_env(args: EnvConfig) -> None:
"""Show information about the current environment and/or write the information to disk."""
if not args.show and not args.dump:
return
container_id = get_docker_container_id()
data = dict(
ansible=dict(
version=get_ansible_version(),
),
docker=get_docker_details(args),
container_id=container_id,
environ=os.environ.copy(),
location=dict(
pwd=os.environ.get('PWD', None),
cwd=os.getcwd(),
),
git=get_ci_provider().get_git_details(args),
platform=dict(
datetime=datetime.datetime.now(tz=datetime.timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ'),
platform=platform.platform(),
uname=platform.uname(),
),
python=dict(
executable=sys.executable,
version=platform.python_version(),
),
interpreters=get_available_python_versions(),
)
if args.show:
verbose = {
'docker': 3,
'docker.executable': 0,
'environ': 2,
'platform.uname': 1,
}
show_dict(data, verbose)
if args.dump and not args.explain:
write_json_test_results(ResultType.BOT, 'data-environment.json', data)
def list_files_env(args: EnvConfig) -> None:
"""List files on stdout."""
if not args.list_files:
return
for path in data_context().content.all_files():
display.info(path)
def set_timeout(args: EnvConfig) -> None:
"""Set an execution timeout for subsequent ansible-test invocations."""
if args.timeout is None:
return
timeout = TimeoutDetail.create(args.timeout)
if timeout:
display.info(f'Setting a {timeout.duration} minute test timeout which will end at: {timeout.deadline}', verbosity=1)
else:
display.info('Clearing existing test timeout.', verbosity=1)
if args.explain:
return
if timeout:
write_json_file(TIMEOUT_PATH, timeout.to_dict())
elif os.path.exists(TIMEOUT_PATH):
os.remove(TIMEOUT_PATH)
def show_dict(data: dict[str, t.Any], verbose: dict[str, int], root_verbosity: int = 0, path: t.Optional[list[str]] = None) -> None:
"""Show a dict with varying levels of verbosity."""
path = path if path else []
for key, value in sorted(data.items()):
indent = ' ' * len(path)
key_path = path + [key]
key_name = '.'.join(key_path)
verbosity = verbose.get(key_name, root_verbosity)
if isinstance(value, (tuple, list)):
display.info(indent + '%s:' % key, verbosity=verbosity)
for item in value:
display.info(indent + ' - %s' % item, verbosity=verbosity)
elif isinstance(value, dict):
min_verbosity = min([verbosity] + [v for k, v in verbose.items() if k.startswith('%s.' % key)])
display.info(indent + '%s:' % key, verbosity=min_verbosity)
show_dict(value, verbose, verbosity, key_path)
else:
display.info(indent + '%s: %s' % (key, value), verbosity=verbosity)
def get_docker_details(args: EnvConfig) -> dict[str, t.Any]:
"""Return details about docker."""
docker = get_docker_command()
executable = None
info = None
version = None
if docker:
executable = docker.executable
try:
docker_info = get_docker_info(args)
except ApplicationError as ex:
display.warning(str(ex))
else:
info = docker_info.info
version = docker_info.version
docker_details = dict(
executable=executable,
info=info,
version=version,
)
return docker_details
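# A hypothetical, verbosity-free analogue of show_dict above, using print()
# instead of the display helper; the sample data is invented.
def show_dict_plain(data: dict, indent: str = '') -> None:
    """Recursively print a nested dict in the same indented style as show_dict."""
    for key, value in sorted(data.items()):
        if isinstance(value, dict):
            print('%s%s:' % (indent, key))
            show_dict_plain(value, indent + ' ')
        elif isinstance(value, (tuple, list)):
            print('%s%s:' % (indent, key))
            for item in value:
                print('%s - %s' % (indent, item))
        else:
            print('%s%s: %s' % (indent, key, value))
show_dict_plain({'python': {'version': '3.12.0'}, 'interpreters': ['3.11', '3.12']})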
| 5,165 | Python | .py | 152 | 26.940789 | 132 | 0.62241 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,197 | xml.py | ansible_ansible/test/lib/ansible_test/_internal/commands/coverage/xml.py |
"""Generate XML code coverage reports."""
from __future__ import annotations
import os
import time
from xml.etree.ElementTree import (
Comment,
Element,
SubElement,
tostring,
)
from xml.dom import (
minidom,
)
from ...io import (
make_dirs,
read_json_file,
)
from ...util_common import (
ResultType,
write_text_test_results,
)
from ...util import (
get_ansible_version,
)
from ...data import (
data_context,
)
from ...provisioning import (
prepare_profiles,
)
from .combine import (
combine_coverage_files,
CoverageCombineConfig,
)
from . import (
run_coverage,
)
def command_coverage_xml(args: CoverageXmlConfig) -> None:
"""Generate an XML coverage report."""
host_state = prepare_profiles(args) # coverage xml
output_files = combine_coverage_files(args, host_state)
for output_file in output_files:
xml_name = '%s.xml' % os.path.basename(output_file)
if output_file.endswith('-powershell'):
report = _generate_powershell_xml(output_file)
rough_string = tostring(report, 'utf-8')
reparsed = minidom.parseString(rough_string)
pretty = reparsed.toprettyxml(indent=' ')
write_text_test_results(ResultType.REPORTS, xml_name, pretty)
else:
xml_path = os.path.join(ResultType.REPORTS.path, xml_name)
make_dirs(ResultType.REPORTS.path)
run_coverage(args, host_state, output_file, 'xml', ['-i', '-o', xml_path])
def _generate_powershell_xml(coverage_file: str) -> Element:
"""Generate a PowerShell coverage report XML element from the specified coverage file and return it."""
coverage_info = read_json_file(coverage_file)
content_root = data_context().content.root
is_ansible = data_context().content.is_ansible
packages: dict[str, dict[str, dict[str, int]]] = {}
for path, results in coverage_info.items():
filename = os.path.splitext(os.path.basename(path))[0]
if filename.startswith('Ansible.ModuleUtils'):
package = 'ansible.module_utils'
elif is_ansible:
package = 'ansible.modules'
else:
rel_path = path[len(content_root) + 1:]
plugin_type = "modules" if rel_path.startswith("plugins/modules") else "module_utils"
package = 'ansible_collections.%splugins.%s' % (data_context().content.collection.prefix, plugin_type)
if package not in packages:
packages[package] = {}
packages[package][path] = results
elem_coverage = Element('coverage')
elem_coverage.append(
Comment(' Generated by ansible-test from the Ansible project: https://www.ansible.com/ '))
elem_coverage.append(
Comment(' Based on https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd '))
elem_sources = SubElement(elem_coverage, 'sources')
elem_source = SubElement(elem_sources, 'source')
elem_source.text = data_context().content.root
elem_packages = SubElement(elem_coverage, 'packages')
total_lines_hit = 0
total_line_count = 0
for package_name, package_data in packages.items():
lines_hit, line_count = _add_cobertura_package(elem_packages, package_name, package_data)
total_lines_hit += lines_hit
total_line_count += line_count
elem_coverage.attrib.update({
'branch-rate': '0',
'branches-covered': '0',
'branches-valid': '0',
'complexity': '0',
'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
'lines-covered': str(total_line_count),
'lines-valid': str(total_lines_hit),
'timestamp': str(int(time.time())),
'version': get_ansible_version(),
})
return elem_coverage
def _add_cobertura_package(packages: Element, package_name: str, package_data: dict[str, dict[str, int]]) -> tuple[int, int]:
"""Add a package element to the given packages element."""
elem_package = SubElement(packages, 'package')
elem_classes = SubElement(elem_package, 'classes')
total_lines_hit = 0
total_line_count = 0
for path, results in package_data.items():
lines_hit = len([True for hits in results.values() if hits])
line_count = len(results)
total_lines_hit += lines_hit
total_line_count += line_count
elem_class = SubElement(elem_classes, 'class')
class_name = os.path.splitext(os.path.basename(path))[0]
if class_name.startswith("Ansible.ModuleUtils"):
class_name = class_name[20:]
content_root = data_context().content.root
filename = path
if filename.startswith(content_root):
filename = filename[len(content_root) + 1:]
elem_class.attrib.update({
'branch-rate': '0',
'complexity': '0',
'filename': filename,
'line-rate': str(round(lines_hit / line_count, 4)) if line_count else "0",
'name': class_name,
})
SubElement(elem_class, 'methods')
elem_lines = SubElement(elem_class, 'lines')
for number, hits in results.items():
elem_line = SubElement(elem_lines, 'line')
elem_line.attrib.update(
hits=str(hits),
number=str(number),
)
elem_package.attrib.update({
'branch-rate': '0',
'complexity': '0',
'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
'name': package_name,
})
return total_lines_hit, total_line_count
class CoverageXmlConfig(CoverageCombineConfig):
"""Configuration for the coverage xml command."""
| 5,774 | Python | .py | 142 | 33.359155 | 125 | 0.64333 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,198 | report.py | ansible_ansible/test/lib/ansible_test/_internal/commands/coverage/report.py |
"""Generate console code coverage reports."""
from __future__ import annotations
import os
import typing as t
from ...io import (
read_json_file,
)
from ...util import (
display,
)
from ...data import (
data_context,
)
from ...provisioning import (
prepare_profiles,
)
from .combine import (
combine_coverage_files,
CoverageCombineConfig,
)
from . import (
run_coverage,
)
def command_coverage_report(args: CoverageReportConfig) -> None:
"""Generate a console coverage report."""
host_state = prepare_profiles(args) # coverage report
output_files = combine_coverage_files(args, host_state)
for output_file in output_files:
if args.group_by or args.stub:
display.info('>>> Coverage Group: %s' % ' '.join(os.path.basename(output_file).split('=')[1:]))
if output_file.endswith('-powershell'):
display.info(_generate_powershell_output_report(args, output_file))
else:
options = []
if args.show_missing:
options.append('--show-missing')
if args.include:
options.extend(['--include', args.include])
if args.omit:
options.extend(['--omit', args.omit])
run_coverage(args, host_state, output_file, 'report', options)
def _generate_powershell_output_report(args: CoverageReportConfig, coverage_file: str) -> str:
"""Generate and return a PowerShell coverage report for the given coverage file."""
coverage_info = read_json_file(coverage_file)
root_path = data_context().content.root + '/'
name_padding = 7
cover_padding = 8
file_report = []
total_stmts = 0
total_miss = 0
for filename in sorted(coverage_info.keys()):
hit_info = coverage_info[filename]
if filename.startswith(root_path):
filename = filename[len(root_path):]
if args.omit and filename in args.omit:
continue
if args.include and filename not in args.include:
continue
stmts = len(hit_info)
miss = len([hit for hit in hit_info.values() if hit == 0])
name_padding = max(name_padding, len(filename) + 3)
total_stmts += stmts
total_miss += miss
cover = "{0}%".format(int((stmts - miss) / stmts * 100))
missing = []
current_missing = None
sorted_lines = sorted([int(x) for x in hit_info.keys()])
for idx, line in enumerate(sorted_lines):
hit = hit_info[str(line)]
if hit == 0 and current_missing is None:
current_missing = line
elif hit != 0 and current_missing is not None:
end_line = sorted_lines[idx - 1]
if current_missing == end_line:
missing.append(str(current_missing))
else:
missing.append('%s-%s' % (current_missing, end_line))
current_missing = None
if current_missing is not None:
end_line = sorted_lines[-1]
if current_missing == end_line:
missing.append(str(current_missing))
else:
missing.append('%s-%s' % (current_missing, end_line))
file_report.append({'name': filename, 'stmts': stmts, 'miss': miss, 'cover': cover, 'missing': missing})
if total_stmts == 0:
return ''
total_percent = '{0}%'.format(int((total_stmts - total_miss) / total_stmts * 100))
stmts_padding = max(8, len(str(total_stmts)))
miss_padding = max(7, len(str(total_miss)))
line_length = name_padding + stmts_padding + miss_padding + cover_padding
header = 'Name'.ljust(name_padding) + 'Stmts'.rjust(stmts_padding) + 'Miss'.rjust(miss_padding) + \
'Cover'.rjust(cover_padding)
if args.show_missing:
header += 'Lines Missing'.rjust(16)
line_length += 16
line_break = '-' * line_length
lines = ['%s%s%s%s%s' % (f['name'].ljust(name_padding), str(f['stmts']).rjust(stmts_padding),
str(f['miss']).rjust(miss_padding), f['cover'].rjust(cover_padding),
' ' + ', '.join(f['missing']) if args.show_missing else '')
for f in file_report]
totals = 'TOTAL'.ljust(name_padding) + str(total_stmts).rjust(stmts_padding) + \
str(total_miss).rjust(miss_padding) + total_percent.rjust(cover_padding)
report = '{0}\n{1}\n{2}\n{1}\n{3}'.format(header, line_break, "\n".join(lines), totals)
return report
class CoverageReportConfig(CoverageCombineConfig):
"""Configuration for the coverage report command."""
def __init__(self, args: t.Any) -> None:
super().__init__(args)
self.show_missing: bool = args.show_missing
self.include: str = args.include
self.omit: str = args.omit
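# A self-contained version of the missing-line range compression performed in
# _generate_powershell_output_report above; the hit data is invented.
def compress_missing(hit_info: dict[str, int]) -> list[str]:
    """Collapse consecutive unhit line numbers into 'start-end' ranges."""
    missing = []
    current = None
    lines = sorted(int(x) for x in hit_info)
    for idx, line in enumerate(lines):
        if hit_info[str(line)] == 0 and current is None:
            current = line
        elif hit_info[str(line)] != 0 and current is not None:
            end = lines[idx - 1]
            missing.append(str(current) if current == end else '%s-%s' % (current, end))
            current = None
    if current is not None:
        end = lines[-1]
        missing.append(str(current) if current == end else '%s-%s' % (current, end))
    return missing
assert compress_missing({'1': 1, '2': 0, '3': 0, '4': 1, '5': 0}) == ['2-3', '5']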
| 4,868 | Python | .py | 112 | 34.660714 | 112 | 0.602757 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,199 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/commands/coverage/__init__.py |
"""Common logic for the coverage subcommand."""
from __future__ import annotations
import collections.abc as c
import json
import os
import re
import typing as t
from ...encoding import (
to_bytes,
)
from ...io import (
read_text_file,
read_json_file,
)
from ...util import (
ApplicationError,
common_environment,
display,
ANSIBLE_TEST_DATA_ROOT,
)
from ...util_common import (
intercept_python,
ResultType,
)
from ...config import (
EnvironmentConfig,
)
from ...python_requirements import (
install_requirements,
)
from ...target import (
walk_module_targets,
)
from ...data import (
data_context,
)
from ...pypi_proxy import (
configure_pypi_proxy,
)
from ...provisioning import (
HostState,
)
from ...coverage_util import (
get_coverage_file_schema_version,
CoverageError,
CONTROLLER_COVERAGE_VERSION,
)
if t.TYPE_CHECKING:
import coverage as coverage_module
COVERAGE_GROUPS = ('command', 'target', 'environment', 'version')
COVERAGE_CONFIG_PATH = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'coveragerc')
COVERAGE_OUTPUT_FILE_NAME = 'coverage'
class CoverageConfig(EnvironmentConfig):
"""Configuration for the coverage command."""
def __init__(self, args: t.Any) -> None:
super().__init__(args, 'coverage')
def initialize_coverage(args: CoverageConfig, host_state: HostState) -> coverage_module:
"""Delegate execution if requested, install requirements, then import and return the coverage module. Raises an exception if coverage is not available."""
configure_pypi_proxy(args, host_state.controller_profile) # coverage
install_requirements(args, host_state.controller_profile.python, coverage=True) # coverage
try:
import coverage
except ImportError:
coverage = None
coverage_required_version = CONTROLLER_COVERAGE_VERSION.coverage_version
if not coverage:
raise ApplicationError(f'Version {coverage_required_version} of the Python "coverage" module must be installed to use this command.')
if coverage.__version__ != coverage_required_version:
raise ApplicationError(f'Version {coverage_required_version} of the Python "coverage" module is required. Version {coverage.__version__} was found.')
return coverage
def run_coverage(args: CoverageConfig, host_state: HostState, output_file: str, command: str, cmd: list[str]) -> None:
"""Run the coverage cli tool with the specified options."""
env = common_environment()
env.update(COVERAGE_FILE=output_file)
cmd = ['python', '-m', 'coverage.__main__', command, '--rcfile', COVERAGE_CONFIG_PATH] + cmd
stdout, stderr = intercept_python(args, host_state.controller_profile.python, cmd, env, capture=True)
stdout = (stdout or '').strip()
stderr = (stderr or '').strip()
if stdout:
display.info(stdout)
if stderr:
display.warning(stderr)
def get_all_coverage_files() -> list[str]:
"""Return a list of all coverage file paths."""
return get_python_coverage_files() + get_powershell_coverage_files()
def get_python_coverage_files(path: t.Optional[str] = None) -> list[str]:
"""Return the list of Python coverage file paths."""
return get_coverage_files('python', path)
def get_powershell_coverage_files(path: t.Optional[str] = None) -> list[str]:
"""Return the list of PowerShell coverage file paths."""
return get_coverage_files('powershell', path)
def get_coverage_files(language: str, path: t.Optional[str] = None) -> list[str]:
"""Return the list of coverage file paths for the given language."""
coverage_dir = path or ResultType.COVERAGE.path
try:
coverage_files = [os.path.join(coverage_dir, f) for f in os.listdir(coverage_dir)
if '=coverage.' in f and '=%s' % language in f]
except FileNotFoundError:
return []
return coverage_files
def get_collection_path_regexes() -> tuple[t.Optional[t.Pattern], t.Optional[t.Pattern]]:
"""Return a pair of regexes used for identifying and manipulating collection paths."""
if data_context().content.collection:
collection_search_re = re.compile(r'/%s/' % data_context().content.collection.directory)
collection_sub_re = re.compile(r'^.*?/%s/' % data_context().content.collection.directory)
else:
collection_search_re = None
collection_sub_re = None
return collection_search_re, collection_sub_re
def get_python_modules() -> dict[str, str]:
"""Return a dictionary of Ansible module names and their paths."""
return dict((target.module, target.path) for target in list(walk_module_targets()) if target.path.endswith('.py'))
def enumerate_python_arcs(
path: str,
coverage: coverage_module,
modules: dict[str, str],
collection_search_re: t.Optional[t.Pattern],
collection_sub_re: t.Optional[t.Pattern],
) -> c.Generator[tuple[str, set[tuple[int, int]]], None, None]:
"""Enumerate Python code coverage arcs in the given file."""
if os.path.getsize(path) == 0:
display.warning('Empty coverage file: %s' % path, verbosity=2)
return
try:
arc_data = read_python_coverage(path, coverage)
except CoverageError as ex:
display.error(str(ex))
return
for filename, arcs in arc_data.items():
if not arcs:
# This is most likely due to using an unsupported version of coverage.
display.warning('No arcs found for "%s" in coverage file: %s' % (filename, path))
continue
filename = sanitize_filename(filename, modules=modules, collection_search_re=collection_search_re, collection_sub_re=collection_sub_re)
if not filename:
continue
yield filename, set(arcs)
PythonArcs = dict[str, list[tuple[int, int]]]
"""Python coverage arcs."""
def read_python_coverage(path: str, coverage: coverage_module) -> PythonArcs:
"""Return coverage arcs from the specified coverage file. Raises a CoverageError exception if coverage cannot be read."""
try:
return read_python_coverage_native(path, coverage)
except CoverageError as ex:
schema_version = get_coverage_file_schema_version(path)
if schema_version == CONTROLLER_COVERAGE_VERSION.schema_version:
raise CoverageError(path, f'Unexpected failure reading supported schema version {schema_version}.') from ex
if schema_version == 0:
return read_python_coverage_legacy(path)
raise CoverageError(path, f'Unsupported schema version: {schema_version}')
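
# Fallback summary, added comment: a native read is attempted first. If it fails,
# schema version 0 identifies the legacy pre-SQLite JSON format handled by
# read_python_coverage_legacy(), while a failure on the currently supported
# schema version is surfaced as an error instead of being silently ignored.
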
def read_python_coverage_native(path: str, coverage: coverage_module) -> PythonArcs:
"""Return coverage arcs from the specified coverage file using the coverage API."""
try:
data = coverage.CoverageData(path)
data.read()
arcs = {filename: data.arcs(filename) for filename in data.measured_files()}
except Exception as ex:
raise CoverageError(path, f'Error reading coverage file using coverage API: {ex}') from ex
return arcs
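
# Stand-alone sketch of the same coverage API calls, with a hypothetical path:
#
#     import coverage
#     data = coverage.CoverageData('/tmp/.coverage')  # hypothetical file
#     data.read()
#     arcs = {f: data.arcs(f) for f in data.measured_files()}
#
# CoverageData.arcs() returns None for files measured without branch data, which
# is why enumerate_python_arcs() warns when no arcs are found for a file.
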
def read_python_coverage_legacy(path: str) -> PythonArcs:
"""Return coverage arcs from the specified coverage file, which must be in the legacy JSON format."""
try:
contents = read_text_file(path)
contents = re.sub(r'''^!coverage.py: This is a private format, don't read it directly!''', '', contents)
data = json.loads(contents)
arcs: PythonArcs = {filename: [t.cast(tuple[int, int], tuple(arc)) for arc in arc_list] for filename, arc_list in data['arcs'].items()}
except Exception as ex:
raise CoverageError(path, f'Error reading JSON coverage file: {ex}') from ex
return arcs
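
# Legacy format note, added for clarity: pre-5.0 coverage data files consist of
# the banner string stripped above immediately followed by JSON. A made-up
# minimal payload:
#
#     !coverage.py: This is a private format, don't read it directly!{"arcs": {"/src/mod.py": [[-1, 1], [1, 2]]}}
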
def enumerate_powershell_lines(
path: str,
collection_search_re: t.Optional[t.Pattern],
collection_sub_re: t.Optional[t.Pattern],
) -> c.Generator[tuple[str, dict[int, int]], None, None]:
"""Enumerate PowerShell code coverage lines in the given file."""
if os.path.getsize(path) == 0:
display.warning('Empty coverage file: %s' % path, verbosity=2)
return
try:
coverage_run = read_json_file(path)
except Exception as ex: # pylint: disable=locally-disabled, broad-except
display.error('%s' % ex)
return
for filename, hits in coverage_run.items():
filename = sanitize_filename(filename, collection_search_re=collection_search_re, collection_sub_re=collection_sub_re)
if not filename:
continue
if isinstance(hits, dict) and not hits.get('Line'):
# Input data was previously aggregated and thus uses the standard ansible-test output format for PowerShell coverage.
# This format differs from the more verbose format of raw coverage data from the remote Windows hosts.
            hits = {int(key): value for key, value in hits.items()}
yield filename, hits
continue
        # PowerShell unpacks single-element arrays, so a lone result can arrive as a bare dict rather than a list; normalize it defensively.
        if not isinstance(hits, list):
            hits = [hits]
        hits = {hit['Line']: hit['HitCount'] for hit in hits if hit}
yield filename, hits
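
# Data shape note, values hypothetical: raw per-host PowerShell results arrive as
# a list of objects like {"Line": 12, "HitCount": 3}, while previously aggregated
# ansible-test output is already a mapping of line number to hit count, such as
# {"12": 3}. Both shapes are normalized above to {12: 3}.
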
def sanitize_filename(
filename: str,
modules: t.Optional[dict[str, str]] = None,
collection_search_re: t.Optional[t.Pattern] = None,
collection_sub_re: t.Optional[t.Pattern] = None,
) -> t.Optional[str]:
"""Convert the given code coverage path to a local absolute path and return its, or None if the path is not valid."""
ansible_path = os.path.abspath('lib/ansible/') + '/'
root_path = data_context().content.root + '/'
integration_temp_path = os.path.sep + os.path.join(ResultType.TMP.relative_path, 'integration') + os.path.sep
if modules is None:
modules = {}
if '/ansible_modlib.zip/ansible/' in filename:
# Rewrite the module_utils path from the remote host to match the controller. Ansible 2.6 and earlier.
new_name = re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path, filename)
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
elif collection_search_re and collection_search_re.search(filename):
new_name = os.path.abspath(collection_sub_re.sub('', filename))
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
elif re.search(r'/ansible_[^/]+_payload\.zip/ansible/', filename):
# Rewrite the module_utils path from the remote host to match the controller. Ansible 2.7 and later.
new_name = re.sub(r'^.*/ansible_[^/]+_payload\.zip/ansible/', ansible_path, filename)
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
elif '/ansible_module_' in filename:
# Rewrite the module path from the remote host to match the controller. Ansible 2.6 and earlier.
        module_name = re.sub(r'^.*/ansible_module_(?P<module>.*)\.py$', r'\g<module>', filename)
if module_name not in modules:
display.warning('Skipping coverage of unknown module: %s' % module_name)
return None
new_name = os.path.abspath(modules[module_name])
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
elif re.search(r'/ansible_[^/]+_payload(_[^/]+|\.zip)/__main__\.py$', filename):
# Rewrite the module path from the remote host to match the controller. Ansible 2.7 and later.
# AnsiballZ versions using zipimporter will match the `.zip` portion of the regex.
# AnsiballZ versions not using zipimporter will match the `_[^/]+` portion of the regex.
module_name = re.sub(r'^.*/ansible_(?P<module>[^/]+)_payload(_[^/]+|\.zip)/__main__\.py$',
'\\g<module>', filename).rstrip('_')
if module_name not in modules:
display.warning('Skipping coverage of unknown module: %s' % module_name)
return None
new_name = os.path.abspath(modules[module_name])
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
elif re.search('^(/.*?)?/root/ansible/', filename):
# Rewrite the path of code running on a remote host or in a docker container as root.
new_name = re.sub('^(/.*?)?/root/ansible/', root_path, filename)
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
elif integration_temp_path in filename:
# Rewrite the path of code running from an integration test temporary directory.
new_name = re.sub(r'^.*' + re.escape(integration_temp_path) + '[^/]+/', root_path, filename)
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
filename = os.path.abspath(filename) # make sure path is absolute (will be relative if previously exported)
return filename
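
# Worked example with hypothetical paths, not in the original source: a module
# executed remotely via AnsiballZ might report coverage for
#
#     /root/.ansible/tmp/ansible-tmp-123/ansible_ping_payload.zip/__main__.py
#
# which sanitize_filename() maps back to the controller-side source, such as
# lib/ansible/modules/ping.py, so results from many hosts can be merged.
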
class PathChecker:
"""Checks code coverage paths to verify they are valid and reports on the findings."""
def __init__(self, args: CoverageConfig, collection_search_re: t.Optional[t.Pattern] = None) -> None:
self.args = args
self.collection_search_re = collection_search_re
self.invalid_paths: list[str] = []
        self.invalid_path_chars = 0

def check_path(self, path: str) -> bool:
"""Return True if the given coverage path is valid, otherwise display a warning and return False."""
if os.path.isfile(to_bytes(path)):
return True
if self.collection_search_re and self.collection_search_re.search(path) and os.path.basename(path) == '__init__.py':
# the collection loader uses implicit namespace packages, so __init__.py does not need to exist on disk
# coverage is still reported for these non-existent files, but warnings are not needed
return False
self.invalid_paths.append(path)
self.invalid_path_chars += len(path)
if self.args.verbosity > 1:
display.warning('Invalid coverage path: %s' % path)
        return False

def report(self) -> None:
"""Display a warning regarding invalid paths if any were found."""
if self.invalid_paths:
display.warning('Ignored %d characters from %d invalid coverage path(s).' % (self.invalid_path_chars, len(self.invalid_paths)))
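
# Usage sketch, not part of the original module, using only names defined above:
#
#     checker = PathChecker(args, collection_search_re)
#     valid_paths = [path for path in paths if checker.check_path(path)]
#     checker.report()
#
# check_path() filters out stale or remote-only paths that survived
# sanitize_filename(), and report() emits a single summary warning at the end.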