| id (int64) | file_name (string) | file_path (string) | content (string) | size (int64) | language (string) | extension (string) | total_lines (int64) | avg_line_length (float64) | max_line_length (int64) | alphanum_fraction (float64) | repo_name (string) | repo_stars (int64) | repo_forks (int64) | repo_open_issues (int64) | repo_license (string) | repo_extraction_date (string) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 13,200 | html.py | ansible_ansible/test/lib/ansible_test/_internal/commands/coverage/html.py |
"""Generate HTML code coverage reports."""
from __future__ import annotations
import os
from ...io import (
make_dirs,
)
from ...util import (
display,
)
from ...util_common import (
ResultType,
)
from ...provisioning import (
prepare_profiles,
)
from .combine import (
combine_coverage_files,
CoverageCombineConfig,
)
from . import (
run_coverage,
)
def command_coverage_html(args: CoverageHtmlConfig) -> None:
"""Generate an HTML coverage report."""
host_state = prepare_profiles(args) # coverage html
output_files = combine_coverage_files(args, host_state)
for output_file in output_files:
if output_file.endswith('-powershell'):
# coverage.py does not support non-Python files so we just skip the local html report.
display.info("Skipping output file %s in html generation" % output_file, verbosity=3)
continue
dir_name = os.path.join(ResultType.REPORTS.path, os.path.basename(output_file))
make_dirs(dir_name)
run_coverage(args, host_state, output_file, 'html', ['-i', '-d', dir_name])
display.info('HTML report generated: file:///%s' % os.path.join(dir_name, 'index.html'))
class CoverageHtmlConfig(CoverageCombineConfig):
"""Configuration for the coverage html command."""
| 1,319 | Python | .py | 37 | 30.918919 | 98 | 0.692429 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
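The loop in `command_coverage_html` above creates one report directory per combined coverage file and skips PowerShell data, since coverage.py only understands Python data. Below is a minimal sketch of that path logic, assuming a stand-in for `ResultType.REPORTS.path` and hypothetical output file names; the real `run_coverage` call is represented by a print.

```python
"""Minimal sketch of the per-file output-directory logic in command_coverage_html above.

REPORTS_PATH stands in for ResultType.REPORTS.path and the output_files list is
hypothetical example data; the real run_coverage() call is represented by a print().
"""
import os

REPORTS_PATH = 'tests/output/reports'  # assumed location for this sketch

output_files = [
    'tests/output/coverage/coverage=integration=docker-default=python-3.12',
    'tests/output/coverage/coverage=windows-integration=remote=powershell-powershell',
]

for output_file in output_files:
    if output_file.endswith('-powershell'):
        # coverage.py only handles Python data, so PowerShell files get no local HTML report
        print(f'skipping {output_file}')
        continue
    # one HTML report directory per combined coverage file, named after the file itself
    dir_name = os.path.join(REPORTS_PATH, os.path.basename(output_file))
    os.makedirs(dir_name, exist_ok=True)  # the real code uses make_dirs()
    print(f'would run: coverage html -i -d {dir_name} (against {output_file})')
```

The `-i` and `-d` arguments passed through `run_coverage` correspond to coverage.py's `--ignore-errors` and `--directory` options for `coverage html`.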
| 13,201 | erase.py | ansible_ansible/test/lib/ansible_test/_internal/commands/coverage/erase.py |
"""Erase code coverage files."""
from __future__ import annotations
import os
from ...util_common import (
ResultType,
)
from ...executor import (
Delegate,
)
from ...provisioning import (
prepare_profiles,
)
from . import (
CoverageConfig,
)
def command_coverage_erase(args: CoverageEraseConfig) -> None:
"""Erase code coverage data files collected during test runs."""
host_state = prepare_profiles(args) # coverage erase
if args.delegate:
raise Delegate(host_state=host_state)
coverage_dir = ResultType.COVERAGE.path
for name in os.listdir(coverage_dir):
if not name.startswith('coverage') and '=coverage.' not in name:
continue
path = os.path.join(coverage_dir, name)
if not args.explain:
os.remove(path)
class CoverageEraseConfig(CoverageConfig):
"""Configuration for the coverage erase command."""
| 915 | Python | .py | 29 | 26.758621 | 72 | 0.699541 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
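`command_coverage_erase` above removes a file only when its name starts with `coverage` or contains `=coverage.`; anything else in the coverage results directory is left alone. A small sketch of that filter with hypothetical file names:

```python
"""Sketch of the filename filter used by command_coverage_erase above.

The example names are hypothetical; the real command lists ResultType.COVERAGE.path.
"""
names = [
    'coverage',                                                       # combined python data
    'integration=apt=docker-default=python-3.12=coverage.123.456',   # per-run python data
    'README.md',                                                      # unrelated file, left alone
]

to_remove = [name for name in names if name.startswith('coverage') or '=coverage.' in name]
print(to_remove)  # everything except README.md
```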
| 13,202 | combine.py | ansible_ansible/test/lib/ansible_test/_internal/commands/coverage/combine.py |
"""Combine code coverage files."""
from __future__ import annotations
import collections.abc as c
import os
import json
import typing as t
from ...target import (
walk_compile_targets,
walk_powershell_targets,
)
from ...io import (
read_text_file,
)
from ...util import (
ANSIBLE_TEST_TOOLS_ROOT,
display,
ApplicationError,
raw_command,
)
from ...util_common import (
ResultType,
write_json_file,
write_json_test_results,
)
from ...executor import (
Delegate,
)
from ...data import (
data_context,
PayloadConfig,
)
from ...host_configs import (
DockerConfig,
RemoteConfig,
)
from ...provisioning import (
HostState,
prepare_profiles,
)
from . import (
enumerate_python_arcs,
enumerate_powershell_lines,
get_collection_path_regexes,
get_all_coverage_files,
get_python_coverage_files,
get_python_modules,
get_powershell_coverage_files,
initialize_coverage,
COVERAGE_OUTPUT_FILE_NAME,
COVERAGE_GROUPS,
CoverageConfig,
PathChecker,
)
TValue = t.TypeVar('TValue')
def command_coverage_combine(args: CoverageCombineConfig) -> None:
"""Patch paths in coverage files and merge into a single file."""
host_state = prepare_profiles(args) # coverage combine
combine_coverage_files(args, host_state)
def combine_coverage_files(args: CoverageCombineConfig, host_state: HostState) -> list[str]:
"""Combine coverage and return a list of the resulting files."""
if args.delegate:
if isinstance(args.controller, (DockerConfig, RemoteConfig)):
paths = get_all_coverage_files()
exported_paths = [path for path in paths if os.path.basename(path).split('=')[-1].split('.')[:2] == ['coverage', 'combined']]
if not exported_paths:
raise ExportedCoverageDataNotFound()
pairs = [(path, os.path.relpath(path, data_context().content.root)) for path in exported_paths]
def coverage_callback(payload_config: PayloadConfig) -> None:
"""Add the coverage files to the payload file list."""
display.info('Including %d exported coverage file(s) in payload.' % len(pairs), verbosity=1)
files = payload_config.files
files.extend(pairs)
data_context().register_payload_callback(coverage_callback)
raise Delegate(host_state=host_state)
paths = _command_coverage_combine_powershell(args) + _command_coverage_combine_python(args, host_state)
for path in paths:
display.info('Generated combined output: %s' % path, verbosity=1)
return paths
class ExportedCoverageDataNotFound(ApplicationError):
"""Exception when no combined coverage data is present yet is required."""
def __init__(self) -> None:
super().__init__(
'Coverage data must be exported before processing with the `--docker` or `--remote` option.\n'
'Export coverage with `ansible-test coverage combine` using the `--export` option.\n'
'The exported files must be in the directory: %s/' % ResultType.COVERAGE.relative_path
)
def _command_coverage_combine_python(args: CoverageCombineConfig, host_state: HostState) -> list[str]:
"""Combine Python coverage files and return a list of the output files."""
coverage = initialize_coverage(args, host_state)
modules = get_python_modules()
coverage_files = get_python_coverage_files()
def _default_stub_value(source_paths: list[str]) -> dict[str, set[tuple[int, int]]]:
return {path: {(0, 0)} for path in source_paths}
counter = 0
sources = _get_coverage_targets(args, walk_compile_targets)
groups = _build_stub_groups(args, sources, _default_stub_value)
collection_search_re, collection_sub_re = get_collection_path_regexes()
for coverage_file in coverage_files:
counter += 1
display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
group = get_coverage_group(args, coverage_file)
if group is None:
display.warning('Unexpected name for coverage file: %s' % coverage_file)
continue
for filename, arcs in enumerate_python_arcs(coverage_file, coverage, modules, collection_search_re, collection_sub_re):
if args.export:
filename = os.path.relpath(filename) # exported paths must be relative since absolute paths may differ between systems
if group not in groups:
groups[group] = {}
arc_data = groups[group]
if filename not in arc_data:
arc_data[filename] = set()
arc_data[filename].update(arcs)
output_files = []
if args.export:
coverage_file = os.path.join(args.export, '')
suffix = '=coverage.combined'
else:
coverage_file = os.path.join(ResultType.COVERAGE.path, COVERAGE_OUTPUT_FILE_NAME)
suffix = ''
path_checker = PathChecker(args, collection_search_re)
for group in sorted(groups):
arc_data = groups[group]
output_file = coverage_file + group + suffix
if args.explain:
continue
updated = coverage.CoverageData(output_file)
for filename in arc_data:
if not path_checker.check_path(filename):
continue
updated.add_arcs({filename: list(arc_data[filename])})
if args.all:
updated.add_arcs(dict((source[0], []) for source in sources))
updated.write() # always write files to make sure stale files do not exist
if updated:
# only report files which are non-empty to prevent coverage from reporting errors
output_files.append(output_file)
path_checker.report()
return sorted(output_files)
def _command_coverage_combine_powershell(args: CoverageCombineConfig) -> list[str]:
"""Combine PowerShell coverage files and return a list of the output files."""
coverage_files = get_powershell_coverage_files()
def _default_stub_value(source_paths: list[str]) -> dict[str, dict[int, int]]:
cmd = ['pwsh', os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'coverage_stub.ps1')]
cmd.extend(source_paths)
stubs = json.loads(raw_command(cmd, capture=True)[0])
return dict((d['Path'], dict((line, 0) for line in d['Lines'])) for d in stubs)
counter = 0
sources = _get_coverage_targets(args, walk_powershell_targets)
groups = _build_stub_groups(args, sources, _default_stub_value)
collection_search_re, collection_sub_re = get_collection_path_regexes()
for coverage_file in coverage_files:
counter += 1
display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
group = get_coverage_group(args, coverage_file)
if group is None:
display.warning('Unexpected name for coverage file: %s' % coverage_file)
continue
for filename, hits in enumerate_powershell_lines(coverage_file, collection_search_re, collection_sub_re):
if args.export:
filename = os.path.relpath(filename) # exported paths must be relative since absolute paths may differ between systems
if group not in groups:
groups[group] = {}
coverage_data = groups[group]
if filename not in coverage_data:
coverage_data[filename] = {}
file_coverage = coverage_data[filename]
for line_no, hit_count in hits.items():
file_coverage[line_no] = file_coverage.get(line_no, 0) + hit_count
output_files = []
path_checker = PathChecker(args)
for group in sorted(groups):
coverage_data = dict((filename, data) for filename, data in groups[group].items() if path_checker.check_path(filename))
if args.all:
missing_sources = [source for source, _source_line_count in sources if source not in coverage_data]
coverage_data.update(_default_stub_value(missing_sources))
if not args.explain:
if args.export:
output_file = os.path.join(args.export, group + '=coverage.combined')
write_json_file(output_file, coverage_data, formatted=False)
output_files.append(output_file)
continue
output_file = COVERAGE_OUTPUT_FILE_NAME + group + '-powershell'
write_json_test_results(ResultType.COVERAGE, output_file, coverage_data, formatted=False)
output_files.append(os.path.join(ResultType.COVERAGE.path, output_file))
path_checker.report()
return sorted(output_files)
def _get_coverage_targets(args: CoverageCombineConfig, walk_func: c.Callable) -> list[tuple[str, int]]:
"""Return a list of files to cover and the number of lines in each file, using the given function as the source of the files."""
sources = []
if args.all or args.stub:
# excludes symlinks of regular files to avoid reporting on the same file multiple times
# in the future it would be nice to merge any coverage for symlinks into the real files
for target in walk_func(include_symlinks=False):
target_path = os.path.abspath(target.path)
target_lines = len(read_text_file(target_path).splitlines())
sources.append((target_path, target_lines))
sources.sort()
return sources
def _build_stub_groups(
args: CoverageCombineConfig,
sources: list[tuple[str, int]],
default_stub_value: c.Callable[[list[str]], dict[str, TValue]],
) -> dict[str, dict[str, TValue]]:
"""
Split the given list of sources with line counts into groups, maintaining a maximum line count for each group.
Each group consists of a dictionary of sources and default coverage stubs generated by the provided default_stub_value function.
"""
groups = {}
if args.stub:
stub_group: list[str] = []
stub_groups = [stub_group]
stub_line_limit = 500000
stub_line_count = 0
for source, source_line_count in sources:
stub_group.append(source)
stub_line_count += source_line_count
if stub_line_count > stub_line_limit:
stub_line_count = 0
stub_group = []
stub_groups.append(stub_group)
for stub_index, stub_group in enumerate(stub_groups):
if not stub_group:
continue
groups['=stub-%02d' % (stub_index + 1)] = default_stub_value(stub_group)
return groups
def get_coverage_group(args: CoverageCombineConfig, coverage_file: str) -> t.Optional[str]:
"""Return the name of the coverage group for the specified coverage file, or None if no group was found."""
parts = os.path.basename(coverage_file).split('=', 4)
if len(parts) != 5 or not parts[4].startswith('coverage.'):
return None
names = dict(
command=parts[0],
target=parts[1],
environment=parts[2],
version=parts[3],
)
export_names = dict(
version=parts[3],
)
group = ''
for part in COVERAGE_GROUPS:
if part in args.group_by:
group += '=%s' % names[part]
elif args.export:
group += '=%s' % export_names.get(part, 'various')
if args.export:
group = group.lstrip('=')
return group
class CoverageCombineConfig(CoverageConfig):
"""Configuration for the coverage combine command."""
def __init__(self, args: t.Any) -> None:
super().__init__(args)
self.group_by: frozenset[str] = frozenset(args.group_by) if args.group_by else frozenset()
self.all: bool = args.all
self.stub: bool = args.stub
# only available to coverage combine
self.export: str = args.export if 'export' in args else False
| 11,950 | Python | .py | 258 | 38.104651 | 137 | 0.655616 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
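The bucketing in `_build_stub_groups` above is easier to see with small numbers. A sketch with made-up sources and a 10-line limit standing in for the real 500,000-line `stub_line_limit`; each resulting group becomes one `=stub-NN` entry:

```python
"""Worked sketch of the bucketing done by _build_stub_groups above: sources are split into
groups of roughly stub_line_limit lines so stub coverage data stays a manageable size.

The sources list and the 10-line limit are made-up example data; the real limit is 500,000.
"""
sources = [('a.py', 4), ('b.py', 5), ('c.py', 3), ('d.py', 6), ('e.py', 1)]
stub_line_limit = 10

stub_group: list[str] = []
stub_groups = [stub_group]
stub_line_count = 0

for source, source_line_count in sources:
    stub_group.append(source)
    stub_line_count += source_line_count
    if stub_line_count > stub_line_limit:
        # the file that crossed the limit stays in the current group; the next group starts empty
        stub_line_count = 0
        stub_group = []
        stub_groups.append(stub_group)

print(stub_groups)  # [['a.py', 'b.py', 'c.py'], ['d.py', 'e.py']]
```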
| 13,203 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/commands/coverage/analyze/__init__.py |
"""Common logic for the `coverage analyze` subcommand."""
from __future__ import annotations
import typing as t
from .. import (
CoverageConfig,
)
class CoverageAnalyzeConfig(CoverageConfig):
"""Configuration for the `coverage analyze` command."""
def __init__(self, args: t.Any) -> None:
super().__init__(args)
# avoid mixing log messages with file output when using `/dev/stdout` for the output file on commands
# this may be worth considering as the default behavior in the future, instead of being dependent on the command or options used
self.display_stderr = True
| 619 | Python | .py | 13 | 42.846154 | 136 | 0.715474 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,204 | generate.py | ansible_ansible/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/generate.py |
"""Analyze code coverage data to determine which integration test targets provide coverage for each arc or line."""
from __future__ import annotations
import os
import typing as t
from .....encoding import (
to_text,
)
from .....data import (
data_context,
)
from .....util_common import (
ResultType,
)
from .....executor import (
Delegate,
)
from .....provisioning import (
prepare_profiles,
HostState,
)
from ... import (
enumerate_powershell_lines,
enumerate_python_arcs,
get_collection_path_regexes,
get_powershell_coverage_files,
get_python_coverage_files,
get_python_modules,
initialize_coverage,
PathChecker,
)
from . import (
CoverageAnalyzeTargetsConfig,
get_target_index,
make_report,
write_report,
)
from . import (
Arcs,
Lines,
TargetIndexes,
)
class CoverageAnalyzeTargetsGenerateConfig(CoverageAnalyzeTargetsConfig):
"""Configuration for the `coverage analyze targets generate` command."""
def __init__(self, args: t.Any) -> None:
super().__init__(args)
self.input_dir: str = args.input_dir or ResultType.COVERAGE.path
self.output_file: str = args.output_file
def command_coverage_analyze_targets_generate(args: CoverageAnalyzeTargetsGenerateConfig) -> None:
"""Analyze code coverage data to determine which integration test targets provide coverage for each arc or line."""
host_state = prepare_profiles(args) # coverage analyze targets generate
if args.delegate:
raise Delegate(host_state)
root = data_context().content.root
target_indexes: TargetIndexes = {}
arcs = dict((os.path.relpath(path, root), data) for path, data in analyze_python_coverage(args, host_state, args.input_dir, target_indexes).items())
lines = dict((os.path.relpath(path, root), data) for path, data in analyze_powershell_coverage(args, args.input_dir, target_indexes).items())
report = make_report(target_indexes, arcs, lines)
write_report(args, report, args.output_file)
def analyze_python_coverage(
args: CoverageAnalyzeTargetsGenerateConfig,
host_state: HostState,
path: str,
target_indexes: TargetIndexes,
) -> Arcs:
"""Analyze Python code coverage."""
results: Arcs = {}
collection_search_re, collection_sub_re = get_collection_path_regexes()
modules = get_python_modules()
python_files = get_python_coverage_files(path)
coverage = initialize_coverage(args, host_state)
for python_file in python_files:
if not is_integration_coverage_file(python_file):
continue
target_name = get_target_name(python_file)
target_index = get_target_index(target_name, target_indexes)
for filename, covered_arcs in enumerate_python_arcs(python_file, coverage, modules, collection_search_re, collection_sub_re):
arcs = results.setdefault(filename, {})
for covered_arc in covered_arcs:
arc = arcs.setdefault(covered_arc, set())
arc.add(target_index)
prune_invalid_filenames(args, results, collection_search_re=collection_search_re)
return results
def analyze_powershell_coverage(
args: CoverageAnalyzeTargetsGenerateConfig,
path: str,
target_indexes: TargetIndexes,
) -> Lines:
"""Analyze PowerShell code coverage"""
results: Lines = {}
collection_search_re, collection_sub_re = get_collection_path_regexes()
powershell_files = get_powershell_coverage_files(path)
for powershell_file in powershell_files:
if not is_integration_coverage_file(powershell_file):
continue
target_name = get_target_name(powershell_file)
target_index = get_target_index(target_name, target_indexes)
for filename, hits in enumerate_powershell_lines(powershell_file, collection_search_re, collection_sub_re):
lines = results.setdefault(filename, {})
for covered_line in hits:
line = lines.setdefault(covered_line, set())
line.add(target_index)
prune_invalid_filenames(args, results)
return results
def prune_invalid_filenames(
args: CoverageAnalyzeTargetsGenerateConfig,
results: dict[str, t.Any],
collection_search_re: t.Optional[t.Pattern] = None,
) -> None:
"""Remove invalid filenames from the given result set."""
path_checker = PathChecker(args, collection_search_re)
for path in list(results.keys()):
if not path_checker.check_path(path):
del results[path]
def get_target_name(path: str) -> str:
"""Extract the test target name from the given coverage path."""
return to_text(os.path.basename(path).split('=')[1])
def is_integration_coverage_file(path: str) -> bool:
"""Returns True if the coverage file came from integration tests, otherwise False."""
return os.path.basename(path).split('=')[0] in ('integration', 'windows-integration', 'network-integration')
| 4,972 | Python | .py | 119 | 36.142857 | 152 | 0.706212 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
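`generate.py` above derives the integration target from the coverage file name, whose `=`-separated fields are command, target, environment and version (see `get_coverage_group` in combine.py). A sketch with a made-up file name; the suffix after `coverage.` is illustrative only:

```python
"""Sketch of how generate.py above derives target names from coverage file names.

The file name below is a made-up example following the
'<command>=<target>=<environment>=<version>=coverage.<suffix>' pattern.
"""
import os

path = '/tmp/results/integration=apt=docker-default=python-3.12=coverage.abc123.1234'

def get_target_name(path: str) -> str:
    # the second '='-separated field of the file name is the integration test target
    return os.path.basename(path).split('=')[1]

def is_integration_coverage_file(path: str) -> bool:
    # only files produced by the integration commands contribute to the per-target report
    return os.path.basename(path).split('=')[0] in ('integration', 'windows-integration', 'network-integration')

print(get_target_name(path), is_integration_coverage_file(path))  # apt True
```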
| 13,205 | filter.py | ansible_ansible/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/filter.py |
"""Filter an aggregated coverage file, keeping only the specified targets."""
from __future__ import annotations
import collections.abc as c
import re
import typing as t
from .....executor import (
Delegate,
)
from .....provisioning import (
prepare_profiles,
)
from . import (
CoverageAnalyzeTargetsConfig,
expand_indexes,
generate_indexes,
make_report,
read_report,
write_report,
)
from . import (
NamedPoints,
TargetKey,
TargetIndexes,
)
class CoverageAnalyzeTargetsFilterConfig(CoverageAnalyzeTargetsConfig):
"""Configuration for the `coverage analyze targets filter` command."""
def __init__(self, args: t.Any) -> None:
super().__init__(args)
self.input_file: str = args.input_file
self.output_file: str = args.output_file
self.include_targets: list[str] = args.include_targets
self.exclude_targets: list[str] = args.exclude_targets
self.include_path: t.Optional[str] = args.include_path
self.exclude_path: t.Optional[str] = args.exclude_path
def command_coverage_analyze_targets_filter(args: CoverageAnalyzeTargetsFilterConfig) -> None:
"""Filter target names in an aggregated coverage file."""
host_state = prepare_profiles(args) # coverage analyze targets filter
if args.delegate:
raise Delegate(host_state=host_state)
covered_targets, covered_path_arcs, covered_path_lines = read_report(args.input_file)
def pass_target_key(value: TargetKey) -> TargetKey:
"""Return the given target key unmodified."""
return value
filtered_path_arcs = expand_indexes(covered_path_arcs, covered_targets, pass_target_key)
filtered_path_lines = expand_indexes(covered_path_lines, covered_targets, pass_target_key)
include_targets = set(args.include_targets) if args.include_targets else None
exclude_targets = set(args.exclude_targets) if args.exclude_targets else None
include_path = re.compile(args.include_path) if args.include_path else None
exclude_path = re.compile(args.exclude_path) if args.exclude_path else None
def path_filter_func(path: str) -> bool:
"""Return True if the given path should be included, otherwise return False."""
if include_path and not re.search(include_path, path):
return False
if exclude_path and re.search(exclude_path, path):
return False
return True
def target_filter_func(targets: set[str]) -> set[str]:
"""Filter the given targets and return the result based on the defined includes and excludes."""
if include_targets:
targets &= include_targets
if exclude_targets:
targets -= exclude_targets
return targets
filtered_path_arcs = filter_data(filtered_path_arcs, path_filter_func, target_filter_func)
filtered_path_lines = filter_data(filtered_path_lines, path_filter_func, target_filter_func)
target_indexes: TargetIndexes = {}
indexed_path_arcs = generate_indexes(target_indexes, filtered_path_arcs)
indexed_path_lines = generate_indexes(target_indexes, filtered_path_lines)
report = make_report(target_indexes, indexed_path_arcs, indexed_path_lines)
write_report(args, report, args.output_file)
def filter_data(
data: NamedPoints,
path_filter_func: c.Callable[[str], bool],
target_filter_func: c.Callable[[set[str]], set[str]],
) -> NamedPoints:
"""Filter the data set using the specified filter function."""
result: NamedPoints = {}
for src_path, src_points in data.items():
if not path_filter_func(src_path):
continue
dst_points = {}
for src_point, src_targets in src_points.items():
dst_targets = target_filter_func(src_targets)
if dst_targets:
dst_points[src_point] = dst_targets
if dst_points:
result[src_path] = dst_points
return result
| 3,958 | Python | .py | 88 | 38.488636 | 104 | 0.697784 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
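A minimal sketch of the filtering performed by `filter_data` above, using made-up arc data keyed by target names; paths outside `lib/` and targets other than `apt` are dropped, and points or paths that end up empty disappear with them:

```python
"""Minimal sketch of filter_data from filter.py above, using made-up NamedPoints-style data.

Points are arc tuples mapping to the set of target names that covered them.
"""
data = {
    'lib/ansible/module_utils/basic.py': {(1, 2): {'apt', 'yum'}, (2, 3): {'apt'}},
    'test/sanity/ignore.txt': {(1, 2): {'apt'}},
}

def path_filter_func(path: str) -> bool:
    # keep only library code
    return path.startswith('lib/')

def target_filter_func(targets: set[str]) -> set[str]:
    # keep only the targets we are interested in
    return targets & {'apt'}

result = {}
for src_path, src_points in data.items():
    if not path_filter_func(src_path):
        continue
    dst_points = {point: target_filter_func(targets) for point, targets in src_points.items()}
    # drop points whose target set filtered down to nothing
    dst_points = {point: targets for point, targets in dst_points.items() if targets}
    if dst_points:
        result[src_path] = dst_points

print(result)  # {'lib/ansible/module_utils/basic.py': {(1, 2): {'apt'}, (2, 3): {'apt'}}}
```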
| 13,206 | missing.py | ansible_ansible/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/missing.py |
"""Identify aggregated coverage in one file missing from another."""
from __future__ import annotations
import os
import typing as t
from .....encoding import (
to_bytes,
)
from .....executor import (
Delegate,
)
from .....provisioning import (
prepare_profiles,
)
from . import (
CoverageAnalyzeTargetsConfig,
get_target_index,
make_report,
read_report,
write_report,
)
from . import (
TargetIndexes,
IndexedPoints,
)
class CoverageAnalyzeTargetsMissingConfig(CoverageAnalyzeTargetsConfig):
"""Configuration for the `coverage analyze targets missing` command."""
def __init__(self, args: t.Any) -> None:
super().__init__(args)
self.from_file: str = args.from_file
self.to_file: str = args.to_file
self.output_file: str = args.output_file
self.only_gaps: bool = args.only_gaps
self.only_exists: bool = args.only_exists
def command_coverage_analyze_targets_missing(args: CoverageAnalyzeTargetsMissingConfig) -> None:
"""Identify aggregated coverage in one file missing from another."""
host_state = prepare_profiles(args) # coverage analyze targets missing
if args.delegate:
raise Delegate(host_state=host_state)
from_targets, from_path_arcs, from_path_lines = read_report(args.from_file)
to_targets, to_path_arcs, to_path_lines = read_report(args.to_file)
target_indexes: TargetIndexes = {}
if args.only_gaps:
arcs = find_gaps(from_path_arcs, from_targets, to_path_arcs, target_indexes, args.only_exists)
lines = find_gaps(from_path_lines, from_targets, to_path_lines, target_indexes, args.only_exists)
else:
arcs = find_missing(from_path_arcs, from_targets, to_path_arcs, to_targets, target_indexes, args.only_exists)
lines = find_missing(from_path_lines, from_targets, to_path_lines, to_targets, target_indexes, args.only_exists)
report = make_report(target_indexes, arcs, lines)
write_report(args, report, args.output_file)
def find_gaps(
from_data: IndexedPoints,
from_index: list[str],
to_data: IndexedPoints,
target_indexes: TargetIndexes,
only_exists: bool,
) -> IndexedPoints:
"""Find gaps in coverage between the from and to data sets."""
target_data: IndexedPoints = {}
for from_path, from_points in from_data.items():
if only_exists and not os.path.isfile(to_bytes(from_path)):
continue
to_points = to_data.get(from_path, {})
gaps = set(from_points.keys()) - set(to_points.keys())
if gaps:
gap_points = dict((key, value) for key, value in from_points.items() if key in gaps)
target_data[from_path] = dict((gap, set(get_target_index(from_index[i], target_indexes) for i in indexes)) for gap, indexes in gap_points.items())
return target_data
def find_missing(
from_data: IndexedPoints,
from_index: list[str],
to_data: IndexedPoints,
to_index: list[str],
target_indexes: TargetIndexes,
only_exists: bool,
) -> IndexedPoints:
"""Find coverage in from_data not present in to_data (arcs or lines)."""
target_data: IndexedPoints = {}
for from_path, from_points in from_data.items():
if only_exists and not os.path.isfile(to_bytes(from_path)):
continue
to_points = to_data.get(from_path, {})
for from_point, from_target_indexes in from_points.items():
to_target_indexes = to_points.get(from_point, set())
remaining_targets = set(from_index[i] for i in from_target_indexes) - set(to_index[i] for i in to_target_indexes)
if remaining_targets:
target_index = target_data.setdefault(from_path, {}).setdefault(from_point, set())
target_index.update(get_target_index(name, target_indexes) for name in remaining_targets)
return target_data
| 3,894 | Python | .py | 88 | 38.068182 | 158 | 0.683095 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
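The core of `find_gaps` above is plain set arithmetic on the covered points. A worked sketch with made-up line and target-index data; the real function additionally remaps the surviving indexes back to target names via `get_target_index`:

```python
"""Worked sketch of the set arithmetic behind find_gaps in missing.py above.

Keys are line numbers and values are sets of target indexes; the data is made up.
"""
from_points = {10: {0}, 20: {0, 1}, 30: {1}}
to_points = {10: {2}}

# a "gap" is any point covered in the from-report that the to-report does not cover at all
gaps = set(from_points) - set(to_points)
gap_points = {point: targets for point, targets in from_points.items() if point in gaps}

print(sorted(gaps))  # [20, 30]
print(gap_points)    # {20: {0, 1}, 30: {1}}
```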
| 13,207 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/__init__.py |
"""Analyze integration test target code coverage."""
from __future__ import annotations
import collections.abc as c
import os
import typing as t
from .....io import (
read_json_file,
write_json_file,
)
from .....util import (
ApplicationError,
display,
)
from .. import (
CoverageAnalyzeConfig,
)
TargetKey = t.TypeVar('TargetKey', int, tuple[int, int])
TFlexKey = t.TypeVar('TFlexKey', int, tuple[int, int], str)
NamedPoints = dict[str, dict[TargetKey, set[str]]]
IndexedPoints = dict[str, dict[TargetKey, set[int]]]
Arcs = dict[str, dict[tuple[int, int], set[int]]]
Lines = dict[str, dict[int, set[int]]]
TargetIndexes = dict[str, int]
TargetSetIndexes = dict[frozenset[int], int]
class CoverageAnalyzeTargetsConfig(CoverageAnalyzeConfig):
"""Configuration for the `coverage analyze targets` command."""
def make_report(target_indexes: TargetIndexes, arcs: Arcs, lines: Lines) -> dict[str, t.Any]:
"""Condense target indexes, arcs and lines into a compact report."""
set_indexes: TargetSetIndexes = {}
arc_refs = dict((path, dict((format_arc(arc), get_target_set_index(indexes, set_indexes)) for arc, indexes in data.items())) for path, data in arcs.items())
line_refs = dict((path, dict((line, get_target_set_index(indexes, set_indexes)) for line, indexes in data.items())) for path, data in lines.items())
report = dict(
targets=[name for name, index in sorted(target_indexes.items(), key=lambda kvp: kvp[1])],
target_sets=[sorted(data) for data, index in sorted(set_indexes.items(), key=lambda kvp: kvp[1])],
arcs=arc_refs,
lines=line_refs,
)
return report
def load_report(report: dict[str, t.Any]) -> tuple[list[str], Arcs, Lines]:
"""Extract target indexes, arcs and lines from an existing report."""
try:
target_indexes: list[str] = report['targets']
target_sets: list[list[int]] = report['target_sets']
arc_data: dict[str, dict[str, int]] = report['arcs']
line_data: dict[str, dict[int, int]] = report['lines']
except KeyError as ex:
raise ApplicationError('Document is missing key "%s".' % ex.args) from None
except TypeError:
raise ApplicationError('Document is type "%s" instead of "dict".' % type(report).__name__) from None
arcs = dict((path, dict((parse_arc(arc), set(target_sets[index])) for arc, index in data.items())) for path, data in arc_data.items())
lines = dict((path, dict((int(line), set(target_sets[index])) for line, index in data.items())) for path, data in line_data.items())
return target_indexes, arcs, lines
def read_report(path: str) -> tuple[list[str], Arcs, Lines]:
"""Read a JSON report from disk."""
try:
report = read_json_file(path)
except Exception as ex:
raise ApplicationError('File "%s" is not valid JSON: %s' % (path, ex)) from None
try:
return load_report(report)
except ApplicationError as ex:
raise ApplicationError('File "%s" is not an aggregated coverage data file. %s' % (path, ex)) from None
def write_report(args: CoverageAnalyzeTargetsConfig, report: dict[str, t.Any], path: str) -> None:
"""Write a JSON report to disk."""
if args.explain:
return
write_json_file(path, report, formatted=False)
display.info('Generated %d byte report with %d targets covering %d files.' % (
os.path.getsize(path), len(report['targets']), len(set(report['arcs'].keys()) | set(report['lines'].keys())),
), verbosity=1)
def format_line(value: int) -> str:
"""Format line as a string."""
return str(value) # putting this in a function keeps both pylint and mypy happy
def format_arc(value: tuple[int, int]) -> str:
"""Format an arc tuple as a string."""
return '%d:%d' % value
def parse_arc(value: str) -> tuple[int, int]:
"""Parse an arc string into a tuple."""
first, last = tuple(map(int, value.split(':')))
return first, last
def get_target_set_index(data: set[int], target_set_indexes: TargetSetIndexes) -> int:
"""Find or add the target set in the result set and return the target set index."""
return target_set_indexes.setdefault(frozenset(data), len(target_set_indexes))
def get_target_index(name: str, target_indexes: TargetIndexes) -> int:
"""Find or add the target in the result set and return the target index."""
return target_indexes.setdefault(name, len(target_indexes))
def expand_indexes(
source_data: IndexedPoints,
source_index: list[str],
format_func: c.Callable[[TargetKey], TFlexKey],
) -> dict[str, dict[TFlexKey, set[str]]]:
"""Expand indexes from the source into target names for easier processing of the data (arcs or lines)."""
combined_data: dict[str, dict[TFlexKey, set[str]]] = {}
for covered_path, covered_points in source_data.items():
combined_points = combined_data.setdefault(covered_path, {})
for covered_point, covered_target_indexes in covered_points.items():
combined_point = combined_points.setdefault(format_func(covered_point), set())
for covered_target_index in covered_target_indexes:
combined_point.add(source_index[covered_target_index])
return combined_data
def generate_indexes(target_indexes: TargetIndexes, data: NamedPoints) -> IndexedPoints:
"""Return an indexed version of the given data (arcs or points)."""
results: IndexedPoints = {}
for path, points in data.items():
result_points = results[path] = {}
for point, target_names in points.items():
result_point = result_points[point] = set()
for target_name in target_names:
result_point.add(get_target_index(target_name, target_indexes))
return results
| 5,785 | Python | .py | 110 | 47.118182 | 160 | 0.682472 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
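The report format built by `make_report` above stores arcs as `first:last` strings and de-duplicates target sets through an index table. A self-contained sketch of that encoding, reusing the `format_arc`, `parse_arc` and `get_target_set_index` helpers with made-up coverage data:

```python
"""Sketch of the report compaction used in targets/__init__.py above: arcs become
'first:last' strings and each distinct set of targets is stored once in target_sets.

The coverage data below is made up; only the encoding scheme is taken from the code.
"""
def format_arc(value: tuple[int, int]) -> str:
    return '%d:%d' % value

def parse_arc(value: str) -> tuple[int, int]:
    first, last = map(int, value.split(':'))
    return first, last

def get_target_set_index(data: set[int], target_set_indexes: dict[frozenset[int], int]) -> int:
    # each unique set of target indexes gets one slot in the report's target_sets list
    return target_set_indexes.setdefault(frozenset(data), len(target_set_indexes))

arcs = {(1, 2): {0, 1}, (2, 3): {0, 1}, (3, -1): {1}}
set_indexes: dict[frozenset[int], int] = {}
arc_refs = {format_arc(arc): get_target_set_index(indexes, set_indexes) for arc, indexes in arcs.items()}

print(arc_refs)           # {'1:2': 0, '2:3': 0, '3:-1': 1}
print(parse_arc('3:-1'))  # (3, -1)
print(len(set_indexes))   # 2 distinct target sets instead of 3 copies
```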
| 13,208 | combine.py | ansible_ansible/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/combine.py |
"""Combine integration test target code coverage reports."""
from __future__ import annotations
import typing as t
from .....executor import (
Delegate,
)
from .....provisioning import (
prepare_profiles,
)
from . import (
CoverageAnalyzeTargetsConfig,
get_target_index,
make_report,
read_report,
write_report,
)
from . import (
Arcs,
IndexedPoints,
Lines,
TargetIndexes,
)
class CoverageAnalyzeTargetsCombineConfig(CoverageAnalyzeTargetsConfig):
"""Configuration for the `coverage analyze targets combine` command."""
def __init__(self, args: t.Any) -> None:
super().__init__(args)
self.input_files: list[str] = args.input_file
self.output_file: str = args.output_file
def command_coverage_analyze_targets_combine(args: CoverageAnalyzeTargetsCombineConfig) -> None:
"""Combine integration test target code coverage reports."""
host_state = prepare_profiles(args) # coverage analyze targets combine
if args.delegate:
raise Delegate(host_state=host_state)
combined_target_indexes: TargetIndexes = {}
combined_path_arcs: Arcs = {}
combined_path_lines: Lines = {}
for report_path in args.input_files:
covered_targets, covered_path_arcs, covered_path_lines = read_report(report_path)
merge_indexes(covered_path_arcs, covered_targets, combined_path_arcs, combined_target_indexes)
merge_indexes(covered_path_lines, covered_targets, combined_path_lines, combined_target_indexes)
report = make_report(combined_target_indexes, combined_path_arcs, combined_path_lines)
write_report(args, report, args.output_file)
def merge_indexes(
source_data: IndexedPoints,
source_index: list[str],
combined_data: IndexedPoints,
combined_index: TargetIndexes,
) -> None:
"""Merge indexes from the source into the combined data set (arcs or lines)."""
for covered_path, covered_points in source_data.items():
combined_points = combined_data.setdefault(covered_path, {})
for covered_point, covered_target_indexes in covered_points.items():
combined_point = combined_points.setdefault(covered_point, set())
for covered_target_index in covered_target_indexes:
combined_point.add(get_target_index(source_index[covered_target_index], combined_index))
| 2,367 | Python | .py | 55 | 37.6 | 104 | 0.719895 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
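Because each report numbers its targets independently, `merge_indexes` above has to translate index to name and back to a combined index. A worked sketch with two made-up single-target reports covering the same line:

```python
"""Worked sketch of merge_indexes from combine.py above: two reports index targets
independently, so merging goes index -> name -> combined index. The data is made up.
"""
def get_target_index(name: str, target_indexes: dict[str, int]) -> int:
    return target_indexes.setdefault(name, len(target_indexes))

# report A says line 10 of a file was covered by its target #0 ('apt')
report_a = ({'lib/x.py': {10: {0}}}, ['apt'])
# report B says the same line was covered by its target #0 ('yum')
report_b = ({'lib/x.py': {10: {0}}}, ['yum'])

combined_index: dict[str, int] = {}
combined_data: dict[str, dict[int, set[int]]] = {}

for source_data, source_index in (report_a, report_b):
    for covered_path, covered_points in source_data.items():
        combined_points = combined_data.setdefault(covered_path, {})
        for covered_point, covered_target_indexes in covered_points.items():
            combined_point = combined_points.setdefault(covered_point, set())
            for covered_target_index in covered_target_indexes:
                combined_point.add(get_target_index(source_index[covered_target_index], combined_index))

print(combined_index)  # {'apt': 0, 'yum': 1}
print(combined_data)   # {'lib/x.py': {10: {0, 1}}}
```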
| 13,209 | expand.py | ansible_ansible/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/expand.py |
"""Expand target names in an aggregated coverage file."""
from __future__ import annotations
import typing as t
from .....io import (
SortedSetEncoder,
write_json_file,
)
from .....executor import (
Delegate,
)
from .....provisioning import (
prepare_profiles,
)
from . import (
CoverageAnalyzeTargetsConfig,
expand_indexes,
format_arc,
format_line,
read_report,
)
class CoverageAnalyzeTargetsExpandConfig(CoverageAnalyzeTargetsConfig):
"""Configuration for the `coverage analyze targets expand` command."""
def __init__(self, args: t.Any) -> None:
super().__init__(args)
self.input_file: str = args.input_file
self.output_file: str = args.output_file
def command_coverage_analyze_targets_expand(args: CoverageAnalyzeTargetsExpandConfig) -> None:
"""Expand target names in an aggregated coverage file."""
host_state = prepare_profiles(args) # coverage analyze targets expand
if args.delegate:
raise Delegate(host_state=host_state)
covered_targets, covered_path_arcs, covered_path_lines = read_report(args.input_file)
report = dict(
arcs=expand_indexes(covered_path_arcs, covered_targets, format_arc),
lines=expand_indexes(covered_path_lines, covered_targets, format_line),
)
if not args.explain:
write_json_file(args.output_file, report, encoder=SortedSetEncoder)
| 1,407 | Python | .py | 38 | 32.289474 | 94 | 0.718081 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,210 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/commands/shell/__init__.py |
"""Open a shell prompt inside an ansible-test environment."""
from __future__ import annotations
import os
import sys
import typing as t
from ...util import (
ApplicationError,
OutputStream,
display,
SubprocessError,
HostConnectionError,
)
from ...config import (
ShellConfig,
)
from ...executor import (
Delegate,
)
from ...connections import (
Connection,
LocalConnection,
SshConnection,
)
from ...host_profiles import (
ControllerProfile,
PosixProfile,
SshTargetHostProfile,
)
from ...provisioning import (
prepare_profiles,
)
from ...host_configs import (
ControllerConfig,
OriginConfig,
)
from ...inventory import (
create_controller_inventory,
create_posix_inventory,
)
def command_shell(args: ShellConfig) -> None:
"""Entry point for the `shell` command."""
if args.raw and isinstance(args.targets[0], ControllerConfig):
raise ApplicationError('The --raw option has no effect on the controller.')
if not args.export and not args.cmd and not sys.stdin.isatty():
raise ApplicationError('Standard input must be a TTY to launch a shell.')
host_state = prepare_profiles(args, skip_setup=args.raw) # shell
if args.delegate:
raise Delegate(host_state=host_state)
if args.raw and not isinstance(args.controller, OriginConfig):
display.warning('The --raw option will only be applied to the target.')
target_profile = t.cast(SshTargetHostProfile, host_state.target_profiles[0])
if isinstance(target_profile, ControllerProfile):
# run the shell locally unless a target was requested
con: Connection = LocalConnection(args)
if args.export:
display.info('Configuring controller inventory.', verbosity=1)
create_controller_inventory(args, args.export, host_state.controller_profile)
else:
# a target was requested, connect to it over SSH
con = target_profile.get_controller_target_connections()[0]
if args.export:
display.info('Configuring target inventory.', verbosity=1)
create_posix_inventory(args, args.export, host_state.target_profiles, True)
if args.export:
return
if args.cmd:
# Running a command is assumed to be non-interactive. Only a shell (no command) is interactive.
# If we want to support interactive commands in the future, we'll need an `--interactive` command line option.
# Command stderr output is allowed to mix with our own output, which is all sent to stderr.
con.run(args.cmd, capture=False, interactive=False, output_stream=OutputStream.ORIGINAL)
return
if isinstance(con, SshConnection) and args.raw:
cmd: list[str] = []
elif isinstance(target_profile, PosixProfile):
cmd = []
if args.raw:
shell = 'sh' # shell required for non-ssh connection
else:
shell = 'bash'
python = target_profile.python # make sure the python interpreter has been initialized before opening a shell
display.info(f'Target Python {python.version} is at: {python.path}')
optional_vars = (
'TERM', # keep backspace working
)
env = {name: os.environ[name] for name in optional_vars if name in os.environ}
if env:
cmd = ['/usr/bin/env'] + [f'{name}={value}' for name, value in env.items()]
cmd += [shell, '-i']
else:
cmd = []
try:
con.run(cmd, capture=False, interactive=True)
except SubprocessError as ex:
if isinstance(con, SshConnection) and ex.status == 255:
# 255 indicates SSH itself failed, rather than a command run on the remote host.
# In this case, report a host connection error so additional troubleshooting output is provided.
if not args.delegate and not args.host_path:
def callback() -> None:
"""Callback to run during error display."""
target_profile.on_target_failure() # when the controller is not delegated, report failures immediately
else:
callback = None
raise HostConnectionError(f'SSH shell connection failed for host {target_profile.config}: {ex}', callback) from ex
raise
| 4,371 | Python | .py | 104 | 34.519231 | 126 | 0.666982 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
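For a POSIX target, `command_shell` above assembles the interactive command as an optional `/usr/bin/env` prefix (to carry variables such as `TERM`) followed by `bash -i`, or `sh -i` when `--raw` is in effect. A sketch of just that list construction; the connection handling is omitted:

```python
"""Sketch of how command_shell above builds the interactive shell command for a POSIX target.

This only reproduces the list construction; the real code runs it over an ansible-test
Connection object. The raw flag and the environment are example inputs.
"""
import os

raw = False
shell = 'sh' if raw else 'bash'  # --raw falls back to plain sh

optional_vars = ('TERM',)  # keep backspace and other terminal handling working
env = {name: os.environ[name] for name in optional_vars if name in os.environ}

cmd: list[str] = []
if env:
    # re-export the selected variables on the remote side via env(1)
    cmd = ['/usr/bin/env'] + [f'{name}={value}' for name, value in env.items()]
cmd += [shell, '-i']

print(cmd)  # e.g. ['/usr/bin/env', 'TERM=xterm-256color', 'bash', '-i']
```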
| 13,211 | filters.py | ansible_ansible/test/lib/ansible_test/_internal/commands/integration/filters.py |
"""Logic for filtering out integration test targets which are unsupported for the currently provided arguments and available hosts."""
from __future__ import annotations
import abc
import typing as t
from ...config import (
IntegrationConfig,
)
from ...util import (
cache,
detect_architecture,
display,
get_type_map,
)
from ...target import (
IntegrationTarget,
)
from ...host_configs import (
ControllerConfig,
DockerConfig,
FallbackReason,
HostConfig,
NetworkInventoryConfig,
NetworkRemoteConfig,
OriginConfig,
PosixConfig,
PosixRemoteConfig,
PosixSshConfig,
RemoteConfig,
WindowsInventoryConfig,
WindowsRemoteConfig,
)
from ...host_profiles import (
HostProfile,
)
THostConfig = t.TypeVar('THostConfig', bound=HostConfig)
TPosixConfig = t.TypeVar('TPosixConfig', bound=PosixConfig)
TRemoteConfig = t.TypeVar('TRemoteConfig', bound=RemoteConfig)
THostProfile = t.TypeVar('THostProfile', bound=HostProfile)
class TargetFilter(t.Generic[THostConfig], metaclass=abc.ABCMeta):
"""Base class for target filters."""
def __init__(self, args: IntegrationConfig, configs: list[THostConfig], controller: bool) -> None:
self.args = args
self.configs = configs
self.controller = controller
self.host_type = 'controller' if controller else 'target'
# values which are not host specific
self.include_targets = args.include
self.allow_root = args.allow_root
self.allow_destructive = args.allow_destructive
@property
def config(self) -> THostConfig:
"""The configuration to filter. Only valid when there is a single config."""
if len(self.configs) != 1:
raise Exception()
return self.configs[0]
def skip(
self,
skip: str,
reason: str,
targets: list[IntegrationTarget],
exclude: set[str],
override: t.Optional[list[str]] = None,
) -> None:
"""Apply the specified skip rule to the given targets by updating the provided exclude list."""
if skip.startswith('skip/'):
skipped = [target.name for target in targets if skip in target.skips and (not override or target.name not in override)]
else:
skipped = [target.name for target in targets if f'{skip}/' in target.aliases and (not override or target.name not in override)]
self.apply_skip(f'"{skip}"', reason, skipped, exclude)
def apply_skip(self, marked: str, reason: str, skipped: list[str], exclude: set[str]) -> None:
"""Apply the provided skips to the given exclude list."""
if not skipped:
return
exclude.update(skipped)
display.warning(f'Excluding {self.host_type} tests marked {marked} {reason}: {", ".join(skipped)}')
def filter_profiles(self, profiles: list[THostProfile], target: IntegrationTarget) -> list[THostProfile]:
"""Filter the list of profiles, returning only those which are not skipped for the given target."""
del target
return profiles
def filter_targets(self, targets: list[IntegrationTarget], exclude: set[str]) -> None:
"""Filter the list of targets, adding any which this host profile cannot support to the provided exclude list."""
if self.controller and self.args.host_settings.controller_fallback and targets:
affected_targets = [target.name for target in targets]
reason = self.args.host_settings.controller_fallback.reason
if reason == FallbackReason.ENVIRONMENT:
exclude.update(affected_targets)
display.warning(f'Excluding {self.host_type} tests since a fallback controller is in use: {", ".join(affected_targets)}')
elif reason == FallbackReason.PYTHON:
display.warning(f'Some {self.host_type} tests may be redundant since a fallback python is in use: {", ".join(affected_targets)}')
if not self.allow_destructive and not self.config.is_managed:
override_destructive = set(target for target in self.include_targets if target.startswith('destructive/'))
override = [target.name for target in targets if override_destructive & set(target.aliases)]
self.skip('destructive', 'which require --allow-destructive or prefixing with "destructive/" to run on unmanaged hosts', targets, exclude, override)
if not self.args.allow_disabled:
override_disabled = set(target for target in self.args.include if target.startswith('disabled/'))
override = [target.name for target in targets if override_disabled & set(target.aliases)]
self.skip('disabled', 'which require --allow-disabled or prefixing with "disabled/"', targets, exclude, override)
if not self.args.allow_unsupported:
override_unsupported = set(target for target in self.args.include if target.startswith('unsupported/'))
override = [target.name for target in targets if override_unsupported & set(target.aliases)]
self.skip('unsupported', 'which require --allow-unsupported or prefixing with "unsupported/"', targets, exclude, override)
if not self.args.allow_unstable:
override_unstable = set(target for target in self.args.include if target.startswith('unstable/'))
if self.args.allow_unstable_changed:
override_unstable |= set(self.args.metadata.change_description.focused_targets or [])
override = [target.name for target in targets if override_unstable & set(target.aliases)]
self.skip('unstable', 'which require --allow-unstable or prefixing with "unstable/"', targets, exclude, override)
class PosixTargetFilter(TargetFilter[TPosixConfig]):
"""Target filter for POSIX hosts."""
def filter_targets(self, targets: list[IntegrationTarget], exclude: set[str]) -> None:
"""Filter the list of targets, adding any which this host profile cannot support to the provided exclude list."""
super().filter_targets(targets, exclude)
if not self.allow_root and not self.config.have_root:
self.skip('needs/root', 'which require --allow-root or running as root', targets, exclude)
self.skip(f'skip/python{self.config.python.version}', f'which are not supported by Python {self.config.python.version}', targets, exclude)
self.skip(f'skip/python{self.config.python.major_version}', f'which are not supported by Python {self.config.python.major_version}', targets, exclude)
class DockerTargetFilter(PosixTargetFilter[DockerConfig]):
"""Target filter for docker hosts."""
def filter_targets(self, targets: list[IntegrationTarget], exclude: set[str]) -> None:
"""Filter the list of targets, adding any which this host profile cannot support to the provided exclude list."""
super().filter_targets(targets, exclude)
self.skip('skip/docker', 'which cannot run under docker', targets, exclude)
if not self.config.privileged:
self.skip('needs/privileged', 'which require --docker-privileged to run under docker', targets, exclude)
class PosixSshTargetFilter(PosixTargetFilter[PosixSshConfig]):
"""Target filter for POSIX SSH hosts."""
class RemoteTargetFilter(TargetFilter[TRemoteConfig]):
"""Target filter for remote Ansible Core CI managed hosts."""
def filter_profiles(self, profiles: list[THostProfile], target: IntegrationTarget) -> list[THostProfile]:
"""Filter the list of profiles, returning only those which are not skipped for the given target."""
profiles = super().filter_profiles(profiles, target)
skipped_profiles = [profile for profile in profiles if any(skip in target.skips for skip in get_remote_skip_aliases(profile.config))]
if skipped_profiles:
configs: list[TRemoteConfig] = [profile.config for profile in skipped_profiles]
display.warning(f'Excluding skipped hosts from inventory: {", ".join(config.name for config in configs)}')
profiles = [profile for profile in profiles if profile not in skipped_profiles]
return profiles
def filter_targets(self, targets: list[IntegrationTarget], exclude: set[str]) -> None:
"""Filter the list of targets, adding any which this host profile cannot support to the provided exclude list."""
super().filter_targets(targets, exclude)
if len(self.configs) > 1:
host_skips = {host.name: get_remote_skip_aliases(host) for host in self.configs}
# Skip only targets which skip all hosts.
# Targets that skip only some hosts will be handled during inventory generation.
skipped = [target.name for target in targets if all(any(skip in target.skips for skip in skips) for skips in host_skips.values())]
if skipped:
exclude.update(skipped)
display.warning(f'Excluding tests which do not support {", ".join(host_skips.keys())}: {", ".join(skipped)}')
else:
skips = get_remote_skip_aliases(self.config)
for skip, reason in skips.items():
self.skip(skip, reason, targets, exclude)
class PosixRemoteTargetFilter(PosixTargetFilter[PosixRemoteConfig], RemoteTargetFilter[PosixRemoteConfig]):
"""Target filter for POSIX remote hosts."""
class WindowsRemoteTargetFilter(RemoteTargetFilter[WindowsRemoteConfig]):
"""Target filter for remote Windows hosts."""
class WindowsInventoryTargetFilter(TargetFilter[WindowsInventoryConfig]):
"""Target filter for Windows inventory."""
class NetworkRemoteTargetFilter(RemoteTargetFilter[NetworkRemoteConfig]):
"""Target filter for remote network hosts."""
class NetworkInventoryTargetFilter(TargetFilter[NetworkInventoryConfig]):
"""Target filter for network inventory."""
class OriginTargetFilter(PosixTargetFilter[OriginConfig]):
"""Target filter for localhost."""
def filter_targets(self, targets: list[IntegrationTarget], exclude: set[str]) -> None:
"""Filter the list of targets, adding any which this host profile cannot support to the provided exclude list."""
super().filter_targets(targets, exclude)
arch = detect_architecture(self.config.python.path)
if arch:
self.skip(f'skip/{arch}', f'which are not supported by {arch}', targets, exclude)
@cache
def get_host_target_type_map() -> dict[t.Type[HostConfig], t.Type[TargetFilter]]:
"""Create and return a mapping of HostConfig types to TargetFilter types."""
return get_type_map(TargetFilter, HostConfig)
def get_target_filter(args: IntegrationConfig, configs: list[HostConfig], controller: bool) -> TargetFilter:
"""Return an integration test target filter instance for the provided host configurations."""
target_type = type(configs[0])
if issubclass(target_type, ControllerConfig):
target_type = type(args.controller)
configs = [args.controller]
filter_type = get_host_target_type_map()[target_type]
filter_instance = filter_type(args, configs, controller)
return filter_instance
def get_remote_skip_aliases(config: RemoteConfig) -> dict[str, str]:
"""Return a dictionary of skip aliases and the reason why they apply."""
return get_platform_skip_aliases(config.platform, config.version, config.arch)
def get_platform_skip_aliases(platform: str, version: str, arch: t.Optional[str]) -> dict[str, str]:
"""Return a dictionary of skip aliases and the reason why they apply."""
skips = {
f'skip/{platform}': platform,
f'skip/{platform}/{version}': f'{platform} {version}',
f'skip/{platform}{version}': f'{platform} {version}', # legacy syntax, use above format
}
if arch:
skips.update({
f'skip/{arch}': arch,
f'skip/{arch}/{platform}': f'{platform} on {arch}',
f'skip/{arch}/{platform}/{version}': f'{platform} {version} on {arch}',
})
skips = {alias: f'which are not supported by {description}' for alias, description in skips.items()}
return skips
| 12,168 | Python | .py | 201 | 52.676617 | 160 | 0.699764 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
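The skip aliases contributed by a remote host come from `get_platform_skip_aliases` above. A self-contained copy of the helper with illustrative inputs, showing the aliases generated for a hypothetical `rhel` 9.3 host on `aarch64`:

```python
"""Worked example of the skip aliases built by get_platform_skip_aliases in filters.py above.

The platform, version and arch values are illustrative.
"""
import typing as t

def get_platform_skip_aliases(platform: str, version: str, arch: t.Optional[str]) -> dict[str, str]:
    skips = {
        f'skip/{platform}': platform,
        f'skip/{platform}/{version}': f'{platform} {version}',
        f'skip/{platform}{version}': f'{platform} {version}',  # legacy syntax
    }
    if arch:
        skips.update({
            f'skip/{arch}': arch,
            f'skip/{arch}/{platform}': f'{platform} on {arch}',
            f'skip/{arch}/{platform}/{version}': f'{platform} {version} on {arch}',
        })
    return {alias: f'which are not supported by {description}' for alias, description in skips.items()}

for alias, reason in get_platform_skip_aliases('rhel', '9.3', 'aarch64').items():
    print(alias, '->', reason)
# skip/rhel, skip/rhel/9.3, skip/rhel9.3, skip/aarch64, skip/aarch64/rhel, skip/aarch64/rhel/9.3
```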
| 13,212 | windows.py | ansible_ansible/test/lib/ansible_test/_internal/commands/integration/windows.py |
"""Windows integration testing."""
from __future__ import annotations
import os
from ...util import (
ApplicationError,
ANSIBLE_TEST_CONFIG_ROOT,
)
from ...util_common import (
handle_layout_messages,
)
from ...containers import (
create_container_hooks,
local_ssh,
root_ssh,
)
from ...target import (
walk_windows_integration_targets,
)
from ...config import (
WindowsIntegrationConfig,
)
from ...host_configs import (
WindowsInventoryConfig,
WindowsRemoteConfig,
)
from . import (
command_integration_filter,
command_integration_filtered,
get_inventory_absolute_path,
get_inventory_relative_path,
check_inventory,
delegate_inventory,
)
from ...data import (
data_context,
)
def command_windows_integration(args: WindowsIntegrationConfig) -> None:
"""Entry point for the `windows-integration` command."""
handle_layout_messages(data_context().content.integration_messages)
inventory_relative_path = get_inventory_relative_path(args)
template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, os.path.basename(inventory_relative_path)) + '.template'
if issubclass(args.target_type, WindowsInventoryConfig):
target = args.only_target(WindowsInventoryConfig)
inventory_path = get_inventory_absolute_path(args, target)
if args.delegate or not target.path:
target.path = inventory_relative_path
else:
inventory_path = os.path.join(data_context().content.root, inventory_relative_path)
if not args.explain and not issubclass(args.target_type, WindowsRemoteConfig) and not os.path.isfile(inventory_path):
raise ApplicationError(
'Inventory not found: %s\n'
'Use --inventory to specify the inventory path.\n'
'Use --windows to provision resources and generate an inventory file.\n'
'See also inventory template: %s' % (inventory_path, template_path)
)
check_inventory(args, inventory_path)
delegate_inventory(args, inventory_path)
all_targets = tuple(walk_windows_integration_targets(include_hidden=True))
host_state, internal_targets = command_integration_filter(args, all_targets)
control_connections = [local_ssh(args, host_state.controller_profile.python)]
managed_connections = [root_ssh(ssh) for ssh in host_state.get_controller_target_connections()]
pre_target, post_target = create_container_hooks(args, control_connections, managed_connections)
command_integration_filtered(args, host_state, internal_targets, all_targets, inventory_path, pre_target=pre_target, post_target=post_target)
| 2,639 | Python | .py | 63 | 36.857143 | 145 | 0.733776 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,213 | network.py | ansible_ansible/test/lib/ansible_test/_internal/commands/integration/network.py |
"""Network integration testing."""
from __future__ import annotations
import os
from ...util import (
ApplicationError,
ANSIBLE_TEST_CONFIG_ROOT,
)
from ...util_common import (
handle_layout_messages,
)
from ...target import (
walk_network_integration_targets,
)
from ...config import (
NetworkIntegrationConfig,
)
from . import (
command_integration_filter,
command_integration_filtered,
get_inventory_absolute_path,
get_inventory_relative_path,
check_inventory,
delegate_inventory,
)
from ...data import (
data_context,
)
from ...host_configs import (
NetworkInventoryConfig,
NetworkRemoteConfig,
)
def command_network_integration(args: NetworkIntegrationConfig) -> None:
"""Entry point for the `network-integration` command."""
handle_layout_messages(data_context().content.integration_messages)
inventory_relative_path = get_inventory_relative_path(args)
template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, os.path.basename(inventory_relative_path)) + '.template'
if issubclass(args.target_type, NetworkInventoryConfig):
target = args.only_target(NetworkInventoryConfig)
inventory_path = get_inventory_absolute_path(args, target)
if args.delegate or not target.path:
target.path = inventory_relative_path
else:
inventory_path = os.path.join(data_context().content.root, inventory_relative_path)
if args.no_temp_workdir:
# temporary solution to keep DCI tests working
inventory_exists = os.path.exists(inventory_path)
else:
inventory_exists = os.path.isfile(inventory_path)
if not args.explain and not issubclass(args.target_type, NetworkRemoteConfig) and not inventory_exists:
raise ApplicationError(
'Inventory not found: %s\n'
'Use --inventory to specify the inventory path.\n'
'Use --platform to provision resources and generate an inventory file.\n'
'See also inventory template: %s' % (inventory_path, template_path)
)
check_inventory(args, inventory_path)
delegate_inventory(args, inventory_path)
all_targets = tuple(walk_network_integration_targets(include_hidden=True))
host_state, internal_targets = command_integration_filter(args, all_targets)
command_integration_filtered(args, host_state, internal_targets, all_targets, inventory_path)
| 2,417 | Python | .py | 60 | 34.933333 | 115 | 0.726496 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,214 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/commands/integration/__init__.py |
"""Ansible integration test infrastructure."""
from __future__ import annotations
import collections.abc as c
import contextlib
import datetime
import json
import os
import re
import shutil
import tempfile
import time
import typing as t
from ...encoding import (
to_bytes,
)
from ...ansible_util import (
ansible_environment,
)
from ...executor import (
get_changes_filter,
AllTargetsSkipped,
Delegate,
ListTargets,
)
from ...python_requirements import (
install_requirements,
)
from ...ci import (
get_ci_provider,
)
from ...target import (
analyze_integration_target_dependencies,
walk_integration_targets,
IntegrationTarget,
walk_internal_targets,
TIntegrationTarget,
IntegrationTargetType,
)
from ...config import (
IntegrationConfig,
NetworkIntegrationConfig,
PosixIntegrationConfig,
WindowsIntegrationConfig,
TIntegrationConfig,
)
from ...io import (
make_dirs,
read_text_file,
)
from ...util import (
ApplicationError,
display,
SubprocessError,
remove_tree,
)
from ...util_common import (
named_temporary_file,
ResultType,
run_command,
write_json_test_results,
check_pyyaml,
)
from ...coverage_util import (
cover_python,
)
from ...cache import (
CommonCache,
)
from .cloud import (
CloudEnvironmentConfig,
cloud_filter,
cloud_init,
get_cloud_environment,
get_cloud_platforms,
)
from ...data import (
data_context,
PayloadConfig,
)
from ...host_configs import (
InventoryConfig,
OriginConfig,
)
from ...host_profiles import (
ControllerProfile,
ControllerHostProfile,
HostProfile,
PosixProfile,
SshTargetHostProfile,
)
from ...provisioning import (
HostState,
prepare_profiles,
)
from ...pypi_proxy import (
configure_pypi_proxy,
)
from ...inventory import (
create_controller_inventory,
create_windows_inventory,
create_network_inventory,
create_posix_inventory,
)
from .filters import (
get_target_filter,
)
from .coverage import (
CoverageManager,
)
THostProfile = t.TypeVar('THostProfile', bound=HostProfile)
def generate_dependency_map(integration_targets: list[IntegrationTarget]) -> dict[str, set[IntegrationTarget]]:
"""Analyze the given list of integration test targets and return a dictionary expressing target names and the targets on which they depend."""
targets_dict = dict((target.name, target) for target in integration_targets)
target_dependencies = analyze_integration_target_dependencies(integration_targets)
dependency_map: dict[str, set[IntegrationTarget]] = {}
invalid_targets = set()
for dependency, dependents in target_dependencies.items():
dependency_target = targets_dict.get(dependency)
if not dependency_target:
invalid_targets.add(dependency)
continue
for dependent in dependents:
if dependent not in dependency_map:
dependency_map[dependent] = set()
dependency_map[dependent].add(dependency_target)
if invalid_targets:
raise ApplicationError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid_targets)))
return dependency_map
def get_files_needed(target_dependencies: list[IntegrationTarget]) -> list[str]:
"""Return a list of files needed by the given list of target dependencies."""
files_needed: list[str] = []
for target_dependency in target_dependencies:
files_needed += target_dependency.needs_file
files_needed = sorted(set(files_needed))
invalid_paths = [path for path in files_needed if not os.path.isfile(path)]
if invalid_paths:
raise ApplicationError('Invalid "needs/file/*" aliases:\n%s' % '\n'.join(invalid_paths))
return files_needed
def check_inventory(args: IntegrationConfig, inventory_path: str) -> None:
"""Check the given inventory for issues."""
if not isinstance(args.controller, OriginConfig):
if os.path.exists(inventory_path):
inventory = read_text_file(inventory_path)
if 'ansible_ssh_private_key_file' in inventory:
display.warning('Use of "ansible_ssh_private_key_file" in inventory with the --docker or --remote option is unsupported and will likely fail.')
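# A hypothetical inventory line that would trigger the warning above when combined with
# --docker or --remote (host name and key path are illustrative only):
#
#   testhost ansible_host=203.0.113.10 ansible_ssh_private_key_file=~/.ssh/id_rsa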
def get_inventory_absolute_path(args: IntegrationConfig, target: InventoryConfig) -> str:
"""Return the absolute inventory path used for the given integration configuration or target inventory config (if provided)."""
path = target.path or os.path.basename(get_inventory_relative_path(args))
if args.host_path:
path = os.path.join(data_context().content.root, path) # post-delegation, path is relative to the content root
else:
path = os.path.join(data_context().content.root, data_context().content.integration_path, path)
return path
def get_inventory_relative_path(args: IntegrationConfig) -> str:
"""Return the inventory path used for the given integration configuration relative to the content root."""
inventory_names: dict[t.Type[IntegrationConfig], str] = {
PosixIntegrationConfig: 'inventory',
WindowsIntegrationConfig: 'inventory.winrm',
NetworkIntegrationConfig: 'inventory.networking',
}
return os.path.join(data_context().content.integration_path, inventory_names[type(args)])
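# For reference, assuming the ansible-core layout where the integration path is
# 'test/integration', the mapping above resolves to paths such as:
#
#   PosixIntegrationConfig   -> test/integration/inventory
#   WindowsIntegrationConfig -> test/integration/inventory.winrm
#   NetworkIntegrationConfig -> test/integration/inventory.networking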
def delegate_inventory(args: IntegrationConfig, inventory_path_src: str) -> None:
"""Make the given inventory available during delegation."""
if isinstance(args, PosixIntegrationConfig):
return
def inventory_callback(payload_config: PayloadConfig) -> None:
"""
Add the inventory file to the payload file list.
This will preserve the file during delegation even if it is ignored or is outside the content and install roots.
"""
files = payload_config.files
inventory_path = get_inventory_relative_path(args)
inventory_tuple = inventory_path_src, inventory_path
if os.path.isfile(inventory_path_src) and inventory_tuple not in files:
originals = [item for item in files if item[1] == inventory_path]
if originals:
for original in originals:
files.remove(original)
display.warning('Overriding inventory file "%s" with "%s".' % (inventory_path, inventory_path_src))
else:
display.notice('Sourcing inventory file "%s" from "%s".' % (inventory_path, inventory_path_src))
files.append(inventory_tuple)
data_context().register_payload_callback(inventory_callback)
@contextlib.contextmanager
def integration_test_environment(
args: IntegrationConfig,
target: IntegrationTarget,
inventory_path_src: str,
) -> c.Iterator[IntegrationEnvironment]:
"""Context manager that prepares the integration test environment and cleans it up."""
ansible_config_src = args.get_ansible_config()
ansible_config_relative = os.path.join(data_context().content.integration_path, '%s.cfg' % args.command)
if args.no_temp_workdir or 'no/temp_workdir/' in target.aliases:
display.warning('Disabling the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
integration_dir = os.path.join(data_context().content.root, data_context().content.integration_path)
targets_dir = os.path.join(data_context().content.root, data_context().content.integration_targets_path)
inventory_path = inventory_path_src
ansible_config = ansible_config_src
vars_file = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
yield IntegrationEnvironment(data_context().content.root, integration_dir, targets_dir, inventory_path, ansible_config, vars_file)
return
# When testing a collection, the temporary directory must reside within the collection.
# This is necessary to enable support for the default collection for non-collection content (playbooks and roles).
root_temp_dir = os.path.join(ResultType.TMP.path, 'integration')
prefix = '%s-' % target.name
suffix = '-\u00c5\u00d1\u015a\u00cc\u03b2\u0141\u00c8'
if args.no_temp_unicode or 'no/temp_unicode/' in target.aliases:
display.warning('Disabling unicode in the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
suffix = '-ansible'
if args.explain:
temp_dir = os.path.join(root_temp_dir, '%stemp%s' % (prefix, suffix))
else:
make_dirs(root_temp_dir)
temp_dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)
try:
display.info('Preparing temporary directory: %s' % temp_dir, verbosity=2)
inventory_relative_path = get_inventory_relative_path(args)
inventory_path = os.path.join(temp_dir, inventory_relative_path)
cache = IntegrationCache(args)
target_dependencies = sorted([target] + list(cache.dependency_map.get(target.name, set())))
files_needed = get_files_needed(target_dependencies)
integration_dir = os.path.join(temp_dir, data_context().content.integration_path)
targets_dir = os.path.join(temp_dir, data_context().content.integration_targets_path)
ansible_config = os.path.join(temp_dir, ansible_config_relative)
vars_file_src = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
vars_file = os.path.join(temp_dir, data_context().content.integration_vars_path)
file_copies = [
(ansible_config_src, ansible_config),
(inventory_path_src, inventory_path),
]
if os.path.exists(vars_file_src):
file_copies.append((vars_file_src, vars_file))
file_copies += [(path, os.path.join(temp_dir, path)) for path in files_needed]
integration_targets_relative_path = data_context().content.integration_targets_path
directory_copies = [
(
os.path.join(integration_targets_relative_path, target.relative_path),
os.path.join(temp_dir, integration_targets_relative_path, target.relative_path),
)
for target in target_dependencies
]
directory_copies = sorted(set(directory_copies))
file_copies = sorted(set(file_copies))
if not args.explain:
make_dirs(integration_dir)
for dir_src, dir_dst in directory_copies:
display.info('Copying %s/ to %s/' % (dir_src, dir_dst), verbosity=2)
if not args.explain:
shutil.copytree(to_bytes(dir_src), to_bytes(dir_dst), symlinks=True) # type: ignore[arg-type] # incorrect type stub omits bytes path support
for file_src, file_dst in file_copies:
display.info('Copying %s to %s' % (file_src, file_dst), verbosity=2)
if not args.explain:
make_dirs(os.path.dirname(file_dst))
shutil.copy2(file_src, file_dst)
yield IntegrationEnvironment(temp_dir, integration_dir, targets_dir, inventory_path, ansible_config, vars_file)
finally:
if not args.explain:
remove_tree(temp_dir)
@contextlib.contextmanager
def integration_test_config_file(
args: IntegrationConfig,
env_config: CloudEnvironmentConfig,
integration_dir: str,
) -> c.Iterator[t.Optional[str]]:
"""Context manager that provides a config file for integration tests, if needed."""
if not env_config:
yield None
return
config_vars = (env_config.ansible_vars or {}).copy()
config_vars.update(
ansible_test=dict(
environment=env_config.env_vars,
module_defaults=env_config.module_defaults,
)
)
config_file = json.dumps(config_vars, indent=4, sort_keys=True)
with named_temporary_file(args, 'config-file-', '.json', integration_dir, config_file) as path: # type: str
filename = os.path.relpath(path, integration_dir)
display.info('>>> Config File: %s\n%s' % (filename, config_file), verbosity=3)
yield path
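# A sketch of the JSON written to the temporary config file above, assuming a cloud
# environment config with one Ansible variable, one environment variable and no module
# defaults (all names and values are illustrative only):
#
#   {
#       "ansible_test": {
#           "environment": {"EXAMPLE_ENV": "value"},
#           "module_defaults": null
#       },
#       "example_var": "value"
#   }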
def create_inventory(
args: IntegrationConfig,
host_state: HostState,
inventory_path: str,
target: IntegrationTarget,
) -> None:
"""Create inventory."""
if isinstance(args, PosixIntegrationConfig):
if target.target_type == IntegrationTargetType.CONTROLLER:
display.info('Configuring controller inventory.', verbosity=1)
create_controller_inventory(args, inventory_path, host_state.controller_profile)
elif target.target_type == IntegrationTargetType.TARGET:
display.info('Configuring target inventory.', verbosity=1)
create_posix_inventory(args, inventory_path, host_state.target_profiles, 'needs/ssh/' in target.aliases)
else:
raise Exception(f'Unhandled test type for target "{target.name}": {target.target_type.name.lower()}')
elif isinstance(args, WindowsIntegrationConfig):
display.info('Configuring target inventory.', verbosity=1)
target_profiles = filter_profiles_for_target(args, host_state.target_profiles, target)
create_windows_inventory(args, inventory_path, target_profiles)
elif isinstance(args, NetworkIntegrationConfig):
display.info('Configuring target inventory.', verbosity=1)
target_profiles = filter_profiles_for_target(args, host_state.target_profiles, target)
create_network_inventory(args, inventory_path, target_profiles)
def command_integration_filtered(
args: IntegrationConfig,
host_state: HostState,
targets: tuple[IntegrationTarget, ...],
all_targets: tuple[IntegrationTarget, ...],
inventory_path: str,
pre_target: t.Optional[c.Callable[[IntegrationTarget], None]] = None,
post_target: t.Optional[c.Callable[[IntegrationTarget], None]] = None,
):
"""Run integration tests for the specified targets."""
found = False
passed = []
failed = []
targets_iter = iter(targets)
all_targets_dict = dict((target.name, target) for target in all_targets)
setup_errors = []
setup_targets_executed: set[str] = set()
for target in all_targets:
for setup_target in target.setup_once + target.setup_always:
if setup_target not in all_targets_dict:
setup_errors.append('Target "%s" contains invalid setup target: %s' % (target.name, setup_target))
if setup_errors:
raise ApplicationError('Found %d invalid setup aliases:\n%s' % (len(setup_errors), '\n'.join(setup_errors)))
check_pyyaml(host_state.controller_profile.python)
test_dir = os.path.join(ResultType.TMP.path, 'output_dir')
if not args.explain and any('needs/ssh/' in target.aliases for target in targets):
max_tries = 20
display.info('SSH connection to controller required by tests. Checking the connection.')
for i in range(1, max_tries + 1):
try:
run_command(args, ['ssh', '-o', 'BatchMode=yes', 'localhost', 'id'], capture=True)
display.info('SSH service responded.')
break
except SubprocessError:
if i == max_tries:
raise
seconds = 3
display.warning('SSH service not responding. Waiting %d second(s) before checking again.' % seconds)
time.sleep(seconds)
start_at_task = args.start_at_task
results = {}
target_profile = host_state.target_profiles[0]
if isinstance(target_profile, PosixProfile):
target_python = target_profile.python
if isinstance(target_profile, ControllerProfile):
if host_state.controller_profile.python.path != target_profile.python.path:
install_requirements(args, target_python, command=True, controller=False) # integration
elif isinstance(target_profile, SshTargetHostProfile):
connection = target_profile.get_controller_target_connections()[0]
install_requirements(args, target_python, command=True, controller=False, connection=connection) # integration
coverage_manager = CoverageManager(args, host_state, inventory_path)
coverage_manager.setup()
try:
for target in targets_iter:
if args.start_at and not found:
found = target.name == args.start_at
if not found:
continue
create_inventory(args, host_state, inventory_path, target)
tries = 2 if args.retry_on_error else 1
verbosity = args.verbosity
cloud_environment = get_cloud_environment(args, target)
try:
while tries:
tries -= 1
try:
if cloud_environment:
cloud_environment.setup_once()
run_setup_targets(args, host_state, test_dir, target.setup_once, all_targets_dict, setup_targets_executed, inventory_path,
coverage_manager, False)
start_time = time.time()
if pre_target:
pre_target(target)
run_setup_targets(args, host_state, test_dir, target.setup_always, all_targets_dict, setup_targets_executed, inventory_path,
coverage_manager, True)
if not args.explain:
# create a fresh test directory for each test target
remove_tree(test_dir)
make_dirs(test_dir)
try:
if target.script_path:
command_integration_script(args, host_state, target, test_dir, inventory_path, coverage_manager)
else:
command_integration_role(args, host_state, target, start_at_task, test_dir, inventory_path, coverage_manager)
start_at_task = None
finally:
if post_target:
post_target(target)
end_time = time.time()
results[target.name] = dict(
name=target.name,
type=target.type,
aliases=target.aliases,
modules=target.modules,
run_time_seconds=int(end_time - start_time),
setup_once=target.setup_once,
setup_always=target.setup_always,
)
break
except SubprocessError:
if cloud_environment:
cloud_environment.on_failure(target, tries)
if not tries:
raise
if target.retry_never:
display.warning(f'Skipping retry of test target "{target.name}" since it has been excluded from retries.')
raise
display.warning('Retrying test target "%s" with maximum verbosity.' % target.name)
display.verbosity = args.verbosity = 6
passed.append(target)
except Exception as ex:
failed.append(target)
if args.continue_on_error:
display.error(str(ex))
continue
display.notice('To resume at this test target, use the option: --start-at %s' % target.name)
next_target = next(targets_iter, None)
if next_target:
display.notice('To resume after this test target, use the option: --start-at %s' % next_target.name)
raise
finally:
display.verbosity = args.verbosity = verbosity
finally:
if not args.explain:
coverage_manager.teardown()
result_name = '%s-%s.json' % (
args.command, re.sub(r'[^0-9]', '-', str(datetime.datetime.now(tz=datetime.timezone.utc).replace(microsecond=0, tzinfo=None))))
data = dict(
targets=results,
)
write_json_test_results(ResultType.DATA, result_name, data)
if failed:
raise ApplicationError('The %d integration test(s) listed below (out of %d) failed. See error output above for details:\n%s' % (
len(failed), len(passed) + len(failed), '\n'.join(target.name for target in failed)))
def command_integration_script(
args: IntegrationConfig,
host_state: HostState,
target: IntegrationTarget,
test_dir: str,
inventory_path: str,
coverage_manager: CoverageManager,
):
"""Run an integration test script."""
display.info('Running %s integration test script' % target.name)
env_config = None
if isinstance(args, PosixIntegrationConfig):
cloud_environment = get_cloud_environment(args, target)
if cloud_environment:
env_config = cloud_environment.get_environment_config()
if env_config:
display.info('>>> Environment Config\n%s' % json.dumps(dict(
env_vars=env_config.env_vars,
ansible_vars=env_config.ansible_vars,
callback_plugins=env_config.callback_plugins,
module_defaults=env_config.module_defaults,
), indent=4, sort_keys=True), verbosity=3)
with integration_test_environment(args, target, inventory_path) as test_env: # type: IntegrationEnvironment
cmd = ['./%s' % os.path.basename(target.script_path)]
if args.verbosity:
cmd.append('-' + ('v' * args.verbosity))
env = integration_environment(args, target, test_dir, test_env.inventory_path, test_env.ansible_config, env_config, test_env)
cwd = os.path.join(test_env.targets_dir, target.relative_path)
env.update(
# support use of adhoc ansible commands in collections without specifying the fully qualified collection name
ANSIBLE_PLAYBOOK_DIR=cwd,
)
if env_config and env_config.env_vars:
env.update(env_config.env_vars)
with integration_test_config_file(args, env_config, test_env.integration_dir) as config_path: # type: t.Optional[str]
if config_path:
cmd += ['-e', '@%s' % config_path]
env.update(coverage_manager.get_environment(target.name, target.aliases))
cover_python(args, host_state.controller_profile.python, cmd, target.name, env, cwd=cwd, capture=False)
def command_integration_role(
args: IntegrationConfig,
host_state: HostState,
target: IntegrationTarget,
start_at_task: t.Optional[str],
test_dir: str,
inventory_path: str,
coverage_manager: CoverageManager,
):
"""Run an integration test role."""
display.info('Running %s integration test role' % target.name)
env_config = None
vars_files = []
variables = dict(
output_dir=test_dir,
)
if isinstance(args, WindowsIntegrationConfig):
hosts = 'windows'
gather_facts = False
variables.update(
win_output_dir=r'C:\ansible_testing',
)
elif isinstance(args, NetworkIntegrationConfig):
hosts = target.network_platform
gather_facts = False
else:
hosts = 'testhost'
gather_facts = True
if 'gather_facts/yes/' in target.aliases:
gather_facts = True
elif 'gather_facts/no/' in target.aliases:
gather_facts = False
if not isinstance(args, NetworkIntegrationConfig):
cloud_environment = get_cloud_environment(args, target)
if cloud_environment:
env_config = cloud_environment.get_environment_config()
if env_config:
display.info('>>> Environment Config\n%s' % json.dumps(dict(
env_vars=env_config.env_vars,
ansible_vars=env_config.ansible_vars,
callback_plugins=env_config.callback_plugins,
module_defaults=env_config.module_defaults,
), indent=4, sort_keys=True), verbosity=3)
with integration_test_environment(args, target, inventory_path) as test_env: # type: IntegrationEnvironment
if os.path.exists(test_env.vars_file):
vars_files.append(os.path.relpath(test_env.vars_file, test_env.integration_dir))
play = dict(
hosts=hosts,
gather_facts=gather_facts,
vars_files=vars_files,
vars=variables,
roles=[
target.name,
],
)
if env_config:
if env_config.ansible_vars:
variables.update(env_config.ansible_vars)
play.update(
environment=env_config.env_vars,
module_defaults=env_config.module_defaults,
)
playbook = json.dumps([play], indent=4, sort_keys=True)
with named_temporary_file(args=args, directory=test_env.integration_dir, prefix='%s-' % target.name, suffix='.yml', content=playbook) as playbook_path:
filename = os.path.basename(playbook_path)
display.info('>>> Playbook: %s\n%s' % (filename, playbook.strip()), verbosity=3)
cmd = ['ansible-playbook', filename, '-i', os.path.relpath(test_env.inventory_path, test_env.integration_dir)]
if start_at_task:
cmd += ['--start-at-task', start_at_task]
if args.tags:
cmd += ['--tags', args.tags]
if args.skip_tags:
cmd += ['--skip-tags', args.skip_tags]
if args.diff:
cmd += ['--diff']
if isinstance(args, NetworkIntegrationConfig):
if args.testcase:
cmd += ['-e', 'testcase=%s' % args.testcase]
if args.verbosity:
cmd.append('-' + ('v' * args.verbosity))
env = integration_environment(args, target, test_dir, test_env.inventory_path, test_env.ansible_config, env_config, test_env)
cwd = test_env.integration_dir
env.update(
# support use of adhoc ansible commands in collections without specifying the fully qualified collection name
ANSIBLE_PLAYBOOK_DIR=cwd,
)
if env_config and env_config.env_vars:
env.update(env_config.env_vars)
env['ANSIBLE_ROLES_PATH'] = test_env.targets_dir
env.update(coverage_manager.get_environment(target.name, target.aliases))
cover_python(args, host_state.controller_profile.python, cmd, target.name, env, cwd=cwd, capture=False)
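# A compact sketch of the playbook JSON generated above for a hypothetical POSIX role
# target named 'ping' (values illustrative; actual output uses indent=4 and sorted keys):
#
#   [{"gather_facts": true, "hosts": "testhost", "roles": ["ping"],
#     "vars": {"output_dir": "<test_dir>"}, "vars_files": []}]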
def run_setup_targets(
args: IntegrationConfig,
host_state: HostState,
test_dir: str,
target_names: c.Sequence[str],
targets_dict: dict[str, IntegrationTarget],
targets_executed: set[str],
inventory_path: str,
coverage_manager: CoverageManager,
always: bool,
):
"""Run setup targets."""
for target_name in target_names:
if not always and target_name in targets_executed:
continue
target = targets_dict[target_name]
if not args.explain:
# create a fresh test directory for each test target
remove_tree(test_dir)
make_dirs(test_dir)
if target.script_path:
command_integration_script(args, host_state, target, test_dir, inventory_path, coverage_manager)
else:
command_integration_role(args, host_state, target, None, test_dir, inventory_path, coverage_manager)
targets_executed.add(target_name)
def integration_environment(
args: IntegrationConfig,
target: IntegrationTarget,
test_dir: str,
inventory_path: str,
ansible_config: t.Optional[str],
env_config: t.Optional[CloudEnvironmentConfig],
test_env: IntegrationEnvironment,
) -> dict[str, str]:
"""Return a dictionary of environment variables to use when running the given integration test target."""
env = ansible_environment(args, ansible_config=ansible_config)
callback_plugins = ['junit'] + (env_config.callback_plugins or [] if env_config else [])
integration = dict(
JUNIT_OUTPUT_DIR=ResultType.JUNIT.path,
JUNIT_TASK_RELATIVE_PATH=test_env.test_dir,
JUNIT_REPLACE_OUT_OF_TREE_PATH='out-of-tree:',
ANSIBLE_CALLBACKS_ENABLED=','.join(sorted(set(callback_plugins))),
ANSIBLE_TEST_CI=args.metadata.ci_provider or get_ci_provider().code,
ANSIBLE_TEST_COVERAGE='check' if args.coverage_check else ('yes' if args.coverage else ''),
OUTPUT_DIR=test_dir,
INVENTORY_PATH=os.path.abspath(inventory_path),
)
if args.debug_strategy:
env.update(ANSIBLE_STRATEGY='debug')
if 'non_local/' in target.aliases:
if args.coverage:
display.warning('Skipping coverage reporting on Ansible modules for non-local test: %s' % target.name)
env.update(ANSIBLE_TEST_REMOTE_INTERPRETER='')
env.update(integration)
return env
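# For illustration, a coverage-enabled run typically produces environment variables along
# these lines (paths and values are hypothetical):
#
#   JUNIT_OUTPUT_DIR=<results>/junit
#   ANSIBLE_CALLBACKS_ENABLED=junit
#   ANSIBLE_TEST_COVERAGE=yes
#   OUTPUT_DIR=<results>/.tmp/output_dir
#   INVENTORY_PATH=/absolute/path/to/inventory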
class IntegrationEnvironment:
"""Details about the integration environment."""
def __init__(self, test_dir: str, integration_dir: str, targets_dir: str, inventory_path: str, ansible_config: str, vars_file: str) -> None:
self.test_dir = test_dir
self.integration_dir = integration_dir
self.targets_dir = targets_dir
self.inventory_path = inventory_path
self.ansible_config = ansible_config
self.vars_file = vars_file
class IntegrationCache(CommonCache):
"""Integration cache."""
@property
def integration_targets(self) -> list[IntegrationTarget]:
"""The list of integration test targets."""
return self.get('integration_targets', lambda: list(walk_integration_targets()))
@property
def dependency_map(self) -> dict[str, set[IntegrationTarget]]:
"""The dependency map of integration test targets."""
return self.get('dependency_map', lambda: generate_dependency_map(self.integration_targets))
def filter_profiles_for_target(args: IntegrationConfig, profiles: list[THostProfile], target: IntegrationTarget) -> list[THostProfile]:
"""Return a list of profiles after applying target filters."""
if target.target_type == IntegrationTargetType.CONTROLLER:
profile_filter = get_target_filter(args, [args.controller], True)
elif target.target_type == IntegrationTargetType.TARGET:
profile_filter = get_target_filter(args, args.targets, False)
else:
raise Exception(f'Unhandled test type for target "{target.name}": {target.target_type.name.lower()}')
profiles = profile_filter.filter_profiles(profiles, target)
return profiles
def get_integration_filter(args: IntegrationConfig, targets: list[IntegrationTarget]) -> set[str]:
"""Return a list of test targets to skip based on the host(s) that will be used to run the specified test targets."""
invalid_targets = sorted(target.name for target in targets if target.target_type not in (IntegrationTargetType.CONTROLLER, IntegrationTargetType.TARGET))
if invalid_targets and not args.list_targets:
message = f'''Unable to determine context for the following test targets: {", ".join(invalid_targets)}
Make sure the test targets are correctly named:
- Modules - The target name should match the module name.
- Plugins - The target name should be "{{plugin_type}}_{{plugin_name}}".
If necessary, context can be controlled by adding entries to the "aliases" file for a test target:
- Add the name(s) of modules which are tested.
- Add "context/target" for module and module_utils tests (these will run on the target host).
- Add "context/controller" for other test types (these will run on the controller).'''
raise ApplicationError(message)
invalid_targets = sorted(target.name for target in targets if target.actual_type not in (IntegrationTargetType.CONTROLLER, IntegrationTargetType.TARGET))
if invalid_targets:
if data_context().content.is_ansible:
display.warning(f'Unable to determine context for the following test targets: {", ".join(invalid_targets)}')
else:
display.warning(f'Unable to determine context for the following test targets, they will be run on the target host: {", ".join(invalid_targets)}')
exclude: set[str] = set()
controller_targets = [target for target in targets if target.target_type == IntegrationTargetType.CONTROLLER]
target_targets = [target for target in targets if target.target_type == IntegrationTargetType.TARGET]
controller_filter = get_target_filter(args, [args.controller], True)
target_filter = get_target_filter(args, args.targets, False)
controller_filter.filter_targets(controller_targets, exclude)
target_filter.filter_targets(target_targets, exclude)
return exclude
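# A hypothetical "aliases" file for a controller-side plugin test target, following the
# guidance in the error message above (entries are illustrative only):
#
#   context/controller
#   gather_facts/no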
def command_integration_filter(
args: TIntegrationConfig,
targets: c.Iterable[TIntegrationTarget],
) -> tuple[HostState, tuple[TIntegrationTarget, ...]]:
"""Filter the given integration test targets."""
targets = tuple(target for target in targets if 'hidden/' not in target.aliases)
changes = get_changes_filter(args)
# special behavior when the --changed-all-target target is selected based on changes
if args.changed_all_target in changes:
# act as though the --changed-all-target target was in the include list
if args.changed_all_mode == 'include' and args.changed_all_target not in args.include:
args.include.append(args.changed_all_target)
args.delegate_args += ['--include', args.changed_all_target]
# act as though the --changed-all-target target was in the exclude list
elif args.changed_all_mode == 'exclude' and args.changed_all_target not in args.exclude:
args.exclude.append(args.changed_all_target)
require = args.require + changes
exclude = args.exclude
internal_targets = walk_internal_targets(targets, args.include, exclude, require)
environment_exclude = get_integration_filter(args, list(internal_targets))
environment_exclude |= set(cloud_filter(args, internal_targets))
if environment_exclude:
exclude = sorted(set(exclude) | environment_exclude)
internal_targets = walk_internal_targets(targets, args.include, exclude, require)
if not internal_targets:
raise AllTargetsSkipped()
if args.start_at and not any(target.name == args.start_at for target in internal_targets):
raise ApplicationError('Start at target matches nothing: %s' % args.start_at)
cloud_init(args, internal_targets)
vars_file_src = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
if os.path.exists(vars_file_src):
def integration_config_callback(payload_config: PayloadConfig) -> None:
"""
Add the integration config vars file to the payload file list.
This will preserve the file during delegation even if the file is ignored by source control.
"""
files = payload_config.files
files.append((vars_file_src, data_context().content.integration_vars_path))
data_context().register_payload_callback(integration_config_callback)
if args.list_targets:
raise ListTargets([target.name for target in internal_targets])
# requirements are installed using a callback since the windows-integration and network-integration host status checks depend on them
host_state = prepare_profiles(args, targets_use_pypi=True, requirements=requirements) # integration, windows-integration, network-integration
if args.delegate:
raise Delegate(host_state=host_state, require=require, exclude=exclude)
return host_state, internal_targets
def requirements(host_profile: HostProfile) -> None:
"""Install requirements after bootstrapping and delegation."""
if isinstance(host_profile, ControllerHostProfile) and host_profile.controller:
configure_pypi_proxy(host_profile.args, host_profile) # integration, windows-integration, network-integration
install_requirements(host_profile.args, host_profile.python, ansible=True, command=True) # integration, windows-integration, network-integration
elif isinstance(host_profile, PosixProfile) and not isinstance(host_profile, ControllerProfile):
configure_pypi_proxy(host_profile.args, host_profile) # integration
| 36,888 | Python | .py | 732 | 41.103825 | 159 | 0.664272 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,215 | coverage.py | ansible_ansible/test/lib/ansible_test/_internal/commands/integration/coverage.py |
"""Code coverage support for integration tests."""
from __future__ import annotations
import abc
import os
import shutil
import tempfile
import typing as t
import zipfile
from ...io import (
write_text_file,
)
from ...ansible_util import (
run_playbook,
)
from ...config import (
IntegrationConfig,
)
from ...util import (
COVERAGE_CONFIG_NAME,
MODE_DIRECTORY,
MODE_DIRECTORY_WRITE,
MODE_FILE,
SubprocessError,
cache,
display,
generate_name,
get_generic_type,
get_type_map,
remove_tree,
sanitize_host_name,
verified_chmod,
)
from ...util_common import (
ResultType,
)
from ...coverage_util import (
generate_coverage_config,
get_coverage_platform,
)
from ...host_configs import (
HostConfig,
PosixConfig,
WindowsConfig,
WindowsInventoryConfig,
WindowsRemoteConfig,
)
from ...data import (
data_context,
)
from ...host_profiles import (
ControllerProfile,
HostProfile,
PosixProfile,
SshTargetHostProfile,
)
from ...provisioning import (
HostState,
)
from ...connections import (
LocalConnection,
)
from ...inventory import (
create_windows_inventory,
create_posix_inventory,
)
THostConfig = t.TypeVar('THostConfig', bound=HostConfig)
class CoverageHandler(t.Generic[THostConfig], metaclass=abc.ABCMeta):
"""Base class for configuring hosts for integration test code coverage."""
def __init__(self, args: IntegrationConfig, host_state: HostState, inventory_path: str) -> None:
self.args = args
self.host_state = host_state
self.inventory_path = inventory_path
self.profiles = self.get_profiles()
def get_profiles(self) -> list[HostProfile]:
"""Return a list of profiles relevant for this handler."""
profile_type = get_generic_type(type(self), HostConfig)
profiles = [profile for profile in self.host_state.target_profiles if isinstance(profile.config, profile_type)]
return profiles
@property
@abc.abstractmethod
def is_active(self) -> bool:
"""True if the handler should be used, otherwise False."""
@abc.abstractmethod
def setup(self) -> None:
"""Perform setup for code coverage."""
@abc.abstractmethod
def teardown(self) -> None:
"""Perform teardown for code coverage."""
@abc.abstractmethod
def create_inventory(self) -> None:
"""Create inventory, if needed."""
@abc.abstractmethod
def get_environment(self, target_name: str, aliases: tuple[str, ...]) -> dict[str, str]:
"""Return a dictionary of environment variables for running tests with code coverage."""
def run_playbook(self, playbook: str, variables: dict[str, str]) -> None:
"""Run the specified playbook using the current inventory."""
self.create_inventory()
run_playbook(self.args, self.inventory_path, playbook, capture=False, variables=variables)
class PosixCoverageHandler(CoverageHandler[PosixConfig]):
"""Configure integration test code coverage for POSIX hosts."""
def __init__(self, args: IntegrationConfig, host_state: HostState, inventory_path: str) -> None:
super().__init__(args, host_state, inventory_path)
# Common temporary directory used on all POSIX hosts that will be created world writeable.
self.common_temp_path = f'/tmp/ansible-test-{generate_name()}'
def get_profiles(self) -> list[HostProfile]:
"""Return a list of profiles relevant for this handler."""
profiles = super().get_profiles()
profiles = [profile for profile in profiles if not isinstance(profile, ControllerProfile) or
profile.python.path != self.host_state.controller_profile.python.path]
return profiles
@property
def is_active(self) -> bool:
"""True if the handler should be used, otherwise False."""
return True
@property
def target_profile(self) -> t.Optional[PosixProfile]:
"""The POSIX target profile, if it uses a different Python interpreter than the controller, otherwise None."""
return t.cast(PosixProfile, self.profiles[0]) if self.profiles else None
def setup(self) -> None:
"""Perform setup for code coverage."""
self.setup_controller()
self.setup_target()
def teardown(self) -> None:
"""Perform teardown for code coverage."""
self.teardown_controller()
self.teardown_target()
def setup_controller(self) -> None:
"""Perform setup for code coverage on the controller."""
coverage_config_path = os.path.join(self.common_temp_path, COVERAGE_CONFIG_NAME)
coverage_output_path = os.path.join(self.common_temp_path, ResultType.COVERAGE.name)
coverage_config = generate_coverage_config(self.args)
write_text_file(coverage_config_path, coverage_config, create_directories=True)
verified_chmod(coverage_config_path, MODE_FILE)
os.mkdir(coverage_output_path)
verified_chmod(coverage_output_path, MODE_DIRECTORY_WRITE)
def setup_target(self) -> None:
"""Perform setup for code coverage on the target."""
if not self.target_profile:
return
if isinstance(self.target_profile, ControllerProfile):
return
self.run_playbook('posix_coverage_setup.yml', self.get_playbook_variables())
def teardown_controller(self) -> None:
"""Perform teardown for code coverage on the controller."""
coverage_temp_path = os.path.join(self.common_temp_path, ResultType.COVERAGE.name)
platform = get_coverage_platform(self.args.controller)
for filename in os.listdir(coverage_temp_path):
shutil.copyfile(os.path.join(coverage_temp_path, filename), os.path.join(ResultType.COVERAGE.path, update_coverage_filename(filename, platform)))
remove_tree(self.common_temp_path)
def teardown_target(self) -> None:
"""Perform teardown for code coverage on the target."""
if not self.target_profile:
return
if isinstance(self.target_profile, ControllerProfile):
return
profile = t.cast(SshTargetHostProfile, self.target_profile)
platform = get_coverage_platform(profile.config)
con = profile.get_controller_target_connections()[0]
with tempfile.NamedTemporaryFile(prefix='ansible-test-coverage-', suffix='.tgz') as coverage_tgz:
try:
con.create_archive(chdir=self.common_temp_path, name=ResultType.COVERAGE.name, dst=coverage_tgz)
except SubprocessError as ex:
display.warning(f'Failed to download coverage results: {ex}')
else:
coverage_tgz.seek(0)
with tempfile.TemporaryDirectory() as temp_dir:
local_con = LocalConnection(self.args)
local_con.extract_archive(chdir=temp_dir, src=coverage_tgz)
base_dir = os.path.join(temp_dir, ResultType.COVERAGE.name)
for filename in os.listdir(base_dir):
shutil.copyfile(os.path.join(base_dir, filename), os.path.join(ResultType.COVERAGE.path, update_coverage_filename(filename, platform)))
self.run_playbook('posix_coverage_teardown.yml', self.get_playbook_variables())
def get_environment(self, target_name: str, aliases: tuple[str, ...]) -> dict[str, str]:
"""Return a dictionary of environment variables for running tests with code coverage."""
# Enable code coverage collection on Ansible modules (both local and remote).
# Used by the AnsiballZ wrapper generator in lib/ansible/executor/module_common.py to support code coverage.
config_file = os.path.join(self.common_temp_path, COVERAGE_CONFIG_NAME)
# Include the command, target and platform marker so the remote host can create a filename with that info.
# The generated AnsiballZ wrapper is responsible for adding '=python-{X.Y}=coverage.{hostname}.{pid}.{id}'
coverage_file = os.path.join(self.common_temp_path, ResultType.COVERAGE.name, '='.join((self.args.command, target_name, 'platform')))
if self.args.coverage_check:
# cause the 'coverage' module to be found, but not imported or enabled
coverage_file = ''
variables = dict(
_ANSIBLE_COVERAGE_CONFIG=config_file,
_ANSIBLE_COVERAGE_OUTPUT=coverage_file,
)
return variables
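    # For illustration, a hypothetical run of `ansible-test integration ping --coverage`
    # would produce values along these lines (paths are illustrative only):
    #
    #   _ANSIBLE_COVERAGE_CONFIG=/tmp/ansible-test-abc123/<COVERAGE_CONFIG_NAME>
    #   _ANSIBLE_COVERAGE_OUTPUT=/tmp/ansible-test-abc123/coverage/integration=ping=platform
    #
    # The AnsiballZ wrapper then appends '=python-{X.Y}=coverage.{hostname}.{pid}.{id}'
    # on the host where the module actually runs.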
def create_inventory(self) -> None:
"""Create inventory."""
create_posix_inventory(self.args, self.inventory_path, self.host_state.target_profiles)
def get_playbook_variables(self) -> dict[str, str]:
"""Return a dictionary of variables for setup and teardown of POSIX coverage."""
return dict(
common_temp_dir=self.common_temp_path,
coverage_config=generate_coverage_config(self.args),
coverage_config_path=os.path.join(self.common_temp_path, COVERAGE_CONFIG_NAME),
coverage_output_path=os.path.join(self.common_temp_path, ResultType.COVERAGE.name),
mode_directory=f'{MODE_DIRECTORY:04o}',
mode_directory_write=f'{MODE_DIRECTORY_WRITE:04o}',
mode_file=f'{MODE_FILE:04o}',
)
class WindowsCoverageHandler(CoverageHandler[WindowsConfig]):
"""Configure integration test code coverage for Windows hosts."""
def __init__(self, args: IntegrationConfig, host_state: HostState, inventory_path: str) -> None:
super().__init__(args, host_state, inventory_path)
# Common temporary directory used on all Windows hosts that will be created writable by everyone.
self.remote_temp_path = f'C:\\ansible_test_coverage_{generate_name()}'
@property
def is_active(self) -> bool:
"""True if the handler should be used, otherwise False."""
return bool(self.profiles) and not self.args.coverage_check
def setup(self) -> None:
"""Perform setup for code coverage."""
self.run_playbook('windows_coverage_setup.yml', self.get_playbook_variables())
def teardown(self) -> None:
"""Perform teardown for code coverage."""
with tempfile.TemporaryDirectory() as local_temp_path:
variables = self.get_playbook_variables()
variables.update(
local_temp_path=local_temp_path,
)
self.run_playbook('windows_coverage_teardown.yml', variables)
for filename in os.listdir(local_temp_path):
if all(isinstance(profile.config, WindowsRemoteConfig) for profile in self.profiles):
prefix = 'remote'
elif all(isinstance(profile.config, WindowsInventoryConfig) for profile in self.profiles):
prefix = 'inventory'
else:
raise NotImplementedError()
platform = f'{prefix}-{sanitize_host_name(os.path.splitext(filename)[0])}'
with zipfile.ZipFile(os.path.join(local_temp_path, filename)) as coverage_zip:
for item in coverage_zip.infolist():
if item.is_dir():
raise Exception(f'Unexpected directory in zip file: {item.filename}')
item.filename = update_coverage_filename(item.filename, platform)
coverage_zip.extract(item, ResultType.COVERAGE.path)
def get_environment(self, target_name: str, aliases: tuple[str, ...]) -> dict[str, str]:
"""Return a dictionary of environment variables for running tests with code coverage."""
# Include the command, target and platform marker so the remote host can create a filename with that info.
# The remote is responsible for adding '={language-version}=coverage.{hostname}.{pid}.{id}'
coverage_name = '='.join((self.args.command, target_name, 'platform'))
variables = dict(
_ANSIBLE_COVERAGE_REMOTE_OUTPUT=os.path.join(self.remote_temp_path, coverage_name),
_ANSIBLE_COVERAGE_REMOTE_PATH_FILTER=os.path.join(data_context().content.root, '*'),
)
return variables
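    # For illustration (hypothetical values):
    #
    #   _ANSIBLE_COVERAGE_REMOTE_OUTPUT=C:\ansible_test_coverage_abc123\windows-integration=win_ping=platform
    #   _ANSIBLE_COVERAGE_REMOTE_PATH_FILTER=<content root>/*
    #
    # The remote side appends '={language-version}=coverage.{hostname}.{pid}.{id}' to the
    # output prefix.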
def create_inventory(self) -> None:
"""Create inventory."""
create_windows_inventory(self.args, self.inventory_path, self.host_state.target_profiles)
def get_playbook_variables(self) -> dict[str, str]:
"""Return a dictionary of variables for setup and teardown of Windows coverage."""
return dict(
remote_temp_path=self.remote_temp_path,
)
class CoverageManager:
"""Manager for code coverage configuration and state."""
def __init__(self, args: IntegrationConfig, host_state: HostState, inventory_path: str) -> None:
self.args = args
self.host_state = host_state
self.inventory_path = inventory_path
if self.args.coverage:
handler_types = set(get_handler_type(type(profile.config)) for profile in host_state.profiles)
handler_types.discard(None)
else:
handler_types = set()
handlers = [handler_type(args=args, host_state=host_state, inventory_path=inventory_path) for handler_type in handler_types]
self.handlers = [handler for handler in handlers if handler.is_active]
def setup(self) -> None:
"""Perform setup for code coverage."""
if not self.args.coverage:
return
for handler in self.handlers:
handler.setup()
def teardown(self) -> None:
"""Perform teardown for code coverage."""
if not self.args.coverage:
return
for handler in self.handlers:
handler.teardown()
def get_environment(self, target_name: str, aliases: tuple[str, ...]) -> dict[str, str]:
"""Return a dictionary of environment variables for running tests with code coverage."""
if not self.args.coverage or 'non_local/' in aliases:
return {}
env = {}
for handler in self.handlers:
env.update(handler.get_environment(target_name, aliases))
return env
@cache
def get_config_handler_type_map() -> dict[t.Type[HostConfig], t.Type[CoverageHandler]]:
"""Create and return a mapping of HostConfig types to CoverageHandler types."""
return get_type_map(CoverageHandler, HostConfig)
def get_handler_type(config_type: t.Type[HostConfig]) -> t.Optional[t.Type[CoverageHandler]]:
"""Return the coverage handler type associated with the given host config type if found, otherwise return None."""
queue = [config_type]
type_map = get_config_handler_type_map()
while queue:
config_type = queue.pop(0)
handler_type = type_map.get(config_type)
if handler_type:
return handler_type
queue.extend(config_type.__bases__)
return None
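# For example, a host config type derived from PosixConfig but without a handler entry of
# its own (e.g. a hypothetical docker-based config) resolves to PosixCoverageHandler via
# the breadth-first walk over its base classes above.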
def update_coverage_filename(original_filename: str, platform: str) -> str:
"""Validate the given filename and insert the specified platform, then return the result."""
parts = original_filename.split('=')
if original_filename != os.path.basename(original_filename) or len(parts) != 5 or parts[2] != 'platform':
raise Exception(f'Unexpected coverage filename: {original_filename}')
parts[2] = platform
updated_filename = '='.join(parts)
display.info(f'Coverage file for platform "{platform}": {original_filename} -> {updated_filename}', verbosity=3)
return updated_filename
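# A hypothetical before/after for update_coverage_filename(), matching the five
# '='-separated parts validated above (the platform string is illustrative only):
#
#   original: integration=ping=platform=python-3.12=coverage.host.1234.5678
#   updated:  integration=ping=remote-freebsd-14.0=python-3.12=coverage.host.1234.5678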
| 15,578 | Python | .py | 312 | 41.464744 | 159 | 0.669856 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,216 | posix.py | ansible_ansible/test/lib/ansible_test/_internal/commands/integration/posix.py |
"""POSIX integration testing."""
from __future__ import annotations
import os
from ...util_common import (
handle_layout_messages,
)
from ...containers import (
create_container_hooks,
local_ssh,
root_ssh,
)
from ...target import (
walk_posix_integration_targets,
)
from ...config import (
PosixIntegrationConfig,
)
from . import (
command_integration_filter,
command_integration_filtered,
get_inventory_relative_path,
)
from ...data import (
data_context,
)
def command_posix_integration(args: PosixIntegrationConfig) -> None:
"""Entry point for the `integration` command."""
handle_layout_messages(data_context().content.integration_messages)
inventory_relative_path = get_inventory_relative_path(args)
inventory_path = os.path.join(data_context().content.root, inventory_relative_path)
all_targets = tuple(walk_posix_integration_targets(include_hidden=True))
host_state, internal_targets = command_integration_filter(args, all_targets)
control_connections = [local_ssh(args, host_state.controller_profile.python)]
managed_connections = [root_ssh(ssh) for ssh in host_state.get_controller_target_connections()]
pre_target, post_target = create_container_hooks(args, control_connections, managed_connections)
command_integration_filtered(args, host_state, internal_targets, all_targets, inventory_path, pre_target=pre_target, post_target=post_target)
| 1,444 | Python | .py | 36 | 36.555556 | 145 | 0.757163 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,217 | gcp.py | ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/gcp.py |
# Copyright: (c) 2018, Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""GCP plugin for integration tests."""
from __future__ import annotations
import configparser
from ....util import (
display,
)
from ....config import (
IntegrationConfig,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
class GcpCloudProvider(CloudProvider):
"""GCP cloud provider plugin. Sets up cloud resources before delegation."""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.uses_config = True
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
if not self._use_static_config():
display.notice(
                'Static configuration could not be used. Are you missing a template file?'
)
class GcpCloudEnvironment(CloudEnvironment):
"""GCP cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
parser = configparser.ConfigParser()
parser.read(self.config_path)
ansible_vars = dict(
resource_prefix=self.resource_prefix,
)
ansible_vars.update(dict(parser.items('default')))
return CloudEnvironmentConfig(
ansible_vars=ansible_vars,
)
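# A hypothetical static configuration file for the 'default' section read above
# (key names are illustrative only):
#
#   [default]
#   gcp_project = my-project
#   gcp_cred_file = /path/to/service-account.json
#   gcp_cred_kind = serviceaccount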
| 1,591 | Python | .py | 41 | 32.341463 | 96 | 0.682529 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,218 | scaleway.py | ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/scaleway.py |
"""Scaleway plugin for integration tests."""
from __future__ import annotations
import configparser
from ....util import (
display,
)
from ....config import (
IntegrationConfig,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
class ScalewayCloudProvider(CloudProvider):
"""Checks if a configuration file has been passed or fixtures are going to be used for testing"""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.uses_config = True
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
self._use_static_config()
class ScalewayCloudEnvironment(CloudEnvironment):
"""Updates integration test environment after delegation. Will setup the config file as parameter."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
parser = configparser.ConfigParser()
parser.read(self.config_path)
env_vars = dict(
SCW_API_KEY=parser.get('default', 'key'),
SCW_ORG=parser.get('default', 'org'),
)
display.sensitive.add(env_vars['SCW_API_KEY'])
ansible_vars = dict(
scw_org=parser.get('default', 'org'),
)
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
)
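# A hypothetical static configuration file matching the keys read above:
#
#   [default]
#   key = <Scaleway API key>
#   org = <Scaleway organization id>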
| 1,537 | Python | .py | 41 | 30.707317 | 105 | 0.666667 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,219 | cloudscale.py | ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/cloudscale.py |
# -*- coding: utf-8 -*-
#
# (c) 2018, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Cloudscale plugin for integration tests."""
from __future__ import annotations
import configparser
from ....util import (
display,
)
from ....config import (
IntegrationConfig,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
class CloudscaleCloudProvider(CloudProvider):
"""Cloudscale cloud provider plugin. Sets up cloud resources before delegation."""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.uses_config = True
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
self._use_static_config()
class CloudscaleCloudEnvironment(CloudEnvironment):
"""Cloudscale cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
parser = configparser.ConfigParser()
parser.read(self.config_path)
env_vars = dict(
CLOUDSCALE_API_TOKEN=parser.get('default', 'cloudscale_api_token'),
)
display.sensitive.add(env_vars['CLOUDSCALE_API_TOKEN'])
ansible_vars = dict(
cloudscale_resource_prefix=self.resource_prefix,
)
ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
)
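# A hypothetical static configuration file matching the key read above:
#
#   [default]
#   cloudscale_api_token = <API token>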
| 1,794 | Python | .py | 45 | 33.644444 | 101 | 0.687861 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,220 | acme.py | ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/acme.py |
"""ACME plugin for integration tests."""
from __future__ import annotations
import os
from ....config import (
IntegrationConfig,
)
from ....containers import (
run_support_container,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
class ACMEProvider(CloudProvider):
"""ACME plugin. Sets up cloud resources for tests."""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
# The simulator must be pinned to a specific version to guarantee CI passes with the version used.
if os.environ.get('ANSIBLE_ACME_CONTAINER'):
self.image = os.environ.get('ANSIBLE_ACME_CONTAINER')
else:
self.image = 'quay.io/ansible/acme-test-container:2.1.0'
self.uses_docker = True
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
if self._use_static_config():
self._setup_static()
else:
self._setup_dynamic()
def _setup_dynamic(self) -> None:
"""Create a ACME test container using docker."""
ports = [
5000, # control port for flask app in container
14000, # Pebble ACME CA
]
descriptor = run_support_container(
self.args,
self.platform,
self.image,
'acme-simulator',
ports,
)
if not descriptor:
return
self._set_cloud_config('acme_host', descriptor.name)
def _setup_static(self) -> None:
raise NotImplementedError()
class ACMEEnvironment(CloudEnvironment):
"""ACME environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
ansible_vars = dict(
acme_host=self._get_cloud_config('acme_host'),
)
return CloudEnvironmentConfig(
ansible_vars=ansible_vars,
)
| 2,141 | Python | .py | 59 | 28.305085 | 106 | 0.631426 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,221 | vcenter.py | ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/vcenter.py |
"""VMware vCenter plugin for integration tests."""
from __future__ import annotations
import configparser
from ....util import (
ApplicationError,
display,
)
from ....config import (
IntegrationConfig,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
class VcenterProvider(CloudProvider):
"""VMware vcenter/esx plugin. Sets up cloud resources for tests."""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.uses_config = True
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
if not self._use_static_config():
raise ApplicationError('Configuration file does not exist: %s' % self.config_static_path)
class VcenterEnvironment(CloudEnvironment):
"""VMware vcenter/esx environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
        # The test environment may be running inside a container, so the static
        # configuration file is read here instead of relying on VMWARE_TEST_PLATFORM.
parser = configparser.ConfigParser()
parser.read(self.config_path) # static
ansible_vars = dict(
resource_prefix=self.resource_prefix,
)
ansible_vars.update(dict(parser.items('DEFAULT', raw=True)))
for key, value in ansible_vars.items():
if key.endswith('_password'):
display.sensitive.add(value)
return CloudEnvironmentConfig(
ansible_vars=ansible_vars,
module_defaults={
'group/vmware': {
'hostname': ansible_vars['vcenter_hostname'],
'username': ansible_vars['vcenter_username'],
'password': ansible_vars['vcenter_password'],
'port': ansible_vars.get('vcenter_port', '443'),
'validate_certs': ansible_vars.get('vmware_validate_certs', 'no'),
},
},
)
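# A hypothetical static configuration file providing the keys consumed above
# (values are illustrative only):
#
#   [DEFAULT]
#   vcenter_hostname = vcenter.example.com
#   vcenter_username = administrator@vsphere.local
#   vcenter_password = <password>
#   vcenter_port = 443
#   vmware_validate_certs = no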
| 2,190 | Python | .py | 52 | 33.269231 | 103 | 0.636192 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,222 | aws.py | ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/aws.py |
"""AWS plugin for integration tests."""
from __future__ import annotations
import os
import uuid
import configparser
import typing as t
from ....util import (
ApplicationError,
display,
)
from ....config import (
IntegrationConfig,
)
from ....target import (
IntegrationTarget,
)
from ....core_ci import (
AnsibleCoreCI,
CloudResource,
)
from ....host_configs import (
OriginConfig,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
class AwsCloudProvider(CloudProvider):
"""AWS cloud provider plugin. Sets up cloud resources before delegation."""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.uses_config = True
def filter(self, targets: tuple[IntegrationTarget, ...], exclude: list[str]) -> None:
"""Filter out the cloud tests when the necessary config and resources are not available."""
aci = self._create_ansible_core_ci()
if aci.available:
return
super().filter(targets, exclude)
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
aws_config_path = os.path.expanduser('~/.aws')
if os.path.exists(aws_config_path) and isinstance(self.args.controller, OriginConfig):
raise ApplicationError('Rename "%s" or use the --docker or --remote option to isolate tests.' % aws_config_path)
if not self._use_static_config():
self._setup_dynamic()
def _setup_dynamic(self) -> None:
"""Request AWS credentials through the Ansible Core CI service."""
display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
config = self._read_config_template()
aci = self._create_ansible_core_ci()
response = aci.start()
if not self.args.explain:
credentials = response['aws']['credentials']
values = dict(
ACCESS_KEY=credentials['access_key'],
SECRET_KEY=credentials['secret_key'],
SECURITY_TOKEN=credentials['session_token'],
REGION='us-east-1',
)
display.sensitive.add(values['SECRET_KEY'])
display.sensitive.add(values['SECURITY_TOKEN'])
config = self._populate_config_template(config, values)
self._write_config(config)
def _create_ansible_core_ci(self) -> AnsibleCoreCI:
"""Return an AWS instance of AnsibleCoreCI."""
return AnsibleCoreCI(self.args, CloudResource(platform='aws'))
class AwsCloudEnvironment(CloudEnvironment):
"""AWS cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
parser = configparser.ConfigParser()
parser.read(self.config_path)
ansible_vars: dict[str, t.Any] = dict(
resource_prefix=self.resource_prefix,
tiny_prefix=uuid.uuid4().hex[0:12]
)
ansible_vars.update(dict(parser.items('default')))
display.sensitive.add(ansible_vars.get('aws_secret_key'))
display.sensitive.add(ansible_vars.get('security_token'))
if 'aws_cleanup' not in ansible_vars:
ansible_vars['aws_cleanup'] = not self.managed
env_vars = {'ANSIBLE_DEBUG_BOTOCORE_LOGS': 'True'}
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
callback_plugins=['aws_resource_actions'],
)
def on_failure(self, target: IntegrationTarget, tries: int) -> None:
"""Callback to run when an integration target fails."""
if not tries and self.managed:
display.notice('If %s failed due to permissions, the IAM test policy may need to be updated. '
'https://docs.ansible.com/ansible/devel/collections/amazon/aws/docsite/dev_guidelines.html#aws-permissions-for-integration-tests'
% target.name)
# File: ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/openshift.py
"""OpenShift plugin for integration tests."""
from __future__ import annotations
import re
from ....io import (
read_text_file,
)
from ....util import (
display,
)
from ....config import (
IntegrationConfig,
)
from ....containers import (
run_support_container,
wait_for_file,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
class OpenShiftCloudProvider(CloudProvider):
"""OpenShift cloud provider plugin. Sets up cloud resources before delegation."""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args, config_extension='.kubeconfig')
# The image must be pinned to a specific version to guarantee CI passes with the version used.
self.image = 'quay.io/ansible/openshift-origin:v3.9.0'
self.uses_docker = True
self.uses_config = True
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
if self._use_static_config():
self._setup_static()
else:
self._setup_dynamic()
def _setup_static(self) -> None:
"""Configure OpenShift tests for use with static configuration."""
config = read_text_file(self.config_static_path)
match = re.search(r'^ *server: (?P<server>.*)$', config, flags=re.MULTILINE)
if not match:
display.warning('Could not find OpenShift endpoint in kubeconfig.')
def _setup_dynamic(self) -> None:
"""Create a OpenShift container using docker."""
port = 8443
ports = [
port,
]
cmd = ['start', 'master', '--listen', 'https://0.0.0.0:%d' % port]
descriptor = run_support_container(
self.args,
self.platform,
self.image,
'openshift-origin',
ports,
cmd=cmd,
)
if not descriptor:
return
if self.args.explain:
config = '# Unknown'
else:
config = self._get_config(descriptor.name, 'https://%s:%s/' % (descriptor.name, port))
self._write_config(config)
def _get_config(self, container_name: str, server: str) -> str:
"""Get OpenShift config from container."""
stdout = wait_for_file(self.args, container_name, '/var/lib/origin/openshift.local.config/master/admin.kubeconfig', sleep=10, tries=30)
config = stdout
config = re.sub(r'^( *)certificate-authority-data: .*$', r'\1insecure-skip-tls-verify: true', config, flags=re.MULTILINE)
config = re.sub(r'^( *)server: .*$', r'\1server: %s' % server, config, flags=re.MULTILINE)
return config
class OpenShiftCloudEnvironment(CloudEnvironment):
"""OpenShift cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
env_vars = dict(
K8S_AUTH_KUBECONFIG=self.config_path,
)
return CloudEnvironmentConfig(
env_vars=env_vars,
)
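
The dynamic setup above rewrites the admin kubeconfig pulled from the container so the controller can use it: TLS verification is disabled and the server address is pointed at the support container. Below is a standalone sketch of those two substitutions, applied to a hypothetical kubeconfig fragment.

import re

SAMPLE_KUBECONFIG = """\
clusters:
- cluster:
    certificate-authority-data: LS0tLS1FWEFNUExF
    server: https://127.0.0.1:8443
  name: local
"""

server = 'https://openshift-origin:8443/'
config = SAMPLE_KUBECONFIG
config = re.sub(r'^( *)certificate-authority-data: .*$', r'\1insecure-skip-tls-verify: true', config, flags=re.MULTILINE)
config = re.sub(r'^( *)server: .*$', r'\1server: %s' % server, config, flags=re.MULTILINE)

# the CA data line becomes 'insecure-skip-tls-verify: true' and the server points at the container
print(config)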
# File: ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/opennebula.py
"""OpenNebula plugin for integration tests."""
from __future__ import annotations
import configparser
from ....util import (
display,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
class OpenNebulaCloudProvider(CloudProvider):
"""Checks if a configuration file has been passed or fixtures are going to be used for testing"""
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
if not self._use_static_config():
self._setup_dynamic()
self.uses_config = True
def _setup_dynamic(self) -> None:
display.info('No config file provided, will run test from fixtures')
config = self._read_config_template()
values = dict(
URL="http://localhost/RPC2",
USERNAME='oneadmin',
PASSWORD='onepass',
FIXTURES='true',
REPLAY='true',
)
config = self._populate_config_template(config, values)
self._write_config(config)
class OpenNebulaCloudEnvironment(CloudEnvironment):
"""Updates integration test environment after delegation. Will setup the config file as parameter."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
parser = configparser.ConfigParser()
parser.read(self.config_path)
ansible_vars = dict(
resource_prefix=self.resource_prefix,
)
ansible_vars.update(dict(parser.items('default')))
display.sensitive.add(ansible_vars.get('opennebula_password'))
return CloudEnvironmentConfig(
ansible_vars=ansible_vars,
)
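
When no config file is provided, the fixture values above are substituted into a config template shipped with ansible-test. The '@KEY' replacement itself is implemented by CloudProvider._populate_config_template (see cloud/__init__.py later in this collection). Below is a standalone sketch using a hypothetical template; the option names are illustrative only.

template = """\
[default]
opennebula_url: @URL
opennebula_username: @USERNAME
opennebula_password: @PASSWORD
opennebula_test_fixture: @FIXTURES
opennebula_test_fixture_replay: @REPLAY
"""

values = dict(
    URL="http://localhost/RPC2",
    USERNAME='oneadmin',
    PASSWORD='onepass',
    FIXTURES='true',
    REPLAY='true',
)

# mirrors _populate_config_template(): each '@KEY' token is replaced with its value
for key in sorted(values):
    template = template.replace('@%s' % key, values[key])

print(template)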
# File: ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/digitalocean.py
"""DigitalOcean plugin for integration tests."""
from __future__ import annotations
import configparser
from ....util import (
display,
)
from ....config import (
IntegrationConfig,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
class DigitalOceanCloudProvider(CloudProvider):
"""Checks if a configuration file has been passed or fixtures are going to be used for testing"""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.uses_config = True
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
self._use_static_config()
class DigitalOceanCloudEnvironment(CloudEnvironment):
"""Updates integration test environment after delegation. Will setup the config file as parameter."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
parser = configparser.ConfigParser()
parser.read(self.config_path)
env_vars = dict(
DO_API_KEY=parser.get('default', 'key'),
)
display.sensitive.add(env_vars['DO_API_KEY'])
ansible_vars = dict(
resource_prefix=self.resource_prefix,
)
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
)
# File: ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/vultr.py
"""Vultr plugin for integration tests."""
from __future__ import annotations
import configparser
from ....util import (
display,
)
from ....config import (
IntegrationConfig,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
class VultrCloudProvider(CloudProvider):
"""Checks if a configuration file has been passed or fixtures are going to be used for testing"""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.uses_config = True
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
self._use_static_config()
class VultrCloudEnvironment(CloudEnvironment):
"""Updates integration test environment after delegation. Will setup the config file as parameter."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
parser = configparser.ConfigParser()
parser.read(self.config_path)
env_vars = dict(
VULTR_API_KEY=parser.get('default', 'key'),
)
display.sensitive.add(env_vars['VULTR_API_KEY'])
ansible_vars = dict(
vultr_resource_prefix=self.resource_prefix,
)
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
)
# File: ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/galaxy.py
"""Galaxy (ansible-galaxy) plugin for integration tests."""
from __future__ import annotations
import os
import tempfile
from ....config import (
IntegrationConfig,
)
from ....docker_util import (
docker_cp_to,
docker_exec,
)
from ....containers import (
run_support_container,
)
from ....encoding import (
to_text,
)
from ....util import (
display,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
GALAXY_HOST_NAME = 'galaxy-pulp'
SETTINGS = {
'PULP_CONTENT_ORIGIN': f'http://{GALAXY_HOST_NAME}',
'PULP_ANSIBLE_API_HOSTNAME': f'http://{GALAXY_HOST_NAME}',
'PULP_GALAXY_API_PATH_PREFIX': '/api/galaxy/',
# These paths are unique to the container image which has an nginx location for /pulp/content to route
# requests to the content backend
'PULP_ANSIBLE_CONTENT_HOSTNAME': f'http://{GALAXY_HOST_NAME}/pulp/content/api/galaxy/v3/artifacts/collections/',
'PULP_CONTENT_PATH_PREFIX': '/pulp/content/api/galaxy/v3/artifacts/collections/',
'PULP_GALAXY_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
'django.contrib.auth.backends.ModelBackend',
],
    # This should probably be false; see https://issues.redhat.com/browse/AAH-2328
'PULP_GALAXY_REQUIRE_CONTENT_APPROVAL': 'true',
'PULP_GALAXY_DEPLOYMENT_MODE': 'standalone',
'PULP_GALAXY_AUTO_SIGN_COLLECTIONS': 'false',
'PULP_GALAXY_COLLECTION_SIGNING_SERVICE': 'ansible-default',
'PULP_RH_ENTITLEMENT_REQUIRED': 'insights',
'PULP_TOKEN_AUTH_DISABLED': 'false',
'PULP_TOKEN_SERVER': f'http://{GALAXY_HOST_NAME}/token/',
'PULP_TOKEN_SIGNATURE_ALGORITHM': 'ES256',
'PULP_PUBLIC_KEY_PATH': '/src/galaxy_ng/dev/common/container_auth_public_key.pem',
'PULP_PRIVATE_KEY_PATH': '/src/galaxy_ng/dev/common/container_auth_private_key.pem',
'PULP_ANALYTICS': 'false',
'PULP_GALAXY_ENABLE_UNAUTHENTICATED_COLLECTION_ACCESS': 'true',
'PULP_GALAXY_ENABLE_UNAUTHENTICATED_COLLECTION_DOWNLOAD': 'true',
'PULP_GALAXY_ENABLE_LEGACY_ROLES': 'true',
'PULP_GALAXY_FEATURE_FLAGS__execution_environments': 'false',
'PULP_SOCIAL_AUTH_LOGIN_REDIRECT_URL': '/',
'PULP_GALAXY_FEATURE_FLAGS__ai_deny_index': 'true',
'PULP_DEFAULT_ADMIN_PASSWORD': 'password'
}
GALAXY_IMPORTER = b'''
[galaxy-importer]
ansible_local_tmp=~/.ansible/tmp
ansible_test_local_image=false
check_required_tags=false
check_runtime_yaml=false
check_changelog=false
infra_osd=false
local_image_docker=false
log_level_main=INFO
require_v1_or_greater=false
run_ansible_doc=false
run_ansible_lint=false
run_ansible_test=false
run_flake8=false
'''.strip()
class GalaxyProvider(CloudProvider):
"""
Galaxy plugin. Sets up pulp (ansible-galaxy) servers for tests.
The pulp source itself resides at: https://github.com/pulp/pulp-oci-images
"""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.image = os.environ.get(
'ANSIBLE_PULP_CONTAINER',
'quay.io/pulp/galaxy:4.7.1'
)
self.uses_docker = True
def setup(self) -> None:
"""Setup cloud resource before delegation and reg cleanup callback."""
super().setup()
with tempfile.NamedTemporaryFile(mode='w+') as env_fd:
settings = '\n'.join(
f'{key}={value}' for key, value in SETTINGS.items()
)
env_fd.write(settings)
env_fd.flush()
display.info(f'>>> galaxy_ng Configuration\n{settings}', verbosity=3)
descriptor = run_support_container(
self.args,
self.platform,
self.image,
GALAXY_HOST_NAME,
[
80,
],
aliases=[
GALAXY_HOST_NAME,
],
start=True,
options=[
'--env-file', env_fd.name,
],
)
if not descriptor:
return
injected_files = [
('/etc/galaxy-importer/galaxy-importer.cfg', GALAXY_IMPORTER, 'galaxy-importer'),
]
for path, content, friendly_name in injected_files:
with tempfile.NamedTemporaryFile() as temp_fd:
temp_fd.write(content)
temp_fd.flush()
display.info(f'>>> {friendly_name} Configuration\n{to_text(content)}', verbosity=3)
docker_exec(self.args, descriptor.container_id, ['mkdir', '-p', os.path.dirname(path)], True)
docker_cp_to(self.args, descriptor.container_id, temp_fd.name, path)
docker_exec(self.args, descriptor.container_id, ['chown', 'pulp:pulp', path], True)
self._set_cloud_config('PULP_HOST', GALAXY_HOST_NAME)
self._set_cloud_config('PULP_USER', 'admin')
self._set_cloud_config('PULP_PASSWORD', 'password')
class GalaxyEnvironment(CloudEnvironment):
"""Galaxy environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
pulp_user = str(self._get_cloud_config('PULP_USER'))
pulp_password = str(self._get_cloud_config('PULP_PASSWORD'))
pulp_host = self._get_cloud_config('PULP_HOST')
return CloudEnvironmentConfig(
ansible_vars=dict(
pulp_user=pulp_user,
pulp_password=pulp_password,
pulp_api=f'http://{pulp_host}',
pulp_server=f'http://{pulp_host}/pulp_ansible/galaxy/',
galaxy_ng_server=f'http://{pulp_host}/api/galaxy/',
),
env_vars=dict(
PULP_USER=pulp_user,
PULP_PASSWORD=pulp_password,
PULP_SERVER=f'http://{pulp_host}/pulp_ansible/galaxy/api/',
GALAXY_NG_SERVER=f'http://{pulp_host}/api/galaxy/',
),
)
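
The provider above hands its pulp/galaxy_ng settings to the container through a docker --env-file built from the SETTINGS mapping. Below is a standalone sketch of how that file is produced; the settings shown are a small hypothetical subset of SETTINGS.

import tempfile

settings_subset = {
    'PULP_CONTENT_ORIGIN': 'http://galaxy-pulp',
    'PULP_GALAXY_API_PATH_PREFIX': '/api/galaxy/',
    'PULP_DEFAULT_ADMIN_PASSWORD': 'password',
}

with tempfile.NamedTemporaryFile(mode='w+') as env_fd:
    content = '\n'.join(f'{key}={value}' for key, value in settings_subset.items())
    env_fd.write(content)
    env_fd.flush()  # flush before the file name is handed to 'docker ... --env-file'
    with open(env_fd.name) as handle:
        print(handle.read())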
# File: ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/nios.py
"""NIOS plugin for integration tests."""
from __future__ import annotations
import os
from ....config import (
IntegrationConfig,
)
from ....containers import (
run_support_container,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
class NiosProvider(CloudProvider):
"""Nios plugin. Sets up NIOS mock server for tests."""
# Default image to run the nios simulator.
#
# The simulator must be pinned to a specific version
# to guarantee CI passes with the version used.
#
    # Its source resides at:
# https://github.com/ansible/nios-test-container
DOCKER_IMAGE = 'quay.io/ansible/nios-test-container:6.0.0'
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.__container_from_env = os.environ.get('ANSIBLE_NIOSSIM_CONTAINER')
"""
        Overrides the target container; may be used for development.
        Set ANSIBLE_NIOSSIM_CONTAINER=<image> to use another image.
        Omit it or leave it empty otherwise.
"""
self.image = self.__container_from_env or self.DOCKER_IMAGE
self.uses_docker = True
def setup(self) -> None:
"""Setup cloud resource before delegation and reg cleanup callback."""
super().setup()
if self._use_static_config():
self._setup_static()
else:
self._setup_dynamic()
def _setup_dynamic(self) -> None:
"""Spawn a NIOS simulator within docker container."""
nios_port = 443
ports = [
nios_port,
]
descriptor = run_support_container(
self.args,
self.platform,
self.image,
'nios-simulator',
ports,
)
if not descriptor:
return
self._set_cloud_config('NIOS_HOST', descriptor.name)
def _setup_static(self) -> None:
raise NotImplementedError()
class NiosEnvironment(CloudEnvironment):
"""NIOS environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
ansible_vars = dict(
nios_provider=dict(
host=self._get_cloud_config('NIOS_HOST'),
username='admin',
password='infoblox',
),
)
return CloudEnvironmentConfig(
ansible_vars=ansible_vars,
)
# File: ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py
"""Plugin system for cloud providers and environments for use in integration tests."""
from __future__ import annotations
import abc
import datetime
import os
import re
import tempfile
import time
import typing as t
from ....encoding import (
to_bytes,
)
from ....io import (
read_text_file,
)
from ....util import (
ANSIBLE_TEST_CONFIG_ROOT,
ApplicationError,
display,
import_plugins,
load_plugins,
cache,
)
from ....util_common import (
ExitHandler,
ResultType,
write_json_test_results,
)
from ....target import (
IntegrationTarget,
)
from ....config import (
IntegrationConfig,
TestConfig,
)
from ....ci import (
get_ci_provider,
)
from ....data import (
data_context,
PayloadConfig,
)
from ....docker_util import (
docker_available,
)
@cache
def get_cloud_plugins() -> tuple[dict[str, t.Type[CloudProvider]], dict[str, t.Type[CloudEnvironment]]]:
"""Import cloud plugins and load them into the plugin dictionaries."""
import_plugins('commands/integration/cloud')
providers: dict[str, t.Type[CloudProvider]] = {}
environments: dict[str, t.Type[CloudEnvironment]] = {}
load_plugins(CloudProvider, providers)
load_plugins(CloudEnvironment, environments)
return providers, environments
@cache
def get_provider_plugins() -> dict[str, t.Type[CloudProvider]]:
"""Return a dictionary of the available cloud provider plugins."""
return get_cloud_plugins()[0]
@cache
def get_environment_plugins() -> dict[str, t.Type[CloudEnvironment]]:
"""Return a dictionary of the available cloud environment plugins."""
return get_cloud_plugins()[1]
def get_cloud_platforms(args: TestConfig, targets: t.Optional[tuple[IntegrationTarget, ...]] = None) -> list[str]:
"""Return cloud platform names for the specified targets."""
if isinstance(args, IntegrationConfig):
if args.list_targets:
return []
if targets is None:
cloud_platforms = set(args.metadata.cloud_config or [])
else:
cloud_platforms = set(get_cloud_platform(target) for target in targets)
cloud_platforms.discard(None)
return sorted(cloud_platforms)
def get_cloud_platform(target: IntegrationTarget) -> t.Optional[str]:
"""Return the name of the cloud platform used for the given target, or None if no cloud platform is used."""
cloud_platforms = set(a.split('/')[1] for a in target.aliases if a.startswith('cloud/') and a.endswith('/') and a != 'cloud/')
if not cloud_platforms:
return None
if len(cloud_platforms) == 1:
cloud_platform = cloud_platforms.pop()
if cloud_platform not in get_provider_plugins():
raise ApplicationError('Target %s aliases contains unknown cloud platform: %s' % (target.name, cloud_platform))
return cloud_platform
raise ApplicationError('Target %s aliases contains multiple cloud platforms: %s' % (target.name, ', '.join(sorted(cloud_platforms))))
def get_cloud_providers(args: IntegrationConfig, targets: t.Optional[tuple[IntegrationTarget, ...]] = None) -> list[CloudProvider]:
"""Return a list of cloud providers for the given targets."""
return [get_provider_plugins()[p](args) for p in get_cloud_platforms(args, targets)]
def get_cloud_environment(args: IntegrationConfig, target: IntegrationTarget) -> t.Optional[CloudEnvironment]:
"""Return the cloud environment for the given target, or None if no cloud environment is used for the target."""
cloud_platform = get_cloud_platform(target)
if not cloud_platform:
return None
return get_environment_plugins()[cloud_platform](args)
def cloud_filter(args: IntegrationConfig, targets: tuple[IntegrationTarget, ...]) -> list[str]:
"""Return a list of target names to exclude based on the given targets."""
if args.metadata.cloud_config is not None:
return [] # cloud filter already performed prior to delegation
exclude: list[str] = []
for provider in get_cloud_providers(args, targets):
provider.filter(targets, exclude)
return exclude
def cloud_init(args: IntegrationConfig, targets: tuple[IntegrationTarget, ...]) -> None:
"""Initialize cloud plugins for the given targets."""
if args.metadata.cloud_config is not None:
return # cloud configuration already established prior to delegation
args.metadata.cloud_config = {}
results = {}
for provider in get_cloud_providers(args, targets):
if args.prime_containers and not provider.uses_docker:
continue
args.metadata.cloud_config[provider.platform] = {}
start_time = time.time()
provider.setup()
end_time = time.time()
results[provider.platform] = dict(
platform=provider.platform,
setup_seconds=int(end_time - start_time),
targets=[target.name for target in targets],
)
if not args.explain and results:
result_name = '%s-%s.json' % (
args.command, re.sub(r'[^0-9]', '-', str(datetime.datetime.now(tz=datetime.timezone.utc).replace(microsecond=0, tzinfo=None))))
data = dict(
clouds=results,
)
write_json_test_results(ResultType.DATA, result_name, data)
class CloudBase(metaclass=abc.ABCMeta):
"""Base class for cloud plugins."""
_CONFIG_PATH = 'config_path'
_RESOURCE_PREFIX = 'resource_prefix'
_MANAGED = 'managed'
_SETUP_EXECUTED = 'setup_executed'
def __init__(self, args: IntegrationConfig) -> None:
self.args = args
self.platform = self.__module__.rsplit('.', 1)[-1]
def config_callback(payload_config: PayloadConfig) -> None:
"""Add the config file to the payload file list."""
if self.platform not in self.args.metadata.cloud_config:
return # platform was initialized, but not used -- such as being skipped due to all tests being disabled
if self._get_cloud_config(self._CONFIG_PATH, ''):
pair = (self.config_path, os.path.relpath(self.config_path, data_context().content.root))
files = payload_config.files
if pair not in files:
display.info('Including %s config: %s -> %s' % (self.platform, pair[0], pair[1]), verbosity=3)
files.append(pair)
data_context().register_payload_callback(config_callback)
@property
def setup_executed(self) -> bool:
"""True if setup has been executed, otherwise False."""
return t.cast(bool, self._get_cloud_config(self._SETUP_EXECUTED, False))
@setup_executed.setter
def setup_executed(self, value: bool) -> None:
"""True if setup has been executed, otherwise False."""
self._set_cloud_config(self._SETUP_EXECUTED, value)
@property
def config_path(self) -> str:
"""Path to the configuration file."""
return os.path.join(data_context().content.root, str(self._get_cloud_config(self._CONFIG_PATH)))
@config_path.setter
def config_path(self, value: str) -> None:
"""Path to the configuration file."""
self._set_cloud_config(self._CONFIG_PATH, value)
@property
def resource_prefix(self) -> str:
"""Resource prefix."""
return str(self._get_cloud_config(self._RESOURCE_PREFIX))
@resource_prefix.setter
def resource_prefix(self, value: str) -> None:
"""Resource prefix."""
self._set_cloud_config(self._RESOURCE_PREFIX, value)
@property
def managed(self) -> bool:
"""True if resources are managed by ansible-test, otherwise False."""
return t.cast(bool, self._get_cloud_config(self._MANAGED))
@managed.setter
def managed(self, value: bool) -> None:
"""True if resources are managed by ansible-test, otherwise False."""
self._set_cloud_config(self._MANAGED, value)
def _get_cloud_config(self, key: str, default: t.Optional[t.Union[str, int, bool]] = None) -> t.Union[str, int, bool]:
"""Return the specified value from the internal configuration."""
if default is not None:
return self.args.metadata.cloud_config[self.platform].get(key, default)
return self.args.metadata.cloud_config[self.platform][key]
def _set_cloud_config(self, key: str, value: t.Union[str, int, bool]) -> None:
"""Set the specified key and value in the internal configuration."""
self.args.metadata.cloud_config[self.platform][key] = value
class CloudProvider(CloudBase):
"""Base class for cloud provider plugins. Sets up cloud resources before delegation."""
def __init__(self, args: IntegrationConfig, config_extension: str = '.ini') -> None:
super().__init__(args)
self.ci_provider = get_ci_provider()
self.remove_config = False
self.config_static_name = 'cloud-config-%s%s' % (self.platform, config_extension)
self.config_static_path = os.path.join(data_context().content.integration_path, self.config_static_name)
self.config_template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, '%s.template' % self.config_static_name)
self.config_extension = config_extension
self.uses_config = False
self.uses_docker = False
def filter(self, targets: tuple[IntegrationTarget, ...], exclude: list[str]) -> None:
"""Filter out the cloud tests when the necessary config and resources are not available."""
if not self.uses_docker and not self.uses_config:
return
if self.uses_docker and docker_available():
return
if self.uses_config and os.path.exists(self.config_static_path):
return
skip = 'cloud/%s/' % self.platform
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
if not self.uses_docker and self.uses_config:
display.warning('Excluding tests marked "%s" which require a "%s" config file (see "%s"): %s'
% (skip.rstrip('/'), self.config_static_path, self.config_template_path, ', '.join(skipped)))
elif self.uses_docker and not self.uses_config:
display.warning('Excluding tests marked "%s" which requires container support: %s'
% (skip.rstrip('/'), ', '.join(skipped)))
elif self.uses_docker and self.uses_config:
display.warning('Excluding tests marked "%s" which requires container support or a "%s" config file (see "%s"): %s'
% (skip.rstrip('/'), self.config_static_path, self.config_template_path, ', '.join(skipped)))
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
self.resource_prefix = self.ci_provider.generate_resource_prefix()
self.resource_prefix = re.sub(r'[^a-zA-Z0-9]+', '-', self.resource_prefix)[:63].lower().rstrip('-')
ExitHandler.register(self.cleanup)
def cleanup(self) -> None:
"""Clean up the cloud resource and any temporary configuration files after tests complete."""
if self.remove_config:
os.remove(self.config_path)
def _use_static_config(self) -> bool:
"""Use a static config file if available. Returns True if static config is used, otherwise returns False."""
if os.path.isfile(self.config_static_path):
display.info('Using existing %s cloud config: %s' % (self.platform, self.config_static_path), verbosity=1)
self.config_path = self.config_static_path
static = True
else:
static = False
self.managed = not static
return static
def _write_config(self, content: str) -> None:
"""Write the given content to the config file."""
prefix = '%s-' % os.path.splitext(os.path.basename(self.config_static_path))[0]
with tempfile.NamedTemporaryFile(dir=data_context().content.integration_path, prefix=prefix, suffix=self.config_extension, delete=False) as config_fd:
filename = os.path.join(data_context().content.integration_path, os.path.basename(config_fd.name))
self.config_path = filename
self.remove_config = True
display.info('>>> Config: %s\n%s' % (filename, content.strip()), verbosity=3)
config_fd.write(to_bytes(content))
config_fd.flush()
def _read_config_template(self) -> str:
"""Read and return the configuration template."""
lines = read_text_file(self.config_template_path).splitlines()
lines = [line for line in lines if not line.startswith('#')]
config = '\n'.join(lines).strip() + '\n'
return config
@staticmethod
def _populate_config_template(template: str, values: dict[str, str]) -> str:
"""Populate and return the given template with the provided values."""
for key in sorted(values):
value = values[key]
template = template.replace('@%s' % key, value)
return template
class CloudEnvironment(CloudBase):
"""Base class for cloud environment plugins. Updates integration test environment after delegation."""
def setup_once(self) -> None:
"""Run setup if it has not already been run."""
if self.setup_executed:
return
self.setup()
self.setup_executed = True
def setup(self) -> None:
"""Setup which should be done once per environment instead of once per test target."""
@abc.abstractmethod
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
def on_failure(self, target: IntegrationTarget, tries: int) -> None:
"""Callback to run when an integration target fails."""
class CloudEnvironmentConfig:
"""Configuration for the environment."""
def __init__(
self,
env_vars: t.Optional[dict[str, str]] = None,
ansible_vars: t.Optional[dict[str, t.Any]] = None,
module_defaults: t.Optional[dict[str, dict[str, t.Any]]] = None,
callback_plugins: t.Optional[list[str]] = None,
):
self.env_vars = env_vars
self.ansible_vars = ansible_vars
self.module_defaults = module_defaults
self.callback_plugins = callback_plugins
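
The base classes above are the full contract a platform plugin implements: a CloudProvider that prepares resources or config before delegation, and a CloudEnvironment that turns the resulting config into environment and ansible variables after delegation. Below is a minimal sketch of what a new, purely config-driven plugin module in this package might look like; the 'example' platform name and its ini layout are hypothetical, and the simpler plugins above (for example vultr.py and digitalocean.py) follow this same shape.

"""Example (hypothetical) platform plugin for integration tests."""
from __future__ import annotations

import configparser

from ....config import (
    IntegrationConfig,
)

from . import (
    CloudEnvironment,
    CloudEnvironmentConfig,
    CloudProvider,
)


class ExampleCloudProvider(CloudProvider):
    """Hypothetical provider which only supports a user supplied cloud-config-example.ini file."""

    def __init__(self, args: IntegrationConfig) -> None:
        super().__init__(args)

        self.uses_config = True

    def setup(self) -> None:
        """Setup the cloud resource before delegation and register a cleanup callback."""
        super().setup()

        self._use_static_config()


class ExampleCloudEnvironment(CloudEnvironment):
    """Hypothetical environment which exposes the parsed config file to the tests."""

    def get_environment_config(self) -> CloudEnvironmentConfig:
        """Return environment configuration for use in the test environment after delegation."""
        parser = configparser.ConfigParser()
        parser.read(self.config_path)

        return CloudEnvironmentConfig(
            ansible_vars=dict(parser.items('default')),
        )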
# File: ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/hcloud.py
"""Hetzner Cloud plugin for integration tests."""
from __future__ import annotations
import configparser
from ....util import (
display,
)
from ....config import (
IntegrationConfig,
)
from ....target import (
IntegrationTarget,
)
from ....core_ci import (
AnsibleCoreCI,
CloudResource,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
class HcloudCloudProvider(CloudProvider):
"""Hetzner Cloud provider plugin. Sets up cloud resources before delegation."""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.uses_config = True
def filter(self, targets: tuple[IntegrationTarget, ...], exclude: list[str]) -> None:
"""Filter out the cloud tests when the necessary config and resources are not available."""
aci = self._create_ansible_core_ci()
if aci.available:
return
super().filter(targets, exclude)
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
if not self._use_static_config():
self._setup_dynamic()
def _setup_dynamic(self) -> None:
"""Request Hetzner credentials through the Ansible Core CI service."""
display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
config = self._read_config_template()
aci = self._create_ansible_core_ci()
response = aci.start()
if not self.args.explain:
token = response['hetzner']['token']
display.sensitive.add(token)
display.info('Hetzner Cloud Token: %s' % token, verbosity=1)
values = dict(
TOKEN=token,
)
display.sensitive.add(values['TOKEN'])
config = self._populate_config_template(config, values)
self._write_config(config)
def _create_ansible_core_ci(self) -> AnsibleCoreCI:
"""Return a Hetzner instance of AnsibleCoreCI."""
return AnsibleCoreCI(self.args, CloudResource(platform='hetzner'))
class HcloudCloudEnvironment(CloudEnvironment):
"""Hetzner Cloud cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
parser = configparser.ConfigParser()
parser.read(self.config_path)
env_vars = dict(
HCLOUD_TOKEN=parser.get('default', 'hcloud_api_token'),
)
display.sensitive.add(env_vars['HCLOUD_TOKEN'])
ansible_vars = dict(
hcloud_prefix=self.resource_prefix,
)
ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
)
# File: ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/cs.py
"""CloudStack plugin for integration tests."""
from __future__ import annotations
import json
import configparser
import os
import urllib.parse
import typing as t
from ....util import (
ApplicationError,
display,
)
from ....config import (
IntegrationConfig,
)
from ....docker_util import (
docker_exec,
)
from ....containers import (
run_support_container,
wait_for_file,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
class CsCloudProvider(CloudProvider):
"""CloudStack cloud provider plugin. Sets up cloud resources before delegation."""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.image = os.environ.get('ANSIBLE_CLOUDSTACK_CONTAINER', 'quay.io/ansible/cloudstack-test-container:1.7.0')
self.host = ''
self.port = 0
self.uses_docker = True
self.uses_config = True
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
if self._use_static_config():
self._setup_static()
else:
self._setup_dynamic()
def _setup_static(self) -> None:
"""Configure CloudStack tests for use with static configuration."""
parser = configparser.ConfigParser()
parser.read(self.config_static_path)
endpoint = parser.get('cloudstack', 'endpoint')
parts = urllib.parse.urlparse(endpoint)
self.host = parts.hostname
if not self.host:
raise ApplicationError('Could not determine host from endpoint: %s' % endpoint)
if parts.port:
self.port = parts.port
elif parts.scheme == 'http':
self.port = 80
elif parts.scheme == 'https':
self.port = 443
else:
raise ApplicationError('Could not determine port from endpoint: %s' % endpoint)
display.info('Read cs host "%s" and port %d from config: %s' % (self.host, self.port, self.config_static_path), verbosity=1)
def _setup_dynamic(self) -> None:
"""Create a CloudStack simulator using docker."""
config = self._read_config_template()
self.port = 8888
ports = [
self.port,
]
descriptor = run_support_container(
self.args,
self.platform,
self.image,
'cloudstack-sim',
ports,
)
if not descriptor:
return
# apply work-around for OverlayFS issue
# https://github.com/docker/for-linux/issues/72#issuecomment-319904698
docker_exec(self.args, descriptor.name, ['find', '/var/lib/mysql', '-type', 'f', '-exec', 'touch', '{}', ';'], capture=True)
if self.args.explain:
values = dict(
HOST=self.host,
PORT=str(self.port),
)
else:
credentials = self._get_credentials(descriptor.name)
values = dict(
HOST=descriptor.name,
PORT=str(self.port),
KEY=credentials['apikey'],
SECRET=credentials['secretkey'],
)
display.sensitive.add(values['SECRET'])
config = self._populate_config_template(config, values)
self._write_config(config)
def _get_credentials(self, container_name: str) -> dict[str, t.Any]:
"""Wait for the CloudStack simulator to return credentials."""
def check(value) -> bool:
"""Return True if the given configuration is valid JSON, otherwise return False."""
# noinspection PyBroadException
try:
json.loads(value)
except Exception: # pylint: disable=broad-except
return False # sometimes the file exists but is not yet valid JSON
return True
stdout = wait_for_file(self.args, container_name, '/var/www/html/admin.json', sleep=10, tries=30, check=check)
return json.loads(stdout)
class CsCloudEnvironment(CloudEnvironment):
"""CloudStack cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
parser = configparser.ConfigParser()
parser.read(self.config_path)
config = dict(parser.items('default'))
env_vars = dict(
CLOUDSTACK_ENDPOINT=config['endpoint'],
CLOUDSTACK_KEY=config['key'],
CLOUDSTACK_SECRET=config['secret'],
CLOUDSTACK_TIMEOUT=config['timeout'],
)
display.sensitive.add(env_vars['CLOUDSTACK_SECRET'])
ansible_vars = dict(
cs_resource_prefix=self.resource_prefix,
)
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
)
# File: ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/azure.py
"""Azure plugin for integration tests."""
from __future__ import annotations
import configparser
import typing as t
from ....util import (
ApplicationError,
display,
)
from ....config import (
IntegrationConfig,
)
from ....target import (
IntegrationTarget,
)
from ....core_ci import (
AnsibleCoreCI,
CloudResource,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
class AzureCloudProvider(CloudProvider):
"""Azure cloud provider plugin. Sets up cloud resources before delegation."""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.aci: t.Optional[AnsibleCoreCI] = None
self.uses_config = True
def filter(self, targets: tuple[IntegrationTarget, ...], exclude: list[str]) -> None:
"""Filter out the cloud tests when the necessary config and resources are not available."""
aci = self._create_ansible_core_ci()
if aci.available:
return
super().filter(targets, exclude)
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
if not self._use_static_config():
self._setup_dynamic()
get_config(self.config_path) # check required variables
def cleanup(self) -> None:
"""Clean up the cloud resource and any temporary configuration files after tests complete."""
if self.aci:
self.aci.stop()
super().cleanup()
def _setup_dynamic(self) -> None:
"""Request Azure credentials through ansible-core-ci."""
display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
config = self._read_config_template()
response = {}
aci = self._create_ansible_core_ci()
aci_result = aci.start()
if not self.args.explain:
response = aci_result['azure']
self.aci = aci
if not self.args.explain:
values = dict(
AZURE_CLIENT_ID=response['clientId'],
AZURE_SECRET=response['clientSecret'],
AZURE_SUBSCRIPTION_ID=response['subscriptionId'],
AZURE_TENANT=response['tenantId'],
RESOURCE_GROUP=response['resourceGroupNames'][0],
RESOURCE_GROUP_SECONDARY=response['resourceGroupNames'][1],
)
display.sensitive.add(values['AZURE_SECRET'])
config = '\n'.join('%s: %s' % (key, values[key]) for key in sorted(values))
config = '[default]\n' + config
self._write_config(config)
def _create_ansible_core_ci(self) -> AnsibleCoreCI:
"""Return an Azure instance of AnsibleCoreCI."""
return AnsibleCoreCI(self.args, CloudResource(platform='azure'))
class AzureCloudEnvironment(CloudEnvironment):
"""Azure cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
env_vars = get_config(self.config_path)
display.sensitive.add(env_vars.get('AZURE_SECRET'))
display.sensitive.add(env_vars.get('AZURE_PASSWORD'))
ansible_vars = dict(
resource_prefix=self.resource_prefix,
)
ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
)
def on_failure(self, target: IntegrationTarget, tries: int) -> None:
"""Callback to run when an integration target fails."""
if not tries and self.managed:
display.notice('If %s failed due to permissions, the test policy may need to be updated.' % target.name)
def get_config(config_path: str) -> dict[str, str]:
"""Return a configuration dictionary parsed from the given configuration path."""
parser = configparser.ConfigParser()
parser.read(config_path)
config = dict((key.upper(), value) for key, value in parser.items('default'))
rg_vars = (
'RESOURCE_GROUP',
'RESOURCE_GROUP_SECONDARY',
)
sp_vars = (
'AZURE_CLIENT_ID',
'AZURE_SECRET',
'AZURE_SUBSCRIPTION_ID',
'AZURE_TENANT',
)
ad_vars = (
'AZURE_AD_USER',
'AZURE_PASSWORD',
'AZURE_SUBSCRIPTION_ID',
)
rg_ok = all(var in config for var in rg_vars)
sp_ok = all(var in config for var in sp_vars)
ad_ok = all(var in config for var in ad_vars)
if not rg_ok:
raise ApplicationError('Resource groups must be defined with: %s' % ', '.join(sorted(rg_vars)))
if not sp_ok and not ad_ok:
raise ApplicationError('Credentials must be defined using either:\nService Principal: %s\nActive Directory: %s' % (
', '.join(sorted(sp_vars)), ', '.join(sorted(ad_vars))))
return config
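
get_config above accepts either a service-principal or an Active Directory credential set, plus the two resource group names. Below is a standalone sketch of an ini file that satisfies the service-principal checks, parsed the same way get_config parses it; every value is a placeholder.

import configparser

SAMPLE_CONFIG = """
[default]
AZURE_CLIENT_ID = 00000000-0000-0000-0000-000000000000
AZURE_SECRET = placeholder-secret
AZURE_SUBSCRIPTION_ID = 00000000-0000-0000-0000-000000000000
AZURE_TENANT = 00000000-0000-0000-0000-000000000000
RESOURCE_GROUP = ansible-test-rg
RESOURCE_GROUP_SECONDARY = ansible-test-rg-secondary
"""

parser = configparser.ConfigParser()
parser.read_string(SAMPLE_CONFIG)
config = dict((key.upper(), value) for key, value in parser.items('default'))

assert all(var in config for var in ('RESOURCE_GROUP', 'RESOURCE_GROUP_SECONDARY'))
assert all(var in config for var in ('AZURE_CLIENT_ID', 'AZURE_SECRET', 'AZURE_SUBSCRIPTION_ID', 'AZURE_TENANT'))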
# File: ansible_ansible/test/lib/ansible_test/_internal/commands/integration/cloud/httptester.py
"""HTTP Tester plugin for integration tests."""
from __future__ import annotations
import os
from ....util import (
display,
generate_password,
)
from ....config import (
IntegrationConfig,
)
from ....containers import (
run_support_container,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
KRB5_PASSWORD_ENV = 'KRB5_PASSWORD'
class HttptesterProvider(CloudProvider):
"""HTTP Tester provider plugin. Sets up resources before delegation."""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.image = os.environ.get('ANSIBLE_HTTP_TEST_CONTAINER', 'quay.io/ansible/http-test-container:3.2.0')
self.uses_docker = True
def setup(self) -> None:
"""Setup resources before delegation."""
super().setup()
ports = [
80,
88,
443,
444,
749,
]
aliases = [
'ansible.http.tests',
'sni1.ansible.http.tests',
'fail.ansible.http.tests',
'self-signed.ansible.http.tests',
]
descriptor = run_support_container(
self.args,
self.platform,
self.image,
'http-test-container',
ports,
aliases=aliases,
env={
KRB5_PASSWORD_ENV: generate_password(),
},
)
if not descriptor:
return
# Read the password from the container environment.
# This allows the tests to work when re-using an existing container.
# The password is marked as sensitive, since it may differ from the one we generated.
krb5_password = descriptor.details.container.env_dict()[KRB5_PASSWORD_ENV]
display.sensitive.add(krb5_password)
self._set_cloud_config(KRB5_PASSWORD_ENV, krb5_password)
class HttptesterEnvironment(CloudEnvironment):
"""HTTP Tester environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
return CloudEnvironmentConfig(
env_vars=dict(
HTTPTESTER='1', # backwards compatibility for tests intended to work with or without HTTP Tester
KRB5_PASSWORD=str(self._get_cloud_config(KRB5_PASSWORD_ENV)),
)
)
# File: ansible_ansible/test/lib/ansible_test/_internal/commands/units/__init__.py
"""Execute unit tests using pytest."""
from __future__ import annotations
import os
import sys
import typing as t
from ...constants import (
CONTROLLER_MIN_PYTHON_VERSION,
CONTROLLER_PYTHON_VERSIONS,
REMOTE_ONLY_PYTHON_VERSIONS,
SUPPORTED_PYTHON_VERSIONS,
)
from ...io import (
write_text_file,
make_dirs,
)
from ...util import (
ANSIBLE_TEST_DATA_ROOT,
display,
is_subdir,
str_to_version,
SubprocessError,
ANSIBLE_LIB_ROOT,
ANSIBLE_TEST_TARGET_ROOT,
)
from ...util_common import (
ResultType,
handle_layout_messages,
create_temp_dir,
)
from ...ansible_util import (
ansible_environment,
get_ansible_python_path,
)
from ...target import (
walk_internal_targets,
walk_units_targets,
)
from ...config import (
UnitsConfig,
)
from ...coverage_util import (
cover_python,
)
from ...data import (
data_context,
)
from ...executor import (
AllTargetsSkipped,
Delegate,
get_changes_filter,
)
from ...python_requirements import (
install_requirements,
)
from ...content_config import (
get_content_config,
)
from ...host_configs import (
PosixConfig,
)
from ...provisioning import (
prepare_profiles,
)
from ...pypi_proxy import (
configure_pypi_proxy,
)
from ...host_profiles import (
PosixProfile,
)
class TestContext:
"""Contexts that unit tests run in based on the type of content."""
controller = 'controller'
modules = 'modules'
module_utils = 'module_utils'
def command_units(args: UnitsConfig) -> None:
"""Run unit tests."""
handle_layout_messages(data_context().content.unit_messages)
changes = get_changes_filter(args)
require = args.require + changes
include = walk_internal_targets(walk_units_targets(), args.include, args.exclude, require)
paths = [target.path for target in include]
content_config = get_content_config(args)
supported_remote_python_versions = content_config.modules.python_versions
if content_config.modules.controller_only:
# controller-only collections run modules/module_utils unit tests as controller-only tests
module_paths = []
module_utils_paths = []
else:
# normal collections run modules/module_utils unit tests isolated from controller code due to differences in python version requirements
module_paths = [path for path in paths if is_subdir(path, data_context().content.unit_module_path)]
module_utils_paths = [path for path in paths if is_subdir(path, data_context().content.unit_module_utils_path)]
controller_paths = sorted(path for path in set(paths) - set(module_paths) - set(module_utils_paths))
remote_paths = module_paths or module_utils_paths
test_context_paths = {
TestContext.modules: module_paths,
TestContext.module_utils: module_utils_paths,
TestContext.controller: controller_paths,
}
if not paths:
raise AllTargetsSkipped()
targets = t.cast(list[PosixConfig], args.targets)
target_versions: dict[str, PosixConfig] = {target.python.version: target for target in targets}
skipped_versions = args.host_settings.skipped_python_versions
warn_versions = []
# requested python versions that are remote-only and not supported by this collection
test_versions = [version for version in target_versions if version in REMOTE_ONLY_PYTHON_VERSIONS and version not in supported_remote_python_versions]
if test_versions:
for version in test_versions:
display.warning(f'Skipping unit tests on Python {version} because it is not supported by this collection.'
f' Supported Python versions are: {", ".join(content_config.python_versions)}')
warn_versions.extend(test_versions)
if warn_versions == list(target_versions):
raise AllTargetsSkipped()
if not remote_paths:
# all selected unit tests are controller tests
# requested python versions that are remote-only
test_versions = [version for version in target_versions if version in REMOTE_ONLY_PYTHON_VERSIONS and version not in warn_versions]
if test_versions:
for version in test_versions:
display.warning(f'Skipping unit tests on Python {version} because it is only supported by module/module_utils unit tests.'
' No module/module_utils unit tests were selected.')
warn_versions.extend(test_versions)
if warn_versions == list(target_versions):
raise AllTargetsSkipped()
if not controller_paths:
# all selected unit tests are remote tests
# requested python versions that are not supported by remote tests for this collection
test_versions = [version for version in target_versions if version not in supported_remote_python_versions and version not in warn_versions]
if test_versions:
for version in test_versions:
display.warning(f'Skipping unit tests on Python {version} because it is not supported by module/module_utils unit tests of this collection.'
f' Supported Python versions are: {", ".join(supported_remote_python_versions)}')
warn_versions.extend(test_versions)
if warn_versions == list(target_versions):
raise AllTargetsSkipped()
host_state = prepare_profiles(args, targets_use_pypi=True) # units
if args.delegate:
raise Delegate(host_state=host_state, require=changes, exclude=args.exclude)
test_sets = []
if args.requirements_mode != 'skip':
configure_pypi_proxy(args, host_state.controller_profile) # units
for version in SUPPORTED_PYTHON_VERSIONS:
if version not in target_versions and version not in skipped_versions:
continue
test_candidates = []
for test_context, paths in test_context_paths.items():
if test_context == TestContext.controller:
if version not in CONTROLLER_PYTHON_VERSIONS:
continue
else:
if version not in supported_remote_python_versions:
continue
if not paths:
continue
env = ansible_environment(args)
env.update(
PYTHONPATH=get_units_ansible_python_path(args, test_context),
ANSIBLE_CONTROLLER_MIN_PYTHON_VERSION=CONTROLLER_MIN_PYTHON_VERSION,
)
test_candidates.append((test_context, paths, env))
if not test_candidates:
continue
if version in skipped_versions:
display.warning("Skipping unit tests on Python %s because it could not be found." % version)
continue
target_profiles: dict[str, PosixProfile] = {profile.config.python.version: profile for profile in host_state.targets(PosixProfile)}
target_profile = target_profiles[version]
final_candidates = [(test_context, target_profile.python, paths, env) for test_context, paths, env in test_candidates]
controller = any(test_context == TestContext.controller for test_context, python, paths, env in final_candidates)
if args.requirements_mode != 'skip':
install_requirements(args, target_profile.python, ansible=controller, command=True, controller=False) # units
test_sets.extend(final_candidates)
if args.requirements_mode == 'only':
sys.exit()
for test_context, python, paths, env in test_sets:
# When using pytest-mock, make sure that features introduced in Python 3.8 are available to older Python versions.
# This is done by enabling the mock_use_standalone_module feature, which forces use of mock even when unittest.mock is available.
# Later Python versions have not introduced additional unittest.mock features, so use of mock is not needed as of Python 3.8.
# If future Python versions introduce new unittest.mock features, they will not be available to older Python versions.
# Having the cutoff at Python 3.8 also eases packaging of ansible-core since no supported controller version requires the use of mock.
#
# NOTE: This only affects use of pytest-mock.
# Collection unit tests may directly import mock, which will be provided by ansible-test when it installs requirements using pip.
# Although mock is available for ansible-core unit tests, they should import unittest.mock instead.
if str_to_version(python.version) < (3, 8):
config_name = 'legacy.ini'
else:
config_name = 'default.ini'
cmd = [
'pytest',
'-r', 'a',
'-n', str(args.num_workers) if args.num_workers else 'auto',
'--color', 'yes' if args.color else 'no',
'-p', 'no:cacheprovider',
'-c', os.path.join(ANSIBLE_TEST_DATA_ROOT, 'pytest', 'config', config_name),
'--junit-xml', os.path.join(ResultType.JUNIT.path, 'python%s-%s-units.xml' % (python.version, test_context)),
'--strict-markers', # added in pytest 4.5.0
'--rootdir', data_context().content.root,
'--confcutdir', data_context().content.root, # avoid permission errors when running from an installed version and using pytest >= 8
] # fmt:skip
if not data_context().content.collection:
cmd.append('--durations=25')
plugins = []
if args.coverage:
plugins.append('ansible_pytest_coverage')
if data_context().content.collection:
plugins.append('ansible_pytest_collections')
plugins.append('ansible_forked')
if plugins:
env['PYTHONPATH'] += ':%s' % os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'pytest/plugins')
env['PYTEST_PLUGINS'] = ','.join(plugins)
if args.collect_only:
cmd.append('--collect-only')
if args.verbosity:
cmd.append('-' + ('v' * args.verbosity))
cmd.extend(paths)
display.info('Unit test %s with Python %s' % (test_context, python.version))
try:
cover_python(args, python, cmd, test_context, env, capture=False)
except SubprocessError as ex:
# pytest exits with status code 5 when all tests are skipped, which isn't an error for our use case
if ex.status != 5:
raise
def get_units_ansible_python_path(args: UnitsConfig, test_context: str) -> str:
"""
Return a directory usable for PYTHONPATH, containing only the modules and module_utils portion of the ansible package.
The temporary directory created will be cached for the lifetime of the process and cleaned up at exit.
"""
if test_context == TestContext.controller:
return get_ansible_python_path(args)
try:
cache = get_units_ansible_python_path.cache # type: ignore[attr-defined]
except AttributeError:
cache = get_units_ansible_python_path.cache = {} # type: ignore[attr-defined]
python_path = cache.get(test_context)
if python_path:
return python_path
python_path = create_temp_dir(prefix='ansible-test-')
ansible_path = os.path.join(python_path, 'ansible')
ansible_test_path = os.path.join(python_path, 'ansible_test')
write_text_file(os.path.join(ansible_path, '__init__.py'), '', True)
os.symlink(os.path.join(ANSIBLE_LIB_ROOT, 'module_utils'), os.path.join(ansible_path, 'module_utils'))
if data_context().content.collection:
# built-in runtime configuration for the collection loader
make_dirs(os.path.join(ansible_path, 'config'))
os.symlink(os.path.join(ANSIBLE_LIB_ROOT, 'config', 'ansible_builtin_runtime.yml'), os.path.join(ansible_path, 'config', 'ansible_builtin_runtime.yml'))
# current collection loader required by all python versions supported by the controller
write_text_file(os.path.join(ansible_path, 'utils', '__init__.py'), '', True)
os.symlink(os.path.join(ANSIBLE_LIB_ROOT, 'utils', 'collection_loader'), os.path.join(ansible_path, 'utils', 'collection_loader'))
# legacy collection loader required by all python versions not supported by the controller
write_text_file(os.path.join(ansible_test_path, '__init__.py'), '', True)
write_text_file(os.path.join(ansible_test_path, '_internal', '__init__.py'), '', True)
elif test_context == TestContext.modules:
# only non-collection ansible module tests should have access to ansible built-in modules
os.symlink(os.path.join(ANSIBLE_LIB_ROOT, 'modules'), os.path.join(ansible_path, 'modules'))
cache[test_context] = python_path
return python_path
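
get_units_ansible_python_path above builds a throwaway package directory that exposes only the parts of ansible a module/module_utils unit test is allowed to import. Below is a standalone sketch of the core of that layout; it assumes an importable ansible package is available to locate the real module_utils tree, and it skips the collection loader and modules handling.

import os
import tempfile

import ansible

ansible_lib_root = os.path.dirname(ansible.__file__)

python_path = tempfile.mkdtemp(prefix='ansible-test-sketch-')
ansible_path = os.path.join(python_path, 'ansible')
os.makedirs(ansible_path)

# empty package marker, mirroring the write_text_file() call above
with open(os.path.join(ansible_path, '__init__.py'), 'w'):
    pass

# only module_utils is exposed; the modules context would also symlink 'modules'
os.symlink(os.path.join(ansible_lib_root, 'module_utils'), os.path.join(ansible_path, 'module_utils'))

print('restricted PYTHONPATH entry:', python_path)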
# File: ansible_ansible/test/lib/ansible_test/_internal/classification/python.py
"""Analyze python import statements."""
from __future__ import annotations
import ast
import os
import re
import typing as t
from ..io import (
read_binary_file,
)
from ..util import (
display,
ApplicationError,
is_subdir,
)
from ..data import (
data_context,
)
from ..target import (
TestTarget,
)
VIRTUAL_PACKAGES = {
'ansible.module_utils.six',
}
def get_python_module_utils_imports(compile_targets: list[TestTarget]) -> dict[str, set[str]]:
"""Return a dictionary of module_utils names mapped to sets of python file paths."""
module_utils = enumerate_module_utils()
virtual_utils = set(m for m in module_utils if any(m.startswith('%s.' % v) for v in VIRTUAL_PACKAGES))
module_utils -= virtual_utils
imports_by_target_path = {}
for target in compile_targets:
imports_by_target_path[target.path] = extract_python_module_utils_imports(target.path, module_utils)
def recurse_import(import_name: str, depth: int = 0, seen: t.Optional[set[str]] = None) -> set[str]:
"""Recursively expand module_utils imports from module_utils files."""
display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)
if seen is None:
seen = {import_name}
results = {import_name}
# virtual packages depend on the modules they contain instead of the reverse
if import_name in VIRTUAL_PACKAGES:
for sub_import in sorted(virtual_utils):
if sub_import.startswith('%s.' % import_name):
if sub_import in seen:
continue
seen.add(sub_import)
matches = sorted(recurse_import(sub_import, depth + 1, seen))
for result in matches:
results.add(result)
import_path = get_import_path(import_name)
if import_path not in imports_by_target_path:
import_path = get_import_path(import_name, package=True)
if import_path not in imports_by_target_path:
raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name)
# process imports in reverse so the deepest imports come first
for name in sorted(imports_by_target_path[import_path], reverse=True):
if name in virtual_utils:
continue
if name in seen:
continue
seen.add(name)
matches = sorted(recurse_import(name, depth + 1, seen))
for result in matches:
results.add(result)
return results
for module_util in module_utils:
# recurse over module_utils imports while excluding self
module_util_imports = recurse_import(module_util)
module_util_imports.remove(module_util)
# add recursive imports to all path entries which import this module_util
for target_path, modules in imports_by_target_path.items():
if module_util in modules:
for module_util_import in sorted(module_util_imports):
if module_util_import not in modules:
display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6)
modules.add(module_util_import)
imports: dict[str, set[str]] = {module_util: set() for module_util in module_utils | virtual_utils}
for target_path, modules in imports_by_target_path.items():
for module_util in modules:
imports[module_util].add(target_path)
# for purposes of mapping module_utils to paths, treat imports of virtual utils the same as the parent package
for virtual_util in virtual_utils:
parent_package = '.'.join(virtual_util.split('.')[:-1])
imports[virtual_util] = imports[parent_package]
display.info('%s reports imports from parent package %s' % (virtual_util, parent_package), verbosity=6)
for module_util in sorted(imports):
if not imports[module_util]:
package_path = get_import_path(module_util, package=True)
if os.path.exists(package_path) and not os.path.getsize(package_path):
continue # ignore empty __init__.py files
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def get_python_module_utils_name(path: str) -> str:
"""Return a namespace and name from the given module_utils path."""
base_path = data_context().content.module_utils_path
if data_context().content.collection:
prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils'
else:
prefix = 'ansible.module_utils'
if path.endswith('/__init__.py'):
path = os.path.dirname(path)
if path == base_path:
name = prefix
else:
name = prefix + '.' + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
return name
def enumerate_module_utils() -> set[str]:
"""Return a list of available module_utils imports."""
module_utils = []
for path in data_context().content.walk_files(data_context().content.module_utils_path):
ext = os.path.splitext(path)[1]
if ext != '.py':
continue
module_utils.append(get_python_module_utils_name(path))
return set(module_utils)
def extract_python_module_utils_imports(path: str, module_utils: set[str]) -> set[str]:
"""Return a list of module_utils imports found in the specified source file."""
# Python code must be read as bytes to avoid a SyntaxError when the source uses comments to declare the file encoding.
# See: https://www.python.org/dev/peps/pep-0263
# Specifically: If a Unicode string with a coding declaration is passed to compile(), a SyntaxError will be raised.
code = read_binary_file(path)
try:
tree = ast.parse(code)
except SyntaxError as ex:
# Treat this error as a warning so tests can be executed as best as possible.
# The compile test will detect and report this syntax error.
display.warning('%s:%s Syntax error extracting module_utils imports: %s' % (path, ex.lineno, ex.msg))
return set()
finder = ModuleUtilFinder(path, module_utils)
finder.visit(tree)
return finder.imports
def get_import_path(name: str, package: bool = False) -> str:
"""Return a path from an import name."""
if package:
filename = os.path.join(name.replace('.', '/'), '__init__.py')
else:
filename = '%s.py' % name.replace('.', '/')
if name.startswith('ansible.module_utils.') or name == 'ansible.module_utils':
path = os.path.join('lib', filename)
elif data_context().content.collection and (
name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name) or
name == 'ansible_collections.%s.plugins.module_utils' % data_context().content.collection.full_name):
path = '/'.join(filename.split('/')[3:])
else:
raise Exception('Unexpected import name: %s' % name)
return path
def path_to_module(path: str) -> str:
"""Convert the given path to a module name."""
module = os.path.splitext(path)[0].replace(os.path.sep, '.')
if module.endswith('.__init__'):
module = module[:-9]
return module
def relative_to_absolute(name: str, level: int, module: str, path: str, lineno: int) -> str:
"""Convert a relative import to an absolute import."""
if level <= 0:
absolute_name = name
elif not module:
display.warning('Cannot resolve relative import "%s%s" in unknown module at %s:%d' % ('.' * level, name, path, lineno))
absolute_name = 'relative.nomodule'
else:
parts = module.split('.')
if level >= len(parts):
display.warning('Cannot resolve relative import "%s%s" above module "%s" at %s:%d' % ('.' * level, name, module, path, lineno))
absolute_name = 'relative.abovelevel'
else:
absolute_name = '.'.join(parts[:-level] + [name])
return absolute_name
class ModuleUtilFinder(ast.NodeVisitor):
"""AST visitor to find valid module_utils imports."""
def __init__(self, path: str, module_utils: set[str]) -> None:
self.path = path
self.module_utils = module_utils
self.imports: set[str] = set()
# implicitly import parent package
if path.endswith('/__init__.py'):
path = os.path.split(path)[0]
if path.startswith('lib/ansible/module_utils/'):
package = os.path.split(path)[0].replace('/', '.')[4:]
if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
self.add_import(package, 0)
self.module = None
if data_context().content.is_ansible:
# Various parts of the Ansible source tree execute within different modules.
# To support import analysis, each file which uses relative imports must reside under a path defined here.
# The mapping is a tuple consisting of a path pattern to match and a replacement path.
# During analysis, any relative imports not covered here will result in warnings, which can be fixed by adding the appropriate entry.
path_map = (
('^lib/ansible/', 'ansible/'),
('^test/lib/ansible_test/_util/controller/sanity/validate-modules/', 'validate_modules/'),
('^test/units/', 'test/units/'),
('^test/lib/ansible_test/_internal/', 'ansible_test/_internal/'),
('^test/integration/targets/.*/ansible_collections/(?P<ns>[^/]*)/(?P<col>[^/]*)/', r'ansible_collections/\g<ns>/\g<col>/'),
('^test/integration/targets/.*/library/', 'ansible/modules/'),
)
for pattern, replacement in path_map:
if re.search(pattern, self.path):
revised_path = re.sub(pattern, replacement, self.path)
self.module = path_to_module(revised_path)
break
else:
# This assumes that all files within the collection are executed by Ansible as part of the collection.
# While that will usually be true, there are exceptions which will result in this resolution being incorrect.
self.module = path_to_module(os.path.join(data_context().content.collection.directory, self.path))
# pylint: disable=locally-disabled, invalid-name
def visit_Import(self, node: ast.Import) -> None:
"""Visit an import node."""
self.generic_visit(node)
# import ansible.module_utils.MODULE[.MODULE]
# import ansible_collections.{ns}.{col}.plugins.module_utils.module_utils.MODULE[.MODULE]
self.add_imports([alias.name for alias in node.names], node.lineno)
# pylint: disable=locally-disabled, invalid-name
def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
"""Visit an import from node."""
self.generic_visit(node)
if not node.module:
return
module = relative_to_absolute(node.module, node.level, self.module, self.path, node.lineno)
if not module.startswith('ansible'):
return
# from ansible.module_utils import MODULE[, MODULE]
# from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
# from ansible_collections.{ns}.{col}.plugins.module_utils import MODULE[, MODULE]
# from ansible_collections.{ns}.{col}.plugins.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
self.add_imports(['%s.%s' % (module, alias.name) for alias in node.names], node.lineno)
def add_import(self, name: str, line_number: int) -> None:
"""Record the specified import."""
import_name = name
while self.is_module_util_name(name):
if name in self.module_utils:
if name not in self.imports:
display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
self.imports.add(name)
return # duplicate imports are ignored
name = '.'.join(name.split('.')[:-1])
if is_subdir(self.path, data_context().content.test_path):
return # invalid imports in tests are ignored
# Treat this error as a warning so tests can be executed as best as possible.
# This error should be detected by unit or integration tests.
display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))
def add_imports(self, names: list[str], line_no: int) -> None:
"""Add the given import names if they are module_utils imports."""
for name in names:
if self.is_module_util_name(name):
self.add_import(name, line_no)
@staticmethod
def is_module_util_name(name: str) -> bool:
"""Return True if the given name is a module_util name for the content under test. External module_utils are ignored."""
if data_context().content.is_ansible and name.startswith('ansible.module_utils.'):
return True
if data_context().content.collection and name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name):
return True
return False
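A minimal usage sketch for two of the pure helpers above, path_to_module and relative_to_absolute. It assumes the snippet is appended to this module so the names are in scope, and that paths use POSIX separators; the sample module and paths are hypothetical.

# Illustrative only -- not part of ansible-test.
assert path_to_module('ansible/plugins/action/copy.py') == 'ansible.plugins.action.copy'
assert path_to_module('ansible/plugins/action/__init__.py') == 'ansible.plugins.action'  # __init__ maps to the package

# A one-level relative import (from .loader import ...) resolves against the parent package of the importing module.
assert relative_to_absolute('loader', 1, 'ansible.plugins.action.copy', 'lib/ansible/plugins/action/copy.py', 1) == \
    'ansible.plugins.action.loader'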
| 13,532 | Python | .py | 250 | 44.668 | 159 | 0.639148 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,236 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/classification/__init__.py |
"""Classify changes in Ansible code."""
from __future__ import annotations
import collections
import os
import re
import time
import typing as t
from ..target import (
walk_module_targets,
walk_integration_targets,
walk_units_targets,
walk_compile_targets,
walk_sanity_targets,
load_integration_prefixes,
analyze_integration_target_dependencies,
IntegrationTarget,
)
from ..util import (
display,
is_subdir,
)
from .python import (
get_python_module_utils_imports,
get_python_module_utils_name,
)
from .csharp import (
get_csharp_module_utils_imports,
get_csharp_module_utils_name,
)
from .powershell import (
get_powershell_module_utils_imports,
get_powershell_module_utils_name,
)
from ..config import (
TestConfig,
IntegrationConfig,
)
from ..metadata import (
ChangeDescription,
)
from ..data import (
data_context,
)
FOCUSED_TARGET = '__focused__'
def categorize_changes(args: TestConfig, paths: list[str], verbose_command: t.Optional[str] = None) -> ChangeDescription:
"""Categorize the given list of changed paths and return a description of the changes."""
mapper = PathMapper(args)
commands: dict[str, set[str]] = {
'sanity': set(),
'units': set(),
'integration': set(),
'windows-integration': set(),
'network-integration': set(),
}
focused_commands = collections.defaultdict(set)
deleted_paths: set[str] = set()
original_paths: set[str] = set()
additional_paths: set[str] = set()
no_integration_paths: set[str] = set()
for path in paths:
if not os.path.exists(path):
deleted_paths.add(path)
continue
original_paths.add(path)
dependent_paths = mapper.get_dependent_paths(path)
if not dependent_paths:
continue
display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=2)
for dependent_path in dependent_paths:
display.info(dependent_path, verbosity=2)
additional_paths.add(dependent_path)
additional_paths -= set(paths) # don't count changed paths as additional paths
if additional_paths:
display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
paths = sorted(set(paths) | additional_paths)
display.info('Mapping %d changed file(s) to tests.' % len(paths))
none_count = 0
for path in paths:
tests = mapper.classify(path)
if tests is None:
focused_target = False
display.info('%s -> all' % path, verbosity=1)
tests = all_tests(args) # not categorized, run all tests
display.warning('Path not categorized: %s' % path)
else:
focused_target = bool(tests.pop(FOCUSED_TARGET, None)) and path in original_paths
tests = dict((key, value) for key, value in tests.items() if value)
if focused_target and not any('integration' in command for command in tests):
no_integration_paths.add(path) # path triggers no integration tests
if verbose_command:
result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')
# identify targeted integration tests (those which only target a single integration command)
if 'integration' in verbose_command and tests.get(verbose_command):
if not any('integration' in command for command in tests if command != verbose_command):
if focused_target:
result += ' (focused)'
result += ' (targeted)'
else:
result = '%s' % tests
if not tests.get(verbose_command):
# minimize excessive output from potentially thousands of files which do not trigger tests
none_count += 1
verbosity = 2
else:
verbosity = 1
if args.verbosity >= verbosity:
display.info('%s -> %s' % (path, result), verbosity=1)
for command, target in tests.items():
commands[command].add(target)
if focused_target:
focused_commands[command].add(target)
if none_count > 0 and args.verbosity < 2:
display.notice('Omitted %d file(s) that triggered no tests.' % none_count)
for command, targets in commands.items():
targets.discard('none')
if any(target == 'all' for target in targets):
commands[command] = {'all'}
sorted_commands = dict((cmd, sorted(targets)) for cmd, targets in commands.items() if targets)
focused_commands = dict((cmd, sorted(targets)) for cmd, targets in focused_commands.items())
for command, targets in sorted_commands.items():
if targets == ['all']:
sorted_commands[command] = [] # changes require testing all targets, do not filter targets
changes = ChangeDescription()
changes.command = verbose_command
changes.changed_paths = sorted(original_paths)
changes.deleted_paths = sorted(deleted_paths)
changes.regular_command_targets = sorted_commands
changes.focused_command_targets = focused_commands
changes.no_integration_paths = sorted(no_integration_paths)
return changes
class PathMapper:
"""Map file paths to test commands and targets."""
def __init__(self, args: TestConfig) -> None:
self.args = args
self.integration_all_target = get_integration_all_target(self.args)
self.integration_targets = list(walk_integration_targets())
self.module_targets = list(walk_module_targets())
self.compile_targets = list(walk_compile_targets())
self.units_targets = list(walk_units_targets())
self.sanity_targets = list(walk_sanity_targets())
self.powershell_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1')]
self.csharp_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] == '.cs']
self.units_modules = set(target.module for target in self.units_targets if target.module)
self.units_paths = set(a for target in self.units_targets for a in target.aliases)
self.sanity_paths = set(target.path for target in self.sanity_targets)
self.module_names_by_path = dict((target.path, target.module) for target in self.module_targets)
self.integration_targets_by_name = dict((target.name, target) for target in self.integration_targets)
self.integration_targets_by_alias = dict((a, target) for target in self.integration_targets for a in target.aliases)
self.posix_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'posix/' in target.aliases for m in target.modules)
self.windows_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'windows/' in target.aliases for m in target.modules)
self.network_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'network/' in target.aliases for m in target.modules)
self.prefixes = load_integration_prefixes()
self.integration_dependencies = analyze_integration_target_dependencies(self.integration_targets)
self.python_module_utils_imports: dict[str, set[str]] = {} # populated on first use to reduce overhead when not needed
self.powershell_module_utils_imports: dict[str, set[str]] = {} # populated on first use to reduce overhead when not needed
self.csharp_module_utils_imports: dict[str, set[str]] = {} # populated on first use to reduce overhead when not needed
self.paths_to_dependent_targets: dict[str, set[IntegrationTarget]] = {}
for target in self.integration_targets:
for path in target.needs_file:
if path not in self.paths_to_dependent_targets:
self.paths_to_dependent_targets[path] = set()
self.paths_to_dependent_targets[path].add(target)
def get_dependent_paths(self, path: str) -> list[str]:
"""Return a list of paths which depend on the given path, recursively expanding dependent paths as well."""
unprocessed_paths = set(self.get_dependent_paths_non_recursive(path))
paths = set()
while unprocessed_paths:
queued_paths = list(unprocessed_paths)
paths |= unprocessed_paths
unprocessed_paths = set()
for queued_path in queued_paths:
new_paths = self.get_dependent_paths_non_recursive(queued_path)
for new_path in new_paths:
if new_path not in paths:
unprocessed_paths.add(new_path)
return sorted(paths)
def get_dependent_paths_non_recursive(self, path: str) -> list[str]:
"""Return a list of paths which depend on the given path, including dependent integration test target paths."""
paths = self.get_dependent_paths_internal(path)
paths += [target.path + '/' for target in self.paths_to_dependent_targets.get(path, set())]
paths = sorted(set(paths))
return paths
def get_dependent_paths_internal(self, path: str) -> list[str]:
"""Return a list of paths which depend on the given path."""
ext = os.path.splitext(os.path.split(path)[1])[1]
if is_subdir(path, data_context().content.module_utils_path):
if ext == '.py':
return self.get_python_module_utils_usage(path)
if ext == '.psm1':
return self.get_powershell_module_utils_usage(path)
if ext == '.cs':
return self.get_csharp_module_utils_usage(path)
if is_subdir(path, data_context().content.integration_targets_path):
return self.get_integration_target_usage(path)
return []
def get_python_module_utils_usage(self, path: str) -> list[str]:
"""Return a list of paths which depend on the given path which is a Python module_utils file."""
if not self.python_module_utils_imports:
display.info('Analyzing python module_utils imports...')
before = time.time()
self.python_module_utils_imports = get_python_module_utils_imports(self.compile_targets)
after = time.time()
display.info('Processed %d python module_utils in %d second(s).' % (len(self.python_module_utils_imports), after - before))
name = get_python_module_utils_name(path)
return sorted(self.python_module_utils_imports[name])
def get_powershell_module_utils_usage(self, path: str) -> list[str]:
"""Return a list of paths which depend on the given path which is a PowerShell module_utils file."""
if not self.powershell_module_utils_imports:
display.info('Analyzing powershell module_utils imports...')
before = time.time()
self.powershell_module_utils_imports = get_powershell_module_utils_imports(self.powershell_targets)
after = time.time()
display.info('Processed %d powershell module_utils in %d second(s).' % (len(self.powershell_module_utils_imports), after - before))
name = get_powershell_module_utils_name(path)
return sorted(self.powershell_module_utils_imports[name])
def get_csharp_module_utils_usage(self, path: str) -> list[str]:
"""Return a list of paths which depend on the given path which is a C# module_utils file."""
if not self.csharp_module_utils_imports:
display.info('Analyzing C# module_utils imports...')
before = time.time()
self.csharp_module_utils_imports = get_csharp_module_utils_imports(self.powershell_targets, self.csharp_targets)
after = time.time()
display.info('Processed %d C# module_utils in %d second(s).' % (len(self.csharp_module_utils_imports), after - before))
name = get_csharp_module_utils_name(path)
return sorted(self.csharp_module_utils_imports[name])
def get_integration_target_usage(self, path: str) -> list[str]:
"""Return a list of paths which depend on the given path which is an integration target file."""
target_name = path.split('/')[3]
dependents = [os.path.join(data_context().content.integration_targets_path, target) + os.path.sep
for target in sorted(self.integration_dependencies.get(target_name, set()))]
return dependents
def classify(self, path: str) -> t.Optional[dict[str, str]]:
"""Classify the given path and return an optional dictionary of the results."""
result = self._classify(path)
# run all tests when no result given
if result is None:
return None
# run sanity on path unless result specified otherwise
if path in self.sanity_paths and 'sanity' not in result:
result['sanity'] = path
return result
def _classify(self, path: str) -> t.Optional[dict[str, str]]:
"""Return the classification for the given path."""
if data_context().content.is_ansible:
return self._classify_ansible(path)
if data_context().content.collection:
return self._classify_collection(path)
return None
def _classify_common(self, path: str) -> t.Optional[dict[str, str]]:
"""Return the classification for the given path using rules common to all layouts."""
dirname = os.path.dirname(path)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
minimal: dict[str, str] = {}
if os.path.sep not in path:
if filename in (
'azure-pipelines.yml',
):
return all_tests(self.args) # test infrastructure, run all tests
if is_subdir(path, '.azure-pipelines'):
return all_tests(self.args) # test infrastructure, run all tests
if is_subdir(path, '.github'):
return minimal
if is_subdir(path, data_context().content.integration_targets_path):
if not os.path.exists(path):
return minimal
target = self.integration_targets_by_name.get(path.split('/')[3])
if not target:
display.warning('Unexpected non-target found: %s' % path)
return minimal
if 'hidden/' in target.aliases:
return minimal # already expanded using get_dependent_paths
return {
'integration': target.name if 'posix/' in target.aliases else None,
'windows-integration': target.name if 'windows/' in target.aliases else None,
'network-integration': target.name if 'network/' in target.aliases else None,
FOCUSED_TARGET: target.name,
}
if is_subdir(path, data_context().content.integration_path):
if dirname == data_context().content.integration_path:
for command in (
'integration',
'windows-integration',
'network-integration',
):
if name == command and ext == '.cfg':
return {
command: self.integration_all_target,
}
if name == command + '.requirements' and ext == '.txt':
return {
command: self.integration_all_target,
}
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
}
if is_subdir(path, data_context().content.sanity_path):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
}
if is_subdir(path, data_context().content.unit_path):
if path in self.units_paths:
return {
'units': path,
}
# changes to files which are not unit tests should trigger tests from the nearest parent directory
test_path = os.path.dirname(path)
while test_path:
if test_path + '/' in self.units_paths:
return {
'units': test_path + '/',
}
test_path = os.path.dirname(test_path)
if is_subdir(path, data_context().content.module_path):
module_name = self.module_names_by_path.get(path)
if module_name:
return {
'units': module_name if module_name in self.units_modules else None,
'integration': self.posix_integration_by_module.get(module_name) if ext == '.py' else None,
'windows-integration': self.windows_integration_by_module.get(module_name) if ext in ['.cs', '.ps1'] else None,
'network-integration': self.network_integration_by_module.get(module_name),
FOCUSED_TARGET: module_name,
}
return minimal
if is_subdir(path, data_context().content.module_utils_path):
if ext == '.cs':
return minimal # already expanded using get_dependent_paths
if ext == '.psm1':
return minimal # already expanded using get_dependent_paths
if ext == '.py':
return minimal # already expanded using get_dependent_paths
if is_subdir(path, data_context().content.plugin_paths['action']):
if ext == '.py':
if name.startswith('net_'):
network_target = 'network/.*_%s' % name[4:]
if any(re.search(r'^%s$' % network_target, alias) for alias in self.integration_targets_by_alias):
return {
'network-integration': network_target,
'units': 'all',
}
return {
'network-integration': self.integration_all_target,
'units': 'all',
}
if self.prefixes.get(name) == 'network':
network_platform = name
elif name.endswith('_config') and self.prefixes.get(name[:-7]) == 'network':
network_platform = name[:-7]
elif name.endswith('_template') and self.prefixes.get(name[:-9]) == 'network':
network_platform = name[:-9]
else:
network_platform = None
if network_platform:
network_target = 'network/%s/' % network_platform
if network_target in self.integration_targets_by_alias:
return {
'network-integration': network_target,
'units': 'all',
}
display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
return {
'units': 'all',
}
if is_subdir(path, data_context().content.plugin_paths['connection']):
units_dir = os.path.join(data_context().content.unit_path, 'plugins', 'connection')
if name == '__init__':
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
'units': os.path.join(units_dir, ''),
}
units_path = os.path.join(units_dir, 'test_%s.py' % name)
if units_path not in self.units_paths:
units_path = None
integration_name = 'connection_%s' % name
if integration_name not in self.integration_targets_by_name:
integration_name = None
windows_integration_name = 'connection_windows_%s' % name
if windows_integration_name not in self.integration_targets_by_name:
windows_integration_name = None
# entire integration test commands depend on these connection plugins
if name in ['winrm', 'psrp']:
return {
'windows-integration': self.integration_all_target,
'units': units_path,
}
if name == 'local':
return {
'integration': self.integration_all_target,
'network-integration': self.integration_all_target,
'units': units_path,
}
if name == 'network_cli':
return {
'network-integration': self.integration_all_target,
'units': units_path,
}
if name == 'paramiko_ssh':
return {
'integration': integration_name,
'network-integration': self.integration_all_target,
'units': units_path,
}
# other connection plugins have isolated integration and unit tests
return {
'integration': integration_name,
'windows-integration': windows_integration_name,
'units': units_path,
}
if is_subdir(path, data_context().content.plugin_paths['doc_fragments']):
return {
'sanity': 'all',
}
if is_subdir(path, data_context().content.plugin_paths['inventory']):
if name == '__init__':
return all_tests(self.args) # broad impact, run all tests
# These inventory plugins are enabled by default (see INVENTORY_ENABLED).
# Without dedicated integration tests for these we must rely on the incidental coverage from other tests.
test_all = [
'host_list',
'script',
'yaml',
'ini',
'auto',
]
if name in test_all:
posix_integration_fallback = get_integration_all_target(self.args)
else:
posix_integration_fallback = None
target = self.integration_targets_by_name.get('inventory_%s' % name)
units_dir = os.path.join(data_context().content.unit_path, 'plugins', 'inventory')
units_path = os.path.join(units_dir, 'test_%s.py' % name)
if units_path not in self.units_paths:
units_path = None
return {
'integration': target.name if target and 'posix/' in target.aliases else posix_integration_fallback,
'windows-integration': target.name if target and 'windows/' in target.aliases else None,
'network-integration': target.name if target and 'network/' in target.aliases else None,
'units': units_path,
FOCUSED_TARGET: target.name if target else None,
}
if is_subdir(path, data_context().content.plugin_paths['filter']):
return self._simple_plugin_tests('filter', name)
if is_subdir(path, data_context().content.plugin_paths['lookup']):
return self._simple_plugin_tests('lookup', name)
if (is_subdir(path, data_context().content.plugin_paths['terminal']) or
is_subdir(path, data_context().content.plugin_paths['cliconf']) or
is_subdir(path, data_context().content.plugin_paths['netconf'])):
if ext == '.py':
if name in self.prefixes and self.prefixes[name] == 'network':
network_target = 'network/%s/' % name
if network_target in self.integration_targets_by_alias:
return {
'network-integration': network_target,
'units': 'all',
}
display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
return {
'units': 'all',
}
return {
'network-integration': self.integration_all_target,
'units': 'all',
}
if is_subdir(path, data_context().content.plugin_paths['test']):
return self._simple_plugin_tests('test', name)
return None
def _classify_collection(self, path: str) -> t.Optional[dict[str, str]]:
"""Return the classification for the given path using rules specific to collections."""
result = self._classify_common(path)
if result is not None:
return result
filename = os.path.basename(path)
dummy, ext = os.path.splitext(filename)
minimal: dict[str, str] = {}
if path.startswith('changelogs/'):
return minimal
if path.startswith('docs/'):
return minimal
if '/' not in path:
if path in (
'.gitignore',
'COPYING',
'LICENSE',
'Makefile',
):
return minimal
if ext in (
'.in',
'.md',
'.rst',
'.toml',
'.txt',
):
return minimal
return None
def _classify_ansible(self, path: str) -> t.Optional[dict[str, str]]:
"""Return the classification for the given path using rules specific to Ansible."""
dirname = os.path.dirname(path)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
minimal: dict[str, str] = {}
packaging = {
'integration': 'packaging/',
}
# Early classification that needs to occur before common classification belongs here.
if dirname == '.azure-pipelines/commands':
test_map = {
'cloud.sh': 'integration:cloud/',
'linux.sh': 'integration:all',
'network.sh': 'network-integration:all',
'remote.sh': 'integration:all',
'sanity.sh': 'sanity:all',
'units.sh': 'units:all',
'windows.sh': 'windows-integration:all',
}
test_match = test_map.get(filename)
if test_match:
test_command, test_target = test_match.split(':')
return {
test_command: test_target,
}
cloud_target = f'cloud/{name}/'
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
# Classification common to both ansible and collections.
result = self._classify_common(path)
if result is not None:
return result
# Classification here is specific to ansible, and runs after common classification.
if path.startswith('bin/'):
return all_tests(self.args) # broad impact, run all tests
if path.startswith('changelogs/'):
return minimal
if path.startswith('hacking/'):
return minimal
if path.startswith('lib/ansible/executor/powershell/'):
units_path = 'test/units/executor/powershell/'
if units_path not in self.units_paths:
units_path = None
return {
'windows-integration': self.integration_all_target,
'units': units_path,
}
if path.startswith('lib/ansible/'):
return all_tests(self.args) # broad impact, run all tests
if path.startswith('licenses/'):
return minimal
if path.startswith('packaging/'):
packaging_target = f'packaging_{os.path.splitext(path.split(os.path.sep)[1])[0]}'
if packaging_target in self.integration_targets_by_name:
return {
'integration': packaging_target,
}
return minimal
if path.startswith('test/ansible_test/'):
return minimal # these tests are not invoked from ansible-test
if path.startswith('test/lib/ansible_test/config/'):
if name.startswith('cloud-config-'):
cloud_target = 'cloud/%s/' % name.split('-')[2].split('.')[0]
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
if path.startswith('test/lib/ansible_test/_data/completion/'):
if path == 'test/lib/ansible_test/_data/completion/docker.txt':
return all_tests(self.args, force=True) # force all tests due to risk of breaking changes in new test environment
if path.startswith('test/lib/ansible_test/_internal/commands/integration/cloud/'):
cloud_target = 'cloud/%s/' % name
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/lib/ansible_test/_internal/commands/sanity/'):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
'integration': 'ansible-test/', # run ansible-test self tests
}
if path.startswith('test/lib/ansible_test/_internal/commands/units/'):
return {
'units': 'all', # test infrastructure, run all unit tests
'integration': 'ansible-test/', # run ansible-test self tests
}
if path.startswith('test/lib/ansible_test/_data/requirements/'):
if name in (
'integration',
'network-integration',
'windows-integration',
):
return {
name: self.integration_all_target,
}
if name in (
'sanity',
'units',
):
return {
name: 'all',
}
if path.startswith('test/lib/ansible_test/_util/controller/sanity/') or path.startswith('test/lib/ansible_test/_util/target/sanity/'):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
'integration': 'ansible-test/', # run ansible-test self tests
}
if path.startswith('test/lib/ansible_test/_util/target/pytest/'):
return {
'units': 'all', # test infrastructure, run all unit tests
'integration': 'ansible-test/', # run ansible-test self tests
}
if path.startswith('test/lib/'):
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/support/'):
return all_tests(self.args) # test infrastructure, run all tests
if '/' not in path:
if path in (
'.gitattributes',
'.gitignore',
'.mailmap',
'COPYING',
'Makefile',
):
return minimal
if path in (
'MANIFEST.in',
'pyproject.toml',
'requirements.txt',
):
return packaging
if ext in (
'.md',
'.rst',
):
return minimal
return None # unknown, will result in fall-back to run all tests
def _simple_plugin_tests(self, plugin_type: str, plugin_name: str) -> dict[str, t.Optional[str]]:
"""
Return tests for the given plugin type and plugin name.
This function is useful for plugin types which do not require special processing.
"""
if plugin_name == '__init__':
return all_tests(self.args, True)
integration_target = self.integration_targets_by_name.get('%s_%s' % (plugin_type, plugin_name))
if integration_target:
integration_name = integration_target.name
else:
integration_name = None
units_path = os.path.join(data_context().content.unit_path, 'plugins', plugin_type, 'test_%s.py' % plugin_name)
if units_path not in self.units_paths:
units_path = None
return dict(
integration=integration_name,
units=units_path,
)
def all_tests(args: TestConfig, force: bool = False) -> dict[str, str]:
"""Return the targets for each test command when all tests should be run."""
if force:
integration_all_target = 'all'
else:
integration_all_target = get_integration_all_target(args)
return {
'sanity': 'all',
'units': 'all',
'integration': integration_all_target,
'windows-integration': integration_all_target,
'network-integration': integration_all_target,
}
def get_integration_all_target(args: TestConfig) -> str:
"""Return the target to use when all tests should be run."""
if isinstance(args, IntegrationConfig):
return args.changed_all_target
return 'all'
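A simplified, self-contained sketch of the aggregation step performed by categorize_changes() above: per-path classifications are merged into per-command target sets, and a single 'all' target collapses the whole set for that command. The sample paths and classifications are hypothetical.

# Illustrative only -- a toy restatement, not the real implementation.
classifications = {
    'lib/ansible/modules/ping.py': {'units': 'ping', 'integration': 'ping'},
    'lib/ansible/cli/galaxy.py': {'units': 'all', 'integration': 'all'},
}
commands: dict[str, set[str]] = {'units': set(), 'integration': set()}
for _path, tests in classifications.items():
    for command, target in tests.items():
        commands[command].add(target)
for command, targets in commands.items():
    if any(target == 'all' for target in targets):
        commands[command] = {'all'}  # one 'all' overrides every narrower target for that command
assert commands == {'units': {'all'}, 'integration': {'all'}}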
| 34,142 | Python | .py | 680 | 36.960294 | 143 | 0.577014 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,237 | powershell.py | ansible_ansible/test/lib/ansible_test/_internal/classification/powershell.py |
"""Analyze powershell import statements."""
from __future__ import annotations
import os
import re
from ..io import (
read_text_file,
)
from ..util import (
display,
)
from .common import (
resolve_csharp_ps_util,
)
from ..data import (
data_context,
)
from ..target import (
TestTarget,
)
def get_powershell_module_utils_imports(powershell_targets: list[TestTarget]) -> dict[str, set[str]]:
"""Return a dictionary of module_utils names mapped to sets of powershell file paths."""
module_utils = enumerate_module_utils()
imports_by_target_path = {}
for target in powershell_targets:
imports_by_target_path[target.path] = extract_powershell_module_utils_imports(target.path, module_utils)
imports: dict[str, set[str]] = {module_util: set() for module_util in module_utils}
for target_path, modules in imports_by_target_path.items():
for module_util in modules:
imports[module_util].add(target_path)
for module_util in sorted(imports):
if not imports[module_util]:
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def get_powershell_module_utils_name(path: str) -> str:
"""Return a namespace and name from the given module_utils path."""
base_path = data_context().content.module_utils_powershell_path
if data_context().content.collection:
prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.'
else:
prefix = ''
name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
return name
def enumerate_module_utils() -> set[str]:
"""Return a set of available module_utils imports."""
return set(get_powershell_module_utils_name(p)
for p in data_context().content.walk_files(data_context().content.module_utils_powershell_path)
if os.path.splitext(p)[1] == '.psm1')
def extract_powershell_module_utils_imports(path: str, module_utils: set[str]) -> set[str]:
"""Return a set of module_utils imports found in the specified source file."""
imports = set()
code = read_text_file(path)
if data_context().content.is_ansible and '# POWERSHELL_COMMON' in code:
imports.add('Ansible.ModuleUtils.Legacy')
lines = code.splitlines()
line_number = 0
for line in lines:
line_number += 1
match = re.search(r'(?i)^#\s*(?:requires\s+-modules?|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)', line)
if not match:
continue
import_name = resolve_csharp_ps_util(match.group(1), path)
if import_name in module_utils:
imports.add(import_name)
elif data_context().content.is_ansible or \
import_name.startswith('ansible_collections.%s' % data_context().content.prefix):
display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
return imports
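A quick, self-contained check of the import-detection regex used above, with hypothetical sample lines; only the special comment forms are recognized.

# Illustrative only -- not part of ansible-test.
import re

pattern = r'(?i)^#\s*(?:requires\s+-modules?|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)'
assert re.search(pattern, '#Requires -Module Ansible.ModuleUtils.Legacy').group(1) == 'Ansible.ModuleUtils.Legacy'
assert re.search(pattern, '#AnsibleRequires -PowerShell ansible_collections.my_ns.my_col.plugins.module_utils.Common').group(1) == \
    'ansible_collections.my_ns.my_col.plugins.module_utils.Common'
assert re.search(pattern, 'Import-Module SomethingElse') is None  # ordinary PowerShell imports are not treated as module_utils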
| 3,053 | Python | .py | 67 | 39.358209 | 142 | 0.677834 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,238 | csharp.py | ansible_ansible/test/lib/ansible_test/_internal/classification/csharp.py |
"""Analyze C# import statements."""
from __future__ import annotations
import os
import re
from ..io import (
open_text_file,
)
from ..util import (
display,
)
from .common import (
resolve_csharp_ps_util,
)
from ..data import (
data_context,
)
from ..target import (
TestTarget,
)
def get_csharp_module_utils_imports(powershell_targets: list[TestTarget], csharp_targets: list[TestTarget]) -> dict[str, set[str]]:
"""Return a dictionary of module_utils names mapped to sets of powershell file paths."""
module_utils = enumerate_module_utils()
imports_by_target_path = {}
for target in powershell_targets:
imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, False)
for target in csharp_targets:
imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, True)
imports: dict[str, set[str]] = {module_util: set() for module_util in module_utils}
for target_path, modules in imports_by_target_path.items():
for module_util in modules:
imports[module_util].add(target_path)
for module_util in sorted(imports):
if not imports[module_util]:
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def get_csharp_module_utils_name(path: str) -> str:
"""Return a namespace and name from the given module_utils path."""
base_path = data_context().content.module_utils_csharp_path
if data_context().content.collection:
prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.'
else:
prefix = ''
name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
return name
def enumerate_module_utils() -> set[str]:
"""Return a set of available module_utils imports."""
return set(get_csharp_module_utils_name(p)
for p in data_context().content.walk_files(data_context().content.module_utils_csharp_path)
if os.path.splitext(p)[1] == '.cs')
def extract_csharp_module_utils_imports(path: str, module_utils: set[str], is_pure_csharp: bool) -> set[str]:
"""Return a set of module_utils imports found in the specified source file."""
imports = set()
if is_pure_csharp:
pattern = re.compile(r'(?i)^using\s((?:Ansible|AnsibleCollections)\..+);$')
else:
pattern = re.compile(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((?:Ansible|ansible_collections|\.)\..+)')
with open_text_file(path) as module_file:
for line_number, line in enumerate(module_file, 1):
match = re.search(pattern, line)
if not match:
continue
import_name = resolve_csharp_ps_util(match.group(1), path)
if import_name in module_utils:
imports.add(import_name)
elif data_context().content.is_ansible or \
import_name.startswith('ansible_collections.%s' % data_context().content.prefix):
display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
return imports
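A quick, self-contained check of the two patterns used above: pure C# files use `using` statements, while hybrid PowerShell/C# files use the `#AnsibleRequires -CSharpUtil` comment. The sample lines are hypothetical.

# Illustrative only -- not part of ansible-test.
import re

pure_csharp = re.compile(r'(?i)^using\s((?:Ansible|AnsibleCollections)\..+);$')
hybrid = re.compile(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((?:Ansible|ansible_collections|\.)\..+)')
assert pure_csharp.search('using Ansible.Basic;').group(1) == 'Ansible.Basic'
assert hybrid.search('#AnsibleRequires -CSharpUtil Ansible.Basic').group(1) == 'Ansible.Basic'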
| 3,241 | Python | .py | 68 | 40.852941 | 131 | 0.668893 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,239 | common.py | ansible_ansible/test/lib/ansible_test/_internal/classification/common.py |
"""Common classification code used by multiple languages."""
from __future__ import annotations
import os
from ..data import (
data_context,
)
def resolve_csharp_ps_util(import_name: str, path: str) -> str:
"""Return the fully qualified name of the given import if possible, otherwise return the original import name."""
if data_context().content.is_ansible or not import_name.startswith('.'):
# We don't support relative paths for builtin utils; there's no point.
return import_name
packages = import_name.split('.')
module_packages = path.split(os.path.sep)
for package in packages:
if not module_packages or package:
break
del module_packages[-1]
return 'ansible_collections.%s%s' % (data_context().content.prefix,
'.'.join(module_packages + [p for p in packages if p]))
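A simplified, self-contained restatement of the relative-import resolution above, using a hypothetical collection prefix and POSIX path separators; each leading dot strips one trailing path component, starting with the file name.

# Illustrative only -- a toy restatement, not the real implementation.
import os


def _resolve(import_name: str, path: str, prefix: str) -> str:
    packages = import_name.split('.')
    module_packages = path.split(os.path.sep)
    for package in packages:
        if not module_packages or package:
            break
        del module_packages[-1]  # each leading '.' removes one trailing path component
    return 'ansible_collections.%s%s' % (prefix, '.'.join(module_packages + [p for p in packages if p]))


assert _resolve('..module_utils.Helper', 'plugins/modules/win_ping.ps1', 'my_ns.my_col.') == \
    'ansible_collections.my_ns.my_col.plugins.module_utils.Helper'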
| 894 | Python | .py | 19 | 39.736842 | 117 | 0.664747 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,240 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/provider/__init__.py |
"""Provider (plugin) infrastructure for ansible-test."""
from __future__ import annotations
import abc
import os
import typing as t
from ..util import (
ApplicationError,
get_subclasses,
)
def get_path_provider_classes(provider_type: t.Type[TPathProvider]) -> list[t.Type[TPathProvider]]:
"""Return a list of path provider classes of the given type."""
return sorted(get_subclasses(provider_type), key=lambda subclass: (subclass.priority, subclass.__name__))
def find_path_provider(
provider_type: t.Type[TPathProvider],
provider_classes: list[t.Type[TPathProvider]],
path: str,
walk: bool,
) -> TPathProvider:
"""Return the first found path provider of the given type for the given path."""
sequences = sorted(set(pc.sequence for pc in provider_classes if pc.sequence > 0))
for sequence in sequences:
candidate_path = path
tier_classes = [pc for pc in provider_classes if pc.sequence == sequence]
while True:
for provider_class in tier_classes:
if provider_class.is_content_root(candidate_path):
return provider_class(candidate_path)
if not walk:
break
parent_path = os.path.dirname(candidate_path)
if parent_path == candidate_path:
break
candidate_path = parent_path
raise ProviderNotFoundForPath(provider_type, path)
class ProviderNotFoundForPath(ApplicationError):
"""Exception generated when a path based provider cannot be found for a given path."""
def __init__(self, provider_type: t.Type, path: str) -> None:
super().__init__('No %s found for path: %s' % (provider_type.__name__, path))
self.provider_type = provider_type
self.path = path
class PathProvider(metaclass=abc.ABCMeta):
"""Base class for provider plugins that are path based."""
sequence = 500
priority = 500
def __init__(self, root: str) -> None:
self.root = root
@staticmethod
@abc.abstractmethod
def is_content_root(path: str) -> bool:
"""Return True if the given path is a content root for this provider."""
TPathProvider = t.TypeVar('TPathProvider', bound=PathProvider)
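A minimal sketch of how find_path_provider() walks up parent directories until a provider claims the path. It assumes the snippet is appended to this module so PathProvider and find_path_provider are in scope; the provider class and paths are hypothetical.

# Illustrative only -- not part of ansible-test.
class _DemoProvider(PathProvider):
    """Hypothetical provider that claims any directory literally named 'demo_root'."""

    @staticmethod
    def is_content_root(path: str) -> bool:
        return os.path.basename(path) == 'demo_root'


provider = find_path_provider(_DemoProvider, [_DemoProvider], 'demo_root/sub/dir', walk=True)
assert isinstance(provider, _DemoProvider) and provider.root == 'demo_root'  # found by walking up two levels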
| 2,246 | Python | .py | 51 | 37.156863 | 109 | 0.675725 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,241 | unsupported.py | ansible_ansible/test/lib/ansible_test/_internal/provider/layout/unsupported.py |
"""Layout provider for an unsupported directory layout."""
from __future__ import annotations
from . import (
ContentLayout,
LayoutProvider,
)
class UnsupportedLayout(LayoutProvider):
"""Layout provider for an unsupported directory layout."""
sequence = 0 # disable automatic detection
@staticmethod
def is_content_root(path: str) -> bool:
"""Return True if the given path is a content root for this provider."""
return False
def create(self, root: str, paths: list[str]) -> ContentLayout:
"""Create a Layout using the given root and paths."""
plugin_paths = dict((p, p) for p in self.PLUGIN_TYPES)
return ContentLayout(
root,
paths,
plugin_paths=plugin_paths,
collection=None,
test_path='',
results_path='',
sanity_path='',
sanity_messages=None,
integration_path='',
integration_targets_path='',
integration_vars_path='',
integration_messages=None,
unit_path='',
unit_module_path='',
unit_module_utils_path='',
unit_messages=None,
unsupported=True,
)
| 1,240 | Python | .py | 35 | 26.228571 | 80 | 0.592654 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,242 | collection.py | ansible_ansible/test/lib/ansible_test/_internal/provider/layout/collection.py |
"""Layout provider for Ansible collections."""
from __future__ import annotations
import os
from . import (
ContentLayout,
LayoutProvider,
CollectionDetail,
LayoutMessages,
)
from ...util import (
is_valid_identifier,
)
class CollectionLayout(LayoutProvider):
"""Layout provider for Ansible collections."""
@staticmethod
def is_content_root(path: str) -> bool:
"""Return True if the given path is a content root for this provider."""
if os.path.basename(os.path.dirname(os.path.dirname(path))) == 'ansible_collections':
return True
return False
def create(self, root: str, paths: list[str]) -> ContentLayout:
"""Create a Layout using the given root and paths."""
plugin_paths = dict((p, os.path.join('plugins', p)) for p in self.PLUGIN_TYPES)
collection_root = os.path.dirname(os.path.dirname(root))
collection_dir = os.path.relpath(root, collection_root)
collection_namespace: str
collection_name: str
collection_namespace, collection_name = collection_dir.split(os.path.sep)
collection_root = os.path.dirname(collection_root)
sanity_messages = LayoutMessages()
integration_messages = LayoutMessages()
unit_messages = LayoutMessages()
# these apply to all test commands
self.__check_test_path(paths, sanity_messages)
self.__check_test_path(paths, integration_messages)
self.__check_test_path(paths, unit_messages)
# these apply to specific test commands
integration_targets_path = self.__check_integration_path(paths, integration_messages)
self.__check_unit_path(paths, unit_messages)
errors: list[str] = []
if not is_valid_identifier(collection_namespace):
errors.append(f'The namespace "{collection_namespace}" is an invalid identifier or a reserved keyword.')
if not is_valid_identifier(collection_name):
errors.append(f'The name "{collection_name}" is an invalid identifier or a reserved keyword.')
return ContentLayout(
root,
paths,
plugin_paths=plugin_paths,
collection=CollectionDetail(
name=collection_name,
namespace=collection_namespace,
root=collection_root,
),
test_path='tests',
results_path='tests/output',
sanity_path='tests/sanity',
sanity_messages=sanity_messages,
integration_path='tests/integration',
integration_targets_path=integration_targets_path.rstrip(os.path.sep),
integration_vars_path='tests/integration/integration_config.yml',
integration_messages=integration_messages,
unit_path='tests/unit',
unit_module_path='tests/unit/plugins/modules',
unit_module_utils_path='tests/unit/plugins/module_utils',
unit_messages=unit_messages,
unsupported=errors,
)
@staticmethod
def __check_test_path(paths: list[str], messages: LayoutMessages) -> None:
modern_test_path = 'tests/'
modern_test_path_found = any(path.startswith(modern_test_path) for path in paths)
legacy_test_path = 'test/'
legacy_test_path_found = any(path.startswith(legacy_test_path) for path in paths)
if modern_test_path_found and legacy_test_path_found:
messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_test_path, modern_test_path))
elif legacy_test_path_found:
messages.warning.append('Ignoring tests in "%s" that should be in "%s".' % (legacy_test_path, modern_test_path))
@staticmethod
def __check_integration_path(paths: list[str], messages: LayoutMessages) -> str:
modern_integration_path = 'roles/test/'
modern_integration_path_found = any(path.startswith(modern_integration_path) for path in paths)
legacy_integration_path = 'tests/integration/targets/'
legacy_integration_path_found = any(path.startswith(legacy_integration_path) for path in paths)
if modern_integration_path_found and legacy_integration_path_found:
messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_integration_path, modern_integration_path))
integration_targets_path = modern_integration_path
elif legacy_integration_path_found:
messages.info.append('Falling back to tests in "%s" because "%s" was not found.' % (legacy_integration_path, modern_integration_path))
integration_targets_path = legacy_integration_path
elif modern_integration_path_found:
messages.info.append('Loading tests from "%s".' % modern_integration_path)
integration_targets_path = modern_integration_path
else:
messages.error.append('Cannot run integration tests without "%s" or "%s".' % (modern_integration_path, legacy_integration_path))
integration_targets_path = modern_integration_path
return integration_targets_path
@staticmethod
def __check_unit_path(paths: list[str], messages: LayoutMessages) -> None:
modern_unit_path = 'tests/unit/'
modern_unit_path_found = any(path.startswith(modern_unit_path) for path in paths)
legacy_unit_path = 'tests/units/' # test/units/ will be covered by the warnings for test/ vs tests/
legacy_unit_path_found = any(path.startswith(legacy_unit_path) for path in paths)
if modern_unit_path_found and legacy_unit_path_found:
messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_unit_path, modern_unit_path))
elif legacy_unit_path_found:
messages.warning.append('Rename "%s" to "%s" to run unit tests.' % (legacy_unit_path, modern_unit_path))
elif modern_unit_path_found:
pass # unit tests only run from one directory so no message is needed
else:
messages.error.append('Cannot run unit tests without "%s".' % modern_unit_path)
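A worked example, using only os.path and hypothetical POSIX paths, of the root/namespace/name split that create() performs above.

# Illustrative only -- not part of ansible-test.
import os.path

root = '/srv/ansible_collections/my_ns/my_col'  # hypothetical collection checkout
assert os.path.basename(os.path.dirname(os.path.dirname(root))) == 'ansible_collections'  # is_content_root() accepts it
collection_root = os.path.dirname(os.path.dirname(root))
namespace, name = os.path.relpath(root, collection_root).split(os.path.sep)
assert (namespace, name) == ('my_ns', 'my_col')
assert os.path.dirname(collection_root) == '/srv'  # becomes CollectionDetail.root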
| 6,130 | Python | .py | 110 | 46.127273 | 146 | 0.667 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,243 | ansible.py | ansible_ansible/test/lib/ansible_test/_internal/provider/layout/ansible.py |
"""Layout provider for Ansible source."""
from __future__ import annotations
import os
from . import (
ContentLayout,
LayoutProvider,
)
from ...util import (
ANSIBLE_SOURCE_ROOT,
ANSIBLE_TEST_ROOT,
)
class AnsibleLayout(LayoutProvider):
"""Layout provider for Ansible source."""
@staticmethod
def is_content_root(path: str) -> bool:
"""Return True if the given path is a content root for this provider."""
return os.path.isfile(os.path.join(path, 'pyproject.toml')) and os.path.isdir(os.path.join(path, 'test/lib/ansible_test'))
def create(self, root: str, paths: list[str]) -> ContentLayout:
"""Create a Layout using the given root and paths."""
plugin_paths = dict((p, os.path.join('lib/ansible/plugins', p)) for p in self.PLUGIN_TYPES)
plugin_paths.update(
modules='lib/ansible/modules',
module_utils='lib/ansible/module_utils',
)
errors: list[str] = []
if root != ANSIBLE_SOURCE_ROOT:
errors.extend((
f'Cannot test "{root}" with ansible-test from "{ANSIBLE_TEST_ROOT}".',
'',
f'Did you intend to run "{root}/bin/ansible-test" instead?',
))
return ContentLayout(
root,
paths,
plugin_paths=plugin_paths,
collection=None,
test_path='test',
results_path='test/results',
sanity_path='test/sanity',
sanity_messages=None,
integration_path='test/integration',
integration_targets_path='test/integration/targets',
integration_vars_path='test/integration/integration_config.yml',
integration_messages=None,
unit_path='test/units',
unit_module_path='test/units/modules',
unit_module_utils_path='test/units/module_utils',
unit_messages=None,
unsupported=errors,
)
| 1,979 | Python | .py | 50 | 30.12 | 130 | 0.604797 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,244 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/provider/layout/__init__.py |
"""Code for finding content."""
from __future__ import annotations
import abc
import collections
import os
import typing as t
from ...util import (
ANSIBLE_SOURCE_ROOT,
)
from .. import (
PathProvider,
)
class Layout:
"""Description of content locations and helper methods to access content."""
def __init__(
self,
root: str,
paths: list[str],
) -> None:
self.root = root
self.__paths = paths # contains both file paths and symlinked directory paths (ending with os.path.sep)
self.__files = [path for path in paths if not path.endswith(os.path.sep)] # contains only file paths
self.__paths_tree = paths_to_tree(self.__paths)
self.__files_tree = paths_to_tree(self.__files)
def all_files(self, include_symlinked_directories: bool = False) -> list[str]:
"""Return a list of all file paths."""
if include_symlinked_directories:
return self.__paths
return self.__files
def walk_files(self, directory: str, include_symlinked_directories: bool = False) -> list[str]:
"""Return a list of file paths found recursively under the given directory."""
if include_symlinked_directories:
tree = self.__paths_tree
else:
tree = self.__files_tree
parts = directory.rstrip(os.path.sep).split(os.path.sep)
item = get_tree_item(tree, parts)
if not item:
return []
directories = collections.deque(item[0].values())
files = list(item[1])
while directories:
item = directories.pop()
directories.extend(item[0].values())
files.extend(item[1])
return files
def get_dirs(self, directory: str) -> list[str]:
"""Return a list directory paths found directly under the given directory."""
parts = directory.rstrip(os.path.sep).split(os.path.sep)
item = get_tree_item(self.__files_tree, parts)
return [os.path.join(directory, key) for key in item[0].keys()] if item else []
def get_files(self, directory: str) -> list[str]:
"""Return a list of file paths found directly under the given directory."""
parts = directory.rstrip(os.path.sep).split(os.path.sep)
item = get_tree_item(self.__files_tree, parts)
return item[1] if item else []
class ContentLayout(Layout):
"""Information about the current Ansible content being tested."""
def __init__(
self,
root: str,
paths: list[str],
plugin_paths: dict[str, str],
collection: t.Optional[CollectionDetail],
test_path: str,
results_path: str,
sanity_path: str,
sanity_messages: t.Optional[LayoutMessages],
integration_path: str,
integration_targets_path: str,
integration_vars_path: str,
integration_messages: t.Optional[LayoutMessages],
unit_path: str,
unit_module_path: str,
unit_module_utils_path: str,
unit_messages: t.Optional[LayoutMessages],
unsupported: bool | list[str] = False,
) -> None:
super().__init__(root, paths)
self.plugin_paths = plugin_paths
self.collection = collection
self.test_path = test_path
self.results_path = results_path
self.sanity_path = sanity_path
self.sanity_messages = sanity_messages
self.integration_path = integration_path
self.integration_targets_path = integration_targets_path
self.integration_vars_path = integration_vars_path
self.integration_messages = integration_messages
self.unit_path = unit_path
self.unit_module_path = unit_module_path
self.unit_module_utils_path = unit_module_utils_path
self.unit_messages = unit_messages
self.unsupported = unsupported
self.is_ansible = root == ANSIBLE_SOURCE_ROOT
@property
def prefix(self) -> str:
"""Return the collection prefix or an empty string if not a collection."""
if self.collection:
return self.collection.prefix
return ''
@property
def module_path(self) -> t.Optional[str]:
"""Return the path where modules are found, if any."""
return self.plugin_paths.get('modules')
@property
def module_utils_path(self) -> t.Optional[str]:
"""Return the path where module_utils are found, if any."""
return self.plugin_paths.get('module_utils')
@property
def module_utils_powershell_path(self) -> t.Optional[str]:
"""Return the path where powershell module_utils are found, if any."""
if self.is_ansible:
return os.path.join(self.plugin_paths['module_utils'], 'powershell')
return self.plugin_paths.get('module_utils')
@property
def module_utils_csharp_path(self) -> t.Optional[str]:
"""Return the path where csharp module_utils are found, if any."""
if self.is_ansible:
return os.path.join(self.plugin_paths['module_utils'], 'csharp')
return self.plugin_paths.get('module_utils')
class LayoutMessages:
"""Messages generated during layout creation that should be deferred for later display."""
def __init__(self) -> None:
self.info: list[str] = []
self.warning: list[str] = []
self.error: list[str] = []
class CollectionDetail:
"""Details about the layout of the current collection."""
def __init__(
self,
name: str,
namespace: str,
root: str,
) -> None:
self.name = name
self.namespace = namespace
self.root = root
self.full_name = '%s.%s' % (namespace, name)
self.prefix = '%s.' % self.full_name
self.directory = os.path.join('ansible_collections', namespace, name)
class LayoutProvider(PathProvider):
"""Base class for layout providers."""
PLUGIN_TYPES = (
'action',
'become',
'cache',
'callback',
'cliconf',
'connection',
'doc_fragments',
'filter',
'httpapi',
'inventory',
'lookup',
'module_utils',
'modules',
'netconf',
'shell',
'strategy',
'terminal',
'test',
'vars',
# The following are plugin directories not directly supported by ansible-core, but used in collections
# (https://github.com/ansible-collections/overview/blob/main/collection_requirements.rst#modules--plugins)
'plugin_utils',
'sub_plugins',
)
@abc.abstractmethod
def create(self, root: str, paths: list[str]) -> ContentLayout:
"""Create a layout using the given root and paths."""
def paths_to_tree(paths: list[str]) -> tuple[dict[str, t.Any], list[str]]:
"""Return a filesystem tree from the given list of paths."""
tree: tuple[dict[str, t.Any], list[str]] = {}, []
for path in paths:
parts = path.split(os.path.sep)
root = tree
for part in parts[:-1]:
if part not in root[0]:
root[0][part] = {}, []
root = root[0][part]
root[1].append(path)
return tree
def get_tree_item(tree: tuple[dict[str, t.Any], list[str]], parts: list[str]) -> t.Optional[tuple[dict[str, t.Any], list[str]]]:
"""Return the portion of the tree found under the path given by parts, or None if it does not exist."""
root = tree
for part in parts:
root = root[0].get(part)
if not root:
return None
return root
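# --- Illustrative usage sketch added for clarity; not part of the original file. Example paths are hypothetical. ---
def _example_tree_lookup() -> tuple:
    """Show how paths_to_tree() and get_tree_item() cooperate (assumes POSIX path separators)."""
    tree = paths_to_tree(['docs/readme.txt', 'src/pkg/mod.py'])
    # Looking up ['src', 'pkg'] yields ({}, ['src/pkg/mod.py']); a missing directory yields None.
    return get_tree_item(tree, ['src', 'pkg']), get_tree_item(tree, ['does-not-exist'])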
| 7,601 | Python | .py | 191 | 31.753927 | 128 | 0.621313 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,245 | unsupported.py | ansible_ansible/test/lib/ansible_test/_internal/provider/source/unsupported.py |
"""Source provider to use when the layout is unsupported."""
from __future__ import annotations
from . import (
SourceProvider,
)
class UnsupportedSource(SourceProvider):
"""Source provider to use when the layout is unsupported."""
sequence = 0 # disable automatic detection
@staticmethod
def is_content_root(path: str) -> bool:
"""Return True if the given path is a content root for this provider."""
return False
def get_paths(self, path: str) -> list[str]:
"""Return the list of available content paths under the given path."""
return []
| 604 | Python | .py | 15 | 35.133333 | 80 | 0.689537 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,246 | git.py | ansible_ansible/test/lib/ansible_test/_internal/provider/source/git.py |
"""Source provider for a content root managed by git version control."""
from __future__ import annotations
import os
from ...git import (
Git,
)
from ...encoding import (
to_bytes,
)
from ...util import (
SubprocessError,
)
from . import (
SourceProvider,
)
class GitSource(SourceProvider):
"""Source provider for a content root managed by git version control."""
@staticmethod
def is_content_root(path: str) -> bool:
"""Return True if the given path is a content root for this provider."""
return os.path.exists(os.path.join(path, '.git'))
def get_paths(self, path: str) -> list[str]:
"""Return the list of available content paths under the given path."""
paths = self.__get_paths(path)
try:
submodule_paths = Git(path).get_submodule_paths()
except SubprocessError:
if path == self.root:
raise
# older versions of git require submodule commands to be executed from the top level of the working tree
# git version 2.18.1 (centos8) does not have this restriction
# git version 1.8.3.1 (centos7) does
# fall back to using the top level directory of the working tree only when needed
# this avoids penalizing newer git versions with a potentially slower analysis due to additional submodules
rel_path = os.path.relpath(path, self.root) + os.path.sep
submodule_paths = Git(self.root).get_submodule_paths()
submodule_paths = [os.path.relpath(p, rel_path) for p in submodule_paths if p.startswith(rel_path)]
for submodule_path in submodule_paths:
paths.extend(os.path.join(submodule_path, p) for p in self.__get_paths(os.path.join(path, submodule_path)))
# git reports submodule directories as regular files
paths = [p for p in paths if p not in submodule_paths]
return paths
@staticmethod
def __get_paths(path: str) -> list[str]:
"""Return the list of available content paths under the given path."""
git = Git(path)
paths = git.get_file_names(['--cached', '--others', '--exclude-standard'])
deleted_paths = git.get_file_names(['--deleted'])
paths = sorted(set(paths) - set(deleted_paths))
# directory symlinks are reported by git as regular files but they need to be treated as directories
paths = [path + os.path.sep if os.path.isdir(to_bytes(path)) else path for path in paths]
return paths
| 2,540 | Python | .py | 52 | 41.115385 | 119 | 0.658704 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,247 | __init__.py | ansible_ansible/test/lib/ansible_test/_internal/provider/source/__init__.py |
"""Common code for source providers."""
from __future__ import annotations
import abc
from .. import (
PathProvider,
)
class SourceProvider(PathProvider):
"""Base class for source providers."""
@abc.abstractmethod
def get_paths(self, path: str) -> list[str]:
"""Return the list of available content paths under the given path."""
| 359 | Python | .py | 11 | 29 | 78 | 0.708455 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,248 | unversioned.py | ansible_ansible/test/lib/ansible_test/_internal/provider/source/unversioned.py |
"""Fallback source provider when no other provider matches the content root."""
from __future__ import annotations
import os
from ...constants import (
TIMEOUT_PATH,
)
from ...encoding import (
to_bytes,
)
from . import (
SourceProvider,
)
class UnversionedSource(SourceProvider):
"""Fallback source provider when no other provider matches the content root."""
sequence = 0 # disable automatic detection
@staticmethod
def is_content_root(path: str) -> bool:
"""Return True if the given path is a content root for this provider."""
return False
def get_paths(self, path: str) -> list[str]:
"""Return the list of available content paths under the given path."""
paths = []
kill_any_dir = (
'.idea',
'.pytest_cache',
'__pycache__',
'ansible.egg-info',
'ansible_base.egg-info',
'ansible_core.egg-info',
)
kill_sub_dir = {
'test': (
'results',
'cache',
'output',
),
'tests': (
'output',
),
}
kill_sub_file = {
'': (
TIMEOUT_PATH,
),
}
kill_extensions = (
'.pyc',
'.pyo',
'.retry',
)
for root, dir_names, file_names in os.walk(path):
rel_root = os.path.relpath(root, path)
if rel_root == '.':
rel_root = ''
for kill in kill_any_dir + kill_sub_dir.get(rel_root, ()):
if kill in dir_names:
dir_names.remove(kill)
kill_files = kill_sub_file.get(rel_root, ())
paths.extend([os.path.join(rel_root, file_name) for file_name in file_names
if not os.path.splitext(file_name)[1] in kill_extensions and file_name not in kill_files])
# include directory symlinks since they will not be traversed and would otherwise go undetected
paths.extend([os.path.join(rel_root, dir_name) + os.path.sep for dir_name in dir_names if os.path.islink(to_bytes(dir_name))])
return paths
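# --- Illustrative usage sketch added for clarity; not part of the original file. ---
# Assuming the PathProvider base class takes the content root as its constructor
# argument, a fallback scan of an unversioned checkout might look like this
# (the directory below is hypothetical):
#     provider = UnversionedSource('/tmp/example-collection')
#     relative_paths = provider.get_paths(provider.root)
# Cache and output directories, compiled files and the timeout file are pruned,
# and directory symlinks are returned with a trailing os.path.sep.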
| 2,240 | Python | .py | 63 | 25.253968 | 138 | 0.539175 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,249 | installed.py | ansible_ansible/test/lib/ansible_test/_internal/provider/source/installed.py |
"""Source provider for content which has been installed."""
from __future__ import annotations
import os
from . import (
SourceProvider,
)
class InstalledSource(SourceProvider):
"""Source provider for content which has been installed."""
sequence = 0 # disable automatic detection
@staticmethod
def is_content_root(path: str) -> bool:
"""Return True if the given path is a content root for this provider."""
return False
def get_paths(self, path: str) -> list[str]:
"""Return the list of available content paths under the given path."""
paths = []
kill_extensions = (
'.pyc',
'.pyo',
)
for root, _dummy, file_names in os.walk(path):
rel_root = os.path.relpath(root, path)
if rel_root == '.':
rel_root = ''
paths.extend([os.path.join(rel_root, file_name) for file_name in file_names
if not os.path.splitext(file_name)[1] in kill_extensions])
# NOTE: directory symlinks are ignored as there should be no directory symlinks for an install
return paths
| 1,165 | Python | .py | 28 | 32.928571 | 106 | 0.617438 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,250 | hide_unraisable.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/hide_unraisable.py |
"""Temporary plugin to prevent stdout noise pollution from finalization of abandoned generators."""
from __future__ import annotations
import sys
import typing as t
if t.TYPE_CHECKING:
from pylint.lint import PyLinter
def _mask_finalizer_valueerror(ur: t.Any) -> None:
"""Mask only ValueErrors from finalizing abandoned generators; delegate everything else"""
# work around Python finalizer issue that sometimes spews this error message to stdout
# see https://github.com/pylint-dev/pylint/issues/9138
if ur.exc_type is ValueError and 'generator already executing' in str(ur.exc_value):
return
sys.__unraisablehook__(ur)
def register(linter: PyLinter) -> None: # pylint: disable=unused-argument
"""PyLint plugin registration entrypoint"""
sys.unraisablehook = _mask_finalizer_valueerror
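# --- Illustrative sketch added for clarity; not part of the original plugin. ---
# Once register() has run, every unraisable exception passes through
# _mask_finalizer_valueerror(). Only the generator-finalizer ValueError is
# swallowed; anything else is still forwarded to sys.__unraisablehook__:
#     import types
#     fake = types.SimpleNamespace(exc_type=ValueError,
#                                  exc_value=ValueError('generator already executing'))
#     _mask_finalizer_valueerror(fake)  # silently ignored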
| 835 | Python | .py | 16 | 48.25 | 99 | 0.763547 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,251 | deprecated.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/deprecated.py |
"""Ansible specific plyint plugin for checking deprecations."""
# (c) 2018, Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# -*- coding: utf-8 -*-
from __future__ import annotations
import datetime
import re
import shlex
import typing as t
from tokenize import COMMENT, TokenInfo
import astroid
# support pylint 2.x and 3.x -- remove when supporting only 3.x
try:
from pylint.interfaces import IAstroidChecker, ITokenChecker
except ImportError:
class IAstroidChecker:
"""Backwards compatibility for 2.x / 3.x support."""
class ITokenChecker:
"""Backwards compatibility for 2.x / 3.x support."""
try:
from pylint.checkers.utils import check_messages
except ImportError:
from pylint.checkers.utils import only_required_for_messages as check_messages
from pylint.checkers import BaseChecker, BaseTokenChecker
from ansible.module_utils.compat.version import LooseVersion
from ansible.release import __version__ as ansible_version_raw
from ansible.utils.version import SemanticVersion
MSGS = {
'E9501': ("Deprecated version (%r) found in call to Display.deprecated "
"or AnsibleModule.deprecate",
"ansible-deprecated-version",
"Used when a call to Display.deprecated specifies a version "
"less than or equal to the current version of Ansible",
{'minversion': (2, 6)}),
'E9502': ("Display.deprecated call without a version or date",
"ansible-deprecated-no-version",
"Used when a call to Display.deprecated does not specify a "
"version or date",
{'minversion': (2, 6)}),
'E9503': ("Invalid deprecated version (%r) found in call to "
"Display.deprecated or AnsibleModule.deprecate",
"ansible-invalid-deprecated-version",
"Used when a call to Display.deprecated specifies an invalid "
"Ansible version number",
{'minversion': (2, 6)}),
'E9504': ("Deprecated version (%r) found in call to Display.deprecated "
"or AnsibleModule.deprecate",
"collection-deprecated-version",
"Used when a call to Display.deprecated specifies a collection "
"version less than or equal to the current version of this "
"collection",
{'minversion': (2, 6)}),
'E9505': ("Invalid deprecated version (%r) found in call to "
"Display.deprecated or AnsibleModule.deprecate",
"collection-invalid-deprecated-version",
"Used when a call to Display.deprecated specifies an invalid "
"collection version number",
{'minversion': (2, 6)}),
'E9506': ("No collection name found in call to Display.deprecated or "
"AnsibleModule.deprecate",
"ansible-deprecated-no-collection-name",
"The current collection name in format `namespace.name` must "
"be provided as collection_name when calling Display.deprecated "
"or AnsibleModule.deprecate (`ansible.builtin` for ansible-core)",
{'minversion': (2, 6)}),
'E9507': ("Wrong collection name (%r) found in call to "
"Display.deprecated or AnsibleModule.deprecate",
"wrong-collection-deprecated",
"The name of the current collection must be passed to the "
"Display.deprecated resp. AnsibleModule.deprecate calls "
"(`ansible.builtin` for ansible-core)",
{'minversion': (2, 6)}),
'E9508': ("Expired date (%r) found in call to Display.deprecated "
"or AnsibleModule.deprecate",
"ansible-deprecated-date",
"Used when a call to Display.deprecated specifies a date "
"before today",
{'minversion': (2, 6)}),
'E9509': ("Invalid deprecated date (%r) found in call to "
"Display.deprecated or AnsibleModule.deprecate",
"ansible-invalid-deprecated-date",
"Used when a call to Display.deprecated specifies an invalid "
"date. It must be a string in format `YYYY-MM-DD` (ISO 8601)",
{'minversion': (2, 6)}),
'E9510': ("Both version and date found in call to "
"Display.deprecated or AnsibleModule.deprecate",
"ansible-deprecated-both-version-and-date",
"Only one of version and date must be specified",
{'minversion': (2, 6)}),
'E9511': ("Removal version (%r) must be a major release, not a minor or "
"patch release (see the specification at https://semver.org/)",
"removal-version-must-be-major",
"Used when a call to Display.deprecated or "
"AnsibleModule.deprecate for a collection specifies a version "
"which is not of the form x.0.0",
{'minversion': (2, 6)}),
}
ANSIBLE_VERSION = LooseVersion('.'.join(ansible_version_raw.split('.')[:3]))
def _get_expr_name(node):
"""Function to get either ``attrname`` or ``name`` from ``node.func.expr``
Created specifically for the case of ``display.deprecated`` or ``self._display.deprecated``
"""
try:
return node.func.expr.attrname
except AttributeError:
# If this fails too, we'll let it raise, the caller should catch it
return node.func.expr.name
def _get_func_name(node):
"""Function to get either ``attrname`` or ``name`` from ``node.func``
Created specifically for the case of ``from ansible.module_utils.common.warnings import deprecate``
"""
try:
return node.func.attrname
except AttributeError:
return node.func.name
def parse_isodate(value):
"""Parse an ISO 8601 date string."""
msg = 'Expected ISO 8601 date string (YYYY-MM-DD)'
if not isinstance(value, str):
raise ValueError(msg)
    # From Python 3.7 on, there is datetime.date.fromisoformat(). For older versions,
    # we have to do things manually.
if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value):
raise ValueError(msg)
try:
return datetime.datetime.strptime(value, '%Y-%m-%d').date()
except ValueError:
raise ValueError(msg) from None
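# --- Illustrative usage sketch added for clarity; not part of the original plugin. ---
def _example_parse_isodate():
    """Demonstrate parse_isodate(): valid ISO 8601 dates parse, anything else raises ValueError."""
    parsed = parse_isodate('2024-01-31')  # -> datetime.date(2024, 1, 31)
    try:
        parse_isodate('31-01-2024')  # wrong field order, rejected by the YYYY-MM-DD check
    except ValueError:
        pass
    return parsed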
class AnsibleDeprecatedChecker(BaseChecker):
"""Checks for Display.deprecated calls to ensure that the ``version``
has not passed or met the time for removal
"""
__implements__ = (IAstroidChecker,)
name = 'deprecated'
msgs = MSGS
options = (
('collection-name', {
'default': None,
'type': 'string',
'metavar': '<name>',
'help': 'The collection\'s name used to check collection names in deprecations.',
}),
('collection-version', {
'default': None,
'type': 'string',
'metavar': '<version>',
'help': 'The collection\'s version number used to check deprecations.',
}),
)
def _check_date(self, node, date):
if not isinstance(date, str):
self.add_message('ansible-invalid-deprecated-date', node=node, args=(date,))
return
try:
date_parsed = parse_isodate(date)
except ValueError:
self.add_message('ansible-invalid-deprecated-date', node=node, args=(date,))
return
if date_parsed < datetime.date.today():
self.add_message('ansible-deprecated-date', node=node, args=(date,))
def _check_version(self, node, version, collection_name):
if collection_name is None:
collection_name = 'ansible.builtin'
if not isinstance(version, (str, float)):
if collection_name == 'ansible.builtin':
symbol = 'ansible-invalid-deprecated-version'
else:
symbol = 'collection-invalid-deprecated-version'
self.add_message(symbol, node=node, args=(version,))
return
version_no = str(version)
if collection_name == 'ansible.builtin':
# Ansible-base
try:
if not version_no:
raise ValueError('Version string should not be empty')
loose_version = LooseVersion(str(version_no))
if ANSIBLE_VERSION >= loose_version:
self.add_message('ansible-deprecated-version', node=node, args=(version,))
except ValueError:
self.add_message('ansible-invalid-deprecated-version', node=node, args=(version,))
elif collection_name:
# Collections
try:
if not version_no:
raise ValueError('Version string should not be empty')
semantic_version = SemanticVersion(version_no)
if collection_name == self.collection_name and self.collection_version is not None:
if self.collection_version >= semantic_version:
self.add_message('collection-deprecated-version', node=node, args=(version,))
if semantic_version.major != 0 and (semantic_version.minor != 0 or semantic_version.patch != 0):
self.add_message('removal-version-must-be-major', node=node, args=(version,))
except ValueError:
self.add_message('collection-invalid-deprecated-version', node=node, args=(version,))
@property
def collection_name(self) -> t.Optional[str]:
"""Return the collection name, or None if ansible-core is being tested."""
return self.linter.config.collection_name
@property
def collection_version(self) -> t.Optional[SemanticVersion]:
"""Return the collection version, or None if ansible-core is being tested."""
if self.linter.config.collection_version is None:
return None
sem_ver = SemanticVersion(self.linter.config.collection_version)
# Ignore pre-release for version comparison to catch issues before the final release is cut.
sem_ver.prerelease = ()
return sem_ver
@check_messages(*(MSGS.keys()))
def visit_call(self, node):
"""Visit a call node."""
version = None
date = None
collection_name = None
try:
funcname = _get_func_name(node)
if (funcname == 'deprecated' and 'display' in _get_expr_name(node) or
funcname == 'deprecate'):
if node.keywords:
for keyword in node.keywords:
if len(node.keywords) == 1 and keyword.arg is None:
# This is likely a **kwargs splat
return
if keyword.arg == 'version':
if isinstance(keyword.value.value, astroid.Name):
# This is likely a variable
return
version = keyword.value.value
if keyword.arg == 'date':
if isinstance(keyword.value.value, astroid.Name):
# This is likely a variable
return
date = keyword.value.value
if keyword.arg == 'collection_name':
if isinstance(keyword.value.value, astroid.Name):
# This is likely a variable
return
collection_name = keyword.value.value
if not version and not date:
try:
version = node.args[1].value
except IndexError:
self.add_message('ansible-deprecated-no-version', node=node)
return
if version and date:
self.add_message('ansible-deprecated-both-version-and-date', node=node)
if collection_name:
this_collection = collection_name == (self.collection_name or 'ansible.builtin')
if not this_collection:
self.add_message('wrong-collection-deprecated', node=node, args=(collection_name,))
elif self.collection_name is not None:
self.add_message('ansible-deprecated-no-collection-name', node=node)
if date:
self._check_date(node, date)
elif version:
self._check_version(node, version, collection_name)
except AttributeError:
# Not the type of node we are interested in
pass
class AnsibleDeprecatedCommentChecker(BaseTokenChecker):
"""Checks for ``# deprecated:`` comments to ensure that the ``version``
has not passed or met the time for removal
"""
__implements__ = (ITokenChecker,)
name = 'deprecated-comment'
msgs = {
'E9601': ("Deprecated core version (%r) found: %s",
"ansible-deprecated-version-comment",
"Used when a '# deprecated:' comment specifies a version "
"less than or equal to the current version of Ansible",
{'minversion': (2, 6)}),
'E9602': ("Deprecated comment contains invalid keys %r",
"ansible-deprecated-version-comment-invalid-key",
"Used when a '#deprecated:' comment specifies invalid data",
{'minversion': (2, 6)}),
'E9603': ("Deprecated comment missing version",
"ansible-deprecated-version-comment-missing-version",
"Used when a '#deprecated:' comment specifies invalid data",
{'minversion': (2, 6)}),
'E9604': ("Deprecated python version (%r) found: %s",
"ansible-deprecated-python-version-comment",
"Used when a '#deprecated:' comment specifies a python version "
"less than or equal to the minimum python version",
{'minversion': (2, 6)}),
'E9605': ("Deprecated comment contains invalid version %r: %s",
"ansible-deprecated-version-comment-invalid-version",
"Used when a '#deprecated:' comment specifies an invalid version",
{'minversion': (2, 6)}),
}
def process_tokens(self, tokens: list[TokenInfo]) -> None:
for token in tokens:
if token.type == COMMENT:
self._process_comment(token)
def _deprecated_string_to_dict(self, token: TokenInfo, string: str) -> dict[str, str]:
valid_keys = {'description', 'core_version', 'python_version'}
data = dict.fromkeys(valid_keys)
for opt in shlex.split(string):
if '=' not in opt:
data[opt] = None
continue
key, _sep, value = opt.partition('=')
data[key] = value
if not any((data['core_version'], data['python_version'])):
self.add_message(
'ansible-deprecated-version-comment-missing-version',
line=token.start[0],
col_offset=token.start[1],
)
bad = set(data).difference(valid_keys)
if bad:
self.add_message(
'ansible-deprecated-version-comment-invalid-key',
line=token.start[0],
col_offset=token.start[1],
args=(','.join(bad),)
)
return data
def _process_python_version(self, token: TokenInfo, data: dict[str, str]) -> None:
check_version = '.'.join(map(str, self.linter.config.py_version))
try:
if LooseVersion(data['python_version']) < LooseVersion(check_version):
self.add_message(
'ansible-deprecated-python-version-comment',
line=token.start[0],
col_offset=token.start[1],
args=(
data['python_version'],
data['description'] or 'description not provided',
),
)
except (ValueError, TypeError) as exc:
self.add_message(
'ansible-deprecated-version-comment-invalid-version',
line=token.start[0],
col_offset=token.start[1],
args=(data['python_version'], exc)
)
def _process_core_version(self, token: TokenInfo, data: dict[str, str]) -> None:
try:
if ANSIBLE_VERSION >= LooseVersion(data['core_version']):
self.add_message(
'ansible-deprecated-version-comment',
line=token.start[0],
col_offset=token.start[1],
args=(
data['core_version'],
data['description'] or 'description not provided',
)
)
except (ValueError, TypeError) as exc:
self.add_message(
'ansible-deprecated-version-comment-invalid-version',
line=token.start[0],
col_offset=token.start[1],
args=(data['core_version'], exc)
)
def _process_comment(self, token: TokenInfo) -> None:
if token.string.startswith('# deprecated:'):
data = self._deprecated_string_to_dict(token, token.string[13:].strip())
if data['core_version']:
self._process_core_version(token, data)
if data['python_version']:
self._process_python_version(token, data)
def register(linter):
"""required method to auto register this checker """
linter.register_checker(AnsibleDeprecatedChecker(linter))
linter.register_checker(AnsibleDeprecatedCommentChecker(linter))
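# --- Illustrative sketch added for clarity; not part of the original plugin. Values are hypothetical. ---
# The comment checker above understands markers of this shape:
#     # deprecated: description='drop the legacy fallback' core_version='2.20'
#     # deprecated: description='remove once the baseline moves' python_version='3.9'
# E9601/E9604 fire once ansible-core or the configured Python baseline reaches the
# stated version; a marker with neither version reports E9603, and unknown keys report E9602.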
| 18,008 | Python | .py | 366 | 36.374317 | 112 | 0.579758 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,252 | string_format.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/string_format.py |
"""Ansible specific pylint plugin for checking format string usage."""
# (c) 2018, Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# -*- coding: utf-8 -*-
from __future__ import annotations
import astroid
# support pylint 2.x and 3.x -- remove when supporting only 3.x
try:
from pylint.interfaces import IAstroidChecker
except ImportError:
class IAstroidChecker:
"""Backwards compatibility for 2.x / 3.x support."""
try:
from pylint.checkers.utils import check_messages
except ImportError:
from pylint.checkers.utils import only_required_for_messages as check_messages
from pylint.checkers import BaseChecker
from pylint.checkers import utils
MSGS = {
'E9305': ("disabled", # kept for backwards compatibility with inline ignores, remove after 2.14 is EOL
"ansible-format-automatic-specification",
"disabled"),
'E9390': ("bytes object has no .format attribute",
"ansible-no-format-on-bytestring",
"Used when a bytestring was used as a PEP 3101 format string "
"as Python3 bytestrings do not have a .format attribute",
{'minversion': (3, 0)}),
}
class AnsibleStringFormatChecker(BaseChecker):
"""Checks string formatting operations to ensure that the format string
is valid and the arguments match the format string.
"""
__implements__ = (IAstroidChecker,)
name = 'string'
msgs = MSGS
@check_messages(*(MSGS.keys()))
def visit_call(self, node):
"""Visit a call node."""
func = utils.safe_infer(node.func)
if (isinstance(func, astroid.BoundMethod)
and isinstance(func.bound, astroid.Instance)
and func.bound.name in ('str', 'unicode', 'bytes')):
if func.name == 'format':
self._check_new_format(node, func)
def _check_new_format(self, node, func):
""" Check the new string formatting """
if (isinstance(node.func, astroid.Attribute)
and not isinstance(node.func.expr, astroid.Const)):
return
try:
strnode = next(func.bound.infer())
except astroid.InferenceError:
return
if not isinstance(strnode, astroid.Const):
return
if isinstance(strnode.value, bytes):
self.add_message('ansible-no-format-on-bytestring', node=node)
return
def register(linter):
"""required method to auto register this checker """
linter.register_checker(AnsibleStringFormatChecker(linter))
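# --- Illustrative sketch added for clarity; not part of the original plugin. ---
# The checker reports E9390 (ansible-no-format-on-bytestring) for calls such as:
#     b'checksum: {0}'.format(digest)   # bytes objects have no .format() on Python 3
# while the equivalent text-string call is left alone:
#     'checksum: {0}'.format(digest)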
| 2,626 | Python | .py | 61 | 35.606557 | 107 | 0.663401 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,253 | unwanted.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/unwanted.py |
"""A plugin for pylint to identify imports and functions which should not be used."""
from __future__ import annotations
import os
import typing as t
import astroid
# support pylint 2.x and 3.x -- remove when supporting only 3.x
try:
from pylint.interfaces import IAstroidChecker
except ImportError:
class IAstroidChecker:
"""Backwards compatibility for 2.x / 3.x support."""
from pylint.checkers import BaseChecker
ANSIBLE_TEST_MODULES_PATH = os.environ['ANSIBLE_TEST_MODULES_PATH']
ANSIBLE_TEST_MODULE_UTILS_PATH = os.environ['ANSIBLE_TEST_MODULE_UTILS_PATH']
class UnwantedEntry:
"""Defines an unwanted import."""
def __init__(
self,
alternative, # type: str
modules_only=False, # type: bool
names=None, # type: t.Optional[t.Tuple[str, ...]]
ignore_paths=None, # type: t.Optional[t.Tuple[str, ...]]
ansible_test_only=False, # type: bool
): # type: (...) -> None
self.alternative = alternative
self.modules_only = modules_only
self.names = set(names) if names else set()
self.ignore_paths = ignore_paths
self.ansible_test_only = ansible_test_only
def applies_to(self, path, name=None): # type: (str, t.Optional[str]) -> bool
"""Return True if this entry applies to the given path, otherwise return False."""
if self.names:
if not name:
return False
if name not in self.names:
return False
if self.ignore_paths and any(path.endswith(ignore_path) for ignore_path in self.ignore_paths):
return False
if self.ansible_test_only and '/test/lib/ansible_test/_internal/' not in path:
return False
if self.modules_only:
return is_module_path(path)
return True
def is_module_path(path): # type: (str) -> bool
"""Return True if the given path is a module or module_utils path, otherwise return False."""
return path.startswith(ANSIBLE_TEST_MODULES_PATH) or path.startswith(ANSIBLE_TEST_MODULE_UTILS_PATH)
class AnsibleUnwantedChecker(BaseChecker):
"""Checker for unwanted imports and functions."""
__implements__ = (IAstroidChecker,)
name = 'unwanted'
BAD_IMPORT = 'ansible-bad-import'
BAD_IMPORT_FROM = 'ansible-bad-import-from'
BAD_FUNCTION = 'ansible-bad-function'
BAD_MODULE_IMPORT = 'ansible-bad-module-import'
msgs = dict(
E5101=('Import %s instead of %s',
BAD_IMPORT,
'Identifies imports which should not be used.'),
E5102=('Import %s from %s instead of %s',
BAD_IMPORT_FROM,
'Identifies imports which should not be used.'),
E5103=('Call %s instead of %s',
BAD_FUNCTION,
'Identifies functions which should not be used.'),
E5104=('Import external package or ansible.module_utils not %s',
BAD_MODULE_IMPORT,
'Identifies imports which should not be used.'),
)
unwanted_imports = dict(
# Additional imports that we may want to start checking:
# boto=UnwantedEntry('boto3', modules_only=True),
# requests=UnwantedEntry('ansible.module_utils.urls', modules_only=True),
# urllib=UnwantedEntry('ansible.module_utils.urls', modules_only=True),
# see https://docs.python.org/2/library/urllib2.html
urllib2=UnwantedEntry('ansible.module_utils.urls',
ignore_paths=(
'/lib/ansible/module_utils/urls.py',
)),
# see https://docs.python.org/3/library/collections.abc.html
collections=UnwantedEntry('ansible.module_utils.six.moves.collections_abc',
names=(
'MappingView',
'ItemsView',
'KeysView',
'ValuesView',
'Mapping', 'MutableMapping',
'Sequence', 'MutableSequence',
'Set', 'MutableSet',
'Container',
'Hashable',
'Sized',
'Callable',
'Iterable',
'Iterator',
)),
)
unwanted_functions = {
# see https://docs.python.org/3/library/tempfile.html#tempfile.mktemp
'tempfile.mktemp': UnwantedEntry('tempfile.mkstemp'),
# os.chmod resolves as posix.chmod
'posix.chmod': UnwantedEntry('verified_chmod',
ansible_test_only=True),
'sys.exit': UnwantedEntry('exit_json or fail_json',
ignore_paths=(
'/lib/ansible/module_utils/basic.py',
'/lib/ansible/modules/async_wrapper.py',
),
modules_only=True),
'builtins.print': UnwantedEntry('module.log or module.debug',
ignore_paths=(
'/lib/ansible/module_utils/basic.py',
),
modules_only=True),
}
def visit_import(self, node): # type: (astroid.node_classes.Import) -> None
"""Visit an import node."""
for name in node.names:
self._check_import(node, name[0])
def visit_importfrom(self, node): # type: (astroid.node_classes.ImportFrom) -> None
"""Visit an import from node."""
self._check_importfrom(node, node.modname, node.names)
def visit_attribute(self, node): # type: (astroid.node_classes.Attribute) -> None
"""Visit an attribute node."""
last_child = node.last_child()
# this is faster than using type inference and will catch the most common cases
if not isinstance(last_child, astroid.node_classes.Name):
return
module = last_child.name
entry = self.unwanted_imports.get(module)
if entry and entry.names:
if entry.applies_to(self.linter.current_file, node.attrname):
self.add_message(self.BAD_IMPORT_FROM, args=(node.attrname, entry.alternative, module), node=node)
def visit_call(self, node): # type: (astroid.node_classes.Call) -> None
"""Visit a call node."""
try:
for i in node.func.inferred():
func = None
if isinstance(i, astroid.scoped_nodes.FunctionDef) and isinstance(i.parent, astroid.scoped_nodes.Module):
func = '%s.%s' % (i.parent.name, i.name)
if not func:
continue
entry = self.unwanted_functions.get(func)
if entry and entry.applies_to(self.linter.current_file):
self.add_message(self.BAD_FUNCTION, args=(entry.alternative, func), node=node)
except astroid.exceptions.InferenceError:
pass
def _check_import(self, node, modname): # type: (astroid.node_classes.Import, str) -> None
"""Check the imports on the specified import node."""
self._check_module_import(node, modname)
entry = self.unwanted_imports.get(modname)
if not entry:
return
if entry.applies_to(self.linter.current_file):
self.add_message(self.BAD_IMPORT, args=(entry.alternative, modname), node=node)
def _check_importfrom(self, node, modname, names): # type: (astroid.node_classes.ImportFrom, str, t.List[str]) -> None
"""Check the imports on the specified import from node."""
self._check_module_import(node, modname)
entry = self.unwanted_imports.get(modname)
if not entry:
return
for name in names:
if entry.applies_to(self.linter.current_file, name[0]):
self.add_message(self.BAD_IMPORT_FROM, args=(name[0], entry.alternative, modname), node=node)
def _check_module_import(self, node, modname): # type: (t.Union[astroid.node_classes.Import, astroid.node_classes.ImportFrom], str) -> None
"""Check the module import on the given import or import from node."""
if not is_module_path(self.linter.current_file):
return
if modname == 'ansible.module_utils' or modname.startswith('ansible.module_utils.'):
return
if modname == 'ansible' or modname.startswith('ansible.'):
self.add_message(self.BAD_MODULE_IMPORT, args=(modname,), node=node)
def register(linter):
"""required method to auto register this checker """
linter.register_checker(AnsibleUnwantedChecker(linter))
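# --- Illustrative usage sketch added for clarity; not part of the original plugin. Paths are hypothetical. ---
#     entry = UnwantedEntry('tempfile.mkstemp')
#     entry.applies_to('/any/checked/file.py')   # True: no name or path restrictions
#     scoped = UnwantedEntry('exit_json or fail_json', modules_only=True)
#     # scoped.applies_to(path) is True only for files under the module or
#     # module_utils trees named by the ANSIBLE_TEST_* environment variables above.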
| 9,061 | Python | .py | 174 | 38.022989 | 144 | 0.573401 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,254 | yaml_to_json.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/integration-aliases/yaml_to_json.py |
"""Read YAML from stdin and write JSON to stdout."""
from __future__ import annotations
import json
import sys
from yaml import load
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
json.dump(load(sys.stdin, Loader=SafeLoader), sys.stdout)
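# --- Illustrative usage added for clarity; not part of the original script. File names are hypothetical. ---
#     python yaml_to_json.py < runtime.yml > runtime.json
# The libyaml-backed CSafeLoader is used when available, with a pure-Python fallback.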
| 299 | Python | .py | 10 | 27.7 | 57 | 0.796491 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,255 | yamllinter.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/yamllint/yamllinter.py |
"""Wrapper around yamllint that supports YAML embedded in Ansible modules."""
from __future__ import annotations
import ast
import json
import os
import re
import sys
import typing as t
import yaml
from yaml.resolver import Resolver
from yaml.constructor import SafeConstructor
from yaml.error import MarkedYAMLError
from yaml.cyaml import CParser
from yamllint import linter
from yamllint.config import YamlLintConfig
def main():
"""Main program body."""
paths = sys.argv[1:] or sys.stdin.read().splitlines()
checker = YamlChecker()
checker.check(paths)
checker.report()
class TestConstructor(SafeConstructor):
"""Yaml Safe Constructor that knows about Ansible tags."""
def construct_yaml_unsafe(self, node):
"""Construct an unsafe tag."""
try:
constructor = getattr(node, 'id', 'object')
if constructor is not None:
constructor = getattr(self, 'construct_%s' % constructor)
except AttributeError:
constructor = self.construct_object
value = constructor(node)
return value
TestConstructor.add_constructor(
'!unsafe',
TestConstructor.construct_yaml_unsafe)
TestConstructor.add_constructor(
'!vault',
TestConstructor.construct_yaml_str)
TestConstructor.add_constructor(
'!vault-encrypted',
TestConstructor.construct_yaml_str)
class TestLoader(CParser, TestConstructor, Resolver):
"""Custom YAML loader that recognizes custom Ansible tags."""
def __init__(self, stream):
CParser.__init__(self, stream)
TestConstructor.__init__(self)
Resolver.__init__(self)
class YamlChecker:
"""Wrapper around yamllint that supports YAML embedded in Ansible modules."""
def __init__(self):
self.messages = []
def report(self):
"""Print yamllint report to stdout."""
report = dict(
messages=self.messages,
)
print(json.dumps(report, indent=4, sort_keys=True))
def check(self, paths): # type: (t.List[str]) -> None
"""Check the specified paths."""
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config')
yaml_conf = YamlLintConfig(file=os.path.join(config_path, 'default.yml'))
module_conf = YamlLintConfig(file=os.path.join(config_path, 'modules.yml'))
plugin_conf = YamlLintConfig(file=os.path.join(config_path, 'plugins.yml'))
for path in paths:
extension = os.path.splitext(path)[1]
with open(path, encoding='utf-8') as file:
contents = file.read()
if extension in ('.yml', '.yaml'):
self.check_yaml(yaml_conf, path, contents)
elif extension == '.py':
if path.startswith('lib/ansible/modules/') or path.startswith('plugins/modules/'):
conf = module_conf
else:
conf = plugin_conf
self.check_module(conf, path, contents)
else:
raise Exception('unsupported extension: %s' % extension)
def check_yaml(self, conf, path, contents): # type: (YamlLintConfig, str, str) -> None
"""Check the given YAML."""
self.check_parsable(path, contents)
self.messages += [self.result_to_message(r, path) for r in linter.run(contents, conf, path)]
def check_module(self, conf, path, contents): # type: (YamlLintConfig, str, str) -> None
"""Check the given module."""
docs = self.get_module_docs(path, contents)
for key, value in docs.items():
yaml_data = value['yaml']
lineno = value['lineno']
fmt = value['fmt']
if fmt != 'yaml':
continue
if yaml_data.startswith('\n'):
yaml_data = yaml_data[1:]
lineno += 1
multiple_docs_allowed = [
"EXAMPLES",
]
self.check_parsable(path, yaml_data, lineno, (key in multiple_docs_allowed), key)
messages = list(linter.run(yaml_data, conf, path))
self.messages += [self.result_to_message(r, path, lineno - 1, key) for r in messages]
    def check_parsable(self, path, contents, lineno=1, allow_multiple=False, prefix=""): # type: (str, str, int, bool, str) -> None
"""Check the given contents to verify they can be parsed as YAML."""
prefix = f"{prefix}: " if prefix else ""
try:
documents = len(list(yaml.load_all(contents, Loader=TestLoader)))
if documents > 1 and not allow_multiple:
self.messages += [{'code': 'multiple-yaml-documents',
'message': f'{prefix}expected a single document in the stream',
'path': path,
'line': lineno,
'column': 1,
'level': 'error',
}]
except MarkedYAMLError as ex:
self.messages += [{'code': 'unparsable-with-libyaml',
'message': f'{prefix}{ex.args[0]} - {ex.args[2]}',
'path': path,
'line': ex.problem_mark.line + lineno,
'column': ex.problem_mark.column + 1,
'level': 'error',
}]
@staticmethod
def result_to_message(result, path, line_offset=0, prefix=''): # type: (t.Any, str, int, str) -> t.Dict[str, t.Any]
"""Convert the given result to a dictionary and return it."""
if prefix:
prefix = '%s: ' % prefix
return dict(
code=result.rule or result.level,
message=prefix + result.desc,
path=path,
line=result.line + line_offset,
column=result.column,
level=result.level,
)
def get_module_docs(self, path, contents): # type: (str, str) -> t.Dict[str, t.Any]
"""Return the module documentation for the given module contents."""
module_doc_types = [
'DOCUMENTATION',
'EXAMPLES',
'RETURN',
]
docs = {}
fmt_re = re.compile(r'^# fmt:\s+(\S+)')
def check_assignment(statement, doc_types=None):
"""Check the given statement for a documentation assignment."""
for target in statement.targets:
if not isinstance(target, ast.Name):
continue
if doc_types and target.id not in doc_types:
continue
fmt_match = fmt_re.match(statement.value.value.lstrip())
fmt = 'yaml'
if fmt_match:
fmt = fmt_match.group(1)
docs[target.id] = dict(
yaml=statement.value.value,
lineno=statement.lineno,
end_lineno=statement.lineno + len(statement.value.value.splitlines()),
fmt=fmt.lower(),
)
module_ast = self.parse_module(path, contents)
if not module_ast:
return {}
is_plugin = path.startswith('lib/ansible/modules/') or path.startswith('lib/ansible/plugins/') or path.startswith('plugins/')
is_doc_fragment = path.startswith('lib/ansible/plugins/doc_fragments/') or path.startswith('plugins/doc_fragments/')
if is_plugin and not is_doc_fragment:
for body_statement in module_ast.body:
if isinstance(body_statement, ast.Assign):
check_assignment(body_statement, module_doc_types)
elif is_doc_fragment:
for body_statement in module_ast.body:
if isinstance(body_statement, ast.ClassDef):
for class_statement in body_statement.body:
if isinstance(class_statement, ast.Assign):
check_assignment(class_statement)
else:
raise Exception('unsupported path: %s' % path)
return docs
def parse_module(self, path, contents): # type: (str, str) -> t.Optional[ast.Module]
"""Parse the given contents and return a module if successful, otherwise return None."""
try:
return ast.parse(contents)
except SyntaxError as ex:
self.messages.append(dict(
code='python-syntax-error',
message=str(ex),
path=path,
line=ex.lineno,
column=ex.offset,
level='error',
))
except Exception as ex: # pylint: disable=broad-except
self.messages.append(dict(
code='python-parse-error',
message=str(ex),
path=path,
line=0,
column=0,
level='error',
))
return None
if __name__ == '__main__':
main()
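# --- Illustrative usage added for clarity; not part of the original script. Paths are hypothetical. ---
#     python yamllinter.py plugins/modules/ping.py meta/runtime.yml
# or, reading the file list from stdin:
#     printf '%s\n' meta/runtime.yml | python yamllinter.py
# Either way a JSON report of the form {"messages": [...]} is written to stdout.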
| 9,076 | Python | .py | 203 | 32.423645 | 133 | 0.563506 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,256 | no-get-exception.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/code-smell/no-get-exception.py |
"""Disallow use of the get_exception function."""
from __future__ import annotations
import re
import sys
def main():
"""Main entry point."""
basic_allow_once = True
for path in sys.argv[1:] or sys.stdin.read().splitlines():
with open(path, 'r', encoding='utf-8') as path_fd:
for line, text in enumerate(path_fd.readlines()):
match = re.search(r'([^a-zA-Z0-9_]get_exception[^a-zA-Z0-9_])', text)
if match:
if path == 'lib/ansible/module_utils/basic.py' and basic_allow_once:
# basic.py is allowed to import get_exception for backwards compatibility but should not call it anywhere
basic_allow_once = False
continue
print('%s:%d:%d: do not use `get_exception`' % (
path, line + 1, match.start(1) + 1))
if __name__ == '__main__':
main()
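# --- Illustrative sketch added for clarity; not part of the original script. ---
# A line such as:
#     e = get_exception()
# is reported as 'path:line:col: do not use `get_exception`'. The single import in
# lib/ansible/module_utils/basic.py is tolerated once for backwards compatibility;
# any further match in that file is still reported.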
| 947 | Python | .py | 20 | 35.75 | 129 | 0.551687 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,257 | no-illegal-filenames.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/code-smell/no-illegal-filenames.py |
"""
Check for illegal filenames on various operating systems.
The main rules are derived from restrictions on Windows:
https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#naming-conventions
"""
from __future__ import annotations
import os
import struct
import sys
from ansible.module_utils.basic import to_bytes
ILLEGAL_CHARS = [
b'<',
b'>',
b':',
b'"',
b'/',
b'\\',
b'|',
b'?',
b'*'
] + [struct.pack("b", i) for i in range(32)]
ILLEGAL_NAMES = [
"CON",
"PRN",
"AUX",
"NUL",
"COM1",
"COM2",
"COM3",
"COM4",
"COM5",
"COM6",
"COM7",
"COM8",
"COM9",
"LPT1",
"LPT2",
"LPT3",
"LPT4",
"LPT5",
"LPT6",
"LPT7",
"LPT8",
"LPT9",
]
ILLEGAL_END_CHARS = [
'.',
' ',
]
def check_path(path, is_dir=False):
"""Check the specified path for unwanted characters and names."""
type_name = 'directory' if is_dir else 'file'
file_name = os.path.basename(path.rstrip(os.path.sep))
name = os.path.splitext(file_name)[0]
if name.upper() in ILLEGAL_NAMES:
print("%s: illegal %s name %s" % (path, type_name, name.upper()))
if file_name[-1] in ILLEGAL_END_CHARS:
print("%s: illegal %s name end-char '%s'" % (path, type_name, file_name[-1]))
bfile = to_bytes(file_name, encoding='utf-8')
for char in ILLEGAL_CHARS:
if char in bfile:
bpath = to_bytes(path, encoding='utf-8')
print("%s: illegal char '%s' in %s name" % (bpath, char, type_name))
def main():
"""Main entry point."""
for path in sys.argv[1:] or sys.stdin.read().splitlines():
check_path(path, is_dir=path.endswith(os.path.sep))
if __name__ == '__main__':
main()
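# --- Illustrative sketch added for clarity; not part of the original script. Paths are hypothetical. ---
#     check_path('roles/demo/files/nul.txt')   # reports the reserved Windows name NUL
#     check_path('docs/notes.txt ')            # reports the trailing space
#     check_path('cache/', is_dir=True)        # directory names get the same checks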
| 1,757 | Python | .py | 69 | 20.898551 | 86 | 0.589606 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,258 | use-compat-six.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/code-smell/use-compat-six.py |
"""Disallow importing of the six module."""
from __future__ import annotations
import re
import sys
def main():
"""Main entry point."""
for path in sys.argv[1:] or sys.stdin.read().splitlines():
with open(path, 'r', encoding='utf-8') as path_fd:
for line, text in enumerate(path_fd.readlines()):
match = re.search(r'((^\s*import\s+six\b)|(^\s*from\s+six\b))', text)
if match:
print('%s:%d:%d: use `ansible.module_utils.six` instead of `six`' % (
path, line + 1, match.start(1) + 1))
if __name__ == '__main__':
main()
| 631 | Python | .py | 15 | 33.466667 | 89 | 0.545902 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,259 | no-assert.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/code-smell/no-assert.py |
"""Disallow use of assert."""
from __future__ import annotations
import re
import sys
ASSERT_RE = re.compile(r'^\s*assert[^a-z0-9_:]')
def main():
"""Main entry point."""
for path in sys.argv[1:] or sys.stdin.read().splitlines():
with open(path, 'r', encoding='utf-8') as file:
for i, line in enumerate(file.readlines()):
matches = ASSERT_RE.findall(line)
if matches:
lineno = i + 1
colno = line.index('assert') + 1
print('%s:%d:%d: raise AssertionError instead of: %s' % (path, lineno, colno, matches[0][colno - 1:]))
if __name__ == '__main__':
main()
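# --- Illustrative sketch added for clarity; not part of the original script. ---
# A line such as:
#     assert result is not None
# is reported as 'path:line:col: raise AssertionError instead of: ...'; lines that
# merely start with something else (for example self.assertEqual(...)) are ignored.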
| 684 | Python | .py | 17 | 31.529412 | 122 | 0.54697 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,260 | replace-urlopen.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/code-smell/replace-urlopen.py |
"""Disallow use of the urlopen function."""
from __future__ import annotations
import re
import sys
def main():
"""Main entry point."""
for path in sys.argv[1:] or sys.stdin.read().splitlines():
with open(path, 'r', encoding='utf-8') as path_fd:
for line, text in enumerate(path_fd.readlines()):
match = re.search(r'^(?:[^#]*?)(urlopen)', text)
if match:
print('%s:%d:%d: use `ansible.module_utils.urls.open_url` instead of `urlopen`' % (
path, line + 1, match.start(1) + 1))
if __name__ == '__main__':
main()
| 624 | Python | .py | 15 | 33 | 103 | 0.547264 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,261 | use-argspec-type-path.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/code-smell/use-argspec-type-path.py |
"""Disallow use of the expanduser function."""
from __future__ import annotations
import re
import sys
def main():
"""Main entry point."""
for path in sys.argv[1:] or sys.stdin.read().splitlines():
with open(path, 'r', encoding='utf-8') as path_fd:
for line, text in enumerate(path_fd.readlines()):
match = re.search(r'(expanduser)', text)
if match:
print('%s:%d:%d: use argspec type="path" instead of type="str" to avoid use of `expanduser`' % (
path, line + 1, match.start(1) + 1))
if __name__ == '__main__':
main()
| 632 | Python | .py | 15 | 33.533333 | 116 | 0.563011 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,262 | shebang.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/code-smell/shebang.py |
"""Check shebangs, execute bits and byte order marks."""
from __future__ import annotations
import os
import re
import stat
import sys
def main():
"""Main entry point."""
standard_shebangs = set([
b'#!/bin/bash -eu',
b'#!/bin/bash -eux',
b'#!/bin/sh',
b'#!/usr/bin/env bash',
b'#!/usr/bin/env fish',
b'#!/usr/bin/env pwsh',
b'#!/usr/bin/env python',
b'#!/usr/bin/make -f',
])
integration_shebangs = set([
b'#!/bin/sh',
b'#!/usr/bin/env bash',
b'#!/usr/bin/env python',
])
module_shebangs = {
'': b'#!/usr/bin/python',
'.py': b'#!/usr/bin/python',
'.ps1': b'#!powershell',
}
# see https://unicode.org/faq/utf_bom.html#bom1
byte_order_marks = (
(b'\x00\x00\xFE\xFF', 'UTF-32 (BE)'),
(b'\xFF\xFE\x00\x00', 'UTF-32 (LE)'),
(b'\xFE\xFF', 'UTF-16 (BE)'),
(b'\xFF\xFE', 'UTF-16 (LE)'),
(b'\xEF\xBB\xBF', 'UTF-8'),
)
for path in sys.argv[1:] or sys.stdin.read().splitlines():
with open(path, 'rb') as path_fd:
shebang = path_fd.readline().strip()
mode = os.stat(path).st_mode
executable = (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & mode
if not shebang or not shebang.startswith(b'#!'):
if executable:
print('%s:%d:%d: file without shebang should not be executable' % (path, 0, 0))
for mark, name in byte_order_marks:
if shebang.startswith(mark):
print('%s:%d:%d: file starts with a %s byte order mark' % (path, 0, 0, name))
break
continue
is_module = False
is_integration = False
dirname = os.path.dirname(path)
if path.startswith('lib/ansible/modules/'):
is_module = True
elif re.search('^test/support/[^/]+/plugins/modules/', path):
is_module = True
elif re.search('^test/support/[^/]+/collections/ansible_collections/[^/]+/[^/]+/plugins/modules/', path):
is_module = True
elif path == 'test/lib/ansible_test/_util/target/cli/ansible_test_cli_stub.py':
pass # ansible-test entry point must be executable and have a shebang
elif re.search(r'^lib/ansible/cli/[^/]+\.py', path):
pass # cli entry points must be executable and have a shebang
elif path.startswith('examples/'):
continue # examples trigger some false positives due to location
elif path.startswith('lib/') or path.startswith('test/lib/'):
if executable:
print('%s:%d:%d: should not be executable' % (path, 0, 0))
if shebang:
print('%s:%d:%d: should not have a shebang' % (path, 0, 0))
continue
elif path.startswith('test/integration/targets/') or path.startswith('tests/integration/targets/'):
is_integration = True
if dirname.endswith('/library') or '/plugins/modules' in dirname or dirname in (
# non-standard module library directories
'test/integration/targets/module_precedence/lib_no_extension',
'test/integration/targets/module_precedence/lib_with_extension',
):
is_module = True
elif path.startswith('plugins/modules/'):
is_module = True
if is_module:
if executable:
print('%s:%d:%d: module should not be executable' % (path, 0, 0))
ext = os.path.splitext(path)[1]
expected_shebang = module_shebangs.get(ext)
expected_ext = ' or '.join(['"%s"' % k for k in module_shebangs])
if expected_shebang:
if shebang == expected_shebang:
continue
print('%s:%d:%d: expected module shebang "%s" but found: %s' % (path, 1, 1, expected_shebang, shebang))
else:
print('%s:%d:%d: expected module extension %s but found: %s' % (path, 0, 0, expected_ext, ext))
else:
if is_integration:
allowed = integration_shebangs
else:
allowed = standard_shebangs
if shebang not in allowed:
print('%s:%d:%d: unexpected non-module shebang: %s' % (path, 1, 1, shebang))
if __name__ == '__main__':
main()
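# --- Illustrative sketch added for clarity; not part of the original script. Paths are hypothetical. ---
# Expected outcomes, following the tables above:
#     lib/ansible/modules/ping.py           -> needs '#!/usr/bin/python' and must not be executable
#     plugins/modules/win_command.ps1       -> needs '#!powershell'
#     test/integration/targets/foo/runme.sh -> shebang must come from the integration set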
| 4,672 | Python | .py | 101 | 33.069307 | 123 | 0.516711 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,263 | line-endings.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/code-smell/line-endings.py |
"""Require Unix line endings."""
from __future__ import annotations
import sys
def main():
"""Main entry point."""
for path in sys.argv[1:] or sys.stdin.read().splitlines():
with open(path, 'rb') as path_fd:
contents = path_fd.read()
if b'\r' in contents:
print('%s: use "\\n" for line endings instead of "\\r\\n"' % path)
if __name__ == '__main__':
main()
| 415 | Python | .py | 12 | 28.75 | 78 | 0.571788 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,264 | changelog.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog.py |
"""Check changelog fragment naming, syntax, etc."""
from __future__ import annotations
import os
import sys
import subprocess
def main():
"""Main entry point."""
paths = sys.argv[1:] or sys.stdin.read().splitlines()
allowed_extensions = ('.yml', '.yaml')
config_path = 'changelogs/config.yaml'
# config must be detected independent of the file list since the file list only contains files under test (changed)
has_config = os.path.exists(config_path)
paths_to_check = []
for path in paths:
if path == config_path:
continue
if path.startswith('changelogs/fragments/.'):
if path in ('changelogs/fragments/.keep', 'changelogs/fragments/.gitkeep'):
continue
print('%s:%d:%d: file must not be a dotfile' % (path, 0, 0))
continue
ext = os.path.splitext(path)[1]
if ext not in allowed_extensions:
print('%s:%d:%d: extension must be one of: %s' % (path, 0, 0, ', '.join(allowed_extensions)))
paths_to_check.append(path)
if not has_config:
print('changelogs/config.yaml:0:0: config file does not exist')
return
if not paths_to_check:
return
cmd = [sys.executable, '-m', 'antsibull_changelog', 'lint'] + paths_to_check
# The sphinx module is a soft dependency for rstcheck, which is used by the changelog linter.
# If sphinx is found it will be loaded by rstcheck, which can affect the results of the test.
# To maintain consistency across environments, loading of sphinx is blocked, since any version (or no version) of sphinx may be present.
env = os.environ.copy()
env.update(PYTHONPATH='%s:%s' % (os.path.join(os.path.dirname(__file__), 'changelog'), env['PYTHONPATH']))
# ignore the return code, rely on the output instead
process = subprocess.run(cmd, stdin=subprocess.DEVNULL, capture_output=True, text=True, env=env, check=False)
sys.stdout.write(process.stdout)
sys.stderr.write(process.stderr)
if __name__ == '__main__':
main()
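# --- Illustrative sketch added for clarity; not part of the original script. Fragment names are hypothetical. ---
#     changelogs/fragments/12345-fix-timeout.yml  -> linted via antsibull_changelog
#     changelogs/fragments/notes.txt              -> rejected: extension must be .yml or .yaml
#     changelogs/fragments/.draft.yml             -> rejected: fragments must not be dotfiles
# If changelogs/config.yaml is missing, an error is printed and no fragments are linted.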
| 2,070 | Python | .py | 42 | 42.52381 | 140 | 0.662687 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,265 | runtime-metadata.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/code-smell/runtime-metadata.py |
"""Schema validation of ansible-core's ansible_builtin_runtime.yml and collection's meta/runtime.yml"""
from __future__ import annotations
import datetime
import os
import re
import sys
from collections.abc import Sequence, Mapping
from functools import partial
import yaml
from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA
from voluptuous import Required, Schema, Invalid
from voluptuous.humanize import humanize_error
from ansible.module_utils.compat.version import StrictVersion, LooseVersion
from ansible.module_utils.six import string_types
from ansible.utils.collection_loader import AnsibleCollectionRef
from ansible.utils.version import SemanticVersion
def fqcr(value):
"""Validate a FQCR."""
if not isinstance(value, string_types):
raise Invalid('Must be a string that is a FQCR')
if not AnsibleCollectionRef.is_valid_fqcr(value):
raise Invalid('Must be a FQCR')
return value
def fqcr_or_shortname(value):
"""Validate a FQCR or a shortname."""
if not isinstance(value, string_types):
raise Invalid('Must be a string that is a FQCR or a short name')
if '.' in value and not AnsibleCollectionRef.is_valid_fqcr(value):
raise Invalid('Must be a FQCR or a short name')
return value
def isodate(value, check_deprecation_date=False, is_tombstone=False):
"""Validate a datetime.date or ISO 8601 date string."""
# datetime.date objects come from YAML dates, these are ok
if isinstance(value, datetime.date):
removal_date = value
else:
# make sure we have a string
msg = 'Expected ISO 8601 date string (YYYY-MM-DD), or YAML date'
if not isinstance(value, string_types):
raise Invalid(msg)
        # From Python 3.7 on, there is datetime.date.fromisoformat(). For older versions,
        # we have to do things manually.
if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value):
raise Invalid(msg)
try:
removal_date = datetime.datetime.strptime(value, '%Y-%m-%d').date()
except ValueError:
raise Invalid(msg)
# Make sure date is correct
today = datetime.date.today()
if is_tombstone:
# For a tombstone, the removal date must be in the past
if today < removal_date:
raise Invalid(
'The tombstone removal_date (%s) must not be after today (%s)' % (removal_date, today))
else:
# For a deprecation, the removal date must be in the future. Only test this if
# check_deprecation_date is truish, to avoid checks to suddenly start to fail.
if check_deprecation_date and today > removal_date:
raise Invalid(
'The deprecation removal_date (%s) must be after today (%s)' % (removal_date, today))
return value
def removal_version(value, is_ansible, current_version=None, is_tombstone=False):
"""Validate a removal version string."""
msg = (
'Removal version must be a string' if is_ansible else
'Removal version must be a semantic version (https://semver.org/)'
)
if not isinstance(value, string_types):
raise Invalid(msg)
try:
if is_ansible:
version = StrictVersion()
version.parse(value)
version = LooseVersion(value) # We're storing Ansible's version as a LooseVersion
else:
version = SemanticVersion()
version.parse(value)
if version.major != 0 and (version.minor != 0 or version.patch != 0):
raise Invalid('removal_version (%r) must be a major release, not a minor or patch release '
'(see specification at https://semver.org/)' % (value, ))
if current_version is not None:
if is_tombstone:
# For a tombstone, the removal version must not be in the future
if version > current_version:
raise Invalid('The tombstone removal_version (%r) must not be after the '
'current version (%s)' % (value, current_version))
else:
# For a deprecation, the removal version must be in the future
if version <= current_version:
raise Invalid('The deprecation removal_version (%r) must be after the '
'current version (%s)' % (value, current_version))
except ValueError:
raise Invalid(msg)
return value
def any_value(value):
"""Accepts anything."""
return value
def get_ansible_version():
"""Return current ansible-core version"""
from ansible.release import __version__
return LooseVersion('.'.join(__version__.split('.')[:3]))
def get_collection_version():
"""Return current collection version, or None if it is not available"""
import importlib.util
collection_detail_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'tools', 'collection_detail.py')
collection_detail_spec = importlib.util.spec_from_file_location('collection_detail', collection_detail_path)
collection_detail = importlib.util.module_from_spec(collection_detail_spec)
sys.modules['collection_detail'] = collection_detail
collection_detail_spec.loader.exec_module(collection_detail)
# noinspection PyBroadException
try:
result = collection_detail.read_manifest_json('.') or collection_detail.read_galaxy_yml('.')
version = SemanticVersion()
version.parse(result['version'])
return version
except Exception: # pylint: disable=broad-except
# We do not care why it fails, in case we cannot get the version
# just return None to indicate "we don't know".
return None
def validate_metadata_file(path, is_ansible, check_deprecation_dates=False):
"""Validate explicit runtime metadata file"""
try:
with open(path, 'r', encoding='utf-8') as f_path:
routing = yaml.safe_load(f_path)
except yaml.error.MarkedYAMLError as ex:
print('%s:%d:%d: YAML load failed: %s' % (
path,
ex.context_mark.line + 1 if ex.context_mark else 0,
ex.context_mark.column + 1 if ex.context_mark else 0,
re.sub(r'\s+', ' ', str(ex)),
))
return
except Exception as ex: # pylint: disable=broad-except
print('%s:%d:%d: YAML load failed: %s' % (path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
return
if is_ansible:
current_version = get_ansible_version()
else:
current_version = get_collection_version()
# Updates to schema MUST also be reflected in the documentation
# ~https://docs.ansible.com/ansible-core/devel/dev_guide/developing_collections.html
# plugin_routing schema
avoid_additional_data = Schema(
Any(
{
Required('removal_version'): any_value,
'warning_text': any_value,
},
{
Required('removal_date'): any_value,
'warning_text': any_value,
}
),
extra=PREVENT_EXTRA
)
deprecation_schema = All(
# The first schema validates the input, and the second makes sure no extra keys are specified
Schema(
{
'removal_version': partial(removal_version, is_ansible=is_ansible,
current_version=current_version),
'removal_date': partial(isodate, check_deprecation_date=check_deprecation_dates),
'warning_text': Any(*string_types),
}
),
avoid_additional_data
)
tombstoning_schema = All(
# The first schema validates the input, and the second makes sure no extra keys are specified
Schema(
{
'removal_version': partial(removal_version, is_ansible=is_ansible,
current_version=current_version, is_tombstone=True),
'removal_date': partial(isodate, is_tombstone=True),
'warning_text': Any(*string_types),
}
),
avoid_additional_data
)
plugins_routing_common_schema = Schema({
('deprecation'): Any(deprecation_schema),
('tombstone'): Any(tombstoning_schema),
('redirect'): fqcr,
}, extra=PREVENT_EXTRA)
plugin_routing_schema = Any(plugins_routing_common_schema)
# Adjusted schema for modules only
plugin_routing_schema_modules = Any(
plugins_routing_common_schema.extend({
('action_plugin'): fqcr}
)
)
# Adjusted schema for module_utils
plugin_routing_schema_mu = Any(
plugins_routing_common_schema.extend({
('redirect'): Any(*string_types)}
),
)
list_dict_plugin_routing_schema = [{str_type: plugin_routing_schema}
for str_type in string_types]
list_dict_plugin_routing_schema_mu = [{str_type: plugin_routing_schema_mu}
for str_type in string_types]
list_dict_plugin_routing_schema_modules = [{str_type: plugin_routing_schema_modules}
for str_type in string_types]
plugin_schema = Schema({
('action'): Any(None, *list_dict_plugin_routing_schema),
('become'): Any(None, *list_dict_plugin_routing_schema),
('cache'): Any(None, *list_dict_plugin_routing_schema),
('callback'): Any(None, *list_dict_plugin_routing_schema),
('cliconf'): Any(None, *list_dict_plugin_routing_schema),
('connection'): Any(None, *list_dict_plugin_routing_schema),
('doc_fragments'): Any(None, *list_dict_plugin_routing_schema),
('filter'): Any(None, *list_dict_plugin_routing_schema),
('httpapi'): Any(None, *list_dict_plugin_routing_schema),
('inventory'): Any(None, *list_dict_plugin_routing_schema),
('lookup'): Any(None, *list_dict_plugin_routing_schema),
('module_utils'): Any(None, *list_dict_plugin_routing_schema_mu),
('modules'): Any(None, *list_dict_plugin_routing_schema_modules),
('netconf'): Any(None, *list_dict_plugin_routing_schema),
('shell'): Any(None, *list_dict_plugin_routing_schema),
('strategy'): Any(None, *list_dict_plugin_routing_schema),
('terminal'): Any(None, *list_dict_plugin_routing_schema),
('test'): Any(None, *list_dict_plugin_routing_schema),
('vars'): Any(None, *list_dict_plugin_routing_schema),
}, extra=PREVENT_EXTRA)
# import_redirection schema
import_redirection_schema = Any(
Schema({
('redirect'): Any(*string_types),
# import_redirect doesn't currently support deprecation
}, extra=PREVENT_EXTRA)
)
list_dict_import_redirection_schema = [{str_type: import_redirection_schema}
for str_type in string_types]
# action_groups schema
def at_most_one_dict(value):
if isinstance(value, Sequence):
if sum(1 for v in value if isinstance(v, Mapping)) > 1:
raise Invalid('List must contain at most one dictionary')
return value
metadata_dict = Schema({
Required('metadata'): Schema({
'extend_group': [fqcr_or_shortname],
}, extra=PREVENT_EXTRA)
}, extra=PREVENT_EXTRA)
action_group_schema = All([metadata_dict, fqcr_or_shortname], at_most_one_dict)
list_dict_action_groups_schema = [{str_type: action_group_schema} for str_type in string_types]
# top level schema
schema = Schema({
# All of these are optional
('plugin_routing'): Any(plugin_schema),
('import_redirection'): Any(None, *list_dict_import_redirection_schema),
# requires_ansible: In the future we should validate this with SpecifierSet
('requires_ansible'): Any(*string_types),
('action_groups'): Any(*list_dict_action_groups_schema),
}, extra=PREVENT_EXTRA)
# Ensure schema is valid
try:
schema(routing)
except MultipleInvalid as ex:
for error in ex.errors:
# No way to get line/column numbers
print('%s:%d:%d: %s' % (path, 0, 0, humanize_error(routing, error)))
def main():
"""Main entry point."""
paths = sys.argv[1:] or sys.stdin.read().splitlines()
collection_legacy_file = 'meta/routing.yml'
collection_runtime_file = 'meta/runtime.yml'
# This is currently disabled, because if it is enabled this test can start failing
# at a random date. For this to be properly activated, we (a) need to be able to return
# codes for this test, and (b) make this error optional.
check_deprecation_dates = False
for path in paths:
if path == collection_legacy_file:
print('%s:%d:%d: %s' % (path, 0, 0, ("Should be called '%s'" % collection_runtime_file)))
continue
validate_metadata_file(
path,
is_ansible=path not in (collection_legacy_file, collection_runtime_file),
check_deprecation_dates=check_deprecation_dates)
if __name__ == '__main__':
main()
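# --- Illustrative sketch, not part of the original file ---
# A hypothetical, minimal routing document run through validate_metadata_file() above.
# Assumes ansible-core is importable so get_ansible_version() can resolve the current version.
import os
import tempfile
minimal_runtime = (
    "plugin_routing:\n"
    "  modules:\n"
    "    fictional_old_name:\n"
    "      redirect: ansible.builtin.ping\n"
    "requires_ansible: '>=2.15.0'\n"
)
scratch_dir = tempfile.mkdtemp()
scratch_path = os.path.join(scratch_dir, 'runtime.yml')
with open(scratch_path, 'w', encoding='utf-8') as handle:
    handle.write(minimal_runtime)
# Prints nothing when the document satisfies the schema; otherwise it emits
# 'path:line:column: message' diagnostics, just as main() does for each input path.
validate_metadata_file(scratch_path, is_ansible=True)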
| 13,273 | Python | .py | 282 | 37.77305 | 135 | 0.6293 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,266 | action-plugin-docs.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/code-smell/action-plugin-docs.py |
"""Test to verify action plugins have an associated module to provide documentation."""
from __future__ import annotations
import os
import sys
def main():
"""Main entry point."""
paths = sys.argv[1:] or sys.stdin.read().splitlines()
module_names = set()
module_prefixes = {
'lib/ansible/modules/': True,
'plugins/modules/': False,
}
action_prefixes = {
'lib/ansible/plugins/action/': True,
'plugins/action/': False,
}
for path in paths:
full_name = get_full_name(path, module_prefixes)
if full_name:
module_names.add(full_name)
for path in paths:
full_name = get_full_name(path, action_prefixes, extensions=('.py',))
if full_name and full_name not in module_names:
print('%s: action plugin has no matching module to provide documentation' % path)
def get_full_name(path: str, prefixes: dict[str, bool], extensions: tuple[str] | None = None) -> str | None:
"""Return the full name of the plugin at the given path by matching against the given path prefixes, or None if no match is found."""
for prefix, flat in prefixes.items():
if path.startswith(prefix):
relative_path = os.path.relpath(path, prefix)
if flat:
full_name = os.path.basename(relative_path)
else:
full_name = relative_path
full_name, file_ext = os.path.splitext(full_name)
name = os.path.basename(full_name)
if name == '__init__':
return None
if extensions and file_ext not in extensions:
return None
if name.startswith('_'):
name = name[1:]
full_name = os.path.join(os.path.dirname(full_name), name).replace(os.path.sep, '.')
return full_name
return None
if __name__ == '__main__':
main()
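# --- Illustrative sketch, not part of the original file ---
# Hypothetical inputs showing how get_full_name() above maps file paths to plugin names.
example_prefixes = {'lib/ansible/modules/': True, 'plugins/modules/': False}
print(get_full_name('lib/ansible/modules/ping.py', example_prefixes))        # -> 'ping' (flat layout keeps only the basename)
print(get_full_name('plugins/modules/cloud/instance.py', example_prefixes))  # -> 'cloud.instance' (nested layout keeps the subpath)
print(get_full_name('plugins/modules/cloud/__init__.py', example_prefixes))  # -> None (__init__.py is never a plugin)
print(get_full_name('docs/modules/foo.py', example_prefixes))                # -> None (no prefix matches)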
| 1,926 | Python | .py | 46 | 32.804348 | 137 | 0.602585 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,267 | symlinks.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/code-smell/symlinks.py |
"""Check for unwanted symbolic links."""
from __future__ import annotations
import os
import sys
def main():
"""Main entry point."""
root_dir = os.getcwd() + os.path.sep
for path in sys.argv[1:] or sys.stdin.read().splitlines():
if not os.path.islink(path.rstrip(os.path.sep)):
continue
if not os.path.exists(path):
print('%s: broken symlinks are not allowed' % path)
continue
if path.endswith(os.path.sep):
print('%s: symlinks to directories are not allowed' % path)
continue
real_path = os.path.realpath(path)
if not real_path.startswith(root_dir):
print('%s: symlinks outside content tree are not allowed: %s' % (path, os.path.relpath(real_path, os.path.dirname(path))))
continue
if __name__ == '__main__':
main()
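# --- Illustrative sketch, not part of the original file ---
# A hypothetical demonstration of two of the conditions checked above, using a throwaway
# directory; assumes a POSIX filesystem where unprivileged symlink creation is allowed.
import os
import tempfile
scratch = tempfile.mkdtemp()
os.symlink('missing-target', os.path.join(scratch, 'dangling'))  # points at nothing
os.symlink(os.sep + 'etc', os.path.join(scratch, 'outside'))     # points outside the tree
dangling = os.path.join(scratch, 'dangling')
print(os.path.islink(dangling), os.path.exists(dangling))        # True False -> 'broken symlinks are not allowed'
content_root = scratch + os.path.sep                             # mirrors root_dir = os.getcwd() + os.path.sep
print(os.path.realpath(os.path.join(scratch, 'outside')).startswith(content_root))  # False on a typical Linux layout -> flagged as outside the content tree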
| 867 | Python | .py | 22 | 31.590909 | 134 | 0.610778 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,268 | empty-init.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/code-smell/empty-init.py |
"""Require empty __init__.py files."""
from __future__ import annotations
import os
import sys
def main():
"""Main entry point."""
for path in sys.argv[1:] or sys.stdin.read().splitlines():
if os.path.getsize(path) > 0:
print('%s: empty __init__.py required' % path)
if __name__ == '__main__':
main()
| 338 | Python | .py | 11 | 26.363636 | 62 | 0.596273 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,269 | no-smart-quotes.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/code-smell/no-smart-quotes.py |
"""Disallow use of Unicode quotes."""
# -*- coding: utf-8 -*-
from __future__ import annotations
import re
import sys
def main():
"""Main entry point."""
for path in sys.argv[1:] or sys.stdin.read().splitlines():
with open(path, 'rb') as path_fd:
for line, text in enumerate(path_fd.readlines()):
try:
text = text.decode('utf-8')
except UnicodeDecodeError as ex:
print('%s:%d:%d: UnicodeDecodeError: %s' % (path, line + 1, ex.start + 1, ex))
continue
match = re.search('([‘’“”])', text)
if match:
print('%s:%d:%d: use ASCII quotes `\'` and `"` instead of Unicode quotes' % (
path, line + 1, match.start(1) + 1))
if __name__ == '__main__':
main()
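# --- Illustrative sketch, not part of the original file ---
# A hypothetical line run through the same regular expression used above; the reported
# column is 1-based, matching the '%s:%d:%d' format printed by main().
import re
sample = 'say “hello” politely'
found = re.search('([‘’“”])', sample)
print(found.start(1) + 1 if found else None)  # 5 -> the opening smart quote sits in column 5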
| 861 | Python | .py | 21 | 29.761905 | 98 | 0.49697 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,270 | sphinx.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog/sphinx.py |
"""Block the sphinx module from being loaded."""
from __future__ import annotations
raise ImportError('The sphinx module has been prevented from loading to maintain consistent test results.')
| 193 | Python | .py | 3 | 63 | 107 | 0.798942 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,271 | validate.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate.py |
from __future__ import annotations
from validate_modules.main import main
if __name__ == '__main__':
main()
| 114 | Python | .py | 4 | 26 | 38 | 0.685185 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,272 | module_args.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/module_args.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Matt Martz <matt@sivel.net>
# Copyright (C) 2016 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import runpy
import inspect
import json
import os
import subprocess
import sys
from contextlib import contextmanager
from ansible.executor.powershell.module_manifest import PSModuleDepFinder
from ansible.module_utils.basic import FILE_COMMON_ARGUMENTS, AnsibleModule
from ansible.module_utils.six import reraise
from ansible.module_utils.common.text.converters import to_bytes, to_text
from .utils import CaptureStd, find_executable, get_module_name_from_filename
ANSIBLE_MODULE_CONSTRUCTOR_ARGS = tuple(list(inspect.signature(AnsibleModule.__init__).parameters)[1:])
class AnsibleModuleCallError(RuntimeError):
pass
class AnsibleModuleImportError(ImportError):
pass
class AnsibleModuleNotInitialized(Exception):
pass
class _FakeAnsibleModuleInit:
def __init__(self):
self.args = tuple()
self.kwargs = {}
self.called = False
def __call__(self, *args, **kwargs):
if args and isinstance(args[0], AnsibleModule):
# Make sure, due to creative calling, that we didn't end up with
# ``self`` in ``args``
self.args = args[1:]
else:
self.args = args
self.kwargs = kwargs
self.called = True
raise AnsibleModuleCallError('AnsibleModuleCallError')
def _fake_load_params():
pass
@contextmanager
def setup_env(filename):
# Used to clean up imports later
pre_sys_modules = list(sys.modules.keys())
fake = _FakeAnsibleModuleInit()
module = __import__('ansible.module_utils.basic').module_utils.basic
_original_init = module.AnsibleModule.__init__
_original_load_params = module._load_params
setattr(module.AnsibleModule, '__init__', fake)
setattr(module, '_load_params', _fake_load_params)
try:
yield fake
finally:
setattr(module.AnsibleModule, '__init__', _original_init)
setattr(module, '_load_params', _original_load_params)
# Clean up imports to prevent issues with mutable data being used in modules
for k in list(sys.modules.keys()):
# It's faster if we limit to items in ansible.module_utils
# But if this causes problems later, we should remove it
if k not in pre_sys_modules and k.startswith('ansible.module_utils.'):
del sys.modules[k]
def get_ps_argument_spec(filename, collection):
fqc_name = get_module_name_from_filename(filename, collection)
pwsh = find_executable('pwsh')
if not pwsh:
raise FileNotFoundError('Required program for PowerShell arg spec inspection "pwsh" not found.')
module_path = os.path.join(os.getcwd(), filename)
b_module_path = to_bytes(module_path, errors='surrogate_or_strict')
with open(b_module_path, mode='rb') as module_fd:
b_module_data = module_fd.read()
ps_dep_finder = PSModuleDepFinder()
ps_dep_finder.scan_module(b_module_data, fqn=fqc_name)
# For ps_argspec.ps1 to compile Ansible.Basic it also needs the AddType module_util.
ps_dep_finder._add_module(name=b"Ansible.ModuleUtils.AddType", ext=".psm1", fqn=None, optional=False, wrapper=False)
util_manifest = json.dumps({
'module_path': to_text(module_path, errors='surrogate_or_strict'),
'ansible_basic': ps_dep_finder.cs_utils_module["Ansible.Basic"]['path'],
'ps_utils': {name: info['path'] for name, info in ps_dep_finder.ps_modules.items()}
})
script_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ps_argspec.ps1')
proc = subprocess.run(['pwsh', script_path, util_manifest], stdin=subprocess.DEVNULL, capture_output=True, text=True, check=False)
if proc.returncode != 0:
raise AnsibleModuleImportError("STDOUT:\n%s\nSTDERR:\n%s" % (proc.stdout, proc.stderr))
kwargs = json.loads(proc.stdout)
# the validate-modules code expects the options spec to be under the argument_spec key not options as set in PS
kwargs['argument_spec'] = kwargs.pop('options', {})
return kwargs['argument_spec'], kwargs
def get_py_argument_spec(filename, collection):
name = get_module_name_from_filename(filename, collection)
with setup_env(filename) as fake:
try:
with CaptureStd():
runpy.run_module(name, run_name='__main__', alter_sys=True)
except AnsibleModuleCallError:
pass
except BaseException as e:
# we want to catch all exceptions here, including sys.exit
reraise(AnsibleModuleImportError, AnsibleModuleImportError('%s' % e), sys.exc_info()[2])
if not fake.called:
raise AnsibleModuleNotInitialized()
try:
# Convert positional arguments to kwargs to make sure that all parameters are actually checked
for arg, arg_name in zip(fake.args, ANSIBLE_MODULE_CONSTRUCTOR_ARGS):
fake.kwargs[arg_name] = arg
# for ping kwargs == {'argument_spec':{'data':{'type':'str','default':'pong'}}, 'supports_check_mode':True}
argument_spec = fake.kwargs.get('argument_spec') or {}
# If add_file_common_args is truish, add options from FILE_COMMON_ARGUMENTS when not present.
# This is the only modification to argument_spec done by AnsibleModule itself, and which is
# not caught by setup_env's AnsibleModule replacement
if fake.kwargs.get('add_file_common_args'):
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in argument_spec:
argument_spec[k] = v
return argument_spec, fake.kwargs
except (TypeError, IndexError):
return {}, {}
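# --- Illustrative sketch, not part of the original file ---
# A hypothetical call to get_py_argument_spec() above for the builtin ping module.
# Assumes ansible-core is importable so runpy can resolve 'ansible.modules.ping'.
spec, kwargs = get_py_argument_spec('lib/ansible/modules/ping.py', collection=None)
print(spec)                               # {'data': {'type': 'str', 'default': 'pong'}}
print(kwargs.get('supports_check_mode'))  # True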
| 6,422 | Python | .py | 130 | 43.053846 | 134 | 0.692468 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,273 | utils.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Matt Martz <matt@sivel.net>
# Copyright (C) 2015 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import ast
import datetime
import os
import re
import sys
from io import BytesIO, TextIOWrapper
import yaml
import yaml.reader
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.yaml import SafeLoader
from ansible.module_utils.six import string_types
from ansible.parsing.yaml.loader import AnsibleLoader
class AnsibleTextIOWrapper(TextIOWrapper):
def write(self, s):
super(AnsibleTextIOWrapper, self).write(to_text(s, self.encoding, errors='replace'))
def find_executable(executable, cwd=None, path=None):
"""Finds the full path to the executable specified"""
match = None
real_cwd = os.getcwd()
if not cwd:
cwd = real_cwd
if os.path.dirname(executable):
target = os.path.join(cwd, executable)
if os.path.exists(target) and os.access(target, os.F_OK | os.X_OK):
match = executable
else:
path = os.environ.get('PATH', os.path.defpath)
path_dirs = path.split(os.path.pathsep)
seen_dirs = set()
for path_dir in path_dirs:
if path_dir in seen_dirs:
continue
seen_dirs.add(path_dir)
if os.path.abspath(path_dir) == real_cwd:
path_dir = cwd
candidate = os.path.join(path_dir, executable)
if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
match = candidate
break
return match
def find_globals(g, tree):
"""Uses AST to find globals in an ast tree"""
for child in tree:
if hasattr(child, 'body') and isinstance(child.body, list):
find_globals(g, child.body)
elif isinstance(child, (ast.FunctionDef, ast.ClassDef)):
g.add(child.name)
continue
elif isinstance(child, ast.Assign):
try:
g.add(child.targets[0].id)
except (IndexError, AttributeError):
pass
elif isinstance(child, ast.Import):
g.add(child.names[0].name)
elif isinstance(child, ast.ImportFrom):
for name in child.names:
g_name = name.asname or name.name
if g_name == '*':
continue
g.add(g_name)
class CaptureStd():
"""Context manager to handle capturing stderr and stdout"""
def __enter__(self):
self.sys_stdout = sys.stdout
self.sys_stderr = sys.stderr
sys.stdout = self.stdout = AnsibleTextIOWrapper(BytesIO(), encoding=self.sys_stdout.encoding)
sys.stderr = self.stderr = AnsibleTextIOWrapper(BytesIO(), encoding=self.sys_stderr.encoding)
return self
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout = self.sys_stdout
sys.stderr = self.sys_stderr
def get(self):
"""Return ``(stdout, stderr)``"""
return self.stdout.buffer.getvalue(), self.stderr.buffer.getvalue()
def get_module_name_from_filename(filename, collection):
# Calculate the module's name so that relative imports work correctly
if collection:
# collection is a relative path, example: ansible_collections/my_namespace/my_collection
# filename is a relative path, example: plugins/modules/my_module.py
path = os.path.join(collection, filename)
else:
# filename is a relative path, example: lib/ansible/modules/system/ping.py
path = os.path.relpath(filename, 'lib')
name = os.path.splitext(path)[0].replace(os.path.sep, '.')
return name
def parse_yaml(value, lineno, module, name, load_all=False, ansible_loader=False):
traces = []
errors = []
data = None
if load_all:
yaml_load = yaml.load_all
else:
yaml_load = yaml.load
if ansible_loader:
loader = AnsibleLoader
else:
loader = SafeLoader
try:
data = yaml_load(value, Loader=loader)
if load_all:
data = list(data)
except yaml.MarkedYAMLError as e:
errors.append({
'msg': '%s is not valid YAML' % name,
'line': e.problem_mark.line + lineno,
'column': e.problem_mark.column + 1
})
traces.append(e)
except yaml.reader.ReaderError as e:
traces.append(e)
# TODO: Better line/column detection
errors.append({
'msg': ('%s is not valid YAML. Character '
'0x%x at position %d.' % (name, e.character, e.position)),
'line': lineno
})
except yaml.YAMLError as e:
traces.append(e)
errors.append({
'msg': '%s is not valid YAML: %s: %s' % (name, type(e), e),
'line': lineno
})
return data, errors, traces
def is_empty(value):
"""Evaluate null like values excluding False"""
if value is False:
return False
return not bool(value)
def compare_unordered_lists(a, b):
"""Safe list comparisons
Supports:
- unordered lists
- unhashable elements
"""
return len(a) == len(b) and all(x in b for x in a) and all(x in a for x in b)
class NoArgsAnsibleModule(AnsibleModule):
"""AnsibleModule that does not actually load params. This is used to get access to the
methods within AnsibleModule without having to fake a bunch of data
"""
def _load_params(self):
self.params = {'_ansible_selinux_special_fs': [], '_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False, '_ansible_check_mode': False}
def parse_isodate(v, allow_date):
if allow_date:
if isinstance(v, datetime.date):
return v
msg = 'Expected ISO 8601 date string (YYYY-MM-DD) or YAML date'
else:
msg = 'Expected ISO 8601 date string (YYYY-MM-DD)'
if not isinstance(v, string_types):
raise ValueError(msg)
    # From Python 3.7 on, there is datetime.date.fromisoformat(). For older versions,
# we have to do things manually.
if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', v):
raise ValueError(msg)
try:
return datetime.datetime.strptime(v, '%Y-%m-%d').date()
except ValueError:
raise ValueError(msg)
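# --- Illustrative sketch, not part of the original file ---
# Hypothetical quick checks of the pure helpers defined above.
print(compare_unordered_lists([1, {'a': 1}], [{'a': 1}, 1]))  # True: order-insensitive and tolerant of unhashable items
print(is_empty(''))                                           # True: an empty string counts as a null-like value
print(is_empty(False))                                        # False: False is explicitly excluded
print(repr(parse_isodate('2024-01-31', allow_date=False)))    # datetime.date(2024, 1, 31)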
| 7,081 | Python | .py | 178 | 32.488764 | 155 | 0.641639 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,274 | __init__.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/__init__.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Matt Martz <matt@sivel.net>
# Copyright (C) 2015 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
| 816 | Python | .py | 18 | 44.333333 | 74 | 0.734336 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,275 | schema.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/schema.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Matt Martz <matt@sivel.net>
# Copyright: (c) 2015, Rackspace US, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import re
from ansible.module_utils.compat.version import StrictVersion
from functools import partial
from urllib.parse import urlparse
from voluptuous import ALLOW_EXTRA, PREVENT_EXTRA, All, Any, Invalid, Length, MultipleInvalid, Required, Schema, Self, ValueInvalid, Exclusive
from ansible.constants import DOCUMENTABLE_PLUGINS
from ansible.module_utils.six import string_types
from ansible.module_utils.common.collections import is_iterable
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.parsing.quoting import unquote
from ansible.utils.version import SemanticVersion
from ansible.release import __version__
from antsibull_docs_parser import dom
from antsibull_docs_parser.parser import parse, Context
from .utils import parse_isodate
list_string_types = list(string_types)
tuple_string_types = tuple(string_types)
any_string_types = Any(*string_types)
# Valid DOCUMENTATION.author lines
# Based on Ansibulbot's extract_github_id()
# author: First Last (@name) [optional anything]
# "Ansible Core Team" - Used by the Bot
# "Michael DeHaan" - nop
# "OpenStack Ansible SIG" - OpenStack does not use GitHub
# "Name (!UNKNOWN)" - For the few untraceable authors
author_line = re.compile(r'^\w.*(\(@([\w-]+)\)|!UNKNOWN)(?![\w.])|^Ansible Core Team$|^Michael DeHaan$|^OpenStack Ansible SIG$')
def _add_ansible_error_code(exception, error_code):
setattr(exception, 'ansible_error_code', error_code)
return exception
def isodate(v, error_code=None):
try:
parse_isodate(v, allow_date=True)
except ValueError as e:
raise _add_ansible_error_code(Invalid(str(e)), error_code or 'ansible-invalid-date')
return v
COLLECTION_NAME_RE = re.compile(r'^\w+(?:\.\w+)+$')
FULLY_QUALIFIED_COLLECTION_RESOURCE_RE = re.compile(r'^\w+(?:\.\w+){2,}$')
def collection_name(v, error_code=None):
if not isinstance(v, string_types):
raise _add_ansible_error_code(
Invalid('Collection name must be a string'), error_code or 'collection-invalid-name')
m = COLLECTION_NAME_RE.match(v)
if not m:
raise _add_ansible_error_code(
Invalid('Collection name must be of format `<namespace>.<name>`'), error_code or 'collection-invalid-name')
return v
def deprecation_versions():
"""Create a list of valid version for deprecation entries, current+4"""
major, minor = [int(version) for version in __version__.split('.')[0:2]]
return Any(*['{0}.{1}'.format(major, minor + increment) for increment in range(0, 5)])
def version(for_collection=False):
if for_collection:
# We do not accept floats for versions in collections
return Any(*string_types)
return Any(float, *string_types)
def date(error_code=None):
return Any(isodate, error_code=error_code)
def require_only_one(keys):
def f(obj):
found = None
for k in obj.keys():
if k in keys:
if found is None:
found = k
else:
raise Invalid('Found conflicting keys, must contain only one of {}'.format(keys))
if found is None:
raise Invalid('Must contain one of {}'.format(keys))
return obj
return f
# Roles can also be referenced by semantic markup
_VALID_PLUGIN_TYPES = set(DOCUMENTABLE_PLUGINS + ('role', ))
def _check_url(directive, content):
try:
parsed_url = urlparse(content)
if parsed_url.scheme not in ('', 'http', 'https'):
            raise ValueError('Scheme must be HTTP, HTTPS, or not specified')
return []
except ValueError:
return [_add_ansible_error_code(
Invalid('Directive %s must contain a valid URL' % directive), 'invalid-documentation-markup')]
def doc_string(v):
"""Match a documentation string."""
if not isinstance(v, string_types):
raise _add_ansible_error_code(
Invalid('Must be a string'), 'invalid-documentation')
errors = []
for par in parse(v, Context(), errors='message', strict=True, add_source=True):
for part in par:
if part.type == dom.PartType.ERROR:
errors.append(_add_ansible_error_code(Invalid(part.message), 'invalid-documentation-markup'))
if part.type == dom.PartType.URL:
errors.extend(_check_url('U()', part.url))
if part.type == dom.PartType.LINK:
errors.extend(_check_url('L()', part.url))
if part.type == dom.PartType.MODULE:
if not FULLY_QUALIFIED_COLLECTION_RESOURCE_RE.match(part.fqcn):
errors.append(_add_ansible_error_code(Invalid(
'Directive "%s" must contain a FQCN; found "%s"' % (part.source, part.fqcn)),
'invalid-documentation-markup'))
if part.type == dom.PartType.PLUGIN:
if not FULLY_QUALIFIED_COLLECTION_RESOURCE_RE.match(part.plugin.fqcn):
errors.append(_add_ansible_error_code(Invalid(
'Directive "%s" must contain a FQCN; found "%s"' % (part.source, part.plugin.fqcn)),
'invalid-documentation-markup'))
if part.plugin.type not in _VALID_PLUGIN_TYPES:
errors.append(_add_ansible_error_code(Invalid(
'Directive "%s" must contain a valid plugin type; found "%s"' % (part.source, part.plugin.type)),
'invalid-documentation-markup'))
if part.type == dom.PartType.OPTION_NAME:
if part.plugin is not None and not FULLY_QUALIFIED_COLLECTION_RESOURCE_RE.match(part.plugin.fqcn):
errors.append(_add_ansible_error_code(Invalid(
'Directive "%s" must contain a FQCN; found "%s"' % (part.source, part.plugin.fqcn)),
'invalid-documentation-markup'))
if part.plugin is not None and part.plugin.type not in _VALID_PLUGIN_TYPES:
errors.append(_add_ansible_error_code(Invalid(
'Directive "%s" must contain a valid plugin type; found "%s"' % (part.source, part.plugin.type)),
'invalid-documentation-markup'))
if part.type == dom.PartType.RETURN_VALUE:
if part.plugin is not None and not FULLY_QUALIFIED_COLLECTION_RESOURCE_RE.match(part.plugin.fqcn):
errors.append(_add_ansible_error_code(Invalid(
'Directive "%s" must contain a FQCN; found "%s"' % (part.source, part.plugin.fqcn)),
'invalid-documentation-markup'))
if part.plugin is not None and part.plugin.type not in _VALID_PLUGIN_TYPES:
errors.append(_add_ansible_error_code(Invalid(
'Directive "%s" must contain a valid plugin type; found "%s"' % (part.source, part.plugin.type)),
'invalid-documentation-markup'))
if len(errors) == 1:
raise errors[0]
if errors:
raise MultipleInvalid(errors)
return v
doc_string_or_strings = Any(doc_string, [doc_string])
def is_callable(v):
if not callable(v):
raise ValueInvalid('not a valid value')
return v
def sequence_of_sequences(min=None, max=None):
return All(
Any(
None,
[Any(list, tuple)],
tuple([Any(list, tuple)]),
),
Any(
None,
[Length(min=min, max=max)],
tuple([Length(min=min, max=max)]),
),
)
seealso_schema = Schema(
[
Any(
{
Required('module'): Any(*string_types),
'description': doc_string,
},
{
Required('plugin'): Any(*string_types),
Required('plugin_type'): Any(*DOCUMENTABLE_PLUGINS),
'description': doc_string,
},
{
Required('ref'): Any(*string_types),
Required('description'): doc_string,
},
{
Required('name'): Any(*string_types),
Required('link'): Any(*string_types),
Required('description'): doc_string,
},
),
]
)
argument_spec_types = ['bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw',
'sid', 'str']
argument_spec_modifiers = {
'mutually_exclusive': sequence_of_sequences(min=2),
'required_together': sequence_of_sequences(min=2),
'required_one_of': sequence_of_sequences(min=2),
'required_if': sequence_of_sequences(min=3, max=4),
'required_by': Schema({str: Any(list_string_types, tuple_string_types, *string_types)}),
}
def no_required_with_default(v):
if v.get('default') and v.get('required'):
raise Invalid('required=True cannot be supplied with a default')
return v
def elements_with_list(v):
if v.get('elements') and v.get('type') != 'list':
raise Invalid('type must be list to use elements')
return v
def options_with_apply_defaults(v):
if v.get('apply_defaults') and not v.get('options'):
raise Invalid('apply_defaults=True requires options to be set')
return v
def check_removal_version(v, version_field, collection_name_field, error_code='invalid-removal-version'):
version = v.get(version_field)
collection_name = v.get(collection_name_field)
if not isinstance(version, string_types) or not isinstance(collection_name, string_types):
# If they are not strings, schema validation will have already complained.
return v
if collection_name == 'ansible.builtin':
try:
parsed_version = StrictVersion()
parsed_version.parse(version)
except ValueError as exc:
raise _add_ansible_error_code(
Invalid('%s (%r) is not a valid ansible-core version: %s' % (version_field, version, exc)),
error_code=error_code)
return v
try:
parsed_version = SemanticVersion()
parsed_version.parse(version)
if parsed_version.major != 0 and (parsed_version.minor != 0 or parsed_version.patch != 0):
raise _add_ansible_error_code(
Invalid('%s (%r) must be a major release, not a minor or patch release (see specification at '
'https://semver.org/)' % (version_field, version)),
error_code='removal-version-must-be-major')
except ValueError as exc:
raise _add_ansible_error_code(
Invalid('%s (%r) is not a valid collection version (see specification at https://semver.org/): '
'%s' % (version_field, version, exc)),
error_code=error_code)
return v
def option_deprecation(v):
if v.get('removed_in_version') or v.get('removed_at_date'):
if v.get('removed_in_version') and v.get('removed_at_date'):
raise _add_ansible_error_code(
Invalid('Only one of removed_in_version and removed_at_date must be specified'),
error_code='deprecation-either-date-or-version')
if not v.get('removed_from_collection'):
raise _add_ansible_error_code(
Invalid('If removed_in_version or removed_at_date is specified, '
'removed_from_collection must be specified as well'),
error_code='deprecation-collection-missing')
check_removal_version(v,
version_field='removed_in_version',
collection_name_field='removed_from_collection',
error_code='invalid-removal-version')
return
if v.get('removed_from_collection'):
raise Invalid('removed_from_collection cannot be specified without either '
'removed_in_version or removed_at_date')
def argument_spec_schema(for_collection):
any_string_types = Any(*string_types)
schema = {
any_string_types: {
'type': Any(is_callable, *argument_spec_types),
'elements': Any(*argument_spec_types),
'default': object,
'fallback': Any(
(is_callable, list_string_types),
[is_callable, list_string_types],
),
'choices': Any([object], (object,)),
'context': dict,
'required': bool,
'no_log': bool,
'aliases': Any(list_string_types, tuple(list_string_types)),
'apply_defaults': bool,
'removed_in_version': version(for_collection),
'removed_at_date': date(),
'removed_from_collection': collection_name,
'options': Self,
'deprecated_aliases': Any([All(
Any(
{
Required('name'): Any(*string_types),
Required('date'): date(),
Required('collection_name'): collection_name,
},
{
Required('name'): Any(*string_types),
Required('version'): version(for_collection),
Required('collection_name'): collection_name,
},
),
partial(check_removal_version,
version_field='version',
collection_name_field='collection_name',
error_code='invalid-removal-version')
)]),
}
}
schema[any_string_types].update(argument_spec_modifiers)
schemas = All(
schema,
Schema({any_string_types: no_required_with_default}),
Schema({any_string_types: elements_with_list}),
Schema({any_string_types: options_with_apply_defaults}),
Schema({any_string_types: option_deprecation}),
)
return Schema(schemas)
def ansible_module_kwargs_schema(module_name, for_collection):
schema = {
'argument_spec': argument_spec_schema(for_collection),
'bypass_checks': bool,
'no_log': bool,
'check_invalid_arguments': Any(None, bool),
'add_file_common_args': bool,
'supports_check_mode': bool,
}
if module_name.endswith(('_info', '_facts')):
del schema['supports_check_mode']
schema[Required('supports_check_mode')] = True
schema.update(argument_spec_modifiers)
return Schema(schema)
json_value = Schema(Any(
None,
int,
float,
[Self],
*(list({str_type: Self} for str_type in string_types) + list(string_types))
))
def version_added(v, error_code='version-added-invalid', accept_historical=False):
if 'version_added' in v:
version_added = v.get('version_added')
if isinstance(version_added, string_types):
# If it is not a string, schema validation will have already complained
# - or we have a float and we are in ansible/ansible, in which case we're
# also happy.
if v.get('version_added_collection') == 'ansible.builtin':
if version_added == 'historical' and accept_historical:
return v
try:
version = StrictVersion()
version.parse(version_added)
except ValueError as exc:
raise _add_ansible_error_code(
Invalid('version_added (%r) is not a valid ansible-core version: '
'%s' % (version_added, exc)),
error_code=error_code)
else:
try:
version = SemanticVersion()
version.parse(version_added)
if version.major != 0 and version.patch != 0:
raise _add_ansible_error_code(
Invalid('version_added (%r) must be a major or minor release, '
'not a patch release (see specification at '
'https://semver.org/)' % (version_added, )),
error_code='version-added-must-be-major-or-minor')
except ValueError as exc:
raise _add_ansible_error_code(
Invalid('version_added (%r) is not a valid collection version '
'(see specification at https://semver.org/): '
'%s' % (version_added, exc)),
error_code=error_code)
elif 'version_added_collection' in v:
# Must have been manual intervention, since version_added_collection is only
# added automatically when version_added is present
raise Invalid('version_added_collection cannot be specified without version_added')
return v
def check_option_elements(v):
# Check whether elements is there iff type == 'list'
v_type = v.get('type')
v_elements = v.get('elements')
if v_type == 'list' and v_elements is None:
raise _add_ansible_error_code(
Invalid('Argument defines type as list but elements is not defined'),
error_code='parameter-list-no-elements') # FIXME: adjust error code?
if v_type != 'list' and v_elements is not None:
raise _add_ansible_error_code(
Invalid('Argument defines parameter elements as %s but it is valid only when value of parameter type is list' % (v_elements, )),
error_code='doc-elements-invalid')
return v
def get_type_checker(v):
v_type = v.get('type')
if v_type == 'list':
elt_checker, elt_name = get_type_checker({'type': v.get('elements')})
def list_checker(value):
if isinstance(value, string_types):
value = [unquote(x.strip()) for x in value.split(',')]
if not isinstance(value, list):
raise ValueError('Value must be a list')
if elt_checker:
for elt in value:
try:
elt_checker(elt)
except Exception as exc:
raise ValueError('Entry %r is not of type %s: %s' % (elt, elt_name, exc))
return list_checker, ('list of %s' % elt_name) if elt_checker else 'list'
if v_type in ('boolean', 'bool'):
return partial(boolean, strict=False), v_type
if v_type in ('integer', 'int'):
return int, v_type
if v_type == 'float':
return float, v_type
if v_type == 'none':
def none_checker(value):
if value not in ('None', None):
raise ValueError('Value must be "None" or none')
return none_checker, v_type
if v_type in ('str', 'string', 'path', 'tmp', 'temppath', 'tmppath'):
def str_checker(value):
if not isinstance(value, string_types):
raise ValueError('Value must be string')
return str_checker, v_type
if v_type in ('pathspec', 'pathlist'):
def path_list_checker(value):
if not isinstance(value, string_types) and not is_iterable(value):
raise ValueError('Value must be string or list of strings')
return path_list_checker, v_type
if v_type in ('dict', 'dictionary'):
def dict_checker(value):
if not isinstance(value, dict):
raise ValueError('Value must be dictionary')
return dict_checker, v_type
return None, 'unknown'
def check_option_choices(v):
# Check whether choices have the correct type
v_choices = v.get('choices')
if not is_iterable(v_choices):
return v
if v.get('type') == 'list':
# choices for a list type means that every list element must be one of these choices
type_checker, type_name = get_type_checker({'type': v.get('elements')})
else:
type_checker, type_name = get_type_checker(v)
if type_checker is None:
return v
if isinstance(v_choices, dict):
# choices are still a list (the keys) but dict form serves to document each choice.
iterate = v_choices.keys()
else:
iterate = v_choices
for value in iterate:
try:
type_checker(value)
except Exception as exc:
raise _add_ansible_error_code(
Invalid(
'Argument defines choices as (%r) but this is incompatible with argument type %s: %s' % (value, type_name, exc)),
error_code='doc-choices-incompatible-type')
return v
def check_option_default(v):
# Check whether default is only present if required=False, and whether default has correct type
v_default = v.get('default')
if v.get('required') and v_default is not None:
raise _add_ansible_error_code(
Invalid(
'Argument is marked as required but specifies a default.'
' Arguments with a default should not be marked as required'),
error_code='no-default-for-required-parameter') # FIXME: adjust error code?
if v_default is None:
return v
type_checker, type_name = get_type_checker(v)
if type_checker is None:
return v
try:
type_checker(v_default)
except Exception as exc:
raise _add_ansible_error_code(
Invalid(
'Argument defines default as (%r) but this is incompatible with parameter type %s: %s' % (v_default, type_name, exc)),
error_code='incompatible-default-type')
return v
def list_dict_option_schema(for_collection, plugin_type):
if plugin_type == 'module':
option_types = Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str')
element_types = option_types
else:
option_types = Any(None, 'boolean', 'bool', 'integer', 'int', 'float', 'list', 'dict', 'dictionary', 'none',
'path', 'tmp', 'temppath', 'tmppath', 'pathspec', 'pathlist', 'str', 'string', 'raw')
element_types = Any(None, 'boolean', 'bool', 'integer', 'int', 'float', 'list', 'dict', 'dictionary', 'path', 'str', 'string', 'raw')
basic_option_schema = {
Required('description'): doc_string_or_strings,
'required': bool,
'choices': Any(list, {object: doc_string_or_strings}),
'aliases': Any(list_string_types),
'version_added': version(for_collection),
'version_added_collection': collection_name,
'default': json_value,
# Note: Types are strings, not literal bools, such as True or False
'type': option_types,
# in case of type='list' elements define type of individual item in list
'elements': element_types,
}
if plugin_type != 'module':
basic_option_schema['name'] = Any(*string_types)
deprecated_schema = All(
Schema(
All(
{
# This definition makes sure everything has the correct types/values
'why': doc_string,
# TODO: phase out either plural or singular, 'alt' is exclusive group
Exclusive('alternative', 'alt'): doc_string,
Exclusive('alternatives', 'alt'): doc_string,
# vod stands for 'version or date'; this is the name of the exclusive group
Exclusive('removed_at_date', 'vod'): date(),
Exclusive('version', 'vod'): version(for_collection),
'collection_name': collection_name,
},
{
# This definition makes sure that everything we require is there
Required('why'): Any(*string_types),
Required(Any('alternatives', 'alternative')): Any(*string_types),
Required(Any('removed_at_date', 'version')): Any(*string_types),
Required('collection_name'): Any(*string_types),
},
),
extra=PREVENT_EXTRA
),
partial(check_removal_version,
version_field='version',
collection_name_field='collection_name',
error_code='invalid-removal-version'),
)
env_schema = All(
Schema({
Required('name'): Any(*string_types),
'deprecated': deprecated_schema,
'version_added': version(for_collection),
'version_added_collection': collection_name,
}, extra=PREVENT_EXTRA),
partial(version_added, error_code='option-invalid-version-added')
)
ini_schema = All(
Schema({
Required('key'): Any(*string_types),
Required('section'): Any(*string_types),
'deprecated': deprecated_schema,
'version_added': version(for_collection),
'version_added_collection': collection_name,
}, extra=PREVENT_EXTRA),
partial(version_added, error_code='option-invalid-version-added')
)
vars_schema = All(
Schema({
Required('name'): Any(*string_types),
'deprecated': deprecated_schema,
'version_added': version(for_collection),
'version_added_collection': collection_name,
}, extra=PREVENT_EXTRA),
partial(version_added, error_code='option-invalid-version-added')
)
cli_schema = All(
Schema({
Required('name'): Any(*string_types),
'option': Any(*string_types),
'deprecated': deprecated_schema,
'version_added': version(for_collection),
'version_added_collection': collection_name,
}, extra=PREVENT_EXTRA),
partial(version_added, error_code='option-invalid-version-added')
)
keyword_schema = All(
Schema({
Required('name'): Any(*string_types),
'deprecated': deprecated_schema,
'version_added': version(for_collection),
'version_added_collection': collection_name,
}, extra=PREVENT_EXTRA),
partial(version_added, error_code='option-invalid-version-added')
)
basic_option_schema.update({
'env': [env_schema],
'ini': [ini_schema],
'vars': [vars_schema],
'cli': [cli_schema],
'keyword': [keyword_schema],
'deprecated': deprecated_schema,
})
suboption_schema = dict(basic_option_schema)
suboption_schema.update({
# Recursive suboptions
'suboptions': Any(None, *list({str_type: Self} for str_type in string_types)),
})
suboption_schema = Schema(All(
suboption_schema,
check_option_elements,
check_option_choices,
check_option_default,
), extra=PREVENT_EXTRA)
# This generates list of dicts with keys from string_types and suboption_schema value
# for example in Python 3: {str: suboption_schema}
list_dict_suboption_schema = [{str_type: suboption_schema} for str_type in string_types]
option_schema = dict(basic_option_schema)
option_schema.update({
'suboptions': Any(None, *list_dict_suboption_schema),
})
option_schema = Schema(All(
option_schema,
check_option_elements,
check_option_choices,
check_option_default,
), extra=PREVENT_EXTRA)
option_version_added = Schema(
All({
'suboptions': Any(None, *[{str_type: Self} for str_type in string_types]),
}, partial(version_added, error_code='option-invalid-version-added')),
extra=ALLOW_EXTRA
)
# This generates list of dicts with keys from string_types and option_schema value
# for example in Python 3: {str: option_schema}
return [{str_type: All(option_schema, option_version_added)} for str_type in string_types]
def return_contains(v):
schema = Schema(
{
Required('contains'): Any(dict, list, *string_types)
},
extra=ALLOW_EXTRA
)
if v.get('type') == 'complex':
return schema(v)
return v
def return_schema(for_collection, plugin_type='module'):
if plugin_type == 'module':
return_types = Any('bool', 'complex', 'dict', 'float', 'int', 'list', 'raw', 'str')
element_types = Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str')
else:
return_types = Any(None, 'boolean', 'bool', 'integer', 'int', 'float', 'list', 'dict', 'dictionary', 'path', 'str', 'string', 'raw')
element_types = return_types
basic_return_option_schema = {
Required('description'): doc_string_or_strings,
'returned': doc_string,
'version_added': version(for_collection),
'version_added_collection': collection_name,
'sample': json_value,
'example': json_value,
# in case of type='list' elements define type of individual item in list
'elements': element_types,
'choices': Any([object], (object,)),
}
if plugin_type == 'module':
# type is only required for modules right now
basic_return_option_schema[Required('type')] = return_types
else:
basic_return_option_schema['type'] = return_types
inner_return_option_schema = dict(basic_return_option_schema)
inner_return_option_schema.update({
'contains': Any(None, *list({str_type: Self} for str_type in string_types)),
})
return_contains_schema = Any(
All(
Schema(inner_return_option_schema),
Schema(return_contains),
Schema(partial(version_added, error_code='option-invalid-version-added')),
),
Schema(type(None)),
)
# This generates list of dicts with keys from string_types and return_contains_schema value
# for example in Python 3: {str: return_contains_schema}
list_dict_return_contains_schema = [{str_type: return_contains_schema} for str_type in string_types]
return_option_schema = dict(basic_return_option_schema)
return_option_schema.update({
'contains': Any(None, *list_dict_return_contains_schema),
})
if plugin_type == 'module':
# 'returned' is required on top-level
del return_option_schema['returned']
return_option_schema[Required('returned')] = Any(*string_types)
return Any(
All(
Schema(
{
any_string_types: return_option_schema
}
),
Schema({any_string_types: return_contains}),
Schema({any_string_types: partial(version_added, error_code='option-invalid-version-added')}),
),
Schema(type(None)),
)
def deprecation_schema(for_collection):
main_fields = {
Required('why'): doc_string,
'alternative': doc_string,
'alternatives': doc_string,
}
if for_collection:
main_fields.update({Required('removed_from_collection'): collection_name, 'removed': Any(True)})
date_schema = {
Required('removed_at_date'): date(),
}
date_schema.update(main_fields)
if for_collection:
version_schema = {
Required('removed_in'): version(for_collection),
}
else:
version_schema = {
Required('removed_in'): deprecation_versions(),
}
version_schema.update(main_fields)
result = Any(
Schema(version_schema, extra=PREVENT_EXTRA),
Schema(date_schema, extra=PREVENT_EXTRA),
)
if for_collection:
result = All(
result,
require_only_one(['alternative', 'alternatives']),
partial(check_removal_version,
version_field='removed_in',
collection_name_field='removed_from_collection',
error_code='invalid-removal-version'))
return result
def author(value):
if value is None:
return value # let schema checks handle
if not is_iterable(value):
value = [value]
for line in value:
if not isinstance(line, string_types):
continue # let schema checks handle
m = author_line.search(line)
if not m:
raise Invalid("Invalid author")
return value
def doc_schema(module_name, for_collection=False, deprecated_module=False, plugin_type='module'):
if module_name.startswith('_') and not for_collection:
module_name = module_name[1:]
deprecated_module = True
if for_collection is False and plugin_type == 'connection' and module_name == 'paramiko_ssh':
# The plugin loader has a hard-coded exception: when the builtin connection 'paramiko' is
# referenced, it loads 'paramiko_ssh' instead. That's why in this plugin, the name must be
# 'paramiko' and not 'paramiko_ssh'.
module_name = 'paramiko'
doc_schema_dict = {
Required('module' if plugin_type == 'module' else 'name'): module_name,
Required('short_description'): doc_string,
Required('description'): doc_string_or_strings,
'notes': Any(None, [doc_string]),
'seealso': Any(None, seealso_schema),
'requirements': [doc_string],
'todo': Any(None, doc_string_or_strings),
'options': Any(None, *list_dict_option_schema(for_collection, plugin_type)),
'extends_documentation_fragment': Any(list_string_types, *string_types),
'version_added_collection': collection_name,
}
if plugin_type == 'module':
doc_schema_dict[Required('author')] = All(Any(None, list_string_types, *string_types), author)
else:
# author is optional for plugins (for now)
doc_schema_dict['author'] = All(Any(None, list_string_types, *string_types), author)
if plugin_type == 'callback':
doc_schema_dict[Required('type')] = Any('aggregate', 'notification', 'stdout')
if for_collection:
# Optional
doc_schema_dict['version_added'] = version(for_collection=True)
else:
doc_schema_dict[Required('version_added')] = version(for_collection=False)
if deprecated_module:
deprecation_required_scheme = {
Required('deprecated'): Any(deprecation_schema(for_collection=for_collection)),
}
doc_schema_dict.update(deprecation_required_scheme)
def add_default_attributes(more=None):
schema = {
'description': doc_string_or_strings,
'details': doc_string_or_strings,
'support': any_string_types,
'version_added_collection': any_string_types,
'version_added': any_string_types,
}
if more:
schema.update(more)
return schema
doc_schema_dict['attributes'] = Schema(
All(
Schema({
any_string_types: {
Required('description'): doc_string_or_strings,
Required('support'): Any('full', 'partial', 'none', 'N/A'),
'details': doc_string_or_strings,
'version_added_collection': collection_name,
'version_added': version(for_collection=for_collection),
},
}, extra=ALLOW_EXTRA),
partial(version_added, error_code='attribute-invalid-version-added', accept_historical=False),
Schema({
any_string_types: add_default_attributes(),
'action_group': add_default_attributes({
Required('membership'): list_string_types,
}),
'platform': add_default_attributes({
Required('platforms'): Any(list_string_types, *string_types)
}),
}, extra=PREVENT_EXTRA),
)
)
return Schema(
All(
Schema(
doc_schema_dict,
extra=PREVENT_EXTRA
),
partial(version_added, error_code='module-invalid-version-added', accept_historical=not for_collection),
)
)
# Things to add soon
####################
# 1) Recursively validate `type: complex` fields
# This will improve documentation, though require fair amount of module tidyup
# Possible Future Enhancements
##############################
# 1) Don't allow empty options for choices, aliases, etc
# 2) If type: bool ensure choices isn't set - perhaps use Exclusive
# 3) both version_added should be quoted floats
# Tool that takes JSON and generates RETURN skeleton (needs to support complex structures)
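# --- Illustrative sketch, not part of the original file ---
# Hypothetical values run through the author() validator defined above.
print(author('First Last (@githubid)'))                      # ['First Last (@githubid)'] - wrapped in a list and accepted
print(author(['Ansible Core Team', 'Jane Doe (!UNKNOWN)']))  # accepted as-is
try:
    author('First Last')                                     # no (@github_id) marker and not a known exception
except Invalid as exc:
    print(exc)                                               # Invalid author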
| 37,212 | Python | .py | 807 | 35.261462 | 142 | 0.590957 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,276 | main.py | ansible_ansible/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Matt Martz <matt@sivel.net>
# Copyright (C) 2015 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import abc
import argparse
import ast
import datetime
import json
import os
import re
import sys
import traceback
import warnings
from collections import OrderedDict
from collections.abc import Mapping
from contextlib import contextmanager
from fnmatch import fnmatch
from antsibull_docs_parser import dom
from antsibull_docs_parser.parser import parse, Context
import yaml
from voluptuous.humanize import humanize_error
def setup_collection_loader():
"""
Configure the collection loader if a collection is being tested.
This must be done before the plugin loader is imported.
"""
if '--collection' not in sys.argv:
return
# noinspection PyProtectedMember
from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
collections_paths = os.environ.get('ANSIBLE_COLLECTIONS_PATH', '').split(os.pathsep)
collection_loader = _AnsibleCollectionFinder(collections_paths)
# noinspection PyProtectedMember
collection_loader._install() # pylint: disable=protected-access
warnings.filterwarnings(
"ignore",
"AnsibleCollectionFinder has already been configured")
setup_collection_loader()
from ansible import __version__ as ansible_version
from ansible.executor.module_common import REPLACER_WINDOWS, NEW_STYLE_PYTHON_MODULE_RE
from ansible.module_utils.common.collections import is_iterable
from ansible.module_utils.common.parameters import DEFAULT_TYPE_VALIDATORS
from ansible.module_utils.compat.version import StrictVersion, LooseVersion
from ansible.module_utils.basic import to_bytes
from ansible.plugins.loader import fragment_loader
from ansible.plugins.list import IGNORE as REJECTLIST
from ansible.utils.plugin_docs import add_collection_to_versions_and_dates, add_fragments, get_docstring
from ansible.utils.version import SemanticVersion
from .module_args import AnsibleModuleImportError, AnsibleModuleNotInitialized, get_py_argument_spec, get_ps_argument_spec
from .schema import (
ansible_module_kwargs_schema,
doc_schema,
return_schema,
)
from .utils import CaptureStd, NoArgsAnsibleModule, compare_unordered_lists, parse_yaml, parse_isodate
# Because there is no ast.TryExcept in the Python 3 ast module
TRY_EXCEPT = ast.Try
# REPLACER_WINDOWS from ansible.executor.module_common is a byte
# string, but we need unicode for Python 3
REPLACER_WINDOWS = REPLACER_WINDOWS.decode('utf-8')
REJECTLIST_DIRS = frozenset(('.git', 'test', '.github', '.idea'))
INDENT_REGEX = re.compile(r'([\t]*)')
SYS_EXIT_REGEX = re.compile(r'[^#]*sys.exit\s*\(.*')
NO_LOG_REGEX = re.compile(r'(?:pass(?!ive)|secret|token|key)', re.I)
REJECTLIST_IMPORTS = {
'requests': {
'new_only': True,
'error': {
'code': 'use-module-utils-urls',
'msg': ('requests import found, should use '
'ansible.module_utils.urls instead')
}
},
r'boto(?:\.|$)': {
'new_only': True,
'error': {
'code': 'use-boto3',
'msg': 'boto import found, new modules should use boto3'
}
},
}
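# Illustrative note (added for clarity, not used by the code): given the table above,
# a brand-new module containing "import requests" would be flagged with
# use-module-utils-urls, and "import boto.ec2" with use-boto3; see
# _find_rejectlist_imports() below for how these patterns are applied.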
SUBPROCESS_REGEX = re.compile(r'subprocess\.Po.*')
OS_CALL_REGEX = re.compile(r'os\.call.*')
LOOSE_ANSIBLE_VERSION = LooseVersion('.'.join(ansible_version.split('.')[:3]))
PLUGINS_WITH_RETURN_VALUES = ('module', )
PLUGINS_WITH_EXAMPLES = ('module', )
PLUGINS_WITH_YAML_EXAMPLES = ('module', )
def is_potential_secret_option(option_name):
if not NO_LOG_REGEX.search(option_name):
return False
# If this is a count, type, algorithm, timeout, filename, or name, it is probably not a secret
if option_name.endswith((
'_count', '_type', '_alg', '_algorithm', '_timeout', '_name', '_comment',
'_bits', '_id', '_identifier', '_period', '_file', '_filename',
)):
return False
# 'key' also matches 'publickey', which is generally not secret
if any(part in option_name for part in (
'publickey', 'public_key', 'keyusage', 'key_usage', 'keyserver', 'key_server',
'keysize', 'key_size', 'keyservice', 'key_service', 'pub_key', 'pubkey',
'keyboard', 'secretary',
)):
return False
return True
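# Rough examples of the heuristic above (illustrative only): 'api_token' and
# 'admin_password' match NO_LOG_REGEX and are treated as potential secrets, while
# 'publickey' and 'ssh_key_file' fall through the allow-lists and are not.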
def compare_dates(d1, d2):
try:
date1 = parse_isodate(d1, allow_date=True)
date2 = parse_isodate(d2, allow_date=True)
return date1 == date2
except ValueError:
# At least one of d1 and d2 cannot be parsed. Simply compare values.
return d1 == d2
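# For example, compare_dates('2023-05-01', '2023-05-01') compares the parsed dates,
# while unparseable values such as 'TBD' simply fall back to plain equality.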
class ReporterEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, Exception):
return str(o)
return json.JSONEncoder.default(self, o)
class Reporter:
def __init__(self):
self.files = OrderedDict()
def _ensure_default_entry(self, path):
try:
self.files[path]
except KeyError:
self.files[path] = {
'errors': [],
'warnings': [],
'traces': [],
'warning_traces': []
}
def _log(self, path, code, msg, level='error', line=0, column=0):
self._ensure_default_entry(path)
lvl_dct = self.files[path]['%ss' % level]
lvl_dct.append({
'code': code,
'msg': msg,
'line': line,
'column': column
})
def error(self, *args, **kwargs):
self._log(*args, level='error', **kwargs)
def warning(self, *args, **kwargs):
self._log(*args, level='warning', **kwargs)
def trace(self, path, tracebk):
self._ensure_default_entry(path)
self.files[path]['traces'].append(tracebk)
def warning_trace(self, path, tracebk):
self._ensure_default_entry(path)
self.files[path]['warning_traces'].append(tracebk)
@staticmethod
@contextmanager
def _output_handle(output):
if output != '-':
handle = open(output, 'w+')
else:
handle = sys.stdout
yield handle
handle.flush()
handle.close()
@staticmethod
def _filter_out_ok(reports):
temp_reports = OrderedDict()
for path, report in reports.items():
if report['errors'] or report['warnings']:
temp_reports[path] = report
return temp_reports
def plain(self, warnings=False, output='-'):
"""Print out the test results in plain format
output is ignored here for now
"""
ret = []
for path, report in Reporter._filter_out_ok(self.files).items():
traces = report['traces'][:]
if warnings and report['warnings']:
traces.extend(report['warning_traces'])
for trace in traces:
print('TRACE:')
print('\n '.join((' %s' % trace).splitlines()))
for error in report['errors']:
error['path'] = path
print('%(path)s:%(line)d:%(column)d: E%(code)s %(msg)s' % error)
ret.append(1)
if warnings:
for warning in report['warnings']:
warning['path'] = path
print('%(path)s:%(line)d:%(column)d: W%(code)s %(msg)s' % warning)
return 3 if ret else 0
def json(self, warnings=False, output='-'):
"""Print out the test results in json format
warnings is not respected in this output
"""
ret = [len(r['errors']) for r in self.files.values()]
with Reporter._output_handle(output) as handle:
print(json.dumps(Reporter._filter_out_ok(self.files), indent=4, cls=ReporterEncoder), file=handle)
return 3 if sum(ret) else 0
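# Hypothetical usage sketch (not how the validator wires it up; for illustration only):
#   reporter = Reporter()
#   reporter.error(path='plugins/modules/foo.py', code='missing-documentation',
#                  msg='No DOCUMENTATION provided')
#   rc = reporter.plain(warnings=True)
#   # prints: plugins/modules/foo.py:0:0: Emissing-documentation No DOCUMENTATION provided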
class Validator(metaclass=abc.ABCMeta):
"""Validator instances are intended to be run on a single object. if you
are scanning multiple objects for problems, you'll want to have a separate
Validator for each one."""
def __init__(self, reporter=None):
self.reporter = reporter
@property
@abc.abstractmethod
def object_name(self):
"""Name of the object we validated"""
pass
@property
@abc.abstractmethod
def object_path(self):
"""Path of the object we validated"""
pass
@abc.abstractmethod
def validate(self):
"""Run this method to generate the test results"""
pass
class ModuleValidator(Validator):
REJECTLIST_PATTERNS = ('.git*', '*.pyc', '*.pyo', '.*', '*.md', '*.rst', '*.txt')
REJECTLIST_FILES = frozenset(('.git', '.gitignore', '.travis.yml',
'.gitattributes', '.gitmodules', 'COPYING',
'__init__.py', 'VERSION', 'test-docs.sh'))
REJECTLIST = REJECTLIST_FILES.union(REJECTLIST['module'])
    # win_dsc has a dynamic arg spec, so the docs won't ever match
PS_ARG_VALIDATE_REJECTLIST = frozenset(('win_dsc.ps1', ))
def __init__(self, path, git_cache: GitCache, analyze_arg_spec=False, collection=None, collection_version=None,
reporter=None, routing=None, plugin_type='module'):
super(ModuleValidator, self).__init__(reporter=reporter or Reporter())
self.path = path
self.basename = os.path.basename(self.path)
self.name = os.path.splitext(self.basename)[0]
self.plugin_type = plugin_type
self.analyze_arg_spec = analyze_arg_spec and plugin_type == 'module'
self._Version = LooseVersion
self._StrictVersion = StrictVersion
self.collection = collection
self.collection_name = 'ansible.builtin'
if self.collection:
self._Version = SemanticVersion
self._StrictVersion = SemanticVersion
collection_namespace_path, collection_name = os.path.split(self.collection)
self.collection_name = '%s.%s' % (os.path.basename(collection_namespace_path), collection_name)
self.routing = routing
self.collection_version = None
if collection_version is not None:
self.collection_version_str = collection_version
self.collection_version = SemanticVersion(collection_version)
self.git_cache = git_cache
self.base_module = self.git_cache.get_original_path(self.path)
with open(path) as f:
self.text = f.read()
self.length = len(self.text.splitlines())
try:
self.ast = ast.parse(self.text)
except Exception:
self.ast = None
def _create_version(self, v, collection_name=None):
if not v:
raise ValueError('Empty string is not a valid version')
if collection_name == 'ansible.builtin':
return LooseVersion(v)
if collection_name is not None:
return SemanticVersion(v)
return self._Version(v)
def _create_strict_version(self, v, collection_name=None):
if not v:
raise ValueError('Empty string is not a valid version')
if collection_name == 'ansible.builtin':
return StrictVersion(v)
if collection_name is not None:
return SemanticVersion(v)
return self._StrictVersion(v)
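    # Version handling in brief: 'ansible.builtin' content is compared with
    # LooseVersion/StrictVersion (e.g. '2.18'), any other named collection with
    # SemanticVersion (e.g. '1.2.3'), and an unspecified collection falls back to the
    # class chosen in __init__.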
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
@property
def object_name(self):
return self.basename
@property
def object_path(self):
return self.path
def _get_collection_meta(self):
"""Implement if we need this for version_added comparisons
"""
pass
def _python_module(self):
if self.path.endswith('.py'):
return True
return False
def _powershell_module(self):
if self.path.endswith('.ps1'):
return True
return False
def _sidecar_doc(self):
if self.path.endswith('.yml') or self.path.endswith('.yaml'):
return True
return False
def _just_docs(self):
"""Module can contain just docs and from __future__ boilerplate
"""
try:
for child in self.ast.body:
if not isinstance(child, ast.Assign):
# allow string constant expressions (these are docstrings)
if isinstance(child, ast.Expr) and isinstance(child.value, ast.Constant) and isinstance(child.value.value, str):
continue
# allow __future__ imports (the specific allowed imports are checked by other sanity tests)
if isinstance(child, ast.ImportFrom) and child.module == '__future__':
continue
return False
return True
except AttributeError:
return False
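    # Illustrative module body that _just_docs() accepts (sketch):
    #   from __future__ import annotations
    #   DOCUMENTATION = r'''...'''
    #   EXAMPLES = r'''...'''
    #   RETURN = r'''...'''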
def _is_new_module(self) -> bool | None:
"""Return True if the content is new, False if it is not and None if the information is not available."""
return self.git_cache.is_new(self.path)
def _check_interpreter(self, powershell=False):
if self._powershell_module():
if not self.text.startswith('#!powershell\n'):
self.reporter.error(
path=self.object_path,
code='missing-powershell-interpreter',
msg='Interpreter line is not "#!powershell"'
)
return
if self._python_module():
missing_python_interpreter = False
if not self.text.startswith('#!/usr/bin/python'):
if NEW_STYLE_PYTHON_MODULE_RE.search(to_bytes(self.text)):
missing_python_interpreter = self.text.startswith('#!') # shebang optional, but if present must match
else:
missing_python_interpreter = True # shebang required
if missing_python_interpreter:
self.reporter.error(
path=self.object_path,
code='missing-python-interpreter',
msg='Interpreter line is not "#!/usr/bin/python"',
)
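    # In short: .ps1 modules must start with "#!powershell"; Python modules need the
    # "#!/usr/bin/python" shebang unless they match NEW_STYLE_PYTHON_MODULE_RE, in which
    # case the shebang may be omitted (but must still be "#!/usr/bin/python" if present).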
def _check_for_sys_exit(self):
# Optimize out the happy path
if 'sys.exit' not in self.text:
return
for line_no, line in enumerate(self.text.splitlines()):
sys_exit_usage = SYS_EXIT_REGEX.match(line)
if sys_exit_usage:
# TODO: add column
self.reporter.error(
path=self.object_path,
code='use-fail-json-not-sys-exit',
msg='sys.exit() call found. Should be exit_json/fail_json',
line=line_no + 1
)
def _check_gpl3_header(self):
header = '\n'.join(self.text.split('\n')[:20])
if ('GNU General Public License' not in header or
('version 3' not in header and 'v3.0' not in header)):
self.reporter.error(
path=self.object_path,
code='missing-gplv3-license',
msg='GPLv3 license header not found in the first 20 lines of the module'
)
elif self._is_new_module():
if len([line for line in header
if 'GNU General Public License' in line]) > 1:
self.reporter.error(
path=self.object_path,
code='use-short-gplv3-license',
msg='Found old style GPLv3 license header: '
'https://docs.ansible.com/ansible-core/devel/dev_guide/developing_modules_documenting.html#copyright'
)
def _check_for_subprocess(self):
for child in self.ast.body:
if isinstance(child, ast.Import):
if child.names[0].name == 'subprocess':
for line_no, line in enumerate(self.text.splitlines()):
sp_match = SUBPROCESS_REGEX.search(line)
if sp_match:
self.reporter.error(
path=self.object_path,
code='use-run-command-not-popen',
msg=('subprocess.Popen call found. Should be module.run_command'),
line=(line_no + 1),
column=(sp_match.span()[0] + 1)
)
def _check_for_os_call(self):
if 'os.call' in self.text:
for line_no, line in enumerate(self.text.splitlines()):
os_call_match = OS_CALL_REGEX.search(line)
if os_call_match:
self.reporter.error(
path=self.object_path,
code='use-run-command-not-os-call',
msg=('os.call() call found. Should be module.run_command'),
line=(line_no + 1),
column=(os_call_match.span()[0] + 1)
)
def _find_rejectlist_imports(self):
for child in self.ast.body:
names = []
if isinstance(child, ast.Import):
names.extend(child.names)
elif isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, ast.Import):
names.extend(grandchild.names)
for name in names:
# TODO: Add line/col
for rejectlist_import, options in REJECTLIST_IMPORTS.items():
if re.search(rejectlist_import, name.name):
new_only = options['new_only']
if self._is_new_module() and new_only:
self.reporter.error(
path=self.object_path,
**options['error']
)
elif not new_only:
self.reporter.error(
path=self.object_path,
**options['error']
)
def _find_module_utils(self):
linenos = []
found_basic = False
for child in self.ast.body:
if isinstance(child, (ast.Import, ast.ImportFrom)):
names = []
try:
names.append(child.module)
if child.module.endswith('.basic'):
found_basic = True
except AttributeError:
pass
names.extend([n.name for n in child.names])
if [n for n in names if n.startswith('ansible.module_utils')]:
linenos.append(child.lineno)
for name in child.names:
if ('module_utils' in getattr(child, 'module', '') and
isinstance(name, ast.alias) and
name.name == '*'):
msg = (
'module-utils-specific-import',
('module_utils imports should import specific '
'components, not "*"')
)
if self._is_new_module():
self.reporter.error(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=child.lineno
)
else:
self.reporter.warning(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=child.lineno
)
if (isinstance(name, ast.alias) and
name.name == 'basic'):
found_basic = True
if not found_basic:
self.reporter.warning(
path=self.object_path,
code='missing-module-utils-basic-import',
msg='Did not find "ansible.module_utils.basic" import'
)
return linenos
def _get_first_callable(self):
linenos = []
for child in self.ast.body:
if isinstance(child, (ast.FunctionDef, ast.ClassDef)):
linenos.append(child.lineno)
return min(linenos) if linenos else None
def _find_has_import(self):
for child in self.ast.body:
found_try_except_import = False
found_has = False
if isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, ast.Import):
found_try_except_import = True
if isinstance(grandchild, ast.Assign):
for target in grandchild.targets:
if not isinstance(target, ast.Name):
continue
if target.id.lower().startswith('has_'):
found_has = True
if found_try_except_import and not found_has:
# TODO: Add line/col
self.reporter.warning(
path=self.object_path,
code='try-except-missing-has',
msg='Found Try/Except block without HAS_ assignment'
)
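    # The shape this check looks for is roughly (illustrative):
    #   try:
    #       import requests
    #       HAS_REQUESTS = True
    #   except ImportError:
    #       HAS_REQUESTS = False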
def _ensure_imports_below_docs(self, doc_info, first_callable):
doc_line_numbers = [lineno for lineno in (doc_info[key]['lineno'] for key in doc_info) if lineno > 0]
min_doc_line = min(doc_line_numbers) if doc_line_numbers else None
max_doc_line = max(doc_info[key]['end_lineno'] for key in doc_info)
import_lines = []
for child in self.ast.body:
if isinstance(child, (ast.Import, ast.ImportFrom)):
# allow __future__ imports (the specific allowed imports are checked by other sanity tests)
if isinstance(child, ast.ImportFrom) and child.module == '__future__':
continue
import_lines.append(child.lineno)
if min_doc_line and child.lineno < min_doc_line:
self.reporter.error(
path=self.object_path,
code='import-before-documentation',
msg=('Import found before documentation variables. '
'All imports must appear below '
'DOCUMENTATION/EXAMPLES/RETURN.'),
line=child.lineno
)
break
elif isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, (ast.Import, ast.ImportFrom)):
import_lines.append(grandchild.lineno)
if min_doc_line and grandchild.lineno < min_doc_line:
self.reporter.error(
path=self.object_path,
code='import-before-documentation',
msg=('Import found before documentation '
'variables. All imports must appear below '
'DOCUMENTATION/EXAMPLES/RETURN.'),
line=child.lineno
)
break
for import_line in import_lines:
if not (max_doc_line < import_line < first_callable):
msg = (
'import-placement',
('Imports should be directly below DOCUMENTATION/EXAMPLES/'
'RETURN.')
)
if self._is_new_module():
self.reporter.error(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=import_line
)
else:
self.reporter.warning(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=import_line
)
def _validate_ps_replacers(self):
# loop all (for/else + error)
# get module list for each
# check "shape" of each module name
module_requires = r'(?im)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)'
csharp_requires = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*(Ansible\..+)'
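        # Illustrative requirement lines these patterns are intended to match:
        #   #Requires -Module Ansible.ModuleUtils.Legacy
        #   #AnsibleRequires -CSharpUtil Ansible.Basic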
found_requires = False
for req_stmt in re.finditer(module_requires, self.text):
found_requires = True
# this will bomb on dictionary format - "don't do that"
module_list = [x.strip() for x in req_stmt.group(1).split(',')]
if len(module_list) > 1:
self.reporter.error(
path=self.object_path,
code='multiple-utils-per-requires',
msg='Ansible.ModuleUtils requirements do not support multiple modules per statement: "%s"' % req_stmt.group(0)
)
continue
module_name = module_list[0]
if module_name.lower().endswith('.psm1'):
self.reporter.error(
path=self.object_path,
code='invalid-requires-extension',
msg='Module #Requires should not end in .psm1: "%s"' % module_name
)
for req_stmt in re.finditer(csharp_requires, self.text):
found_requires = True
# this will bomb on dictionary format - "don't do that"
module_list = [x.strip() for x in req_stmt.group(1).split(',')]
if len(module_list) > 1:
self.reporter.error(
path=self.object_path,
code='multiple-csharp-utils-per-requires',
msg='Ansible C# util requirements do not support multiple utils per statement: "%s"' % req_stmt.group(0)
)
continue
module_name = module_list[0]
if module_name.lower().endswith('.cs'):
self.reporter.error(
path=self.object_path,
code='illegal-extension-cs',
msg='Module #AnsibleRequires -CSharpUtil should not end in .cs: "%s"' % module_name
)
# also accept the legacy #POWERSHELL_COMMON replacer signal
if not found_requires and REPLACER_WINDOWS not in self.text:
self.reporter.error(
path=self.object_path,
code='missing-module-utils-import-csharp-requirements',
msg='No Ansible.ModuleUtils or C# Ansible util requirements/imports found'
)
def _find_ps_docs_file(self):
sidecar = self._find_sidecar_docs()
if sidecar:
return sidecar
py_path = self.path.replace('.ps1', '.py')
if not os.path.isfile(py_path):
self.reporter.error(
path=self.object_path,
code='missing-documentation',
msg='No DOCUMENTATION provided'
)
return py_path
def _find_sidecar_docs(self):
base_path = os.path.splitext(self.path)[0]
for ext in ('.yml', '.yaml'):
doc_path = f"{base_path}{ext}"
if os.path.isfile(doc_path):
return doc_path
def _get_py_docs(self):
docs = {
'DOCUMENTATION': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
'EXAMPLES': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
'RETURN': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
}
for child in self.ast.body:
if isinstance(child, ast.Assign):
for grandchild in child.targets:
if not isinstance(grandchild, ast.Name):
continue
if grandchild.id == 'DOCUMENTATION':
docs['DOCUMENTATION']['value'] = child.value.value
docs['DOCUMENTATION']['lineno'] = child.lineno
docs['DOCUMENTATION']['end_lineno'] = (
child.lineno + len(child.value.value.splitlines())
)
elif grandchild.id == 'EXAMPLES':
docs['EXAMPLES']['value'] = child.value.value
docs['EXAMPLES']['lineno'] = child.lineno
docs['EXAMPLES']['end_lineno'] = (
child.lineno + len(child.value.value.splitlines())
)
elif grandchild.id == 'RETURN':
docs['RETURN']['value'] = child.value.value
docs['RETURN']['lineno'] = child.lineno
docs['RETURN']['end_lineno'] = (
child.lineno + len(child.value.value.splitlines())
)
return docs
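    # For instance, a DOCUMENTATION string assigned on line 20 and spanning 30 lines of
    # YAML yields {'value': <yaml text>, 'lineno': 20, 'end_lineno': 50} (end_lineno is
    # simply lineno plus the number of lines in the string value).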
def _validate_docs_schema(self, doc, schema, name, error_code):
# TODO: Add line/col
errors = []
try:
schema(doc)
except Exception as e:
for error in e.errors:
error.data = doc
errors.extend(e.errors)
for error in errors:
path = [str(p) for p in error.path]
local_error_code = getattr(error, 'ansible_error_code', error_code)
if isinstance(error.data, dict):
error_message = humanize_error(error.data, error)
else:
error_message = error
if path:
combined_path = '%s.%s' % (name, '.'.join(path))
else:
combined_path = name
self.reporter.error(
path=self.object_path,
code=local_error_code,
msg='%s: %s' % (combined_path, error_message)
)
def _validate_option_docs(self, options, context=None):
if not isinstance(options, dict):
return
if context is None:
context = []
normalized_option_alias_names = dict()
def add_option_alias_name(name, option_name):
normalized_name = str(name).lower()
normalized_option_alias_names.setdefault(normalized_name, {}).setdefault(option_name, set()).add(name)
for option, data in options.items():
if 'suboptions' in data:
self._validate_option_docs(data.get('suboptions'), context + [option])
add_option_alias_name(option, option)
if 'aliases' in data and isinstance(data['aliases'], list):
for alias in data['aliases']:
add_option_alias_name(alias, option)
for normalized_name, options in normalized_option_alias_names.items():
if len(options) < 2:
continue
what = []
for option_name, names in sorted(options.items()):
if option_name in names:
what.append("option '%s'" % option_name)
else:
what.append("alias '%s' of option '%s'" % (sorted(names)[0], option_name))
msg = "Multiple options/aliases"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " are equal up to casing: %s" % ", ".join(what)
self.reporter.error(
path=self.object_path,
code='option-equal-up-to-casing',
msg=msg,
)
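    # Illustrative failure case: an option 'Name' plus an alias 'name' on a different
    # option normalize to the same lowercase key and are reported as
    # option-equal-up-to-casing.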
def _validate_docs(self):
doc = None
# We have three ways of marking deprecated/removed files. Have to check each one
# individually and then make sure they all agree
filename_deprecated_or_removed = False
deprecated = False
doc_deprecated = None # doc legally might not exist
routing_says_deprecated = False
if self.object_name.startswith('_') and not os.path.islink(self.object_path):
filename_deprecated_or_removed = True
# We are testing a collection
if self.routing:
routing_deprecation = self.routing.get('plugin_routing', {})
routing_deprecation = routing_deprecation.get('modules' if self.plugin_type == 'module' else self.plugin_type, {})
routing_deprecation = routing_deprecation.get(self.name, {}).get('deprecation', {})
if routing_deprecation:
# meta/runtime.yml says this is deprecated
routing_says_deprecated = True
deprecated = True
if self._python_module():
doc_info = self._get_py_docs()
else:
doc_info = None
sidecar_text = None
if self._sidecar_doc():
sidecar_text = self.text
elif sidecar_path := self._find_sidecar_docs():
with open(sidecar_path, mode='r', encoding='utf-8') as fd:
sidecar_text = fd.read()
if sidecar_text:
sidecar_doc, errors, traces = parse_yaml(sidecar_text, 0, self.name, 'DOCUMENTATION')
for error in errors:
self.reporter.error(
path=self.object_path,
code='documentation-syntax-error',
**error
)
for trace in traces:
self.reporter.trace(
path=self.object_path,
tracebk=trace
)
doc = sidecar_doc.get('DOCUMENTATION', None)
examples_raw = sidecar_doc.get('EXAMPLES', None)
examples_lineno = 1
returns = sidecar_doc.get('RETURN', None)
elif doc_info:
if bool(doc_info['DOCUMENTATION']['value']):
doc, errors, traces = parse_yaml(
doc_info['DOCUMENTATION']['value'],
doc_info['DOCUMENTATION']['lineno'],
self.name, 'DOCUMENTATION'
)
for error in errors:
self.reporter.error(
path=self.object_path,
code='documentation-syntax-error',
**error
)
for trace in traces:
self.reporter.trace(
path=self.object_path,
tracebk=trace
)
examples_raw = doc_info['EXAMPLES']['value']
examples_lineno = doc_info['EXAMPLES']['lineno']
returns = None
if bool(doc_info['RETURN']['value']):
returns, errors, traces = parse_yaml(doc_info['RETURN']['value'],
doc_info['RETURN']['lineno'],
self.name, 'RETURN')
for error in errors:
self.reporter.error(
path=self.object_path,
code='return-syntax-error',
**error
)
for trace in traces:
self.reporter.trace(
path=self.object_path,
tracebk=trace
)
if doc:
add_collection_to_versions_and_dates(doc, self.collection_name,
is_module=self.plugin_type == 'module')
missing_fragment = False
with CaptureStd():
try:
get_docstring(self.path, fragment_loader=fragment_loader,
verbose=True,
collection_name=self.collection_name,
plugin_type=self.plugin_type)
except AssertionError:
fragment = doc['extends_documentation_fragment']
self.reporter.error(
path=self.object_path,
code='missing-doc-fragment',
msg='DOCUMENTATION fragment missing: %s' % fragment
)
missing_fragment = True
except Exception as e:
self.reporter.trace(
path=self.object_path,
tracebk=traceback.format_exc()
)
self.reporter.error(
path=self.object_path,
code='documentation-error',
msg='Unknown DOCUMENTATION error, see TRACE: %s' % e
)
if not missing_fragment:
add_fragments(doc, self.object_path, fragment_loader=fragment_loader,
is_module=self.plugin_type == 'module')
if 'options' in doc and doc['options'] is None:
self.reporter.error(
path=self.object_path,
code='invalid-documentation-options',
msg='DOCUMENTATION.options must be a dictionary/hash when used',
)
if 'deprecated' in doc and doc.get('deprecated'):
doc_deprecated = True
doc_deprecation = doc['deprecated']
documentation_collection = doc_deprecation.get('removed_from_collection')
if documentation_collection != self.collection_name:
self.reporter.error(
path=self.object_path,
code='deprecation-wrong-collection',
msg='"DOCUMENTATION.deprecation.removed_from_collection must be the current collection name: %r vs. %r' % (
documentation_collection, self.collection_name)
)
else:
doc_deprecated = False
if os.path.islink(self.object_path):
# This module has an alias, which we can tell as it's a symlink
# Rather than checking for `module: $filename` we need to check against the true filename
self._validate_docs_schema(
doc,
doc_schema(
os.readlink(self.object_path).split('.')[0],
for_collection=bool(self.collection),
deprecated_module=deprecated,
plugin_type=self.plugin_type,
),
'DOCUMENTATION',
'invalid-documentation',
)
else:
# This is the normal case
self._validate_docs_schema(
doc,
doc_schema(
self.object_name.split('.')[0],
for_collection=bool(self.collection),
deprecated_module=deprecated,
plugin_type=self.plugin_type,
),
'DOCUMENTATION',
'invalid-documentation',
)
if doc:
self._validate_option_docs(doc.get('options'))
self._validate_all_semantic_markup(doc, returns)
if not self.collection:
existing_doc = self._check_for_new_args(doc)
self._check_version_added(doc, existing_doc)
else:
self.reporter.error(
path=self.object_path,
code='missing-documentation',
msg='No DOCUMENTATION provided',
)
if not examples_raw and self.plugin_type in PLUGINS_WITH_EXAMPLES:
if self.plugin_type in PLUGINS_WITH_EXAMPLES:
self.reporter.error(
path=self.object_path,
code='missing-examples',
msg='No EXAMPLES provided'
)
elif self.plugin_type in PLUGINS_WITH_YAML_EXAMPLES:
dummy, errors, traces = parse_yaml(examples_raw,
examples_lineno,
self.name, 'EXAMPLES',
load_all=True,
ansible_loader=True)
for error in errors:
self.reporter.error(
path=self.object_path,
code='invalid-examples',
**error
)
for trace in traces:
self.reporter.trace(
path=self.object_path,
tracebk=trace
)
if returns:
if returns:
add_collection_to_versions_and_dates(
returns,
self.collection_name,
is_module=self.plugin_type == 'module',
return_docs=True)
self._validate_docs_schema(
returns,
return_schema(for_collection=bool(self.collection), plugin_type=self.plugin_type),
'RETURN', 'return-syntax-error')
elif self.plugin_type in PLUGINS_WITH_RETURN_VALUES:
if self._is_new_module():
self.reporter.error(
path=self.object_path,
code='missing-return',
msg='No RETURN provided'
)
else:
self.reporter.warning(
path=self.object_path,
code='missing-return-legacy',
msg='No RETURN provided'
)
# Check for mismatched deprecation
if not self.collection:
mismatched_deprecation = True
if not (filename_deprecated_or_removed or deprecated or doc_deprecated):
mismatched_deprecation = False
else:
if (filename_deprecated_or_removed and doc_deprecated):
mismatched_deprecation = False
if (filename_deprecated_or_removed and not doc):
mismatched_deprecation = False
if mismatched_deprecation:
self.reporter.error(
path=self.object_path,
code='deprecation-mismatch',
msg='Module deprecation/removed must agree in documentation, by prepending filename with'
' "_", and setting DOCUMENTATION.deprecated for deprecation or by removing all'
' documentation for removed'
)
else:
if not (doc_deprecated == routing_says_deprecated):
# DOCUMENTATION.deprecated and meta/runtime.yml disagree
self.reporter.error(
path=self.object_path,
code='deprecation-mismatch',
msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree.'
)
elif routing_says_deprecated:
# Both DOCUMENTATION.deprecated and meta/runtime.yml agree that the module is deprecated.
# Make sure they give the same version or date.
routing_date = routing_deprecation.get('removal_date')
routing_version = routing_deprecation.get('removal_version')
# The versions and dates in the module documentation are auto-tagged, so remove the tag
# to make comparison possible and to avoid confusing the user.
documentation_date = doc_deprecation.get('removed_at_date')
documentation_version = doc_deprecation.get('removed_in')
if not compare_dates(routing_date, documentation_date):
self.reporter.error(
path=self.object_path,
code='deprecation-mismatch',
msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal date: %r vs. %r' % (
routing_date, documentation_date)
)
if routing_version != documentation_version:
self.reporter.error(
path=self.object_path,
code='deprecation-mismatch',
msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal version: %r vs. %r' % (
routing_version, documentation_version)
)
# In the future we should error if ANSIBLE_METADATA exists in a collection
return doc_info, doc
def _check_sem_option(self, part: dom.OptionNamePart, current_plugin: dom.PluginIdentifier) -> None:
if part.plugin is None or part.plugin != current_plugin:
return
if part.entrypoint is not None:
return
if tuple(part.link) not in self._all_options:
self.reporter.error(
path=self.object_path,
code='invalid-documentation-markup',
msg='Directive "%s" contains a non-existing option "%s"' % (part.source, part.name)
)
def _check_sem_return_value(self, part: dom.ReturnValuePart, current_plugin: dom.PluginIdentifier) -> None:
if part.plugin is None or part.plugin != current_plugin:
return
if part.entrypoint is not None:
return
if tuple(part.link) not in self._all_return_values:
self.reporter.error(
path=self.object_path,
code='invalid-documentation-markup',
msg='Directive "%s" contains a non-existing return value "%s"' % (part.source, part.name)
)
def _validate_semantic_markup(self, object) -> None:
# Make sure we operate on strings
if is_iterable(object):
for entry in object:
self._validate_semantic_markup(entry)
return
if not isinstance(object, str):
return
if self.collection:
fqcn = f'{self.collection_name}.{self.name}'
else:
fqcn = f'ansible.builtin.{self.name}'
current_plugin = dom.PluginIdentifier(fqcn=fqcn, type=self.plugin_type)
for par in parse(object, Context(current_plugin=current_plugin), errors='message', add_source=True):
for part in par:
# Errors are already covered during schema validation, we only check for option and
# return value references
if part.type == dom.PartType.OPTION_NAME:
self._check_sem_option(part, current_plugin)
if part.type == dom.PartType.RETURN_VALUE:
self._check_sem_return_value(part, current_plugin)
def _validate_semantic_markup_collect(self, destination, sub_key, data, all_paths):
if not isinstance(data, dict):
return
for key, value in data.items():
if not isinstance(value, dict):
continue
keys = {key}
if is_iterable(value.get('aliases')):
keys.update(value['aliases'])
new_paths = [path + [key] for path in all_paths for key in keys]
destination.update([tuple(path) for path in new_paths])
self._validate_semantic_markup_collect(destination, sub_key, value.get(sub_key), new_paths)
def _validate_semantic_markup_options(self, options):
if not isinstance(options, dict):
return
for key, value in options.items():
self._validate_semantic_markup(value.get('description'))
self._validate_semantic_markup_options(value.get('suboptions'))
def _validate_semantic_markup_return_values(self, return_vars):
if not isinstance(return_vars, dict):
return
for key, value in return_vars.items():
self._validate_semantic_markup(value.get('description'))
self._validate_semantic_markup(value.get('returned'))
self._validate_semantic_markup_return_values(value.get('contains'))
def _validate_all_semantic_markup(self, docs, return_docs):
if not isinstance(docs, dict):
docs = {}
if not isinstance(return_docs, dict):
return_docs = {}
self._all_options = set()
self._all_return_values = set()
self._validate_semantic_markup_collect(self._all_options, 'suboptions', docs.get('options'), [[]])
self._validate_semantic_markup_collect(self._all_return_values, 'contains', return_docs, [[]])
for string_keys in ('short_description', 'description', 'notes', 'requirements', 'todo'):
self._validate_semantic_markup(docs.get(string_keys))
if is_iterable(docs.get('seealso')):
for entry in docs.get('seealso'):
if isinstance(entry, dict):
self._validate_semantic_markup(entry.get('description'))
if isinstance(docs.get('attributes'), dict):
for entry in docs.get('attributes').values():
if isinstance(entry, dict):
for key in ('description', 'details'):
self._validate_semantic_markup(entry.get(key))
if isinstance(docs.get('deprecated'), dict):
for key in ('why', 'alternative', 'alternatives'):
self._validate_semantic_markup(docs.get('deprecated').get(key))
self._validate_semantic_markup_options(docs.get('options'))
self._validate_semantic_markup_return_values(return_docs)
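    # For example, a description containing O(no_such_option) or RV(no_such_value) for
    # this plugin would be flagged as invalid-documentation-markup by the helpers above
    # (assuming the markup resolves to the current plugin).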
def _check_version_added(self, doc, existing_doc):
version_added_raw = doc.get('version_added')
try:
collection_name = doc.get('version_added_collection')
version_added = self._create_strict_version(
str(version_added_raw or '0.0'),
collection_name=collection_name)
except ValueError as e:
version_added = version_added_raw or '0.0'
if self._is_new_module() or version_added != 'historical':
# already reported during schema validation, except:
if version_added == 'historical':
self.reporter.error(
path=self.object_path,
code='module-invalid-version-added',
msg='version_added is not a valid version number: %r. Error: %s' % (version_added, e)
)
return
if existing_doc and str(version_added_raw) != str(existing_doc.get('version_added')):
self.reporter.error(
path=self.object_path,
code='module-incorrect-version-added',
msg='version_added should be %r. Currently %r' % (existing_doc.get('version_added'), version_added_raw)
)
if not self._is_new_module():
return
should_be = '.'.join(ansible_version.split('.')[:2])
strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')
if (version_added < strict_ansible_version or
strict_ansible_version < version_added):
self.reporter.error(
path=self.object_path,
code='module-incorrect-version-added',
msg='version_added should be %r. Currently %r' % (should_be, version_added_raw)
)
def _validate_ansible_module_call(self, docs):
try:
if self._python_module():
spec, kwargs = get_py_argument_spec(self.path, self.collection)
elif self._powershell_module():
spec, kwargs = get_ps_argument_spec(self.path, self.collection)
else:
raise NotImplementedError()
except AnsibleModuleNotInitialized:
self.reporter.error(
path=self.object_path,
code='ansible-module-not-initialized',
msg="Execution of the module did not result in initialization of AnsibleModule",
)
return
except AnsibleModuleImportError as e:
self.reporter.error(
path=self.object_path,
code='import-error',
msg="Exception attempting to import module for argument_spec introspection, '%s'" % e
)
self.reporter.trace(
path=self.object_path,
tracebk=traceback.format_exc()
)
return
schema = ansible_module_kwargs_schema(self.object_name.split('.')[0], for_collection=bool(self.collection))
self._validate_docs_schema(kwargs, schema, 'AnsibleModule', 'invalid-ansiblemodule-schema')
self._validate_argument_spec(docs, spec, kwargs)
if isinstance(docs, Mapping) and isinstance(docs.get('attributes'), Mapping):
if isinstance(docs['attributes'].get('check_mode'), Mapping):
support_value = docs['attributes']['check_mode'].get('support')
if not kwargs.get('supports_check_mode', False):
if support_value != 'none':
self.reporter.error(
path=self.object_path,
code='attributes-check-mode',
msg="The module does not declare support for check mode, but the check_mode attribute's"
" support value is '%s' and not 'none'" % support_value
)
else:
if support_value not in ('full', 'partial', 'N/A'):
self.reporter.error(
path=self.object_path,
code='attributes-check-mode',
msg="The module does declare support for check mode, but the check_mode attribute's support value is '%s'" % support_value
)
if support_value in ('partial', 'N/A') and docs['attributes']['check_mode'].get('details') in (None, '', []):
self.reporter.error(
path=self.object_path,
code='attributes-check-mode-details',
msg="The module declares it does not fully support check mode, but has no details on what exactly that means"
)
def _validate_list_of_module_args(self, name, terms, spec, context):
if terms is None:
return
if not isinstance(terms, (list, tuple)):
# This is already reported by schema checking
return
for check in terms:
if not isinstance(check, (list, tuple)):
# This is already reported by schema checking
continue
bad_term = False
for term in check:
if not isinstance(term, str):
msg = name
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must contain strings in the lists or tuples; found value %r" % (term, )
self.reporter.error(
path=self.object_path,
code=name + '-type',
msg=msg,
)
bad_term = True
if bad_term:
continue
if len(set(check)) != len(check):
msg = name
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has repeated terms"
self.reporter.error(
path=self.object_path,
code=name + '-collision',
msg=msg,
)
if not set(check) <= set(spec):
msg = name
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(check).difference(set(spec))))
self.reporter.error(
path=self.object_path,
code=name + '-unknown',
msg=msg,
)
def _validate_required_if(self, terms, spec, context, module):
if terms is None:
return
if not isinstance(terms, (list, tuple)):
# This is already reported by schema checking
return
for check in terms:
if not isinstance(check, (list, tuple)) or len(check) not in [3, 4]:
# This is already reported by schema checking
continue
if len(check) == 4 and not isinstance(check[3], bool):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must have forth value omitted or of type bool; got %r" % (check[3], )
self.reporter.error(
path=self.object_path,
code='required_if-is_one_of-type',
msg=msg,
)
requirements = check[2]
if not isinstance(requirements, (list, tuple)):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must have third value (requirements) being a list or tuple; got type %r" % (requirements, )
self.reporter.error(
path=self.object_path,
code='required_if-requirements-type',
msg=msg,
)
continue
bad_term = False
for term in requirements:
if not isinstance(term, str):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must have only strings in third value (requirements); got %r" % (term, )
self.reporter.error(
path=self.object_path,
code='required_if-requirements-type',
msg=msg,
)
bad_term = True
if bad_term:
continue
if len(set(requirements)) != len(requirements):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has repeated terms in requirements"
self.reporter.error(
path=self.object_path,
code='required_if-requirements-collision',
msg=msg,
)
if not set(requirements) <= set(spec):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains terms in requirements which are not part of argument_spec: %s" % ", ".join(sorted(set(requirements).difference(set(spec))))
self.reporter.error(
path=self.object_path,
code='required_if-requirements-unknown',
msg=msg,
)
key = check[0]
if key not in spec:
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must have its key %s in argument_spec" % key
self.reporter.error(
path=self.object_path,
code='required_if-unknown-key',
msg=msg,
)
continue
if key in requirements:
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains its key %s in requirements" % key
self.reporter.error(
path=self.object_path,
code='required_if-key-in-requirements',
msg=msg,
)
value = check[1]
if value is not None:
_type = spec[key].get('type', 'str')
if callable(_type):
_type_checker = _type
else:
_type_checker = DEFAULT_TYPE_VALIDATORS.get(_type)
try:
with CaptureStd():
dummy = _type_checker(value)
except (Exception, SystemExit):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has value %r which does not fit to %s's parameter type %r" % (value, key, _type)
self.reporter.error(
path=self.object_path,
code='required_if-value-type',
msg=msg,
)
def _validate_required_by(self, terms, spec, context):
if terms is None:
return
if not isinstance(terms, Mapping):
# This is already reported by schema checking
return
for key, value in terms.items():
if isinstance(value, str):
value = [value]
if not isinstance(value, (list, tuple)):
# This is already reported by schema checking
continue
for term in value:
if not isinstance(term, str):
# This is already reported by schema checking
continue
if len(set(value)) != len(value) or key in value:
msg = "required_by"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has repeated terms"
self.reporter.error(
path=self.object_path,
code='required_by-collision',
msg=msg,
)
if not set(value) <= set(spec) or key not in spec:
msg = "required_by"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(value).difference(set(spec))))
self.reporter.error(
path=self.object_path,
code='required_by-unknown',
msg=msg,
)
def _validate_argument_spec(self, docs, spec, kwargs, context=None, last_context_spec=None):
if not self.analyze_arg_spec:
return
if docs is None:
docs = {}
if context is None:
context = []
if last_context_spec is None:
last_context_spec = kwargs
try:
if not context:
add_fragments(docs, self.object_path, fragment_loader=fragment_loader,
is_module=self.plugin_type == 'module')
except Exception:
# Cannot merge fragments
return
# Use this to access type checkers later
module = NoArgsAnsibleModule({})
self._validate_list_of_module_args('mutually_exclusive', last_context_spec.get('mutually_exclusive'), spec, context)
self._validate_list_of_module_args('required_together', last_context_spec.get('required_together'), spec, context)
self._validate_list_of_module_args('required_one_of', last_context_spec.get('required_one_of'), spec, context)
self._validate_required_if(last_context_spec.get('required_if'), spec, context, module)
self._validate_required_by(last_context_spec.get('required_by'), spec, context)
provider_args = set()
args_from_argspec = set()
deprecated_args_from_argspec = set()
doc_options = docs.get('options', {})
if doc_options is None:
doc_options = {}
for arg, data in spec.items():
restricted_argument_names = ('message', 'syslog_facility')
if arg.lower() in restricted_argument_names:
msg = "Argument '%s' in argument_spec " % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += "must not be one of %s as it is used " \
"internally by Ansible Core Engine" % (",".join(restricted_argument_names))
self.reporter.error(
path=self.object_path,
code='invalid-argument-name',
msg=msg,
)
continue
if 'aliases' in data:
for al in data['aliases']:
if al.lower() in restricted_argument_names:
msg = "Argument alias '%s' in argument_spec " % al
if context:
msg += " found in %s" % " -> ".join(context)
msg += "must not be one of %s as it is used " \
"internally by Ansible Core Engine" % (",".join(restricted_argument_names))
self.reporter.error(
path=self.object_path,
code='invalid-argument-name',
msg=msg,
)
continue
            # Could this be a place where secrets are leaked?
# If it is type: path we know it's not a secret key as it's a file path.
# If it is type: bool it is more likely a flag indicating that something is secret, than an actual secret.
if all((
data.get('no_log') is None, is_potential_secret_option(arg),
data.get('type') not in ("path", "bool"), data.get('choices') is None,
)):
msg = "Argument '%s' in argument_spec could be a secret, though doesn't have `no_log` set" % arg
if context:
msg += " found in %s" % " -> ".join(context)
self.reporter.error(
path=self.object_path,
code='no-log-needed',
msg=msg,
)
if not isinstance(data, dict):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must be a dictionary/hash when used"
self.reporter.error(
path=self.object_path,
code='invalid-argument-spec',
msg=msg,
)
continue
removed_at_date = data.get('removed_at_date', None)
if removed_at_date is not None:
try:
if parse_isodate(removed_at_date, allow_date=False) < datetime.date.today():
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has a removed_at_date '%s' before today" % removed_at_date
self.reporter.error(
path=self.object_path,
code='deprecated-date',
msg=msg,
)
except ValueError:
# This should only happen when removed_at_date is not in ISO format. Since schema
# validation already reported this as an error, don't report it a second time.
pass
deprecated_aliases = data.get('deprecated_aliases', None)
if deprecated_aliases is not None:
for deprecated_alias in deprecated_aliases:
if 'name' in deprecated_alias and 'date' in deprecated_alias:
try:
date = deprecated_alias['date']
if parse_isodate(date, allow_date=False) < datetime.date.today():
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has deprecated aliases '%s' with removal date '%s' before today" % (
deprecated_alias['name'], deprecated_alias['date'])
self.reporter.error(
path=self.object_path,
code='deprecated-date',
msg=msg,
)
except ValueError:
# This should only happen when deprecated_alias['date'] is not in ISO format. Since
# schema validation already reported this as an error, don't report it a second
# time.
pass
has_version = False
if self.collection and self.collection_version is not None:
compare_version = self.collection_version
version_of_what = "this collection (%s)" % self.collection_version_str
code_prefix = 'collection'
has_version = True
elif not self.collection:
compare_version = LOOSE_ANSIBLE_VERSION
version_of_what = "Ansible (%s)" % ansible_version
code_prefix = 'ansible'
has_version = True
removed_in_version = data.get('removed_in_version', None)
if removed_in_version is not None:
try:
collection_name = data.get('removed_from_collection')
removed_in = self._create_version(str(removed_in_version), collection_name=collection_name)
if has_version and collection_name == self.collection_name and compare_version >= removed_in:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has a deprecated removed_in_version %r," % removed_in_version
msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what
self.reporter.error(
path=self.object_path,
code=code_prefix + '-deprecated-version',
msg=msg,
)
except ValueError as e:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has an invalid removed_in_version number %r: %s" % (removed_in_version, e)
self.reporter.error(
path=self.object_path,
code='invalid-deprecated-version',
msg=msg,
)
except TypeError:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has an invalid removed_in_version number %r: " % (removed_in_version, )
msg += " error while comparing to version of %s" % version_of_what
self.reporter.error(
path=self.object_path,
code='invalid-deprecated-version',
msg=msg,
)
if deprecated_aliases is not None:
for deprecated_alias in deprecated_aliases:
if 'name' in deprecated_alias and 'version' in deprecated_alias:
try:
collection_name = deprecated_alias.get('collection_name')
version = self._create_version(str(deprecated_alias['version']), collection_name=collection_name)
if has_version and collection_name == self.collection_name and compare_version >= version:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has deprecated aliases '%s' with removal in version %r," % (
deprecated_alias['name'], deprecated_alias['version'])
msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what
self.reporter.error(
path=self.object_path,
code=code_prefix + '-deprecated-version',
msg=msg,
)
except ValueError as e:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has deprecated aliases '%s' with invalid removal version %r: %s" % (
deprecated_alias['name'], deprecated_alias['version'], e)
self.reporter.error(
path=self.object_path,
code='invalid-deprecated-version',
msg=msg,
)
except TypeError:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has deprecated aliases '%s' with invalid removal version %r:" % (
deprecated_alias['name'], deprecated_alias['version'])
msg += " error while comparing to version of %s" % version_of_what
self.reporter.error(
path=self.object_path,
code='invalid-deprecated-version',
msg=msg,
)
aliases = data.get('aliases', [])
if arg in aliases:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is specified as its own alias"
self.reporter.error(
path=self.object_path,
code='parameter-alias-self',
msg=msg
)
if len(aliases) > len(set(aliases)):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has at least one alias specified multiple times in aliases"
self.reporter.error(
path=self.object_path,
code='parameter-alias-repeated',
msg=msg
)
if not context and arg == 'state':
bad_states = set(['list', 'info', 'get']) & set(data.get('choices', set()))
for bad_state in bad_states:
self.reporter.error(
path=self.object_path,
code='parameter-state-invalid-choice',
msg="Argument 'state' includes the value '%s' as a choice" % bad_state)
if not data.get('removed_in_version', None) and not data.get('removed_at_date', None):
args_from_argspec.add(arg)
args_from_argspec.update(aliases)
else:
deprecated_args_from_argspec.add(arg)
deprecated_args_from_argspec.update(aliases)
if arg == 'provider' and self.object_path.startswith('lib/ansible/modules/network/'):
if data.get('options') is not None and not isinstance(data.get('options'), Mapping):
self.reporter.error(
path=self.object_path,
code='invalid-argument-spec-options',
msg="Argument 'options' in argument_spec['provider'] must be a dictionary/hash when used",
)
elif data.get('options'):
# Record provider options from network modules, for later comparison
for provider_arg, provider_data in data.get('options', {}).items():
provider_args.add(provider_arg)
provider_args.update(provider_data.get('aliases', []))
if data.get('required') and data.get('default', object) != object:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is marked as required but specifies a default. Arguments with a" \
" default should not be marked as required"
self.reporter.error(
path=self.object_path,
code='no-default-for-required-parameter',
msg=msg
)
if arg in provider_args:
# Provider args are being removed from network module top level
# don't validate docs<->arg_spec checks below
continue
_type = data.get('type', 'str')
if callable(_type):
_type_checker = _type
else:
_type_checker = DEFAULT_TYPE_VALIDATORS.get(_type)
_elements = data.get('elements')
if (_type == 'list') and not _elements:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines type as list but elements is not defined"
self.reporter.error(
path=self.object_path,
code='parameter-list-no-elements',
msg=msg
)
if _elements:
if not callable(_elements):
DEFAULT_TYPE_VALIDATORS.get(_elements)
if _type != 'list':
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines elements as %s but it is valid only when value of parameter type is list" % _elements
self.reporter.error(
path=self.object_path,
code='parameter-invalid-elements',
msg=msg
)
arg_default = None
if 'default' in data and data['default'] is not None:
try:
with CaptureStd():
arg_default = _type_checker(data['default'])
except (Exception, SystemExit):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines default as (%r) but this is incompatible with parameter type %r" % (data['default'], _type)
self.reporter.error(
path=self.object_path,
code='incompatible-default-type',
msg=msg
)
continue
doc_options_args = []
for alias in sorted(set([arg] + list(aliases))):
if alias in doc_options:
doc_options_args.append(alias)
if len(doc_options_args) == 0:
# Undocumented arguments will be handled later (search for undocumented-parameter)
doc_options_arg = {}
doc_option_name = None
else:
doc_option_name = doc_options_args[0]
doc_options_arg = doc_options[doc_option_name]
if len(doc_options_args) > 1:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " with aliases %s is documented multiple times, namely as %s" % (
", ".join([("'%s'" % alias) for alias in aliases]),
", ".join([("'%s'" % alias) for alias in doc_options_args])
)
self.reporter.error(
path=self.object_path,
code='parameter-documented-multiple-times',
msg=msg
)
all_aliases = set(aliases + [arg])
all_docs_aliases = set(
([doc_option_name] if doc_option_name is not None else [])
+
(doc_options_arg['aliases'] if isinstance(doc_options_arg.get('aliases'), list) else [])
)
if all_docs_aliases and all_aliases != all_docs_aliases:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has names %s, but its documentation has names %s" % (
", ".join([("'%s'" % alias) for alias in sorted(all_aliases)]),
", ".join([("'%s'" % alias) for alias in sorted(all_docs_aliases)])
)
self.reporter.error(
path=self.object_path,
code='parameter-documented-aliases-differ',
msg=msg
)
try:
doc_default = None
if 'default' in doc_options_arg and doc_options_arg['default'] is not None:
with CaptureStd():
doc_default = _type_checker(doc_options_arg['default'])
except (Exception, SystemExit):
msg = "Argument '%s' in documentation" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines default as (%r) but this is incompatible with parameter type %r" % (doc_options_arg.get('default'), _type)
self.reporter.error(
path=self.object_path,
code='doc-default-incompatible-type',
msg=msg
)
continue
if arg_default != doc_default:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines default as (%r) but documentation defines default as (%r)" % (arg_default, doc_default)
self.reporter.error(
path=self.object_path,
code='doc-default-does-not-match-spec',
msg=msg
)
doc_type = doc_options_arg.get('type')
if 'type' in data and data['type'] is not None:
if doc_type is None:
if not arg.startswith('_'): # hidden parameter, for example _raw_params
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines type as %r but documentation doesn't define type" % (data['type'])
self.reporter.error(
path=self.object_path,
code='parameter-type-not-in-doc',
msg=msg
)
elif data['type'] != doc_type:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines type as %r but documentation defines type as %r" % (data['type'], doc_type)
self.reporter.error(
path=self.object_path,
code='doc-type-does-not-match-spec',
msg=msg
)
else:
if doc_type is None:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " uses default type ('str') but documentation doesn't define type"
self.reporter.error(
path=self.object_path,
code='doc-missing-type',
msg=msg
)
elif doc_type != 'str':
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " implies type as 'str' but documentation defines as %r" % doc_type
self.reporter.error(
path=self.object_path,
code='implied-parameter-type-mismatch',
msg=msg
)
doc_choices = []
try:
for choice in doc_options_arg.get('choices', []):
try:
with CaptureStd():
doc_choices.append(_type_checker(choice))
except (Exception, SystemExit):
msg = "Argument '%s' in documentation" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type)
self.reporter.error(
path=self.object_path,
code='doc-choices-incompatible-type',
msg=msg
)
raise StopIteration()
except StopIteration:
continue
arg_choices = []
try:
for choice in data.get('choices', []):
try:
with CaptureStd():
arg_choices.append(_type_checker(choice))
except (Exception, SystemExit):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type)
self.reporter.error(
path=self.object_path,
code='incompatible-choices',
msg=msg
)
raise StopIteration()
except StopIteration:
continue
if not compare_unordered_lists(arg_choices, doc_choices):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines choices as (%r) but documentation defines choices as (%r)" % (arg_choices, doc_choices)
self.reporter.error(
path=self.object_path,
code='doc-choices-do-not-match-spec',
msg=msg
)
doc_required = doc_options_arg.get('required', False)
data_required = data.get('required', False)
if (doc_required or data_required) and not (doc_required and data_required):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
if doc_required:
msg += " is not required, but is documented as being required"
else:
msg += " is required, but is not documented as being required"
self.reporter.error(
path=self.object_path,
code='doc-required-mismatch',
msg=msg
)
doc_elements = doc_options_arg.get('elements', None)
doc_type = doc_options_arg.get('type', 'str')
data_elements = data.get('elements', None)
if (doc_elements or data_elements) and not (doc_elements == data_elements):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
if data_elements:
msg += " specifies elements as %s," % data_elements
else:
msg += " does not specify elements,"
if doc_elements:
msg += "but elements is documented as being %s" % doc_elements
else:
msg += "but elements is not documented"
self.reporter.error(
path=self.object_path,
code='doc-elements-mismatch',
msg=msg
)
spec_suboptions = data.get('options')
doc_suboptions = doc_options_arg.get('suboptions', {})
if spec_suboptions:
if not doc_suboptions:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has sub-options but documentation does not define it"
self.reporter.error(
path=self.object_path,
code='missing-suboption-docs',
msg=msg
)
self._validate_argument_spec({'options': doc_suboptions}, spec_suboptions, kwargs,
context=context + [arg], last_context_spec=data)
for arg in args_from_argspec:
if not str(arg).isidentifier():
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is not a valid python identifier"
self.reporter.error(
path=self.object_path,
code='parameter-invalid',
msg=msg
)
if docs:
args_from_docs = set()
for arg, data in doc_options.items():
args_from_docs.add(arg)
args_from_docs.update(data.get('aliases', []))
args_missing_from_docs = args_from_argspec.difference(args_from_docs)
docs_missing_from_args = args_from_docs.difference(args_from_argspec | deprecated_args_from_argspec)
for arg in args_missing_from_docs:
if arg in provider_args:
# Provider args are being removed from network module top level
# So they are likely not documented on purpose
continue
msg = "Argument '%s'" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is listed in the argument_spec, but not documented in the module documentation"
self.reporter.error(
path=self.object_path,
code='undocumented-parameter',
msg=msg
)
for arg in docs_missing_from_args:
msg = "Argument '%s'" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is listed in DOCUMENTATION.options, but not accepted by the module argument_spec"
self.reporter.error(
path=self.object_path,
code='nonexistent-parameter-documented',
msg=msg
)
def _check_for_new_args(self, doc):
if not self.base_module:
return
with CaptureStd():
try:
existing_doc, dummy_examples, dummy_return, existing_metadata = get_docstring(
self.base_module, fragment_loader, verbose=True, collection_name=self.collection_name,
is_module=self.plugin_type == 'module')
existing_options = existing_doc.get('options', {}) or {}
except AssertionError:
fragment = doc['extends_documentation_fragment']
self.reporter.warning(
path=self.object_path,
code='missing-existing-doc-fragment',
msg='Pre-existing DOCUMENTATION fragment missing: %s' % fragment
)
return
except Exception as e:
self.reporter.warning_trace(
path=self.object_path,
tracebk=e
)
self.reporter.warning(
path=self.object_path,
code='unknown-doc-fragment',
msg=('Unknown pre-existing DOCUMENTATION error, see TRACE. Submodule refs may need updated')
)
return
try:
mod_collection_name = existing_doc.get('version_added_collection')
mod_version_added = self._create_strict_version(
str(existing_doc.get('version_added', '0.0')),
collection_name=mod_collection_name)
except ValueError:
mod_collection_name = self.collection_name
mod_version_added = self._create_strict_version('0.0')
options = doc.get('options', {}) or {}
should_be = '.'.join(ansible_version.split('.')[:2])
strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')
for option, details in options.items():
try:
names = [option] + details.get('aliases', [])
except (TypeError, AttributeError):
# Reporting of this syntax error will be handled by schema validation.
continue
if any(name in existing_options for name in names):
# The option already existed. Make sure version_added didn't change.
for name in names:
existing_collection_name = existing_options.get(name, {}).get('version_added_collection')
existing_version = existing_options.get(name, {}).get('version_added')
if existing_version:
break
current_collection_name = details.get('version_added_collection')
current_version = details.get('version_added')
if current_collection_name != existing_collection_name:
self.reporter.error(
path=self.object_path,
code='option-incorrect-version-added-collection',
msg=('version_added for existing option (%s) should '
'belong to collection %r. Currently belongs to %r' %
(option, current_collection_name, existing_collection_name))
)
elif str(current_version) != str(existing_version):
self.reporter.error(
path=self.object_path,
code='option-incorrect-version-added',
msg=('version_added for existing option (%s) should '
'be %r. Currently %r' %
(option, existing_version, current_version))
)
continue
try:
collection_name = details.get('version_added_collection')
version_added = self._create_strict_version(
str(details.get('version_added', '0.0')),
collection_name=collection_name)
except ValueError as e:
# already reported during schema validation
continue
builtin = self.collection_name == 'ansible.builtin' and collection_name in ('ansible.builtin', None)
if not builtin and collection_name != self.collection_name:
continue
if (strict_ansible_version != mod_version_added and
(version_added < strict_ansible_version or
strict_ansible_version < version_added)):
self.reporter.error(
path=self.object_path,
code='option-incorrect-version-added',
msg=('version_added for new option (%s) should '
'be %r. Currently %r' %
(option, should_be, version_added))
)
return existing_doc
@staticmethod
def is_on_rejectlist(path):
base_name = os.path.basename(path)
file_name = os.path.splitext(base_name)[0]
if file_name.startswith('_') and os.path.islink(path):
return True
if not frozenset((base_name, file_name)).isdisjoint(ModuleValidator.REJECTLIST):
return True
for pat in ModuleValidator.REJECTLIST_PATTERNS:
if fnmatch(base_name, pat):
return True
return False
def validate(self):
super(ModuleValidator, self).validate()
if not self._python_module() and not self._powershell_module() and not self._sidecar_doc():
self.reporter.error(
path=self.object_path,
code='invalid-extension',
msg=('Official Ansible modules must have a .py '
'extension for python modules or a .ps1 '
'for powershell modules')
)
if self._python_module() and self.ast is None:
self.reporter.error(
path=self.object_path,
code='python-syntax-error',
msg='Python SyntaxError while parsing module'
)
try:
compile(self.text, self.path, 'exec')
except Exception:
self.reporter.trace(
path=self.object_path,
tracebk=traceback.format_exc()
)
return
end_of_deprecation_should_be_removed_only = False
doc_info = None
if self._python_module() or self._sidecar_doc():
doc_info, docs = self._validate_docs()
# See if current version => deprecated.removed_in, ie, should be docs only
if docs and docs.get('deprecated', False):
if 'removed_in' in docs['deprecated']:
removed_in = None
collection_name = docs['deprecated'].get('removed_from_collection')
version = docs['deprecated']['removed_in']
if collection_name != self.collection_name:
self.reporter.error(
path=self.object_path,
code='invalid-module-deprecation-source',
msg=('The deprecation version for a module must be added in this collection')
)
else:
try:
removed_in = self._create_strict_version(str(version), collection_name=collection_name)
except ValueError as e:
self.reporter.error(
path=self.object_path,
code='invalid-module-deprecation-version',
msg=('The deprecation version %r cannot be parsed: %s' % (version, e))
)
if removed_in:
if not self.collection:
strict_ansible_version = self._create_strict_version(
'.'.join(ansible_version.split('.')[:2]), self.collection_name)
end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in
if end_of_deprecation_should_be_removed_only:
self.reporter.error(
path=self.object_path,
code='ansible-deprecated-module',
msg='Module is marked for removal in version %s of Ansible when the current version is %s' % (
version, ansible_version),
)
elif self.collection_version:
strict_ansible_version = self.collection_version
end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in
if end_of_deprecation_should_be_removed_only:
self.reporter.error(
path=self.object_path,
code='collection-deprecated-module',
msg='Module is marked for removal in version %s of this collection when the current version is %s' % (
version, self.collection_version_str),
)
# handle deprecation by date
if 'removed_at_date' in docs['deprecated']:
try:
removed_at_date = docs['deprecated']['removed_at_date']
if parse_isodate(removed_at_date, allow_date=True) < datetime.date.today():
msg = "Module's deprecated.removed_at_date date '%s' is before today" % removed_at_date
self.reporter.error(path=self.object_path, code='deprecated-date', msg=msg)
except ValueError:
# This happens if the date cannot be parsed. This is already checked by the schema.
pass
if self._python_module() and not self._just_docs() and not end_of_deprecation_should_be_removed_only:
if self.plugin_type == 'module':
self._validate_ansible_module_call(docs)
self._check_for_sys_exit()
self._find_rejectlist_imports()
if self.plugin_type == 'module':
self._find_module_utils()
self._find_has_import()
if doc_info:
first_callable = self._get_first_callable() or 1000000 # use a bogus "high" line number if no callable exists
self._ensure_imports_below_docs(doc_info, first_callable)
if self.plugin_type == 'module':
self._check_for_subprocess()
self._check_for_os_call()
if self._powershell_module():
self._validate_ps_replacers()
docs_path = self._find_ps_docs_file()
# We can only validate PowerShell arg spec if it is using the new Ansible.Basic.AnsibleModule util
pattern = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*Ansible\.Basic'
if re.search(pattern, self.text) and self.object_name not in self.PS_ARG_VALIDATE_REJECTLIST:
with ModuleValidator(docs_path, git_cache=self.git_cache) as docs_mv:
docs = docs_mv._validate_docs()[1]
self._validate_ansible_module_call(docs)
self._check_gpl3_header()
if not self._just_docs() and not self._sidecar_doc() and not end_of_deprecation_should_be_removed_only:
if self.plugin_type == 'module':
self._check_interpreter()
class PythonPackageValidator(Validator):
REJECTLIST_FILES = frozenset(('__pycache__',))
def __init__(self, path, reporter=None):
super(PythonPackageValidator, self).__init__(reporter=reporter or Reporter())
self.path = path
self.basename = os.path.basename(path)
@property
def object_name(self):
return self.basename
@property
def object_path(self):
return self.path
def validate(self):
super(PythonPackageValidator, self).validate()
if self.basename in self.REJECTLIST_FILES:
return
init_file = os.path.join(self.path, '__init__.py')
if not os.path.exists(init_file):
self.reporter.error(
path=self.object_path,
code='subdirectory-missing-init',
msg='Ansible module subdirectories must contain an __init__.py'
)
class GitCache(metaclass=abc.ABCMeta):
"""Base class for access to original files."""
@abc.abstractmethod
def get_original_path(self, path: str) -> str | None:
"""Return the path to the original version of the specified file, or None if there isn't one."""
@abc.abstractmethod
def is_new(self, path: str) -> bool | None:
"""Return True if the content is new, False if it is not and None if the information is not available."""
@staticmethod
def create(original_plugins: str | None, plugin_type: str) -> GitCache:
return CoreGitCache(original_plugins, plugin_type) if original_plugins else NoOpGitCache()
class CoreGitCache(GitCache):
"""Provides access to original files when testing core."""
def __init__(self, original_plugins: str | None, plugin_type: str) -> None:
super().__init__()
self.original_plugins = original_plugins
rel_path = 'lib/ansible/modules/' if plugin_type == 'module' else f'lib/ansible/plugins/{plugin_type}/'
head_tree = self._find_files(rel_path)
head_aliased_modules = set()
for path in head_tree:
filename = os.path.basename(path)
if filename.startswith('_') and filename != '__init__.py':
if os.path.islink(path):
head_aliased_modules.add(os.path.basename(os.path.realpath(path)))
self._head_aliased_modules = head_aliased_modules
def get_original_path(self, path: str) -> str | None:
"""Return the path to the original version of the specified file, or None if there isn't one."""
path = os.path.join(self.original_plugins, path)
if not os.path.exists(path):
path = None
return path
def is_new(self, path: str) -> bool | None:
"""Return True if the content is new, False if it is not and None if the information is not available."""
if os.path.basename(path).startswith('_'):
return False
if os.path.basename(path) in self._head_aliased_modules:
return False
return not self.get_original_path(path)
@staticmethod
def _find_files(path: str) -> list[str]:
"""Return a list of files found in the specified directory."""
paths = []
for (dir_path, dir_names, file_names) in os.walk(path):
for file_name in file_names:
paths.append(os.path.join(dir_path, file_name))
return sorted(paths)
class NoOpGitCache(GitCache):
"""Provides a no-op interface for access to original files."""
def get_original_path(self, path: str) -> str | None:
"""Return the path to the original version of the specified file, or None if there isn't one."""
return None
def is_new(self, path: str) -> bool | None:
"""Return True if the content is new, False if it is not and None if the information is not available."""
return None
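# --- Illustrative sketch (not part of the original file) ---
# Shows how the GitCache.create() factory above selects a cache implementation;
# the module path used below is an assumption chosen only for demonstration.
def _demo_git_cache():
    """Illustrative only: without original plugins, the no-op cache is returned."""
    cache = GitCache.create(None, 'module')
    assert isinstance(cache, NoOpGitCache)
    # the no-op cache cannot tell whether content is new, so it reports None
    assert cache.is_new('lib/ansible/modules/ping.py') is None
    return cache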
def re_compile(value):
"""
Argparse expects things to raise TypeError, re.compile raises an re.error
exception
This function is a shorthand to convert the re.error exception to a
TypeError
"""
try:
return re.compile(value)
except re.error as e:
raise TypeError(e)
def run():
parser = argparse.ArgumentParser(prog="validate-modules")
parser.add_argument('plugins', nargs='+',
help='Path to module/plugin or module/plugin directory')
parser.add_argument('-w', '--warnings', help='Show warnings',
action='store_true')
parser.add_argument('--exclude', help='RegEx exclusion pattern',
type=re_compile)
parser.add_argument('--arg-spec', help='Analyze module argument spec',
action='store_true', default=False)
parser.add_argument('--format', choices=['json', 'plain'], default='plain',
help='Output format. Default: "%(default)s"')
parser.add_argument('--output', default='-',
help='Output location, use "-" for stdout. '
'Default "%(default)s"')
parser.add_argument('--collection',
help='Specifies the path to the collection, when '
'validating files within a collection. Ensure '
'that ANSIBLE_COLLECTIONS_PATH is set so the '
'contents of the collection can be located')
parser.add_argument('--collection-version',
help='The collection\'s version number used to check '
'deprecations')
parser.add_argument('--plugin-type',
default='module',
help='The plugin type to validate. Defaults to %(default)s')
parser.add_argument('--original-plugins')
args = parser.parse_args()
args.plugins = [m.rstrip('/') for m in args.plugins]
reporter = Reporter()
git_cache = GitCache.create(args.original_plugins, args.plugin_type)
check_dirs = set()
routing = None
if args.collection:
routing_file = 'meta/runtime.yml'
# Load meta/runtime.yml if it exists, as it may contain deprecation information
if os.path.isfile(routing_file):
try:
with open(routing_file) as f:
routing = yaml.safe_load(f)
except yaml.error.MarkedYAMLError as ex:
print('%s:%d:%d: YAML load failed: %s' % (routing_file, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
except Exception as ex: # pylint: disable=broad-except
print('%s:%d:%d: YAML load failed: %s' % (routing_file, 0, 0, re.sub(r'\s+', ' ', str(ex))))
for plugin in args.plugins:
if os.path.isfile(plugin):
path = plugin
if args.exclude and args.exclude.search(path):
continue
if ModuleValidator.is_on_rejectlist(path):
continue
with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version,
analyze_arg_spec=args.arg_spec,
git_cache=git_cache, reporter=reporter, routing=routing,
plugin_type=args.plugin_type) as mv1:
mv1.validate()
check_dirs.add(os.path.dirname(path))
for root, dirs, files in os.walk(plugin):
basedir = root[len(plugin) + 1:].split('/', 1)[0]
if basedir in REJECTLIST_DIRS:
continue
for dirname in dirs:
if root == plugin and dirname in REJECTLIST_DIRS:
continue
path = os.path.join(root, dirname)
if args.exclude and args.exclude.search(path):
continue
check_dirs.add(path)
for filename in files:
path = os.path.join(root, filename)
if args.exclude and args.exclude.search(path):
continue
if ModuleValidator.is_on_rejectlist(path):
continue
with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version,
analyze_arg_spec=args.arg_spec,
git_cache=git_cache, reporter=reporter, routing=routing,
plugin_type=args.plugin_type) as mv2:
mv2.validate()
if not args.collection and args.plugin_type == 'module':
for path in sorted(check_dirs):
pv = PythonPackageValidator(path, reporter=reporter)
pv.validate()
if args.format == 'plain':
sys.exit(reporter.plain(warnings=args.warnings, output=args.output))
else:
sys.exit(reporter.json(warnings=args.warnings, output=args.output))
def main():
try:
run()
except KeyboardInterrupt:
pass
| 115,791 | Python | .py | 2,347 | 32.647209 | 157 | 0.512503 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,277 | collection_detail.py | ansible_ansible/test/lib/ansible_test/_util/controller/tools/collection_detail.py |
"""Retrieve collection detail."""
from __future__ import annotations
import json
import os
import re
import sys
import yaml
# See semantic versioning specification (https://semver.org/)
NUMERIC_IDENTIFIER = r'(?:0|[1-9][0-9]*)'
ALPHANUMERIC_IDENTIFIER = r'(?:[0-9]*[a-zA-Z-][a-zA-Z0-9-]*)'
PRE_RELEASE_IDENTIFIER = r'(?:' + NUMERIC_IDENTIFIER + r'|' + ALPHANUMERIC_IDENTIFIER + r')'
BUILD_IDENTIFIER = r'[a-zA-Z0-9-]+' # equivalent to r'(?:[0-9]+|' + ALPHANUMERIC_IDENTIFIER + r')'
VERSION_CORE = NUMERIC_IDENTIFIER + r'\.' + NUMERIC_IDENTIFIER + r'\.' + NUMERIC_IDENTIFIER
PRE_RELEASE = r'(?:-' + PRE_RELEASE_IDENTIFIER + r'(?:\.' + PRE_RELEASE_IDENTIFIER + r')*)?'
BUILD = r'(?:\+' + BUILD_IDENTIFIER + r'(?:\.' + BUILD_IDENTIFIER + r')*)?'
SEMVER_REGULAR_EXPRESSION = r'^' + VERSION_CORE + PRE_RELEASE + BUILD + r'$'
def validate_version(version):
"""Raise exception if the provided version is not None or a valid semantic version."""
if version is None:
return
if not re.match(SEMVER_REGULAR_EXPRESSION, version):
raise Exception('Invalid version number "{0}". Collection version numbers must '
'follow semantic versioning (https://semver.org/).'.format(version))
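# --- Illustrative sketch (not part of the original file) ---
# The sample version strings below are assumptions chosen to show what the
# semantic versioning pattern above accepts and rejects.
def _demo_semver_pattern():
    """Illustrative only: classify a few sample version strings."""
    for sample in ('1.2.3', '1.2.3-beta.1+build.5', '1.2', 'v1.2.3'):
        accepted = bool(re.match(SEMVER_REGULAR_EXPRESSION, sample))
        print('%s -> %s' % (sample, 'valid' if accepted else 'invalid'))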
def read_manifest_json(collection_path):
"""Return collection information from the MANIFEST.json file."""
manifest_path = os.path.join(collection_path, 'MANIFEST.json')
if not os.path.exists(manifest_path):
return None
try:
with open(manifest_path, encoding='utf-8') as manifest_file:
manifest = json.load(manifest_file)
collection_info = manifest.get('collection_info') or {}
result = dict(
version=collection_info.get('version'),
)
validate_version(result['version'])
except Exception as ex: # pylint: disable=broad-except
raise Exception('{0}: {1}'.format(os.path.basename(manifest_path), ex)) from None
return result
def read_galaxy_yml(collection_path):
"""Return collection information from the galaxy.yml file."""
galaxy_path = os.path.join(collection_path, 'galaxy.yml')
if not os.path.exists(galaxy_path):
return None
try:
with open(galaxy_path, encoding='utf-8') as galaxy_file:
galaxy = yaml.safe_load(galaxy_file)
result = dict(
version=galaxy.get('version'),
)
validate_version(result['version'])
except Exception as ex: # pylint: disable=broad-except
raise Exception('{0}: {1}'.format(os.path.basename(galaxy_path), ex)) from None
return result
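# --- Illustrative sketch (not part of the original file) ---
# Writes a throwaway galaxy.yml to show the lookup order used by main(); the
# collection layout and version below are assumptions used only for demonstration.
def _demo_collection_detail():
    """Illustrative only: fall back to galaxy.yml when MANIFEST.json is absent."""
    import tempfile
    collection_path = tempfile.mkdtemp(prefix='demo-collection-')
    with open(os.path.join(collection_path, 'galaxy.yml'), 'w', encoding='utf-8') as handle:
        handle.write('namespace: demo\nname: example\nversion: 1.0.0\n')
    # read_manifest_json() returns None here, so the galaxy.yml result is used
    return read_manifest_json(collection_path) or read_galaxy_yml(collection_path)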
def main():
"""Retrieve collection detail."""
collection_path = sys.argv[1]
try:
result = read_manifest_json(collection_path) or read_galaxy_yml(collection_path) or {}
except Exception as ex: # pylint: disable=broad-except
result = dict(
error='{0}'.format(ex),
)
print(json.dumps(result))
if __name__ == '__main__':
main()
| 2,995 | Python | .py | 66 | 39.227273 | 99 | 0.652878 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,278 | yaml_to_json.py | ansible_ansible/test/lib/ansible_test/_util/controller/tools/yaml_to_json.py |
"""Read YAML from stdin and write JSON to stdout."""
from __future__ import annotations
import datetime
import json
import sys
from yaml import load
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
# unique ISO date marker matching the one present in importer.py
ISO_DATE_MARKER = 'isodate:f23983df-f3df-453c-9904-bcd08af468cc:'
def default(value):
"""Custom default serializer which supports datetime.date types."""
if isinstance(value, datetime.date):
return '%s%s' % (ISO_DATE_MARKER, value.isoformat())
raise TypeError('cannot serialize type: %s' % type(value))
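# --- Illustrative sketch (not part of the original file) ---
# Shows how the custom serializer above marks dates for the importer; the sample
# date is an assumption used only for demonstration.
def _demo_default_serializer():
    """Illustrative only: serialize a date the same way the stdin/stdout filter does."""
    sample = {'deprecated': datetime.date(2024, 1, 31)}
    # produces '{"deprecated": "isodate:f23983df-f3df-453c-9904-bcd08af468cc:2024-01-31"}'
    return json.dumps(sample, default=default)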
json.dump(load(sys.stdin, Loader=SafeLoader), sys.stdout, default=default)
| 725 | Python | .py | 18 | 37.222222 | 74 | 0.765043 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,279 | compile.py | ansible_ansible/test/lib/ansible_test/_util/target/sanity/compile/compile.py |
"""Python syntax checker with lint friendly output."""
from __future__ import annotations
import sys
ENCODING = 'utf-8'
ERRORS = 'replace'
def main():
"""Main program entry point."""
for path in sys.argv[1:] or sys.stdin.read().splitlines():
compile_source(path)
def compile_source(path):
"""Compile the specified source file, printing an error if one occurs."""
with open(path, 'rb') as source_fd:
source = source_fd.read()
try:
compile(source, path, 'exec', dont_inherit=True)
except SyntaxError as ex:
extype, message, lineno, offset = type(ex), ex.text, ex.lineno, ex.offset
except BaseException as ex: # pylint: disable=broad-except
extype, message, lineno, offset = type(ex), str(ex), 0, 0
else:
return
result = "%s:%d:%d: %s: %s" % (path, lineno, offset, extype.__name__, safe_message(message))
print(result)
def safe_message(value):
"""Given an input value as str or bytes, return the first non-empty line as str, ensuring it can be round-tripped as UTF-8."""
if isinstance(value, str):
value = value.encode(ENCODING, ERRORS)
value = value.decode(ENCODING, ERRORS)
value = value.strip().splitlines()[0].strip()
return value
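# --- Illustrative sketch (not part of the original file) ---
# Writes a throwaway file containing a syntax error to show the lint-friendly
# "<path>:<line>:<offset>: <error>: <message>" output produced above; the file
# contents are an assumption used only for demonstration.
def _demo_compile_source():
    """Illustrative only: report a syntax error found in a generated temporary file."""
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as handle:
        handle.write('def broken(:\n    pass\n')
    compile_source(handle.name)  # prints something like "<path>:1:12: SyntaxError: def broken(:"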
if __name__ == '__main__':
main()
| 1,302 | Python | .py | 32 | 35.46875 | 130 | 0.658964 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,280 | importer.py | ansible_ansible/test/lib/ansible_test/_util/target/sanity/import/importer.py |
"""Import the given python module(s) and report error(s) encountered."""
from __future__ import annotations
def main():
"""
Main program function used to isolate globals from imported code.
Changes to globals in imported modules on Python 2.x will overwrite our own globals.
"""
import os
import sys
import types
# preload an empty ansible._vendor module to prevent use of any embedded modules during the import test
vendor_module_name = 'ansible._vendor'
vendor_module = types.ModuleType(vendor_module_name)
vendor_module.__file__ = os.path.join(os.path.sep.join(os.path.abspath(__file__).split(os.path.sep)[:-8]), 'lib/ansible/_vendor/__init__.py')
vendor_module.__path__ = []
vendor_module.__package__ = vendor_module_name
sys.modules[vendor_module_name] = vendor_module
import ansible
import contextlib
import datetime
import json
import re
import runpy
import subprocess
import traceback
import warnings
ansible_path = os.path.dirname(os.path.dirname(ansible.__file__))
temp_path = os.environ['SANITY_TEMP_PATH'] + os.path.sep
external_python = os.environ.get('SANITY_EXTERNAL_PYTHON')
yaml_to_json_path = os.environ.get('SANITY_YAML_TO_JSON')
collection_full_name = os.environ.get('SANITY_COLLECTION_FULL_NAME')
collection_root = os.environ.get('ANSIBLE_COLLECTIONS_PATH')
import_type = os.environ.get('SANITY_IMPORTER_TYPE')
try:
# noinspection PyCompatibility
from importlib import import_module
except ImportError:
def import_module(name, package=None): # type: (str, str | None) -> types.ModuleType
assert package is None
__import__(name)
return sys.modules[name]
from io import BytesIO, TextIOWrapper
try:
from importlib.util import spec_from_loader, module_from_spec
from importlib.machinery import SourceFileLoader, ModuleSpec # pylint: disable=unused-import
except ImportError:
has_py3_loader = False
else:
has_py3_loader = True
if collection_full_name:
# allow importing code from collections when testing a collection
from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native, text_type
# noinspection PyProtectedMember
from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
from ansible.utils.collection_loader import _collection_finder
yaml_to_dict_cache = {}
# unique ISO date marker matching the one present in yaml_to_json.py
iso_date_marker = 'isodate:f23983df-f3df-453c-9904-bcd08af468cc:'
iso_date_re = re.compile('^%s([0-9]{4})-([0-9]{2})-([0-9]{2})$' % iso_date_marker)
def parse_value(value):
"""Custom value parser for JSON deserialization that recognizes our internal ISO date format."""
if isinstance(value, text_type):
match = iso_date_re.search(value)
if match:
value = datetime.date(int(match.group(1)), int(match.group(2)), int(match.group(3)))
return value
def object_hook(data):
"""Object hook for custom ISO date deserialization from JSON."""
return dict((key, parse_value(value)) for key, value in data.items())
def yaml_to_dict(yaml, content_id):
"""
Return a Python dict version of the provided YAML.
Conversion is done in a subprocess since the current Python interpreter does not have access to PyYAML.
"""
if content_id in yaml_to_dict_cache:
return yaml_to_dict_cache[content_id]
try:
cmd = [external_python, yaml_to_json_path]
proc = subprocess.Popen([to_bytes(c) for c in cmd], # pylint: disable=consider-using-with
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_bytes, stderr_bytes = proc.communicate(to_bytes(yaml))
if proc.returncode != 0:
raise Exception('command %s failed with return code %d: %s' % ([to_native(c) for c in cmd], proc.returncode, to_native(stderr_bytes)))
data = yaml_to_dict_cache[content_id] = json.loads(to_text(stdout_bytes), object_hook=object_hook)
return data
except Exception as ex:
raise Exception('internal importer error - failed to parse yaml: %s' % to_native(ex))
_collection_finder._meta_yml_to_dict = yaml_to_dict # pylint: disable=protected-access
collection_loader = _AnsibleCollectionFinder(paths=[collection_root])
# noinspection PyProtectedMember
collection_loader._install() # pylint: disable=protected-access
else:
# do not support collection loading when not testing a collection
collection_loader = None
if collection_loader and import_type == 'plugin':
# do not unload ansible code for collection plugin (not module) tests
# doing so could result in the collection loader being initialized multiple times
pass
else:
# remove all modules under the ansible package, except the preloaded vendor module
list(map(sys.modules.pop, [m for m in sys.modules if m.partition('.')[0] == ansible.__name__ and m != vendor_module_name]))
if import_type == 'module':
# pre-load an empty ansible package to prevent unwanted code in __init__.py from loading
# this more accurately reflects the environment that AnsiballZ runs modules under
# it also avoids issues with imports in the ansible package that are not allowed
ansible_module = types.ModuleType(ansible.__name__)
ansible_module.__file__ = ansible.__file__
ansible_module.__path__ = ansible.__path__
ansible_module.__package__ = ansible.__package__
sys.modules[ansible.__name__] = ansible_module
class ImporterAnsibleModuleException(Exception):
"""Exception thrown during initialization of ImporterAnsibleModule."""
class ImporterAnsibleModule:
"""Replacement for AnsibleModule to support import testing."""
def __init__(self, *args, **kwargs):
raise ImporterAnsibleModuleException()
class RestrictedModuleLoader:
"""Python module loader that restricts inappropriate imports."""
def __init__(self, path, name, restrict_to_module_paths):
self.path = path
self.name = name
self.loaded_modules = set()
self.restrict_to_module_paths = restrict_to_module_paths
def find_spec(self, fullname, path=None, target=None): # pylint: disable=unused-argument
# type: (RestrictedModuleLoader, str, list[str], types.ModuleType | None ) -> ModuleSpec | None | ImportError
"""Return the spec from the loader or None"""
loader = self._get_loader(fullname, path=path)
if loader is not None:
if has_py3_loader:
# loader is expected to be Optional[importlib.abc.Loader], but RestrictedModuleLoader does not inherit from importlib.abc.Loader
return spec_from_loader(fullname, loader) # type: ignore[arg-type]
raise ImportError("Failed to import '%s' due to a bug in ansible-test. Check importlib imports for typos." % fullname)
return None
def find_module(self, fullname, path=None):
# type: (RestrictedModuleLoader, str, list[str]) -> RestrictedModuleLoader | None
"""Return self if the given fullname is restricted, otherwise return None."""
return self._get_loader(fullname, path=path)
def _get_loader(self, fullname, path=None):
# type: (RestrictedModuleLoader, str, list[str]) -> RestrictedModuleLoader | None
"""Return self if the given fullname is restricted, otherwise return None."""
if fullname in self.loaded_modules:
return None # ignore modules that are already being loaded
if is_name_in_namepace(fullname, ['ansible']):
if not self.restrict_to_module_paths:
return None # for non-modules, everything in the ansible namespace is allowed
if fullname in ('ansible.module_utils.basic',):
return self # intercept loading so we can modify the result
if is_name_in_namepace(fullname, ['ansible.module_utils', self.name]):
return None # module_utils and module under test are always allowed
if any(os.path.exists(candidate_path) for candidate_path in convert_ansible_name_to_absolute_paths(fullname)):
return self # restrict access to ansible files that exist
return None # ansible file does not exist, do not restrict access
if is_name_in_namepace(fullname, ['ansible_collections']):
if not collection_loader:
return self # restrict access to collections when we are not testing a collection
if not self.restrict_to_module_paths:
return None # for non-modules, everything in the ansible namespace is allowed
if is_name_in_namepace(fullname, ['ansible_collections...plugins.module_utils', self.name]):
return None # module_utils and module under test are always allowed
if collection_loader.find_module(fullname, path):
return self # restrict access to collection files that exist
return None # collection file does not exist, do not restrict access
# not a namespace we care about
return None
def create_module(self, spec): # pylint: disable=unused-argument
# type: (RestrictedModuleLoader, ModuleSpec) -> None
"""Return None to use default module creation."""
return None
def exec_module(self, module):
# type: (RestrictedModuleLoader, types.ModuleType) -> None | ImportError
"""Execute the module if the name is ansible.module_utils.basic and otherwise raise an ImportError"""
fullname = module.__spec__.name
if fullname == 'ansible.module_utils.basic':
self.loaded_modules.add(fullname)
for path in convert_ansible_name_to_absolute_paths(fullname):
if not os.path.exists(path):
continue
loader = SourceFileLoader(fullname, path)
spec = spec_from_loader(fullname, loader)
real_module = module_from_spec(spec)
loader.exec_module(real_module)
real_module.AnsibleModule = ImporterAnsibleModule # type: ignore[attr-defined]
real_module._load_params = lambda *args, **kwargs: {} # type: ignore[attr-defined] # pylint: disable=protected-access
sys.modules[fullname] = real_module
return None
raise ImportError('could not find "%s"' % fullname)
raise ImportError('import of "%s" is not allowed in this context' % fullname)
def load_module(self, fullname):
# type: (RestrictedModuleLoader, str) -> types.ModuleType | ImportError
"""Return the module if the name is ansible.module_utils.basic and otherwise raise an ImportError."""
if fullname == 'ansible.module_utils.basic':
module = self.__load_module(fullname)
# stop Ansible module execution during AnsibleModule instantiation
module.AnsibleModule = ImporterAnsibleModule # type: ignore[attr-defined]
# no-op for _load_params since it may be called before instantiating AnsibleModule
module._load_params = lambda *args, **kwargs: {} # type: ignore[attr-defined] # pylint: disable=protected-access
return module
raise ImportError('import of "%s" is not allowed in this context' % fullname)
def __load_module(self, fullname):
# type: (RestrictedModuleLoader, str) -> types.ModuleType
"""Load the requested module while avoiding infinite recursion."""
self.loaded_modules.add(fullname)
return import_module(fullname)
def run(restrict_to_module_paths):
"""Main program function."""
base_dir = os.getcwd()
messages = set()
for path in sys.argv[1:] or sys.stdin.read().splitlines():
name = convert_relative_path_to_name(path)
test_python_module(path, name, base_dir, messages, restrict_to_module_paths)
if messages:
sys.exit(10)
def test_python_module(path, name, base_dir, messages, restrict_to_module_paths):
"""Test the given python module by importing it.
:type path: str
:type name: str
:type base_dir: str
:type messages: set[str]
:type restrict_to_module_paths: bool
"""
if name in sys.modules:
return # cannot be tested because it has already been loaded
is_ansible_module = (path.startswith('lib/ansible/modules/') or path.startswith('plugins/modules/')) and os.path.basename(path) != '__init__.py'
run_main = is_ansible_module
if path == 'lib/ansible/modules/async_wrapper.py':
# async_wrapper is a non-standard Ansible module (does not use AnsibleModule) so we cannot test the main function
run_main = False
capture_normal = Capture()
capture_main = Capture()
run_module_ok = False
try:
with monitor_sys_modules(path, messages):
with restrict_imports(path, name, messages, restrict_to_module_paths):
with capture_output(capture_normal):
import_module(name)
if run_main:
run_module_ok = is_ansible_module
with monitor_sys_modules(path, messages):
with restrict_imports(path, name, messages, restrict_to_module_paths):
with capture_output(capture_main):
runpy.run_module(name, run_name='__main__', alter_sys=True)
except ImporterAnsibleModuleException:
# module instantiated AnsibleModule without raising an exception
if not run_module_ok:
if is_ansible_module:
report_message(path, 0, 0, 'module-guard', "AnsibleModule instantiation not guarded by `if __name__ == '__main__'`", messages)
else:
report_message(path, 0, 0, 'non-module', "AnsibleModule instantiated by import of non-module", messages)
except BaseException as ex: # pylint: disable=locally-disabled, broad-except
# intentionally catch all exceptions, including calls to sys.exit
exc_type, _exc, exc_tb = sys.exc_info()
message = str(ex)
results = list(reversed(traceback.extract_tb(exc_tb)))
line = 0
offset = 0
full_path = os.path.join(base_dir, path)
base_path = base_dir + os.path.sep
source = None
# avoid line wraps in messages
message = re.sub(r'\n *', ': ', message)
for result in results:
if result[0] == full_path:
# save the line number for the file under test
line = result[1] or 0
if not source and result[0].startswith(base_path) and not result[0].startswith(temp_path):
# save the first path and line number in the traceback which is in our source tree
source = (os.path.relpath(result[0], base_path), result[1] or 0, 0)
if isinstance(ex, SyntaxError):
# SyntaxError has better information than the traceback
if ex.filename == full_path: # pylint: disable=locally-disabled, no-member
# syntax error was reported in the file under test
line = ex.lineno or 0 # pylint: disable=locally-disabled, no-member
offset = ex.offset or 0 # pylint: disable=locally-disabled, no-member
elif ex.filename.startswith(base_path) and not ex.filename.startswith(temp_path): # pylint: disable=locally-disabled, no-member
# syntax error was reported in our source tree
source = (os.path.relpath(ex.filename, base_path), ex.lineno or 0, ex.offset or 0) # pylint: disable=locally-disabled, no-member
# remove the filename and line number from the message
# either it was extracted above, or it's not really useful information
message = re.sub(r' \(.*?, line [0-9]+\)$', '', message)
if source and source[0] != path:
message += ' (at %s:%d:%d)' % (source[0], source[1], source[2])
report_message(path, line, offset, 'traceback', '%s: %s' % (exc_type.__name__, message), messages)
finally:
capture_report(path, capture_normal, messages)
capture_report(path, capture_main, messages)
def is_name_in_namepace(name, namespaces):
"""Returns True if the given name is one of the given namespaces, otherwise returns False."""
name_parts = name.split('.')
for namespace in namespaces:
namespace_parts = namespace.split('.')
length = min(len(name_parts), len(namespace_parts))
truncated_name = name_parts[0:length]
truncated_namespace = namespace_parts[0:length]
# empty parts in the namespace are treated as wildcards
# to simplify the comparison, use those empty parts to indicate the positions in the name to be empty as well
for idx, part in enumerate(truncated_namespace):
if not part:
truncated_name[idx] = part
# example: name=ansible, allowed_name=ansible.module_utils
# example: name=ansible.module_utils.system.ping, allowed_name=ansible.module_utils
if truncated_name == truncated_namespace:
return True
return False
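    # --- Illustrative sketch (not part of the original file) ---
    # The module names below are assumptions; they show how empty namespace parts
    # (as in 'ansible_collections...plugins.module_utils') act as wildcards.
    def _demo_is_name_in_namepace():
        """Illustrative only: exercise the namespace wildcard matching above."""
        assert is_name_in_namepace('ansible.module_utils.basic', ['ansible.module_utils'])
        assert is_name_in_namepace(
            'ansible_collections.ns.col.plugins.module_utils.helper',
            ['ansible_collections...plugins.module_utils'])
        assert not is_name_in_namepace(
            'ansible_collections.ns.col.plugins.modules.ping',
            ['ansible_collections...plugins.module_utils'])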
def check_sys_modules(path, before, messages):
"""Check for unwanted changes to sys.modules.
:type path: str
:type before: dict[str, module]
:type messages: set[str]
"""
after = sys.modules
removed = set(before.keys()) - set(after.keys())
changed = set(key for key, value in before.items() if key in after and value != after[key])
# additions are checked by our custom PEP 302 loader, so we don't need to check them again here
for module in sorted(removed):
report_message(path, 0, 0, 'unload', 'unloading of "%s" in sys.modules is not supported' % module, messages)
for module in sorted(changed):
report_message(path, 0, 0, 'reload', 'reloading of "%s" in sys.modules is not supported' % module, messages)
def convert_ansible_name_to_absolute_paths(name):
"""Calculate the module path from the given name.
:type name: str
:rtype: list[str]
"""
return [
os.path.join(ansible_path, name.replace('.', os.path.sep)),
os.path.join(ansible_path, name.replace('.', os.path.sep)) + '.py',
]
def convert_relative_path_to_name(path):
"""Calculate the module name from the given path.
:type path: str
:rtype: str
"""
if path.endswith('/__init__.py'):
clean_path = os.path.dirname(path)
else:
clean_path = path
clean_path = os.path.splitext(clean_path)[0]
name = clean_path.replace(os.path.sep, '.')
if collection_loader:
# when testing collections the relative paths (and names) being tested are within the collection under test
name = 'ansible_collections.%s.%s' % (collection_full_name, name)
else:
# when testing ansible all files being imported reside under the lib directory
name = name[len('lib/'):]
return name
class Capture:
"""Captured output and/or exception."""
def __init__(self):
# use buffered IO to simulate StringIO; allows Ansible's stream patching to behave without warnings
self.stdout = TextIOWrapper(BytesIO())
self.stderr = TextIOWrapper(BytesIO())
def capture_report(path, capture, messages):
"""Report on captured output.
:type path: str
:type capture: Capture
:type messages: set[str]
"""
# since we're using buffered IO, flush before checking for data
capture.stdout.flush()
capture.stderr.flush()
stdout_value = capture.stdout.buffer.getvalue()
if stdout_value:
first = stdout_value.decode().strip().splitlines()[0].strip()
report_message(path, 0, 0, 'stdout', first, messages)
stderr_value = capture.stderr.buffer.getvalue()
if stderr_value:
first = stderr_value.decode().strip().splitlines()[0].strip()
report_message(path, 0, 0, 'stderr', first, messages)
def report_message(path, line, column, code, message, messages):
"""Report message if not already reported.
:type path: str
:type line: int
:type column: int
:type code: str
:type message: str
:type messages: set[str]
"""
message = '%s:%d:%d: %s: %s' % (path, line, column, code, message)
if message not in messages:
messages.add(message)
print(message)
@contextlib.contextmanager
def restrict_imports(path, name, messages, restrict_to_module_paths):
"""Restrict available imports.
:type path: str
:type name: str
:type messages: set[str]
:type restrict_to_module_paths: bool
"""
restricted_loader = RestrictedModuleLoader(path, name, restrict_to_module_paths)
# noinspection PyTypeChecker
sys.meta_path.insert(0, restricted_loader)
sys.path_importer_cache.clear()
try:
yield
finally:
if import_type == 'plugin' and not collection_loader:
from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
_AnsibleCollectionFinder._remove() # pylint: disable=protected-access
if sys.meta_path[0] != restricted_loader:
report_message(path, 0, 0, 'metapath', 'changes to sys.meta_path[0] are not permitted', messages)
while restricted_loader in sys.meta_path:
# noinspection PyTypeChecker
sys.meta_path.remove(restricted_loader)
sys.path_importer_cache.clear()
@contextlib.contextmanager
def monitor_sys_modules(path, messages):
"""Monitor sys.modules for unwanted changes, reverting any additions made to our own namespaces."""
snapshot = sys.modules.copy()
try:
yield
finally:
check_sys_modules(path, snapshot, messages)
for key in set(sys.modules.keys()) - set(snapshot.keys()):
if is_name_in_namepace(key, ('ansible', 'ansible_collections')):
del sys.modules[key] # only unload our own code since we know it's native Python
@contextlib.contextmanager
def capture_output(capture):
"""Capture sys.stdout and sys.stderr.
:type capture: Capture
"""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = capture.stdout
sys.stderr = capture.stderr
# clear all warnings registries to make all warnings available
for module in sys.modules.values():
try:
# noinspection PyUnresolvedReferences
module.__warningregistry__.clear()
except AttributeError:
pass
with warnings.catch_warnings():
warnings.simplefilter('error')
if collection_loader and import_type == 'plugin':
warnings.filterwarnings(
"ignore",
"AnsibleCollectionFinder has already been configured")
try:
yield
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
run(import_type == 'module')
if __name__ == '__main__':
main()
| 24,873 | Python | .py | 445 | 43.835955 | 154 | 0.621078 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,281 | quiet_pip.py | ansible_ansible/test/lib/ansible_test/_util/target/setup/quiet_pip.py |
"""Custom entry-point for pip that filters out unwanted logging and warnings."""
from __future__ import annotations
import logging
import os
import re
import runpy
import sys
BUILTIN_FILTERER_FILTER = logging.Filterer.filter
LOGGING_MESSAGE_FILTER = re.compile("^("
".*Running pip install with root privileges is generally not a good idea.*|" # custom Fedora patch [1]
".*Running pip as the 'root' user can result in broken permissions .*|" # pip 21.1
"Ignoring .*: markers .* don't match your environment|"
"Looking in indexes: .*|" # pypi-test-container
"Requirement already satisfied.*"
")$")
# [1] https://src.fedoraproject.org/rpms/python-pip/blob/f34/f/emit-a-warning-when-running-with-root-privileges.patch
def custom_filterer_filter(self, record):
"""Globally omit logging of unwanted messages."""
if LOGGING_MESSAGE_FILTER.search(record.getMessage()):
return 0
return BUILTIN_FILTERER_FILTER(self, record)
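# --- Illustrative sketch (not part of the original file) ---
# The sample log messages below are assumptions; they show which records the
# pattern above suppresses and which still reach the console.
def _demo_message_filter():
    """Illustrative only: True means the message would be filtered out."""
    noisy = 'Requirement already satisfied: pip in /usr/lib/python3/dist-packages'
    useful = 'Successfully installed requests-2.31.0'
    return (
        bool(LOGGING_MESSAGE_FILTER.search(noisy)),   # True - suppressed
        bool(LOGGING_MESSAGE_FILTER.search(useful)),  # False - still logged
    )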
def main():
"""Main program entry point."""
# Filtering logging output globally avoids having to intercept stdout/stderr.
# It also avoids problems with loss of color output and mixing up the order of stdout/stderr messages.
logging.Filterer.filter = custom_filterer_filter
get_pip = os.environ.get('GET_PIP')
try:
if get_pip:
directory, filename = os.path.split(get_pip)
module = os.path.splitext(filename)[0]
sys.path.insert(0, directory)
runpy.run_module(module, run_name='__main__', alter_sys=True)
else:
runpy.run_module('pip.__main__', run_name='__main__', alter_sys=True)
except ImportError as ex:
print('pip is unavailable: %s' % ex)
sys.exit(1)
if __name__ == '__main__':
main()
| 1,979 | Python | .py | 40 | 39.15 | 139 | 0.615784 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,282 | probe_cgroups.py | ansible_ansible/test/lib/ansible_test/_util/target/setup/probe_cgroups.py |
"""A tool for probing cgroups to determine write access."""
from __future__ import annotations
import json
import os
import sys
def main(): # type: () -> None
"""Main program entry point."""
probe_dir = sys.argv[1]
paths = sys.argv[2:]
results = {}
for path in paths:
probe_path = os.path.join(path, probe_dir)
try:
os.mkdir(probe_path)
os.rmdir(probe_path)
except Exception as ex: # pylint: disable=broad-except
results[path] = str(ex)
else:
results[path] = None
print(json.dumps(results, sort_keys=True))
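# --- Illustrative sketch (not part of the original file) ---
# Probes a single path the same way main() does; '/tmp' is an assumption used
# purely for demonstration.
def _demo_probe(path='/tmp'):
    """Illustrative only: return None for a writable path, or the error text."""
    probe_path = os.path.join(path, 'ansible-test-probe-demo')
    try:
        os.mkdir(probe_path)
        os.rmdir(probe_path)
    except Exception as ex:  # pylint: disable=broad-except
        return str(ex)
    return None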
if __name__ == '__main__':
main()
| 659 | Python | .py | 22 | 23.681818 | 63 | 0.596184 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,283 | requirements.py | ansible_ansible/test/lib/ansible_test/_util/target/setup/requirements.py |
"""A tool for installing test requirements on the controller and target host."""
from __future__ import annotations
# pylint: disable=wrong-import-position
import resource
# Setting a low soft RLIMIT_NOFILE value will improve the performance of subprocess.Popen on Python 2.x when close_fds=True.
# This will affect all Python subprocesses. It will also affect the current Python process if set before subprocess is imported for the first time.
SOFT_RLIMIT_NOFILE = 1024
CURRENT_RLIMIT_NOFILE = resource.getrlimit(resource.RLIMIT_NOFILE)
DESIRED_RLIMIT_NOFILE = (SOFT_RLIMIT_NOFILE, CURRENT_RLIMIT_NOFILE[1])
if DESIRED_RLIMIT_NOFILE < CURRENT_RLIMIT_NOFILE:
resource.setrlimit(resource.RLIMIT_NOFILE, DESIRED_RLIMIT_NOFILE)
CURRENT_RLIMIT_NOFILE = DESIRED_RLIMIT_NOFILE
import base64
import contextlib
import errno
import io
import json
import os
import shlex
import shutil
import subprocess
import sys
import tempfile
import typing as t
import urllib.request
ENCODING = 'utf-8'
Text = type(u'')
VERBOSITY = 0
CONSOLE = sys.stderr
def main(): # type: () -> None
"""Main program entry point."""
global VERBOSITY # pylint: disable=global-statement
payload = json.loads(to_text(base64.b64decode(PAYLOAD)))
VERBOSITY = payload['verbosity']
script = payload['script']
commands = payload['commands']
with tempfile.NamedTemporaryFile(prefix='ansible-test-', suffix='-pip.py') as pip:
pip.write(to_bytes(script))
pip.flush()
for name, options in commands:
try:
globals()[name](pip.name, options)
except ApplicationError as ex:
print(ex)
sys.exit(1)
# noinspection PyUnusedLocal
def bootstrap(pip, options): # type: (str, t.Dict[str, t.Any]) -> None
"""Bootstrap pip and related packages in an empty virtual environment."""
pip_version = options['pip_version']
packages = options['packages']
setuptools = options['setuptools']
wheel = options['wheel']
url = 'https://ci-files.testing.ansible.com/ansible-test/get-pip-%s.py' % pip_version
cache_path = os.path.expanduser('~/.ansible/test/cache/get_pip_%s.py' % pip_version.replace(".", "_"))
temp_path = cache_path + '.download'
if os.path.exists(cache_path):
log('Using cached pip %s bootstrap script: %s' % (pip_version, cache_path))
else:
log('Downloading pip %s bootstrap script: %s' % (pip_version, url))
make_dirs(os.path.dirname(cache_path))
try:
download_file(url, temp_path)
except Exception as ex:
raise ApplicationError(('''
Download failed: %s
The bootstrap script can be manually downloaded and saved to: %s
If you're behind a proxy, consider commenting on the following GitHub issue:
https://github.com/ansible/ansible/issues/77304
''' % (ex, cache_path)).strip())
shutil.move(temp_path, cache_path)
log('Cached pip %s bootstrap script: %s' % (pip_version, cache_path))
env = common_pip_environment()
env.update(GET_PIP=cache_path)
options = common_pip_options()
options.extend(packages)
if not setuptools:
options.append('--no-setuptools')
if not wheel:
options.append('--no-wheel')
command = [sys.executable, pip] + options
execute_command(command, env=env)
def install(pip, options): # type: (str, t.Dict[str, t.Any]) -> None
"""Perform a pip install."""
requirements = options['requirements']
constraints = options['constraints']
packages = options['packages']
tempdir = tempfile.mkdtemp(prefix='ansible-test-', suffix='-requirements')
try:
options = common_pip_options()
options.extend(packages)
for path, content in requirements:
if path.split(os.sep)[0] in ('test', 'requirements'):
# Support for pre-build is currently limited to requirements embedded in ansible-test and those used by ansible-core.
# Requirements from ansible-core can be found in the 'test' and 'requirements' directories.
# This feature will probably be extended to support collections after further testing.
# Requirements from collections can be found in the 'tests' directory.
for pre_build in parse_pre_build_instructions(content):
pre_build.execute(pip)
write_text_file(os.path.join(tempdir, path), content, True)
options.extend(['-r', path])
for path, content in constraints:
write_text_file(os.path.join(tempdir, path), content, True)
options.extend(['-c', path])
command = [sys.executable, pip, 'install'] + options
env = common_pip_environment()
execute_command(command, env=env, cwd=tempdir)
finally:
remove_tree(tempdir)
class PreBuild:
"""Parsed pre-build instructions."""
def __init__(self, requirement): # type: (str) -> None
self.requirement = requirement
self.constraints = [] # type: list[str]
def execute(self, pip): # type: (str) -> None
"""Execute these pre-build instructions."""
tempdir = tempfile.mkdtemp(prefix='ansible-test-', suffix='-pre-build')
try:
options = common_pip_options()
options.append(self.requirement)
constraints = '\n'.join(self.constraints) + '\n'
constraints_path = os.path.join(tempdir, 'constraints.txt')
write_text_file(constraints_path, constraints, True)
env = common_pip_environment()
env.update(PIP_CONSTRAINT=constraints_path)
command = [sys.executable, pip, 'wheel'] + options
execute_command(command, env=env, cwd=tempdir)
finally:
remove_tree(tempdir)
def parse_pre_build_instructions(requirements): # type: (str) -> list[PreBuild]
"""Parse the given pip requirements and return a list of extracted pre-build instructions."""
# CAUTION: This code must be kept in sync with the sanity test hashing code in:
# test/lib/ansible_test/_internal/commands/sanity/__init__.py
pre_build_prefix = '# pre-build '
pre_build_requirement_prefix = pre_build_prefix + 'requirement: '
pre_build_constraint_prefix = pre_build_prefix + 'constraint: '
lines = requirements.splitlines()
pre_build_lines = [line for line in lines if line.startswith(pre_build_prefix)]
instructions = [] # type: list[PreBuild]
for line in pre_build_lines:
if line.startswith(pre_build_requirement_prefix):
instructions.append(PreBuild(line[len(pre_build_requirement_prefix):]))
elif line.startswith(pre_build_constraint_prefix):
instructions[-1].constraints.append(line[len(pre_build_constraint_prefix):])
else:
raise RuntimeError('Unsupported pre-build comment: ' + line)
return instructions
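# --- Illustrative sketch (not part of the original file) ---
# The requirement and constraint strings below are assumptions; they only show
# the comment format that parse_pre_build_instructions() understands.
def _demo_parse_pre_build():
    """Illustrative only: parse a requirements file containing pre-build comments."""
    sample = '\n'.join((
        '# pre-build requirement: pyyaml == 6.0.1',
        '# pre-build constraint: Cython < 3.0',
        'pyyaml == 6.0.1',
    ))
    instructions = parse_pre_build_instructions(sample)
    # one PreBuild with requirement 'pyyaml == 6.0.1' and constraints ['Cython < 3.0']
    return instructions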
def uninstall(pip, options): # type: (str, t.Dict[str, t.Any]) -> None
"""Perform a pip uninstall."""
packages = options['packages']
ignore_errors = options['ignore_errors']
options = common_pip_options()
options.extend(packages)
command = [sys.executable, pip, 'uninstall', '-y'] + options
env = common_pip_environment()
try:
execute_command(command, env=env, capture=True)
except SubprocessError:
if not ignore_errors:
raise
# noinspection PyUnusedLocal
def version(pip, options): # type: (str, t.Dict[str, t.Any]) -> None
"""Report the pip version."""
del options
options = common_pip_options()
command = [sys.executable, pip, '-V'] + options
env = common_pip_environment()
execute_command(command, env=env, capture=True)
def common_pip_environment(): # type: () -> t.Dict[str, str]
"""Return common environment variables used to run pip."""
env = os.environ.copy()
# When ansible-test installs requirements outside a virtual environment, it does so under one of two conditions:
# 1) The environment is an ephemeral one provisioned by ansible-test.
# 2) The user has provided the `--requirements` option to force installation of requirements.
# It seems reasonable to bypass PEP 668 checks in both of these cases.
# Doing so with an environment variable allows it to work under any version of pip which supports it, without breaking older versions.
# NOTE: pip version 23.0 enforces PEP 668 but does not support the override, in which case upgrading pip is required.
env.update(PIP_BREAK_SYSTEM_PACKAGES='1')
return env
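# Illustrative sketch: how the helpers above are typically combined. The merged
# environment (with PIP_BREAK_SYSTEM_PACKAGES=1) and the common options feed a pip
# subcommand; the pip path and package name below are hypothetical.
def _example_pip_invocation():  # pragma: no cover
    """Hedged example of wiring common_pip_environment/common_pip_options into a pip call."""
    pip = '/usr/bin/pip3'  # hypothetical pip entry point
    command = [sys.executable, pip, 'install'] + common_pip_options() + ['some-package']
    env = common_pip_environment()
    execute_command(command, env=env, capture=True)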
def common_pip_options(): # type: () -> t.List[str]
"""Return a list of common pip options."""
return [
'--disable-pip-version-check',
]
def devnull(): # type: () -> t.IO[bytes]
"""Return a file object that references devnull."""
try:
return devnull.file
except AttributeError:
devnull.file = open(os.devnull, 'w+b') # pylint: disable=consider-using-with
return devnull.file
def download_file(url, path): # type: (str, str) -> None
"""Download the given URL to the specified file path."""
with open(to_bytes(path), 'wb') as saved_file:
with contextlib.closing(urllib.request.urlopen(url)) as download:
shutil.copyfileobj(download, saved_file)
class ApplicationError(Exception):
"""Base class for application exceptions."""
class SubprocessError(ApplicationError):
"""A command returned a non-zero status."""
def __init__(self, cmd, status, stdout, stderr): # type: (t.List[str], int, str, str) -> None
message = 'A command failed with status %d: %s' % (status, shlex.join(cmd))
if stderr:
message += '\n>>> Standard Error\n%s' % stderr.strip()
if stdout:
message += '\n>>> Standard Output\n%s' % stdout.strip()
super(SubprocessError, self).__init__(message)
def log(message, verbosity=0): # type: (str, int) -> None
"""Log a message to the console if the verbosity is high enough."""
if verbosity > VERBOSITY:
return
print(message, file=CONSOLE)
CONSOLE.flush()
def execute_command(cmd, cwd=None, capture=False, env=None): # type: (t.List[str], t.Optional[str], bool, t.Optional[t.Dict[str, str]]) -> None
"""Execute the specified command."""
log('Execute command: %s' % shlex.join(cmd), verbosity=1)
cmd_bytes = [to_bytes(c) for c in cmd]
if capture:
stdout = subprocess.PIPE
stderr = subprocess.PIPE
else:
stdout = None
stderr = None
cwd_bytes = to_optional_bytes(cwd)
process = subprocess.Popen(cmd_bytes, cwd=cwd_bytes, stdin=devnull(), stdout=stdout, stderr=stderr, env=env) # pylint: disable=consider-using-with
stdout_bytes, stderr_bytes = process.communicate()
stdout_text = to_optional_text(stdout_bytes) or u''
stderr_text = to_optional_text(stderr_bytes) or u''
if process.returncode != 0:
raise SubprocessError(cmd, process.returncode, stdout_text, stderr_text)
def write_text_file(path, content, create_directories=False): # type: (str, str, bool) -> None
"""Write the given text content to the specified path, optionally creating missing directories."""
if create_directories:
make_dirs(os.path.dirname(path))
with open_binary_file(path, 'wb') as file_obj:
file_obj.write(to_bytes(content))
def remove_tree(path): # type: (str) -> None
"""Remove the specified directory tree."""
try:
shutil.rmtree(to_bytes(path))
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
def make_dirs(path): # type: (str) -> None
"""Create a directory at path, including any necessary parent directories."""
try:
os.makedirs(to_bytes(path))
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
def open_binary_file(path, mode='rb'): # type: (str, str) -> t.IO[bytes]
"""Open the given path for binary access."""
if 'b' not in mode:
raise Exception('mode must include "b" for binary files: %s' % mode)
return io.open(to_bytes(path), mode) # pylint: disable=consider-using-with,unspecified-encoding
def to_optional_bytes(value, errors='strict'): # type: (t.Optional[str | bytes], str) -> t.Optional[bytes]
"""Return the given value as bytes encoded using UTF-8 if not already bytes, or None if the value is None."""
return None if value is None else to_bytes(value, errors)
def to_optional_text(value, errors='strict'): # type: (t.Optional[str | bytes], str) -> t.Optional[t.Text]
"""Return the given value as text decoded using UTF-8 if not already text, or None if the value is None."""
return None if value is None else to_text(value, errors)
def to_bytes(value, errors='strict'): # type: (str | bytes, str) -> bytes
"""Return the given value as bytes encoded using UTF-8 if not already bytes."""
if isinstance(value, bytes):
return value
if isinstance(value, Text):
return value.encode(ENCODING, errors)
raise Exception('value is not bytes or text: %s' % type(value))
def to_text(value, errors='strict'): # type: (str | bytes, str) -> t.Text
"""Return the given value as text decoded using UTF-8 if not already text."""
if isinstance(value, bytes):
return value.decode(ENCODING, errors)
if isinstance(value, Text):
return value
raise Exception('value is not bytes or text: %s' % type(value))
PAYLOAD = b'{payload}' # base-64 encoded JSON payload which will be populated before this script is executed
if __name__ == '__main__':
main()
| 13,698
|
Python
|
.py
| 277
| 42.833935
| 151
| 0.673535
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,284
|
constants.py
|
ansible_ansible/test/lib/ansible_test/_util/target/common/constants.py
|
"""Constants used by ansible-test's CLI entry point (as well as the rest of ansible-test). Imports should not be used in this file."""
# NOTE: This file resides in the _util/target directory to ensure compatibility with all supported Python versions.
from __future__ import annotations
REMOTE_ONLY_PYTHON_VERSIONS = (
'3.8',
'3.9',
'3.10',
)
CONTROLLER_PYTHON_VERSIONS = (
'3.11',
'3.12',
'3.13',
)
| 427
|
Python
|
.py
| 13
| 29.692308
| 134
| 0.695122
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,285
|
yamlcheck.py
|
ansible_ansible/test/lib/ansible_test/_util/target/tools/yamlcheck.py
|
"""Show availability of PyYAML and libyaml support."""
from __future__ import annotations
import json
try:
import yaml
except ImportError:
yaml = None
try:
from yaml import CLoader
except ImportError:
CLoader = None
print(json.dumps(dict(
yaml=bool(yaml),
cloader=bool(CLoader),
)))
| 311
|
Python
|
.py
| 15
| 17.866667
| 54
| 0.739726
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,286
|
virtualenvcheck.py
|
ansible_ansible/test/lib/ansible_test/_util/target/tools/virtualenvcheck.py
|
"""Detect the real python interpreter when running in a virtual environment created by the 'virtualenv' module."""
from __future__ import annotations
import json
try:
# virtualenv <20
from sys import real_prefix
except ImportError:
real_prefix = None
try:
# venv and virtualenv >= 20
from sys import base_exec_prefix
except ImportError:
base_exec_prefix = None
print(json.dumps(dict(
real_prefix=real_prefix or base_exec_prefix,
)))
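# Illustrative sketch: how a caller could consume this tool's output by running it with a
# candidate interpreter and decoding the JSON it prints. This is only a hedged example of
# the expected usage.
def _example_consume_output():  # pragma: no cover
    """Hedged example: re-run this script and decode its JSON output."""
    import subprocess
    import sys
    output = subprocess.check_output([sys.executable, __file__])
    return json.loads(output)['real_prefix']  # the base prefix of the interpreter that ran the script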
| 465
|
Python
|
.py
| 16
| 26.0625
| 114
| 0.750562
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,287
|
ansible_pytest_coverage.py
|
ansible_ansible/test/lib/ansible_test/_util/target/pytest/plugins/ansible_pytest_coverage.py
|
"""Monkey patch os._exit when running under coverage so we don't lose coverage data in forks, such as with `pytest --boxed`. PYTEST_DONT_REWRITE"""
from __future__ import annotations
def pytest_configure():
"""Configure this pytest plugin."""
try:
if pytest_configure.executed:
return
except AttributeError:
pytest_configure.executed = True
try:
import coverage
except ImportError:
coverage = None
try:
coverage.Coverage
except AttributeError:
coverage = None
if not coverage:
return
import gc
import os
coverage_instances = []
for obj in gc.get_objects():
if isinstance(obj, coverage.Coverage):
coverage_instances.append(obj)
if not coverage_instances:
coverage_config = os.environ.get('COVERAGE_CONF')
if not coverage_config:
return
coverage_output = os.environ.get('COVERAGE_FILE')
if not coverage_output:
return
cov = coverage.Coverage(config_file=coverage_config)
coverage_instances.append(cov)
else:
cov = None
# noinspection PyProtectedMember
os_exit = os._exit # pylint: disable=protected-access
def coverage_exit(*args, **kwargs):
for instance in coverage_instances:
# skip coverage instances which have no collector, or the collector is not the active collector
# this avoids issues with coverage 7.4.0+ when tests create subprocesses which inherit our overridden os._exit method
# pylint: disable=protected-access
if not instance._collector or not instance._collector._collectors or instance._collector != instance._collector._collectors[-1]:
continue
instance.stop()
instance.save()
os_exit(*args, **kwargs)
os._exit = coverage_exit # pylint: disable=protected-access
if cov:
cov.start()
pytest_configure()
| 1,998
|
Python
|
.py
| 52
| 30.173077
| 147
| 0.656104
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,288
|
ansible_forked.py
|
ansible_ansible/test/lib/ansible_test/_util/target/pytest/plugins/ansible_forked.py
|
"""Run each test in its own fork. PYTEST_DONT_REWRITE"""
# MIT License (see licenses/MIT-license.txt or https://opensource.org/licenses/MIT)
# Based on code originally from:
# https://github.com/pytest-dev/pytest-forked
# https://github.com/pytest-dev/py
# TIP: Disable pytest-xdist when debugging internal errors in this plugin.
from __future__ import annotations
import os
import pickle
import tempfile
import warnings
from pytest import Item, hookimpl, TestReport
from _pytest.runner import runtestprotocol
@hookimpl(tryfirst=True)
def pytest_runtest_protocol(item, nextitem): # type: (Item, Item | None) -> object | None
"""Entry point for enabling this plugin."""
# This is needed because pytest-xdist creates an OS thread (using execnet).
# See: https://github.com/pytest-dev/execnet/blob/d6aa1a56773c2e887515d63e50b1d08338cb78a7/execnet/gateway_base.py#L51
warnings.filterwarnings("ignore", "^This process .* is multi-threaded, use of .* may lead to deadlocks in the child.$", DeprecationWarning)
item_hook = item.ihook
item_hook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
reports = run_item(item, nextitem)
for report in reports:
item_hook.pytest_runtest_logreport(report=report)
item_hook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
return True
def run_item(item, nextitem): # type: (Item, Item | None) -> list[TestReport]
"""Run the item in a child process and return a list of reports."""
with tempfile.NamedTemporaryFile() as temp_file:
pid = os.fork()
if not pid:
temp_file.delete = False
run_child(item, nextitem, temp_file.name)
return run_parent(item, pid, temp_file.name)
def run_child(item, nextitem, result_path): # type: (Item, Item | None, str) -> None
"""Run the item, record the result and exit. Called in the child process."""
with warnings.catch_warnings(record=True) as captured_warnings:
reports = runtestprotocol(item, nextitem=nextitem, log=False)
with open(result_path, "wb") as result_file:
pickle.dump((reports, captured_warnings), result_file)
os._exit(0) # noqa
def run_parent(item, pid, result_path): # type: (Item, int, str) -> list[TestReport]
"""Wait for the child process to exit and return the test reports. Called in the parent process."""
exit_code = waitstatus_to_exitcode(os.waitpid(pid, 0)[1])
if exit_code:
reason = "Test CRASHED with exit code {}.".format(exit_code)
report = TestReport(item.nodeid, item.location, {x: 1 for x in item.keywords}, "failed", reason, "call", user_properties=item.user_properties)
if item.get_closest_marker("xfail"):
report.outcome = "skipped"
report.wasxfail = reason
reports = [report]
else:
with open(result_path, "rb") as result_file:
reports, captured_warnings = pickle.load(result_file) # type: list[TestReport], list[warnings.WarningMessage]
for warning in captured_warnings:
warnings.warn_explicit(warning.message, warning.category, warning.filename, warning.lineno)
return reports
def waitstatus_to_exitcode(status): # type: (int) -> int
"""Convert a wait status to an exit code."""
# This function was added in Python 3.9.
# See: https://docs.python.org/3/library/os.html#os.waitstatus_to_exitcode
if os.WIFEXITED(status):
return os.WEXITSTATUS(status)
if os.WIFSIGNALED(status):
return -os.WTERMSIG(status)
raise ValueError(status)
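# Illustrative sketch: the conversion above in action. A child that exits normally yields
# its exit status, while a child killed by a signal would yield the negative signal number.
def _example_waitstatus_to_exitcode():  # pragma: no cover
    """Hedged example: fork a child that exits with status 3 and convert its wait status."""
    pid = os.fork()
    if not pid:
        os._exit(3)  # noqa
    status = os.waitpid(pid, 0)[1]
    return waitstatus_to_exitcode(status)  # 3 here; would be e.g. -9 if the child were SIGKILLed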
| 3,599
|
Python
|
.py
| 66
| 48.651515
| 150
| 0.70825
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,289
|
ansible_pytest_collections.py
|
ansible_ansible/test/lib/ansible_test/_util/target/pytest/plugins/ansible_pytest_collections.py
|
"""Enable unit testing of Ansible collections. PYTEST_DONT_REWRITE"""
from __future__ import annotations
import os
# set by ansible-test to a single directory, rather than a list of directories as supported by Ansible itself
ANSIBLE_COLLECTIONS_PATH = os.path.join(os.environ['ANSIBLE_COLLECTIONS_PATH'], 'ansible_collections')
# set by ansible-test to the minimum python version supported on the controller
ANSIBLE_CONTROLLER_MIN_PYTHON_VERSION = tuple(int(x) for x in os.environ['ANSIBLE_CONTROLLER_MIN_PYTHON_VERSION'].split('.'))
# this monkeypatch to _pytest.pathlib.resolve_package_path fixes PEP420 resolution for collections in pytest >= 6.0.0
# NB: this code should never run under py2
def collection_resolve_package_path(path):
"""Configure the Python package path so that pytest can find our collections."""
for parent in path.parents:
if str(parent) == ANSIBLE_COLLECTIONS_PATH:
return parent
raise Exception('File "%s" not found in collection path "%s".' % (path, ANSIBLE_COLLECTIONS_PATH))
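# Illustrative sketch: the resolution above, using a hypothetical collection layout. If
# ANSIBLE_COLLECTIONS_PATH were '/tmp/collections/ansible_collections', then a test file
# such as '.../ansible_collections/my_ns/my_coll/tests/unit/test_module.py' resolves to
# the 'ansible_collections' directory itself, so pytest treats everything below it as a
# single PEP 420 namespace package.
def _example_resolve_package_path():  # pragma: no cover
    """Hedged example of collection_resolve_package_path with a made-up test file path."""
    import pathlib
    path = pathlib.Path(ANSIBLE_COLLECTIONS_PATH, 'my_ns', 'my_coll', 'tests', 'unit', 'test_module.py')
    return collection_resolve_package_path(path)  # pathlib.Path(ANSIBLE_COLLECTIONS_PATH)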
# this monkeypatch to py.path.local.LocalPath.pypkgpath fixes PEP420 resolution for collections in pytest < 6.0.0
def collection_pypkgpath(self):
"""Configure the Python package path so that pytest can find our collections."""
for parent in self.parts(reverse=True):
if str(parent) == ANSIBLE_COLLECTIONS_PATH:
return parent
raise Exception('File "%s" not found in collection path "%s".' % (self.strpath, ANSIBLE_COLLECTIONS_PATH))
def enable_assertion_rewriting_hook(): # type: () -> None
"""
Enable pytest's AssertionRewritingHook on Python 3.x.
This is necessary because the Ansible collection loader intercepts imports before the pytest provided loader ever sees them.
"""
import sys
hook_name = '_pytest.assertion.rewrite.AssertionRewritingHook'
hooks = [hook for hook in sys.meta_path if hook.__class__.__module__ + '.' + hook.__class__.__qualname__ == hook_name]
if len(hooks) != 1:
raise Exception('Found {} instance(s) of "{}" in sys.meta_path.'.format(len(hooks), hook_name))
assertion_rewriting_hook = hooks[0]
# This is based on `_AnsibleCollectionPkgLoaderBase.exec_module` from `ansible/utils/collection_loader/_collection_finder.py`.
def exec_module(self, module):
# short-circuit redirect; avoid reinitializing existing modules
if self._redirect_module: # pylint: disable=protected-access
return
# execute the module's code in its namespace
code_obj = self.get_code(self._fullname) # pylint: disable=protected-access
if code_obj is not None: # things like NS packages that can't have code on disk will return None
# This logic is loosely based on `AssertionRewritingHook._should_rewrite` from pytest.
# See: https://github.com/pytest-dev/pytest/blob/779a87aada33af444f14841a04344016a087669e/src/_pytest/assertion/rewrite.py#L209
should_rewrite = self._package_to_load == 'conftest' or self._package_to_load.startswith('test_') # pylint: disable=protected-access
if should_rewrite:
# noinspection PyUnresolvedReferences
assertion_rewriting_hook.exec_module(module)
else:
exec(code_obj, module.__dict__) # pylint: disable=exec-used
# noinspection PyProtectedMember
from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionPkgLoaderBase
_AnsibleCollectionPkgLoaderBase.exec_module = exec_module
def pytest_configure():
"""Configure this pytest plugin."""
try:
if pytest_configure.executed:
return
except AttributeError:
pytest_configure.executed = True
enable_assertion_rewriting_hook()
# noinspection PyProtectedMember
from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
# allow unit tests to import code from collections
# noinspection PyProtectedMember
_AnsibleCollectionFinder(paths=[os.path.dirname(ANSIBLE_COLLECTIONS_PATH)])._install() # pylint: disable=protected-access
try:
# noinspection PyProtectedMember
from _pytest import pathlib as _pytest_pathlib
except ImportError:
_pytest_pathlib = None
if hasattr(_pytest_pathlib, 'resolve_package_path'):
_pytest_pathlib.resolve_package_path = collection_resolve_package_path
else:
# looks like pytest <= 6.0.0, use the old hack against py.path
# noinspection PyProtectedMember
import py._path.local
# force collections unit tests to be loaded with the ansible_collections namespace
# original idea from https://stackoverflow.com/questions/50174130/how-do-i-pytest-a-project-using-pep-420-namespace-packages/50175552#50175552
# noinspection PyProtectedMember
py._path.local.LocalPath.pypkgpath = collection_pypkgpath # pylint: disable=protected-access
pytest_configure()
| 4,997
|
Python
|
.py
| 81
| 54.679012
| 150
| 0.724667
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,290
|
python.py
|
ansible_ansible/test/lib/ansible_test/_util/target/injector/python.py
|
# auto-shebang
"""Provides an entry point for python scripts and python modules on the controller with the current python interpreter and optional code coverage collection."""
from __future__ import annotations
import importlib.util
import os
import sys
NETWORKING_CLI_STUB_SCRIPT = 'ansible_connection_cli_stub.py'
def main():
"""Main entry point."""
name = os.path.basename(__file__)
args = [sys.executable]
ansible_lib_root = os.environ.get('ANSIBLE_TEST_ANSIBLE_LIB_ROOT')
coverage_config = os.environ.get('COVERAGE_CONF')
coverage_output = os.environ.get('COVERAGE_FILE')
if coverage_config:
if coverage_output:
args += ['-m', 'coverage.__main__', 'run', '--rcfile', coverage_config]
else:
found = bool(importlib.util.find_spec('coverage'))
if not found:
sys.exit('ERROR: Could not find `coverage` module. '
'Did you use a virtualenv created without --system-site-packages or with the wrong interpreter?')
if name == 'python.py':
if sys.argv[1] == '-c':
# prevent simple misuse of python.py with -c which does not work with coverage
sys.exit('ERROR: Use `python -c` instead of `python.py -c` to avoid errors when code coverage is collected.')
elif name == 'pytest':
args += ['-m', 'pytest']
elif name == 'importer.py':
args += [find_program(name, False)]
elif name == NETWORKING_CLI_STUB_SCRIPT:
args += [os.path.join(ansible_lib_root, 'cli/scripts', NETWORKING_CLI_STUB_SCRIPT)]
else:
args += [find_program(name, True)]
args += sys.argv[1:]
os.execv(args[0], args)
def find_program(name, executable): # type: (str, bool) -> str
"""
Find and return the full path to the named program, optionally requiring it to be executable.
Raises an exception if the program is not found.
"""
path = os.environ.get('PATH', os.path.defpath)
seen = {os.path.abspath(__file__)}
mode = os.F_OK | os.X_OK if executable else os.F_OK
for base in path.split(os.path.pathsep):
candidate = os.path.abspath(os.path.join(base, name))
if candidate in seen:
continue
seen.add(candidate)
if os.path.exists(candidate) and os.access(candidate, mode):
return candidate
raise Exception('Executable "%s" not found in path: %s' % (name, path))
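# Illustrative sketch: when this script is invoked through one of its symlinked names,
# find_program() skips the path it was invoked as (tracked in `seen`) and returns the
# matching program found later on PATH. The name below is only an example.
def _example_find_program():  # pragma: no cover
    """Hedged example of locating the real importer.py elsewhere on PATH."""
    return find_program('importer.py', executable=False)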
if __name__ == '__main__':
main()
| 2,478
|
Python
|
.py
| 54
| 38.814815
| 160
| 0.64158
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,291
|
ansible_test_cli_stub.py
|
ansible_ansible/test/lib/ansible_test/_util/target/cli/ansible_test_cli_stub.py
|
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""Command line entry point for ansible-test."""
# NOTE: This file resides in the _util/target directory to ensure compatibility with all supported Python versions.
from __future__ import annotations
import os
import sys
def main(args=None):
"""Main program entry point."""
ansible_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
source_root = os.path.join(ansible_root, 'test', 'lib')
if os.path.exists(os.path.join(source_root, 'ansible_test', '_internal', '__init__.py')):
# running from source, use that version of ansible-test instead of any version that may already be installed
sys.path.insert(0, source_root)
# noinspection PyProtectedMember
from ansible_test._util.target.common.constants import CONTROLLER_PYTHON_VERSIONS
if version_to_str(sys.version_info[:2]) not in CONTROLLER_PYTHON_VERSIONS:
raise SystemExit('This version of ansible-test cannot be executed with Python version %s. Supported Python versions are: %s' % (
version_to_str(sys.version_info[:3]), ', '.join(CONTROLLER_PYTHON_VERSIONS)))
if any(not os.get_blocking(handle.fileno()) for handle in (sys.stdin, sys.stdout, sys.stderr)):
raise SystemExit('Standard input, output and error file handles must be blocking to run ansible-test.')
# noinspection PyProtectedMember
from ansible_test._internal import main as cli_main
cli_main(args)
def version_to_str(version):
"""Return a version string from a version tuple."""
return '.'.join(str(n) for n in version)
if __name__ == '__main__':
main()
| 1,650
|
Python
|
.py
| 29
| 51.931034
| 136
| 0.717933
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,292
|
pkgng.py
|
ansible_ansible/test/support/integration/plugins/modules/pkgng.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, bleader
# Written by bleader <bleader@ratonland.org>
# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
# that was based on pacman module written by Afterburn <https://github.com/afterburn>
# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pkgng
short_description: Package manager for FreeBSD >= 9.0
description:
- Manage binary packages for FreeBSD using 'pkgng' which is available in versions after 9.0.
version_added: "1.2"
options:
name:
description:
- Name or list of names of packages to install/remove.
required: true
state:
description:
- State of the package.
- 'Note: "latest" added in 2.7'
choices: [ 'present', 'latest', 'absent' ]
required: false
default: present
cached:
description:
- Use local package base instead of fetching an updated one.
type: bool
required: false
default: no
annotation:
description:
      - A comma-separated list of key/value pairs of the form
C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a
C(-) denotes removing an annotation, and C(:) denotes modifying an
annotation.
If setting or modifying annotations, a value must be provided.
required: false
version_added: "1.6"
pkgsite:
description:
- For pkgng versions before 1.1.4, specify packagesite to use
for downloading packages. If not specified, use settings from
C(/usr/local/etc/pkg.conf).
      - For newer pkgng versions, specify the name of a repository
configured in C(/usr/local/etc/pkg/repos).
required: false
rootdir:
description:
- For pkgng versions 1.5 and later, pkg will install all packages
within the specified root directory.
- Can not be used together with I(chroot) or I(jail) options.
required: false
chroot:
version_added: "2.1"
description:
- Pkg will chroot in the specified environment.
- Can not be used together with I(rootdir) or I(jail) options.
required: false
jail:
version_added: "2.4"
description:
- Pkg will execute in the given jail name or id.
- Can not be used together with I(chroot) or I(rootdir) options.
autoremove:
version_added: "2.2"
description:
- Remove automatically installed packages which are no longer needed.
required: false
type: bool
default: no
author: "bleader (@bleader)"
notes:
  - When using pkgsite, be aware that packages already in the cache won't be downloaded again.
  - When used with a `loop:` each package will be processed individually;
    it is much more efficient to pass the list directly to the `name` option.
"""
EXAMPLES = """
- name: Install package foo
pkgng:
name: foo
state: present
- name: Annotate package foo and bar
pkgng:
name: foo,bar
annotation: '+test1=baz,-test2,:test3=foobar'
- name: Remove packages foo and bar
pkgng:
name: foo,bar
state: absent
# "latest" support added in 2.7
- name: Upgrade package baz
pkgng:
name: baz
state: latest
"""
import re
from ansible.module_utils.basic import AnsibleModule
def query_package(module, pkgng_path, name, dir_arg):
rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name))
if rc == 0:
return True
return False
def query_update(module, pkgng_path, name, dir_arg, old_pkgng, pkgsite):
# Check to see if a package upgrade is available.
# rc = 0, no updates available or package not installed
# rc = 1, updates available
if old_pkgng:
rc, out, err = module.run_command("%s %s upgrade -g -n %s" % (pkgsite, pkgng_path, name))
else:
rc, out, err = module.run_command("%s %s upgrade %s -g -n %s" % (pkgng_path, dir_arg, pkgsite, name))
if rc == 1:
return True
return False
def pkgng_older_than(module, pkgng_path, compare_version):
rc, out, err = module.run_command("%s -v" % pkgng_path)
version = [int(x) for x in re.split(r'[\._]', out)]
i = 0
new_pkgng = True
while compare_version[i] == version[i]:
i += 1
if i == min(len(compare_version), len(version)):
break
else:
if compare_version[i] > version[i]:
new_pkgng = False
return not new_pkgng
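# Illustrative sketch: the comparison above with a stand-in for module.run_command. If
# `pkg -v` reported '1.0.9', comparing against [1, 1, 4] stops at 0 < 1 in the second
# position and reports the installed pkg as older; '1.2.7' would stop at 2 > 1 and report
# it as newer.
def _example_pkgng_older_than():  # pragma: no cover
    """Hedged example using a fake module whose run_command reports pkg version 1.0.9."""
    class _FakeModule:
        @staticmethod
        def run_command(cmd):
            return 0, '1.0.9\n', ''
    return pkgng_older_than(_FakeModule(), 'pkg', [1, 1, 4])  # True: 1.0.9 is older than 1.1.4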
def remove_packages(module, pkgng_path, packages, dir_arg):
remove_c = 0
    # Using a for loop so that, in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, pkgng_path, package, dir_arg):
continue
if not module.check_mode:
rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package))
if not module.check_mode and query_package(module, pkgng_path, package, dir_arg):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
return (True, "removed %s package(s)" % remove_c)
return (False, "package(s) already absent")
def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, state):
install_c = 0
# as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
# in /usr/local/etc/pkg/repos
old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4])
if pkgsite != "":
if old_pkgng:
pkgsite = "PACKAGESITE=%s" % (pkgsite)
else:
pkgsite = "-r %s" % (pkgsite)
# This environment variable skips mid-install prompts,
# setting them to their default values.
batch_var = 'env BATCH=yes'
if not module.check_mode and not cached:
if old_pkgng:
rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
else:
rc, out, err = module.run_command("%s %s update" % (pkgng_path, dir_arg))
if rc != 0:
module.fail_json(msg="Could not update catalogue [%d]: %s %s" % (rc, out, err))
for package in packages:
already_installed = query_package(module, pkgng_path, package, dir_arg)
if already_installed and state == "present":
continue
update_available = query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
if not update_available and already_installed and state == "latest":
continue
if not module.check_mode:
if already_installed:
action = "upgrade"
else:
action = "install"
if old_pkgng:
rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, package))
else:
rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, package))
if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
module.fail_json(msg="failed to %s %s: %s" % (action, package, out), stderr=err)
install_c += 1
if install_c > 0:
return (True, "added %s package(s)" % (install_c))
return (False, "package(s) already %s" % (state))
def annotation_query(module, pkgng_path, package, tag, dir_arg):
rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package))
match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
if match:
return match.group('value')
return False
def annotation_add(module, pkgng_path, package, tag, value, dir_arg):
_value = annotation_query(module, pkgng_path, package, tag, dir_arg)
if not _value:
# Annotation does not exist, add it.
rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
% (pkgng_path, dir_arg, package, tag, value))
if rc != 0:
module.fail_json(msg="could not annotate %s: %s"
% (package, out), stderr=err)
return True
elif _value != value:
# Annotation exists, but value differs
module.fail_json(
mgs="failed to annotate %s, because %s is already set to %s, but should be set to %s"
% (package, tag, _value, value))
return False
else:
# Annotation exists, nothing to do
return False
def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
_value = annotation_query(module, pkgng_path, package, tag, dir_arg)
if _value:
rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
% (pkgng_path, dir_arg, package, tag))
if rc != 0:
module.fail_json(msg="could not delete annotation to %s: %s"
% (package, out), stderr=err)
return True
return False
def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
_value = annotation_query(module, pkgng_path, package, tag, dir_arg)
    if not _value:
# No such tag
module.fail_json(msg="could not change annotation to %s: tag %s does not exist"
% (package, tag))
elif _value == value:
# No change in value
return False
else:
rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
% (pkgng_path, dir_arg, package, tag, value))
if rc != 0:
module.fail_json(msg="could not change annotation annotation to %s: %s"
% (package, out), stderr=err)
return True
def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
annotate_c = 0
    # Parse each comma-separated annotation into its operation, tag and optional value.
    annotations = [
        re.match(r'(?P<operation>[+\-:])(?P<tag>\w+)(=(?P<value>\w+))?', _annotation).groupdict()
        for _annotation in annotation.split(',')
    ]
operation = {
'+': annotation_add,
'-': annotation_delete,
':': annotation_modify
}
for package in packages:
for _annotation in annotations:
            op_func = operation[_annotation['operation']]
            if op_func(module, pkgng_path, package, _annotation['tag'], _annotation['value'], dir_arg):
annotate_c += 1
if annotate_c > 0:
return (True, "added %s annotations." % annotate_c)
return (False, "changed no annotations")
def autoremove_packages(module, pkgng_path, dir_arg):
rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg))
autoremove_c = 0
match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE)
if match:
autoremove_c = int(match.group(1))
if autoremove_c == 0:
return False, "no package(s) to autoremove"
if not module.check_mode:
rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg))
return True, "autoremoved %d package(s)" % (autoremove_c)
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default="present", choices=["present", "latest", "absent"], required=False),
name=dict(aliases=["pkg"], required=True, type='list'),
cached=dict(default=False, type='bool'),
annotation=dict(default="", required=False),
pkgsite=dict(default="", required=False),
rootdir=dict(default="", required=False, type='path'),
chroot=dict(default="", required=False, type='path'),
jail=dict(default="", required=False, type='str'),
autoremove=dict(default=False, type='bool')),
supports_check_mode=True,
mutually_exclusive=[["rootdir", "chroot", "jail"]])
pkgng_path = module.get_bin_path('pkg', True)
p = module.params
pkgs = p["name"]
changed = False
msgs = []
dir_arg = ""
if p["rootdir"] != "":
old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0])
if old_pkgng:
module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
else:
dir_arg = "--rootdir %s" % (p["rootdir"])
if p["chroot"] != "":
dir_arg = '--chroot %s' % (p["chroot"])
if p["jail"] != "":
dir_arg = '--jail %s' % (p["jail"])
if p["state"] in ("present", "latest"):
_changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], dir_arg, p["state"])
changed = changed or _changed
msgs.append(_msg)
elif p["state"] == "absent":
_changed, _msg = remove_packages(module, pkgng_path, pkgs, dir_arg)
changed = changed or _changed
msgs.append(_msg)
if p["autoremove"]:
_changed, _msg = autoremove_packages(module, pkgng_path, dir_arg)
changed = changed or _changed
msgs.append(_msg)
if p["annotation"]:
_changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg)
changed = changed or _changed
msgs.append(_msg)
module.exit_json(changed=changed, msg=", ".join(msgs))
if __name__ == '__main__':
main()
| 13,924
|
Python
|
.py
| 321
| 34.847352
| 140
| 0.60034
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,293
|
_reboot.py
|
ansible_ansible/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/plugin_utils/_reboot.py
|
# Copyright: (c) 2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Reboot action for Windows hosts
This contains the code to reboot a Windows host for use by other action plugins
in this collection. Right now it should only be used in this collection as the
interface is not final and could be subject to change.
"""
# FOR INTERNAL COLLECTION USE ONLY
# The interfaces in this file are meant for use within the ansible.windows collection
# and may not remain stable to outside uses. Changes may be made in ANY release, even a bugfix release.
# See also: https://github.com/ansible/community/issues/539#issuecomment-780839686
# Please open an issue if you have questions about this.
from __future__ import annotations
import datetime
import json
import random
import time
import traceback
import uuid
import typing as t
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.module_utils.common.text.converters import to_text
from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display
from ansible_collections.ansible.windows.plugins.plugin_utils._quote import quote_pwsh
# This is not ideal but the psrp connection plugin doesn't catch all these exceptions as an AnsibleConnectionFailure.
# Until we can guarantee we are using a version of psrp that handles all this we try to handle those issues.
try:
from requests.exceptions import (
RequestException,
)
except ImportError:
RequestException = AnsibleConnectionFailure
_LOGON_UI_KEY = (
r"HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Winlogon\AutoLogonChecked"
)
_DEFAULT_BOOT_TIME_COMMAND = (
"(Get-CimInstance -ClassName Win32_OperatingSystem -Property LastBootUpTime)"
".LastBootUpTime.ToFileTime()"
)
T = t.TypeVar("T")
display = Display()
class _ReturnResultException(Exception):
"""Used to sneak results back to the return dict from an exception"""
def __init__(self, msg, **result):
super().__init__(msg)
self.result = result
class _TestCommandFailure(Exception):
"""Differentiates between a connection failure and just a command assertion failure during the reboot loop"""
def reboot_host(
task_action: str,
connection: ConnectionBase,
boot_time_command: str = _DEFAULT_BOOT_TIME_COMMAND,
connect_timeout: int = 5,
msg: str = "Reboot initiated by Ansible",
post_reboot_delay: int = 0,
pre_reboot_delay: int = 2,
reboot_timeout: int = 600,
test_command: t.Optional[str] = None,
) -> t.Dict[str, t.Any]:
"""Reboot a Windows Host.
Used by action plugins in ansible.windows to reboot a Windows host. It
takes in the connection plugin so it can run the commands on the targeted
host and monitor the reboot process. The return dict will have the
following keys set:
changed: Whether a change occurred (reboot was done)
elapsed: Seconds elapsed between the reboot and it coming back online
failed: Whether a failure occurred
unreachable: Whether it failed to connect to the host on the first cmd
rebooted: Whether the host was rebooted
When failed=True there may be more keys to give some information around
the failure like msg, exception. There are other keys that might be
returned as well but they are dependent on the failure that occurred.
Verbosity levels used:
2: Message when each reboot step is completed
4: Connection plugin operations and their results
5: Raw commands run and the results of those commands
Debug: Everything, very verbose
Args:
task_action: The name of the action plugin that is running for logging.
connection: The connection plugin to run the reboot commands on.
        boot_time_command: The command to run when getting the boot time.
connect_timeout: Override the connection timeout of the connection
plugin when polling the rebooted host.
msg: The message to display to interactive users when rebooting the
host.
post_reboot_delay: Seconds to wait after sending the reboot command
before checking to see if it has returned.
pre_reboot_delay: Seconds to wait when sending the reboot command.
reboot_timeout: Seconds to wait while polling for the host to come
back online.
test_command: Command to run when the host is back online and
determines the machine is ready for management. When not defined
the default command should wait until the reboot is complete and
all pre-login configuration has completed.
Returns:
(Dict[str, Any]): The return result as a dictionary. Use the 'failed'
key to determine if there was a failure or not.
"""
result: t.Dict[str, t.Any] = {
"changed": False,
"elapsed": 0,
"failed": False,
"unreachable": False,
"rebooted": False,
}
host_context = {"do_close_on_reset": True}
# Get current boot time. A lot of tasks that require a reboot leave the WSMan stack in a bad place. Will try to
# get the initial boot time 3 times before giving up.
try:
previous_boot_time = _do_until_success_or_retry_limit(
task_action,
connection,
host_context,
"pre-reboot boot time check",
3,
_get_system_boot_time,
task_action,
connection,
boot_time_command,
)
except Exception as e:
        # Report the failure based on the last exception received.
if isinstance(e, _ReturnResultException):
result.update(e.result)
if isinstance(e, AnsibleConnectionFailure):
result["unreachable"] = True
else:
result["failed"] = True
result["msg"] = str(e)
result["exception"] = traceback.format_exc()
return result
# Get the original connection_timeout option var so it can be reset after
original_connection_timeout: t.Optional[float] = None
try:
original_connection_timeout = connection.get_option("connection_timeout")
display.vvvv(
f"{task_action}: saving original connection_timeout of {original_connection_timeout}"
)
except KeyError:
display.vvvv(
f"{task_action}: connection_timeout connection option has not been set"
)
# Initiate reboot
    # This command may be wrapped in other shells or commands, making it hard to detect what shutdown.exe actually
    # returned. We use this hackery to return JSON that contains the stdout/stderr/rc as a structured object for our
# code to parse and detect if something went wrong.
reboot_command = """$ErrorActionPreference = 'Continue'
if ($%s) {
Remove-Item -LiteralPath '%s' -Force -ErrorAction SilentlyContinue
}
$stdout = $null
$stderr = . { shutdown.exe /r /t %s /c %s | Set-Variable stdout } 2>&1 | ForEach-Object ToString
ConvertTo-Json -Compress -InputObject @{
stdout = (@($stdout) -join "`n")
stderr = (@($stderr) -join "`n")
rc = $LASTEXITCODE
}
""" % (
str(not test_command),
_LOGON_UI_KEY,
int(pre_reboot_delay),
quote_pwsh(msg),
)
expected_test_result = (
None # We cannot have an expected result if the command is user defined
)
if not test_command:
# It turns out that LogonUI will create this registry key if it does not exist when it's about to show the
# logon prompt. Normally this is a volatile key but if someone has explicitly created it that might no longer
# be the case. We ensure it is not present on a reboot so we can wait until LogonUI creates it to determine
# the host is actually online and ready, e.g. no configurations/updates still to be applied.
        # We echo a known success marker to catch issues where PowerShell fails to start but the rc is mysteriously
        # 0, which would otherwise cause a successful reboot to be detected too early (seen on ssh connections).
expected_test_result = f"success-{uuid.uuid4()}"
test_command = f"Get-Item -LiteralPath '{_LOGON_UI_KEY}' -ErrorAction Stop; '{expected_test_result}'"
start = None
try:
_perform_reboot(task_action, connection, reboot_command)
start = datetime.datetime.utcnow()
result["changed"] = True
result["rebooted"] = True
if post_reboot_delay != 0:
display.vv(
f"{task_action}: waiting an additional {post_reboot_delay} seconds"
)
time.sleep(post_reboot_delay)
# Keep on trying to run the last boot time check until it is successful or the timeout is raised
display.vv(f"{task_action} validating reboot")
_do_until_success_or_timeout(
task_action,
connection,
host_context,
"last boot time check",
reboot_timeout,
_check_boot_time,
task_action,
connection,
host_context,
previous_boot_time,
boot_time_command,
connect_timeout,
)
# Reset the connection plugin connection timeout back to the original
if original_connection_timeout is not None:
_set_connection_timeout(
task_action,
connection,
host_context,
original_connection_timeout,
)
        # Run the test command until it is successful or a timeout occurs
display.vv(f"{task_action} running post reboot test command")
_do_until_success_or_timeout(
task_action,
connection,
host_context,
"post-reboot test command",
reboot_timeout,
_run_test_command,
task_action,
connection,
test_command,
expected=expected_test_result,
)
display.vv(f"{task_action}: system successfully rebooted")
except Exception as e:
if isinstance(e, _ReturnResultException):
result.update(e.result)
result["failed"] = True
result["msg"] = str(e)
result["exception"] = traceback.format_exc()
if start:
elapsed = datetime.datetime.utcnow() - start
result["elapsed"] = elapsed.seconds
return result
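# Illustrative sketch: how an action plugin in this collection might call reboot_host.
# The self._task/self._connection attributes are assumed from the usual ActionBase
# context and the parameter values are only examples.
#
#     result = reboot_host(
#         self._task.action,
#         self._connection,
#         reboot_timeout=1200,
#         msg="Rebooting for patching",
#     )
#     if result["failed"]:
#         raise AnsibleError(result["msg"])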
def _check_boot_time(
task_action: str,
connection: ConnectionBase,
host_context: t.Dict[str, t.Any],
previous_boot_time: int,
boot_time_command: str,
timeout: int,
):
"""Checks the system boot time has been changed or not"""
display.vvvv("%s: attempting to get system boot time" % task_action)
# override connection timeout from defaults to custom value
if timeout:
_set_connection_timeout(task_action, connection, host_context, timeout)
# try and get boot time
current_boot_time = _get_system_boot_time(
task_action, connection, boot_time_command
)
if current_boot_time == previous_boot_time:
raise _TestCommandFailure("boot time has not changed")
def _do_until_success_or_retry_limit(
task_action: str,
connection: ConnectionBase,
host_context: t.Dict[str, t.Any],
action_desc: str,
retries: int,
func: t.Callable[..., T],
*args: t.Any,
**kwargs: t.Any,
) -> t.Optional[T]:
"""Runs the function multiple times ignoring errors until the retry limit is hit"""
def wait_condition(idx):
return idx < retries
return _do_until_success_or_condition(
task_action,
connection,
host_context,
action_desc,
wait_condition,
func,
*args,
**kwargs,
)
def _do_until_success_or_timeout(
task_action: str,
connection: ConnectionBase,
host_context: t.Dict[str, t.Any],
action_desc: str,
timeout: float,
func: t.Callable[..., T],
*args: t.Any,
**kwargs: t.Any,
) -> t.Optional[T]:
"""Runs the function multiple times ignoring errors until a timeout occurs"""
max_end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=timeout)
def wait_condition(idx):
return datetime.datetime.utcnow() < max_end_time
try:
return _do_until_success_or_condition(
task_action,
connection,
host_context,
action_desc,
wait_condition,
func,
*args,
**kwargs,
)
except Exception:
raise Exception(
"Timed out waiting for %s (timeout=%s)" % (action_desc, timeout)
)
def _do_until_success_or_condition(
task_action: str,
connection: ConnectionBase,
host_context: t.Dict[str, t.Any],
action_desc: str,
condition: t.Callable[[int], bool],
func: t.Callable[..., T],
*args: t.Any,
**kwargs: t.Any,
) -> t.Optional[T]:
"""Runs the function multiple times ignoring errors until the condition is false"""
fail_count = 0
max_fail_sleep = 12
reset_required = False
last_error = None
while fail_count == 0 or condition(fail_count):
try:
if reset_required:
# Keep on trying the reset until it succeeds.
_reset_connection(task_action, connection, host_context)
reset_required = False
else:
res = func(*args, **kwargs)
display.vvvvv("%s: %s success" % (task_action, action_desc))
return res
except Exception as e:
last_error = e
if not isinstance(e, _TestCommandFailure):
# The error may be due to a connection problem, just reset the connection just in case
reset_required = True
# Use exponential backoff with a max timeout, plus a little bit of randomness
random_int = random.randint(0, 1000) / 1000
fail_sleep = 2**fail_count + random_int
if fail_sleep > max_fail_sleep:
fail_sleep = max_fail_sleep + random_int
try:
error = str(e).splitlines()[-1]
except IndexError:
error = str(e)
display.vvvvv(
"{action}: {desc} fail {e_type} '{err}', retrying in {sleep:.4} seconds...\n{tcb}".format(
action=task_action,
desc=action_desc,
e_type=type(e).__name__,
err=error,
sleep=fail_sleep,
tcb=traceback.format_exc(),
)
)
fail_count += 1
time.sleep(fail_sleep)
if last_error:
raise last_error
return None
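# Illustrative sketch: the retry delay used in the loop above, shown stand-alone. It grows
# exponentially (2 ** fail_count), is capped at max_fail_sleep and gets up to a second of
# random jitter added.
def _example_backoff_delays(retries=5, max_fail_sleep=12):  # pragma: no cover
    """Hedged example returning the sleep used for each successive failure."""
    delays = []
    for fail_count in range(retries):
        random_int = random.randint(0, 1000) / 1000
        fail_sleep = 2**fail_count + random_int
        if fail_sleep > max_fail_sleep:
            fail_sleep = max_fail_sleep + random_int
        delays.append(fail_sleep)
    return delays  # roughly [1, 2, 4, 8, 12] plus jitter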
def _execute_command(
task_action: str,
connection: ConnectionBase,
command: str,
) -> t.Tuple[int, str, str]:
"""Runs a command on the Windows host and returned the result"""
display.vvvvv(f"{task_action}: running command: {command}")
# Need to wrap the command in our PowerShell encoded wrapper. This is done to align the command input to a
# common shell and to allow the psrp connection plugin to report the correct exit code without manually setting
# $LASTEXITCODE for just that plugin.
command = connection._shell._encode_script(command)
try:
rc, stdout, stderr = connection.exec_command(
command, in_data=None, sudoable=False
)
except RequestException as e:
# The psrp connection plugin should be doing this but until we can guarantee it does we just convert it here
# to ensure AnsibleConnectionFailure refers to actual connection errors.
raise AnsibleConnectionFailure(f"Failed to connect to the host: {e}")
rc = rc or 0
stdout = to_text(stdout, errors="surrogate_or_strict").strip()
stderr = to_text(stderr, errors="surrogate_or_strict").strip()
display.vvvvv(
f"{task_action}: command result - rc: {rc}, stdout: {stdout}, stderr: {stderr}"
)
return rc, stdout, stderr
def _get_system_boot_time(
task_action: str,
connection: ConnectionBase,
boot_time_command: str,
) -> str:
"""Gets a unique identifier to represent the boot time of the Windows host"""
display.vvvv(f"{task_action}: getting boot time")
rc, stdout, stderr = _execute_command(task_action, connection, boot_time_command)
if rc != 0:
msg = f"{task_action}: failed to get host boot time info"
raise _ReturnResultException(msg, rc=rc, stdout=stdout, stderr=stderr)
display.vvvv(f"{task_action}: last boot time: {stdout}")
return stdout
def _perform_reboot(
task_action: str,
connection: ConnectionBase,
reboot_command: str,
handle_abort: bool = True,
) -> None:
"""Runs the reboot command"""
display.vv(f"{task_action}: rebooting server...")
stdout = stderr = None
try:
rc, stdout, stderr = _execute_command(task_action, connection, reboot_command)
except AnsibleConnectionFailure as e:
# If the connection is closed too quickly due to the system being shutdown, carry on
display.vvvv(f"{task_action}: AnsibleConnectionFailure caught and handled: {e}")
rc = 0
if stdout:
try:
reboot_result = json.loads(stdout)
except getattr(json.decoder, "JSONDecodeError", ValueError):
# While the reboot command should output json it may have failed for some other reason. We continue
# reporting with that output instead
pass
else:
stdout = reboot_result.get("stdout", stdout)
stderr = reboot_result.get("stderr", stderr)
rc = int(reboot_result.get("rc", rc))
# Test for "A system shutdown has already been scheduled. (1190)" and handle it gracefully
if handle_abort and (rc == 1190 or (rc != 0 and stderr and "(1190)" in stderr)):
display.warning("A scheduled reboot was pre-empted by Ansible.")
# Try to abort (this may fail if it was already aborted)
rc, stdout, stderr = _execute_command(
task_action, connection, "shutdown.exe /a"
)
display.vvvv(
f"{task_action}: result from trying to abort existing shutdown - rc: {rc}, stdout: {stdout}, stderr: {stderr}"
)
return _perform_reboot(
task_action, connection, reboot_command, handle_abort=False
)
if rc != 0:
msg = f"{task_action}: Reboot command failed"
raise _ReturnResultException(msg, rc=rc, stdout=stdout, stderr=stderr)
def _reset_connection(
task_action: str,
connection: ConnectionBase,
host_context: t.Dict[str, t.Any],
ignore_errors: bool = False,
) -> None:
"""Resets the connection handling any errors"""
def _wrap_conn_err(func, *args, **kwargs):
try:
func(*args, **kwargs)
except (AnsibleError, RequestException) as e:
if ignore_errors:
return False
raise AnsibleError(e)
return True
    # While reset() should probably handle this better, some connection plugins don't clear the existing connection on
# reset() leaving resources still in use on the target (WSMan shells). Instead we try to manually close the
# connection then call reset. If it fails once we want to skip closing to avoid a perpetual loop and just hope
# reset() brings us back into a good state. If it's successful we still want to try it again.
if host_context["do_close_on_reset"]:
display.vvvv(f"{task_action}: closing connection plugin")
try:
success = _wrap_conn_err(connection.close)
except Exception:
host_context["do_close_on_reset"] = False
raise
host_context["do_close_on_reset"] = success
    # For some connection plugins (ssh) reset actually does something more than close, so we also call that
display.vvvv(f"{task_action}: resetting connection plugin")
try:
_wrap_conn_err(connection.reset)
except AttributeError:
# Not all connection plugins have reset so we just ignore those, close should have done our job.
pass
def _run_test_command(
task_action: str,
connection: ConnectionBase,
command: str,
expected: t.Optional[str] = None,
) -> None:
"""Runs the user specified test command until the host is able to run it properly"""
display.vvvv(f"{task_action}: attempting post-reboot test command")
rc, stdout, stderr = _execute_command(task_action, connection, command)
if rc != 0:
msg = f"{task_action}: Test command failed - rc: {rc}, stdout: {stdout}, stderr: {stderr}"
raise _TestCommandFailure(msg)
if expected and expected not in stdout:
msg = f"{task_action}: Test command failed - '{expected}' was not in stdout: {stdout}"
raise _TestCommandFailure(msg)
def _set_connection_timeout(
task_action: str,
connection: ConnectionBase,
host_context: t.Dict[str, t.Any],
timeout: float,
) -> None:
"""Sets the connection plugin connection_timeout option and resets the connection"""
try:
current_connection_timeout = connection.get_option("connection_timeout")
except KeyError:
# Not all connection plugins implement this, just ignore the setting if it doesn't work
return
if timeout == current_connection_timeout:
return
display.vvvv(f"{task_action}: setting connect_timeout {timeout}")
connection.set_option("connection_timeout", timeout)
_reset_connection(task_action, connection, host_context, ignore_errors=True)
| 21,796
|
Python
|
.py
| 508
| 35.05315
| 122
| 0.657143
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,294
|
_quote.py
|
ansible_ansible/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/plugin_utils/_quote.py
|
# Copyright (c) 2021 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Quoting helpers for Windows
This contains code to help with quoting values for use in the various Windows
shells. Right now it should only be used in ansible.windows as the interface is
not final and could be subject to change.
"""
# FOR INTERNAL COLLECTION USE ONLY
# The interfaces in this file are meant for use within the ansible.windows collection
# and may not remain stable to outside uses. Changes may be made in ANY release, even a bugfix release.
# See also: https://github.com/ansible/community/issues/539#issuecomment-780839686
# Please open an issue if you have questions about this.
from __future__ import annotations
import re
from ansible.module_utils.six import text_type
_UNSAFE_C = re.compile(u'[\\s\t"]')
_UNSAFE_CMD = re.compile(u'[\\s\\(\\)\\^\\|%!"<>&]')
# PowerShell has 5 characters it uses as a single quote, we need to double up on all of them.
# https://github.com/PowerShell/PowerShell/blob/b7cb335f03fe2992d0cbd61699de9d9aafa1d7c1/src/System.Management.Automation/engine/parser/CharTraits.cs#L265-L272
# https://github.com/PowerShell/PowerShell/blob/b7cb335f03fe2992d0cbd61699de9d9aafa1d7c1/src/System.Management.Automation/engine/parser/CharTraits.cs#L18-L21
_UNSAFE_PWSH = re.compile(u"(['\u2018\u2019\u201a\u201b])")
def quote_c(s): # type: (text_type) -> text_type
"""Quotes a value for the raw Win32 process command line.
Quotes a value to be safely used by anything that calls the Win32
CreateProcess API.
Args:
s: The string to quote.
Returns:
(text_type): The quoted string value.
"""
# https://docs.microsoft.com/en-us/archive/blogs/twistylittlepassagesallalike/everyone-quotes-command-line-arguments-the-wrong-way
if not s:
return u'""'
if not _UNSAFE_C.search(s):
return s
# Replace any double quotes in an argument with '\"'.
s = s.replace('"', '\\"')
# We need to double up on any '\' chars that preceded a double quote (now '\"').
s = re.sub(r'(\\+)\\"', r'\1\1\"', s)
    # Double up '\' at the end of the argument so it doesn't escape the closing double quote.
s = re.sub(r'(\\+)$', r'\1\1', s)
# Finally wrap the entire argument in double quotes now we've escaped the double quotes within.
return u'"{0}"'.format(s)
def quote_cmd(s): # type: (text_type) -> text_type
"""Quotes a value for cmd.
Quotes a value to be safely used by a command prompt call.
Args:
s: The string to quote.
Returns:
(text_type): The quoted string value.
"""
# https://docs.microsoft.com/en-us/archive/blogs/twistylittlepassagesallalike/everyone-quotes-command-line-arguments-the-wrong-way#a-better-method-of-quoting
if not s:
return u'""'
if not _UNSAFE_CMD.search(s):
return s
# Escape the metachars as we are quoting the string to stop cmd from interpreting that metachar. For example
# 'file &whoami.exe' would result in 'whoami.exe' being executed and then that output being used as the argument
# instead of the literal string.
# https://stackoverflow.com/questions/3411771/multiple-character-replace-with-python
for c in u'^()%!"<>&|': # '^' must be the first char that we scan and replace
if c in s:
# I can't find any docs that explicitly say this but to escape ", it needs to be prefixed with \^.
s = s.replace(c, (u"\\^" if c == u'"' else u"^") + c)
return u'^"{0}^"'.format(s)
def quote_pwsh(s): # type: (text_type) -> text_type
"""Quotes a value for PowerShell.
Quotes a value to be safely used by a PowerShell expression. The input
    string becomes something that is safely wrapped in single quotes.
Args:
s: The string to quote.
Returns:
(text_type): The quoted string value.
"""
# https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_quoting_rules?view=powershell-5.1
if not s:
return u"''"
# We should always quote values in PowerShell as it has conflicting rules where strings can and can't be quoted.
# This means we quote the entire arg with single quotes and just double up on the single quote equivalent chars.
return u"'{0}'".format(_UNSAFE_PWSH.sub(u'\\1\\1', s))
| 4,387
|
Python
|
.py
| 80
| 49.825
| 161
| 0.700515
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,295
|
win_reboot.py
|
ansible_ansible/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/action/win_reboot.py
|
# Copyright: (c) 2018, Matt Davis <mdavis@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.validation import check_type_str, check_type_float
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
from ansible_collections.ansible.windows.plugins.plugin_utils._reboot import reboot_host
display = Display()
def _positive_float(val):
float_val = check_type_float(val)
if float_val < 0:
return 0
else:
return float_val
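# Illustrative behaviour of the helper above (hypothetical inputs):
# _positive_float('600') -> 600.0, _positive_float(-5) -> 0 (negative values are clamped).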
class ActionModule(ActionBase):
TRANSFERS_FILES = False
_VALID_ARGS = frozenset((
'boot_time_command',
'connect_timeout',
'connect_timeout_sec',
'msg',
'post_reboot_delay',
'post_reboot_delay_sec',
'pre_reboot_delay',
'pre_reboot_delay_sec',
'reboot_timeout',
'reboot_timeout_sec',
'shutdown_timeout',
'shutdown_timeout_sec',
'test_command',
))
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = True
self._supports_async = True
if self._play_context.check_mode:
return {'changed': True, 'elapsed': 0, 'rebooted': True}
if task_vars is None:
task_vars = {}
super(ActionModule, self).run(tmp, task_vars)
parameters = {}
for names, check_func in [
(['boot_time_command'], check_type_str),
(['connect_timeout', 'connect_timeout_sec'], _positive_float),
(['msg'], check_type_str),
(['post_reboot_delay', 'post_reboot_delay_sec'], _positive_float),
(['pre_reboot_delay', 'pre_reboot_delay_sec'], _positive_float),
(['reboot_timeout', 'reboot_timeout_sec'], _positive_float),
(['test_command'], check_type_str),
]:
for name in names:
value = self._task.args.get(name, None)
if value:
break
else:
value = None
# Defaults are applied in reboot_action so skip adding to kwargs if the input wasn't set (None)
if value is not None:
try:
value = check_func(value)
except TypeError as e:
raise AnsibleError("Invalid value given for '%s': %s." % (names[0], to_native(e)))
                    # Setting a lower value can kill PowerShell when sending the shutdown command. Just use the defaults
                    # if this is the case.
if names[0] == 'pre_reboot_delay' and value < 2:
continue
parameters[names[0]] = value
result = reboot_host(self._task.action, self._connection, **parameters)
        # Not needed for testing, and the collection_name kwarg causes a sanity error
# Historical behaviour had ignore_errors=True being able to ignore unreachable hosts and not just task errors.
# This snippet will allow that to continue but state that it will be removed in a future version and to use
# ignore_unreachable to ignore unreachable hosts.
# if result['unreachable'] and self._task.ignore_errors and not self._task.ignore_unreachable:
# dep_msg = "Host was unreachable but is being skipped because ignore_errors=True is set. In the future " \
# "only ignore_unreachable will be able to ignore an unreachable host for %s" % self._task.action
# display.deprecated(dep_msg, date="2023-05-01", collection_name="ansible.windows")
# result['unreachable'] = False
# result['failed'] = True
return result
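# Illustrative sketch (not part of the original plugin); the task arguments are hypothetical.
# Given args {'reboot_timeout_sec': '600', 'pre_reboot_delay': 1, 'msg': 'maintenance'},
# the alias loop in run() takes the first alias that is set, coerces it with the matching
# check function and stores it under the canonical name; pre_reboot_delay is dropped because
# values below 2 fall back to the defaults, so reboot_host() would receive
# {'reboot_timeout': 600.0, 'msg': 'maintenance'}.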
| 3,875 | Python | .py | 80 | 38.5875 | 119 | 0.618808 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,296 | win_copy.py | ansible_ansible/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/action/win_copy.py |
# This file is part of Ansible
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import base64
import json
import os
import os.path
import shutil
import tempfile
import traceback
import zipfile
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum
def _walk_dirs(topdir, loader, decrypt=True, base_path=None, local_follow=False, trailing_slash_detector=None, checksum_check=False):
"""
Walk a filesystem tree returning enough information to copy the files.
This is similar to the _walk_dirs function in ``copy.py`` but returns
a dict instead of a tuple for each entry and includes the checksum of
a local file if wanted.
:arg topdir: The directory that the filesystem tree is rooted at
:arg loader: The self._loader object from ActionBase
:kwarg decrypt: Whether to decrypt a file encrypted with ansible-vault
:kwarg base_path: The initial directory structure to strip off of the
files for the destination directory. If this is None (the default),
the base_path is set to ``top_dir``.
:kwarg local_follow: Whether to follow symlinks on the source. When set
to False, no symlinks are dereferenced. When set to True (the
default), the code will dereference most symlinks. However, symlinks
can still be present if needed to break a circular link.
:kwarg trailing_slash_detector: Function to determine if a path has
a trailing directory separator. Only needed when dealing with paths on
a remote machine (in which case, pass in a function that is aware of the
directory separator conventions on the remote machine).
    :kwarg checksum_check: Whether to get the checksum of the local file and add it to the dict
    :returns: dictionary of dictionaries. All of the path elements in the structure are text strings.
This separates all the files, directories, and symlinks along with
        important information about each::
{
                'files': [{
src: '/absolute/path/to/copy/from',
dest: 'relative/path/to/copy/to',
checksum: 'b54ba7f5621240d403f06815f7246006ef8c7d43'
}, ...],
                'directories': [{
src: '/absolute/path/to/copy/from',
dest: 'relative/path/to/copy/to'
}, ...],
                'symlinks': [{
src: '/symlink/target/path',
dest: 'relative/path/to/copy/to'
}, ...],
}
The ``symlinks`` field is only populated if ``local_follow`` is set to False
*or* a circular symlink cannot be dereferenced. The ``checksum`` entry is set
to None if checksum_check=False.
"""
# Convert the path segments into byte strings
r_files = {'files': [], 'directories': [], 'symlinks': []}
def _recurse(topdir, rel_offset, parent_dirs, rel_base=u'', checksum_check=False):
"""
        This is a closure (a function utilizing variables from its parent
function's scope) so that we only need one copy of all the containers.
Note that this function uses side effects (See the Variables used from
outer scope).
:arg topdir: The directory we are walking for files
:arg rel_offset: Integer defining how many characters to strip off of
the beginning of a path
:arg parent_dirs: Directories that we're copying that this directory is in.
:kwarg rel_base: String to prepend to the path after ``rel_offset`` is
applied to form the relative path.
Variables used from the outer scope
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:r_files: Dictionary of files in the hierarchy. See the return value
for :func:`walk` for the structure of this dictionary.
:local_follow: Read-only inside of :func:`_recurse`. Whether to follow symlinks
"""
for base_path, sub_folders, files in os.walk(topdir):
for filename in files:
filepath = os.path.join(base_path, filename)
dest_filepath = os.path.join(rel_base, filepath[rel_offset:])
if os.path.islink(filepath):
                    # Dereference the symlink
real_file = loader.get_real_file(os.path.realpath(filepath), decrypt=decrypt)
if local_follow and os.path.isfile(real_file):
# Add the file pointed to by the symlink
r_files['files'].append(
{
"src": real_file,
"dest": dest_filepath,
"checksum": _get_local_checksum(checksum_check, real_file)
}
)
else:
# Mark this file as a symlink to copy
r_files['symlinks'].append({"src": os.readlink(filepath), "dest": dest_filepath})
else:
# Just a normal file
real_file = loader.get_real_file(filepath, decrypt=decrypt)
r_files['files'].append(
{
"src": real_file,
"dest": dest_filepath,
"checksum": _get_local_checksum(checksum_check, real_file)
}
)
for dirname in sub_folders:
dirpath = os.path.join(base_path, dirname)
dest_dirpath = os.path.join(rel_base, dirpath[rel_offset:])
real_dir = os.path.realpath(dirpath)
dir_stats = os.stat(real_dir)
if os.path.islink(dirpath):
if local_follow:
if (dir_stats.st_dev, dir_stats.st_ino) in parent_dirs:
# Just insert the symlink if the target directory
# exists inside of the copy already
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Walk the dirpath to find all parent directories.
new_parents = set()
parent_dir_list = os.path.dirname(dirpath).split(os.path.sep)
for parent in range(len(parent_dir_list), 0, -1):
parent_stat = os.stat(u'/'.join(parent_dir_list[:parent]))
if (parent_stat.st_dev, parent_stat.st_ino) in parent_dirs:
# Reached the point at which the directory
# tree is already known. Don't add any
# more or we might go to an ancestor that
# isn't being copied.
break
new_parents.add((parent_stat.st_dev, parent_stat.st_ino))
if (dir_stats.st_dev, dir_stats.st_ino) in new_parents:
# This was a circular symlink. So add it as
# a symlink
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Walk the directory pointed to by the symlink
r_files['directories'].append({"src": real_dir, "dest": dest_dirpath})
offset = len(real_dir) + 1
_recurse(real_dir, offset, parent_dirs.union(new_parents),
rel_base=dest_dirpath,
checksum_check=checksum_check)
else:
# Add the symlink to the destination
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Just a normal directory
r_files['directories'].append({"src": dirpath, "dest": dest_dirpath})
# Check if the source ends with a "/" so that we know which directory
# level to work at (similar to rsync)
source_trailing_slash = False
if trailing_slash_detector:
source_trailing_slash = trailing_slash_detector(topdir)
else:
source_trailing_slash = topdir.endswith(os.path.sep)
# Calculate the offset needed to strip the base_path to make relative
# paths
if base_path is None:
base_path = topdir
if not source_trailing_slash:
base_path = os.path.dirname(base_path)
if topdir.startswith(base_path):
offset = len(base_path)
# Make sure we're making the new paths relative
if trailing_slash_detector and not trailing_slash_detector(base_path):
offset += 1
elif not base_path.endswith(os.path.sep):
offset += 1
if os.path.islink(topdir) and not local_follow:
r_files['symlinks'] = {"src": os.readlink(topdir), "dest": os.path.basename(topdir)}
return r_files
dir_stats = os.stat(topdir)
parents = frozenset(((dir_stats.st_dev, dir_stats.st_ino),))
# Actually walk the directory hierarchy
_recurse(topdir, offset, parents, checksum_check=checksum_check)
return r_files
def _get_local_checksum(get_checksum, local_path):
if get_checksum:
return checksum(local_path)
else:
return None
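# Illustrative sketch (not part of the original plugin): for a hypothetical source tree
# files/app/ containing conf/app.ini plus a symlink current -> conf, a call such as
# _walk_dirs(source, loader, local_follow=False, checksum_check=False) would return roughly:
# {
#     'files': [{'src': '/.../files/app/conf/app.ini', 'dest': 'app/conf/app.ini', 'checksum': None}],
#     'directories': [{'src': '/.../files/app/conf', 'dest': 'app/conf'}],
#     'symlinks': [{'src': 'conf', 'dest': 'app/current'}],
# }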
class ActionModule(ActionBase):
WIN_PATH_SEPARATOR = "\\"
def _create_content_tempfile(self, content):
""" Create a tempfile containing defined content """
fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
content = to_bytes(content)
try:
with os.fdopen(fd, 'wb') as f:
f.write(content)
except Exception as err:
os.remove(content_tempfile)
raise Exception(err)
return content_tempfile
def _create_zip_tempfile(self, files, directories):
tmpdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)
zip_file_path = os.path.join(tmpdir, "win_copy.zip")
zip_file = zipfile.ZipFile(zip_file_path, "w", zipfile.ZIP_STORED, True)
# encoding the file/dir name with base64 so Windows can unzip a unicode
# filename and get the right name, Windows doesn't handle unicode names
# very well
for directory in directories:
directory_path = to_bytes(directory['src'], errors='surrogate_or_strict')
archive_path = to_bytes(directory['dest'], errors='surrogate_or_strict')
encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict')
zip_file.write(directory_path, encoded_path, zipfile.ZIP_DEFLATED)
for file in files:
file_path = to_bytes(file['src'], errors='surrogate_or_strict')
archive_path = to_bytes(file['dest'], errors='surrogate_or_strict')
encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict')
zip_file.write(file_path, encoded_path, zipfile.ZIP_DEFLATED)
return zip_file_path
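    # Illustrative note (not part of the original plugin); the path below is hypothetical.
    # Each zip member name written above is the base64 form of the destination path, e.g.
    # to_text(base64.b64encode(to_bytes(u'sub dir\\some-file.txt'))), and the Windows side
    # of win_copy is expected to base64-decode each member name to recover the original
    # relative path when exploding the archive.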
def _remove_tempfile_if_content_defined(self, content, content_tempfile):
if content is not None:
os.remove(content_tempfile)
def _copy_single_file(self, local_file, dest, source_rel, task_vars, tmp, backup):
if self._play_context.check_mode:
module_return = dict(changed=True)
return module_return
# copy the file across to the server
tmp_src = self._connection._shell.join_path(tmp, 'source')
self._transfer_file(local_file, tmp_src)
copy_args = self._task.args.copy()
copy_args.update(
dict(
dest=dest,
src=tmp_src,
_original_basename=source_rel,
_copy_mode="single",
backup=backup,
)
)
copy_args.pop('content', None)
copy_result = self._execute_module(module_name="copy",
module_args=copy_args,
task_vars=task_vars)
return copy_result
def _copy_zip_file(self, dest, files, directories, task_vars, tmp, backup):
# create local zip file containing all the files and directories that
# need to be copied to the server
if self._play_context.check_mode:
module_return = dict(changed=True)
return module_return
try:
zip_file = self._create_zip_tempfile(files, directories)
except Exception as e:
module_return = dict(
changed=False,
failed=True,
msg="failed to create tmp zip file: %s" % to_text(e),
exception=traceback.format_exc()
)
return module_return
zip_path = self._loader.get_real_file(zip_file)
# send zip file to remote, file must end in .zip so
# Com Shell.Application works
tmp_src = self._connection._shell.join_path(tmp, 'source.zip')
self._transfer_file(zip_path, tmp_src)
# run the explode operation of win_copy on remote
copy_args = self._task.args.copy()
copy_args.update(
dict(
src=tmp_src,
dest=dest,
_copy_mode="explode",
backup=backup,
)
)
copy_args.pop('content', None)
module_return = self._execute_module(module_name='copy',
module_args=copy_args,
task_vars=task_vars)
shutil.rmtree(os.path.dirname(zip_path))
return module_return
def run(self, tmp=None, task_vars=None):
""" handler for file transfer operations """
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
source = self._task.args.get('src', None)
content = self._task.args.get('content', None)
dest = self._task.args.get('dest', None)
remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
local_follow = boolean(self._task.args.get('local_follow', False), strict=False)
force = boolean(self._task.args.get('force', True), strict=False)
decrypt = boolean(self._task.args.get('decrypt', True), strict=False)
backup = boolean(self._task.args.get('backup', False), strict=False)
result['src'] = source
result['dest'] = dest
result['failed'] = True
if (source is None and content is None) or dest is None:
result['msg'] = "src (or content) and dest are required"
elif source is not None and content is not None:
result['msg'] = "src and content are mutually exclusive"
elif content is not None and dest is not None and (
dest.endswith(os.path.sep) or dest.endswith(self.WIN_PATH_SEPARATOR)):
result['msg'] = "dest must be a file if content is defined"
else:
del result['failed']
if result.get('failed'):
return result
# If content is defined make a temp file and write the content into it
content_tempfile = None
if content is not None:
try:
# if content comes to us as a dict it should be decoded json.
# We need to encode it back into a string and write it out
if isinstance(content, dict) or isinstance(content, list):
content_tempfile = self._create_content_tempfile(json.dumps(content))
else:
content_tempfile = self._create_content_tempfile(content)
source = content_tempfile
except Exception as err:
result['failed'] = True
result['msg'] = "could not write content tmp file: %s" % to_native(err)
return result
# all actions should occur on the remote server, run win_copy module
elif remote_src:
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
_copy_mode="remote",
dest=dest,
src=source,
force=force,
backup=backup,
)
)
new_module_args.pop('content', None)
result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
return result
# find_needle returns a path that may not have a trailing slash on a
# directory so we need to find that out first and append at the end
else:
trailing_slash = source.endswith(os.path.sep)
try:
# find in expected paths
source = self._find_needle('files', source)
except AnsibleError as e:
result['failed'] = True
result['msg'] = to_text(e)
result['exception'] = traceback.format_exc()
return result
if trailing_slash != source.endswith(os.path.sep):
if source[-1] == os.path.sep:
source = source[:-1]
else:
source = source + os.path.sep
        # A dict of the source files, directories and symlinks which we will try to copy to the destination
        source_files = {'files': [], 'directories': [], 'symlinks': []}
        # If source is a directory populate our dict, else source is a file and translate it to a single entry.
if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
result['operation'] = 'folder_copy'
# Get a list of the files we want to replicate on the remote side
source_files = _walk_dirs(source, self._loader, decrypt=decrypt, local_follow=local_follow,
trailing_slash_detector=self._connection._shell.path_has_trailing_slash,
checksum_check=force)
            # If it's a recursive copy, destination is always a dir,
# explicitly mark it so (note - win_copy module relies on this).
if not self._connection._shell.path_has_trailing_slash(dest):
dest = "%s%s" % (dest, self.WIN_PATH_SEPARATOR)
check_dest = dest
# Source is a file, add details to source_files dict
else:
result['operation'] = 'file_copy'
# If the local file does not exist, get_real_file() raises AnsibleFileNotFound
try:
source_full = self._loader.get_real_file(source, decrypt=decrypt)
except AnsibleFileNotFound as e:
result['failed'] = True
result['msg'] = "could not find src=%s, %s" % (source, to_text(e))
return result
original_basename = os.path.basename(source)
result['original_basename'] = original_basename
# check if dest ends with / or \ and append source filename to dest
if self._connection._shell.path_has_trailing_slash(dest):
check_dest = dest
filename = original_basename
result['dest'] = self._connection._shell.join_path(dest, filename)
else:
# replace \\ with / so we can use os.path to get the filename or dirname
unix_path = dest.replace(self.WIN_PATH_SEPARATOR, os.path.sep)
filename = os.path.basename(unix_path)
check_dest = os.path.dirname(unix_path)
file_checksum = _get_local_checksum(force, source_full)
source_files['files'].append(
dict(
src=source_full,
dest=filename,
checksum=file_checksum
)
)
result['checksum'] = file_checksum
result['size'] = os.path.getsize(to_bytes(source_full, errors='surrogate_or_strict'))
# find out the files/directories/symlinks that we need to copy to the server
query_args = self._task.args.copy()
query_args.update(
dict(
_copy_mode="query",
dest=check_dest,
force=force,
files=source_files['files'],
directories=source_files['directories'],
symlinks=source_files['symlinks'],
)
)
        # src is not required for query, will fail path validation if src has unix allowed chars
query_args.pop('src', None)
query_args.pop('content', None)
query_return = self._execute_module(module_args=query_args,
task_vars=task_vars)
if query_return.get('failed') is True:
result.update(query_return)
return result
        if (len(query_return['files']) > 0 or len(query_return['directories']) > 0) and self._connection._shell.tmpdir is None:
self._connection._shell.tmpdir = self._make_tmp_path()
if len(query_return['files']) == 1 and len(query_return['directories']) == 0:
# we only need to copy 1 file, don't mess around with zips
file_src = query_return['files'][0]['src']
file_dest = query_return['files'][0]['dest']
result.update(self._copy_single_file(file_src, dest, file_dest,
task_vars, self._connection._shell.tmpdir, backup))
if result.get('failed') is True:
result['msg'] = "failed to copy file %s: %s" % (file_src, result['msg'])
result['changed'] = True
elif len(query_return['files']) > 0 or len(query_return['directories']) > 0:
# either multiple files or directories need to be copied, compress
# to a zip and 'explode' the zip on the server
# TODO: handle symlinks
result.update(self._copy_zip_file(dest, source_files['files'],
source_files['directories'],
task_vars, self._connection._shell.tmpdir, backup))
result['changed'] = True
else:
# no operations need to occur
result['failed'] = False
result['changed'] = False
# remove the content tmp file and remote tmp file if it was created
self._remove_tempfile_if_content_defined(content, content_tempfile)
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
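# Illustrative summary (not part of the original plugin) of the flow in run() above:
# build source_files locally, call win_copy in "query" mode to find out what actually
# needs to change, then either push a single file ("single" mode) or zip everything up
# and run "explode" mode on the remote host, and finally remove the local content
# tempfile and the remote tmp directory.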
| 23,466 | Python | .py | 445 | 38.31236 | 133 | 0.56859 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,297 | win_acl.py | ansible_ansible/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Phil Schwartz <schwartzmx@gmail.com>
# Copyright: (c) 2015, Trond Hindenes
# Copyright: (c) 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_acl
version_added: "2.0"
short_description: Set file/directory/registry permissions for a system user or group
description:
- Add or remove rights/permissions for a given user or group for the specified
  file, folder, registry key or AppPool identities.
options:
path:
description:
- The path to the file or directory.
type: str
required: yes
user:
description:
- User or Group to add specified rights to act on src file/folder or
registry key.
type: str
required: yes
state:
description:
- Specify whether to add C(present) or remove C(absent) the specified access rule.
type: str
choices: [ absent, present ]
default: present
type:
description:
- Specify whether to allow or deny the rights specified.
type: str
required: yes
choices: [ allow, deny ]
rights:
description:
- The rights/permissions that are to be allowed/denied for the specified
user or group for the item at C(path).
- If C(path) is a file or directory, rights can be any right under MSDN
FileSystemRights U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.filesystemrights.aspx).
- If C(path) is a registry key, rights can be any right under MSDN
RegistryRights U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.registryrights.aspx).
type: str
required: yes
inherit:
description:
- Inherit flags on the ACL rules.
- Can be specified as a comma separated list, e.g. C(ContainerInherit),
C(ObjectInherit).
- For more information on the choices see MSDN InheritanceFlags enumeration
at U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.inheritanceflags.aspx).
- Defaults to C(ContainerInherit, ObjectInherit) for Directories.
type: str
choices: [ ContainerInherit, ObjectInherit ]
propagation:
description:
- Propagation flag on the ACL rules.
- For more information on the choices see MSDN PropagationFlags enumeration
at U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.propagationflags.aspx).
type: str
choices: [ InheritOnly, None, NoPropagateInherit ]
default: "None"
notes:
- If adding ACLs for AppPool identities (available since 2.3), the Windows
Feature "Web-Scripting-Tools" must be enabled.
seealso:
- module: win_acl_inheritance
- module: win_file
- module: win_owner
- module: win_stat
author:
- Phil Schwartz (@schwartzmx)
- Trond Hindenes (@trondhindenes)
- Hans-Joachim Kliemeck (@h0nIg)
"""
EXAMPLES = r"""
- name: Restrict write and execute access to User Fed-Phil
win_acl:
user: Fed-Phil
path: C:\Important\Executable.exe
type: deny
rights: ExecuteFile,Write
- name: Add IIS_IUSRS allow rights
win_acl:
path: C:\inetpub\wwwroot\MySite
user: IIS_IUSRS
rights: FullControl
type: allow
state: present
inherit: ContainerInherit, ObjectInherit
propagation: 'None'
- name: Set registry key right
win_acl:
path: HKCU:\Bovine\Key
user: BUILTIN\Users
rights: EnumerateSubKeys
type: allow
state: present
inherit: ContainerInherit, ObjectInherit
propagation: 'None'
- name: Remove FullControl AccessRule for IIS_IUSRS
win_acl:
path: C:\inetpub\wwwroot\MySite
user: IIS_IUSRS
rights: FullControl
type: allow
state: absent
inherit: ContainerInherit, ObjectInherit
propagation: 'None'
- name: Deny Intern
win_acl:
path: C:\Administrator\Documents
user: Intern
rights: Read,Write,Modify,FullControl,Delete
type: deny
state: present
"""
| 4,112 | Python | .py | 124 | 29.048387 | 119 | 0.721357 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,298 | win_shell.py | ansible_ansible/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_shell
short_description: Execute shell commands on target hosts
version_added: 2.2
description:
- The C(win_shell) module takes the command name followed by a list of space-delimited arguments.
It is similar to the M(win_command) module, but runs
the command via a shell (defaults to PowerShell) on the target host.
- For non-Windows targets, use the M(shell) module instead.
options:
free_form:
description:
- The C(win_shell) module takes a free form command to run.
- There is no parameter actually named 'free form'. See the examples!
type: str
required: yes
creates:
description:
- A path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
type: path
removes:
description:
- A path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
type: path
chdir:
description:
- Set the specified path as the current working directory before executing a command
type: path
executable:
description:
    - Change the shell used to execute the command (e.g. C(cmd)).
- The target shell must accept a C(/c) parameter followed by the raw command line to be executed.
type: path
stdin:
description:
- Set the stdin of the command directly to the specified value.
type: str
version_added: '2.5'
no_profile:
description:
- Do not load the user profile before running a command. This is only valid
when using PowerShell as the executable.
type: bool
default: no
version_added: '2.8'
output_encoding_override:
description:
- This option overrides the encoding of stdout/stderr output.
    - You can use this option when you need to run a command which ignores the console's codepage.
- You should only need to use this option in very rare circumstances.
- This value can be any valid encoding C(Name) based on the output of C([System.Text.Encoding]::GetEncodings()).
See U(https://docs.microsoft.com/dotnet/api/system.text.encoding.getencodings).
type: str
version_added: '2.10'
notes:
- If you want to run an executable securely and predictably, it may be
better to use the M(win_command) module instead. Best practices when writing
playbooks will follow the trend of using M(win_command) unless C(win_shell) is
explicitly required. When running ad-hoc commands, use your best judgement.
- WinRM will not return from a command execution until all child processes created have exited.
Thus, it is not possible to use C(win_shell) to spawn long-running child or background processes.
Consider creating a Windows service for managing background processes.
seealso:
- module: psexec
- module: raw
- module: script
- module: shell
- module: win_command
- module: win_psexec
author:
- Matt Davis (@nitzmahone)
"""
EXAMPLES = r"""
# Execute a command in the remote shell; stdout goes to the specified
# file on the remote.
- win_shell: C:\somescript.ps1 >> C:\somelog.txt
# Change the working directory to somedir/ before executing the command.
- win_shell: C:\somescript.ps1 >> C:\somelog.txt chdir=C:\somedir
# You can also use the 'args' form to provide the options. This command
# will change the working directory to somedir/ and will only run when
# somedir/somelog.txt doesn't exist.
- win_shell: C:\somescript.ps1 >> C:\somelog.txt
args:
chdir: C:\somedir
creates: C:\somelog.txt
# Run a command under a non-Powershell interpreter (cmd in this case)
- win_shell: echo %HOMEDIR%
args:
executable: cmd
register: homedir_out
- name: Run multi-lined shell commands
win_shell: |
$value = Test-Path -Path C:\temp
if ($value) {
Remove-Item -Path C:\temp -Force
}
New-Item -Path C:\temp -ItemType Directory
- name: Retrieve the input based on stdin
win_shell: '$string = [Console]::In.ReadToEnd(); Write-Output $string.Trim()'
args:
stdin: Input message
"""
RETURN = r"""
msg:
description: Changed.
returned: always
type: bool
sample: true
start:
description: The command execution start time.
returned: always
type: str
sample: '2016-02-25 09:18:26.429568'
end:
description: The command execution end time.
returned: always
type: str
sample: '2016-02-25 09:18:26.755339'
delta:
description: The command execution delta time.
returned: always
type: str
sample: '0:00:00.325771'
stdout:
description: The command standard output.
returned: always
type: str
sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
stderr:
description: The command standard error.
returned: always
type: str
sample: 'ls: cannot access foo: No such file or directory'
cmd:
description: The command executed by the task.
returned: always
type: str
sample: 'rabbitmqctl join_cluster rabbit@master'
rc:
description: The command return code (0 means success).
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines.
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
"""
| 5,618 | Python | .py | 157 | 31.675159 | 127 | 0.71308 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,299 | win_copy.py | ansible_ansible/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_copy
version_added: '1.9.2'
short_description: Copies files to remote locations on Windows hosts
description:
- The C(win_copy) module copies a file on the local box to remote Windows locations.
- For non-Windows targets, use the M(copy) module instead.
options:
content:
description:
- When used instead of C(src), sets the contents of a file directly to the
specified value.
- This is for simple values, for anything complex or with formatting please
switch to the M(template) module.
type: str
version_added: '2.3'
decrypt:
description:
- This option controls the autodecryption of source files using vault.
type: bool
default: yes
version_added: '2.5'
dest:
description:
- Remote absolute path where the file should be copied to.
- If C(src) is a directory, this must be a directory too.
- Use \ for path separators or \\ when in "double quotes".
- If C(dest) ends with \ then source or the contents of source will be
copied to the directory without renaming.
- If C(dest) is a nonexistent path, it will only be created if C(dest) ends
with "/" or "\", or C(src) is a directory.
- If C(src) and C(dest) are files and if the parent directory of C(dest)
doesn't exist, then the task will fail.
type: path
required: yes
backup:
description:
- Determine whether a backup should be created.
- When set to C(yes), create a backup file including the timestamp information
so you can get the original file back if you somehow clobbered it incorrectly.
- No backup is taken when C(remote_src=False) and multiple files are being
copied.
type: bool
default: no
version_added: '2.8'
force:
description:
- If set to C(yes), the file will only be transferred if the content
is different than destination.
- If set to C(no), the file will only be transferred if the
destination does not exist.
- If set to C(no), no checksumming of the content is performed which can
help improve performance on larger files.
type: bool
default: yes
version_added: '2.3'
local_follow:
description:
- This flag indicates that filesystem links in the source tree, if they
exist, should be followed.
type: bool
default: yes
version_added: '2.4'
remote_src:
description:
    - If C(no), it will search for src on the originating/master machine.
- If C(yes), it will go to the remote/target machine for the src.
type: bool
default: no
version_added: '2.3'
src:
description:
- Local path to a file to copy to the remote server; can be absolute or
relative.
- If path is a directory, it is copied (including the source folder name)
recursively to C(dest).
- If path is a directory and ends with "/", only the inside contents of
that directory are copied to the destination. Otherwise, if it does not
end with "/", the directory itself with all contents is copied.
- If path is a file and dest ends with "\", the file is copied to the
folder with the same filename.
- Required unless using C(content).
type: path
notes:
- Currently win_copy does not support copying symbolic links from both local to
remote and remote to remote.
- It is recommended that backslashes C(\) are used instead of C(/) when dealing
with remote paths.
- Because win_copy runs over WinRM, it is not a very efficient transfer
mechanism. If sending large files consider hosting them on a web service and
using M(win_get_url) instead.
seealso:
- module: assemble
- module: copy
- module: win_get_url
- module: win_robocopy
author:
- Jon Hawkesworth (@jhawkesworth)
- Jordan Borean (@jborean93)
"""
EXAMPLES = r"""
- name: Copy a single file
win_copy:
src: /srv/myfiles/foo.conf
dest: C:\Temp\renamed-foo.conf
- name: Copy a single file, but keep a backup
win_copy:
src: /srv/myfiles/foo.conf
dest: C:\Temp\renamed-foo.conf
backup: yes
- name: Copy a single file keeping the filename
win_copy:
src: /src/myfiles/foo.conf
dest: C:\Temp\
- name: Copy folder to C:\Temp (results in C:\Temp\temp_files)
win_copy:
src: files/temp_files
dest: C:\Temp
- name: Copy folder contents recursively
win_copy:
src: files/temp_files/
dest: C:\Temp
- name: Copy a single file where the source is on the remote host
win_copy:
src: C:\Temp\foo.txt
dest: C:\ansible\foo.txt
remote_src: yes
- name: Copy a folder recursively where the source is on the remote host
win_copy:
src: C:\Temp
dest: C:\ansible
remote_src: yes
- name: Set the contents of a file
win_copy:
content: abc123
dest: C:\Temp\foo.txt
- name: Copy a single file as another user
win_copy:
src: NuGet.config
dest: '%AppData%\NuGet\NuGet.config'
vars:
ansible_become_user: user
ansible_become_password: pass
# The tmp dir must be set when using win_copy as another user
# This ensures the become user will have permissions for the operation
    # Make sure to specify a folder both the ansible_user and the become_user have access to (i.e. not %TEMP%, which is user specific and requires Admin)
ansible_remote_tmp: 'c:\tmp'
"""
RETURN = r"""
backup_file:
description: Name of the backup file that was created.
returned: if backup=yes
type: str
sample: C:\Path\To\File.txt.11540.20150212-220915.bak
dest:
description: Destination file/path.
returned: changed
type: str
sample: C:\Temp\
src:
description: Source file used for the copy on the target machine.
returned: changed
type: str
sample: /home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source
checksum:
description: SHA1 checksum of the file after running copy.
returned: success, src is a file
type: str
sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
size:
description: Size of the target, after execution.
returned: changed, src is a file
type: int
sample: 1220
operation:
description: Whether a single file copy took place or a folder copy.
returned: success
type: str
sample: file_copy
original_basename:
description: Basename of the copied file.
returned: changed, src is a file
type: str
sample: foo.txt
"""
| 6,765 | Python | .py | 194 | 30.690722 | 151 | 0.705703 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |