id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
30,200 | test_no_changed_when.py | ansible_ansible-lint/test/rules/test_no_changed_when.py | """Tests for no-change-when rule."""
from ansiblelint.rules import RulesCollection
from ansiblelint.rules.no_changed_when import CommandHasChangesCheckRule
from ansiblelint.runner import Runner
def test_command_changes_positive() -> None:
"""Positive test for no-changed-when."""
collection = RulesCollection()
collection.register(CommandHasChangesCheckRule())
success = "examples/playbooks/command-check-success.yml"
good_runner = Runner(success, rules=collection)
assert good_runner.run() == []
def test_command_changes_negative() -> None:
"""Negative test for no-changed-when."""
collection = RulesCollection()
collection.register(CommandHasChangesCheckRule())
failure = "examples/playbooks/command-check-failure.yml"
bad_runner = Runner(failure, rules=collection)
errs = bad_runner.run()
assert len(errs) == 2
| 871 | Python | .py | 19 | 41.842105 | 72 | 0.754427 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,201 | ematcher.py | ansible_ansible-lint/test/rules/fixtures/ematcher.py | """Custom rule used as fixture."""
from ansiblelint.rules import AnsibleLintRule
class EMatcherRule(AnsibleLintRule):
"""BANNED string found."""
id = "TEST0001"
description = (
"This is a test custom rule that looks for lines containing BANNED string"
)
tags = ["fake", "dummy", "test1"]
def match(self, line: str) -> bool:
return "BANNED" in line
| 393 | Python | .py | 11 | 30.636364 | 82 | 0.671088 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,202 | unset_variable_matcher.py | ansible_ansible-lint/test/rules/fixtures/unset_variable_matcher.py | """Custom linting rule used as test fixture."""
from ansiblelint.rules import AnsibleLintRule
class UnsetVariableMatcherRule(AnsibleLintRule):
"""Line contains untemplated variable."""
id = "TEST0002"
description = (
"This is a test rule that looks for lines post templating that still contain {{"
)
tags = ["fake", "dummy", "test2"]
def match(self, line: str) -> bool:
return "{{" in line
| 435 | Python | .py | 11 | 34.454545 | 88 | 0.682578 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,203 | raw_task.py | ansible_ansible-lint/test/rules/fixtures/raw_task.py | """Test Rule that needs_raw_task."""
from __future__ import annotations
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class RawTaskRule(AnsibleLintRule):
"""Test rule that inspects the raw task."""
id = "raw-task"
shortdesc = "Test rule that inspects the raw task"
tags = ["fake", "dummy", "test3"]
needs_raw_task = True
def matchtask(
self,
task: Task,
file: Lintable | None = None,
) -> bool | str:
"""Match a task using __raw_task__ to inspect the module params type."""
raw_task = task["__raw_task__"]
module = task["action"]["__ansible_module_original__"]
found_raw_task_params = not isinstance(raw_task[module], dict)
return found_raw_task_params
| 891 | Python | .py | 23 | 33.043478 | 80 | 0.666279 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,204 | rebuild.py | ansible_ansible-lint/test/schemas/src/rebuild.py | """Utility to generate some complex patterns."""
import copy
import json
import keyword
import sys
from pathlib import Path
from typing import Any
play_keywords = list(
filter(
None,
"""\
any_errors_fatal
become
become_exe
become_flags
become_method
become_user
check_mode
collections
connection
debugger
diff
environment
fact_path
force_handlers
gather_facts
gather_subset
gather_timeout
handlers
hosts
ignore_errors
ignore_unreachable
max_fail_percentage
module_defaults
name
no_log
order
port
post_tasks
pre_tasks
remote_user
roles
run_once
serial
strategy
tags
tasks
throttle
timeout
vars
vars_files
vars_prompt
""".split(),
),
)
def is_ref_used(obj: Any, ref: str) -> bool:
"""Return a reference use from a schema."""
ref_use = f"#/$defs/{ref}"
if isinstance(obj, dict):
if obj.get("$ref", None) == ref_use:
return True
for _ in obj.values():
if isinstance(_, dict | list) and is_ref_used(_, ref):
return True
elif isinstance(obj, list):
for _ in obj:
if isinstance(_, dict | list) and is_ref_used(_, ref):
return True
return False
if __name__ == "__main__":
invalid_var_names = sorted(list(keyword.kwlist) + play_keywords)
if "__peg_parser__" in invalid_var_names:
invalid_var_names.remove("__peg_parser__")
print("Updating invalid var names") # noqa: T201
with Path("f/vars.json").open("r+", encoding="utf-8") as f:
vars_schema = json.load(f)
vars_schema["anyOf"][0]["patternProperties"] = {
f"^(?!({'|'.join(invalid_var_names)})$)[a-zA-Z_][\\w]*$": {},
}
f.seek(0)
json.dump(vars_schema, f, indent=2)
f.write("\n")
f.truncate()
print("Compiling subschemas...") # noqa: T201
with Path("f/ansible.json").open(encoding="utf-8") as f:
combined_json = json.load(f)
for subschema in ["tasks", "playbook"]:
sub_json = copy.deepcopy(combined_json)
# remove unsafe keys from root
for key in [
"$id",
"id",
"title",
"description",
"type",
"default",
"items",
"properties",
"additionalProperties",
"examples",
]:
if key in sub_json:
del sub_json[key]
for key in sub_json:
if key not in ["$schema", "$defs"]:
print( # noqa: T201
f"Unexpected key found at combined schema root: ${key}",
)
sys.exit(2)
# Copy keys from subschema to root
for key, value in combined_json["$defs"][subschema].items():
sub_json[key] = value
sub_json["$comment"] = "Generated from ansible.json, do not edit."
sub_json["$id"] = (
f"https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/{subschema}.json"
)
# Remove all unreferenced ($ref) definitions ($defs) recursively
while True:
spare = [k for k in sub_json["$defs"] if not is_ref_used(sub_json, k)]
for k in spare:
print(f"{subschema}: deleting unused '{k}' definition") # noqa: T201
del sub_json["$defs"][k]
if not spare:
break
with Path(f"f/{subschema}.json").open("w", encoding="utf-8") as f:
json.dump(sub_json, f, indent=2, sort_keys=True)
f.write("\n")
| 3,542 | Python | .py | 127 | 21.19685 | 115 | 0.581375 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,205 | example_com_rule.py | ansible_ansible-lint/test/custom_rules/example_com/example_com_rule.py | # Copyright (c) 2020, Ansible Project
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""A dummy custom rule module #2."""
from ansiblelint.rules import AnsibleLintRule
class ExampleComRule(AnsibleLintRule):
"""A dummy custom rule class."""
id = "100002"
| 1,276 | Python | .py | 24 | 51.666667 | 79 | 0.786058 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,206 | custom_rule.py | ansible_ansible-lint/test/custom_rules/example_inc/custom_rule.py | # Copyright (c) 2020, Ansible Project
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Dummy custom rule module."""
from ansiblelint.rules import AnsibleLintRule
class CustomRule(AnsibleLintRule):
"""Dummy custom rule class."""
id = "100001"
| 1,265 | Python | .py | 24 | 51.208333 | 79 | 0.787389 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,207 | generate_docs.py | ansible_ansible-lint/tools/generate_docs.py | #!python3
"""Script that tests rule markdown documentation."""
from __future__ import annotations
import subprocess
from pathlib import Path
from ansiblelint.cli import get_rules_dirs
from ansiblelint.config import Options
from ansiblelint.rules import RulesCollection, TransformMixin
if __name__ == "__main__":
subprocess.run( # noqa: S603
["ansible-lint", "-L", "--format", "md"], # noqa: S607
check=True,
stdout=subprocess.DEVNULL,
)
file = Path("docs/_autofix_rules.md")
options = Options()
options.rulesdirs = get_rules_dirs([])
options.list_rules = True
rules = RulesCollection(
options.rulesdirs,
options=options,
)
contents: list[str] = [
f"- [{rule.id}](rules/{rule.id}.md)\n"
for rule in rules.alphabetical()
if issubclass(rule.__class__, TransformMixin)
]
# Write the injected contents to the file.
with file.open(encoding="utf-8", mode="w") as fh:
fh.writelines(contents)
| 1,011 | Python | .py | 30 | 28.533333 | 63 | 0.663934 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,208 | some_filter.py | ansible_ansible-lint/examples/playbooks/filter_plugins/some_filter.py | """Sample adjacent filter plugin."""
from __future__ import annotations
class FilterModule: # pylint: disable=too-few-public-methods
"""Ansible filters."""
def filters(self): # type: ignore[no-untyped-def]
"""Return list of exposed filters."""
return {
"some_filter": str,
}
| 325 | Python | .py | 9 | 29.777778 | 61 | 0.63141 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,209 | some_action.py | ansible_ansible-lint/examples/playbooks/action_plugins/some_action.py | """Sample action_plugin."""
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase): # type: ignore[misc]
"""Sample module."""
def run(self, tmp=None, task_vars=None): # type: ignore[no-untyped-def]
"""."""
super().run(tmp, task_vars)
ret = {"foo": "bar"}
return {"ansible_facts": ret}
| 353 | Python | .py | 9 | 33.333333 | 76 | 0.620588 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,210 | task_has_tag.py | ansible_ansible-lint/examples/rules/task_has_tag.py | """Example implementation of a rule requiring tasks to have tags set."""
from __future__ import annotations
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class TaskHasTag(AnsibleLintRule):
"""Tasks must have tag."""
id = "EXAMPLE001"
description = "Tasks must have tag"
tags = ["productivity", "tags"]
def matchtask(
self,
task: Task,
file: Lintable | None = None,
) -> bool | str:
"""Task matching method."""
if isinstance(task, str):
return False
# If the task include another task or make the playbook fail
# Don't force to have a tag
if not set(task.keys()).isdisjoint(["include", "fail"]):
return False
if not set(task.keys()).isdisjoint(["include_tasks", "fail"]):
return False
if not set(task.keys()).isdisjoint(["import_tasks", "fail"]):
return False
# Task should have tags
return "tags" not in task
| 1,127 | Python | .py | 30 | 30.3 | 72 | 0.639631 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,211 | alpha.py | ansible_ansible-lint/examples/.collection/plugins/modules/alpha.py | """An ansible test module."""
DOCUMENTATION = """
module: mod_1
author:
- test
short_description: This is a test module
description:
- This is a test module
version_added: 1.0.0
options:
foo:
description:
- Dummy option I(foo)
type: str
bar:
description:
- Dummy option I(bar)
default: candidate
type: str
choices:
- candidate
- running
aliases:
- bam
notes:
- This is a dummy module
"""
EXAMPLES = """
- name: test task-1
company_name.coll_1.mod_1:
foo: some value
bar: candidate
"""
RETURN = """
baz:
description: test return 1
returned: success
type: list
sample: ['a','b']
"""
| 662 | Python | .py | 40 | 13.525 | 40 | 0.655897 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,212 | beta.py | ansible_ansible-lint/examples/.collection/plugins/modules/deep/beta.py | """An ansible test module."""
DOCUMENTATION = """
module: mod_2
author:
- test
short_description: This is a test module
description:
- This is a test module
version_added: 1.0.0
options:
foo:
description:
- Dummy option I(foo)
type: str
bar:
description:
- Dummy option I(bar)
default: candidate
type: str
choices:
- candidate
- running
aliases:
- bam
notes:
- This is a dummy module
"""
EXAMPLES = """
- name: test task-1
company_name.coll_1.mod_2:
foo: some value
bar: candidate
"""
RETURN = """
baz:
description: test return 1
returned: success
type: list
sample: ['a','b']
"""
| 662 | Python | .py | 40 | 13.525 | 40 | 0.655897 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,213 | gamma.py | ansible_ansible-lint/examples/.collection/plugins/modules/tests/gamma.py | """An ansible test module."""
DOCUMENTATION = """
module: mod_1
author:
- test
short_description: This is a test module
description:
- This is a test module
version_added: 1.0.0
options:
foo:
description:
- Dummy option I(foo)
type: str
bar:
description:
- Dummy option I(bar)
default: candidate
type: str
choices:
- candidate
- running
aliases:
- bam
notes:
- This is a dummy module
"""
EXAMPLES = """
- name: test task-1
company_name.coll_1.mod_1:
foo: some value
bar: candidate
"""
RETURN = """
baz:
description: test return 1
returned: success
type: list
sample: ['a','b']
"""
| 662 | Python | .py | 40 | 13.525 | 40 | 0.655897 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,214 | transformer.py | ansible_ansible-lint/src/ansiblelint/transformer.py | # cspell:ignore classinfo
"""Transformer implementation."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, cast
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ansiblelint.file_utils import Lintable
from ansiblelint.rules import AnsibleLintRule, TransformMixin
from ansiblelint.yaml_utils import FormattedYAML, get_path_to_play, get_path_to_task
if TYPE_CHECKING:
from ansiblelint.config import Options
from ansiblelint.errors import MatchError
from ansiblelint.runner import LintResult
__all__ = ["Transformer"]
_logger = logging.getLogger(__name__)
class Transformer:
"""Transformer class marshals transformations.
The Transformer is similar to the ``ansiblelint.runner.Runner`` which manages
running each of the rules. We only expect there to be one ``Transformer`` instance
which should be instantiated from the main entrypoint function.
In the future, the transformer will be responsible for running transforms for each
of the rule matches. For now, it just reads/writes YAML files which is a
pre-requisite for the planned rule-specific transforms.
"""
DUMP_MSG = "Rewriting yaml file:"
FIX_NA_MSG = "Rule specific fix not available for:"
FIX_NE_MSG = "Rule specific fix not enabled for:"
FIX_APPLY_MSG = "Applying rule specific fix for:"
FIX_FAILED_MSG = "Rule specific fix failed for:"
FIX_ISSUE_MSG = (
"Please file an issue for this with the task or playbook that caused the error."
)
FIX_APPLIED_MSG = "Rule specific fix applied for:"
FIX_NOT_APPLIED_MSG = "Rule specific fix not applied for:"
def __init__(self, result: LintResult, options: Options):
"""Initialize a Transformer instance."""
self.write_set = self.effective_write_set(options.write_list)
self.matches: list[MatchError] = result.matches
self.files: set[Lintable] = result.files
lintables: dict[str, Lintable] = {file.filename: file for file in result.files}
self.matches_per_file: dict[Lintable, list[MatchError]] = {
file: [] for file in result.files
}
not_ignored = [match for match in self.matches if not match.ignored]
for match in not_ignored:
try:
lintable = lintables[match.filename]
except KeyError:
# we shouldn't get here, but this is easy to recover from so do that.
lintable = Lintable(match.filename)
self.matches_per_file[lintable] = []
self.matches_per_file[lintable].append(match)
@staticmethod
def effective_write_set(write_list: list[str]) -> set[str]:
"""Simplify write_list based on ``"none"`` and ``"all"`` keywords.
``"none"`` resets the enabled rule transforms.
This returns ``{"none"}`` or a set of everything after the last ``"none"``.
If ``"all"`` is in the ``write_list`` (after ``"none"`` if present),
then this will return ``{"all"}``.
"""
none_indexes = [i for i, value in enumerate(write_list) if value == "none"]
if none_indexes:
index = none_indexes[-1]
if len(write_list) > index + 1:
index += 1
write_list = write_list[index:]
if "all" in write_list:
return {"all"}
return set(write_list)
def run(self) -> None:
"""For each file, read it, execute transforms on it, then write it."""
for file, matches in self.matches_per_file.items():
# str() convinces mypy that "text/yaml" is a valid Literal.
# Otherwise, it thinks base_kind is one of playbook, meta, tasks, ...
file_is_yaml = str(file.base_kind) == "text/yaml"
try:
data: str = file.content
except (UnicodeDecodeError, IsADirectoryError):
# we hit a binary file (eg a jar or tar.gz) or a directory
data = ""
file_is_yaml = False
ruamel_data: CommentedMap | CommentedSeq | None = None
if file_is_yaml:
# We need a fresh YAML() instance for each load because ruamel.yaml
# stores intermediate state during load which could affect loading
# any other files. (Based on suggestion from ruamel.yaml author)
yaml = FormattedYAML(
# Ansible only uses YAML 1.1, but others files should use newer 1.2 (ruamel.yaml defaults to 1.2)
version=(1, 1) if file.is_owned_by_ansible() else None,
)
ruamel_data = yaml.load(data)
if not isinstance(ruamel_data, CommentedMap | CommentedSeq):
# This is an empty vars file or similar which loads as None.
# It is not safe to write this file or data-loss is likely.
# Only maps and sequences can preserve comments. Skip it.
_logger.debug(
"Ignored reformatting %s because current implementation in ruamel.yaml would drop comments. See https://sourceforge.net/p/ruamel-yaml/tickets/460/",
file,
)
continue
if self.write_set != {"none"}:
self._do_transforms(file, ruamel_data or data, file_is_yaml, matches)
if file_is_yaml:
_logger.debug("%s %s, version=%s", self.DUMP_MSG, file, yaml.version)
# noinspection PyUnboundLocalVariable
file.content = yaml.dumps(ruamel_data)
if file.updated:
file.write()
def _do_transforms(
self,
file: Lintable,
data: CommentedMap | CommentedSeq | str,
file_is_yaml: bool,
matches: list[MatchError],
) -> None:
"""Do Rule-Transforms handling any last-minute MatchError inspections."""
for match in sorted(matches):
match_id = f"{match.tag}/{match.match_type} {match.filename}:{match.lineno}"
if not isinstance(match.rule, TransformMixin):
logging.debug("%s %s", self.FIX_NA_MSG, match_id)
continue
if self.write_set != {"all"}:
rule = cast(AnsibleLintRule, match.rule)
rule_definition = set(rule.tags)
rule_definition.add(rule.id)
if rule_definition.isdisjoint(self.write_set):
logging.debug("%s %s", self.FIX_NE_MSG, match_id)
continue
if file_is_yaml and not match.yaml_path:
data = cast(CommentedMap | CommentedSeq, data)
if match.match_type == "play":
match.yaml_path = get_path_to_play(file, match.lineno, data)
elif match.task or file.kind in (
"tasks",
"handlers",
"playbook",
):
match.yaml_path = get_path_to_task(file, match.lineno, data)
logging.debug("%s %s", self.FIX_APPLY_MSG, match_id)
try:
match.rule.transform(match, file, data)
except Exception as exc: # pylint: disable=broad-except
_logger.error("%s %s", self.FIX_FAILED_MSG, match_id) # noqa: TRY400
_logger.exception(exc) # noqa: TRY401
_logger.error(self.FIX_ISSUE_MSG) # noqa: TRY400
continue
if match.fixed:
_logger.debug("%s %s", self.FIX_APPLIED_MSG, match_id)
else:
_logger.error("%s %s", self.FIX_NOT_APPLIED_MSG, match_id)
| 7,724 | Python | .py | 150 | 39.54 | 172 | 0.598383 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,215 | yaml_utils.py | ansible_ansible-lint/src/ansiblelint/yaml_utils.py | """Utility helpers to simplify working with yaml-based data."""
# pylint: disable=too-many-lines
from __future__ import annotations
import functools
import logging
import os
import re
from collections.abc import Callable, Iterator, Sequence
from io import StringIO
from pathlib import Path
from re import Pattern
from typing import TYPE_CHECKING, Any, cast
import ruamel.yaml.events
from ruamel.yaml.comments import CommentedMap, CommentedSeq, Format
from ruamel.yaml.composer import ComposerError
from ruamel.yaml.constructor import RoundTripConstructor
from ruamel.yaml.emitter import Emitter, ScalarAnalysis
# Module 'ruamel.yaml' does not explicitly export attribute 'YAML'; implicit reexport disabled
# To make the type checkers happy, we import from ruamel.yaml.main instead.
from ruamel.yaml.main import YAML
from ruamel.yaml.parser import ParserError
from ruamel.yaml.scalarint import HexInt, ScalarInt
from yamllint.config import YamlLintConfig
from ansiblelint.constants import (
ANNOTATION_KEYS,
NESTED_TASK_KEYS,
PLAYBOOK_TASK_KEYWORDS,
)
from ansiblelint.utils import Task
if TYPE_CHECKING:
# noinspection PyProtectedMember
from ruamel.yaml.comments import LineCol
from ruamel.yaml.compat import StreamTextType
from ruamel.yaml.nodes import ScalarNode
from ruamel.yaml.representer import RoundTripRepresenter
from ruamel.yaml.tokens import CommentToken
from ansiblelint.file_utils import Lintable
_logger = logging.getLogger(__name__)
class CustomYamlLintConfig(YamlLintConfig): # type: ignore[misc]
"""Extension of YamlLintConfig."""
def __init__(
self,
content: str | None = None,
file: str | Path | None = None,
) -> None:
"""Initialize config."""
super().__init__(content, file)
self.incompatible = ""
def deannotate(data: Any) -> Any:
"""Remove our annotations like __file__ and __line__ and return a JSON serializable object."""
if isinstance(data, dict):
result = data.copy()
for key, value in data.items():
if key in ANNOTATION_KEYS:
del result[key]
else:
result[key] = deannotate(value)
return result
if isinstance(data, list):
return [deannotate(item) for item in data if item not in ANNOTATION_KEYS]
return data
def load_yamllint_config() -> CustomYamlLintConfig:
"""Load our default yamllint config and any customized override file."""
config = CustomYamlLintConfig(file=Path(__file__).parent / "data" / ".yamllint")
config.incompatible = ""
# if we detect local yamllint config we use it but raise a warning
# as this is likely to get out of sync with our internal config.
for path in [
".yamllint",
".yamllint.yaml",
".yamllint.yml",
os.getenv("YAMLLINT_CONFIG_FILE", ""),
os.getenv("XDG_CONFIG_HOME", "~/.config") + "/yamllint/config",
]:
file = Path(path).expanduser()
if file.is_file():
_logger.debug(
"Loading custom %s config file, this extends our "
"internal yamllint config.",
file,
)
custom_config = CustomYamlLintConfig(file=str(file))
custom_config.extend(config)
config = custom_config
break
# Look for settings incompatible with our reformatting
checks: list[tuple[str, str | int | bool]] = [
(
"comments.min-spaces-from-content",
1,
),
(
"comments-indentation",
False,
),
(
"braces.min-spaces-inside",
0,
),
(
"braces.max-spaces-inside",
1,
),
(
"octal-values.forbid-implicit-octal",
True,
),
(
"octal-values.forbid-explicit-octal",
True,
),
# (
# "key-duplicates.forbid-duplicated-merge-keys", # v1.34.0+
# True,
# ),
# (
# "quoted-strings.quote-type", "double",
# ),
# (
# "quoted-strings.required", "only-when-needed",
# ),
]
errors = []
for setting, expected_value in checks:
v = config.rules
for key in setting.split("."):
if not isinstance(v, dict): # pragma: no cover
break
if key not in v: # pragma: no cover
break
v = v[key]
if v != expected_value:
msg = f"{setting} must be {str(expected_value).lower()}"
errors.append(msg)
if errors:
nl = "\n"
msg = f"Found incompatible custom yamllint configuration ({file}), please either remove the file or edit it to comply with:{nl} - {(nl + ' - ').join(errors)}.{nl}{nl}Read https://ansible.readthedocs.io/projects/lint/rules/yaml/ for more details regarding why we have these requirements. Fix mode will not be available."
config.incompatible = msg
_logger.debug("Effective yamllint rules used: %s", config.rules)
return config
def nested_items_path(
data_collection: dict[Any, Any] | list[Any],
ignored_keys: Sequence[str] = (),
) -> Iterator[tuple[Any, Any, list[str | int]]]:
"""Iterate a nested data structure, yielding key/index, value, and parent_path.
This is a recursive function that calls itself for each nested layer of data.
Each iteration yields:
1. the current item's dictionary key or list index,
2. the current item's value, and
3. the path to the current item from the outermost data structure.
For dicts, the yielded (1) key and (2) value are what ``dict.items()`` yields.
For lists, the yielded (1) index and (2) value are what ``enumerate()`` yields.
The final component, the parent path, is a list of dict keys and list indexes.
The parent path can be helpful in providing error messages that indicate
precisely which part of a yaml file (or other data structure) needs to be fixed.
For example, given this playbook:
.. code-block:: yaml
- name: A play
tasks:
- name: A task
debug:
msg: foobar
Here's the first and last yielded items:
.. code-block:: python
>>> playbook=[{"name": "a play", "tasks": [{"name": "a task", "debug": {"msg": "foobar"}}]}]
>>> next( nested_items_path( playbook ) )
(0, {'name': 'a play', 'tasks': [{'name': 'a task', 'debug': {'msg': 'foobar'}}]}, [])
>>> list( nested_items_path( playbook ) )[-1]
('msg', 'foobar', [0, 'tasks', 0, 'debug'])
Note that, for outermost data structure, the parent path is ``[]`` because
you do not need to descend into any nested dicts or lists to find the indicated
key and value.
If a rule were designed to prohibit "foobar" debug messages, it could use the
parent path to provide a path to the problematic ``msg``. It might use a jq-style
path in its error message: "the error is at ``.[0].tasks[0].debug.msg``".
Or if a utility could automatically fix issues, it could use the path to descend
to the parent object using something like this:
.. code-block:: python
target = data
for segment in parent_path:
target = target[segment]
:param data_collection: The nested data (dicts or lists).
:returns: each iteration yields the key (of the parent dict) or the index (lists)
"""
# As typing and mypy cannot effectively ensure we are called only with
# valid data, we better ignore NoneType
if data_collection is None:
return
data: dict[Any, Any] | list[Any]
if isinstance(data_collection, Task):
data = data_collection.normalized_task
else:
data = data_collection
yield from _nested_items_path(
data_collection=data,
parent_path=[],
ignored_keys=ignored_keys,
)
def _nested_items_path(
data_collection: dict[Any, Any] | list[Any],
parent_path: list[str | int],
ignored_keys: Sequence[str] = (),
) -> Iterator[tuple[Any, Any, list[str | int]]]:
"""Iterate through data_collection (internal implementation of nested_items_path).
This is a separate function because callers of nested_items_path should
not be using the parent_path param which is used in recursive _nested_items_path
calls to build up the path to the parent object of the current key/index, value.
"""
# we have to cast each convert_to_tuples assignment or mypy complains
# that both assignments (for dict and list) do not have the same type
convert_to_tuples_type = Callable[[], Iterator[tuple[str | int, Any]]]
if isinstance(data_collection, dict):
convert_data_collection_to_tuples = cast(
convert_to_tuples_type,
functools.partial(data_collection.items),
)
elif isinstance(data_collection, list):
convert_data_collection_to_tuples = cast(
convert_to_tuples_type,
functools.partial(enumerate, data_collection),
)
else:
msg = f"Expected a dict or a list but got {data_collection!r} of type '{type(data_collection)}'"
raise TypeError(msg)
for key, value in convert_data_collection_to_tuples():
if key in (*ANNOTATION_KEYS, *ignored_keys):
continue
yield key, value, parent_path
if isinstance(value, dict | list):
yield from _nested_items_path(
data_collection=value,
parent_path=[*parent_path, key],
)
def get_path_to_play(
lintable: Lintable,
lineno: int, # 1-based
ruamel_data: CommentedMap | CommentedSeq,
) -> list[str | int]:
"""Get the path to the play in the given file at the given line number."""
if lineno < 1:
msg = f"expected lineno >= 1, got {lineno}"
raise ValueError(msg)
if lintable.kind != "playbook" or not isinstance(ruamel_data, CommentedSeq):
return []
lc: LineCol # lc uses 0-based counts
# lineno is 1-based. Convert to 0-based.
line_index = lineno - 1
prev_play_line_index = ruamel_data.lc.line
last_play_index = len(ruamel_data)
for play_index, play in enumerate(ruamel_data):
next_play_index = play_index + 1
if last_play_index > next_play_index:
next_play_line_index = ruamel_data[next_play_index].lc.line
else:
next_play_line_index = None
lc = play.lc
if not isinstance(lc.line, int):
msg = f"expected lc.line to be an int, got {lc.line!r}"
raise TypeError(msg)
if lc.line == line_index:
return [play_index]
if play_index > 0 and prev_play_line_index < line_index < lc.line:
return [play_index - 1]
# The previous play check (above) can't catch the last play,
# so, handle the last play separately.
if (
next_play_index == last_play_index
and line_index > lc.line
and (next_play_line_index is None or line_index < next_play_line_index)
):
# part of this (last) play
return [play_index]
prev_play_line_index = play.lc.line
return []
def get_path_to_task(
lintable: Lintable,
lineno: int, # 1-based
ruamel_data: CommentedMap | CommentedSeq,
) -> list[str | int]:
"""Get the path to the task in the given file at the given line number."""
if lineno < 1:
msg = f"expected lineno >= 1, got {lineno}"
raise ValueError(msg)
if lintable.kind in ("tasks", "handlers", "playbook"):
if not isinstance(ruamel_data, CommentedSeq):
msg = f"expected ruamel_data to be a CommentedSeq, got {ruamel_data!r}"
raise ValueError(msg)
if lintable.kind in ("tasks", "handlers"):
return _get_path_to_task_in_tasks_block(lineno, ruamel_data)
if lintable.kind == "playbook":
return _get_path_to_task_in_playbook(lineno, ruamel_data)
return []
def _get_path_to_task_in_playbook(
    lineno: int,  # 1-based
    ruamel_data: CommentedSeq,
) -> list[str | int]:
    """Get the path to the task in the given playbook data at the given line number.

    Returns a path like ``[play_index, tasks_keyword, task_index, ...]`` or an
    empty list when the line does not fall inside any tasks block.
    """
    last_play_index = len(ruamel_data)
    for play_index, play in enumerate(ruamel_data):
        next_play_index = play_index + 1
        if last_play_index > next_play_index:
            # lc.line is 0-based (ruamel.yaml LineCol counts from 0)
            next_play_line_index = ruamel_data[next_play_index].lc.line
        else:
            # this is the last play in the playbook
            next_play_line_index = None

        # We clearly haven't found the right spot yet if a following play starts on an earlier line.
        if next_play_line_index and lineno > next_play_line_index:
            continue

        play_keys = list(play.keys())
        for tasks_keyword in PLAYBOOK_TASK_KEYWORDS:
            if not play.get(tasks_keyword):
                # keyword absent (or block empty); nothing to search here
                continue

            try:
                next_keyword = play_keys[play_keys.index(tasks_keyword) + 1]
            except IndexError:
                # tasks_keyword is the last key in this play
                next_block_line_index = None
            else:
                # 0-based line where the key following this tasks block starts
                next_block_line_index = play.lc.data[next_keyword][0]
            # last_lineno_in_block is 1-based; next_*_line_index is 0-based
            # next_*_line_index - 1 to get line before next_*_line_index.
            # Then + 1 to make it a 1-based number.
            if next_block_line_index is not None:
                last_lineno_in_block = next_block_line_index
            elif next_play_line_index is not None:
                last_lineno_in_block = next_play_line_index
            else:
                # no upper bound: the block runs to the end of the file
                last_lineno_in_block = None

            task_path = _get_path_to_task_in_tasks_block(
                lineno,
                play[tasks_keyword],
                last_lineno_in_block,
            )
            if task_path:
                # mypy gets confused without this typehint
                tasks_keyword_path: list[int | str] = [
                    play_index,
                    tasks_keyword,
                ]
                return tasks_keyword_path + list(task_path)
    # lineno is before first play or no tasks keywords in any of the plays
    return []
def _get_path_to_task_in_tasks_block(
    lineno: int,  # 1-based
    tasks_block: CommentedSeq,
    last_lineno: int | None = None,  # 1-based
) -> list[str | int]:
    """Get the path to the task in the given tasks block at the given line number.

    Returns ``[task_index, ...]`` (recursing into nested task blocks) or an
    empty list when the line is not part of this tasks block.
    """
    task: CommentedMap | None
    # lineno and last_lineno are 1-based. Convert to 0-based.
    line_index = lineno - 1
    last_line_index = None if last_lineno is None else last_lineno - 1
    # lc (LineCol) uses 0-based counts
    prev_task_line_index = tasks_block.lc.line
    last_task_index = len(tasks_block)
    for task_index, task in enumerate(tasks_block):
        next_task_index = task_index + 1
        if last_task_index > next_task_index:
            if tasks_block[next_task_index] is not None:
                next_task_line_index = tasks_block[next_task_index].lc.line
            else:
                # next item is a null task; get its line from the parent sequence
                next_task_line_index = tasks_block.lc.item(next_task_index)[0]
        else:
            # this is the last task in the block
            next_task_line_index = None
        if task is None:
            # create a dummy task to represent the null task
            task = CommentedMap()
            task.lc.line, task.lc.col = tasks_block.lc.item(task_index)

        nested_task_keys = set(task.keys()).intersection(set(NESTED_TASK_KEYS))
        if nested_task_keys:
            # task contains nested task lists (NESTED_TASK_KEYS); search inside first
            subtask_path = _get_path_to_task_in_nested_tasks_block(
                lineno,
                task,
                nested_task_keys,
                next_task_line_index,
            )
            if subtask_path:
                # mypy gets confused without this typehint
                task_path: list[str | int] = [task_index]
                return task_path + list(subtask_path)

        if not isinstance(task.lc.line, int):
            msg = f"expected task.lc.line to be an int, got {task.lc.line!r}"
            raise TypeError(msg)
        if task.lc.line == line_index:
            # the line is exactly where this task starts
            return [task_index]
        if task_index > 0 and prev_task_line_index < line_index < task.lc.line:
            # the line falls between the previous task and this one,
            # so it belongs to the previous task.
            return [task_index - 1]
        # The previous task check can't catch the last task,
        # so, handle the last task separately (also after subtask checks).
        # pylint: disable=too-many-boolean-expressions
        if (
            next_task_index == last_task_index
            and line_index > task.lc.line
            and (next_task_line_index is None or line_index < next_task_line_index)
            and (last_line_index is None or line_index <= last_line_index)
        ):
            # part of this (last) task
            return [task_index]
        prev_task_line_index = task.lc.line
    # line is not part of this tasks block
    return []
def _get_path_to_task_in_nested_tasks_block(
    lineno: int,  # 1-based
    task: CommentedMap,
    nested_task_keys: set[str],
    next_task_line_index: int | None = None,  # 0-based
) -> list[str | int]:
    """Get the path to the task in the given nested tasks block.

    Returns ``[task_key, task_index, ...]`` or an empty list when the line is
    not inside any of the nested blocks.
    """
    # loop through the keys in line order
    task_keys = list(task.keys())
    task_keys_by_index = dict(enumerate(task_keys))
    for task_index, task_key in enumerate(task_keys):
        nested_task_block = task[task_key]
        if task_key not in nested_task_keys or not nested_task_block:
            # not a nested tasks key, or an empty block; skip it
            continue
        next_task_key = task_keys_by_index.get(task_index + 1, None)
        if next_task_key is not None:
            # NOTE(review): lc.data[key][2] appears to be the (0-based) line of the
            # key's value while [0] is the line of the key itself — confirm against
            # ruamel.yaml LineCol before relying on this elsewhere.
            if task.lc.data[next_task_key][2] < lineno:
                # the following key starts before the target line,
                # so the target cannot be inside this block.
                continue
            next_task_key_line_index = task.lc.data[next_task_key][0]
        else:
            next_task_key_line_index = None
        # last_lineno_in_block is 1-based; next_*_line_index is 0-based
        # next_*_line_index - 1 to get line before next_*_line_index.
        # Then + 1 to make it a 1-based number.
        last_lineno_in_block = (
            next_task_key_line_index
            if next_task_key_line_index is not None
            else next_task_line_index
        )
        subtask_path = _get_path_to_task_in_tasks_block(
            lineno,
            nested_task_block,
            last_lineno_in_block,  # 1-based
        )
        if subtask_path:
            return [task_key, *list(subtask_path)]
    # line is not part of this nested tasks block
    return []
class OctalIntYAML11(ScalarInt):
    """OctalInt representation for YAML 1.1."""

    # tell mypy that ScalarInt has these attributes
    _width: Any
    _underscore: Any

    def __new__(cls, *args: Any, **kwargs: Any) -> Any:
        """Create a new int with ScalarInt-defined attributes."""
        return ScalarInt.__new__(cls, *args, **kwargs)

    @staticmethod
    def represent_octal(representer: RoundTripRepresenter, data: OctalIntYAML11) -> Any:
        """Return a YAML 1.1 octal representation.

        Based on ruamel.yaml.representer.RoundTripRepresenter.represent_octal_int()
        (which only handles the YAML 1.2 octal representation).
        """
        octal_digits = format(data, "o")
        # noinspection PyProtectedMember
        return representer.insert_underscore(
            "0",
            octal_digits,
            data._underscore,  # noqa: SLF001
            anchor=data.yaml_anchor(any=True),
        )
class CustomConstructor(RoundTripConstructor):
    """Custom YAML constructor that preserves Octal formatting in YAML 1.1."""

    def construct_yaml_int(self, node: ScalarNode) -> Any:
        """Construct int while preserving Octal formatting in YAML 1.1.

        ruamel.yaml only preserves the octal format for YAML 1.2.
        For 1.1, it converts the octal to an int. So, we preserve the format.
        Code partially copied from ruamel.yaml (MIT licensed).
        """
        ret = super().construct_yaml_int(node)
        if self.resolver.processing_version == (1, 1) and isinstance(ret, int):
            # Do not rewrite zero as octal.
            if ret == 0:
                return ret
            # see if we've got an octal we need to preserve.
            value_su = self.construct_scalar(node)
            try:
                # ruamel underscore formatting info:
                # first element is the number of digits after the last underscore
                v = value_su.rstrip("_")
                underscore = [len(v) - v.rindex("_") - 1, False, False]  # type: Any
            except ValueError:
                # no underscore present in the scalar
                underscore = None
            except IndexError:
                underscore = None
            value_s = value_su.replace("_", "")
            if value_s[0] in "+-":
                # drop the sign before inspecting the prefix
                value_s = value_s[1:]
            if value_s[0:2] == "0x":
                # hexadecimal: keep its width so it round-trips unchanged
                ret = HexInt(ret, width=len(value_s) - 2)
            elif value_s[0] == "0":
                # got an octal in YAML 1.1
                ret = OctalIntYAML11(
                    ret,
                    width=None,
                    underscore=underscore,
                    anchor=node.anchor,
                )
        return ret
# Register the custom constructor for the standard YAML int tag so that loading
# goes through construct_yaml_int (and thus preserves octal formatting).
CustomConstructor.add_constructor(
    "tag:yaml.org,2002:int",
    CustomConstructor.construct_yaml_int,
)
class FormattedEmitter(Emitter):
    """Emitter that applies custom formatting rules when dumping YAML.

    Differences from ruamel.yaml defaults:

      - indentation of root-level sequences
      - prefer double-quoted scalars over single-quoted scalars

    This ensures that root-level sequences are never indented.
    All subsequent levels are indented as configured (normal ruamel.yaml behavior).

    Earlier implementations used dedent on ruamel.yaml's dumped output,
    but string magic like that had a ton of problematic edge cases.
    """

    preferred_quote = '"'  # either " or '
    min_spaces_inside = 0
    max_spaces_inside = 1

    _sequence_indent = 2
    _sequence_dash_offset = 0  # Should be _sequence_indent - 2
    _root_is_sequence = False
    _in_empty_flow_map = False

    @property
    def _is_root_level_sequence(self) -> bool:
        """Return True if this is a sequence at the root level of the yaml document."""
        return self.column < 2 and self._root_is_sequence

    def expect_document_root(self) -> None:
        """Expect doc root (extend to record if the root doc is a sequence)."""
        self._root_is_sequence = isinstance(
            self.event,
            ruamel.yaml.events.SequenceStartEvent,
        )
        return super().expect_document_root()

    # NB: mypy does not support overriding attributes with properties yet:
    # https://github.com/python/mypy/issues/4125
    # To silence we have to ignore[override] both the @property and the method.
    @property
    def best_sequence_indent(self) -> int:
        """Return the configured sequence_indent or 2 for root level."""
        return 2 if self._is_root_level_sequence else self._sequence_indent

    @best_sequence_indent.setter
    def best_sequence_indent(self, value: int) -> None:
        """Configure how many columns to indent each sequence item (including the '-')."""
        self._sequence_indent = value

    @property
    def sequence_dash_offset(self) -> int:
        """Return the configured sequence_dash_offset or 0 for root level."""
        return 0 if self._is_root_level_sequence else self._sequence_dash_offset

    @sequence_dash_offset.setter
    def sequence_dash_offset(self, value: int) -> None:
        """Configure how many spaces to put before each sequence item's '-'."""
        self._sequence_dash_offset = value

    def choose_scalar_style(self) -> Any:
        """Select how to quote scalars if needed."""
        style = super().choose_scalar_style()
        if (
            style == ""
            and self.event.value.startswith("0")
            and len(self.event.value) > 1
        ):
            # We have an as-yet unquoted token that starts with "0" (but is not itself the digit 0).
            # It could be:
            # - hexadecimal like "0xF1"; comes tagged as int. Should continue unquoted to continue as an int.
            # - octal like "0666" or "0o755"; comes tagged as str. **Should** be quoted to be cross-YAML compatible.
            # - string like "0.0.0.0" and "00-header". Should not be quoted, unless it has a quote in it.
            if (
                self.event.value.startswith("0x")
                and self.event.tag == "tag:yaml.org,2002:int"
                and self.event.implicit[0]
            ):
                # hexadecimal
                self.event.tag = "tag:yaml.org,2002:str"
                return ""
            try:
                int(self.event.value, 8)
            except ValueError:
                pass  # fallthrough to string
            else:
                # octal
                self.event.tag = "tag:yaml.org,2002:str"
                self.event.implicit = (True, True, True)
                return '"'
        if style != "'":
            # block scalar, double quoted, etc.
            return style
        if '"' in self.event.value:
            return "'"
        return self.preferred_quote

    def increase_indent(
        self,
        flow: bool = False,  # noqa: FBT002
        sequence: bool | None = None,
        indentless: bool = False,  # noqa: FBT002
    ) -> None:
        """Increase indent, keeping sequence items flush after a sequence node."""
        super().increase_indent(flow, sequence, indentless)
        # If our previous node was a sequence and we are still trying to indent, don't
        if self.indents.last_seq():
            self.indent = self.column + 1

    def write_indicator(
        self,
        indicator: str,  # ruamel.yaml typehint is wrong. This is a string.
        need_whitespace: bool,
        whitespace: bool = False,  # noqa: FBT002
        indention: bool = False,  # (sic) ruamel.yaml has this typo in their API # noqa: FBT002
    ) -> None:
        """Make sure that flow maps get whitespace by the curly braces."""
        # We try to go with one whitespace by the curly braces and adjust accordingly
        # to what min_spaces_inside and max_spaces_inside are set to.
        # This assumes min_spaces_inside <= max_spaces_inside
        spaces_inside = min(
            max(1, self.min_spaces_inside),
            self.max_spaces_inside if self.max_spaces_inside != -1 else 1,
        )
        # If this is the end of the flow mapping that isn't on a new line:
        if (
            indicator == "}"
            and (self.column or 0) > (self.indent or 0)
            and not self._in_empty_flow_map
        ):
            indicator = (" " * spaces_inside) + "}"
        # Indicator sometimes comes with embedded spaces we need to squish
        if indicator == " -" and self.indents.last_seq():
            indicator = "-"
        super().write_indicator(indicator, need_whitespace, whitespace, indention)
        # if it is the start of a flow mapping, and it's not time
        # to wrap the lines, insert a space.
        if indicator == "{" and self.column < self.best_width:
            if self.check_empty_mapping():
                self._in_empty_flow_map = True
            else:
                self.column += 1
                self.stream.write(" " * spaces_inside)
                self._in_empty_flow_map = False

    # "/n/n" results in one blank line (end the previous line, then newline).
    # So, "/n/n/n" or more is too many new lines. Clean it up.
    _re_repeat_blank_lines: Pattern[str] = re.compile(r"\n{3,}")

    @staticmethod
    def add_octothorpe_protection(string: str) -> str:
        """Modify strings to protect "#" from full-line-comment post-processing."""
        try:
            if "#" in string:
                # # is \uFF03 (fullwidth number sign)
                # ﹟ is \uFE5F (small number sign)
                string = string.replace("#", "\uFF03#\uFE5F")
                # this is safe even if this sequence is present
                # because it gets reversed in post-processing
        except (ValueError, TypeError):
            # probably not really a string. Whatever.
            pass
        return string

    @staticmethod
    def drop_octothorpe_protection(string: str) -> str:
        """Remove string protection of "#" after full-line-comment post-processing."""
        try:
            if "\uFF03#\uFE5F" in string:
                # # is \uFF03 (fullwidth number sign)
                # ﹟ is \uFE5F (small number sign)
                string = string.replace("\uFF03#\uFE5F", "#")
        except (ValueError, TypeError):
            # probably not really a string. Whatever.
            pass
        return string

    def analyze_scalar(self, scalar: str) -> ScalarAnalysis:
        """Determine quoting and other requirements for string.

        And protect "#" from full-line-comment post-processing.
        """
        analysis: ScalarAnalysis = super().analyze_scalar(scalar)
        if analysis.empty:
            return analysis
        analysis.scalar = self.add_octothorpe_protection(analysis.scalar)
        return analysis

    # comment is a CommentToken, not Any (Any is ruamel.yaml's lazy type hint).
    def write_comment(
        self,
        comment: CommentToken,
        pre: bool = False,  # noqa: FBT002
    ) -> None:
        """Clean up extra new lines and spaces in comments.

        ruamel.yaml treats new or empty lines as comments.
        See: https://stackoverflow.com/questions/42708668/removing-all-blank-lines-but-not-comments-in-ruamel-yaml/42712747#42712747
        """
        value: str = comment.value
        if (
            pre
            and not value.strip()
            and not isinstance(
                self.event,
                ruamel.yaml.events.CollectionEndEvent
                | ruamel.yaml.events.DocumentEndEvent
                | ruamel.yaml.events.StreamEndEvent
                | ruamel.yaml.events.MappingStartEvent,
            )
        ):
            # drop pure whitespace pre comments
            # does not apply to End events since they consume one of the newlines.
            value = ""
        elif (
            pre
            and not value.strip()
            and isinstance(self.event, ruamel.yaml.events.MappingStartEvent)
        ):
            value = self._re_repeat_blank_lines.sub("", value)
        elif pre:
            # preserve content in pre comment with at least one newline,
            # but no extra blank lines.
            value = self._re_repeat_blank_lines.sub("\n", value)
        else:
            # single blank lines in post comments
            value = self._re_repeat_blank_lines.sub("\n\n", value)
        comment.value = value
        # make sure that the eol comment only has one space before it.
        if comment.column > self.column + 1 and not pre:
            comment.column = self.column + 1
        return super().write_comment(comment, pre)

    def write_version_directive(self, version_text: Any) -> None:
        """Skip writing '%YAML 1.1'."""
        if version_text == "1.1":
            return
        super().write_version_directive(version_text)
# pylint: disable=too-many-instance-attributes
class FormattedYAML(YAML):
    """A YAML loader/dumper that handles ansible content better by default."""

    default_config = {
        "explicit_start": True,
        "explicit_end": False,
        "width": 160,
        "indent_sequences": True,
        "preferred_quote": '"',
        "min_spaces_inside": 0,
        "max_spaces_inside": 1,
    }

    def __init__(  # pylint: disable=too-many-arguments
        self,
        *,
        typ: str | None = None,
        pure: bool = False,
        output: Any = None,
        plug_ins: list[str] | None = None,
        version: tuple[int, int] | None = None,
        config: dict[str, bool | int | str] | None = None,
    ):
        """Return a configured ``ruamel.yaml.YAML`` instance.

        Some config defaults get extracted from the yamllint config.

        ``ruamel.yaml.YAML`` uses attributes to configure how it dumps yaml files.
        Some of these settings can be confusing, so here are examples of how different
        settings will affect the dumped yaml.

        This example does not indent any sequences:

        .. code:: python

            yaml.explicit_start=True
            yaml.map_indent=2
            yaml.sequence_indent=2
            yaml.sequence_dash_offset=0

        .. code:: yaml

            ---
            - name: A playbook
              tasks:
              - name: Task

        This example indents all sequences including the root-level:

        .. code:: python

            yaml.explicit_start=True
            yaml.map_indent=2
            yaml.sequence_indent=4
            yaml.sequence_dash_offset=2
            # yaml.Emitter defaults to ruamel.yaml.emitter.Emitter

        .. code:: yaml

            ---
              - name: Playbook
                tasks:
                  - name: Task

        This example indents all sequences except at the root-level:

        .. code:: python

            yaml.explicit_start=True
            yaml.map_indent=2
            yaml.sequence_indent=4
            yaml.sequence_dash_offset=2
            yaml.Emitter = FormattedEmitter  # custom Emitter prevents root-level indents

        .. code:: yaml

            ---
            - name: Playbook
              tasks:
                - name: Task
        """
        if version:
            if isinstance(version, str):
                # accept a "1.1"-style string as well as a (1, 1) tuple
                x, y = version.split(".", maxsplit=1)
                version = (int(x), int(y))
            self._yaml_version_default: tuple[int, int] = version
            self._yaml_version: tuple[int, int] = self._yaml_version_default
        super().__init__(typ=typ, pure=pure, output=output, plug_ins=plug_ins)

        # NB: We ignore some mypy issues because ruamel.yaml typehints are not great.

        if not config:
            config = self._defaults_from_yamllint_config()

        # these settings are derived from yamllint config
        self.explicit_start: bool = config["explicit_start"]  # type: ignore[assignment]
        self.explicit_end: bool = config["explicit_end"]  # type: ignore[assignment]
        self.width: int = config["width"]  # type: ignore[assignment]
        indent_sequences: bool = cast(bool, config["indent_sequences"])
        preferred_quote: str = cast(str, config["preferred_quote"])  # either ' or "
        min_spaces_inside: int = cast(int, config["min_spaces_inside"])
        max_spaces_inside: int = cast(int, config["max_spaces_inside"])

        self.default_flow_style = False
        self.compact_seq_seq = True  # type: ignore[assignment] # dash after dash
        self.compact_seq_map = True  # type: ignore[assignment] # key after dash

        # Do not use yaml.indent() as it obscures the purpose of these vars:
        self.map_indent = 2
        self.sequence_indent = 4 if indent_sequences else 2
        self.sequence_dash_offset = self.sequence_indent - 2

        # If someone doesn't want our FormattedEmitter, they can change it.
        self.Emitter = FormattedEmitter

        # ignore invalid preferred_quote setting
        if preferred_quote in ['"', "'"]:
            FormattedEmitter.preferred_quote = preferred_quote
        # NB: default_style affects preferred_quote as well.
        # self.default_style ∈ None (default), '', '"', "'", '|', '>'

        # spaces inside braces for flow mappings
        FormattedEmitter.min_spaces_inside = min_spaces_inside
        FormattedEmitter.max_spaces_inside = max_spaces_inside

        # We need a custom constructor to preserve Octal formatting in YAML 1.1
        self.Constructor = CustomConstructor
        self.Representer.add_representer(OctalIntYAML11, OctalIntYAML11.represent_octal)

        # We should preserve_quotes loads all strings as a str subclass that carries
        # a quote attribute. Will the str subclasses cause problems in transforms?
        # Are there any other gotchas to this?
        #
        # This will only preserve quotes for strings read from the file.
        # anything modified by the transform will use no quotes, preferred_quote,
        # or the quote that results in the least amount of escaping.

        # If needed, we can use this to change null representation to be explicit
        # (see https://stackoverflow.com/a/44314840/1134951)
        # self.Representer.add_representer(

    @staticmethod
    def _defaults_from_yamllint_config() -> dict[str, bool | int | str]:
        """Extract FormattedYAML-relevant settings from yamllint config if possible."""
        # Copy so that per-run yamllint values never mutate the shared class-level
        # default_config dict (previously this aliased it, so one call's settings
        # leaked into the defaults of every later FormattedYAML instance).
        config = FormattedYAML.default_config.copy()
        for rule, rule_config in load_yamllint_config().rules.items():
            if not rule_config:
                # rule disabled
                continue

            # refactor this if ... elif ... elif ... else monstrosity using match/case (PEP 634) once python 3.10 is mandatory
            if rule == "document-start":
                config["explicit_start"] = rule_config["present"]
            elif rule == "document-end":
                config["explicit_end"] = rule_config["present"]
            elif rule == "line-length":
                config["width"] = rule_config["max"]
            elif rule == "braces":
                min_spaces_inside = rule_config["min-spaces-inside"]
                if min_spaces_inside:
                    config["min_spaces_inside"] = int(min_spaces_inside)
                max_spaces_inside = rule_config["max-spaces-inside"]
                if max_spaces_inside:
                    config["max_spaces_inside"] = int(max_spaces_inside)
            elif rule == "indentation":
                indent_sequences = rule_config["indent-sequences"]
                # one of: bool, "whatever", "consistent"
                # so, we use True for "whatever" and "consistent"
                config["indent_sequences"] = bool(indent_sequences)
            elif rule == "quoted-strings":
                quote_type = rule_config["quote-type"]
                # one of: single, double, any
                if quote_type == "single":
                    config["preferred_quote"] = "'"
                elif quote_type == "double":
                    config["preferred_quote"] = '"'

        return cast(dict[str, bool | int | str], config)

    @property
    def version(self) -> tuple[int, int] | None:
        """Return the YAML version used to parse or dump.

        Ansible uses PyYAML which only supports YAML 1.1. ruamel.yaml defaults to 1.2.
        So, we have to make sure we dump yaml files using YAML 1.1.
        We can relax the version requirement once ansible uses a version of PyYAML
        that includes this PR: https://github.com/yaml/pyyaml/pull/555
        """
        if hasattr(self, "_yaml_version"):
            return self._yaml_version
        return None

    @version.setter
    def version(self, value: tuple[int, int] | None) -> None:
        """Ensure that yaml version uses our default value.

        The yaml Reader updates this value based on the ``%YAML`` directive in files.
        So, if a file does not include the directive, it sets this to None.
        But, None effectively resets the parsing version to YAML 1.2 (ruamel's default).
        """
        if value is not None:
            self._yaml_version = value
        elif hasattr(self, "_yaml_version_default"):
            self._yaml_version = self._yaml_version_default
        # We do nothing if the object did not have a previous default version defined

    def load(self, stream: Path | StreamTextType) -> Any:
        """Load YAML content from a string while avoiding known ruamel.yaml issues."""
        if not isinstance(stream, str):
            msg = f"expected a str but got {type(stream)}"
            raise NotImplementedError(msg)
        # As ruamel drops comments for any document that is not a mapping or sequence,
        # we need to avoid using it to reformat those documents.
        # https://sourceforge.net/p/ruamel-yaml/tickets/460/

        text, preamble_comment = self._pre_process_yaml(stream)
        try:
            data = super().load(stream=text)
        except ComposerError:
            # multi-document streams cannot be loaded with load()
            data = self.load_all(stream=text)
        except ParserError:
            data = None
            _logger.error(  # noqa: TRY400
                "Invalid yaml, verify the file contents and try again.",
            )
        if preamble_comment is not None and isinstance(
            data,
            CommentedMap | CommentedSeq,
        ):
            data.preamble_comment = preamble_comment  # type: ignore[union-attr]
        # Because data can validly also be None for empty documents, we cannot
        # really annotate the return type here, so we need to remember to
        # never save None or scalar data types when reformatting.
        return data

    def dumps(self, data: Any) -> str:
        """Dump YAML document to string (including its preamble_comment)."""
        preamble_comment: str | None = getattr(data, "preamble_comment", None)
        self._prevent_wrapping_flow_style(data)
        with StringIO() as stream:
            if preamble_comment:
                stream.write(preamble_comment)
            self.dump(data, stream)
            text = stream.getvalue()
            strip_version_directive = hasattr(self, "_yaml_version_default")
            return self._post_process_yaml(
                text,
                strip_version_directive=strip_version_directive,
                strip_explicit_start=not self.explicit_start,
            )

    def _prevent_wrapping_flow_style(self, data: Any) -> None:
        """Convert flow-style maps that would exceed width to block style."""
        if not isinstance(data, CommentedMap | CommentedSeq):
            return
        for key, value, parent_path in nested_items_path(data):
            if not isinstance(value, CommentedMap | CommentedSeq):
                continue
            fa: Format = value.fa
            if fa.flow_style():
                predicted_indent = self._predict_indent_length(parent_path, key)
                predicted_width = len(str(value))
                if predicted_indent + predicted_width > self.width:
                    # this flow-style map will probably get line-wrapped,
                    # so, switch it to block style to avoid the line wrap.
                    fa.set_block_style()

    def _predict_indent_length(self, parent_path: list[str | int], key: Any) -> int:
        """Estimate the indent column a value at ``parent_path``/``key`` will get."""
        indent = 0

        # each parent_key type tells us what the indent is for the next level.
        for parent_key in parent_path:
            if isinstance(parent_key, int) and indent == 0:
                # root level is a sequence
                indent += self.sequence_dash_offset
            elif isinstance(parent_key, int):
                # next level is a sequence
                indent += cast(int, self.sequence_indent)
            elif isinstance(parent_key, str):
                # next level is a map
                indent += cast(int, self.map_indent)

        if isinstance(key, int) and indent == 0:
            # flow map is an item in a root-level sequence
            indent += self.sequence_dash_offset
        elif isinstance(key, int) and indent > 0:
            # flow map is in a sequence
            indent += cast(int, self.sequence_indent)
        elif isinstance(key, str):
            # flow map is in a map
            indent += len(key + ": ")
        return indent

    # ruamel.yaml only preserves empty (no whitespace) blank lines
    # (ie "/n/n" becomes "/n/n" but "/n /n" becomes "/n").
    # So, we need to identify whitespace-only lines to drop spaces before reading.
    _whitespace_only_lines_re = re.compile(r"^ +$", re.MULTILINE)

    def _pre_process_yaml(self, text: str) -> tuple[str, str | None]:
        """Handle known issues with ruamel.yaml loading.

        Preserve blank lines despite extra whitespace.
        Preserve any preamble (aka header) comments before "---".

        For more on preamble comments, see: https://stackoverflow.com/questions/70286108/python-ruamel-yaml-package-how-to-get-header-comment-lines/70287507#70287507
        """
        text = self._whitespace_only_lines_re.sub("", text)

        # I investigated extending ruamel.yaml to capture preamble comments.
        #   preamble comment goes from:
        #     DocumentStartToken.comment -> DocumentStartEvent.comment
        #   Then, in the composer:
        #     once in composer.current_event
        #       discards DocumentStartEvent
        #         move DocumentStartEvent to composer.last_event
        #       all document nodes get composed (events get used)
        #     discard DocumentEndEvent
        #       move DocumentEndEvent to composer.last_event
        # So, there's no convenient way to extend the composer
        # to somehow capture the comments and pass them on.

        preamble_comments = []
        if "\n---\n" not in text and "\n--- " not in text:
            # nothing is before the document start mark,
            # so there are no comments to preserve.
            return text, None
        for line in text.splitlines(True):
            # We only need to capture the preamble comments. No need to remove them.
            # lines might also include directives.
            if line.lstrip().startswith("#") or line == "\n":
                preamble_comments.append(line)
            elif line.startswith("---"):
                break
        return text, "".join(preamble_comments) or None

    @staticmethod
    def _post_process_yaml(
        text: str,
        *,
        strip_version_directive: bool = False,
        strip_explicit_start: bool = False,
    ) -> str:
        """Handle known issues with ruamel.yaml dumping.

        Make sure there's only one newline at the end of the file.

        Fix the indent of full-line comments to match the indent of the next line.
        See: https://stackoverflow.com/questions/71354698/how-can-i-use-the-ruamel-yaml-rtsc-mode/71355688#71355688
        Also, removes "#" protection from strings that prevents them from being
        identified as full line comments in post-processing.

        Make sure null list items don't end in a space.
        """
        # remove YAML directive
        if strip_version_directive and text.startswith("%YAML"):
            text = text.split("\n", 1)[1]

        # remove explicit document start
        if strip_explicit_start and text.startswith("---"):
            text = text.split("\n", 1)[1]

        text = text.rstrip("\n") + "\n"

        lines = text.splitlines(keepends=True)
        full_line_comments: list[tuple[int, str]] = []
        for i, line in enumerate(lines):
            stripped = line.lstrip()
            if not stripped:
                # blank line. Move on.
                continue

            space_length = len(line) - len(stripped)

            if stripped.startswith("#"):
                # got a full line comment

                # allow some full line comments to match the previous indent
                if i > 0 and not full_line_comments and space_length:
                    prev = lines[i - 1]
                    prev_space_length = len(prev) - len(prev.lstrip())
                    if prev_space_length == space_length:
                        # if the indent matches the previous line's indent, skip it.
                        continue

                full_line_comments.append((i, stripped))
            elif full_line_comments:
                # match ident of previous non-blank line
                if not lines[i - 1].lstrip():
                    prev = lines[i - 2]
                    space_length = len(prev) - len(prev.lstrip())
                # end of full line comments so adjust to match indent of this line
                spaces = " " * space_length
                for index, comment in full_line_comments:
                    lines[index] = spaces + comment
                full_line_comments.clear()

            if line.rpartition(" ")[2] == "\n":
                # drop any trailing spaces
                lines[i] = line.rstrip() + "\n"

            cleaned = line.strip()
            if not cleaned.startswith("#") and cleaned.endswith("-"):
                # got an empty list item. drop any trailing spaces.
                lines[i] = line.rstrip() + "\n"

        text = "".join(
            FormattedEmitter.drop_octothorpe_protection(line) for line in lines
        )
        return text
def clean_json(
    obj: Any,
    func: Callable[[str], Any] = lambda key: (
        key.startswith("__") if isinstance(key, str) else False
    ),
) -> Any:
    """Remove all keys matching the condition from a nested JSON-like object.

    :param obj: a JSON like object to clean, also returned for chaining.
    :param func: a callable that takes a key in argument and return True for each key to delete
    """
    if isinstance(obj, dict):
        # delete matching keys first, then recurse into the surviving values
        doomed = [key for key in obj if func(key)]
        for key in doomed:
            del obj[key]
        for value in obj.values():
            clean_json(value, func)
    elif isinstance(obj, list):
        # iterate backwards so deletions do not shift pending indexes
        for index in reversed(range(len(obj))):
            if func(obj[index]):
                del obj[index]
            else:
                clean_json(obj[index], func)
    else:
        # scalars (str, int, None, ...) need no cleaning
        pass
    return obj
| 49,330 | Python | .py | 1,086 | 35.350829 | 329 | 0.60325 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,216 | errors.py | ansible_ansible-lint/src/ansiblelint/errors.py | """Exceptions and error representations."""
from __future__ import annotations
import functools
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any
from ansiblelint._internal.rules import BaseRule, RuntimeErrorRule
from ansiblelint.file_utils import Lintable
if TYPE_CHECKING:
from ansiblelint.utils import Task
class LintWarning(Warning):
    """Warning category used by the linter."""
@dataclass
class WarnSource:
    """Container for warning information, so we can later create a MatchError from it."""

    filename: Lintable  # file in which the warning was found
    lineno: int  # line number; MatchError treats line numbers as starting at 1
    tag: str  # rule tag to attach to the resulting MatchError
    message: str | None = None  # optional human-readable message
@dataclass(frozen=True)
class RuleMatchTransformMeta:
    """Additional metadata about a match error to be used during transformation.

    Frozen, so instances are immutable and hashable (MatchError is itself
    hashable and carries one of these in its ``transform_meta`` field).
    """
# pylint: disable=too-many-instance-attributes
@dataclass(unsafe_hash=True)
@functools.total_ordering
class MatchError(ValueError):
"""Rule violation detected during linting.
It can be raised as Exception but also just added to the list of found
rules violations.
Note that line argument is not considered when building hash of an
instance.
"""
# order matters for these:
message: str = field(init=True, repr=False, default="")
lintable: Lintable = field(init=True, repr=False, default=Lintable(name=""))
tag: str = field(init=True, repr=False, default="")
lineno: int = 1
details: str = ""
column: int | None = None
# rule is not included in hash because we might have different instances
# of the same rule, but we use the 'tag' to identify the rule.
rule: BaseRule = field(hash=False, default=RuntimeErrorRule())
ignored: bool = False
fixed: bool = False # True when a transform has resolved this MatchError
transform_meta: RuleMatchTransformMeta | None = None
def __post_init__(self) -> None:
"""Can be use by rules that can report multiple errors type, so we can still filter by them."""
self.filename = self.lintable.name
# We want to catch accidental MatchError() which contains no useful
# information. When no arguments are passed, the '_message' field is
# set to 'property', only if passed it becomes a string.
if self.rule.__class__ is RuntimeErrorRule:
# so instance was created without a rule
if not self.message:
msg = f"{self.__class__.__name__}() missing a required argument: one of 'message' or 'rule'"
raise TypeError(msg)
if not isinstance(self.tag, str):
msg = "MatchErrors must be created with either rule or tag specified."
raise TypeError(msg)
if not self.message:
self.message = self.rule.shortdesc
self.match_type: str | None = None
# for task matches, save the normalized task object (useful for transforms)
self.task: Task | None = None
# path to the problem area, like: [0,"pre_tasks",3] for [0].pre_tasks[3]
self.yaml_path: list[int | str] = []
if not self.tag:
self.tag = self.rule.id
# Safety measure to ensure we do not end-up with incorrect indexes
if self.lineno == 0: # pragma: no cover
msg = "MatchError called incorrectly as line numbers start with 1"
raise RuntimeError(msg)
if self.column == 0: # pragma: no cover
msg = "MatchError called incorrectly as column numbers start with 1"
raise RuntimeError(msg)
self.lineno += self.lintable.line_offset
# We make the lintable aware that we found a match inside it, as this
# can be used to skip running other rules that do require current one
# to pass.
self.lintable.matches.append(self)
@functools.cached_property
def level(self) -> str:
"""Return the level of the rule: error, warning or notice."""
if (
not self.ignored
and self.rule.options
and {self.tag, self.rule.id, *self.rule.tags}.isdisjoint(
self.rule.options.warn_list,
)
):
return "error"
return "warning"
def __repr__(self) -> str:
"""Return a MatchError instance representation."""
formatstr = "[{0}] ({1}) matched {2}:{3} {4}"
# note that `rule.id` can be int, str or even missing, as users
# can defined their own custom rules.
_id = getattr(self.rule, "id", "000")
return formatstr.format(
_id,
self.message,
self.filename,
self.lineno,
self.details,
)
def __str__(self) -> str:
"""Return a MatchError instance string representation."""
return self.__repr__()
@property
def position(self) -> str:
"""Return error positioning, with column number if available."""
if self.column:
return f"{self.lineno}:{self.column}"
return str(self.lineno)
@property
def _hash_key(self) -> Any:
# line attr is knowingly excluded, as dict is not hashable
return (
self.filename,
self.lineno,
str(getattr(self.rule, "id", 0)),
self.message,
self.details,
# -1 is used here to force errors with no column to sort before
# all other errors.
-1 if self.column is None else self.column,
)
    def __lt__(self, other: object) -> bool:
        """Return whether the current object is less than the other."""
        if not isinstance(other, self.__class__):
            return NotImplemented
        # Ordering is fully defined by the _hash_key tuple.
        return bool(self._hash_key < other._hash_key)

    def __eq__(self, other: object) -> bool:
        """Identify whether the other object represents the same rule match."""
        if not isinstance(other, self.__class__):
            return NotImplemented
        # NOTE(review): equality delegates to __hash__, so two matches compare
        # equal whenever their hashes collide — confirm this is intended.
        return self.__hash__() == other.__hash__()
| 5,955 | Python | .py | 136 | 35.492647 | 108 | 0.634698 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,217 | __main__.py | ansible_ansible-lint/src/ansiblelint/__main__.py | #!/usr/bin/env python
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Command line implementation."""
from __future__ import annotations
import errno
import logging
import os
import pathlib
import shutil
import site
import sys
from pathlib import Path
from typing import TYPE_CHECKING, Any, TextIO
from ansible_compat.prerun import get_cache_dir
from filelock import FileLock, Timeout
from rich.markup import escape
from ansiblelint.constants import RC, SKIP_SCHEMA_UPDATE
# safety check for broken ansible core, needs to happen first
try:
# pylint: disable=unused-import
from ansible.parsing.dataloader import DataLoader # noqa: F401
except Exception as _exc: # pylint: disable=broad-exception-caught # noqa: BLE001
logging.fatal(_exc)
sys.exit(RC.INVALID_CONFIG)
# pylint: disable=ungrouped-imports
from ansiblelint import cli
from ansiblelint._mockings import _perform_mockings_cleanup
from ansiblelint.app import get_app
from ansiblelint.color import (
console,
console_options,
console_stderr,
reconfigure,
render_yaml,
)
from ansiblelint.config import (
Options,
get_deps_versions,
get_version_warning,
log_entries,
options,
)
from ansiblelint.loaders import load_ignore_txt
from ansiblelint.runner import get_matches
from ansiblelint.skip_utils import normalize_tag
from ansiblelint.version import __version__
if TYPE_CHECKING:
# RulesCollection must be imported lazily or ansible gets imported too early.
from collections.abc import Callable
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import LintResult
_logger = logging.getLogger(__name__)
class LintLogHandler(logging.Handler):
    """Custom handler that uses our rich stderr console."""

    def emit(self, record: logging.LogRecord) -> None:
        """Render the record as dimmed text on the rich stderr console."""
        try:
            rendered = self.format(record)
            console_stderr.print(f"[dim]{rendered}[/dim]", highlight=False)
        except RecursionError:  # See issue 36272
            raise
        except Exception:  # pylint: disable=broad-exception-caught # noqa: BLE001
            self.handleError(record)
def initialize_logger(level: int = 0) -> None:
    """Set up the global logging level based on the verbosity number."""
    # The root logger defaults to logging.WARNING; that is where the 0
    # (default) entry in this mapping comes from.
    level_for_verbosity = {
        -2: logging.CRITICAL,
        -1: logging.ERROR,
        0: logging.WARNING,
        1: logging.INFO,
        2: logging.DEBUG,
    }
    handler = LintLogHandler()
    handler.setFormatter(logging.Formatter("%(levelname)-8s %(message)s"))
    root = logging.getLogger()
    root.addHandler(handler)
    # Any verbosity outside the map is treated as DEBUG.
    # NOTE(review): this also applies to values below -2 — confirm the CLI
    # cannot produce such values.
    chosen_level = level_for_verbosity.get(level, logging.DEBUG)
    root.setLevel(chosen_level)
    logging.captureWarnings(True)  # pass all warnings.warn() messages through logging
    # Use module-level _logger instance to validate it
    _logger.debug("Logging initialized to level %s", chosen_level)
def initialize_options(arguments: list[str] | None = None) -> None | FileLock:
    """Load config options and store them inside options module.

    :param arguments: Command line arguments (program name excluded).
    :returns: A held FileLock on the cache directory when one was acquired,
        otherwise None. The caller is responsible for releasing it.
    """
    cache_dir_lock = None
    new_options = cli.get_config(arguments or [])
    new_options.cwd = pathlib.Path.cwd()

    if new_options.colored is None:
        # Auto-detect terminal color support when not configured explicitly.
        new_options.colored = should_do_markup()

    # persist loaded configuration inside options module
    for k, v in new_options.__dict__.items():
        setattr(options, k, v)

    # rename deprecated ids/tags to newer names
    options.tags = [normalize_tag(tag) for tag in options.tags]
    options.skip_list = [normalize_tag(tag) for tag in options.skip_list]
    options.warn_list = [normalize_tag(tag) for tag in options.warn_list]

    options.configured = True
    options.cache_dir = get_cache_dir(pathlib.Path(options.project_dir))
    # add a lock file so we do not have two instances running inside at the same time
    if options.cache_dir:
        options.cache_dir.mkdir(parents=True, exist_ok=True)

        if not options.offline:  # pragma: no cover
            cache_dir_lock = FileLock(
                f"{options.cache_dir}/.lock",
            )
            try:
                # Block up to 3 minutes waiting for a concurrent run to finish.
                cache_dir_lock.acquire(timeout=180)
            except Timeout:  # pragma: no cover
                _logger.error(  # noqa: TRY400
                    "Timeout waiting for another instance of ansible-lint to release the lock.",
                )
                sys.exit(RC.LOCK_TIMEOUT)

    # Avoid extra output noise from Ansible about using devel versions
    if "ANSIBLE_DEVEL_WARNING" not in os.environ:  # pragma: no branch
        os.environ["ANSIBLE_DEVEL_WARNING"] = "false"

    return cache_dir_lock
def _do_list(rules: RulesCollection) -> int:
    """Print the rules or tags listing and return an exit code.

    Returns 0 when a listing was produced, 1 when called without any of the
    list options being active.
    """
    # On purpose lazy-imports to avoid pre-loading Ansible
    # pylint: disable=import-outside-toplevel
    from ansiblelint.generate_docs import rules_as_md, rules_as_rich, rules_as_str

    if options.list_rules:
        formatters: dict[str, Callable[..., Any]] = {
            "brief": rules_as_str,
            "full": rules_as_rich,
            "md": rules_as_md,
        }
        formatter = formatters.get(options.format, rules_as_str)
        console.print(formatter(rules), highlight=False)
        return 0

    if options.list_tags:
        console.print(render_yaml(rules.list_tags()))
        return 0

    # we should not get here!
    return 1
# noinspection PyShadowingNames
def _do_transform(result: LintResult, opts: Options) -> None:
    """Create and run Transformer over the lint result.

    :param result: LintResult whose matches may be marked as fixed in place.
    :param opts: Effective configuration options to honor.
    """
    if "yaml" in opts.skip_list:
        # The transformer rewrites yaml files, but the user requested to skip
        # the yaml rule or anything tagged with "yaml", so there is nothing to do.
        return

    # On purpose lazy-imports to avoid loading transforms unless requested
    # pylint: disable=import-outside-toplevel
    from ansiblelint.transformer import Transformer

    # Bug fix: use the passed-in `opts` instead of the module-level global
    # `options`, so the function honors whatever configuration the caller
    # provided (previously the parameter was consulted for skip_list but
    # silently ignored when constructing the Transformer).
    transformer = Transformer(result, opts)

    # this will mark any matches as fixed if the transforms repaired the issue
    transformer.run()
def support_banner() -> None:
    """Display support banner when running on unsupported platform."""
    # NOTE(review): currently a no-op; presumably kept as a hook that can be
    # overridden by downstream builds — confirm before removing.
def fix(runtime_options: Options, result: LintResult, rules: RulesCollection) -> None:
    """Fix the linting errors.

    :param runtime_options: Options object; its tags, lintables and
        _skip_ansible_syntax_check fields are mutated while re-running
        individual rules.
    :param result: LintResult object; fixed matches are removed in place.
    :param rules: Collection of loaded rules used for re-running.
    """
    # NOTE(review): module-global `options` and `runtime_options` refer to the
    # same object at the only call site in main(); mixing them below is
    # confusing — consider unifying on the parameter.
    match_count = len(result.matches)
    _logger.debug("Begin fixing: %s matches", match_count)
    ruamel_safe_version = "0.17.26"

    # pylint: disable=import-outside-toplevel
    from packaging.version import Version
    from ruamel.yaml import __version__ as ruamel_yaml_version_str

    # pylint: enable=import-outside-toplevel
    if Version(ruamel_safe_version) > Version(ruamel_yaml_version_str):
        _logger.warning(
            "We detected use of `--fix` feature with a buggy ruamel-yaml %s library instead of >=%s, upgrade it before reporting any bugs like dropped comments.",
            ruamel_yaml_version_str,
            ruamel_safe_version,
        )
    acceptable_tags = {"all", "none", *rules.known_tags()}
    unknown_tags = set(options.write_list).difference(acceptable_tags)

    if unknown_tags:
        _logger.error(
            "Found invalid value(s) (%s) for --fix arguments, must be one of: %s",
            ", ".join(unknown_tags),
            ", ".join(acceptable_tags),
        )
        sys.exit(RC.INVALID_CONFIG)
    _do_transform(result, options)

    # Only matches from these rules trigger a re-run of the linter to verify
    # whether the transform resolved them.
    rerun = ["yaml"]
    resolved = []
    # Iterate in reverse so result.matches.pop(idx) does not shift the
    # indexes of the entries still to be visited.
    for idx, match in reversed(list(enumerate(result.matches))):
        _logger.debug("Fixing: (%s of %s) %s", match_count - idx, match_count, match)
        if match.fixed:
            _logger.debug("Fixed, removed: %s", match)
            result.matches.pop(idx)
            continue
        if match.rule.id not in rerun:
            _logger.debug("Not rerun eligible: %s", match)
            continue

        # One (rule, file) pair only needs to be re-verified once.
        uid = (match.rule.id, match.filename)
        if uid in resolved:
            _logger.debug("Previously resolved: %s", match)
            result.matches.pop(idx)
            continue
        _logger.debug("Rerunning: %s", match)
        # Narrow the next run to just this rule and this file.
        runtime_options.tags = [match.rule.id]
        runtime_options.lintables = [match.filename]
        runtime_options._skip_ansible_syntax_check = True  # noqa: SLF001
        new_results = get_matches(rules, runtime_options)
        if not new_results.matches:
            _logger.debug("Newly resolved: %s", match)
            result.matches.pop(idx)
            resolved.append(uid)
            continue
        if match in new_results.matches:
            _logger.debug("Still found: %s", match)
            continue
        _logger.debug("Fixed, removed: %s", match)
        result.matches.pop(idx)
# pylint: disable=too-many-locals
def main(argv: list[str] | None = None) -> int:
    """Linter CLI entry point.

    :param argv: Full argument vector (argv[0] is the program path);
        sys.argv is used when None.
    :returns: Exit code as computed by app.report_outcome.
    """
    # alter PATH if needed (venv support)
    path_inject(argv[0] if argv and argv[0] else "")

    if argv is None:  # pragma: no cover
        argv = sys.argv
    cache_dir_lock = initialize_options(argv[1:])

    console_options["force_terminal"] = options.colored
    reconfigure(console_options)

    if options.version:
        deps = get_deps_versions()
        msg = f"ansible-lint [repr.number]{__version__}[/] using[dim]"
        for k, v in deps.items():
            msg += f" {escape(k)}:[repr.number]{v}[/]"
        msg += "[/]"
        console.print(msg, markup=True, highlight=False)
        msg = get_version_warning()
        if msg:
            console.print(msg)
        support_banner()
        sys.exit(0)
    else:
        support_banner()

    initialize_logger(options.verbosity)
    # Flush any log entries that were queued before logging was configured.
    for level, message in log_entries:
        _logger.log(level, message)
    _logger.debug("Options: %s", options)
    _logger.debug("CWD: %s", Path.cwd())

    # checks if we have `ANSIBLE_LINT_SKIP_SCHEMA_UPDATE` set to bypass schema
    # update. Also skip if in offline mode.
    # env var set to skip schema refresh
    skip_schema_update = (
        bool(
            int(
                os.environ.get(
                    SKIP_SCHEMA_UPDATE,
                    "0",
                ),
            ),
        )
        or options.offline
        or options.nodeps
    )

    if not skip_schema_update:
        # pylint: disable=import-outside-toplevel
        from ansiblelint.schemas.__main__ import refresh_schemas

        refresh_schemas()

    # pylint: disable=import-outside-toplevel
    from ansiblelint.rules import RulesCollection

    if options.list_profiles:
        from ansiblelint.generate_docs import profiles_as_rich

        console.print(profiles_as_rich())
        return 0

    app = get_app(
        offline=None,
        cached=True,
    )  # to be sure we use the offline value from settings
    rules = RulesCollection(
        options.rulesdirs,
        profile_name=options.profile,
        app=app,
        options=options,
    )

    if options.list_rules or options.list_tags:
        return _do_list(rules)

    if isinstance(options.tags, str):
        options.tags = options.tags.split(",")  # pragma: no cover
    result = get_matches(rules, options)

    mark_as_success = True

    # In strict mode any remaining match (even warnings) fails the run.
    if options.strict and result.matches:
        mark_as_success = False

    # Remove skip_list items from the result
    result.matches = [m for m in result.matches if m.tag not in app.options.skip_list]
    # Mark matches as ignored inside ignore file
    ignore_map = load_ignore_txt(options.ignore_file)
    for match in result.matches:
        if match.tag in ignore_map[match.filename]:
            match.ignored = True
            _logger.debug("Ignored: %s", match)

    if app.yamllint_config.incompatible:
        # Incompatible yamllint config is fatal only when --fix was requested.
        logging.log(
            level=logging.ERROR if options.write_list else logging.WARNING,
            msg=app.yamllint_config.incompatible,
        )
    if options.write_list:
        if app.yamllint_config.incompatible:
            sys.exit(RC.INVALID_CONFIG)
        fix(runtime_options=options, result=result, rules=rules)

    app.render_matches(result.matches)

    _perform_mockings_cleanup(app.options)
    if cache_dir_lock:
        cache_dir_lock.release()
        pathlib.Path(cache_dir_lock.lock_file).unlink(missing_ok=True)
    if options.mock_filters:
        _logger.warning(
            "The following filters were mocked during the run: %s",
            ",".join(options.mock_filters),
        )

    return app.report_outcome(result, mark_as_success=mark_as_success)
def _run_cli_entrypoint() -> None:
    """Invoke the main entrypoint with current CLI args.

    This function also processes the runtime exceptions.
    """
    try:
        exit_code = main(sys.argv)
    except OSError as exc:
        # NOTE: Only "broken pipe" is acceptable to ignore
        if exc.errno != errno.EPIPE:  # pragma: no cover
            raise
        return
    except KeyboardInterrupt:  # pragma: no cover
        sys.exit(RC.EXIT_CONTROL_C)
    except RuntimeError as exc:  # pragma: no cover
        raise SystemExit(exc) from exc
    sys.exit(exit_code)
def path_inject(own_location: str = "") -> None:
    """Add python interpreter path to top of PATH to fix outside venv calling.

    :param own_location: Path of the currently running executable, used as a
        last-resort location to look for an `ansible` binary next to it.
    :raises RuntimeError: When no `ansible` executable can be found in PATH
        after the adjustments.
    """
    # This make it possible to call ansible-lint that was installed inside a
    # virtualenv without having to pre-activate it. Otherwise subprocess will
    # either fail to find ansible executables or call the wrong ones.
    #
    # This must be run before we do run any subprocesses, and loading config
    # does this as part of the ansible detection.
    paths = [x for x in os.environ.get("PATH", "").split(os.pathsep) if x]

    # Expand ~ in PATH as it known to break many tools
    expanded = False
    for idx, path in enumerate(paths):
        if path.startswith("~"):  # pragma: no cover
            paths[idx] = str(Path(path).expanduser())
            expanded = True
    if expanded:  # pragma: no cover
        print(  # noqa: T201
            "WARNING: PATH altered to expand ~ in it. Read https://stackoverflow.com/a/44704799/99834 and correct your system configuration.",
            file=sys.stderr,
        )

    inject_paths = []

    # Candidate 1: the pip --user scripts directory.
    userbase_bin_path = Path(site.getuserbase()) / "bin"
    if (
        str(userbase_bin_path) not in paths
        and (userbase_bin_path / "bin" / "ansible").exists()
    ):
        inject_paths.append(str(userbase_bin_path))

    # Candidate 2: the directory of the running interpreter (virtualenv bin),
    # unless it belongs to a pipx-managed environment.
    py_path = Path(sys.executable).parent
    pipx_path = os.environ.get("PIPX_HOME", "pipx")
    if (
        str(py_path) not in paths
        and (py_path / "ansible").exists()
        and pipx_path not in str(py_path)
    ):
        inject_paths.append(str(py_path))

    # last option, if nothing else is found, just look next to ourselves...
    if own_location:
        own_location = os.path.realpath(own_location)
        parent = Path(own_location).parent
        if (parent / "ansible").exists() and str(parent) not in paths:
            inject_paths.append(str(parent))

    if not os.environ.get("PYENV_VIRTUAL_ENV", None):
        if inject_paths and not all("pipx" in p for p in inject_paths):
            print(  # noqa: T201
                f"WARNING: PATH altered to include {', '.join(inject_paths)} :: This is usually a sign of broken local setup, which can cause unexpected behaviors.",
                file=sys.stderr,
            )
        if inject_paths or expanded:
            os.environ["PATH"] = os.pathsep.join([*inject_paths, *paths])

    # We do know that finding ansible in PATH does not guarantee that it is
    # functioning or that is in fact the same version that was installed as
    # our dependency, but addressing this would be done by ansible-compat.
    for cmd in ("ansible",):
        if not shutil.which(cmd):
            msg = f"Failed to find runtime dependency '{cmd}' in PATH"
            raise RuntimeError(msg)
# Based on Ansible implementation
# Based on Ansible implementation
def to_bool(value: Any) -> bool:  # pragma: no cover
    """Return a bool for the arg.

    Booleans and None are coerced directly; strings are matched
    case-insensitively against the truthy spellings; anything else is
    compared against the same truthy tuple.
    """
    if isinstance(value, bool) or value is None:
        return bool(value)
    truthy = ("yes", "on", "1", "true", 1)
    if isinstance(value, str):
        return value.lower() in truthy
    return value in truthy
def should_do_markup(stream: TextIO = sys.stdout) -> bool:  # pragma: no cover
    """Decide about use of ANSI colors.

    Resolution order: explicit opt-in env vars, then NO_COLOR, then TERM
    heuristics, and finally tty detection on the given stream.
    """
    configured: bool | None = None
    # First matching variable wins. https://xkcd.com/927/
    for env_var in ("PY_COLORS", "CLICOLOR", "FORCE_COLOR", "ANSIBLE_FORCE_COLOR"):
        raw = os.environ.get(env_var)
        if raw is not None:
            configured = to_bool(raw)
            break

    # If deliberately disabled colors
    if os.environ.get("NO_COLOR", None):
        return False

    # User configuration requested colors
    if configured is not None:
        return configured

    term = os.environ.get("TERM", "")
    if "xterm" in term:
        return True
    if term == "dumb":
        return False

    # Use tty detection logic as last resort because there are numerous
    # factors that can make isatty return a misleading value, including:
    # - stdin.isatty() is the only one returning true, even on a real terminal
    # - stderr returning false if user user uses a error stream coloring solution
    return stream.isatty()
if __name__ == "__main__":
    # Script entry point: delegate to the wrapper that handles exit codes
    # and runtime exceptions.
    _run_cli_entrypoint()
| 18,446 | Python | .py | 435 | 35.48046 | 165 | 0.667355 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,218 | config.py | ansible_ansible-lint/src/ansiblelint/config.py | """Store configuration options as a singleton."""
from __future__ import annotations
import json
import logging
import os
import sys
import time
import urllib.request
import warnings
from dataclasses import dataclass, field
from functools import lru_cache
from http.client import HTTPException
from importlib.metadata import PackageNotFoundError, distribution, version
from pathlib import Path
from typing import Any
from urllib.error import HTTPError, URLError
from packaging.version import Version
from ansiblelint import __version__
from ansiblelint.loaders import yaml_from_file
_logger = logging.getLogger(__name__)
# Directory used to cache the "latest release" lookup between runs
# (see get_version_warning below).
CACHE_DIR = (
    os.path.expanduser(os.environ.get("XDG_CACHE_HOME", "~/.cache")) + "/ansible-lint"
)

# NOTE(review): presumably used as the default set of tags reported as
# warnings instead of errors — confirm against the config loading code.
DEFAULT_WARN_LIST = [
    "experimental",
    "jinja[spacing]",  # warning until we resolve all reported false-positives
    "fqcn[deep]",  # 2023-05-31 added
]
DEFAULT_KINDS = [
# Do not sort this list, order matters.
{"jinja2": "**/*.j2"}, # jinja2 templates are not always parsable as something else
{"jinja2": "**/*.j2.*"},
{"yaml": ".github/**/*.{yaml,yml}"}, # github workflows
{"text": "**/templates/**/*.*"}, # templates are likely not validable
{"execution-environment": "**/execution-environment.yml"},
{"ansible-lint-config": "**/.ansible-lint"},
{"ansible-lint-config": "**/.config/ansible-lint.yml"},
{"ansible-navigator-config": "**/ansible-navigator.{yaml,yml}"},
{"inventory": "**/inventory/**.{yaml,yml}"},
{"requirements": "**/meta/requirements.{yaml,yml}"}, # v1 only
# https://docs.ansible.com/ansible/latest/dev_guide/collections_galaxy_meta.html
{"galaxy": "**/galaxy.yml"}, # Galaxy collection meta
{"reno": "**/releasenotes/*/*.{yaml,yml}"}, # reno release notes
{"vars": "**/{host_vars,group_vars,vars,defaults}/**/*.{yaml,yml}"},
{"tasks": "**/tasks/**/*.{yaml,yml}"},
{"rulebook": "**/rulebooks/*.{yml,yaml"},
{"playbook": "**/playbooks/*.{yml,yaml}"},
{"playbook": "**/*playbook*.{yml,yaml}"},
{"role": "**/roles/*/"},
{"handlers": "**/handlers/*.{yaml,yml}"},
{"test-meta": "**/tests/integration/targets/*/meta/main.{yaml,yml}"},
{"meta": "**/meta/main.{yaml,yml}"},
{"meta-runtime": "**/meta/runtime.{yaml,yml}"},
{"role-arg-spec": "**/meta/argument_specs.{yaml,yml}"}, # role argument specs
{"yaml": ".config/molecule/config.{yaml,yml}"}, # molecule global config
{
"requirements": "**/molecule/*/{collections,requirements}.{yaml,yml}",
}, # molecule old collection requirements (v1), ansible 2.8 only
{"yaml": "**/molecule/*/{base,molecule}.{yaml,yml}"}, # molecule config
{"requirements": "**/requirements.{yaml,yml}"}, # v2 and v1
{"playbook": "**/molecule/*/*.{yaml,yml}"}, # molecule playbooks
{"yaml": "**/{.ansible-lint,.yamllint}"},
{"changelog": "**/changelogs/changelog.{yaml,yml}"},
{"yaml": "**/*.{yaml,yml}"},
{"yaml": "**/.*.{yaml,yml}"},
{"sanity-ignore-file": "**/tests/sanity/ignore-*.txt"},
# what are these doc_fragments? We also ignore module_utils for now
{
"plugin": "**/plugins/{action,become,cache,callback,connection,filter,inventory,lookup,modules,test}/**/*.py",
},
{"python": "**/*.py"},
]
BASE_KINDS = [
# These assignations are only for internal use and are only inspired by
# MIME/IANA model. Their purpose is to be able to process a file based on
# it type, including generic processing of text files using the prefix.
{
"text/jinja2": "**/*.j2",
}, # jinja2 templates are not always parsable as something else
{"text/jinja2": "**/*.j2.*"},
{"text": "**/templates/**/*.*"}, # templates are likely not validable
{"text/json": "**/*.json"}, # standardized
{"text/markdown": "**/*.md"}, # https://tools.ietf.org/html/rfc7763
{"text/rst": "**/*.rst"}, # https://en.wikipedia.org/wiki/ReStructuredText
{"text/ini": "**/*.ini"},
# YAML has no official IANA assignation
{"text/yaml": "**/{.ansible-lint,.yamllint}"},
{"text/yaml": "**/*.{yaml,yml}"},
{"text/yaml": "**/.*.{yaml,yml}"},
{"text/python": "**/*.py"},
]
# File kinds that are recognized by ansible, used internally to force use of
# YAML 1.1 instead of 1.2 due to ansible-core dependency on pyyaml.
ANSIBLE_OWNED_KINDS = {
    "handlers",
    "galaxy",
    "meta",
    "meta-runtime",
    "playbook",
    "requirements",
    "role-arg-spec",
    "rulebook",
    "tasks",
    "vars",
}

# Rule profiles loaded from the bundled data/profiles.yml file.
PROFILES = yaml_from_file(Path(__file__).parent / "data" / "profiles.yml")

# NOTE(review): regex prefix for loop variable names; "{role}" is presumably
# substituted with the role name by the consuming rule — confirm.
LOOP_VAR_PREFIX = "^(__|{role}_)"
@dataclass
class Options:  # pylint: disable=too-many-instance-attributes
    """Store ansible-lint effective configuration options."""

    # Private attributes
    _skip_ansible_syntax_check: bool = False

    # Public attributes
    cache_dir: Path | None = None
    colored: bool = True
    configured: bool = False
    cwd: Path = Path()
    display_relative_path: bool = True
    exclude_paths: list[str] = field(default_factory=list)
    format: str = "brief"
    lintables: list[str] = field(default_factory=list)
    list_rules: bool = False
    list_tags: bool = False
    write_list: list[str] = field(default_factory=list)
    parseable: bool = False
    quiet: bool = False
    rulesdirs: list[Path] = field(default_factory=list)
    skip_list: list[str] = field(default_factory=list)
    tags: list[str] = field(default_factory=list)
    verbosity: int = 0
    warn_list: list[str] = field(default_factory=list)
    # NOTE(review): no annotation, so this is a plain class attribute shared
    # by all instances rather than a dataclass field — confirm intended.
    kinds = DEFAULT_KINDS
    mock_filters: list[str] = field(default_factory=list)
    mock_modules: list[str] = field(default_factory=list)
    mock_roles: list[str] = field(default_factory=list)
    loop_var_prefix: str | None = None
    only_builtins_allow_collections: list[str] = field(default_factory=list)
    only_builtins_allow_modules: list[str] = field(default_factory=list)
    var_naming_pattern: str | None = None
    offline: bool = False
    project_dir: str = "."  # default should be valid folder (do not use None here)
    extra_vars: dict[str, Any] | None = None
    enable_list: list[str] = field(default_factory=list)
    skip_action_validation: bool = True
    strict: bool = False
    rules: dict[str, Any] = field(
        default_factory=dict,
    )  # Placeholder to set and keep configurations for each rule.
    profile: str | None = None
    task_name_prefix: str = "{stem} | "
    sarif_file: Path | None = None
    config_file: str | None = None
    generate_ignore: bool = False
    rulesdir: list[Path] = field(default_factory=list)
    use_default_rules: bool = False
    version: bool = False  # display version command
    list_profiles: bool = False  # display profiles command
    ignore_file: Path | None = None
    max_tasks: int = 100
    max_block_depth: int = 20
    # Refer to https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html#ansible-core-support-matrix
    # NOTE(review): unannotated, hence a shared class attribute (see kinds).
    _default_supported = ["2.15.", "2.16.", "2.17."]
    supported_ansible_also: list[str] = field(default_factory=list)

    @property
    def nodeps(self) -> bool:
        """Returns value of nodeps feature."""
        # We do not want this to be cached as it would affect our testings.
        return bool(int(os.environ.get("ANSIBLE_LINT_NODEPS", "0")))

    def __post_init__(self) -> None:
        """Extra initialization logic."""
        # nodeps implies offline operation.
        if self.nodeps:
            self.offline = True

    @property
    def supported_ansible(self) -> list[str]:
        """Returns list of ansible versions that are considered supported."""
        return sorted([*self._default_supported, *self.supported_ansible_also])
options = Options()
# Used to store detected tag deprecations
used_old_tags: dict[str, str] = {}
# Used to store collection list paths (with mock paths if needed)
collection_list: list[str] = []
# Used to store log messages before logging is initialized (level, message)
log_entries: list[tuple[int, str]] = []
@lru_cache
def ansible_collections_path() -> str:
    """Return collection path variable for current version of Ansible."""
    # respect Ansible behavior, which is to load old name if present
    candidates = (
        "ANSIBLE_COLLECTIONS_PATHS",
        "ANSIBLE_COLLECTIONS_PATH",
    )
    return next(
        (name for name in candidates if name in os.environ),  # pragma: no cover
        "ANSIBLE_COLLECTIONS_PATH",
    )
def in_venv() -> bool:
    """Determine whether Python is running from a venv."""
    # Old virtualenv sets sys.real_prefix; conda exposes CONDA_EXE.
    if os.environ.get("CONDA_EXE", None) is not None or hasattr(sys, "real_prefix"):
        return True
    # PEP 405 venvs differ between base_prefix and prefix.
    base = getattr(sys, "base_prefix", sys.prefix)
    return base != sys.prefix
def guess_install_method() -> str:
    """Guess if pip upgrade command should be used.

    :returns: The pip command line to upgrade ansible-lint, or an empty
        string when pip-based upgrading does not look safe.
    """
    package_name = "ansible-lint"

    try:
        # Only recommend pip when pip itself performed the install.
        if (distribution(package_name).read_text("INSTALLER") or "").strip() != "pip":
            return ""
    except PackageNotFoundError as exc:
        logging.debug(exc)
        return ""

    pip = ""
    if in_venv():
        _logger.debug("Found virtualenv, assuming `pip3 install` will work.")
        pip = f"pip install --upgrade {package_name}"
    elif __file__.startswith(os.path.expanduser("~/.local/lib")):
        _logger.debug(
            "Found --user installation, assuming `pip3 install --user` will work.",
        )
        pip = f"pip3 install --user --upgrade {package_name}"

    # By default we assume pip is not safe to be used
    use_pip = False
    try:
        # Use pip to detect if is safe to use it to upgrade the package.
        # We do imports here to for performance and reasons, and also in order
        # to avoid errors if pip internals change. Also we want to avoid having
        # to add pip as a dependency, so we make use of it only when present.

        # trick to avoid runtime warning from inside pip: _distutils_hack/__init__.py:33: UserWarning: Setuptools is replacing distutils.
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("always")

            # pylint: disable=import-outside-toplevel
            from pip._internal.metadata import get_default_environment
            from pip._internal.req.req_uninstall import uninstallation_paths

            dist = get_default_environment().get_distribution(package_name)
            if dist:
                logging.debug("Found %s dist", dist)
                # If pip can enumerate uninstall paths, upgrading is possible.
                for _ in uninstallation_paths(dist):
                    use_pip = True
            else:
                logging.debug("Skipping %s as it is not installed.", package_name)
                use_pip = False
    except (AttributeError, ModuleNotFoundError) as exc:
        # On Fedora 36, we got a AttributeError exception from pip that we want to avoid
        # On NixOS, we got a ModuleNotFoundError exception from pip that we want to avoid
        logging.debug(exc)
        use_pip = False

    # We only want to recommend pip for upgrade if it looks safe to do so.
    return pip if use_pip else ""
def get_deps_versions() -> dict[str, Version | None]:
    """Return versions of most important dependencies."""

    def _installed_version(name: str) -> Version | None:
        # None signals the package is not installed.
        try:
            return Version(version(name))
        except PackageNotFoundError:
            return None

    packages = ("ansible-core", "ansible-compat", "ruamel-yaml", "ruamel-yaml-clib")
    return {name: _installed_version(name) for name in packages}
def get_version_warning() -> str:
    """Display warning if current version is outdated.

    The latest release metadata is fetched from the GitHub API at most once
    every 24 hours and cached under CACHE_DIR; offline mode uses only the
    cache. Returns a rich-markup message, or an empty string when no warning
    applies or the information could not be obtained.
    """
    # 0.1dev1 is special fallback version
    if __version__ == "0.1.dev1":  # pragma: no cover
        return ""
    pip = guess_install_method()

    # If we do not know how to upgrade, we do not want to show any warnings
    # about version.
    if not pip:
        return ""

    msg = ""
    data = {}
    current_version = Version(__version__)

    # exist_ok avoids a race when several ansible-lint processes start at the
    # same time (previously an exists() check followed by makedirs() could
    # raise FileExistsError).
    os.makedirs(CACHE_DIR, exist_ok=True)
    cache_file = f"{CACHE_DIR}/latest.json"
    refresh = True
    if os.path.exists(cache_file):
        age = time.time() - os.path.getmtime(cache_file)
        if age < 24 * 60 * 60:
            refresh = False
            with open(cache_file, encoding="utf-8") as f:
                try:
                    data = json.load(f)
                except ValueError:
                    # A corrupted/truncated cache (e.g. interrupted write by
                    # a concurrent run) should trigger a refresh, not a crash.
                    refresh = True

    if not options.offline and (refresh or not data):
        release_url = (
            "https://api.github.com/repos/ansible/ansible-lint/releases/latest"
        )
        try:
            with urllib.request.urlopen(release_url) as url:  # noqa: S310
                data = json.load(url)
                with open(cache_file, "w", encoding="utf-8") as f:
                    json.dump(data, f)
        except (URLError, HTTPError, HTTPException) as exc:  # pragma: no cover
            _logger.debug(
                "Unable to fetch latest version from %s due to: %s",
                release_url,
                exc,
            )
            return ""

    if data:
        html_url = data["html_url"]
        new_version = Version(data["tag_name"][1:])  # removing v prefix from tag

        if current_version > new_version:
            msg = "[dim]You are using a pre-release version of ansible-lint.[/]"
        elif current_version < new_version:
            msg = f"""[warning]A new release of ansible-lint is available: [red]{current_version}[/] → [green][link={html_url}]{new_version}[/][/][/]"""
            msg += f" Upgrade by running: [info]{pip}[/]"

    return msg
| 13,454 | Python | .py | 303 | 38.171617 | 154 | 0.638986 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,219 | _mockings.py | ansible_ansible-lint/src/ansiblelint/_mockings.py | """Utilities for mocking ansible modules and roles."""
from __future__ import annotations
import contextlib
import logging
import re
import sys
from typing import TYPE_CHECKING
from ansiblelint.constants import ANSIBLE_MOCKED_MODULE, RC
if TYPE_CHECKING:
from pathlib import Path
from ansiblelint.config import Options
_logger = logging.getLogger(__name__)
def _make_module_stub(module_name: str, options: Options) -> None:
    """Create a stub module file inside the cache dir for a mocked module.

    ``module_name`` is either a plain name (stubbed under modules/) or a
    fully qualified ``namespace.collection.module`` path (stubbed inside the
    matching collection tree).

    :raises RuntimeError: When options.cache_dir is not set.
    """
    if not options.cache_dir:
        msg = "Cache directory not set"
        raise RuntimeError(msg)
    # a.b.c is treated a collection
    if re.match(r"^(\w+|\w+\.\w+\.[\.\w]+)$", module_name):
        parts = module_name.split(".")
        if len(parts) < 3:
            # Plain module name: stub goes under the flat modules/ dir.
            path = options.cache_dir / "modules"
            module_file = f"{options.cache_dir}/modules/{module_name}.py"
            namespace = None
            collection = None
        else:
            # Fully qualified name: rebuild the collection layout, keeping
            # any intermediate path segments (parts[2:-1]).
            namespace = parts[0]
            collection = parts[1]
            path = (
                options.cache_dir
                / "collections"
                / "ansible_collections"
                / namespace
                / collection
                / "plugins"
                / "modules"
                / ("/".join(parts[2:-1]))
            )
            module_file = f"{path}/{parts[-1]}.py"
        path.mkdir(exist_ok=True, parents=True)
        _write_module_stub(
            filename=module_file,
            name=module_name,
            namespace=namespace,
            collection=collection,
        )
    else:
        # Invalid names abort the whole run with a config error.
        _logger.error("Config error: %s is not a valid module name.", module_name)
        sys.exit(RC.INVALID_CONFIG)
def _write_module_stub(
    filename: str,
    name: str,
    namespace: str | None = None,
    collection: str | None = None,
) -> None:
    """Write module stub to disk."""
    content = ANSIBLE_MOCKED_MODULE.format(
        name=name,
        namespace=namespace,
        collection=collection,
    )
    with open(filename, "w", encoding="utf-8") as stub_file:
        stub_file.write(content)
def _perform_mockings(options: Options) -> None:
    """Create stub roles and modules listed in mock_roles/mock_modules."""
    path: Path
    if not options.cache_dir:
        msg = "Cache directory not set"
        raise RuntimeError(msg)
    for role_name in options.mock_roles:
        if re.match(r"\w+\.\w+\.\w+$", role_name):
            # fully qualified collection role: namespace.collection.role
            namespace, collection, role_dir = role_name.split(".")
            path = options.cache_dir.joinpath(
                "collections",
                "ansible_collections",
                namespace,
                collection,
                "roles",
                role_dir,
            )
        else:
            path = options.cache_dir / "roles" / role_name
        # Avoid error from makedirs if destination is a broken symlink
        if path.is_symlink() and not path.exists():  # pragma: no cover
            _logger.warning("Removed broken symlink from %s", path)
            path.unlink(missing_ok=True)
        path.mkdir(exist_ok=True, parents=True)

    if options.mock_modules:
        for module_name in options.mock_modules:
            _make_module_stub(module_name=module_name, options=options)
def _perform_mockings_cleanup(options: Options) -> None:
    """Remove the directories created for mocked roles, when empty."""
    if not options.cache_dir:
        msg = "Cache directory not set"
        raise RuntimeError(msg)
    for role_name in options.mock_roles:
        if re.match(r"\w+\.\w+\.\w+$", role_name):
            namespace, collection, role_dir = role_name.split(".")
            path = options.cache_dir.joinpath(
                "collections",
                "ansible_collections",
                namespace,
                collection,
                "roles",
                role_dir,
            )
        else:
            path = options.cache_dir / "roles" / role_name
        # rmdir only removes empty directories; anything else is kept
        with contextlib.suppress(OSError):
            path.rmdir()
| 3,932 | Python | .py | 111 | 25.675676 | 82 | 0.561745 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,220 | constants.py | ansible_ansible-lint/src/ansiblelint/constants.py | """Constants used by AnsibleLint."""
from enum import Enum
from pathlib import Path
from typing import Literal
# Directory shipping the built-in rules bundled with ansible-lint.
DEFAULT_RULESDIR = Path(__file__).parent / "rules"
# Environment variable pointing to an extra directory of custom rules.
CUSTOM_RULESDIR_ENVVAR = "ANSIBLE_LINT_CUSTOM_RULESDIR"
# Base URL for per-rule documentation pages.
RULE_DOC_URL = "https://ansible.readthedocs.io/projects/lint/rules/"
SKIP_SCHEMA_UPDATE = "ANSIBLE_LINT_SKIP_SCHEMA_UPDATE"

# Maps recognized environment variables to the help text used in EPILOG below.
ENV_VARS_HELP = {
    CUSTOM_RULESDIR_ENVVAR: "Used for adding another folder into the lookup path for new rules.",
    "ANSIBLE_LINT_IGNORE_FILE": "Define it to override the name of the default ignore file `.ansible-lint-ignore`",
    "ANSIBLE_LINT_WRITE_TMP": "Tells linter to dump fixes into different temp files instead of overriding original. Used internally for testing.",
    SKIP_SCHEMA_UPDATE: "Tells ansible-lint to skip schema refresh.",
    "ANSIBLE_LINT_NODEPS": "Avoids installing content dependencies and avoids performing checks that would fail when modules are not installed. Far less violations will be reported.",
}

# CLI help epilog listing the supported environment variables.
EPILOG = (
    "The following environment variables are also recognized but there is no guarantee that they will work in future versions:\n\n"
    + "\n".join(f"{key}: {value}\n" for key, value in ENV_VARS_HELP.items())
)
# Not using an IntEnum because only starting with py3.11 it will evaluate it
# as int.
class RC:  # pylint: disable=too-few-public-methods
    """All exit codes used by ansible-lint."""

    SUCCESS = 0  # run completed without fatal violations
    VIOLATIONS_FOUND = 2  # at least one fatal violation was reported
    INVALID_CONFIG = 3  # configuration file or CLI options are invalid
    LOCK_TIMEOUT = 4  # a lock acquisition timed out
    NO_FILES_MATCHED = 5  # no lintable files were found
    EXIT_CONTROL_C = 130  # interrupted by the user (128 + SIGINT convention)
# Minimal version of Ansible we support for runtime
ANSIBLE_MIN_VERSION = "2.12"
ANSIBLE_MOCKED_MODULE = """\
# This is a mocked Ansible module generated by ansible-lint
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
module: {name}
short_description: Mocked
version_added: "1.0.0"
description: Mocked
author:
- ansible-lint (@nobody)
'''
EXAMPLES = '''mocked'''
RETURN = '''mocked'''
def main():
result = dict(
changed=False,
original_message='',
message='')
module = AnsibleModule(
argument_spec=dict(),
supports_check_mode=True,
)
module.exit_json(**result)
if __name__ == "__main__":
main()
"""
# Kinds of files ansible-lint can classify; the empty string marks an
# unrecognized file type.
FileType = Literal[
    "playbook",
    "rulebook",
    "meta",  # role meta
    "meta-runtime",
    "tasks",  # includes pre_tasks, post_tasks
    "handlers",  # very similar to tasks but with some specifics
    # https://docs.ansible.com/ansible/latest/galaxy/user_guide.html#installing-roles-and-collections-from-the-same-requirements-yml-file
    "requirements",
    "role",  # that is a folder!
    "yaml",  # generic yaml file, previously reported as unknown file type
    "ansible-lint-config",
    "sanity-ignore-file",  # tests/sanity/ignore file
    "plugin",
    "",  # unknown file type
]
# Aliases for deprecated tags/ids and their newer names. Old numeric ids
# (from ansible-lint 4.x) and renamed textual ids both map to the current id.
RENAMED_TAGS = {
    "102": "no-jinja-when",
    "104": "deprecated-bare-vars",
    "105": "deprecated-module",
    "106": "role-name",
    "202": "risky-octal",
    "203": "no-tabs",
    "205": "playbook-extension",
    "206": "jinja[spacing]",
    "207": "jinja[invalid]",
    "208": "risky-file-permissions",
    "301": "no-changed-when",
    "302": "deprecated-command-syntax",
    "303": "command-instead-of-module",
    "304": "inline-env-var",
    "305": "command-instead-of-shell",
    "306": "risky-shell-pipe",
    "401": "latest[git]",
    "402": "latest[hg]",
    "403": "package-latest",
    "404": "no-relative-paths",
    "501": "partial-become",
    "502": "name[missing]",
    "503": "no-handler",
    "504": "deprecated-local-action",
    "505": "missing-import",
    "601": "literal-compare",
    "602": "empty-string-compare",
    "702": "meta-no-tags",
    "703": "meta-incorrect",
    "704": "meta-video-links",
    "911": "syntax-check",
    "deprecated-command-syntax": "no-free-form",
    "fqcn-builtins": "fqcn[action-core]",
    "git-latest": "latest[git]",
    "hg-latest": "latest[hg]",
    "no-jinja-nesting": "jinja[invalid]",
    "no-loop-var-prefix": "loop-var-prefix",
    "unnamed-task": "name[missing]",
    "var-spacing": "jinja[spacing]",
}
# Play-level keys whose values are lists of tasks.
PLAYBOOK_TASK_KEYWORDS = [
    "tasks",
    "handlers",
    "pre_tasks",
    "post_tasks",
]
# Keywords accepted on an entry inside a play's `roles:` list.
PLAYBOOK_ROLE_KEYWORDS = [
    "any_errors_fatal",
    "become",
    "become_exe",
    "become_flags",
    "become_method",
    "become_user",
    "check_mode",
    "collections",
    "connection",
    "debugger",
    "delegate_facts",
    "delegate_to",
    "diff",
    "environment",
    "ignore_errors",
    "ignore_unreachable",
    "module_defaults",
    "name",
    "role",
    "no_log",
    "port",
    "remote_user",
    "run_once",
    "tags",
    "throttle",
    "timeout",
    "vars",
    "when",
]
# Task keys that nest further lists of tasks (block constructs).
NESTED_TASK_KEYS = [
    "block",
    "always",
    "rescue",
]

# Keys that are used internally when parsing YAML/JSON files
SKIPPED_RULES_KEY = "__skipped_rules__"
LINE_NUMBER_KEY = "__line__"
FILENAME_KEY = "__file__"

# All internal annotation keys injected during parsing.
ANNOTATION_KEYS = [
    FILENAME_KEY,
    LINE_NUMBER_KEY,
    SKIPPED_RULES_KEY,
    "__ansible_module__",
    "__ansible_module_original__",
]
# Action names (short and fully qualified) that include other task files
# or playbooks.
INCLUSION_ACTION_NAMES = {
    "include",
    "include_tasks",
    "import_playbook",
    "import_tasks",
    "ansible.builtin.include",
    "ansible.builtin.include_tasks",
    "ansible.builtin.import_playbook",
    "ansible.builtin.import_tasks",
}

# Action names (short and fully qualified) that import or include a role.
ROLE_IMPORT_ACTION_NAMES = {
    "ansible.builtin.import_role",
    "ansible.builtin.include_role",
    "ansible.legacy.import_role",
    "ansible.legacy.include_role",
    "import_role",
    "include_role",
}
# Newer versions of git might fail to run when different file ownership is
# found of repo. One example is on GHA runners executing containerized
# reusable actions, where the mounted volume might have different owner.
#
# https://github.com/ansible/ansible-lint-action/issues/138
GIT_CMD = ["git", "-c", f"safe.directory={Path.cwd()}"]

# Default configuration file locations, in lookup order.
CONFIG_FILENAMES = [
    ".ansible-lint",
    ".config/ansible-lint.yml",
    ".config/ansible-lint.yaml",
]
class States(Enum):
    """Sentinel values used in various places; every member is falsy."""

    NOT_LOADED = "File not loaded"
    LOAD_FAILED = "File failed to load"
    UNKNOWN_DATA = "Unknown data"

    def __bool__(self) -> bool:
        """Make every state evaluate as False in boolean context."""
        return False
| 6,363 | Python | .py | 204 | 27.122549 | 183 | 0.665525 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,221 | cli.py | ansible_ansible-lint/src/ansiblelint/cli.py | """CLI parser setup and helpers."""
from __future__ import annotations
import argparse
import logging
import os
import sys
from argparse import Namespace
from pathlib import Path
from typing import TYPE_CHECKING, Any
from ansiblelint.config import (
DEFAULT_KINDS,
DEFAULT_WARN_LIST,
PROFILES,
Options,
log_entries,
)
from ansiblelint.constants import CUSTOM_RULESDIR_ENVVAR, DEFAULT_RULESDIR, EPILOG, RC
from ansiblelint.file_utils import (
Lintable,
abspath,
expand_path_vars,
find_project_root,
normpath,
)
from ansiblelint.loaders import IGNORE_FILE
from ansiblelint.schemas.main import validate_file_schema
from ansiblelint.yaml_utils import clean_json
if TYPE_CHECKING:
from collections.abc import Callable, Sequence
_logger = logging.getLogger(__name__)

# Config-file keys whose values are lists of filesystem paths and therefore
# need normalization to absolute form (see expand_to_normalized_paths).
_PATH_VARS = [
    "rulesdir",
]
def expand_to_normalized_paths(
    config: dict[str, Any],
    base_dir: str | None = None,
) -> None:
    """Mutate given config normalizing any path values in it.

    Each key listed in _PATH_VARS has its values expanded (env vars, ~) and
    made absolute relative to *base_dir* (defaults to the current directory).
    """
    # config can be None (-c /dev/null)
    if not config:
        return
    root = base_dir or os.getcwd()
    for key in _PATH_VARS:
        if key in config:
            config[key] = [
                abspath(expand_path_vars(raw), base_dir=root)
                for raw in config.pop(key)
            ]
def load_config(config_file: str | None) -> tuple[dict[Any, Any], str | None]:
    """Load configuration from disk.

    Returns the parsed config dict (empty when no file applies) and the
    resolved path of the file used, or None when none was found.
    """
    if config_file == "/dev/null":
        _logger.debug("Skipping config file as it was set to /dev/null")
        return {}, config_file

    config_path: str | None = None
    if config_file:
        config_path = os.path.abspath(config_file)
        if not os.path.exists(config_path):
            _logger.error("Config file not found '%s'", config_path)
            sys.exit(RC.INVALID_CONFIG)
    config_path = config_path or get_config_path()
    if not config_path or not os.path.exists(config_path):
        # a missing default config file should not trigger an error
        return {}, None

    config_lintable = Lintable(
        config_path,
        kind="ansible-lint-config",
        base_kind="text/yaml",
    )
    for error in validate_file_schema(config_lintable):
        _logger.error("Invalid configuration file %s. %s", config_path, error)
        sys.exit(RC.INVALID_CONFIG)

    config = clean_json(config_lintable.data)
    if not isinstance(config, dict):
        msg = "Schema failed to properly validate the config file."
        raise TypeError(msg)

    config["config_file"] = config_path
    # paths in the config are relative to the config file's directory
    expand_to_normalized_paths(config, os.path.dirname(config_path))
    return config, config_path
def get_config_path(config_file: str | None = None) -> str | None:
    """Return local config file.

    Walks upward from the current directory looking for *config_file* (or the
    default config filenames). Stops at the first directory containing a
    `.git` entry so a parent project's config never leaks into this one.
    """
    candidates = (
        [config_file]
        if config_file
        else [
            ".ansible-lint",
            ".config/ansible-lint.yml",
            ".config/ansible-lint.yaml",
        ]
    )
    current = os.getcwd()
    while True:
        for candidate in candidates:
            full_path = os.path.abspath(os.path.join(current, candidate))
            if os.path.exists(full_path):
                return full_path
        if os.path.exists(os.path.abspath(os.path.join(current, ".git"))):
            # Avoid looking outside .git folders as we do not want end-up
            # picking config files from upper level projects if current
            # project has no config.
            return None
        parent, leaf = os.path.split(current)
        if not leaf:
            # reached the filesystem root
            return None
        current = parent
class AbspathArgAction(argparse.Action):
    """Argparse action to convert relative paths to absolute paths."""

    def __call__(
        self,
        parser: argparse.ArgumentParser,
        namespace: Namespace,
        values: str | Sequence[Any] | None,
        option_string: str | None = None,
    ) -> None:
        """Append the resolved form of each received path to the destination."""
        if isinstance(values, str | Path):
            values = [values]
        if not values:
            return
        resolved = [Path(expand_path_vars(str(item))).resolve() for item in values]
        accumulated = getattr(namespace, self.dest, [])
        setattr(namespace, self.dest, accumulated + resolved)
class WriteArgAction(argparse.Action):
    """Argparse action to handle the --fix flag with optional args.

    ``--fix`` may appear bare (stores the ``_default`` sentinel), with a
    comma separated value (``--fix=rule1,tag2``) or repeatedly; values from
    repeated uses accumulate.
    """

    # Sentinel stored when --fix is passed without an explicit value list.
    _default = "__default__"

    # noinspection PyShadowingBuiltins
    def __init__(  # pylint: disable=too-many-arguments,redefined-builtin,too-many-positional-arguments
        self,
        option_strings: list[str],
        dest: str,
        nargs: int | str | None = None,
        const: Any = None,
        default: Any = None,
        type: Callable[[str], Any] | None = None,  # noqa: A002
        choices: list[Any] | None = None,
        *,
        required: bool = False,
        help: str | None = None,  # noqa: A002
        metavar: str | None = None,
    ) -> None:
        """Create the argparse action with WriteArg-specific defaults.

        ``nargs`` and ``const`` are reserved by this action and must not be
        supplied by the caller.
        """
        if nargs is not None:
            msg = "nargs for WriteArgAction must not be set."
            raise ValueError(msg)
        if const is not None:
            msg = "const for WriteArgAction must not be set."
            raise ValueError(msg)
        super().__init__(
            option_strings=option_strings,
            dest=dest,
            nargs="?",  # either 0 (--fix) or 1 (--fix=a,b,c) argument
            const=self._default,  # --fix (no option) implicitly stores this
            default=default,
            type=type,
            choices=choices,
            required=required,
            help=help,
            metavar=metavar,
        )

    def __call__(
        self,
        parser: argparse.ArgumentParser,
        namespace: Namespace,
        values: str | Sequence[Any] | None,
        option_string: str | None = None,
    ) -> None:
        """Accumulate --fix values on the namespace, disambiguating lintables."""
        lintables = getattr(namespace, "lintables", None)
        if not lintables and isinstance(values, str):
            # args are processed in order.
            # If --fix is after lintables, then that is not ambiguous.
            # But if --fix comes first, then it might actually be a lintable.
            maybe_lintable = Path(values)
            if maybe_lintable.exists():
                namespace.lintables = [values]
                values = []
        # `--fix=a,b` arrives as a single string; split it into a list.
        if isinstance(values, str):
            values = values.split(",")
        default = [self.const] if isinstance(self.const, str) else self.const
        previous_values = getattr(namespace, self.dest, default) or default
        if not values:
            values = previous_values
        elif previous_values != default:
            # repeated --fix options accumulate their value lists
            values = previous_values + values  # type: ignore[operator]
        setattr(namespace, self.dest, values)

    @classmethod
    def merge_fix_list_config(
        cls,
        from_file: list[str],
        from_cli: list[str],
    ) -> list[str]:
        """Determine the write_list value based on cli vs config.

        When --fix is not passed from command line the from_cli is an empty list,
        so we use the file.
        When from_cli is not an empty list, we ignore the from_file value.

        Raises RuntimeError when the magic values 'all'/'none' are combined
        with other values; a lone 'none' normalizes to an empty list.
        """
        if not from_file:
            arguments = ["all"] if from_cli == [cls._default] else from_cli
        else:
            arguments = from_file
        for magic_value in ("none", "all"):
            if magic_value in arguments and len(arguments) > 1:
                msg = f"When passing '{magic_value}' to '--fix', you cannot pass other values."
                raise RuntimeError(
                    msg,
                )
        if len(arguments) == 1 and arguments[0] == "none":
            arguments = []
        return arguments
def get_cli_parser() -> argparse.ArgumentParser:
    """Initialize an argument parser.

    Returns a fully configured ``argparse.ArgumentParser`` for the
    ansible-lint command line; parsing itself happens in ``get_config``.
    """
    parser = argparse.ArgumentParser(
        epilog=EPILOG,
        # Avoid rewrapping description and epilog
        formatter_class=argparse.RawTextHelpFormatter,
    )
    # The listing modes (-P/-L/-T) are mutually exclusive.
    listing_group = parser.add_mutually_exclusive_group()
    listing_group.add_argument(
        "-P",
        "--list-profiles",
        dest="list_profiles",
        default=False,
        action="store_true",
        help="List all profiles, no formatting options available.",
    )
    listing_group.add_argument(
        "-L",
        "--list-rules",
        dest="list_rules",
        default=False,
        action="store_true",
        help="List all the rules. For listing rules only the following formats "
        "for argument -f are supported: {brief, full, md} with 'brief' as default.",
    )
    listing_group.add_argument(
        "-T",
        "--list-tags",
        dest="list_tags",
        action="store_true",
        help="List all the tags and the rules they cover. Increase the verbosity level "
        "with `-v` to include 'opt-in' tag and its rules.",
    )
    parser.add_argument(
        "-f",
        "--format",
        dest="format",
        default=None,
        choices=[
            "brief",
            # "plain",
            "full",
            "md",
            "json",
            "codeclimate",
            "quiet",
            "pep8",
            "sarif",
        ],
        help="stdout formatting, json being an alias for codeclimate. (default: %(default)s)",
    )
    parser.add_argument(
        "--sarif-file",
        default=None,
        type=Path,
        help="SARIF output file",
    )
    parser.add_argument(
        "-q",
        dest="quiet",
        default=0,
        action="count",
        help="quieter, reduce verbosity, can be specified twice.",
    )
    parser.add_argument(
        "--profile",
        dest="profile",
        default=None,
        action="store",
        choices=PROFILES.keys(),
        help="Specify which rules profile to be used.",
    )
    parser.add_argument(
        "-p",
        "--parseable",
        dest="parseable",
        default=False,
        action="store_true",
        help="parseable output, same as '-f pep8'",
    )
    parser.add_argument(
        "--project-dir",
        dest="project_dir",
        default=None,
        help="Location of project/repository, autodetected based on location "
        "of configuration file.",
    )
    parser.add_argument(
        "-r",
        "--rules-dir",
        action=AbspathArgAction,
        dest="rulesdir",
        default=[],
        type=Path,
        help="Specify custom rule directories. Add -R "
        f"to keep using embedded rules from {DEFAULT_RULESDIR}",
    )
    parser.add_argument(
        "-R",
        action="store_true",
        default=False,
        dest="use_default_rules",
        help="Keep default rules when using -r",
    )
    parser.add_argument(
        "-s",
        "--strict",
        action="store_true",
        default=False,
        dest="strict",
        help="Return non-zero exit code on warnings as well as errors",
    )
    parser.add_argument(
        "--fix",
        dest="write_list",
        # this is a tri-state argument that takes an optional comma separated list:
        action=WriteArgAction,
        help="Allow ansible-lint to perform auto-fixes, including YAML reformatting. "
        "You can limit the effective rule transforms (the 'write_list') by passing a "
        "keywords 'all' or 'none' or a comma separated list of rule ids or rule tags. "
        "YAML reformatting happens whenever '--fix' or '--fix=' is used. "
        "'--fix' and '--fix=all' are equivalent: they allow all transforms to run. "
        "Presence of --fix in command overrides config file value.",
    )
    parser.add_argument(
        "--show-relpath",
        dest="display_relative_path",
        action="store_false",
        default=True,
        help="Display path relative to CWD",
    )
    parser.add_argument(
        "-t",
        "--tags",
        dest="tags",
        action="append",
        default=[],
        help="only check rules whose id/tags match these values",
    )
    parser.add_argument(
        "-v",
        dest="verbosity",
        action="count",
        help="Increase verbosity level (-vv for more)",
        default=0,
    )
    parser.add_argument(
        "-x",
        "--skip-list",
        dest="skip_list",
        default=[],
        action="append",
        help="only check rules whose id/tags do not match these values. \
            e.g: --skip-list=name,run-once",
    )
    parser.add_argument(
        "--generate-ignore",
        dest="generate_ignore",
        action="store_true",
        default=False,
        help="Generate a text file '.ansible-lint-ignore' that ignores all found violations. Each line contains filename and rule id separated by a space.",
    )
    parser.add_argument(
        "-w",
        "--warn-list",
        dest="warn_list",
        default=[],
        action="append",
        help="only warn about these rules, unless overridden in "
        f"config file. Current version default value is: {', '.join(DEFAULT_WARN_LIST)}",
    )
    parser.add_argument(
        "--enable-list",
        dest="enable_list",
        default=[],
        action="append",
        help="activate optional rules by their tag name",
    )
    # Do not use store_true/store_false because they create opposite defaults.
    parser.add_argument(
        "--nocolor",
        dest="colored",
        action="store_const",
        const=False,
        help="disable colored output, same as NO_COLOR=1",
    )
    parser.add_argument(
        "--force-color",
        dest="colored",
        action="store_const",
        const=True,
        help="Force colored output, same as FORCE_COLOR=1",
    )
    parser.add_argument(
        "--exclude",
        dest="exclude_paths",
        action="extend",
        nargs="+",
        type=str,
        default=[],
        help="path to directories or files to skip. This option is repeatable.",
    )
    parser.add_argument(
        "-c",
        "--config-file",
        dest="config_file",
        help="Specify configuration file to use. By default it will look for '.ansible-lint', '.config/ansible-lint.yml', or '.config/ansible-lint.yaml'",
    )
    parser.add_argument(
        "-i",
        "--ignore-file",
        dest="ignore_file",
        type=Path,
        default=None,
        help=f"Specify ignore file to use. By default it will look for '{IGNORE_FILE.default}' or '{IGNORE_FILE.alternative}'",
    )
    parser.add_argument(
        "--offline",
        dest="offline",
        action="store_const",
        const=True,
        help="Disable installation of requirements.yml and schema refreshing",
    )
    parser.add_argument(
        "--version",
        action="store_true",
    )
    # positional arguments: files/paths to lint; empty enables auto-detection
    parser.add_argument(
        dest="lintables",
        nargs="*",
        action="extend",
        help="One or more files or paths. When missing it will enable auto-detection mode.",
    )

    return parser
def merge_config(file_config: dict[Any, Any], cli_config: Options) -> Options:
    """Combine the file config with the CLI args.

    Mutates and returns ``cli_config``: booleans are OR-ed with the file
    value, scalars fall back to the file value, lists are concatenated, and
    the ``--fix`` write_list is normalized via
    ``WriteArgAction.merge_fix_list_config`` so the magic values 'all'/'none'
    behave identically whether or not a config file is present.
    """
    bools = (
        "display_relative_path",
        "parseable",
        "quiet",
        "strict",
        "use_default_rules",
        "offline",
    )
    # maps lists to their default config values
    lists_map = {
        "exclude_paths": [".cache", ".git", ".hg", ".svn", ".tox"],
        "rulesdir": [],
        "skip_list": [],
        "tags": [],
        "warn_list": DEFAULT_WARN_LIST,
        "mock_modules": [],
        "mock_roles": [],
        "enable_list": [],
        "only_builtins_allow_collections": [],
        "only_builtins_allow_modules": [],
        "supported_ansible_also": [],
        # do not include "write_list" here. See special logic below.
    }

    scalar_map = {
        "loop_var_prefix": None,
        "project_dir": None,
        "profile": None,
        "sarif_file": None,
    }

    if not file_config:
        # use defaults if we don't have a config file and the commandline
        # parameter is not set
        for entry, default in lists_map.items():
            if not getattr(cli_config, entry, None):
                setattr(cli_config, entry, default)
        # Fix: route the no-config-file case through the same normalization
        # as the config-file case, so '--fix=none' becomes [] (previously it
        # stayed as the truthy ["none"]) and invalid combinations of the
        # magic values 'all'/'none' are rejected consistently.
        cli_config.write_list = WriteArgAction.merge_fix_list_config(
            from_file=[],
            from_cli=cli_config.write_list or [],
        )
        return cli_config

    # booleans: CLI wins when set, otherwise take the file value
    for entry in bools:
        file_value = file_config.pop(entry, False)
        v = getattr(cli_config, entry) or file_value
        setattr(cli_config, entry, v)

    # scalars: CLI wins when set, otherwise take the file value (or default)
    for entry, default_scalar in scalar_map.items():
        file_value = file_config.pop(entry, default_scalar)
        v = getattr(cli_config, entry, None) or file_value
        setattr(cli_config, entry, v)

    # if either commandline parameter or config file option is set merge
    # with the other, if neither is set use the default
    for entry, default in lists_map.items():
        if getattr(cli_config, entry, None) or entry in file_config:
            value = getattr(cli_config, entry, [])
            value.extend(file_config.pop(entry, []))
        else:
            value = default
        setattr(cli_config, entry, value)

    # "write_list" config has special merge rules
    entry = "write_list"
    setattr(
        cli_config,
        entry,
        WriteArgAction.merge_fix_list_config(
            from_file=file_config.pop(entry, []),
            from_cli=getattr(cli_config, entry, []) or [],
        ),
    )

    # verbosity levels from file and CLI are additive
    if "verbosity" in file_config:
        cli_config.verbosity = cli_config.verbosity + file_config.pop("verbosity")

    # merge options that can be set only via a file config
    for entry, value in file_config.items():
        setattr(cli_config, entry, value)

    # append default kinds to the custom list
    kinds = file_config.get("kinds", [])
    kinds.extend(DEFAULT_KINDS)
    cli_config.kinds = kinds

    return cli_config
def get_config(arguments: list[str]) -> Options:
    """Extract the config based on given args.

    Parses *arguments*, merges them with any config file found, resolves the
    rules directories and the project root, then returns the final Options.
    """
    parser = get_cli_parser()
    # translate deprecated options
    for i, value in enumerate(arguments):
        if arguments[i].startswith("--write"):
            arguments[i] = value.replace("--write", "--fix")
            _logger.warning(
                "Replaced deprecated '--write' option with '--fix', change you call to avoid future regressions when we remove old option.",
            )
    options = Options(**vars(parser.parse_args(arguments)))

    # docs is not document, being used for internal documentation building
    if options.list_rules and options.format not in [
        None,
        "brief",
        "full",
        "md",
    ]:
        parser.error(
            f"argument -f: invalid choice: '{options.format}'. "
            f"In combination with argument -L only 'brief', "
            f"'rich' or 'md' are supported with -f.",
        )

    # save info about custom config file, as options.config_file may be modified by merge_config
    file_config, options.config_file = load_config(options.config_file)

    config = merge_config(file_config, options)

    options.rulesdirs = get_rules_dirs(
        options.rulesdir,
        use_default=options.use_default_rules,
    )

    # autodetect the project root when not given explicitly
    if not options.project_dir:
        project_dir, method = find_project_root(
            srcs=options.lintables,
            config_file=options.config_file,
        )
        options.project_dir = os.path.expanduser(normpath(project_dir))
        log_entries.append(
            (
                logging.INFO,
                f"Identified [filename]{project_dir}[/] as project root due [bold]{method}[/].",
            ),
        )
    if not options.project_dir or not os.path.exists(options.project_dir):
        msg = f"Failed to determine a valid project_dir: {options.project_dir}"
        raise RuntimeError(msg)

    # expand user home dir in exclude_paths
    options.exclude_paths = [
        os.path.expandvars(os.path.expanduser(p)) for p in options.exclude_paths
    ]

    # Compute final verbosity level by subtracting -q counter.
    options.verbosity -= options.quiet
    return config
def print_help(file: Any = sys.stdout) -> None:
    """Print the CLI help text to the given stream."""
    cli_parser = get_cli_parser()
    cli_parser.print_help(file=file)
def get_rules_dirs(rulesdir: list[Path], *, use_default: bool = True) -> list[Path]:
    """Return a list of rules dirs.

    Custom rule packages (subdirectories of the custom rules root containing
    an ``__init__.py``) always precede the built-in rules directory.
    """
    builtin_dirs = [DEFAULT_RULESDIR]
    custom_root = os.environ.get(
        CUSTOM_RULESDIR_ENVVAR,
        os.path.join(DEFAULT_RULESDIR, "custom"),
    )
    custom_dirs = sorted(
        str(entry.resolve())
        for entry in Path(custom_root).iterdir()
        if entry.is_dir() and (entry / "__init__.py").exists()
    )
    if use_default:
        combined: list[Any] = rulesdir + custom_dirs + builtin_dirs
    elif rulesdir:
        combined = list(rulesdir)
    else:
        combined = custom_dirs + builtin_dirs
    return [Path(entry) for entry in combined]
| 21,286 | Python | .py | 595 | 27.618487 | 156 | 0.600514 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,222 | app.py | ansible_ansible-lint/src/ansiblelint/app.py | """Application."""
from __future__ import annotations
import copy
import itertools
import logging
import os
import sys
from functools import lru_cache
from pathlib import Path
from typing import TYPE_CHECKING, Any
from ansible_compat.runtime import Runtime
from rich.markup import escape
from rich.table import Table
from ansiblelint import formatters
from ansiblelint._mockings import _perform_mockings
from ansiblelint.color import console, console_stderr, render_yaml
from ansiblelint.config import PROFILES, Options, get_version_warning
from ansiblelint.config import options as default_options
from ansiblelint.constants import RC, RULE_DOC_URL
from ansiblelint.loaders import IGNORE_FILE
from ansiblelint.requirements import Reqs
from ansiblelint.stats import SummarizedResults, TagStats
if TYPE_CHECKING:
from ansiblelint._internal.rules import BaseRule
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.runner import LintResult
_logger = logging.getLogger(__package__)

# Module-level App cache; presumably reused across calls — the code that
# populates it is outside this chunk (TODO confirm usage).
_CACHED_APP = None
class App:
"""App class represents an execution of the linter."""
    def __init__(self, options: Options):
        """Construct app run based on already loaded configuration.

        Sanitizes the skip/warn lists, prepares the output formatter, starts
        an isolated ansible runtime and verifies that the installed
        ansible-core version satisfies our requirements (exits with
        RC.INVALID_CONFIG otherwise).
        """
        options.skip_list = _sanitize_list_options(options.skip_list)
        options.warn_list = _sanitize_list_options(options.warn_list)

        self.options = options

        formatter_factory = choose_formatter_factory(options)
        self.formatter = formatter_factory(options.cwd, options.display_relative_path)

        # Without require_module, our _set_collections_basedir may fail
        self.runtime = Runtime(
            isolated=True,
            require_module=True,
            verbosity=options.verbosity,
        )
        self.reqs = Reqs("ansible-lint")
        package = "ansible-core"
        if not self.reqs.matches(
            package,
            str(self.runtime.version),
        ):  # pragma: no cover
            msg = f"ansible-lint requires {package}{','.join(str(x) for x in self.reqs[package])} and current version is {self.runtime.version}"
            logging.error(msg)
            sys.exit(RC.INVALID_CONFIG)

        # pylint: disable=import-outside-toplevel
        # local import, as marked by the pylint disable above — presumably to
        # avoid an import cycle or import-time cost (TODO confirm)
        from ansiblelint.yaml_utils import load_yamllint_config

        self.yamllint_config = load_yamllint_config()
    def render_matches(self, matches: list[MatchError]) -> None:
        """Display given matches (if they are not fixed).

        Chooses between JSON-only output (codeclimate/sarif formatters) and
        line-by-line rendering (ignored matches first, then fatal ones),
        optionally adding GitHub Actions annotations and a SARIF file dump.
        """
        matches = [match for match in matches if not match.fixed]

        if isinstance(
            self.formatter,
            formatters.CodeclimateJSONFormatter | formatters.SarifFormatter,
        ):
            # If formatter CodeclimateJSONFormatter or SarifFormatter is chosen,
            # then print only the matches in JSON
            console.print(
                self.formatter.format_result(matches),
                markup=False,
                highlight=False,
            )
            return

        ignored_matches = [match for match in matches if match.ignored]
        fatal_matches = [match for match in matches if not match.ignored]
        # Displayed ignored matches first
        if ignored_matches:
            _logger.warning(
                "Listing %s violation(s) marked as ignored, likely already known",
                len(ignored_matches),
            )
            for match in ignored_matches:
                if match.ignored:
                    # highlight must be off or apostrophes may produce unexpected results
                    console.print(self.formatter.apply(match), highlight=False)
        if fatal_matches:
            _logger.warning(
                "Listing %s violation(s) that are fatal",
                len(fatal_matches),
            )
            for match in fatal_matches:
                if not match.ignored:
                    console.print(self.formatter.apply(match), highlight=False)

        # If run under GitHub Actions we also want to emit output recognized by it.
        if os.getenv("GITHUB_ACTIONS") == "true" and os.getenv("GITHUB_WORKFLOW"):
            _logger.info(
                "GitHub Actions environment detected, adding annotations output...",
            )
            formatter = formatters.AnnotationsFormatter(self.options.cwd, True)
            for match in itertools.chain(fatal_matches, ignored_matches):
                console_stderr.print(
                    formatter.apply(match),
                    markup=False,
                    highlight=False,
                )

        # If sarif_file is set, we also dump the results to a sarif file.
        if self.options.sarif_file:
            sarif = formatters.SarifFormatter(self.options.cwd, True)
            json = sarif.format_result(matches)
            # Somehow, this gets set as an AnsibleUnicode under unclear circumstances. Force it to be a Path
            sarif_file = Path(self.options.sarif_file)
            sarif_file.write_text(
                json,
                encoding="utf-8",
            )
def count_results(self, matches: list[MatchError]) -> SummarizedResults:
"""Count failures and warnings in matches."""
result = SummarizedResults()
for match in matches:
# any ignores match counts as a warning
if match.ignored:
result.warnings += 1
continue
# tag can include a sub-rule id: `yaml[document-start]`
# rule.id is the generic rule id: `yaml`
# *rule.tags is the list of the rule's tags (categories): `style`
if match.tag not in result.tag_stats:
result.tag_stats[match.tag] = TagStats(
tag=match.tag,
count=1,
associated_tags=match.rule.tags,
)
else:
result.tag_stats[match.tag].count += 1
if {match.tag, match.rule.id, *match.rule.tags}.isdisjoint(
self.options.warn_list,
):
# not in warn_list
if match.fixed:
result.fixed_failures += 1
else:
result.failures += 1
else:
result.tag_stats[match.tag].warning = True
if match.fixed:
result.fixed_warnings += 1
else:
result.warnings += 1
return result
@staticmethod
def count_lintables(files: set[Lintable]) -> tuple[int, int]:
"""Count total and modified files."""
files_count = len(files)
changed_files_count = len([file for file in files if file.updated])
return files_count, changed_files_count
@staticmethod
def _get_matched_skippable_rules(
matches: list[MatchError],
) -> dict[str, BaseRule]:
"""Extract the list of matched rules, if skippable, from the list of matches."""
matches_unignored = [match for match in matches if not match.ignored]
# match.tag is more specialized than match.rule.id
matched_rules = {
match.tag or match.rule.id: match.rule for match in matches_unignored
}
# remove unskippable rules from the list
for rule_id in list(matched_rules.keys()):
if "unskippable" in matched_rules[rule_id].tags:
matched_rules.pop(rule_id)
return matched_rules
def report_outcome(
self,
result: LintResult,
*,
mark_as_success: bool = False,
) -> int:
"""Display information about how to skip found rules.
Returns exit code, 2 if errors were found, 0 when only warnings were found.
"""
msg = ""
summary = self.count_results(result.matches)
files_count, changed_files_count = self.count_lintables(result.files)
matched_rules = self._get_matched_skippable_rules(result.matches)
if matched_rules and self.options.generate_ignore:
# ANSIBLE_LINT_IGNORE_FILE environment variable overrides default
# dumping location in linter and is not documented or supported. We
# use this only for testing purposes.
ignore_file_path = Path(
os.environ.get("ANSIBLE_LINT_IGNORE_FILE", IGNORE_FILE.default),
)
console_stderr.print(f"Writing ignore file to {ignore_file_path}")
lines = set()
for rule in result.matches:
lines.add(f"{rule.filename} {rule.tag}\n")
with ignore_file_path.open("w", encoding="utf-8") as ignore_file:
ignore_file.write(
"# This file contains ignores rule violations for ansible-lint\n",
)
ignore_file.writelines(sorted(lines))
elif matched_rules and not self.options.quiet:
console_stderr.print(
"Read [link=https://ansible.readthedocs.io/projects/lint/configuring/#ignoring-rules-for-entire-files]documentation[/link] for instructions on how to ignore specific rule violations.",
)
# Do not deprecate the old tags just yet. Why? Because it is not currently feasible
# to migrate old tags to new tags. There are a lot of things out there that still
# use ansible-lint 4 (for example, Ansible Galaxy and Automation Hub imports). If we
# replace the old tags, those tools will report warnings. If we do not replace them,
# ansible-lint 5 will report warnings.
#
# We can do the deprecation once the ecosystem caught up at least a bit.
# for k, v in used_old_tags.items():
# _logger.warning(
# "error in the future.",
# k,
# v,
if self.options.write_list and "yaml" in self.options.skip_list:
_logger.warning(
"You specified '--fix', but no files can be modified "
"because 'yaml' is in 'skip_list'.",
)
if mark_as_success and summary.failures:
mark_as_success = False
if not self.options.quiet:
console_stderr.print(render_yaml(msg))
self.report_summary(
summary,
changed_files_count,
files_count,
is_success=mark_as_success,
)
if mark_as_success:
if not files_count:
# success without any file being analyzed is reported as failure
# to match match, preventing accidents where linter was running
# not doing anything due to misconfiguration.
_logger.critical(
"Linter finished without analyzing any file, check configuration and arguments given.",
)
return RC.NO_FILES_MATCHED
return RC.SUCCESS
return RC.VIOLATIONS_FOUND
def report_summary( # pylint: disable=too-many-locals # noqa: C901
self,
summary: SummarizedResults,
changed_files_count: int,
files_count: int,
is_success: bool,
) -> None:
"""Report match and file counts."""
# sort the stats by profiles
idx = 0
rule_order = {}
for profile, profile_config in PROFILES.items():
for rule in profile_config["rules"]:
rule_order[rule] = (idx, profile)
idx += 1
_logger.debug("Determined rule-profile order: %s", rule_order)
failed_profiles = set()
for tag, tag_stats in summary.tag_stats.items():
if tag in rule_order:
tag_stats.order, tag_stats.profile = rule_order.get(tag, (idx, ""))
elif "[" in tag:
tag_stats.order, tag_stats.profile = rule_order.get(
tag.split("[")[0],
(idx, ""),
)
if tag_stats.profile:
failed_profiles.add(tag_stats.profile)
summary.sort()
if changed_files_count:
console_stderr.print(f"Modified {changed_files_count} files.")
# determine which profile passed
summary.passed_profile = ""
passed_profile_count = 0
for profile in PROFILES:
if profile in failed_profiles:
break
if profile != summary.passed_profile:
summary.passed_profile = profile
passed_profile_count += 1
stars = ""
if summary.tag_stats:
table = Table(
title="Rule Violation Summary",
collapse_padding=True,
box=None,
show_lines=False,
)
table.add_column("count", justify="right")
table.add_column("tag")
table.add_column("profile")
table.add_column("rule associated tags")
for tag, stats in summary.tag_stats.items():
table.add_row(
str(stats.count),
f"[link={RULE_DOC_URL}{ tag.split('[')[0] }]{escape(tag)}[/link]",
stats.profile,
f"{', '.join(stats.associated_tags)}{' (warning)' if stats.warning else ''}",
style="yellow" if stats.warning else "red",
)
# rate stars for the top 5 profiles (min would not get
rating = 5 - (len(PROFILES.keys()) - passed_profile_count)
if 0 < rating < 6:
stars = f" Rating: {rating}/5 star"
console_stderr.print(table)
console_stderr.print()
msg = "[green]Passed[/]" if is_success else "[red][bold]Failed[/][/]"
msg += f": {summary.failures} failure(s), {summary.warnings} warning(s)"
if summary.fixed:
msg += f", and fixed {summary.fixed} issue(s)"
msg += f" on {files_count} files."
# Now we add some information about required and passed profile
if self.options.profile:
msg += f" Profile '{self.options.profile}' was required"
if summary.passed_profile:
if summary.passed_profile == self.options.profile:
msg += ", and it passed."
else:
msg += f", but '{summary.passed_profile}' profile passed."
else:
msg += "."
elif summary.passed_profile:
msg += f" Last profile that met the validation criteria was '{summary.passed_profile}'."
if stars:
msg += stars
# on offline mode and when run under pre-commit we do not want to
# check for updates.
if not self.options.offline and os.environ.get("PRE_COMMIT", "0") != "1":
version_warning = get_version_warning()
if version_warning:
msg += f"\n{version_warning}"
console_stderr.print(msg)
def choose_formatter_factory(
options_list: Options,
) -> type[formatters.BaseFormatter[Any]]:
"""Select an output formatter based on the incoming command line arguments."""
r: type[formatters.BaseFormatter[Any]] = formatters.Formatter
if options_list.format == "quiet":
r = formatters.QuietFormatter
elif options_list.format in ("json", "codeclimate"):
r = formatters.CodeclimateJSONFormatter
elif options_list.format == "sarif":
r = formatters.SarifFormatter
elif options_list.parseable or options_list.format == "pep8":
r = formatters.ParseableFormatter
return r
def _sanitize_list_options(tag_list: list[str]) -> list[str]:
"""Normalize list options."""
# expand comma separated entries
tags = set()
for tag in tag_list:
tags.update(str(tag).split(","))
# remove duplicates, and return as sorted list
return sorted(set(tags))
@lru_cache
def get_app(*, offline: bool | None = None, cached: bool = False) -> App:
"""Return the application instance, caching the return value."""
# Avoids ever running the app initialization twice if cached argument
# is mentioned.
# pylint: disable=global-statement
global _CACHED_APP
if cached:
if offline is not None:
msg = (
"get_app should never be called with other arguments when cached=True."
)
raise RuntimeError(msg)
if cached and _CACHED_APP is not None:
return _CACHED_APP
if offline is None:
offline = default_options.offline
if default_options.offline != offline:
options = copy.deepcopy(default_options)
options.offline = offline
else:
options = default_options
app = App(options=options)
if cached:
_CACHED_APP = app
# Make linter use the cache dir from compat
options.cache_dir = app.runtime.cache_dir
role_name_check = 0
if "role-name" in app.options.warn_list:
role_name_check = 1
elif "role-name" in app.options.skip_list:
role_name_check = 2
# mocking must happen before prepare_environment or galaxy install might
# fail.
_perform_mockings(options=app.options)
app.runtime.prepare_environment(
install_local=(not offline),
offline=offline,
role_name_check=role_name_check,
)
return app
| 17,386 | Python | .py | 393 | 33.239186 | 200 | 0.601299 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,223 | skip_utils.py | ansible_ansible-lint/src/ansiblelint/skip_utils.py | # (c) 2019-2020, Ansible by Red Hat
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Utils related to inline skipping of rules."""
from __future__ import annotations
import collections.abc
import logging
import re
import warnings
from functools import cache
from itertools import product
from typing import TYPE_CHECKING, Any
# Module 'ruamel.yaml' does not explicitly export attribute 'YAML'; implicit reexport disabled
from ruamel.yaml import YAML
from ruamel.yaml.composer import ComposerError
from ruamel.yaml.scanner import ScannerError
from ruamel.yaml.tokens import CommentToken
from ansiblelint.config import used_old_tags
from ansiblelint.constants import (
NESTED_TASK_KEYS,
PLAYBOOK_TASK_KEYWORDS,
RENAMED_TAGS,
SKIPPED_RULES_KEY,
)
from ansiblelint.errors import LintWarning, WarnSource
if TYPE_CHECKING:
from collections.abc import Generator, Sequence
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
from ansiblelint.file_utils import Lintable
_logger = logging.getLogger(__name__)
_found_deprecated_tags: set[str] = set()
_noqa_comment_re = re.compile(r"^# noqa(\s|:)")
# playbook: Sequence currently expects only instances of one of the two
# classes below but we should consider avoiding this chimera.
# ruamel.yaml.comments.CommentedSeq
# ansible.parsing.yaml.objects.AnsibleSequence
def get_rule_skips_from_line(
line: str,
lintable: Lintable,
lineno: int = 1,
) -> list[str]:
"""Return list of rule ids skipped via comment on the line of yaml."""
_before_noqa, _noqa_marker, noqa_text = line.partition("# noqa")
result = []
for v in noqa_text.lstrip(" :").split():
if v in RENAMED_TAGS:
tag = RENAMED_TAGS[v]
if v not in _found_deprecated_tags:
msg = f"Replaced outdated tag '{v}' with '{tag}', replace it to avoid future errors"
warnings.warn(
message=msg,
category=LintWarning,
source=WarnSource(
filename=lintable,
lineno=lineno,
tag="warning[outdated-tag]",
message=msg,
),
stacklevel=0,
)
_found_deprecated_tags.add(v)
v = tag
result.append(v)
return result
def append_skipped_rules(
pyyaml_data: AnsibleBaseYAMLObject,
lintable: Lintable,
) -> AnsibleBaseYAMLObject:
"""Append 'skipped_rules' to individual tasks or single metadata block.
For a file, uses 2nd parser (ruamel.yaml) to pull comments out of
yaml subsets, check for '# noqa' skipped rules, and append any skips to the
original parser (pyyaml) data relied on by remainder of ansible-lint.
:param pyyaml_data: file text parsed via ansible and pyyaml.
:param file_text: raw file text.
:param file_type: type of file: tasks, handlers or meta.
:returns: original pyyaml_data altered with a 'skipped_rules' list added \
to individual tasks, or added to the single metadata block.
"""
try:
yaml_skip = _append_skipped_rules(pyyaml_data, lintable)
except RuntimeError:
# Notify user of skip error, do not stop, do not change exit code
_logger.exception("Error trying to append skipped rules")
return pyyaml_data
if not yaml_skip:
return pyyaml_data
return yaml_skip
@cache
def load_data(file_text: str) -> Any:
"""Parse ``file_text`` as yaml and return parsed structure.
This is the main culprit for slow performance, each rule asks for loading yaml again and again
ideally the ``maxsize`` on the decorator above MUST be great or equal total number of rules
:param file_text: raw text to parse
:return: Parsed yaml
"""
yaml = YAML()
# Ruamel role is not to validate the yaml file, so we ignore duplicate keys:
yaml.allow_duplicate_keys = True
try:
return yaml.load(file_text)
except ComposerError:
# load fails on multi-documents with ComposerError exception
return yaml.load_all(file_text)
def _append_skipped_rules(
pyyaml_data: AnsibleBaseYAMLObject,
lintable: Lintable,
) -> AnsibleBaseYAMLObject | None:
# parse file text using 2nd parser library
try:
ruamel_data = load_data(lintable.content)
except ScannerError as exc:
_logger.debug(
"Ignored loading skipped rules from file %s due to: %s",
lintable,
exc,
)
# For unparsable file types, we return empty skip lists
return None
skipped_rules = _get_rule_skips_from_yaml(ruamel_data, lintable)
if lintable.kind in [
"yaml",
"requirements",
"vars",
"meta",
"reno",
"test-meta",
"galaxy",
]:
# AnsibleMapping, dict
if hasattr(pyyaml_data, "get"):
pyyaml_data[SKIPPED_RULES_KEY] = skipped_rules
# AnsibleSequence, list
elif (
not isinstance(pyyaml_data, str)
and isinstance(pyyaml_data, collections.abc.Sequence)
and skipped_rules
):
pyyaml_data[0][SKIPPED_RULES_KEY] = skipped_rules
return pyyaml_data
# create list of blocks of tasks or nested tasks
if lintable.kind in ("tasks", "handlers"):
ruamel_task_blocks = ruamel_data
pyyaml_task_blocks = pyyaml_data
elif lintable.kind == "playbook":
try:
pyyaml_task_blocks = _get_task_blocks_from_playbook(pyyaml_data)
ruamel_task_blocks = _get_task_blocks_from_playbook(ruamel_data)
except (AttributeError, TypeError):
return pyyaml_data
else:
# For unsupported file types, we return empty skip lists
return None
# get tasks from blocks of tasks
pyyaml_tasks = _get_tasks_from_blocks(pyyaml_task_blocks)
ruamel_tasks = _get_tasks_from_blocks(ruamel_task_blocks)
# append skipped_rules for each task
for ruamel_task, pyyaml_task in zip(ruamel_tasks, pyyaml_tasks, strict=False):
# ignore empty tasks
if not pyyaml_task and not ruamel_task:
continue
# AnsibleUnicode or str
if isinstance(pyyaml_task, str):
continue
if pyyaml_task.get("name") != ruamel_task.get("name"):
msg = "Error in matching skip comment to a task"
raise RuntimeError(msg)
pyyaml_task[SKIPPED_RULES_KEY] = _get_rule_skips_from_yaml(
ruamel_task,
lintable,
)
return pyyaml_data
def _get_task_blocks_from_playbook(playbook: Sequence[Any]) -> list[Any]:
"""Return parts of playbook that contains tasks, and nested tasks.
:param playbook: playbook yaml from yaml parser.
:returns: list of task dictionaries.
"""
task_blocks = []
for play, key in product(playbook, PLAYBOOK_TASK_KEYWORDS):
task_blocks.extend(play.get(key, []))
return task_blocks
def _get_tasks_from_blocks(task_blocks: Sequence[Any]) -> Generator[Any, None, None]:
"""Get list of tasks from list made of tasks and nested tasks."""
if not task_blocks:
return
def get_nested_tasks(task: Any) -> Generator[Any, None, None]:
if not task or not is_nested_task(task):
return
for k in NESTED_TASK_KEYS:
if task.get(k):
if hasattr(task[k], "get"):
continue
for subtask in task[k]:
yield from get_nested_tasks(subtask)
yield subtask
for task in task_blocks:
yield from get_nested_tasks(task)
yield task
def _get_rule_skips_from_yaml(
yaml_input: Sequence[Any],
lintable: Lintable,
) -> Sequence[Any]:
"""Traverse yaml for comments with rule skips and return list of rules."""
yaml_comment_obj_strings = []
if isinstance(yaml_input, str):
return []
def traverse_yaml(obj: Any) -> None:
for entry in obj.ca.items.values():
for v in entry:
if isinstance(v, CommentToken):
comment_str = v.value
if _noqa_comment_re.match(comment_str):
line = v.start_mark.line + 1 # ruamel line numbers start at 0
lintable.line_skips[line].update(
get_rule_skips_from_line(
comment_str.strip(),
lintable=lintable,
lineno=line,
),
)
yaml_comment_obj_strings.append(str(obj.ca.items))
if isinstance(obj, dict):
for val in obj.values():
if isinstance(val, dict | list):
traverse_yaml(val)
elif isinstance(obj, list):
for element in obj:
if isinstance(element, dict | list):
traverse_yaml(element)
if isinstance(yaml_input, dict | list):
traverse_yaml(yaml_input)
rule_id_list = []
for comment_obj_str in yaml_comment_obj_strings:
for line in comment_obj_str.split(r"\n"):
rule_id_list.extend(get_rule_skips_from_line(line, lintable=lintable))
return [normalize_tag(tag) for tag in rule_id_list]
def normalize_tag(tag: str) -> str:
"""Return current name of tag."""
if tag in RENAMED_TAGS:
used_old_tags[tag] = RENAMED_TAGS[tag]
return RENAMED_TAGS[tag]
return tag
def is_nested_task(task: dict[str, Any]) -> bool:
"""Check if task includes block/always/rescue."""
# Cannot really trust the input
if isinstance(task, str):
return False
return any(task.get(key) for key in NESTED_TASK_KEYS)
| 10,879 | Python | .py | 263 | 33.197719 | 100 | 0.648083 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,224 | generate_docs.py | ansible_ansible-lint/src/ansiblelint/generate_docs.py | """Utils to generate rules documentation."""
import logging
from collections.abc import Iterable
from rich import box
from rich.console import RenderableType, group
from rich.markdown import Markdown
from rich.table import Table
from ansiblelint.config import PROFILES
from ansiblelint.constants import RULE_DOC_URL
from ansiblelint.rules import RulesCollection, TransformMixin
DOC_HEADER = """
# Default Rules
(lint_default_rules)=
Below you can see the list of default rules Ansible Lint use to evaluate playbooks and roles:
"""
_logger = logging.getLogger(__name__)
def rules_as_str(rules: RulesCollection) -> RenderableType:
"""Return rules as string."""
table = Table(show_header=False, header_style="title", box=box.SIMPLE)
for rule in rules.alphabetical():
if issubclass(rule.__class__, TransformMixin):
rule.tags.insert(0, "autofix")
tag = f"[dim] ({', '.join(rule.tags)})[/dim]" if rule.tags else ""
table.add_row(
f"[link={RULE_DOC_URL}{rule.id}/]{rule.id}[/link]",
rule.shortdesc + tag,
)
return table
def rules_as_md(rules: RulesCollection) -> str:
"""Return md documentation for a list of rules."""
result = DOC_HEADER
for rule in rules.alphabetical():
# because title == rule.id we get the desired labels for free
# and we do not have to insert `(target_header)=`
title = f"{rule.id}"
if rule.help:
if not rule.help.startswith(f"# {rule.id}"): # pragma: no cover
msg = f"Rule {rule.__class__} markdown help does not start with `# {rule.id}` header.\n{rule.help}"
raise RuntimeError(msg)
result += f"\n\n{rule.help}"
else:
description = rule.description
if rule.link:
description += f" [more]({rule.link})"
result += f"\n\n## {title}\n\n**{rule.shortdesc}**\n\n{description}"
# Safety net for preventing us from adding autofix to rules and
# forgetting to mention it inside their documentation.
if "autofix" in rule.tags and "autofix" not in rule.description:
msg = f"Rule {rule.id} is invalid because it has 'autofix' tag but this ability is not documented in its description."
raise RuntimeError(msg)
return result
@group()
def rules_as_rich(rules: RulesCollection) -> Iterable[Table]:
"""Print documentation for a list of rules, returns empty string."""
width = max(16, *[len(rule.id) for rule in rules])
for rule in rules.alphabetical():
table = Table(show_header=True, header_style="title", box=box.MINIMAL)
table.add_column(rule.id, style="dim", width=width)
table.add_column(Markdown(rule.shortdesc))
description = rule.help or rule.description
if rule.link:
description += f" [(more)]({rule.link})"
table.add_row("description", Markdown(description))
if rule.version_added:
table.add_row("version_added", rule.version_added)
if rule.tags:
table.add_row("tags", ", ".join(rule.tags))
if rule.severity:
table.add_row("severity", rule.severity)
yield table
def profiles_as_md(*, header: bool = False, docs_url: str = RULE_DOC_URL) -> str:
"""Return markdown representation of supported profiles."""
result = ""
if header:
result += """<!---
Do not manually edit, generated from generate_docs.py
-->
# Profiles
Ansible-lint profiles gradually increase the strictness of rules as your Ansible content lifecycle.
!!! note
Rules with `*` in the suffix are not yet implemented but are documented with linked GitHub issues.
"""
for name, profile in PROFILES.items():
extends = ""
if profile.get("extends", None):
extends = (
f" It extends [{profile['extends']}](#{profile['extends']}) profile."
)
result += f"## {name}\n\n{profile['description']}{extends}\n"
for rule, rule_data in profile["rules"].items():
if "[" in rule:
url = f"{docs_url}{rule.split('[')[0]}/"
else:
url = f"{docs_url}{rule}/"
if not rule_data:
result += f"- [{rule}]({url})\n"
else:
result += f"- [{rule}]({rule_data['url']})\n"
result += "\n"
return result
def profiles_as_rich() -> Markdown:
"""Return rich representation of supported profiles."""
return Markdown(profiles_as_md())
| 4,568 | Python | .py | 103 | 36.563107 | 130 | 0.628101 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,225 | runner.py | ansible_ansible-lint/src/ansiblelint/runner.py | """Runner implementation."""
from __future__ import annotations
import json
import logging
import math
import multiprocessing
import multiprocessing.pool
import os
import re
import subprocess
import tempfile
import warnings
from dataclasses import dataclass
from fnmatch import fnmatch
from functools import cache
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Any
from ansible.errors import AnsibleError
from ansible.parsing.splitter import split_args
from ansible.parsing.yaml.constructor import AnsibleMapping
from ansible.plugins.loader import add_all_plugin_dirs
from ansible_compat.runtime import AnsibleWarning
import ansiblelint.skip_utils
import ansiblelint.utils
from ansiblelint.app import App, get_app
from ansiblelint.constants import States
from ansiblelint.errors import LintWarning, MatchError, WarnSource
from ansiblelint.file_utils import Lintable, expand_dirs_in_lintables
from ansiblelint.logger import timed_info
from ansiblelint.rules.syntax_check import OUTPUT_PATTERNS
from ansiblelint.text import strip_ansi_escape
from ansiblelint.utils import (
PLAYBOOK_DIR,
HandleChildren,
parse_examples_from_plugin,
template,
)
if TYPE_CHECKING:
from collections.abc import Callable, Generator
from ansiblelint._internal.rules import BaseRule
from ansiblelint.config import Options
from ansiblelint.constants import FileType
from ansiblelint.rules import RulesCollection
_logger = logging.getLogger(__name__)
@dataclass
class LintResult:
"""Class that tracks result of linting."""
matches: list[MatchError]
files: set[Lintable]
class Runner:
"""Runner class performs the linting process."""
# pylint: disable=too-many-arguments,too-many-instance-attributes
def __init__(
self,
*lintables: Lintable | str | Path,
rules: RulesCollection,
tags: frozenset[Any] = frozenset(),
skip_list: list[str] | None = None,
exclude_paths: list[str] | None = None,
verbosity: int = 0,
checked_files: set[Lintable] | None = None,
project_dir: str | None = None,
_skip_ansible_syntax_check: bool = False,
) -> None:
"""Initialize a Runner instance."""
self.rules = rules
self.lintables: set[Lintable] = set()
self.project_dir = os.path.abspath(project_dir) if project_dir else None
self.skip_ansible_syntax_check = _skip_ansible_syntax_check
if skip_list is None:
skip_list = []
if exclude_paths is None:
exclude_paths = []
# Assure consistent type and configure given lintables as explicit (so
# excludes paths would not apply on them).
for item in lintables:
if not isinstance(item, Lintable):
item = Lintable(item)
item.explicit = True
self.lintables.add(item)
# Expand folders (roles) to their components
expand_dirs_in_lintables(self.lintables)
self.tags = tags
self.skip_list = skip_list
self._update_exclude_paths(exclude_paths)
self.verbosity = verbosity
if checked_files is None:
checked_files = set()
self.checked_files = checked_files
self.app = get_app(cached=True)
def _update_exclude_paths(self, exclude_paths: list[str]) -> None:
if exclude_paths:
# These will be (potentially) relative paths
paths = ansiblelint.file_utils.expand_paths_vars(exclude_paths)
# Since ansiblelint.utils.find_children returns absolute paths,
# and the list of files we create in `Runner.run` can contain both
# relative and absolute paths, we need to cover both bases.
self.exclude_paths = paths + [os.path.abspath(p) for p in paths]
else:
self.exclude_paths = []
def is_excluded(self, lintable: Lintable) -> bool:
"""Verify if a file path should be excluded."""
# Any will short-circuit as soon as something returns True, but will
# be poor performance for the case where the path under question is
# not excluded.
# Exclusions should be evaluated only using absolute paths in order
# to work correctly.
# Explicit lintables are never excluded
if lintable.explicit:
return False
abs_path = str(lintable.abspath)
if self.project_dir and not abs_path.startswith(self.project_dir):
_logger.debug(
"Skipping %s as it is outside of the project directory.",
abs_path,
)
return True
return any(
abs_path.startswith(path)
or lintable.path.match(path)
or fnmatch(str(abs_path), path)
or fnmatch(str(lintable), path)
for path in self.exclude_paths
)
def run(self) -> list[MatchError]:
"""Execute the linting process."""
matches: list[MatchError] = []
with warnings.catch_warnings(record=True) as captured_warnings:
warnings.simplefilter("always")
matches = self._run()
for warn in captured_warnings:
# Silence Ansible runtime warnings that are unactionable
# https://github.com/ansible/ansible-lint/issues/3216
if warn.category is AnsibleWarning and isinstance(warn.source, dict):
msg = warn.source["msg"]
if msg.startswith(
"Falling back to Ansible unique filter as Jinja2 one failed",
):
continue
# For the moment we are ignoring deprecation warnings as Ansible
# modules outside current content can generate them and user
# might not be able to do anything about them.
if warn.category is DeprecationWarning:
continue
if warn.category is LintWarning:
filename: None | Lintable = None
if isinstance(warn.source, WarnSource):
match = MatchError(
message=warn.source.message or warn.category.__name__,
rule=self.rules["warning"],
lintable=Lintable(warn.source.filename.filename),
tag=warn.source.tag,
lineno=warn.source.lineno,
)
else:
filename = warn.source
match = MatchError(
message=(
warn.message if isinstance(warn.message, str) else "?"
),
rule=self.rules["warning"],
lintable=Lintable(str(filename)),
)
matches.append(match)
continue
_logger.warning(
"%s:%s %s %s",
warn.filename,
warn.lineno or 1,
warn.category.__name__,
warn.message,
)
return matches
def _run(self) -> list[MatchError]:
"""Run the linting (inner loop)."""
files: list[Lintable] = []
matches: list[MatchError] = []
# remove exclusions
for lintable in self.lintables.copy():
if self.is_excluded(lintable):
_logger.debug("Excluded %s", lintable)
self.lintables.remove(lintable)
continue
if isinstance(lintable.data, States) and lintable.exc:
lintable.exc.__class__.__name__.lower()
matches.append(
MatchError(
lintable=lintable,
message=str(lintable.exc),
details=str(lintable.exc.__cause__),
rule=self.rules["load-failure"],
tag=f"load-failure[{lintable.exc.__class__.__name__.lower()}]",
),
)
lintable.stop_processing = True
# identify missing files/folders
if not lintable.path.exists():
matches.append(
MatchError(
lintable=lintable,
message="File or directory not found.",
rule=self.rules["load-failure"],
tag="load-failure[not-found]",
),
)
# -- phase 1 : syntax check in parallel --
if not self.skip_ansible_syntax_check:
# app = get_app(cached=True)
def worker(lintable: Lintable) -> list[MatchError]:
return self._get_ansible_syntax_check_matches(
lintable=lintable,
app=self.app,
)
for lintable in self.lintables:
if (
lintable.kind not in ("playbook", "role")
or lintable.stop_processing
):
continue
files.append(lintable)
# avoid resource leak warning, https://github.com/python/cpython/issues/90549
# pylint: disable=unused-variable
global_resource = multiprocessing.Semaphore() # noqa: F841
pool = multiprocessing.pool.ThreadPool(processes=threads())
return_list = pool.map(worker, files, chunksize=1)
pool.close()
pool.join()
for data in return_list:
matches.extend(data)
matches = self._filter_excluded_matches(matches)
# -- phase 2 ---
# do our processing only when ansible syntax check passed in order
# to avoid causing runtime exceptions. Our processing is not as
# resilient to be able process garbage.
matches.extend(self._emit_matches(files))
# remove duplicates from files list
files = [value for n, value in enumerate(files) if value not in files[:n]]
for file in self.lintables:
if file in self.checked_files or not file.kind or file.failed():
continue
_logger.debug(
"Examining %s of type %s",
ansiblelint.file_utils.normpath(file.path),
file.kind,
)
matches.extend(
self.rules.run(file, tags=set(self.tags), skip_list=self.skip_list),
)
# update list of checked files
self.checked_files.update(self.lintables)
# remove any matches made inside excluded files
matches = self._filter_excluded_matches(matches)
return sorted(set(matches))
# pylint: disable=too-many-locals
def _get_ansible_syntax_check_matches(
self,
lintable: Lintable,
app: App,
) -> list[MatchError]:
"""Run ansible syntax check and return a list of MatchError(s)."""
try:
default_rule: BaseRule = self.rules["syntax-check"]
except ValueError:
# if syntax-check is not loaded, we do not perform any syntax check,
# that might happen during testing
return []
fh = None
results = []
if lintable.kind not in ("playbook", "role"):
return []
with timed_info(
"Executing syntax check on %s %s",
lintable.kind,
lintable.path,
):
if lintable.kind == "role":
playbook_text = f"""
---
- name: Temporary playbook for role syntax check
hosts: localhost
tasks:
- ansible.builtin.import_role:
name: {lintable.path.expanduser()!s}
"""
# pylint: disable=consider-using-with
fh = tempfile.NamedTemporaryFile( # noqa: SIM115
mode="w",
suffix=".yml",
prefix="play",
)
fh.write(playbook_text)
fh.flush()
playbook_path = fh.name
else:
playbook_path = str(lintable.path.expanduser())
# To avoid noisy warnings we pass localhost as current inventory:
# [WARNING]: No inventory was parsed, only implicit localhost is available
# [WARNING]: provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'
cmd = [
"ansible-playbook",
"-i",
"localhost,",
"--syntax-check",
playbook_path,
]
if app.options.extra_vars:
cmd.extend(["--extra-vars", json.dumps(app.options.extra_vars)])
# To reduce noisy warnings like
# CryptographyDeprecationWarning: Blowfish has been deprecated
# https://github.com/paramiko/paramiko/issues/2038
env = app.runtime.environ.copy()
env["PYTHONWARNINGS"] = "ignore"
# Avoid execution failure if user customized any_unparsed_is_failed setting
# https://github.com/ansible/ansible-lint/issues/3650
env["ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED"] = "False"
run = subprocess.run( # noqa: S603
cmd,
stdin=subprocess.PIPE,
capture_output=True,
shell=False, # needed when command is a list
text=True,
check=False,
env=env,
)
if run.returncode != 0:
message = None
filename = lintable
lineno = 1
column = None
ignore_rc = False
stderr = strip_ansi_escape(run.stderr)
stdout = strip_ansi_escape(run.stdout)
if stderr:
details = stderr
if stdout:
details += "\n" + stdout
else:
details = stdout
for pattern in OUTPUT_PATTERNS:
rule = default_rule
match = re.search(pattern.regex, stderr)
if match:
groups = match.groupdict()
title = groups.get("title", match.group(0))
details = groups.get("details", "")
lineno = int(groups.get("line", 1))
if (
"filename" in groups
and str(lintable.path.absolute()) != groups["filename"]
and lintable.filename != groups["filename"]
):
# avoids creating a new lintable object if the filename
# is matching as this might prevent Lintable.failed()
# feature from working well.
filename = Lintable(groups["filename"])
else:
filename = lintable
column = int(groups.get("column", 1))
if (
pattern.tag in ("unknown-module", "specific")
and app.options.nodeps
):
ignore_rc = True
else:
results.append(
MatchError(
message=title,
lintable=filename,
lineno=lineno,
column=column,
rule=rule,
details=details,
tag=f"{rule.id}[{pattern.tag}]",
),
)
break
if not results and not ignore_rc:
rule = self.rules["internal-error"]
message = (
f"Unexpected error code {run.returncode} from "
f"execution of: {' '.join(cmd)}"
)
results.append(
MatchError(
message=message,
lintable=filename,
lineno=lineno,
column=column,
rule=rule,
details=details,
tag="",
),
)
if fh:
fh.close()
return results
def _filter_excluded_matches(self, matches: list[MatchError]) -> list[MatchError]:
return [
match
for match in matches
if not self.is_excluded(match.lintable)
and match.tag not in match.lintable.line_skips[match.lineno]
]
    def _emit_matches(self, files: list[Lintable]) -> Generator[MatchError, None, None]:
        """Expand the lintable set with discovered children, yielding load errors.

        Every newly discovered, non-excluded child is added both to
        ``self.lintables`` and appended to ``files`` so the caller sees the
        grown work list. Failures to load a file are yielded as MatchError
        objects tagged with the "load-failure" rule.
        """
        visited: set[Lintable] = set()
        # self.lintables may grow while we walk it (children add more files),
        # so iterate to a fixed point: stop once a pass discovers nothing new.
        while visited != self.lintables:
            for lintable in self.lintables - visited:
                visited.add(lintable)
                if not lintable.path.exists():
                    continue
                try:
                    children = self.find_children(lintable)
                    for child in children:
                        if self.is_excluded(child):
                            continue
                        self.lintables.add(child)
                        files.append(child)
                except MatchError as exc:
                    if not exc.filename:  # pragma: no branch
                        exc.filename = str(lintable.path)
                    exc.rule = self.rules["load-failure"]
                    yield exc
                except AttributeError:
                    # Malformed content while traversing children.
                    yield MatchError(
                        lintable=lintable,
                        rule=self.rules["load-failure"],
                    )
    def find_children(self, lintable: Lintable) -> list[Lintable]:
        """Traverse children of a single file or folder.

        Roles are wrapped into a synthetic one-play mapping; plugins are
        delegated to :meth:`plugin_children`; only playbook/tasks kinds are
        parsed as YAML. Returns the list of child lintables discovered.
        """
        if not lintable.path.exists():
            return []
        playbook_dir = str(lintable.path.parent)
        ansiblelint.utils.set_collections_basedir(lintable.path.parent)
        add_all_plugin_dirs(playbook_dir or ".")
        if lintable.kind == "role":
            # Synthesize a minimal play that includes the role so role files
            # can be discovered through the normal play traversal below.
            playbook_ds = AnsibleMapping({"roles": [{"role": str(lintable.path)}]})
        elif lintable.kind == "plugin":
            return self.plugin_children(lintable)
        elif lintable.kind not in ("playbook", "tasks"):
            return []
        else:
            try:
                playbook_ds = ansiblelint.utils.parse_yaml_from_file(str(lintable.path))
            except AnsibleError as exc:
                msg = f"Loading {lintable.filename} caused an {type(exc).__name__} exception: {exc}, file was ignored."
                logging.exception(msg)
                return []
        results = []
        # playbook_ds can be an AnsibleUnicode string, which we consider invalid
        if isinstance(playbook_ds, str):
            raise MatchError(lintable=lintable, rule=self.rules["load-failure"])
        for item in ansiblelint.utils.playbook_items(playbook_ds):
            # if lintable.kind not in ["playbook"]:
            for child in self.play_children(
                lintable,
                item,
                lintable.kind,
                playbook_dir,
            ):
                # We avoid processing parametrized children
                path_str = str(child.path)
                if "$" in path_str or "{{" in path_str:
                    continue

                # Repair incorrect paths obtained when old syntax was used, like:
                # - include: simpletask.yml tags=nginx
                # Keep leading tokens up to the first key=value pair.
                valid_tokens = []
                for token in split_args(path_str):
                    if "=" in token:
                        break
                    valid_tokens.append(token)
                path = " ".join(valid_tokens)
                if path != path_str:
                    child.path = Path(path)
                    child.name = child.path.name

                results.append(child)
        return results
def play_children(
self,
lintable: Lintable,
item: tuple[str, Any],
parent_type: FileType,
playbook_dir: str,
) -> list[Lintable]:
"""Flatten the traversed play tasks."""
# pylint: disable=unused-argument
basedir = lintable.path.parent
handlers = HandleChildren(self.rules, app=self.app)
delegate_map: dict[
str,
Callable[[Lintable, Any, Any, FileType], list[Lintable]],
] = {
"tasks": handlers.taskshandlers_children,
"pre_tasks": handlers.taskshandlers_children,
"post_tasks": handlers.taskshandlers_children,
"block": handlers.taskshandlers_children,
"include": handlers.include_children,
"ansible.builtin.include": handlers.include_children,
"import_playbook": handlers.import_playbook_children,
"ansible.builtin.import_playbook": handlers.import_playbook_children,
"roles": handlers.roles_children,
"dependencies": handlers.roles_children,
"handlers": handlers.taskshandlers_children,
"include_tasks": handlers.include_children,
"ansible.builtin.include_tasks": handlers.include_children,
"import_tasks": handlers.include_children,
"ansible.builtin.import_tasks": handlers.include_children,
}
(k, v) = item
add_all_plugin_dirs(str(basedir.resolve()))
if k in delegate_map and v:
v = template(
basedir,
v,
{"playbook_dir": PLAYBOOK_DIR or str(basedir.resolve())},
fail_on_undefined=False,
)
return delegate_map[k](lintable, k, v, parent_type)
return []
def plugin_children(self, lintable: Lintable) -> list[Lintable]:
"""Collect lintable sections from plugin file."""
offset, content = parse_examples_from_plugin(lintable)
if not content:
# No examples, nothing to see here
return []
examples = Lintable(
name=lintable.name,
content=content,
kind="yaml",
base_kind="text/yaml",
parent=lintable,
)
examples.line_offset = offset
# pylint: disable=consider-using-with
examples.file = NamedTemporaryFile( # noqa: SIM115
mode="w+",
suffix=f"_{lintable.path.name}.yaml",
)
examples.file.write(content)
examples.file.flush()
examples.filename = examples.file.name
examples.path = Path(examples.file.name)
return [examples]
@cache
def threads() -> int:
    """Determine how many threads to use.

    Inside containers we want to respect limits imposed.

    When present /sys/fs/cgroup/cpu.max can contain something like:
    $ podman/docker run -it --rm --cpus 1.5 ubuntu:latest cat /sys/fs/cgroup/cpu.max
    150000 100000
    # "max 100000" is returned when no limits are set.

    See: https://github.com/python/cpython/issues/80235
    See: https://github.com/python/cpython/issues/70879
    """
    host_cpus = multiprocessing.cpu_count()
    # Cgroup CPU bandwidth limit available in Linux since 2.6 kernel
    v2_path = "/sys/fs/cgroup/cpu.max"
    v1_quota_path = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
    v1_period_path = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
    if os.path.exists(v2_path):
        # cgroup v2
        # https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
        with open(v2_path, encoding="utf-8") as stream:
            quota_raw, period_raw = stream.read().strip().split()
    elif os.path.exists(v1_quota_path) and os.path.exists(v1_period_path):
        # cgroup v1
        # https://www.kernel.org/doc/html/latest/scheduler/sched-bwc.html#management
        with open(v1_quota_path, encoding="utf-8") as stream:
            quota_raw = stream.read().strip()
        with open(v1_period_path, encoding="utf-8") as stream:
            period_raw = stream.read().strip()
    else:
        # No Cgroup CPU bandwidth limit (e.g. non-Linux platform)
        return host_cpus

    if quota_raw == "max":
        # No active Cgroup quota on a Cgroup-capable platform
        return host_cpus

    quota = int(quota_raw)
    period = int(period_raw)
    if quota > 0 and period > 0:
        return math.ceil(quota / period)
    # Setting a negative cpu_quota_us value is a valid way to disable
    # cgroup CPU bandwidth limits
    return host_cpus
def get_matches(rules: RulesCollection, options: Options) -> LintResult:
    """Get matches for given rules and options.

    :param rules: Rules to use for linting.
    :param options: Options to use for linting.
    :returns: LintResult containing matches and checked files.
    """
    lintables = ansiblelint.utils.get_lintables(opts=options, args=options.lintables)

    # Reject configurations that try to silence unskippable rules.
    silenced_entries = (*options.skip_list, *options.warn_list)
    for rule in rules:
        if "unskippable" not in rule.tags:
            continue
        for entry in silenced_entries:
            if entry == rule.id or entry.startswith(f"{rule.id}["):
                msg = f"Rule '{rule.id}' is unskippable, you cannot use it in 'skip_list' or 'warn_list'. Still, you could exclude the file."
                raise RuntimeError(msg)

    checked_files: set[Lintable] = set()
    runner = Runner(
        *lintables,
        rules=rules,
        tags=frozenset(options.tags),
        skip_list=options.skip_list,
        exclude_paths=options.exclude_paths,
        verbosity=options.verbosity,
        checked_files=checked_files,
        project_dir=options.project_dir,
        _skip_ansible_syntax_check=options._skip_ansible_syntax_check,  # noqa: SLF001
    )
    # Assure we do not print duplicates and the order is consistent
    matches = sorted(set(runner.run()))

    # Convert reported filenames into human readable ones, so we hide the
    # fact we used temporary files when processing input from stdin.
    for match in matches:
        for lintable in lintables:
            if match.filename == lintable.filename:
                match.filename = lintable.name
                break

    return LintResult(matches=matches, files=checked_files)
| 26,832 | Python | .py | 622 | 30.083601 | 145 | 0.560568 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,226 | utils.py | ansible_ansible-lint/src/ansiblelint/utils.py | # Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# spell-checker:ignore dwim
# pylint: disable=too-many-lines
"""Generic utility helpers."""
from __future__ import annotations
import ast
import contextlib
import inspect
import logging
import os
import re
from collections.abc import ItemsView, Iterable, Iterator, Mapping, Sequence
from dataclasses import _MISSING_TYPE, dataclass, field
from functools import cache, lru_cache
from pathlib import Path
from typing import TYPE_CHECKING, Any
import ruamel.yaml.parser
import yaml
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.parsing.plugin_docs import read_docstring
from ansible.parsing.splitter import split_args
from ansible.parsing.yaml.constructor import AnsibleConstructor, AnsibleMapping
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleSequence
from ansible.plugins.loader import (
PluginLoadContext,
action_loader,
add_all_plugin_dirs,
module_loader,
)
from ansible.template import Templar
from ansible.utils.collection_loader import AnsibleCollectionConfig
from yaml.composer import Composer
from yaml.representer import RepresenterError
from ansiblelint._internal.rules import (
AnsibleParserErrorRule,
RuntimeErrorRule,
)
from ansiblelint.app import App, get_app
from ansiblelint.config import Options, options
from ansiblelint.constants import (
ANNOTATION_KEYS,
FILENAME_KEY,
INCLUSION_ACTION_NAMES,
LINE_NUMBER_KEY,
NESTED_TASK_KEYS,
PLAYBOOK_TASK_KEYWORDS,
ROLE_IMPORT_ACTION_NAMES,
SKIPPED_RULES_KEY,
FileType,
)
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable, discover_lintables
from ansiblelint.skip_utils import is_nested_task
from ansiblelint.text import has_jinja, is_fqcn, removeprefix
if TYPE_CHECKING:
from ansiblelint.rules import RulesCollection
# ansible-lint doesn't need/want to know about encrypted secrets, so we pass a
# string as the password to enable such yaml files to be opened and parsed
# successfully.
DEFAULT_VAULT_PASSWORD = "x"  # noqa: S105
# Matches fully-qualified "namespace.collection.name" play/role references.
COLLECTION_PLAY_RE = re.compile(r"^[\w\d_]+\.[\w\d_]+\.[\w\d_]+$")
# Optional override for the playbook directory exposed to templating.
PLAYBOOK_DIR = os.environ.get("ANSIBLE_PLAYBOOK_DIR", None)

_logger = logging.getLogger(__name__)
def parse_yaml_from_file(filepath: str) -> AnsibleBaseYAMLObject:
    """Extract a decrypted YAML object from file."""
    loader = DataLoader()
    # set_vault_password is not available on every supported ansible-core
    # version, hence the hasattr guard.
    if hasattr(loader, "set_vault_password"):
        loader.set_vault_password(DEFAULT_VAULT_PASSWORD)
    return loader.load_from_file(filepath)
def path_dwim(basedir: str, given: str) -> str:
    """Convert a given path do-what-I-mean style."""
    loader = DataLoader()
    loader.set_basedir(basedir)
    resolved = loader.path_dwim(given)
    return str(resolved)
def ansible_templar(basedir: Path, templatevars: Any) -> Templar:
    """Create an Ansible Templar using templatevars.

    ``basedir`` is the directory containing the lintable file, so for a
    role's task file it has the form ``roles/some_role/tasks`` while the
    template/file search path is ``roles/some_role/{files,templates}``.
    Stripping the trailing ``tasks`` component aligns the two.
    """
    if basedir.name == "tasks":
        basedir = basedir.parent
    loader = DataLoader()
    loader.set_basedir(basedir)
    return Templar(loader, variables=templatevars)
def mock_filter(left: Any, *args: Any, **kwargs: Any) -> Any:  # noqa: ARG001
    """Mock a filter that can take any combination of args and kwargs.

    Behaves as an identity filter: ``x | filter(y, z)`` evaluates to ``x``,
    e.g. ``{{ foo | ansible.utils.ipaddr('address') }}`` yields ``foo``.

    :param left: The left hand side of the filter
    :param args: The args passed to the filter
    :param kwargs: The kwargs passed to the filter
    :return: The left hand side of the filter
    """
    # pylint: disable=unused-argument
    return left
def ansible_template(
    basedir: Path,
    varname: Any,
    templatevars: Any,
    **kwargs: Any,
) -> Any:
    """Render a templated string by mocking missing filters.

    In the case of a missing lookup, ansible core does an early exit
    when disable_lookup=True but this happens after the jinja2 syntax already
    passed, so we return the original string as if it had been templated.

    In the case of a missing filter, extract the missing filter plugin name
    from the ansible error, 'Could not load "filter"'. Then mock the filter
    and template the string again. The range allows for up to 10 unknown
    filters in succession.

    :param basedir: The directory containing the lintable file
    :param varname: The string to be templated
    :param templatevars: The variables to be used in the template
    :param kwargs: Additional arguments to be passed to the templating engine
    :return: The templated string or None
    :raises: AnsibleError if the filter plugin cannot be extracted or the
        string could not be templated in 10 attempts
    """
    # pylint: disable=too-many-locals
    filter_error = "template error while templating string:"
    lookup_error = "was found, however lookups were disabled from templating"
    re_filter_fqcn = re.compile(r"\w+\.\w+\.\w+")
    re_filter_in_err = re.compile(r"Could not load \"(\w+)\"")
    re_valid_filter = re.compile(r"^\w+(\.\w+\.\w+)?$")
    templar = ansible_templar(basedir=basedir, templatevars=templatevars)
    kwargs["disable_lookups"] = True
    # Retry templating, mocking one missing filter per iteration (max 10).
    for _i in range(10):
        try:
            templated = templar.template(varname, **kwargs)
        except AnsibleError as exc:
            if lookup_error in exc.message:
                return varname
            if exc.message.startswith(filter_error):
                # Single-pass "first matching extraction wins" construct:
                # short name from the quoted error, then an FQCN anywhere in
                # the message, then a last-resort split on single quotes.
                while True:
                    match = re_filter_in_err.search(exc.message)
                    if match:
                        missing_filter = match.group(1)
                        break
                    match = re_filter_fqcn.search(exc.message)
                    if match:
                        missing_filter = match.group(0)
                        break
                    missing_filter = exc.message.split("'")[1]
                    break

                if not re_valid_filter.match(missing_filter):
                    err = f"Could not parse missing filter name from error message: {exc.message}"
                    _logger.warning(err)
                    raise
                # Register the identity mock so the next attempt can proceed.
                templar.environment.filters._delegatee[missing_filter] = mock_filter  # fmt: skip # noqa: SLF001
                # Record the mocked filter so we can warn the user
                if missing_filter not in options.mock_filters:
                    _logger.debug("Mocking missing filter %s", missing_filter)
                    options.mock_filters.append(missing_filter)
                continue
            raise
        return templated
    return None
# NOTE(review): appears to map a play section keyword to an action-type
# label ("task"/"handler"/"meta") — confirm against the call sites, which
# are not in this file section.
BLOCK_NAME_TO_ACTION_TYPE_MAP = {
    "tasks": "task",
    "handlers": "handler",
    "pre_tasks": "task",
    "post_tasks": "task",
    "block": "meta",
    "rescue": "meta",
    "always": "meta",
}
def tokenize(value: str) -> tuple[list[str], dict[str, str]]:
    """Parse a string task invocation into positional args and kwargs."""
    # Skip split_args for trivial values: it would fail on something like
    # "task_include: path with space.yml".
    if value and "=" not in value:
        return ([value], {})

    positional: list[str] = []
    named: dict[str, str] = {}
    for chunk in split_args(value):
        if "=" in chunk:
            key, _, val = chunk.partition("=")
            named[key] = val
        else:
            positional.append(chunk)
    return (positional, named)
def playbook_items(pb_data: AnsibleBaseYAMLObject) -> ItemsView:  # type: ignore[type-arg]
    """Return a list of items from within the playbook."""
    if isinstance(pb_data, dict):
        return pb_data.items()
    if not pb_data:
        return []  # type: ignore[return-value]

    # A play sequence may contain None entries, which is weird but currently
    # allowed by Ansible, so skip them instead of crashing.
    # https://github.com/ansible/ansible-lint/issues/849
    flattened = []
    for play in pb_data:
        if play:
            flattened.extend(play.items())
    return flattened  # type: ignore[return-value]
def set_collections_basedir(basedir: Path) -> None:
    """Set the playbook directory as playbook_paths for the collection loader."""
    # Ansible expects only absolute paths inside `playbook_paths` and will
    # produce weird errors if we use a relative one.
    resolved = basedir.resolve()
    AnsibleCollectionConfig.playbook_paths = str(resolved)
def template(
    basedir: Path,
    value: Any,
    variables: Any,
    *,
    fail_on_error: bool = False,
    fail_on_undefined: bool = False,
    **kwargs: str,
) -> Any:
    """Attempt rendering a value with known vars."""
    merged_kwargs = dict(kwargs, fail_on_undefined=fail_on_undefined)
    try:
        return ansible_template(
            basedir.resolve(),
            value,
            variables,
            **merged_kwargs,
        )
    # Hack to skip the following exception when using to_json filter on a variable.  # noqa: FIX004
    # I guess the filter doesn't like empty vars...
    except (AnsibleError, ValueError, RepresenterError):
        # Templating failed; keep the original value unless the caller
        # explicitly asked for failures to propagate.
        if fail_on_error:
            raise
    return value
@dataclass
class HandleChildren:
    """Parse task, roles and children."""

    # Rule collection used to attribute MatchError objects raised here.
    rules: RulesCollection = field(init=True, repr=False)
    app: App

    def include_children(
        self,
        lintable: Lintable,
        k: str,
        v: Any,
        parent_type: FileType,
    ) -> list[Lintable]:
        """Resolve the file referenced by an include/import task.

        :param lintable: File containing the include.
        :param k: The include keyword (e.g. ``include_tasks``).
        :param v: The include argument (string or mapping with ``file``).
        :param parent_type: Kind assigned to the resolved child.
        :returns: A single-element list with the child, or [] when it
            cannot be resolved.
        """
        basedir = str(lintable.path.parent)
        # handle special case include_tasks: name=filename.yml
        if k in INCLUSION_ACTION_NAMES and isinstance(v, dict) and "file" in v:
            v = v["file"]

        # we cannot really parse any jinja2 in includes, so we ignore them
        if not v or "{{" in v:
            return []

        # handle include: filename.yml tags=blah
        (args, kwargs) = tokenize(v)

        if args:
            file = args[0]
        elif "file" in kwargs:
            file = kwargs["file"]
        else:
            return []

        # Walk up the directory tree until the include target exists.
        result = path_dwim(basedir, file)
        while basedir not in ["", "/"]:
            if os.path.exists(result):
                break
            basedir = os.path.dirname(basedir)
            result = path_dwim(basedir, file)
        return [Lintable(result, kind=parent_type)]

    def taskshandlers_children(
        self,
        lintable: Lintable,
        k: str,
        v: None | Any,
        parent_type: FileType,
    ) -> list[Lintable]:
        """Collect children of a tasks/handlers list.

        :raises MatchError: when the block value is not a list of tasks.
        """
        basedir = str(lintable.path.parent)
        results: list[Lintable] = []
        if v is None or isinstance(v, int | str):
            raise MatchError(
                message="A malformed block was encountered while loading a block.",
                rule=RuntimeErrorRule(),
                lintable=lintable,
            )
        for task_handler in v:
            # ignore empty tasks, `-`
            if not task_handler:
                continue

            # If the task is an include/import, record its child and move on;
            # a LookupError means it was not an inclusion task, in which case
            # the suppressed block falls through to the checks below.
            with contextlib.suppress(LookupError):
                children = _get_task_handler_children_for_tasks_or_playbooks(
                    task_handler,
                    basedir,
                    k,
                    parent_type,
                )
                results.append(children)
                continue

            if any(x in task_handler for x in ROLE_IMPORT_ACTION_NAMES):
                task_handler = normalize_task_v2(
                    Task(task_handler, filename=str(lintable.path)),
                )
                self._validate_task_handler_action_for_role(task_handler["action"])

                name = task_handler["action"].get("name")
                if has_jinja(name):
                    # we cannot deal with dynamic imports
                    continue
                results.extend(
                    self.roles_children(lintable, k, [name], parent_type),
                )
                continue

            if "block" not in task_handler:
                continue

            # Recurse into block/rescue/always sections.
            for elem in ("block", "rescue", "always"):
                if elem in task_handler:
                    results.extend(
                        self.taskshandlers_children(
                            lintable,
                            k,
                            task_handler[elem],
                            parent_type,
                        ),
                    )

        return results

    def _validate_task_handler_action_for_role(self, th_action: dict[str, Any]) -> None:
        """Verify that the task handler action is valid for role include."""
        module = th_action["__ansible_module__"]
        lintable = Lintable(
            self.rules.options.lintables[0] if self.rules.options.lintables else ".",
        )
        if "name" not in th_action:
            raise MatchError(
                message=f"Failed to find required 'name' key in {module!s}",
                rule=self.rules.rules[0],
                lintable=lintable,
            )

        if not isinstance(th_action["name"], str):
            raise MatchError(
                message=f"Value assigned to 'name' key on '{module!s}' is not a string.",
                rule=self.rules.rules[1],
                lintable=lintable,
            )

    def roles_children(
        self,
        lintable: Lintable,
        k: str,
        v: Sequence[Any],
        parent_type: FileType,
    ) -> list[Lintable]:
        """Collect lintables for each role referenced in a roles/dependencies list.

        Roles tagged with ``skip_ansible_lint`` are ignored.
        """
        # pylint: disable=unused-argument # parent_type)
        basedir = str(lintable.path.parent)
        results: list[Lintable] = []
        if not v or not isinstance(v, Iterable):
            # typing does not prevent junk from being passed in
            return results
        for role in v:
            if isinstance(role, dict):
                if "role" in role or "name" in role:
                    if "tags" not in role or "skip_ansible_lint" not in role["tags"]:
                        results.extend(
                            _look_for_role_files(
                                basedir,
                                role.get("role", role.get("name")),
                            ),
                        )
                elif k != "dependencies":
                    msg = f'role dict {role} does not contain a "role" or "name" key'
                    raise SystemExit(msg)
            else:
                results.extend(_look_for_role_files(basedir, role))
        return results

    def import_playbook_children(
        self,
        lintable: Lintable,
        k: str,  # pylint: disable=unused-argument
        v: Any,
        parent_type: FileType,
    ) -> list[Lintable]:
        """Include import_playbook children."""

        def append_playbook_path(loc: str, playbook_name: str) -> None:
            # Candidate location of a collection-shipped playbook.
            possible_paths.append(
                Path(
                    path_dwim(
                        os.path.expanduser(loc),
                        os.path.join(
                            "ansible_collections",
                            namespace_name,
                            collection_name,
                            "playbooks",
                            playbook_name,
                        ),
                    ),
                ),
            )

        # import_playbook only accepts a string as argument (no dict syntax)
        if not isinstance(v, str):
            return []

        possible_paths = []
        namespace_name, collection_name, playbook_name = parse_fqcn(v)
        if namespace_name and collection_name:
            for loc in get_app(cached=True).runtime.config.collections_paths:
                append_playbook_path(loc, f"{playbook_name}.yml")
                append_playbook_path(loc, f"{playbook_name}.yaml")
        else:
            possible_paths.append(lintable.path.parent / v)

        # NOTE(review): if possible_paths ends up empty (e.g. FQCN reference
        # with no configured collections_paths) `msg` below would be unbound —
        # confirm collections_paths is always non-empty.
        for possible_path in possible_paths:
            if not possible_path.exists():
                msg = f"Failed to find {v} playbook."
            elif not self.app.runtime.has_playbook(
                str(possible_path),
            ):
                msg = f"Failed to load {v} playbook due to failing syntax check."
                break
            elif namespace_name and collection_name:
                # don't lint foreign playbook
                return []
            else:
                return [Lintable(possible_path, kind=parent_type)]
        logging.error(msg)
        return []
def _get_task_handler_children_for_tasks_or_playbooks(
    task_handler: dict[str, Any],
    basedir: str,
    k: Any,
    parent_type: FileType,
) -> Lintable:
    """Try to get children of taskhandler for include/import tasks/playbooks.

    :raises LookupError: when the task is not an inclusion task; callers rely
        on this to detect "not an include" (see taskshandlers_children).
    """
    child_type = k if parent_type == "playbook" else parent_type

    # Include the FQCN task names as this happens before normalize
    for task_handler_key in INCLUSION_ACTION_NAMES:
        # A KeyError simply means this task does not use task_handler_key;
        # move on to the next candidate keyword.
        with contextlib.suppress(KeyError):
            # ignore empty tasks
            if not task_handler or isinstance(task_handler, str):  # pragma: no branch
                continue

            file_name = ""
            action_args = task_handler[task_handler_key]
            if isinstance(action_args, str):
                (args, kwargs) = tokenize(action_args)
                if len(args) == 1:
                    file_name = args[0]
                elif kwargs.get("file", None):
                    file_name = kwargs["file"]
                else:
                    # ignore invalid data (syntax checking is outside our scope)
                    continue

            if isinstance(action_args, Mapping) and action_args.get("file", None):
                file_name = action_args["file"]

            if not file_name:
                # ignore invalid data (syntax checking is outside our scope)
                continue
            # Walk up the directory tree until the referenced file exists.
            f = path_dwim(basedir, file_name)
            while basedir not in ["", "/"]:
                if os.path.exists(f):
                    break
                basedir = os.path.dirname(basedir)
                f = path_dwim(basedir, file_name)
            return Lintable(f, kind=child_type)

    msg = f'The node contains none of: {", ".join(sorted(INCLUSION_ACTION_NAMES))}'
    raise LookupError(msg)
def _rolepath(basedir: str, role: str) -> str | None:
    """Resolve a role reference to an existing directory, or return None.

    Tries standard relative locations, the configured default roles path and
    — for FQCN references — the configured collection paths.
    """
    role_path = None
    namespace_name, collection_name, role_name = parse_fqcn(role)

    possible_paths = [
        # if included from a playbook
        path_dwim(basedir, os.path.join("roles", role_name)),
        path_dwim(basedir, role_name),
        # if included from roles/[role]/meta/main.yml
        path_dwim(basedir, os.path.join("..", "..", "..", "roles", role_name)),
        path_dwim(basedir, os.path.join("..", "..", role_name)),
        # if checking a role in the current directory
        path_dwim(basedir, os.path.join("..", role_name)),
    ]

    for loc in get_app(cached=True).runtime.config.default_roles_path:
        loc = os.path.expanduser(loc)
        possible_paths.append(path_dwim(loc, role_name))

    if namespace_name and collection_name:
        # FQCN role: also look inside installed collections.
        for loc in get_app(cached=True).runtime.config.collections_paths:
            loc = os.path.expanduser(loc)
            possible_paths.append(
                path_dwim(
                    loc,
                    os.path.join(
                        "ansible_collections",
                        namespace_name,
                        collection_name,
                        "roles",
                        role_name,
                    ),
                ),
            )

    possible_paths.append(path_dwim(basedir, ""))

    # First existing directory wins.
    for path_option in possible_paths:  # pragma: no branch
        if os.path.isdir(path_option):
            role_path = path_option
            break

    if role_path:  # pragma: no branch
        add_all_plugin_dirs(role_path)

    return role_path
def _look_for_role_files(basedir: str, role: str) -> list[Lintable]:
    """Collect YAML lintables found under the standard directories of a role."""
    role_path = _rolepath(basedir, role)
    if not role_path:  # pragma: no branch
        return []

    lintables: list[Lintable] = []
    for kind in ["tasks", "meta", "handlers", "vars", "defaults"]:
        kind_dir = os.path.join(role_path, kind)
        for folder, _, files in os.walk(kind_dir):
            lintables.extend(
                Lintable(os.path.join(folder, file))
                for file in files
                if file.lower().endswith((".yml", ".yaml"))
            )
    return lintables
def _sanitize_task(task: dict[str, Any]) -> dict[str, Any]:
    """Return a stripped-off task structure compatible with new Ansible.

    This helper takes a copy of the incoming task and drops
    any internally used keys from it.
    """
    result = task.copy()
    # task is an AnsibleMapping which inherits from OrderedDict; popping with
    # a default removes our internal bookkeeping keys when present.
    for internal_key in (SKIPPED_RULES_KEY, FILENAME_KEY, LINE_NUMBER_KEY):
        result.pop(internal_key, None)
    return result
def _extract_ansible_parsed_keys_from_task(
result: dict[str, Any],
task: dict[str, Any],
keys: tuple[str, ...],
) -> dict[str, Any]:
"""Return a dict with existing key in task."""
for k, v in list(task.items()):
if k in keys:
# we don't want to re-assign these values, which were
# determined by the ModuleArgsParser() above
continue
result[k] = v
return result
def normalize_task_v2(task: Task) -> dict[str, Any]:
    """Ensure tasks have a normalized action key and strings are converted to python objects.

    :param task: Task to normalize; only its ``raw_task`` and ``filename``
        are read.
    :returns: A dict with an ``action`` mapping containing
        ``__ansible_module__`` (short name) and
        ``__ansible_module_original__`` (as written), plus parsed arguments.
    :raises MatchError: when ModuleArgsParser rejects the task.
    :raises TypeError: when the parsed action is not a string.
    """
    raw_task = task.raw_task
    result: dict[str, Any] = {}
    ansible_parsed_keys = ("action", "local_action", "args", "delegate_to")

    if is_nested_task(raw_task):
        _extract_ansible_parsed_keys_from_task(result, raw_task, ansible_parsed_keys)
        # Add dummy action for block/always/rescue statements
        result["action"] = {
            "__ansible_module__": "block/always/rescue",
            "__ansible_module_original__": "block/always/rescue",
        }

        return result

    sanitized_task = _sanitize_task(raw_task)
    mod_arg_parser = ModuleArgsParser(sanitized_task)

    try:
        action, arguments, result["delegate_to"] = mod_arg_parser.parse(
            skip_action_validation=options.skip_action_validation,
        )
    except AnsibleParserError as exc:
        raise MatchError(
            rule=AnsibleParserErrorRule(),
            message=exc.message,
            lintable=Lintable(task.filename or ""),
            lineno=raw_task.get(LINE_NUMBER_KEY, 1),
        ) from exc

    # denormalize shell -> command conversion
    if "_uses_shell" in arguments:
        action = "shell"
        del arguments["_uses_shell"]

    # Keep all keys that ModuleArgsParser did not already consume.
    _extract_ansible_parsed_keys_from_task(
        result,
        raw_task,
        (*ansible_parsed_keys, action),
    )

    if not isinstance(action, str):
        msg = f"Task actions can only be strings, got {action}"
        raise TypeError(msg)
    action_unnormalized = action
    # convert builtin fqn calls to short forms because most rules know only
    # about short calls but in the future we may switch the normalization to do
    # the opposite. Mainly we currently consider normalized the module listing
    # used by `ansible-doc -t module -l 2>/dev/null`
    action = removeprefix(action, "ansible.builtin.")
    result["action"] = {
        "__ansible_module__": action,
        "__ansible_module_original__": action_unnormalized,
    }

    result["action"].update(arguments)
    return result
def task_to_str(task: dict[str, Any]) -> str:
    """Make a string identifier for the given task.

    Prefers the task name; otherwise builds a short
    "module key=value ..." description from the normalized action.
    """
    name = task.get("name")
    if name:
        return str(name)
    action = task.get("action")
    if isinstance(action, str) or not isinstance(action, dict):
        return str(action)
    args = [
        f"{k}={v}"
        for (k, v) in action.items()
        if k
        not in [
            "__ansible_module__",
            "__ansible_module_original__",
            "_raw_params",
            LINE_NUMBER_KEY,
            FILENAME_KEY,
        ]
    ]

    _raw_params = action.get("_raw_params", [])
    if isinstance(_raw_params, list):
        # Bugfix: append each raw parameter as a whole token; the previous
        # args.extend(str(item)) added the string one character at a time.
        for item in _raw_params:
            args.append(str(item))
    else:
        args.append(_raw_params)

    return f"{action['__ansible_module__']} {' '.join(args)}"
def extract_from_list(
    blocks: AnsibleBaseYAMLObject,
    candidates: list[str],
    *,
    recursive: bool = False,
) -> list[Any]:
    """Get action tasks from block structures."""
    collected: list[Any] = []
    for block in blocks:
        if not isinstance(block, dict):
            continue
        for candidate in candidates:
            if candidate not in block:
                continue
            value = block[candidate]
            if isinstance(value, list):
                tagged = add_action_type(value, candidate)
                if recursive:
                    # Descend into nested block structures as well.
                    tagged.extend(
                        extract_from_list(tagged, candidates, recursive=recursive),
                    )
                collected.extend(tagged)
            elif value is not None:
                msg = f"Key '{candidate}' defined, but bad value: '{value!s}'"
                raise RuntimeError(msg)
    return collected
@dataclass
class Task(dict[str, Any]):
    """Class that represents a task from linter point of view.

    raw_task:
        When looping through the tasks in the file, each "raw_task" is minimally
        processed to include these special keys: __line__, __file__, skipped_rules.
    normalized_task:
        When each raw_task is "normalized", action shorthand (strings) get parsed
        by ansible into python objects and the action key gets normalized. If the task
        should be skipped (skipped is True) or normalizing it fails (error is not None)
        then this is just the raw_task instead of a normalized copy.
    skip_tags:
        List of tags found to be skipped, from tags block or noqa comments
    error:
        This is normally None. It will be a MatchError when the raw_task cannot be
        normalized due to an AnsibleParserError.
    position: Any
    """

    raw_task: dict[str, Any]
    filename: str = ""
    # Lazily populated by the normalized_task property; never set directly.
    _normalized_task: dict[str, Any] | _MISSING_TYPE = field(init=False, repr=False)
    error: MatchError | None = None
    position: Any = None

    @property
    def name(self) -> str | None:
        """Return the name of the task, or None when absent.

        :raises RuntimeError: when the name exists but is not a string.
        """
        name = self.raw_task.get("name", None)
        if name is not None and not isinstance(name, str):
            msg = "Task name can only be a string."
            raise RuntimeError(msg)
        return name

    @property
    def action(self) -> str:
        """Return the resolved action name as originally written."""
        action_name = self.normalized_task["action"]["__ansible_module_original__"]
        if not isinstance(action_name, str):
            msg = "Task actions can only be strings."
            raise TypeError(msg)
        return action_name

    @property
    def args(self) -> Any:
        """Return the arguments passed to the task action.

        While we usually expect to return a dictionary, it can also
        return a templated string when jinja is used.
        """
        if "args" in self.raw_task:
            return self.raw_task["args"]
        # Strip the linter's internal annotation keys from the action mapping.
        result = {
            k: v
            for k, v in self.normalized_task["action"].items()
            if k not in ANNOTATION_KEYS
        }
        return result

    @property
    def normalized_task(self) -> dict[str, Any]:
        """Return the normalized form of the task, computing it on first access."""
        if not hasattr(self, "_normalized_task"):
            try:
                self._normalized_task = self._normalize_task()
            except MatchError as err:
                self.error = err
                # When we cannot normalize it, we just use the raw task instead
                # to avoid adding extra complexity to the rules.
                self._normalized_task = self.raw_task
        if isinstance(self._normalized_task, _MISSING_TYPE):
            msg = "Task was not normalized"
            raise TypeError(msg)
        return self._normalized_task

    def _normalize_task(self) -> dict[str, Any]:
        """Unify task-like object structures."""
        ansible_action_type = self.raw_task.get("__ansible_action_type__", "task")
        if "__ansible_action_type__" in self.raw_task:
            del self.raw_task["__ansible_action_type__"]
        task = normalize_task_v2(self)
        task[FILENAME_KEY] = self.filename
        task["__ansible_action_type__"] = ansible_action_type
        return task

    @property
    def skip_tags(self) -> list[str]:
        """Return the list of tags to skip."""
        skip_tags: list[str] = self.raw_task.get(SKIPPED_RULES_KEY, [])
        return skip_tags

    def is_handler(self) -> bool:
        """Return true for tasks that are handlers.

        A task is a handler when its source file lives under a "handlers"
        directory or when its position path contains a handlers section.
        """
        is_handler_file = False
        if isinstance(self._normalized_task, dict):
            file_name = str(self._normalized_task["action"].get(FILENAME_KEY, None))
            if file_name:
                paths = file_name.split("/")
                is_handler_file = "handlers" in paths

        return is_handler_file if is_handler_file else ".handlers[" in self.position

    def __repr__(self) -> str:
        """Return a string representation of the task."""
        return f"Task('{self.name}' [{self.position}])"

    def get(self, key: str, default: Any = None) -> Any:
        """Get a value from the task."""
        return self.normalized_task.get(key, default)

    def __getitem__(self, index: str) -> Any:
        """Allow access as task[...]."""
        return self.normalized_task[index]

    def __iter__(self) -> Iterator[str]:
        """Provide support for 'key in task'."""
        yield from (f for f in self.normalized_task)
def task_in_list(
    data: AnsibleBaseYAMLObject,
    file: Lintable,
    kind: str,
    position: str = ".",
) -> Iterator[Task]:
    """Get action tasks from block structures.

    ``kind`` selects the traversal: "playbook" walks play keywords
    (tasks/pre_tasks/post_tasks/handlers); anything else treats ``data``
    as a plain list of tasks. ``position`` is the dotted/indexed path
    prefix recorded on every yielded Task.
    """
    # Yield a Task per entry, recursing into block/rescue/always sub-lists.
    def each_entry(data: AnsibleBaseYAMLObject, position: str) -> Iterator[Task]:
        if not data:
            return
        for entry_index, entry in enumerate(data):
            if not entry:
                continue
            _pos = f"{position}[{entry_index}]"
            if isinstance(entry, dict):
                yield Task(
                    entry,
                    position=_pos,
                )
            for block in [k for k in entry if k in NESTED_TASK_KEYS]:
                yield from task_in_list(
                    data=entry[block],
                    file=file,
                    kind="tasks",
                    position=f"{_pos}.{block}",
                )
    if not isinstance(data, list):
        return
    if kind == "playbook":
        attributes = ["tasks", "pre_tasks", "post_tasks", "handlers"]
        for item_index, item in enumerate(data):
            for attribute in attributes:
                if not isinstance(item, dict):
                    continue
                if attribute in item:
                    if isinstance(item[attribute], list):
                        yield from each_entry(
                            item[attribute],
                            f"{position }[{item_index}].{attribute}",
                        )
                    elif item[attribute] is not None:
                        # A scalar/dict value under a task keyword is malformed input.
                        msg = f"Key '{attribute}' defined, but bad value: '{item[attribute]!s}'"
                        raise RuntimeError(msg)
    else:
        yield from each_entry(data, position)
def add_action_type(actions: AnsibleBaseYAMLObject, action_type: str) -> list[Any]:
    """Tag each non-empty task with its originating action type marker."""
    annotated = []
    for action in actions:
        # skip empty tasks
        if not action:
            continue
        action["__ansible_action_type__"] = BLOCK_NAME_TO_ACTION_TYPE_MAP[action_type]
        annotated.append(action)
    return annotated
def get_action_tasks(data: AnsibleBaseYAMLObject, file: Lintable) -> list[Any]:
    """Get a flattened list of action tasks from the file."""
    if file.kind in ("tasks", "handlers"):
        tasks = add_action_type(data, file.kind)
    else:
        tasks = list(extract_from_list(data, PLAYBOOK_TASK_KEYWORDS))
    # Append the sub-elements of block/rescue/always to the flat list.
    tasks.extend(extract_from_list(tasks, NESTED_TASK_KEYS, recursive=True))
    return tasks
@cache
def parse_yaml_linenumbers(
    lintable: Lintable,
) -> AnsibleBaseYAMLObject:
    """Parse yaml as ansible.utils.parse_yaml but with linenumbers.
    The line numbers are stored in each node's LINE_NUMBER_KEY key.
    """
    result = []
    # Both closures below capture `loader`, which is bound later inside the
    # try block; Python's late binding makes that safe because they are only
    # called after `loader` exists.
    def compose_node(parent: yaml.nodes.Node, index: int) -> yaml.nodes.Node:
        # the line number where the previous token has ended (plus empty lines)
        line = loader.line
        node = Composer.compose_node(loader, parent, index)
        if not isinstance(node, yaml.nodes.Node):
            msg = "Unexpected yaml data."
            raise TypeError(msg)
        node.__line__ = line + 1  # type: ignore[attr-defined]
        return node
    def construct_mapping(
        node: AnsibleBaseYAMLObject,
        *,
        deep: bool = False,
    ) -> AnsibleMapping:
        # Attach the recorded line number and source file to every mapping.
        mapping = AnsibleConstructor.construct_mapping(loader, node, deep=deep)
        if hasattr(node, "__line__"):
            mapping[LINE_NUMBER_KEY] = node.__line__
        else:
            mapping[LINE_NUMBER_KEY] = mapping._line_number  # noqa: SLF001
        mapping[FILENAME_KEY] = lintable.path
        return mapping
    try:
        kwargs = {}
        # Older/newer AnsibleLoader signatures differ; pass the vault password
        # only when the constructor accepts it.
        if "vault_password" in inspect.getfullargspec(AnsibleLoader.__init__).args:
            kwargs["vault_password"] = DEFAULT_VAULT_PASSWORD
        loader = AnsibleLoader(lintable.content, **kwargs)
        loader.compose_node = compose_node
        loader.construct_mapping = construct_mapping
        # while Ansible only accepts single documents, we also need to load
        # multi-documents, as we attempt to load any YAML file, not only
        # Ansible managed ones.
        while True:
            data = loader.get_data()
            if data is None:
                break
            result.append(data)
    except (
        yaml.parser.ParserError,
        yaml.scanner.ScannerError,
        yaml.constructor.ConstructorError,
        ruamel.yaml.parser.ParserError,
    ) as exc:
        msg = f"Failed to load YAML file: {lintable.path}"
        raise RuntimeError(msg) from exc
    if len(result) == 0:
        return None  # empty documents
    if len(result) == 1:
        return result[0]
    return result
def get_cmd_args(task: dict[str, Any]) -> str:
    """Return the command arguments of a cmd task as a single string."""
    action = task["action"]
    # Prefer the explicit `cmd` key; otherwise fall back to raw params.
    args = action["cmd"] if "cmd" in action else action.get("_raw_params", [])
    return args if isinstance(args, str) else " ".join(args)
def get_first_cmd_arg(task: dict[str, Any]) -> Any:
    """Extract the first arg from a cmd task, or None when there is none."""
    words = get_cmd_args(task).split()
    return words[0] if words else None
def get_second_cmd_arg(task: dict[str, Any]) -> Any:
    """Extract the second arg from a cmd task, or None when there is none."""
    words = get_cmd_args(task).split()
    return words[1] if len(words) > 1 else None
def is_playbook(filename: str) -> bool:
    """Check if the file is a playbook.
    Given a filename, it should return true if it looks like a playbook. The
    function is not supposed to raise exceptions.
    """
    # we assume is a playbook if we loaded a sequence of dictionaries where
    # at least one of these keys is present:
    playbooks_keys = {
        "gather_facts",
        "hosts",
        "import_playbook",
        "post_tasks",
        "pre_tasks",
        "roles",
        "tasks",
    }
    # makes it work with Path objects by converting them to strings
    if not isinstance(filename, str):
        filename = str(filename)
    try:
        f = parse_yaml_from_file(filename)
    except Exception as exc:  # pylint: disable=broad-except # noqa: BLE001
        # Any load failure means "not a playbook" rather than an error.
        _logger.warning(
            "Failed to load %s with %s, assuming is not a playbook.",
            filename,
            exc,
        )
    else:
        # Only the first document entry (first play) is inspected.
        if (
            isinstance(f, AnsibleSequence)
            and hasattr(next(iter(f), {}), "keys")
            and playbooks_keys.intersection(next(iter(f), {}).keys())
        ):
            return True
    return False
def get_lintables(
    opts: Options = options,
    args: list[str] | None = None,
) -> list[Lintable]:
    """Detect files and directories that are lintable."""
    # Explicit args bypass the auto-detection mode entirely.
    if args:
        lintables = [Lintable(arg) for arg in args]
    else:
        lintables = [Lintable(Path(filename)) for filename in discover_lintables(opts)]
    # stage 2: guess roles from current lintables, as there is no unique
    # file that must be present in any kind of role.
    _extend_with_roles(lintables)
    return lintables
def _extend_with_roles(lintables: list[Lintable]) -> None:
    """Detect roles among lintables and adds them to the list.

    Mutates ``lintables`` in place. New role entries appended during the loop
    are themselves iterated afterwards, which is harmless because they are
    already present in the list and will not be re-added.
    """
    for lintable in lintables:
        parts = lintable.path.parent.parts
        if "roles" in parts:
            # Walk up until we reach the directory directly under "roles".
            role = lintable.path
            while role.parent.name != "roles" and role.name:
                role = role.parent
            if role.exists() and not role.is_file():
                # NOTE: rebinds the loop variable on purpose; the Lintable for
                # the role directory replaces the current file lintable here.
                lintable = Lintable(role)
                if lintable.kind == "role" and lintable not in lintables:
                    _logger.debug("Added role: %s", lintable)
                    lintables.append(lintable)
def convert_to_boolean(value: Any) -> bool:
    """Use Ansible to convert something to a boolean."""
    result = boolean(value)
    return bool(result)
def parse_examples_from_plugin(lintable: Lintable) -> tuple[int, str]:
    """Parse yaml inside plugin EXAMPLES string.
    Store a line number offset to realign returned line numbers later
    """
    offset = 1
    # Locate the EXAMPLES assignment in the module source so line numbers of
    # matches inside the embedded YAML can be mapped back to the .py file.
    parsed = ast.parse(lintable.content)
    for child in parsed.body:
        if isinstance(child, ast.Assign):
            label = child.targets[0]
            if isinstance(label, ast.Name) and label.id == "EXAMPLES":
                offset = child.lineno - 1
                break
    docs = read_docstring(str(lintable.path))
    examples = docs["plainexamples"]
    # Ignore the leading newline and lack of document start
    # as including those in EXAMPLES would be weird.
    return offset, (f"---{examples}" if examples else "")
@lru_cache
def load_plugin(name: str) -> PluginLoadContext:
    """Return loaded ansible plugin/module."""
    # Try the action plugin loader first.
    context = action_loader.find_plugin_with_context(
        name,
        ignore_deprecated=True,
        check_aliases=True,
    )
    if context.resolved:
        return context
    # Fall back to the module loader.
    context = module_loader.find_plugin_with_context(
        name,
        ignore_deprecated=True,
        check_aliases=True,
    )
    if context.resolved or not name.startswith("ansible.builtin."):
        return context
    # fallback to core behavior of using legacy
    return module_loader.find_plugin_with_context(
        name.replace("ansible.builtin.", "ansible.legacy."),
        ignore_deprecated=True,
        check_aliases=True,
    )
def parse_fqcn(name: str) -> tuple[str, ...]:
    """Parse name parameter into FQCN segments."""
    if is_fqcn(name):
        return tuple(name.split("."))
    # Non-FQCN names keep empty namespace/collection slots.
    return ("", "", name)
| 41,554 | Python | .py | 1,011 | 31.591494 | 112 | 0.605335 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,227 | text.py | ansible_ansible-lint/src/ansiblelint/text.py | """Text utils."""
from __future__ import annotations
import re
from functools import cache
RE_HAS_JINJA = re.compile(r"{[{%#].*[%#}]}", re.DOTALL)
RE_HAS_GLOB = re.compile("[][*?]")
RE_IS_FQCN_OR_NAME = re.compile(r"^\w+(\.\w+\.\w+)?$")
def strip_ansi_escape(data: str | bytes) -> str:
    """Remove all ANSI escapes from string or bytes.

    Bytes input is first decoded using UTF-8.
    """
    text = data.decode("utf-8") if isinstance(data, bytes) else data
    return re.sub(r"\x1b[^m]*m", "", text)
def toidentifier(text: str) -> str:
    """Convert unsafe chars to ones allowed in variables."""
    candidate = re.sub(r"[\s-]+", "_", text)
    if candidate.isidentifier():
        return candidate
    msg = f"Unable to convert role name '{text}' to valid variable name."
    raise RuntimeError(msg)
# https://www.python.org/dev/peps/pep-0616/
def removeprefix(self: str, prefix: str) -> str:
    """Remove prefix from string.

    Kept for backward compatibility with existing callers; delegates to
    ``str.removeprefix`` (PEP 616), which is available because this module
    already requires Python >= 3.9 (it uses ``functools.cache``).
    """
    return self.removeprefix(prefix)
@cache
def has_jinja(value: str) -> bool:
    """Return true if a string seems to contain jinja templating."""
    if not isinstance(value, str):
        return False
    return RE_HAS_JINJA.search(value) is not None
@cache
def has_glob(value: str) -> bool:
    """Return true if a string looks like having a glob pattern."""
    if not isinstance(value, str):
        return False
    return RE_HAS_GLOB.search(value) is not None
@cache
def is_fqcn_or_name(value: str) -> bool:
    """Return true if a string seems to be a module/filter old name or a fully qualified one."""
    if not isinstance(value, str):
        return False
    return RE_IS_FQCN_OR_NAME.search(value) is not None
@cache
def is_fqcn(value: str) -> bool:
    """Return true if a string seems to be a fully qualified collection name."""
    if not isinstance(value, str):
        return False
    match = RE_IS_FQCN_OR_NAME.search(value)
    # Only a match whose dotted group is present is a real FQCN.
    return bool(match and match.group(1))
| 1,923 | Python | .py | 45 | 38.644444 | 96 | 0.670791 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,228 | __init__.py | ansible_ansible-lint/src/ansiblelint/__init__.py | # Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Main ansible-lint package."""
from __future__ import annotations
from ansiblelint.version import __version__
__all__ = ("__version__",)
| 1,256 | Python | .py | 23 | 53.521739 | 79 | 0.781478 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,229 | color.py | ansible_ansible-lint/src/ansiblelint/color.py | """Console coloring and terminal support."""
from __future__ import annotations
from typing import Any
import rich
import rich.markdown
from rich.console import Console
from rich.default_styles import DEFAULT_STYLES
from rich.style import Style
from rich.syntax import Syntax
from rich.theme import Theme
# WARNING: When making style changes, be sure you test the output of
# `ansible-lint -L` on multiple terminals with dark/light themes, including:
# - iTerm2 (macOS) - bold might not be rendered differently
# - vscode integrated terminal - bold might not be rendered differently, links will not work
#
# When it comes to colors being used, try to match:
# - Ansible official documentation theme, https://docs.ansible.com/ansible/latest/dev_guide/developing_api.html
# - VSCode Ansible extension for syntax highlighting
# - GitHub markdown theme
#
# Current values: (docs)
# codeblock border: #404040
# codeblock background: #edf0f2
# codeblock comment: #6a737d (also italic)
# teletype-text: #e74c3c (red)
# teletype-text-border: 1px solid #e1e4e5 (background white)
# text: #404040
# codeblock other-text: #555555 (black-ish)
# codeblock property: #22863a (green)
# codeblock integer: 032f62 (blue)
# codeblock command: #0086b3 (blue) - [shell]
# == python ==
# class: #445588 (dark blue and bold)
# docstring: #dd1144 (red)
# self: #999999 (light-gray)
# method/function: #990000 (dark-red)
# number: #009999 cyan
# keywords (def,None,False,len,from,import): #007020 (green) bold
# super|dict|print: #0086b3 light-blue
# __name__: #bb60d5 (magenta)
# string: #dd1144 (light-red)
# Override rich's default markdown styles for better terminal contrast.
DEFAULT_STYLES.update(
    {
        "markdown.code": Style(color="bright_black"),
        "markdown.code_block": Style(dim=True, color="cyan"),
    },
)
# Semantic style names used throughout ansible-lint output.
_theme = Theme(
    {
        "info": "cyan",
        "warning": "yellow",
        "danger": "bold red",
        "title": "yellow",
        "error": "bright_red",
        "filename": "blue",
    },
)
# Shared console configuration; the stderr variant only differs by stream.
console_options: dict[str, Any] = {"emoji": False, "theme": _theme, "soft_wrap": True}
console_options_stderr = console_options.copy()
console_options_stderr["stderr"] = True
console = rich.get_console()
console_stderr = Console(**console_options_stderr)
def reconfigure(new_options: dict[str, Any]) -> None:
    """Reconfigure the stdout and stderr consoles with *new_options*.

    NOTE: the module-level ``console_options`` dict is NOT updated here; the
    previous implementation rebound a *local* variable of that name, which
    only looked like it did. The effective behavior (reconfiguring rich's
    global console and rebuilding ``console_stderr``) is unchanged.
    """
    rich.reconfigure(**new_options)
    # see https://github.com/willmcgugan/rich/discussions/484#discussioncomment-200182
    new_console_options_stderr = new_options.copy()
    new_console_options_stderr["stderr"] = True
    tmp_console = Console(**new_console_options_stderr)
    # Swap the existing stderr console's state in place so references held by
    # other modules keep working.
    console_stderr.__dict__ = tmp_console.__dict__
def render_yaml(text: str) -> Syntax:
    """Colorize YAML for nice display."""
    return Syntax(text, "yaml", theme="ansi_dark")
# pylint: disable=redefined-outer-name,unused-argument
def _rich_codeblock_custom_rich_console(
    self: rich.markdown.CodeBlock,
    console: Console,  # noqa: ARG001
    options: rich.console.ConsoleOptions,  # noqa: ARG001
) -> rich.console.RenderResult:  # pragma: no cover
    """Render a markdown code block with the terminal's default background."""
    code = str(self.text).rstrip()
    syntax = Syntax(
        code,
        self.lexer_name,
        theme=self.theme,
        word_wrap=True,
        background_color="default",
    )
    yield syntax
# Monkeypatch rich so every markdown CodeBlock renders via the function above.
rich.markdown.CodeBlock.__rich_console__ = _rich_codeblock_custom_rich_console  # type: ignore[method-assign]
| 3,434 | Python | .py | 90 | 34.9 | 111 | 0.714929 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,230 | logger.py | ansible_ansible-lint/src/ansiblelint/logger.py | """Utils related to logging."""
import logging
import time
from collections.abc import Iterator
from contextlib import contextmanager
from typing import Any
_logger = logging.getLogger(__name__)
@contextmanager
def timed_info(msg: Any, *args: Any) -> Iterator[None]:
    """Context manager for logging slow operations, mentions duration."""
    started_at = time.time()
    try:
        yield
    finally:
        # Always log, even if the wrapped block raised.
        duration = time.time() - started_at
        _logger.info(msg + " (%.2fs)", *(*args, duration))  # noqa: G003
| 515 | Python | .py | 16 | 28.4375 | 73 | 0.692929 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,231 | loaders.py | ansible_ansible-lint/src/ansiblelint/loaders.py | """Utilities for loading various files."""
from __future__ import annotations
import logging
import os
from collections import defaultdict
from functools import partial
from typing import TYPE_CHECKING, Any, NamedTuple
import yaml
from yaml import YAMLError
try:
from yaml import CFullLoader as FullLoader
from yaml import CSafeLoader as SafeLoader
except (ImportError, AttributeError):
from yaml import FullLoader, SafeLoader # type: ignore[assignment]
if TYPE_CHECKING:
from pathlib import Path
class IgnoreFile(NamedTuple):
    """Candidate locations of the ansible-lint ignore file."""
    default: str  # primary location, at the project root
    alternative: str  # fallback location under .config/
IGNORE_FILE = IgnoreFile(".ansible-lint-ignore", ".config/ansible-lint-ignore.txt")
yaml_load = partial(yaml.load, Loader=FullLoader)
yaml_load_safe = partial(yaml.load, Loader=SafeLoader)
_logger = logging.getLogger(__name__)
def yaml_from_file(filepath: str | Path) -> Any:
    """Return the parsed content of a YAML file."""
    with open(str(filepath), encoding="utf-8") as handle:
        return yaml_load(handle)
def load_ignore_txt(filepath: Path | None = None) -> dict[str, set[str]]:
    """Return the rules to ignore, as a mapping of file path -> rule ids.

    When no explicit ``filepath`` is given, fall back to the default ignore
    file locations (``IGNORE_FILE.default`` then ``IGNORE_FILE.alternative``).

    Raises RuntimeError on lines that do not contain exactly
    ``<path> <rule>`` after comment stripping.
    """
    # Same logger instance as the module-level ``_logger``.
    log = logging.getLogger(__name__)
    result: dict[str, set[str]] = defaultdict(set)
    ignore_file = None
    if filepath:
        if os.path.isfile(filepath):
            ignore_file = str(filepath)
        else:
            # BUG FIX: previously this logged `ignore_file`, which is always
            # None on this branch, instead of the path that was not found.
            log.error("Ignore file not found '%s'", filepath)
    elif os.path.isfile(IGNORE_FILE.default):
        ignore_file = IGNORE_FILE.default
    elif os.path.isfile(IGNORE_FILE.alternative):
        ignore_file = IGNORE_FILE.alternative
    if ignore_file:
        with open(ignore_file, encoding="utf-8") as _ignore_file:
            log.debug("Loading ignores from '%s'", ignore_file)
            for line in _ignore_file:
                # Strip trailing comments and whitespace; skip blank lines.
                entry = line.split("#")[0].rstrip()
                if entry:
                    try:
                        path, rule = entry.split()
                    except ValueError as exc:
                        msg = f"Unable to parse line '{line}' from {ignore_file} file."
                        raise RuntimeError(msg) from exc
                    result[path].add(rule)
    return result
__all__ = [
"load_ignore_txt",
"yaml_from_file",
"yaml_load",
"yaml_load_safe",
"YAMLError",
"IGNORE_FILE",
]
| 2,297 | Python | .py | 62 | 30.032258 | 87 | 0.649051 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,232 | requirements.py | ansible_ansible-lint/src/ansiblelint/requirements.py | """Utilities for checking python packages requirements."""
import importlib_metadata
from packaging.requirements import Requirement
from packaging.specifiers import SpecifierSet
from packaging.version import Version
class Reqs(dict[str, SpecifierSet]):
    """Utility class for working with package dependencies.

    Maps dependency name -> version specifier set, loaded from the
    installed package's metadata.
    """
    # NOTE(review): this annotation appears unused (the class itself is the
    # mapping); confirm before removing.
    reqs: dict[str, SpecifierSet]
    def __init__(self, name: str = "ansible-lint") -> None:
        """Load linter metadata requirements."""
        # Index each "Requires-Dist" entry's specifier by dependency name.
        for req_str in importlib_metadata.metadata(name).json["requires_dist"]:
            req = Requirement(req_str)
            if req.name:
                self[req.name] = req.specifier
    def matches(self, req_name: str, req_version: str | Version) -> bool:
        """Verify if given version is matching current metadata dependencies."""
        if req_name not in self:
            return False
        # Every individual specifier must accept the version; pre-releases
        # are explicitly allowed.
        return all(
            specifier.contains(str(req_version), prereleases=True)
            for specifier in self[req_name]
        )
| 1,010 | Python | .py | 22 | 38.272727 | 80 | 0.677189 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,233 | stats.py | ansible_ansible-lint/src/ansiblelint/stats.py | """Module hosting functionality about reporting."""
from __future__ import annotations
from dataclasses import dataclass, field
@dataclass(order=True)
class TagStats:
    """Tag statistics."""
    # `order` is deliberately the first field: with order=True, instances
    # compare field-by-field, so sorting uses profile order before tag name.
    order: int = 0  # to be computed based on rule's profile
    tag: str = ""  # rule effective id (can be multiple tags per rule id)
    count: int = 0  # total number of occurrences
    warning: bool = False  # set true if listed in warn_list
    profile: str = ""
    associated_tags: list[str] = field(default_factory=list)
@dataclass
class SummarizedResults:
    """The statistics about an ansible-lint run."""

    failures: int = 0
    warnings: int = 0
    fixed_failures: int = 0
    fixed_warnings: int = 0
    # default_factory gives each instance its own dict; the previous plain
    # class attribute ({}) was a single dict shared by every instance.
    tag_stats: dict[str, TagStats] = field(default_factory=dict)
    passed_profile: str = ""

    @property
    def fixed(self) -> int:
        """Get total fixed count."""
        return self.fixed_failures + self.fixed_warnings

    def sort(self) -> None:
        """Sort tag stats by their TagStats ordering (profile order, then tag)."""
        self.tag_stats = dict(sorted(self.tag_stats.items(), key=lambda t: t[1]))
| 1,066 | Python | .py | 27 | 34.407407 | 81 | 0.662779 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,234 | version.py | ansible_ansible-lint/src/ansiblelint/version.py | """Ansible-lint version information."""
try:
from ._version import version as __version__
except ImportError: # pragma: no cover
try:
import pkg_resources
__version__ = pkg_resources.get_distribution("ansible-lint").version
except Exception: # pylint: disable=broad-except # noqa: BLE001
# this is the fallback SemVer version picked by setuptools_scm when tag
# information is not available.
__version__ = "0.1.dev1"
__all__ = ("__version__",)
| 501 | Python | .py | 12 | 36.166667 | 79 | 0.664609 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,235 | file_utils.py | ansible_ansible-lint/src/ansiblelint/file_utils.py | """Utility functions related to file operations."""
from __future__ import annotations
import copy
import logging
import os
import sys
from collections import defaultdict
from contextlib import contextmanager
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Any, cast
import pathspec
import wcmatch.pathlib
import wcmatch.wcmatch
from yaml.error import YAMLError
from ansiblelint.app import get_app
from ansiblelint.config import ANSIBLE_OWNED_KINDS, BASE_KINDS, Options, options
from ansiblelint.constants import CONFIG_FILENAMES, FileType, States
if TYPE_CHECKING:
from collections.abc import Iterator, Sequence
from ansiblelint.errors import MatchError
_logger = logging.getLogger(__package__)
def abspath(path: str, base_dir: str) -> str:
    """Make relative path absolute relative to given directory.

    path (str): the path to make absolute
    base_dir (str): the directory from which make relative paths absolute.
    """
    if os.path.isabs(path):
        return os.path.normpath(path)
    # Don't use os.path.abspath as it assumes the path is relative to cwd;
    # we want it relative to base_dir instead.
    return os.path.normpath(os.path.join(base_dir, path))
def normpath(path: str | Path) -> str:
    """Normalize a path in order to provide a more consistent output.

    Currently it generates a relative path but in the future we may want to
    make this user configurable.
    """
    # An empty input would make relpath() raise ValueError, so map it to cwd.
    text = str(path) if path else "."
    relative = os.path.relpath(text)
    absolute = os.path.abspath(text)
    # Paths under the current directory are reported relative to it.
    if absolute.startswith(os.getcwd()):
        return relative
    # Paths under the user's home directory are compressed with '~'.
    home = os.path.expanduser("~")
    if absolute.startswith(home):
        return absolute.replace(home, "~")
    # we avoid returning relative paths that end-up at root level
    if absolute in relative:
        return absolute
    if relative.startswith("../"):
        return absolute
    return relative
# That is needed for compatibility with py38, later was added to Path class
def is_relative_to(path: Path, *other: Any) -> bool:
    """Return True if the path is relative to another path or False."""
    try:
        # relative_to raises ValueError when there is no common prefix.
        path.resolve().absolute().relative_to(*other)
        return True
    except ValueError:
        return False
def normpath_path(path: str | Path) -> Path:
    """Normalize a path in order to provide a more consistent output.

    - Any symlinks are resolved.
    - Any paths outside the CWD are resolved to their absolute path.
    - Any absolute path within current user home directory is compressed to
    make use of '~', so it is easier to read and more portable.
    """
    result = path if isinstance(path, Path) else Path(path)
    # Decide relativity against the *unresolved* path, then resolve symlinks.
    relative = is_relative_to(result, result.cwd())
    result = result.resolve()
    if relative:
        result = result.relative_to(result.cwd())
    if result.is_absolute():
        home = Path.home()
        if is_relative_to(result, home):
            result = Path("~") / result.relative_to(home)
    return result
@contextmanager
def cwd(path: Path) -> Iterator[None]:
    """Context manager for temporary changing current working directory."""
    previous = Path.cwd()
    os.chdir(path)
    try:
        yield
    finally:
        # Restore the original directory even if the body raised.
        os.chdir(previous)
def expand_path_vars(path: str) -> str:
    """Expand the environment or ~ variables in a path string."""
    # str() guards against Path objects being passed in.
    expanded = str(path).strip()
    expanded = os.path.expanduser(expanded)
    return os.path.expandvars(expanded)
def expand_paths_vars(paths: list[str]) -> list[str]:
    """Expand the environment or ~ variables in a list."""
    return [expand_path_vars(item) for item in paths]
def kind_from_path(path: Path, *, base: bool = False) -> FileType:
    """Determine the file kind based on its name.
    When called with base=True, it will return the base file type instead
    of the explicit one. That is expected to return 'yaml' for any yaml files.
    """
    # pathlib.Path.match patterns are very limited, they do not support *a*.yml
    # glob.glob supports **/foo.yml but not multiple extensions
    pathex = wcmatch.pathlib.PurePath(str(path.absolute().resolve()))
    kinds = options.kinds if not base else BASE_KINDS
    # First matching glob pattern wins, so ordering of `kinds` matters.
    for entry in kinds:
        for k, v in entry.items():
            if pathex.globmatch(
                v,
                flags=(
                    wcmatch.pathlib.GLOBSTAR
                    | wcmatch.pathlib.BRACE
                    | wcmatch.pathlib.DOTGLOB
                ),
            ):
                return str(k)  # type: ignore[return-value]
    if base:
        # Unknown base file type is default
        return ""
    # Directories may still be roles if they contain a known role subfolder.
    if path.is_dir():
        known_role_subfolders = ("tasks", "meta", "vars", "defaults", "handlers")
        for filename in known_role_subfolders:
            if (path / filename).is_dir():
                return "role"
        _logger.debug(
            "Folder `%s` does not look like a role due to missing any of the common subfolders such: %s.",
            path,
            ", ".join(known_role_subfolders),
        )
    # Piped input is always treated as a playbook.
    if str(path) == "/dev/stdin":
        return "playbook"
    # Unknown file types report a empty string (evaluated as False)
    return ""
# pylint: disable=too-many-instance-attributes
class Lintable:
"""Defines a file/folder that can be linted.
Providing file content when creating the object allow creation of in-memory
instances that do not need files to be present on disk.
When symlinks are given, they will always be resolved to their target.
"""
# pylint: disable=too-many-arguments,too-many-positional-arguments
    def __init__(
        self,
        name: str | Path,
        content: str | None = None,
        kind: FileType | None = None,
        base_kind: str = "",
        parent: Lintable | None = None,
    ):
        """Create a Lintable instance.

        :param name: path of the file/folder, or "-"//dev/stdin for piped input
        :param content: optional in-memory content, avoiding a disk read
        :param kind: explicit file kind, otherwise guessed from the path
        :param base_kind: explicit base kind (yaml, ...), otherwise guessed
        :param parent: lintable that includes this one, when known
        """
        self.dir: str = ""
        self.kind: FileType | None = None
        self.stop_processing = False  # Set to stop other rules from running
        self.state: Any = States.NOT_LOADED
        self.line_skips: dict[int, set[str]] = defaultdict(set)
        self.exc: Exception | None = None  # Stores data loading exceptions
        self.parent = parent
        self.explicit = False  # Indicates if the file was explicitly provided or was indirectly included.
        self.line_offset = (
            0  # Amount to offset line numbers by to get accurate position
        )
        self.matches: list[MatchError] = []
        if isinstance(name, str):
            name = Path(name)
        is_relative = is_relative_to(name, str(name.cwd()))
        name = name.resolve()
        if is_relative:
            name = name.relative_to(name.cwd())
        name = normpath_path(name)
        # we need to be sure that we expanduser() because otherwise a simple
        # test like .path.exists() will return unexpected results.
        self.path = name.expanduser()
        # Filename is effective file on disk, for stdin is a namedtempfile
        self.name = self.filename = str(name)
        self._content = self._original_content = content
        self.updated = False
        # if the lintable is part of a role, we save role folder name
        self.role = ""
        parts = self.path.parent.parts
        if "roles" in parts:
            # Walk up until the role root (directly under a roles path).
            role = self.path
            roles_path = get_app(cached=True).runtime.config.default_roles_path
            while (
                str(role.parent.absolute()) not in roles_path
                and role.parent.name != "roles"
                and role.name
            ):
                role = role.parent
            if role.exists():
                self.role = role.name
        if str(self.path) in ["/dev/stdin", "-"]:
            # stdin content is spooled into a temporary playbook file so the
            # rest of the code can treat it like any other file on disk.
            # pylint: disable=consider-using-with
            self.file = NamedTemporaryFile(  # noqa: SIM115
                mode="w+",
                suffix="playbook.yml",
            )
            self.filename = self.file.name
            self._content = sys.stdin.read()
            self.file.write(self._content)
            self.file.flush()
            self.path = Path(self.file.name)
            self.name = "stdin"
            self.kind = "playbook"
            self.dir = "/"
        else:
            self.kind = kind or kind_from_path(self.path)
        # We store absolute directory in dir
        if not self.dir:
            if self.kind == "role":
                self.dir = str(self.path.resolve())
            else:
                self.dir = str(self.path.parent.resolve())
        # determine base file kind (yaml, xml, ini, ...)
        self.base_kind = base_kind or kind_from_path(self.path, base=True)
        self.abspath = self.path.expanduser().absolute()
        if self.kind == "tasks":
            self.parent = _guess_parent(self)
        if self.kind == "yaml":
            # Forces an eager data load (which may refine the kind).
            _ = self.data
def __del__(self) -> None:
"""Clean up temporary files when the instance is cleaned up."""
if hasattr(self, "file"):
self.file.close()
    def _guess_kind(self) -> None:
        """Refine a generic 'yaml' kind into 'playbook' or 'rulebook' when possible."""
        if self.kind == "yaml":
            # A top-level list whose first entry has play/import keys looks
            # like a playbook; the presence of "rules" marks a rulebook.
            if (
                isinstance(self.data, list)
                and len(self.data) > 0
                and (
                    "hosts" in self.data[0]
                    or "import_playbook" in self.data[0]
                    or "ansible.builtin.import_playbook" in self.data[0]
                )
            ):
                if "rules" not in self.data[0]:
                    self.kind = "playbook"
                else:
                    self.kind = "rulebook"
            # when we failed to guess the more specific kind, we warn user
            if self.kind == "yaml":
                _logger.debug(
                    "Passed '%s' positional argument was identified as generic '%s' file kind.",
                    self.name,
                    self.kind,
                )
def __getitem__(self, key: Any) -> Any:
"""Provide compatibility subscriptable support."""
if key == "path":
return str(self.path)
if key == "type":
return str(self.kind)
raise NotImplementedError
def get(self, key: Any, default: Any = None) -> Any:
"""Provide compatibility subscriptable support."""
try:
return self[key]
except NotImplementedError:
return default
def _populate_content_cache_from_disk(self) -> None:
# Can raise UnicodeDecodeError
self._content = self.path.expanduser().resolve().read_text(encoding="utf-8")
if self._original_content is None:
self._original_content = self._content
@property
def content(self) -> str:
"""Retrieve file content, from internal cache or disk."""
if self._content is None:
self._populate_content_cache_from_disk()
return cast(str, self._content)
    @content.setter
    def content(self, value: str) -> None:
        """Update ``content`` and calculate ``updated``.
        To calculate ``updated`` this will read the file from disk if the cache
        has not already been populated.
        """
        if not isinstance(value, str):
            msg = f"Expected str but got {type(value)}"
            raise TypeError(msg)
        # Capture the baseline content once, so `updated` compares against
        # the content as it was before any modification.
        if self._original_content is None:
            if self._content is not None:
                self._original_content = self._content
            elif self.path.exists():
                self._populate_content_cache_from_disk()
            else:
                # new file
                self._original_content = ""
        self.updated = self._original_content != value
        self._content = value
    @content.deleter
    def content(self) -> None:
        """Reset the internal content cache; next access re-reads from disk."""
        self._content = None
    def write(self, *, force: bool = False) -> None:
        """Write the value of ``Lintable.content`` to disk.

        This only writes to disk if the content has been updated (``Lintable.updated``).

        For example, you can update the content, and then write it to disk like this:

        .. code:: python

            lintable.content = new_content
            lintable.write()

        Use ``force=True`` when you want to force a content rewrite even if the
        content has not changed. For example:

        .. code:: python

            lintable.write(force=True)
        """
        dump_filename = self.path.expanduser().resolve()
        if os.environ.get("ANSIBLE_LINT_WRITE_TMP", "0") == "1":
            # Divert output to a sibling ``.tmp`` file instead of overwriting
            # the original; the transform tests rely on this escape hatch.
            dump_filename = dump_filename.with_suffix(
                f".tmp{dump_filename.suffix}",
            )
        elif not force and not self.updated:
            # No changes to write.
            return
        # ``_content`` may still be None when nothing was loaded or assigned.
        dump_filename.write_text(
            self._content or "",
            encoding="utf-8",
        )
def __hash__(self) -> int:
"""Return a hash value of the lintables."""
return hash((self.name, self.kind, self.abspath))
def __eq__(self, other: object) -> bool:
"""Identify whether the other object represents the same rule match."""
if isinstance(other, Lintable):
return bool(self.name == other.name and self.kind == other.kind)
return False
def __repr__(self) -> str:
"""Return user friendly representation of a lintable."""
return f"{self.name} ({self.kind})"
    def is_owned_by_ansible(self) -> bool:
        """Return true for YAML files that are managed by Ansible."""
        # Membership in the module-level ANSIBLE_OWNED_KINDS set decides ownership.
        return self.kind in ANSIBLE_OWNED_KINDS
def failed(self) -> bool:
"""Return true if we already found syntax-check errors on this file."""
return any(
match.rule.id in ("syntax-check", "load-failure") for match in self.matches
)
    @property
    def data(self) -> Any:
        """Return loaded data representation for current file, if possible.

        Lazily parses the file on first access; the result (or a sentinel
        ``States`` value / None for directories) is cached in ``self.state``.
        Parse failures are recorded in ``self.exc`` instead of propagating.
        """
        if self.state == States.NOT_LOADED:
            # Directories carry no parseable data.
            if self.path.is_dir():
                self.state = None
                return self.state
            try:
                if str(self.base_kind) == "text/yaml":
                    from ansiblelint.utils import (  # pylint: disable=import-outside-toplevel
                        parse_yaml_linenumbers,
                    )

                    self.state = parse_yaml_linenumbers(self)
                    # now that _data is not empty, we can try guessing if playbook or rulebook
                    # it has to be done before append_skipped_rules() call as it's relying
                    # on self.kind.
                    if self.kind == "yaml":
                        self._guess_kind()
                    # Lazy import to avoid delays and cyclic-imports
                    if "append_skipped_rules" not in globals():
                        # pylint: disable=import-outside-toplevel
                        from ansiblelint.skip_utils import append_skipped_rules

                    # pylint: disable=possibly-used-before-assignment
                    self.state = append_skipped_rules(
                        self.state,
                        self,
                    )
                else:
                    # Non-YAML content is not parsed at all.
                    logging.debug(
                        "data set to None for %s due to being '%s' (%s) kind.",
                        self.path,
                        self.kind,
                        self.base_kind or "unknown",
                    )
                    self.state = States.UNKNOWN_DATA
            except (
                RuntimeError,
                FileNotFoundError,
                YAMLError,
                UnicodeDecodeError,
            ) as exc:
                # Remember the failure; callers check ``failed()`` / ``self.exc``.
                self.state = States.LOAD_FAILED
                self.exc = exc
        return self.state
# pylint: disable=redefined-outer-name
def discover_lintables(options: Options) -> list[str]:
    """Find all files that we know how to lint.

    Return format is normalized, relative for stuff below cwd, ~/ for content
    under current user and absolute for everything else.
    """
    # An empty selection defaults to the current directory.
    if not options.lintables:
        options.lintables = ["."]
    roots = [Path(candidate) for candidate in options.lintables]
    discovered = get_all_files(*roots, exclude_paths=options.exclude_paths)
    return [str(filename) for filename in discovered]
def strip_dotslash_prefix(fname: str) -> str:
    """Remove a leading ``./`` from a filename, if present.

    :param fname: A possibly ``./``-prefixed path string.
    :returns: The same string without the redundant ``./`` prefix.
    """
    # str.removeprefix (3.9+) already returns the string unchanged when the
    # prefix is absent, replacing the manual startswith/slice dance.
    return fname.removeprefix("./")
def find_project_root(
    srcs: Sequence[str],
    config_file: str | None = None,
) -> tuple[Path, str]:
    """Return a directory containing .git or ansible-lint config files.

    That directory will be a common parent of all files and directories
    passed in `srcs`.

    If no directory in the tree contains a marker that would specify it's the
    project root, the root of the file system is returned.

    Returns a two-tuple with the first element as the project root path and
    the second element as a string describing the method by which the
    project root was discovered.
    """
    directory = None
    if not srcs:
        srcs = [str(Path.cwd().resolve().absolute())]
    path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs]
    # An explicit config file narrows the marker search to just that name.
    cfg_files = [config_file] if config_file else CONFIG_FILENAMES
    # A list of lists of parents for each 'src'. 'src' is included as a
    # "parent" of itself if it is a directory
    src_parents = [
        list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs
    ]
    # The deepest directory shared by all inputs (most path components wins).
    common_base = max(
        set.intersection(*(set(parents) for parents in src_parents)),
        key=lambda path: path.parts,
    )
    # Walk upwards from the common base looking for a project marker.
    for directory in (common_base, *common_base.parents):
        if (directory / ".git").exists():
            return directory, ".git directory"
        if (directory / ".hg").is_dir():
            return directory, ".hg directory"
        for cfg_file in cfg_files:
            # note that if cfg_file is already absolute, 'directory' is ignored
            resolved_cfg_path = directory / cfg_file
            if resolved_cfg_path.is_file():
                if os.path.isabs(cfg_file):
                    directory = Path(cfg_file).parent
                    # A config living in ``.config/`` belongs to its parent project.
                    if directory.name == ".config":
                        directory = directory.parent
                return directory, f"config file {resolved_cfg_path}"
    if not directory:
        return Path.cwd(), "current working directory"
    # No marker found anywhere up the tree; the last iterated directory is "/".
    return directory, "file system root"
def expand_dirs_in_lintables(lintables: set[Lintable]) -> None:
    """Return all recognized lintables within given directory."""
    # Only resolve the full file list when at least one directory is present,
    # because discovery is expensive (it relies on git).
    if not any(item.path.is_dir() for item in lintables):
        return
    all_files = discover_lintables(options)
    for item in copy.copy(lintables):
        if item.path.is_dir():
            for filename in all_files:
                if filename.startswith((str(item.path), str(item.path.absolute()))):
                    lintables.add(Lintable(filename))
def _guess_parent(lintable: Lintable) -> Lintable | None:
    """Return a parent directory for a lintable."""
    parents = lintable.path.parents
    try:
        # A path shaped like roles/<role_name>/<subdir>/<file> has "roles"
        # as its third ancestor.
        grandparent_is_roles = parents[2].name == "roles"
    except IndexError:
        return None
    if grandparent_is_roles:
        return Lintable(parents[1], kind="role")
    return None
def get_all_files(
    *paths: Path,
    exclude_paths: list[str] | None = None,
) -> list[Path]:
    """Recursively retrieve all files from given folders.

    Honors gitignore-style exclusions: a built-in list of cache/VCS
    directories plus ``exclude_paths``, and any ``.gitignore`` found at the
    top of each given path.
    """
    all_files: list[Path] = []
    exclude_paths = [] if exclude_paths is None else exclude_paths

    def is_excluded(path_to_check: Path) -> bool:
        """Check if a file is excluded by current specs."""
        # Closure over ``pathspecs``, which is rebound for each top-level path.
        return any(
            spec.match_file(pathspec.util.append_dir_sep(path_to_check))
            for spec in pathspecs
        )

    for path in paths:
        # Baseline exclusions: VCS metadata, tool caches, and caller-provided ones.
        pathspecs = [
            pathspec.GitIgnoreSpec.from_lines(
                [
                    ".git",
                    ".tox",
                    ".mypy_cache",
                    "__pycache__",
                    ".DS_Store",
                    ".coverage",
                    ".pytest_cache",
                    ".ruff_cache",
                    *exclude_paths,
                ],
            ),
        ]
        gitignore = path / ".gitignore"
        if gitignore.exists():
            with gitignore.open(encoding="UTF-8") as f:
                _logger.info("Loading ignores from %s", gitignore)
                pathspecs.append(
                    pathspec.GitIgnoreSpec.from_lines(f.read().splitlines()),
                )
        # Iterate over all items in the directory
        if path.is_file():
            all_files.append(path)
        else:
            for item in sorted(path.iterdir()):
                if is_excluded(item):
                    _logger.info("Excluded: %s", item)
                    continue
                if item.is_file():
                    all_files.append(item)
                # If it's a directory, recursively call the function
                elif item.is_dir():
                    all_files.extend(get_all_files(item, exclude_paths=exclude_paths))
    return all_files
| 21,740 | Python | .py | 519 | 31.678227 | 106 | 0.589439 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,236 | __init__.py | ansible_ansible-lint/src/ansiblelint/formatters/__init__.py | """Output formatters."""
from __future__ import annotations
import hashlib
import json
import os
from pathlib import Path
from typing import TYPE_CHECKING, Any, Generic, TypeVar
import rich
from ansiblelint.config import options
from ansiblelint.version import __version__
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
from ansiblelint.rules import BaseRule # type: ignore[attr-defined]
T = TypeVar("T", bound="BaseFormatter") # type: ignore[type-arg]
class BaseFormatter(Generic[T]):
    """Formatter of ansible-lint output.

    Base class for output formatters.

    Args:
    ----
        base_dir (str|Path): reference directory against which display relative path.
        display_relative_path (bool): whether to show path as relative or absolute

    """

    def __init__(self, base_dir: str | Path, display_relative_path: bool) -> None:
        """Initialize a BaseFormatter instance."""
        if isinstance(base_dir, str):
            base_dir = Path(base_dir)
        if base_dir:  # can be None
            base_dir = base_dir.absolute()
        # Only keep a reference directory when relative display was requested.
        self.base_dir = base_dir if display_relative_path else None

    def _format_path(self, path: str | Path) -> str | Path:
        # Nothing to relativize without a base dir, or with an empty path.
        if not self.base_dir or not path:
            return path
        # Use os.path.relpath 'cause Path.relative_to() misbehaves
        rel_path = os.path.relpath(path, start=self.base_dir)
        # Avoid returning relative paths that go outside of base_dir
        return path if rel_path.startswith("..") else rel_path

    def apply(self, match: MatchError) -> str:
        """Format a match error."""
        return str(match)

    @staticmethod
    def escape(text: str) -> str:
        """Escapes a string to avoid processing it as markup."""
        return rich.markup.escape(text)
class Formatter(BaseFormatter):  # type: ignore[type-arg]
    """Default output formatter of ansible-lint."""

    def apply(self, match: MatchError) -> str:
        """Render a match as rich console markup.

        :param match: The violation to render.
        :returns: A multi-line markup string with tag, message and location.
        """
        # Dropped the unused ``_id = getattr(match.rule, "id", "000")`` local
        # that the previous version assigned but never read.
        result = f"[{match.level}][bold][link={match.rule.url}]{self.escape(match.tag)}[/link][/][/][dim]:[/] [{match.level}]{self.escape(match.message)}[/]"
        if match.level != "error":
            result += f" [dim][{match.level}]({match.level})[/][/]"
        if match.ignored:
            result += " [dim]# ignored[/]"
        result += (
            "\n"
            f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}"
        )
        if match.details:
            result += f" [dim]{self.escape(str(match.details))}[/]"
        result += "\n"
        return result
class QuietFormatter(BaseFormatter[Any]):
    """Brief output formatter for ansible-lint."""

    def apply(self, match: MatchError) -> str:
        """Render only the rule id and the match location."""
        rule_part = f"[{match.level}]{match.rule.id}[/]"
        location_part = (
            f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}"
        )
        return f"{rule_part} {location_part}"
class ParseableFormatter(BaseFormatter[Any]):
    """Parseable uses PEP8 compatible format."""

    def apply(self, match: MatchError) -> str:
        """Render a match as ``file:line:col: tag: message`` style markup."""
        # Note: consults the module-global ``options.quiet`` to decide whether
        # the human-readable message is appended after the tag.
        result = (
            f"[filename]{self._format_path(match.filename or '')}[/][dim]:{match.position}:[/] "
            f"[{match.level}][bold]{self.escape(match.tag)}[/bold]"
            f"{ f': {match.message}' if not options.quiet else '' }[/]"
        )
        if match.level != "error":
            result += f" [dim][{match.level}]({match.level})[/][/]"
        return result
class AnnotationsFormatter(BaseFormatter):  # type: ignore[type-arg]
    # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-a-warning-message
    """Formatter for emitting violations as GitHub Workflow Commands.

    These commands trigger the GHA Workflow runners platform to post violations
    in a form of GitHub Checks API annotations that appear rendered in pull-
    request files view.

    ::debug file={name},line={line},col={col},severity={severity}::{message}
    ::warning file={name},line={line},col={col},severity={severity}::{message}
    ::error file={name},line={line},col={col},severity={severity}::{message}

    Supported levels: debug, warning, error
    """

    def apply(self, match: MatchError) -> str:
        """Prepare a match instance for reporting as a GitHub Actions annotation."""
        file_path = self._format_path(match.filename or "")
        # The column fragment is omitted entirely when no column is known.
        col_part = f",col={match.column}" if match.column else ""
        details = self.escape(match.message)
        return (
            f"::{match.level} file={file_path},line={match.lineno}{col_part}"
            f",severity={match.rule.severity},title={match.tag}"
            f"::{details}"
        )
class CodeclimateJSONFormatter(BaseFormatter[Any]):
    """Formatter for emitting violations in Codeclimate JSON report format.

    The formatter expects a list of MatchError objects and returns a JSON formatted string.

    The spec for the codeclimate report can be found here:
    https://github.com/codeclimate/platform/blob/master/spec/analyzers/SPEC.md#user-content-data-types
    """

    def format_result(self, matches: list[MatchError]) -> str:
        """Format a list of match errors as a JSON string."""
        if not isinstance(matches, list):
            msg = f"The {self.__class__} was expecting a list of MatchError."
            raise TypeError(msg)
        result = []
        for match in matches:
            issue: dict[str, Any] = {}
            issue["type"] = "issue"
            issue["check_name"] = match.tag or match.rule.id  # rule-id[subrule-id]
            issue["categories"] = match.rule.tags
            if match.rule.url:
                # https://github.com/codeclimate/platform/issues/68
                issue["url"] = match.rule.url
            issue["severity"] = self._remap_severity(match)
            issue["description"] = self.escape(str(match.message))
            # Stable per-violation identity, used by consumers to deduplicate.
            issue["fingerprint"] = hashlib.sha256(
                repr(match).encode("utf-8"),
            ).hexdigest()
            issue["location"] = {}
            issue["location"]["path"] = self._format_path(match.filename or "")
            if match.column:
                # With a column we can emit a precise begin position...
                issue["location"]["positions"] = {}
                issue["location"]["positions"]["begin"] = {}
                issue["location"]["positions"]["begin"]["line"] = match.lineno
                issue["location"]["positions"]["begin"]["column"] = match.column
            else:
                # ...otherwise only a line range is reported.
                issue["location"]["lines"] = {}
                issue["location"]["lines"]["begin"] = match.lineno
            if match.details:
                issue["content"] = {}
                issue["content"]["body"] = match.details
            # Append issue to result list
            result.append(issue)
        # Keep it single line due to https://github.com/ansible/ansible-navigator/issues/1490
        return json.dumps(result, sort_keys=False)

    @staticmethod
    def _remap_severity(match: MatchError) -> str:
        # level is not part of CodeClimate specification, but there is
        # no other way to expose that info. We recommend switching to
        # SARIF format which is better suited for interoperability.
        #
        # Our current implementation returns `minor` for all warnings and
        # `major` for everything else. We may revisit this in the future.
        if match.level == "warning":
            return "minor"
        return "major"
class SarifFormatter(BaseFormatter[Any]):
    """Formatter for emitting violations in SARIF report format.

    The spec of SARIF can be found here:
    https://docs.oasis-open.org/sarif/sarif/v2.1.0/
    """

    # Symbolic id for the report's root-directory URI (originalUriBaseIds).
    BASE_URI_ID = "SRCROOT"
    TOOL_NAME = "ansible-lint"
    TOOL_URL = "https://github.com/ansible/ansible-lint"
    SARIF_SCHEMA_VERSION = "2.1.0"
    SARIF_SCHEMA = (
        "https://schemastore.azurewebsites.net/schemas/json/sarif-2.1.0-rtm.5.json"
    )

    def format_result(self, matches: list[MatchError]) -> str:
        """Format a list of match errors as a JSON string."""
        if not isinstance(matches, list):
            msg = f"The {self.__class__} was expecting a list of MatchError."
            raise TypeError(msg)
        # SARIF base URIs must end with a slash.
        root_path = Path(str(self.base_dir)).as_uri()
        root_path = root_path + "/" if not root_path.endswith("/") else root_path
        rules, results = self._extract_results(matches)
        tool = {
            "driver": {
                "name": self.TOOL_NAME,
                "version": __version__,
                "informationUri": self.TOOL_URL,
                "rules": rules,
            },
        }
        runs = [
            {
                "tool": tool,
                "columnKind": "utf16CodeUnits",
                "results": results,
                "originalUriBaseIds": {
                    self.BASE_URI_ID: {"uri": root_path},
                },
            },
        ]
        report = {
            "$schema": self.SARIF_SCHEMA,
            "version": self.SARIF_SCHEMA_VERSION,
            "runs": runs,
        }
        # Keep it single line due to https://github.com/ansible/ansible-navigator/issues/1490
        return json.dumps(report, sort_keys=False)

    def _extract_results(
        self,
        matches: list[MatchError],
    ) -> tuple[list[Any], list[Any]]:
        """Split matches into unique rule descriptors and per-match results."""
        rules = {}
        results = []
        for match in matches:
            # Each distinct tag is emitted once in the driver's rule list.
            if match.tag not in rules:
                rules[match.tag] = self._to_sarif_rule(match)
            results.append(self._to_sarif_result(match))
        return list(rules.values()), results

    def _to_sarif_rule(self, match: MatchError) -> dict[str, Any]:
        """Build the SARIF reportingDescriptor for a match's rule."""
        rule: dict[str, Any] = {
            "id": match.tag,
            "name": match.tag,
            "shortDescription": {
                "text": str(match.message),
            },
            "defaultConfiguration": {
                "level": self.get_sarif_rule_severity_level(match.rule),
            },
            "help": {
                "text": str(match.rule.description),
            },
            "helpUri": match.rule.url,
            "properties": {"tags": match.rule.tags},
        }
        return rule

    def _to_sarif_result(self, match: MatchError) -> dict[str, Any]:
        """Build the SARIF result object for a single match."""
        # https://docs.oasis-open.org/sarif/sarif/v2.1.0/errata01/os/sarif-v2.1.0-errata01-os-complete.html#_Toc141790898
        if match.level not in ("warning", "error", "note", "none"):
            msg = "Unexpected failure to map '%s' level to SARIF."
            # NOTE(review): the '%s' placeholder is never interpolated here;
            # ``msg`` and ``match.level`` are passed as separate RuntimeError
            # args. Consider ``RuntimeError(msg % match.level)``.
            raise RuntimeError(
                msg,
                match.level,
            )
        result: dict[str, Any] = {
            "ruleId": match.tag,
            "level": self.get_sarif_result_severity_level(match),
            "message": {
                # Prefer the detailed text; fall back to the short message.
                "text": (
                    str(match.details) if str(match.details) else str(match.message)
                ),
            },
            "locations": [
                {
                    "physicalLocation": {
                        "artifactLocation": {
                            "uri": self._format_path(match.filename or ""),
                            "uriBaseId": self.BASE_URI_ID,
                        },
                        "region": {
                            "startLine": match.lineno,
                        },
                    },
                },
            ],
        }
        if match.column:
            result["locations"][0]["physicalLocation"]["region"][
                "startColumn"
            ] = match.column
        return result

    @staticmethod
    def get_sarif_rule_severity_level(rule: BaseRule) -> str:
        """General SARIF severity level for a rule.

        Note: Can differ from an actual result/match severity.
        Possible values: "none", "note", "warning", "error"
        see: https://github.com/oasis-tcs/sarif-spec/blob/123e95847b13fbdd4cbe2120fa5e33355d4a042b/Schemata/sarif-schema-2.1.0.json#L1934-L1939
        """
        if rule.severity in ["VERY_HIGH", "HIGH"]:
            return "error"
        if rule.severity in ["MEDIUM", "LOW", "VERY_LOW"]:
            return "warning"
        if rule.severity == "INFO":
            return "note"
        return "none"

    @staticmethod
    def get_sarif_result_severity_level(match: MatchError) -> str:
        """SARIF severity level for an actual result/match.

        Possible values: "none", "note", "warning", "error"
        see: https://github.com/oasis-tcs/sarif-spec/blob/123e95847b13fbdd4cbe2120fa5e33355d4a042b/Schemata/sarif-schema-2.1.0.json#L2066-L2071
        """
        if not match.level:
            return "none"
        if match.level in ["warning", "error"]:
            return match.level
        return "note"
| 12,817 | Python | .py | 288 | 34.118056 | 157 | 0.582464 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,237 | conftest.py | ansible_ansible-lint/src/ansiblelint/rules/conftest.py | """Makes pytest fixtures available."""
# pylint: disable=wildcard-import,unused-wildcard-import
from ansiblelint.testing.fixtures import * # noqa: F403
| 154 | Python | .py | 3 | 50 | 56 | 0.793333 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,238 | no_log_password.py | ansible_ansible-lint/src/ansiblelint/rules/no_log_password.py | # Copyright 2018, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NoLogPasswordsRule used with ansible-lint."""
from __future__ import annotations
import os
import sys
from pathlib import Path
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule, RulesCollection, TransformMixin
from ansiblelint.runner import get_matches
from ansiblelint.transformer import Transformer
from ansiblelint.utils import Task, convert_to_boolean
if TYPE_CHECKING:
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ansiblelint.config import Options
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
class NoLogPasswordsRule(AnsibleLintRule, TransformMixin):
    """Password should not be logged."""

    id = "no-log-password"
    description = (
        "When passing password argument you should have no_log configured "
        "to a non False value to avoid accidental leaking of secrets."
    )
    severity = "LOW"
    tags = ["opt-in", "security", "experimental"]
    version_added = "v5.0.9"

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Flag looped tasks that pass a password without a truthy no_log.

        :param task: Normalized task to inspect.
        :param file: File being linted (unused).
        :returns: True when the task should be reported.
        """
        # Special case: ``user`` with password_lock but no password does not
        # actually handle a secret.
        if task["action"]["__ansible_module_original__"] == "ansible.builtin.user" and (
            task["action"].get("password_lock") and not task["action"].get("password")
        ):
            has_password = False
        else:
            # Any parameter name containing "password" counts as a secret.
            for param in task["action"]:
                if "password" in param:
                    has_password = True
                    break
            else:
                has_password = False
        # The rule only applies when the task loops over items.
        has_loop = [key for key in task if key.startswith("with_") or key == "loop"]
        # No no_log and no_log: False behave the same way
        # and should return a failure (return True), so we
        # need to invert the boolean
        no_log = task.get("no_log", False)
        if (
            isinstance(no_log, str)
            and no_log.startswith("{{")
            and no_log.endswith("}}")
        ):
            # we cannot really evaluate jinja expressions
            return False
        return bool(
            has_password and not convert_to_boolean(no_log) and len(has_loop) > 0,
        )

    def transform(
        self,
        match: MatchError,
        lintable: Lintable,
        data: CommentedMap | CommentedSeq | str,
    ) -> None:
        """Auto-fix a match by setting ``no_log: true`` on the task.

        :param match: The match to fix.
        :param lintable: The file being transformed.
        :param data: Parsed YAML document to mutate in place.
        """
        if match.tag == self.id:
            task = self.seek(match.yaml_path, data)
            task["no_log"] = True
            match.fixed = True
if "pytest" in sys.modules:
from unittest import mock
import pytest
if TYPE_CHECKING:
from ansiblelint.testing import RunFromText
NO_LOG_UNUSED = """
- name: Test
hosts: all
tasks:
- name: Succeed when no_log is not used but no loop present
ansible.builtin.user:
name: john_doe
password: "wow"
state: absent
"""
NO_LOG_FALSE = """
- hosts: all
tasks:
- name: Use of jinja for no_log is valid
user:
name: john_doe
user_password: "{{ item }}"
state: absent
no_log: "{{ False }}"
- name: Fail when no_log is set to False
user:
name: john_doe
user_password: "{{ item }}"
state: absent
with_items:
- wow
- now
no_log: False
- name: Fail when no_log is set to False
ansible.builtin.user:
name: john_doe
user_password: "{{ item }}"
state: absent
with_items:
- wow
- now
no_log: False
"""
NO_LOG_NO = """
- hosts: all
tasks:
- name: Fail when no_log is set to no
user:
name: john_doe
password: "{{ item }}"
state: absent
no_log: no
loop:
- wow
- now
"""
PASSWORD_WITH_LOCK = """
- hosts: all
tasks:
- name: Fail when password is set and password_lock is true
user:
name: "{{ item }}"
password: "wow"
password_lock: true
with_random_choice:
- ansible
- lint
"""
NO_LOG_YES = """
- hosts: all
tasks:
- name: Succeed when no_log is set to yes
with_list:
- name: user
password: wow
- password: now
name: ansible
user:
name: "{{ item.name }}"
password: "{{ item.password }}"
state: absent
no_log: yes
"""
NO_LOG_TRUE = """
- hosts: all
tasks:
- name: Succeed when no_log is set to True
user:
name: john_doe
user_password: "{{ item }}"
state: absent
no_log: True
loop:
- wow
- now
"""
PASSWORD_LOCK_YES = """
- hosts: all
tasks:
- name: Succeed when only password locking account
user:
name: "{{ item }}"
password_lock: yes
# user_password: "this is a comment, not a password"
with_list:
- ansible
- lint
"""
PASSWORD_LOCK_YES_BUT_NO_PASSWORD = """
- hosts: all
tasks:
- name: Succeed when only password locking account
ansible.builtin.user:
name: "{{ item }}"
password_lock: yes
# user_password: "this is a comment, not a password"
with_list:
- ansible
- lint
"""
PASSWORD_LOCK_FALSE = """
- hosts: all
tasks:
- name: Succeed when password_lock is false and password is not used
user:
name: lint
password_lock: False
"""
@pytest.mark.parametrize(
    "rule_runner",
    (NoLogPasswordsRule,),
    indirect=["rule_runner"],
)
def test_no_log_unused(rule_runner: RunFromText) -> None:
    """The task does not use no_log but also no loop.

    Without a loop the rule must stay silent even though a password is set.
    """
    results = rule_runner.run_playbook(NO_LOG_UNUSED)
    assert len(results) == 0
@pytest.mark.parametrize(
    "rule_runner",
    (NoLogPasswordsRule,),
    indirect=["rule_runner"],
)
def test_no_log_false(rule_runner: RunFromText) -> None:
    """The task sets no_log to false.

    Both the short-name and FQCN ``user`` tasks must be flagged; the jinja
    ``no_log`` task is skipped.
    """
    results = rule_runner.run_playbook(NO_LOG_FALSE)
    assert len(results) == 2
    for result in results:
        assert result.rule.id == "no-log-password"
@pytest.mark.parametrize(
    "rule_runner",
    (NoLogPasswordsRule,),
    indirect=["rule_runner"],
)
def test_no_log_no(rule_runner: RunFromText) -> None:
    """The task sets no_log to no, which is equivalent to false."""
    results = rule_runner.run_playbook(NO_LOG_NO)
    assert len(results) == 1
    assert results[0].rule.id == "no-log-password"
@pytest.mark.parametrize(
    "rule_runner",
    (NoLogPasswordsRule,),
    indirect=["rule_runner"],
)
def test_password_with_lock(rule_runner: RunFromText) -> None:
    """The task sets a password but also lock the user.

    Passing an actual password alongside password_lock must still be flagged.
    """
    results = rule_runner.run_playbook(PASSWORD_WITH_LOCK)
    assert len(results) == 1
    assert results[0].rule.id == "no-log-password"
@pytest.mark.parametrize(
    "rule_runner",
    (NoLogPasswordsRule,),
    indirect=["rule_runner"],
)
def test_no_log_yes(rule_runner: RunFromText) -> None:
    """The task sets no_log to yes, a truthy value that silences the rule."""
    results = rule_runner.run_playbook(NO_LOG_YES)
    assert len(results) == 0
@pytest.mark.parametrize(
    "rule_runner",
    (NoLogPasswordsRule,),
    indirect=["rule_runner"],
)
def test_no_log_true(rule_runner: RunFromText) -> None:
    """The task sets no_log to True, a truthy value that silences the rule."""
    results = rule_runner.run_playbook(NO_LOG_TRUE)
    assert len(results) == 0
@pytest.mark.parametrize(
    "rule_runner",
    (NoLogPasswordsRule,),
    indirect=["rule_runner"],
)
def test_no_log_password_lock_yes(rule_runner: RunFromText) -> None:
    """The task only locks the user (short module name), no secret involved."""
    results = rule_runner.run_playbook(PASSWORD_LOCK_YES)
    assert len(results) == 0
@pytest.mark.parametrize(
    "rule_runner",
    (NoLogPasswordsRule,),
    indirect=["rule_runner"],
)
def test_no_log_password_lock_yes_but_no_password(rule_runner: RunFromText) -> None:
    """The task only locks the user (FQCN module name), no secret involved."""
    results = rule_runner.run_playbook(PASSWORD_LOCK_YES_BUT_NO_PASSWORD)
    assert len(results) == 0
@pytest.mark.parametrize(
    "rule_runner",
    (NoLogPasswordsRule,),
    indirect=["rule_runner"],
)
def test_password_lock_false(rule_runner: RunFromText) -> None:
    """The task does not actually lock the user and passes no password."""
    results = rule_runner.run_playbook(PASSWORD_LOCK_FALSE)
    assert len(results) == 0
# ANSIBLE_LINT_WRITE_TMP=1 makes Lintable.write() emit a *.tmp sibling file
# instead of modifying the example playbook in place.
@mock.patch.dict(os.environ, {"ANSIBLE_LINT_WRITE_TMP": "1"}, clear=True)
def test_no_log_password_transform(
    config_options: Options,
) -> None:
    """Test transform functionality for no-log-password rule.

    Runs the transformer over the example playbook and compares the produced
    ``.tmp`` output against the checked-in ``.transformed`` expectation.
    """
    playbook = Path("examples/playbooks/transform-no-log-password.yml")
    config_options.write_list = ["all"]
    rules = RulesCollection(options=config_options)
    rules.register(NoLogPasswordsRule())
    config_options.lintables = [str(playbook)]
    runner_result = get_matches(rules=rules, options=config_options)
    transformer = Transformer(result=runner_result, options=config_options)
    transformer.run()
    matches = runner_result.matches
    assert len(matches) == 2
    orig_content = playbook.read_text(encoding="utf-8")
    expected_content = playbook.with_suffix(
        f".transformed{playbook.suffix}",
    ).read_text(encoding="utf-8")
    transformed_content = playbook.with_suffix(f".tmp{playbook.suffix}").read_text(
        encoding="utf-8",
    )
    assert orig_content != transformed_content
    assert expected_content == transformed_content
    # Clean up the temporary artifact so reruns start fresh.
    playbook.with_suffix(f".tmp{playbook.suffix}").unlink()
| 10,443 | Python | .py | 317 | 25.731861 | 88 | 0.610808 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,239 | meta_runtime.py | ansible_ansible-lint/src/ansiblelint/rules/meta_runtime.py | """Implementation of meta-runtime rule."""
from __future__ import annotations
import sys
from typing import TYPE_CHECKING
from packaging.specifiers import SpecifierSet
from ansiblelint.rules import AnsibleLintRule
# Copyright (c) 2018, Ansible Project
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
class CheckRequiresAnsibleVersion(AnsibleLintRule):
    """Required ansible version in meta/runtime.yml must be a supported version."""

    id = "meta-runtime"
    description = (
        "The ``requires_ansible`` key in runtime.yml must specify "
        "a supported platform version of ansible-core and be a valid version value "
        "in x.y.z format."
    )
    severity = "VERY_HIGH"
    tags = ["metadata"]
    version_added = "v6.11.0 (last update)"

    # Sub-tag ids reported by this rule, mapped to their messages.
    _ids = {
        "meta-runtime[unsupported-version]": "'requires_ansible' key must refer to a currently supported version",
        "meta-runtime[invalid-version]": "'requires_ansible' is not a valid requirement specification",
    }

    def matchyaml(self, file: Lintable) -> list[MatchError]:
        """Find violations inside meta files.

        :param file: Input lintable file that is a match for `meta-runtime`
        :returns: List of errors matched to the input file
        """
        results = []
        if file.kind != "meta-runtime":
            return []
        requires_ansible = file.data.get("requires_ansible", None)
        if requires_ansible:
            # Substring check: a supported version such as "2.16" must occur
            # somewhere inside the requirement spec (e.g. ">=2.16.0").
            if self.options and not any(
                version in requires_ansible
                for version in self.options.supported_ansible
            ):
                supported_ansible = [f">={x}0" for x in self.options.supported_ansible]
                msg = f"'requires_ansible' key must refer to a currently supported version such as: {', '.join(supported_ansible)}"
                results.append(
                    self.create_matcherror(
                        message=msg,
                        tag="meta-runtime[unsupported-version]",
                        filename=file,
                    ),
                )
            # Independently validate that the value parses as a PEP 440
            # specifier set.
            try:
                SpecifierSet(requires_ansible)
            except ValueError:
                results.append(
                    self.create_matcherror(
                        message="'requires_ansible' is not a valid requirement specification",
                        tag="meta-runtime[invalid-version]",
                        filename=file,
                    ),
                )
        return results
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
import pytest
# pylint: disable=ungrouped-imports
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
@pytest.mark.parametrize(
    ("test_file", "failures", "tags"),
    (
        pytest.param(
            "examples/meta_runtime_version_checks/pass_0/meta/runtime.yml",
            0,
            "meta-runtime[unsupported-version]",
            id="pass0",
        ),
        pytest.param(
            "examples/meta_runtime_version_checks/fail_0/meta/runtime.yml",
            1,
            "meta-runtime[unsupported-version]",
            id="fail0",
        ),
        pytest.param(
            "examples/meta_runtime_version_checks/fail_1/meta/runtime.yml",
            1,
            "meta-runtime[unsupported-version]",
            id="fail1",
        ),
        pytest.param(
            "examples/meta_runtime_version_checks/fail_2/meta/runtime.yml",
            1,
            "meta-runtime[invalid-version]",
            id="fail2",
        ),
    ),
)
def test_default_meta_supported_version(
    default_rules_collection: RulesCollection,
    test_file: str,
    failures: int,
    tags: str,
) -> None:
    """Test for default supported ansible versions.

    Each example runtime.yml is linted and must produce exactly ``failures``
    matches, all tagged with ``tags``.
    """
    default_rules_collection.register(CheckRequiresAnsibleVersion())
    results = Runner(test_file, rules=default_rules_collection).run()
    for result in results:
        assert result.rule.id == CheckRequiresAnsibleVersion().id
        assert result.tag == tags
    assert len(results) == failures
@pytest.mark.parametrize(
    ("test_file", "failures"),
    (
        pytest.param(
            "examples/meta_runtime_version_checks/pass_1/meta/runtime.yml",
            0,
            id="pass1",
        ),
    ),
)
def test_added_meta_supported_version(
    default_rules_collection: RulesCollection,
    test_file: str,
    failures: int,
) -> None:
    """Test for added supported ansible versions in the config.

    Adding "2.9" to ``supported_ansible_also`` makes the otherwise
    unsupported example pass without matches.
    """
    default_rules_collection.register(CheckRequiresAnsibleVersion())
    default_rules_collection.options.supported_ansible_also = ["2.9"]
    results = Runner(test_file, rules=default_rules_collection).run()
    assert len(results) == failures
| 5,134 | Python | .py | 127 | 29.251969 | 131 | 0.588802 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,240 | command_instead_of_shell.py | ansible_ansible-lint/src/ansiblelint/rules/command_instead_of_shell.py | """Implementation of command-instead-of-shell rule."""
# Copyright (c) 2016 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import annotations
import sys
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule, TransformMixin
from ansiblelint.utils import get_cmd_args
if TYPE_CHECKING:
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class UseCommandInsteadOfShellRule(AnsibleLintRule, TransformMixin):
    """Use shell only when shell functionality is required."""

    id = "command-instead-of-shell"
    description = (
        "Shell should only be used when piping, redirecting "
        "or chaining commands (and Ansible would be preferred "
        "for some of those!)"
    )
    severity = "HIGH"
    tags = ["command-shell", "idiom"]
    version_added = "historic"

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Check if a shell module is used instead of an appropriate command.

        :param task: Task to check for shell usage
        :param file: File to lint
        :returns: False if shell module isn't used, or string output of where it is used
        """
        module = task["action"]["__ansible_module__"]
        if module not in ("shell", "ansible.builtin.shell"):
            return False
        # Since Ansible 2.4, the `command` module does not accept setting
        # the `executable`. If the user needs to set it, they have to use
        # the `shell` module.
        if "executable" in task["action"]:
            return False
        # Strip jinja expressions first so that pipes used as jinja filters
        # are not mistaken for shell pipes.
        stripped_cmd = self.unjinja(get_cmd_args(task))
        shell_only_chars = "&|<>;$\n*[]{}?`"
        return all(ch not in stripped_cmd for ch in shell_only_chars)

    def transform(
        self,
        match: MatchError,
        lintable: Lintable,
        data: CommentedMap | CommentedSeq | str,
    ) -> None:
        """Transform the data.

        :param match: The match to transform.
        :param lintable: The file to transform.
        :param data: The data to transform.
        """
        if match.tag != "command-instead-of-shell":
            return
        target_task = self.seek(match.yaml_path, data)
        # Rotate every key through popitem/insert so the mapping keeps its
        # original key order while the shell action key is renamed.
        for _ in range(len(target_task)):
            key, value = target_task.popitem(False)
            new_key = "ansible.builtin.command" if "shell" in key else key
            target_task[new_key] = value
        match.fixed = True
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
    import pytest

    # pylint: disable=ungrouped-imports
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    @pytest.mark.parametrize(
        ("file", "expected"),
        (
            pytest.param(
                "examples/playbooks/rule-command-instead-of-shell-pass.yml",
                0,
                id="good",
            ),
            pytest.param(
                "examples/playbooks/rule-command-instead-of-shell-fail.yml",
                3,
                id="bad",
            ),
        ),
    )
    def test_rule_command_instead_of_shell(
        default_rules_collection: RulesCollection,
        file: str,
        expected: int,
    ) -> None:
        """Validate that rule works as intended.

        :param default_rules_collection: Default rules for testing
        :param file: Test file to check for violations
        :param expected: Expected number of errors
        """
        matches = Runner(file, rules=default_rules_collection).run()
        # Every reported match must come from this rule.
        for match in matches:
            assert match.rule.id == UseCommandInsteadOfShellRule.id, match
        assert len(matches) == expected
| 4,928 | Python | .py | 114 | 35.789474 | 88 | 0.664929 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,241 | partial_become.py | ansible_ansible-lint/src/ansiblelint/rules/partial_become.py | """Implementation of partial-become rule."""
# Copyright (c) 2016 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import annotations
import sys
from typing import TYPE_CHECKING, Any
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ansiblelint.constants import LINE_NUMBER_KEY
from ansiblelint.rules import AnsibleLintRule, TransformMixin
if TYPE_CHECKING:
from collections.abc import Iterator
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class BecomeUserWithoutBecomeRule(AnsibleLintRule, TransformMixin):
    """``become_user`` should have a corresponding ``become`` at the play or task level."""

    id = "partial-become"
    description = "``become_user`` should have a corresponding ``become`` at the play or task level."
    severity = "VERY_HIGH"
    tags = ["unpredictability"]
    version_added = "historic"

    def matchplay(
        self: BecomeUserWithoutBecomeRule,
        file: Lintable,
        data: dict[str, Any],
    ) -> list[MatchError]:
        """Match become_user without become in play.

        :param file: The file to lint.
        :param data: The data to lint (play)
        :returns: A list of errors.
        """
        if file.kind != "playbook":
            return []
        errors = []
        # A play is "partial" when it sets become_user but never sets become:
        # the become_user value then has no effect on its own.
        partial = "become_user" in data and "become" not in data
        if partial:
            error = self.create_matcherror(
                message=self.shortdesc,
                filename=file,
                tag=f"{self.id}[play]",
                lineno=data[LINE_NUMBER_KEY],
            )
            errors.append(error)
        return errors

    def matchtask(
        self: BecomeUserWithoutBecomeRule,
        task: Task,
        file: Lintable | None = None,
    ) -> list[MatchError]:
        """Match become_user without become in task.

        :param task: The task to lint.
        :param file: The file to lint.
        :returns: A list of errors.
        """
        data = task.normalized_task
        errors = []
        # Same "partial" notion as in matchplay, applied at task level.
        partial = "become_user" in data and "become" not in data
        if partial:
            error = self.create_matcherror(
                message=self.shortdesc,
                filename=file,
                tag=f"{self.id}[task]",
                lineno=task[LINE_NUMBER_KEY],
            )
            errors.append(error)
        return errors

    def _dive(self: BecomeUserWithoutBecomeRule, data: CommentedSeq) -> Iterator[Any]:
        """Dive into the data and yield each item.

        :param data: The data to dive into.
        :yield: Each item in the data.
        """
        # Recurse through block/rescue/always sections first, then yield the
        # container item itself, so callers see every nested task.
        for item in data:
            for nested in ("block", "rescue", "always"):
                if nested in item:
                    yield from self._dive(item[nested])
            yield item

    def transform(
        self: BecomeUserWithoutBecomeRule,
        match: MatchError,
        lintable: Lintable,
        data: CommentedMap | CommentedSeq | str,
    ) -> None:
        """Transform the data.

        :param match: The match to transform.
        :param lintable: The file to transform.
        :param data: The data to transform.
        """
        # Only playbooks (a sequence of plays) are transformed here.
        if not isinstance(data, CommentedSeq):
            return
        obj = self.seek(match.yaml_path, data)
        # Already consistent (both keys present, or both absent): done.
        if "become" in obj and "become_user" in obj:
            match.fixed = True
            return
        if "become" not in obj and "become_user" not in obj:
            match.fixed = True
            return
        self._transform_plays(plays=data)
        # Re-check after transforming: the transform may bail out (e.g. on
        # include_* or notify), so only mark fixed when the matched node
        # actually ended up consistent.
        if "become" in obj and "become_user" in obj:
            match.fixed = True
            return
        if "become" not in obj and "become_user" not in obj:
            match.fixed = True
            return

    def is_ineligible_for_transform(
        self: BecomeUserWithoutBecomeRule,
        data: CommentedMap,
    ) -> bool:
        """Check if the data is eligible for transformation.

        :param data: The data to check.
        :returns: True if ineligible, False otherwise.
        """
        # Tasks using any include_* keyword or defining notify are left
        # untouched — presumably because the included/notified content is not
        # visible here (NOTE(review): confirm rationale for the notify case).
        if any("include" in key for key in data):
            return True
        return "notify" in data

    def _transform_plays(self, plays: CommentedSeq) -> None:
        """Transform the plays.

        :param plays: The plays to transform.
        """
        for play in plays:
            self._transform_play(play=play)

    def _transform_play(self, play: CommentedMap) -> None:
        """Transform the play.

        :param play: The play to transform.
        """
        # Ensure we have no includes in this play
        task_groups = ("tasks", "pre_tasks", "post_tasks", "handlers")
        for task_group in task_groups:
            tasks = self._dive(play.get(task_group, []))
            for task in tasks:
                if self.is_ineligible_for_transform(task):
                    return
        remove_play_become_user = False
        for task_group in task_groups:
            tasks = self._dive(play.get(task_group, []))
            for task in tasks:
                b_in_t = "become" in task
                bu_in_t = "become_user" in task
                b_in_p = "become" in play
                bu_in_p = "become_user" in play
                if b_in_t and not bu_in_t and bu_in_p:
                    # Task sets become only: push the play's become_user down
                    # into the task, right after its become key.
                    # Preserve the end comment if become is the last key
                    comment = None
                    if list(task.keys())[-1] == "become" and "become" in task.ca.items:
                        comment = task.ca.items.pop("become")
                    become_index = list(task.keys()).index("become")
                    task.insert(become_index + 1, "become_user", play["become_user"])
                    if comment:
                        self._attach_comment_end(task, comment)
                    remove_play_become_user = True
                if bu_in_t and not b_in_t and b_in_p:
                    # Task sets become_user only: copy the play's become in
                    # front of it so the pair stays together.
                    become_user_index = list(task.keys()).index("become_user")
                    task.insert(become_user_index, "become", play["become"])
                if bu_in_t and not b_in_t and not b_in_p:
                    # Neither the task nor the play sets become: the task's
                    # become_user has no effect, so drop it.
                    # Preserve the end comment if become_user is the last key
                    comment = None
                    if (
                        list(task.keys())[-1] == "become_user"
                        and "become_user" in task.ca.items
                    ):
                        comment = task.ca.items.pop("become_user")
                    task.pop("become_user")
                    if comment:
                        self._attach_comment_end(task, comment)
        if remove_play_become_user:
            # become_user was propagated into every become-only task above.
            del play["become_user"]

    def _attach_comment_end(
        self,
        obj: CommentedMap | CommentedSeq,
        comment: Any,
    ) -> None:
        """Attach a comment to the end of the object.

        :param obj: The object to attach the comment to.
        :param comment: The comment to attach.
        """
        # Walk down to the deepest last scalar entry and re-attach the comment
        # there so it renders after the whole structure when dumped.
        if isinstance(obj, CommentedMap):
            last = list(obj.keys())[-1]
            if not isinstance(obj[last], CommentedSeq | CommentedMap):
                obj.ca.items[last] = comment
                return
            self._attach_comment_end(obj[last], comment)
        elif isinstance(obj, CommentedSeq):
            if not isinstance(obj[-1], CommentedSeq | CommentedMap):
                obj.ca.items[len(obj)] = comment
                return
            self._attach_comment_end(obj[-1], comment)
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    def _collection_with_rule() -> RulesCollection:
        """Build a rules collection containing only the rule under test."""
        rules = RulesCollection()
        rules.register(BecomeUserWithoutBecomeRule())
        return rules

    def test_partial_become_pass() -> None:
        """No errors found for partial-become."""
        playbook = "examples/playbooks/rule-partial-become-without-become-pass.yml"
        matches = Runner(playbook, rules=_collection_with_rule()).run()
        assert matches == []

    def test_partial_become_fail() -> None:
        """Errors found for partial-become."""
        playbook = "examples/playbooks/rule-partial-become-without-become-fail.yml"
        matches = Runner(playbook, rules=_collection_with_rule()).run()
        assert len(matches) == 3
| 9,542 | Python | .py | 221 | 32.945701 | 101 | 0.603187 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,242 | inline_env_var.py | ansible_ansible-lint/src/ansiblelint/rules/inline_env_var.py | """Implementation of inside-env-var rule."""
# Copyright (c) 2016 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import annotations
from typing import TYPE_CHECKING
from ansiblelint.constants import FILENAME_KEY, LINE_NUMBER_KEY
from ansiblelint.rules import AnsibleLintRule
from ansiblelint.utils import Task, get_first_cmd_arg
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
class EnvVarsInCommandRule(AnsibleLintRule):
    """Command module does not accept setting environment variables inline."""

    id = "inline-env-var"
    description = (
        "Use ``environment:`` to set environment variables "
        "or use ``shell`` module which accepts both"
    )
    severity = "VERY_HIGH"
    tags = ["command-shell", "idiom"]
    version_added = "historic"

    # Arguments the command module is expected to carry; anything outside this
    # list (or a leading KEY=value token) indicates an inline env variable.
    expected_args = [
        "chdir",
        "creates",
        "executable",
        "removes",
        "stdin",
        "stdin_add_newline",
        "strip_empty_ends",
        "cmd",
        "__ansible_module__",
        "__ansible_module_original__",
        "_raw_params",
        LINE_NUMBER_KEY,
        FILENAME_KEY,
    ]

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        if task["action"]["__ansible_module__"] not in ["command"]:
            return False
        first_cmd_arg = get_first_cmd_arg(task)
        if not first_cmd_arg:
            return False
        has_unexpected_arg = any(
            arg not in self.expected_args for arg in task["action"]
        )
        return has_unexpected_arg or "=" in first_cmd_arg
| 2,648 | Python | .py | 66 | 34.363636 | 79 | 0.682348 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,243 | complexity.py | ansible_ansible-lint/src/ansiblelint/rules/complexity.py | """Implementation of limiting number of tasks."""
from __future__ import annotations
import re
import sys
from typing import TYPE_CHECKING, Any
from ansiblelint.constants import LINE_NUMBER_KEY
from ansiblelint.rules import AnsibleLintRule, RulesCollection
if TYPE_CHECKING:
from ansiblelint.config import Options
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class ComplexityRule(AnsibleLintRule):
    """Rule for limiting number of tasks inside a file."""

    id = "complexity"
    description = "There should be limited tasks executed inside any file"
    severity = "MEDIUM"
    tags = ["experimental", "idiom"]
    version_added = "v6.18.0 (last update)"
    _re_templated_inside = re.compile(r".*\{\{.*\}\}.*\w.*$")

    def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]:
        """Call matchplay for up to no_of_max_tasks inside file and return aggregate results."""
        if file.kind != "playbook":
            return []
        tasks = data.get("tasks", [])
        if not isinstance(self._collection, RulesCollection):
            msg = "Rules cannot be run outside a rule collection."
            raise TypeError(msg)
        matches: list[MatchError] = []
        max_tasks = self._collection.options.max_tasks
        if len(tasks) > max_tasks:
            matches.append(
                self.create_matcherror(
                    message=f"Maximum tasks allowed in a play is {max_tasks}.",
                    lineno=data[LINE_NUMBER_KEY],
                    tag=f"{self.id}[play]",
                    filename=file,
                ),
            )
        return matches

    def matchtask(self, task: Task, file: Lintable | None = None) -> list[MatchError]:
        """Check if the task is a block and count the number of items inside it."""
        if not isinstance(self._collection, RulesCollection):
            msg = "Rules cannot be run outside a rule collection."
            raise TypeError(msg)
        matches: list[MatchError] = []
        if task.action == "block/always/rescue":
            max_depth = self._collection.options.max_block_depth
            if self.calculate_block_depth(task) > max_depth:
                matches.append(
                    self.create_matcherror(
                        message=f"Replace nested block with an include_tasks to make code easier to maintain. Maximum block depth allowed is {max_depth}.",
                        lineno=task[LINE_NUMBER_KEY],
                        tag=f"{self.id}[nesting]",
                        filename=file,
                    ),
                )
        return matches

    def calculate_block_depth(self, task: Task) -> int:
        """Recursively calculate the block depth of a task."""
        if not isinstance(task.position, str):
            raise NotImplementedError
        # Each nested block contributes one ".block" segment to the position.
        return task.position.count(".block")
if "pytest" in sys.modules:
import pytest
# pylint: disable=ungrouped-imports
from ansiblelint.runner import Runner
@pytest.mark.parametrize(
("file", "expected_results"),
(
pytest.param(
"examples/playbooks/rule-complexity-pass.yml",
[],
id="pass",
),
pytest.param(
"examples/playbooks/rule-complexity-fail.yml",
["complexity[play]", "complexity[nesting]"],
id="fail",
),
),
)
def test_complexity(
file: str,
expected_results: list[str],
monkeypatch: pytest.MonkeyPatch,
config_options: Options,
) -> None:
"""Test rule."""
monkeypatch.setattr(config_options, "max_tasks", 5)
monkeypatch.setattr(config_options, "max_block_depth", 3)
collection = RulesCollection(options=config_options)
collection.register(ComplexityRule())
results = Runner(file, rules=collection).run()
assert len(results) == len(expected_results)
for i, result in enumerate(results):
assert result.rule.id == ComplexityRule.id, result
assert result.tag == expected_results[i]
| 4,260 | Python | .py | 97 | 33.536082 | 186 | 0.60965 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,244 | meta_no_tags.py | ansible_ansible-lint/src/ansiblelint/rules/meta_no_tags.py | """Implementation of meta-no-tags rule."""
from __future__ import annotations
import re
import sys
from pathlib import Path
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
# Copyright (c) 2018, Ansible Project
if TYPE_CHECKING:
from typing import Any
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.testing import RunFromText
class MetaTagValidRule(AnsibleLintRule):
    """Tags must contain lowercase letters and digits only."""

    id = "meta-no-tags"
    description = (
        "Tags must contain lowercase letters and digits only, "
        "and ``galaxy_tags`` is expected to be a list"
    )
    severity = "HIGH"
    tags = ["metadata"]
    version_added = "v4.0.0"

    TAG_REGEXP = re.compile("^[a-z0-9]+$")

    def matchyaml(self, file: Lintable) -> list[MatchError]:
        """Find violations inside meta files."""
        if file.kind != "meta" or not file.data:
            return []
        galaxy_info = file.data.get("galaxy_info", None)
        if not galaxy_info:
            return []
        results = []
        tags = []
        if "galaxy_tags" in galaxy_info:
            galaxy_tags = galaxy_info["galaxy_tags"]
            if isinstance(galaxy_tags, list):
                tags.extend(galaxy_tags)
            else:
                results.append(
                    self.create_matcherror(
                        "Expected 'galaxy_tags' to be a list",
                        filename=file,
                    ),
                )
        if "categories" in galaxy_info:
            # 'categories' is legacy metadata: always flag it, then still
            # validate any tags it carries.
            results.append(
                self.create_matcherror(
                    "Use 'galaxy_tags' rather than 'categories'",
                    filename=file,
                ),
            )
            categories = galaxy_info["categories"]
            if isinstance(categories, list):
                tags.extend(categories)
            else:
                results.append(
                    self.create_matcherror(
                        "Expected 'categories' to be a list",
                        filename=file,
                    ),
                )
        for tag in tags:
            if not isinstance(tag, str):
                results.append(
                    self.create_matcherror(
                        f"Tags must be strings: '{tag}'",
                        filename=file,
                    ),
                )
            elif not self.TAG_REGEXP.match(tag):
                results.append(
                    self.create_matcherror(
                        f"{self.shortdesc}, invalid: '{tag}'",
                        filename=file,
                    ),
                )
        return results
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
    import pytest

    @pytest.mark.parametrize(
        "rule_runner",
        (MetaTagValidRule,),
        indirect=["rule_runner"],
    )
    def test_valid_tag_rule(rule_runner: RunFromText) -> None:
        """Test rule matches."""
        results = rule_runner.run(
            Path("examples/roles/meta_no_tags_valid/meta/main.yml"),
        )
        output = str(results)
        assert "Use 'galaxy_tags' rather than 'categories'" in output, results
        assert "Expected 'categories' to be a list" in output
        assert "invalid: 'my s q l'" in output
        assert "invalid: 'MYTAG'" in output

    @pytest.mark.parametrize(
        "rule_runner",
        (MetaTagValidRule,),
        indirect=["rule_runner"],
    )
    def test_meta_not_tags(rule_runner: Any) -> None:
        """Test rule matches."""
        fixture = "examples/roles/meta_no_tags_galaxy_info/meta/main.yml"
        assert rule_runner.run(fixture) == []

    @pytest.mark.parametrize(
        "rule_runner",
        (MetaTagValidRule,),
        indirect=["rule_runner"],
    )
    def test_no_galaxy_tags_list(rule_runner: Any) -> None:
        """Test rule matches."""
        output = str(rule_runner.run("examples/roles/meta_tags_no_list/meta/main.yml"))
        assert "Expected 'galaxy_tags' to be a list" in output

    @pytest.mark.parametrize(
        "rule_runner",
        (MetaTagValidRule,),
        indirect=["rule_runner"],
    )
    def test_galaxy_categories_as_list(rule_runner: Any) -> None:
        """Test rule matches."""
        results = rule_runner.run(
            "examples/roles/meta_categories_as_list/meta/main.yml",
        )
        output = str(results)
        assert "Use 'galaxy_tags' rather than 'categories'" in output, results
        assert "Expected 'categories' to be a list" not in output

    @pytest.mark.parametrize(
        "rule_runner",
        (MetaTagValidRule,),
        indirect=["rule_runner"],
    )
    def test_tags_not_a_string(rule_runner: Any) -> None:
        """Test rule matches."""
        output = str(
            rule_runner.run("examples/roles/meta_tags_not_a_string/meta/main.yml"),
        )
        assert "Tags must be strings" in output
| 5,054 | Python | .py | 135 | 26.8 | 88 | 0.5613 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,245 | syntax_check.py | ansible_ansible-lint/src/ansiblelint/rules/syntax_check.py | """Rule definition for ansible syntax check."""
from __future__ import annotations
import re
from dataclasses import dataclass
from ansiblelint.rules import AnsibleLintRule
@dataclass
class KnownError:
    """Class that tracks result of linting."""

    # Rule sub-tag (e.g. "missing-file") reported for this failure kind.
    tag: str
    # Pattern applied against ansible's syntax-check error output.
    regex: re.Pattern[str]


# Order matters, we only report the first matching pattern, the one at the end
# is used to match generic or less specific patterns.
# NOTE: re.DOTALL was previously OR-ed in twice on every pattern; the
# duplicate flag is redundant and has been removed (behavior unchanged).
OUTPUT_PATTERNS = (
    KnownError(
        tag="missing-file",
        regex=re.compile(
            # do not use <filename> capture group for this because we want to report original file, not the missing target one
            r"(?P<title>Unable to retrieve file contents)\n(?P<details>Could not find or access '(?P<value>.*)'[^\n]*)",
            re.MULTILINE | re.DOTALL,
        ),
    ),
    KnownError(
        tag="no-file",
        regex=re.compile(
            r"^ERROR! (?P<title>No file specified for [^\n]*)",
            re.MULTILINE | re.DOTALL,
        ),
    ),
    KnownError(
        tag="empty-playbook",
        regex=re.compile(
            "Empty playbook, nothing to do",
            re.MULTILINE | re.DOTALL,
        ),
    ),
    KnownError(
        tag="malformed",
        regex=re.compile(
            "^ERROR! (?P<title>A malformed block was encountered while loading a block[^\n]*)",
            re.MULTILINE | re.DOTALL,
        ),
    ),
    KnownError(
        tag="unknown-module",
        regex=re.compile(
            r"^ERROR! (?P<title>couldn't resolve module/action [^\n]*)\n\nThe error appears to be in '(?P<filename>[\w\/\.\-]+)': line (?P<line>\d+), column (?P<column>\d+)",
            re.MULTILINE | re.DOTALL,
        ),
    ),
    KnownError(
        tag="specific",
        regex=re.compile(
            r"^ERROR! (?P<title>[^\n]*)\n\nThe error appears to be in '(?P<filename>[\w\/\.\-]+)': line (?P<line>\d+), column (?P<column>\d+)",
            re.MULTILINE | re.DOTALL,
        ),
    ),
    # "ERROR! the role 'this_role_is_missing' was not found in ROLE_INCLUDE_PATHS\n\nThe error appears to be in 'FILE_PATH': line 5, column 7, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n  roles:\n    - this_role_is_missing\n      ^ here\n"
    KnownError(
        tag="specific",
        regex=re.compile(
            r"^ERROR! (?P<title>the role '.*' was not found in[^\n]*)'(?P<filename>[\w\/\.\-]+)': line (?P<line>\d+), column (?P<column>\d+)",
            re.MULTILINE | re.DOTALL,
        ),
    ),
)
class AnsibleSyntaxCheckRule(AnsibleLintRule):
    """Ansible syntax check failed."""

    id = "syntax-check"
    severity = "VERY_HIGH"
    tags = ["core", "unskippable"]
    version_added = "v5.0.0"
    # NOTE(review): _order=0 presumably schedules this rule before all others
    # (a failed syntax check makes further linting meaningless) — confirm
    # ordering semantics in the rules collection.
    _order = 0
| 2,850 | Python | .py | 72 | 32.263889 | 309 | 0.594507 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,246 | no_handler.py | ansible_ansible-lint/src/ansiblelint/rules/no_handler.py | # Copyright (c) 2016 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""UseHandlerRatherThanWhenChangedRule used with ansible-lint."""
from __future__ import annotations
import sys
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
def _changed_in_when(item: str) -> bool:
if not isinstance(item, str):
return False
item_list = item.split()
if {"and", "or", "not"} & set(item_list):
return False
return any(
changed in item
for changed in [
".changed",
"|changed",
'["changed"]',
"['changed']",
"is changed",
]
)
class UseHandlerRatherThanWhenChangedRule(AnsibleLintRule):
    """Tasks that run when changed should likely be handlers."""

    id = "no-handler"
    description = (
        "If a task has a ``when: result.changed`` setting, it is effectively "
        "acting as a handler. You could use ``notify`` and move that task to "
        "``handlers``."
    )
    link = "https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_handlers.html#handlers"
    severity = "MEDIUM"
    tags = ["idiom"]
    version_added = "historic"

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Return truthy when a task's ``when`` only tests a changed result.

        :param task: The task to inspect.
        :param file: The file being linted, if any.
        :returns: Truthy when the task effectively acts as a handler.
        """
        # Handlers themselves and non-task actions are exempt.
        if task["__ansible_action_type__"] != "task" or task.is_handler():
            return False
        when = task.get("when")
        result = False
        if isinstance(when, list):
            # List items are implicitly AND-ed, which is more than a plain
            # "is changed" test, so only inspect single-item lists. Using
            # `== 1` (not `<= 1`) also guards against an empty `when: []`
            # list, which previously raised IndexError on `when[0]`.
            if len(when) == 1:
                result = _changed_in_when(when[0])
        elif isinstance(when, str):
            result = _changed_in_when(when)
        return result
if "pytest" in sys.modules:
import pytest
# pylint: disable=ungrouped-imports
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
from ansiblelint.testing import run_ansible_lint
@pytest.mark.parametrize(
("test_file", "failures"),
(
pytest.param("examples/playbooks/no_handler_fail.yml", 5, id="fail"),
pytest.param("examples/playbooks/no_handler_pass.yml", 0, id="pass"),
),
)
def test_no_handler(
default_rules_collection: RulesCollection,
test_file: str,
failures: int,
) -> None:
"""Test rule matches."""
results = Runner(test_file, rules=default_rules_collection).run()
assert len(results) == failures
for result in results:
assert result.tag == "no-handler"
def test_role_with_handler() -> None:
"""Test role with handler."""
role_path = "examples/roles/role_with_handler"
results = run_ansible_lint("-v", role_path)
assert "no-handler" not in results.stdout
| 3,930 | Python | .py | 98 | 33.765306 | 100 | 0.667978 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,247 | no_tabs.py | ansible_ansible-lint/src/ansiblelint/rules/no_tabs.py | """Implementation of no-tabs rule."""
# Copyright (c) 2016, Will Thames and contributors
# Copyright (c) 2018, Ansible Project
from __future__ import annotations
import sys
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
from ansiblelint.text import has_jinja
from ansiblelint.yaml_utils import nested_items_path
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class NoTabsRule(AnsibleLintRule):
    """Most files should not contain tabs."""

    id = "no-tabs"
    description = "Tabs can cause unexpected display issues, use spaces"
    severity = "LOW"
    tags = ["formatting"]
    version_added = "v4.0.0"
    # lineinfile-style modules legitimately accept literal tabs in these
    # arguments, so they are exempt from the check.
    allow_list = [
        (module, argument)
        for module in (
            "lineinfile",
            "win_lineinfile",
            "ansible.builtin.lineinfile",
            "ansible.legacy.lineinfile",
            "community.windows.win_lineinfile",
        )
        for argument in ("insertafter", "insertbefore", "regexp", "line")
    ]

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        module = task["action"]["__ansible_module__"]
        for key, value, _ in nested_items_path(task):
            # Keys containing a literal tab are always flagged.
            if isinstance(key, str) and "\t" in key and not has_jinja(key):
                return True
            if not isinstance(value, str) or "\t" not in value:
                continue
            # Skip allow-listed module arguments and templated values.
            if (module, key) in self.allow_list or has_jinja(value):
                continue
            return True
        return False
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    def test_no_tabs_rule(default_rules_collection: RulesCollection) -> None:
        """Test rule matches."""
        results = Runner(
            "examples/playbooks/rule-no-tabs.yml",
            rules=default_rules_collection,
        ).run()
        expected_results = [
            (10, NoTabsRule().shortdesc),
            (13, NoTabsRule().shortdesc),
        ]
        for i, expected in enumerate(expected_results):
            assert len(results) >= i + 1
            assert results[i].lineno == expected[0]
            assert results[i].message == expected[1]
        # Bug fix: compare against the number of expected results, not
        # len(expected) — that was the last (lineno, message) tuple left over
        # from the loop, which only matched by coincidence (both were 2).
        assert len(results) == len(expected_results), results
| 3,060 | Python | .py | 77 | 31.584416 | 77 | 0.618439 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,248 | no_prompting.py | ansible_ansible-lint/src/ansiblelint/rules/no_prompting.py | """Implementation of no-prompting rule."""
from __future__ import annotations
import sys
from typing import TYPE_CHECKING, Any
from ansiblelint.constants import LINE_NUMBER_KEY
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.config import Options
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class NoPromptingRule(AnsibleLintRule):
    """Disallow prompting."""

    id = "no-prompting"
    description = (
        "Disallow the use of vars_prompt or ansible.builtin.pause to better"
        "accommodate unattended playbook runs and use in CI pipelines."
    )
    tags = ["opt-in"]
    severity = "VERY_LOW"
    version_added = "v6.0.3"

    def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]:
        """Return matches found for a specific playbook."""
        if file.kind != "playbook":  # pragma: no cover
            return []
        # Flag plays that use the 'vars_prompt' section to set variables.
        prompts = data.get("vars_prompt")
        if not prompts:
            return []
        match = self.create_matcherror(
            message="Play uses vars_prompt",
            lineno=prompts[0][LINE_NUMBER_KEY],
            filename=file,
        )
        return [match]

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Return matches for ansible.builtin.pause tasks."""
        action = task["action"]
        if action["__ansible_module_original__"] not in [
            "pause",
            "ansible.builtin.pause",
        ]:
            return False
        # A pause with minutes or seconds set is not blocking, so allow it.
        return not (action.get("minutes", None) or action.get("seconds", None))
if "pytest" in sys.modules:
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    def test_no_prompting_fail(config_options: Options) -> None:
        """Negative test for no-prompting."""
        # The rule is opt-in, so it must be enabled explicitly for the test.
        config_options.enable_list = ["no-prompting"]
        collection = RulesCollection(options=config_options)
        collection.register(NoPromptingRule())
        runner = Runner("examples/playbooks/rule-no-prompting.yml", rules=collection)
        matches = runner.run()
        assert len(matches) == 2
        assert all(match.rule.id == "no-prompting" for match in matches)
| 2,581 | Python | .py | 63 | 33.079365 | 87 | 0.645367 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,249 | latest.py | ansible_ansible-lint/src/ansiblelint/rules/latest.py | """Implementation of latest rule."""
from __future__ import annotations
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class LatestRule(AnsibleLintRule):
    """Result of the command may vary on subsequent runs."""

    id = "latest"
    description = (
        "All version control checkouts must point to "
        "an explicit commit or tag, not just ``latest``"
    )
    severity = "MEDIUM"
    tags = ["idempotency"]
    version_added = "v6.5.2"
    _ids = {
        "latest[git]": "Use a commit hash or tag instead of 'latest' for git",
        "latest[hg]": "Use a commit hash or tag instead of 'latest' for hg",
    }

    # module name -> (option that holds the revision, its floating default)
    _floating_defaults = {
        "git": ("version", "HEAD"),
        "hg": ("revision", "default"),
    }

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str | MatchError:
        """Flag git/hg checkouts that track a floating revision."""
        module = task["action"]["__ansible_module__"]
        spec = self._floating_defaults.get(module)
        if spec is None:
            return False
        option, floating = spec
        # Omitting the option entirely is equivalent to using the floating
        # default, so both cases are reported.
        if task["action"].get(option, floating) == floating:
            return self.create_matcherror(tag=f"latest[{module}]", filename=file)
        return False
| 1,457 | Python | .py | 39 | 30.205128 | 78 | 0.613475 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,250 | ignore_errors.py | ansible_ansible-lint/src/ansiblelint/rules/ignore_errors.py | """IgnoreErrorsRule used with ansible-lint."""
from __future__ import annotations
import sys
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class IgnoreErrorsRule(AnsibleLintRule):
    """Use failed_when and specify error conditions instead of using ignore_errors."""

    id = "ignore-errors"
    description = (
        "Instead of ignoring all errors, ignore the errors only when using ``{{ ansible_check_mode }}``, "
        "register the errors using ``register``, "
        "or use ``failed_when:`` and specify acceptable error conditions "
        "to reduce the risk of ignoring important failures."
    )
    severity = "LOW"
    tags = ["unpredictability"]
    version_added = "v5.0.7"

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Flag tasks that blanket-ignore errors without registering the result."""
        flag = task.get("ignore_errors")
        # Tied to check mode, or with a registered result, ignoring is acceptable.
        exempt = flag == "{{ ansible_check_mode }}" or task.get("register")
        return flag and not exempt
if "pytest" in sys.modules:
    import pytest

    if TYPE_CHECKING:
        from ansiblelint.testing import RunFromText

    # Inline playbook fixtures covering each branch of IgnoreErrorsRule.matchtask.
    IGNORE_ERRORS_TRUE = """
- hosts: all
  tasks:
    - name: Run apt-get update
      command: apt-get update
      ignore_errors: true
"""

    IGNORE_ERRORS_FALSE = """
- hosts: all
  tasks:
    - name: Run apt-get update
      command: apt-get update
      ignore_errors: false
"""

    IGNORE_ERRORS_CHECK_MODE = """
- hosts: all
  tasks:
    - name: Run apt-get update
      command: apt-get update
      ignore_errors: "{{ ansible_check_mode }}"
"""

    IGNORE_ERRORS_REGISTER = """
- hosts: all
  tasks:
    - name: Run apt-get update
      command: apt-get update
      ignore_errors: true
      register: ignore_errors_register
"""

    FAILED_WHEN = """
- hosts: all
  tasks:
    - name: Disable apport
      become: 'yes'
      lineinfile:
        line: "enabled=0"
        dest: /etc/default/apport
        mode: 0644
        state: present
      register: default_apport
      failed_when: default_apport.rc !=0 and not default_apport.rc == 257
"""

    @pytest.mark.parametrize(
        "rule_runner",
        (IgnoreErrorsRule,),
        indirect=["rule_runner"],
    )
    def test_ignore_errors_true(rule_runner: RunFromText) -> None:
        """The task uses ignore_errors."""
        results = rule_runner.run_playbook(IGNORE_ERRORS_TRUE)
        assert len(results) == 1

    @pytest.mark.parametrize(
        "rule_runner",
        (IgnoreErrorsRule,),
        indirect=["rule_runner"],
    )
    def test_ignore_errors_false(rule_runner: RunFromText) -> None:
        """The task uses ignore_errors: false, oddly enough."""
        results = rule_runner.run_playbook(IGNORE_ERRORS_FALSE)
        assert len(results) == 0

    @pytest.mark.parametrize(
        "rule_runner",
        (IgnoreErrorsRule,),
        indirect=["rule_runner"],
    )
    def test_ignore_errors_check_mode(rule_runner: RunFromText) -> None:
        """The task uses ignore_errors: "{{ ansible_check_mode }}"."""
        results = rule_runner.run_playbook(IGNORE_ERRORS_CHECK_MODE)
        assert len(results) == 0

    @pytest.mark.parametrize(
        "rule_runner",
        (IgnoreErrorsRule,),
        indirect=["rule_runner"],
    )
    def test_ignore_errors_register(rule_runner: RunFromText) -> None:
        """The task uses ignore_errors: but output is registered and managed."""
        results = rule_runner.run_playbook(IGNORE_ERRORS_REGISTER)
        assert len(results) == 0

    @pytest.mark.parametrize(
        "rule_runner",
        (IgnoreErrorsRule,),
        indirect=["rule_runner"],
    )
    def test_failed_when(rule_runner: RunFromText) -> None:
        """Instead of ignore_errors, this task uses failed_when."""
        results = rule_runner.run_playbook(FAILED_WHEN)
        assert len(results) == 0
30,251 | fqcn.py | ansible_ansible-lint/src/ansiblelint/rules/fqcn.py | """Rule definition for usage of fully qualified collection names for builtins."""
from __future__ import annotations
import logging
import sys
from typing import TYPE_CHECKING, Any
from ruamel.yaml.comments import CommentedSeq
from ansiblelint.constants import LINE_NUMBER_KEY
from ansiblelint.rules import AnsibleLintRule, TransformMixin
from ansiblelint.utils import load_plugin
if TYPE_CHECKING:
from ruamel.yaml.comments import CommentedMap
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
_logger = logging.getLogger(__name__)

# Short names of action modules shipped with ansible-core; bare use of any of
# these should be written as ansible.builtin.<name> (or ansible.legacy.<name>).
builtins = [
    "add_host",
    "apt",
    "apt_key",
    "apt_repository",
    "assemble",
    "assert",
    "async_status",
    "blockinfile",
    "command",
    "copy",
    "cron",
    "debconf",
    "debug",
    "dnf",
    "dpkg_selections",
    "expect",
    "fail",
    "fetch",
    "file",
    "find",
    "gather_facts",
    "get_url",
    "getent",
    "git",
    "group",
    "group_by",
    "hostname",
    "import_playbook",
    "import_role",
    "import_tasks",
    "include",
    "include_role",
    "include_tasks",
    "include_vars",
    "iptables",
    "known_hosts",
    "lineinfile",
    "meta",
    "package",
    "package_facts",
    "pause",
    "ping",
    "pip",
    "raw",
    "reboot",
    "replace",
    "rpm_key",
    "script",
    "service",
    "service_facts",
    "set_fact",
    "set_stats",
    "setup",
    "shell",
    "slurp",
    "stat",
    "subversion",
    "systemd",
    "sysvinit",
    "tempfile",
    "template",
    "unarchive",
    "uri",
    "user",
    "wait_for",
    "wait_for_connection",
    "yum",
    "yum_repository",
]
class FQCNBuiltinsRule(AnsibleLintRule, TransformMixin):
    """Use FQCN for builtin actions."""

    id = "fqcn"
    severity = "MEDIUM"
    description = (
        "Check whether actions are using using full qualified collection names."
    )
    tags = ["formatting"]
    version_added = "v6.8.0"
    # Cache of short/alias name -> resolved FQCN; pre-seeded with the block
    # keywords so they are never sent to plugin resolution.
    module_aliases: dict[str, str] = {"block/always/rescue": "block/always/rescue"}
    _ids = {
        "fqcn[action-core]": "Use FQCN for builtin module actions",
        "fqcn[action]": "Use FQCN for module actions",
        "fqcn[canonical]": "You should use canonical module name",
    }

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> list[MatchError]:
        """Return matches for actions not using their canonical FQCN form."""
        result: list[MatchError] = []
        if file and file.failed():
            return result
        module = task["action"]["__ansible_module_original__"]
        if not isinstance(module, str):
            msg = "Invalid data for module."
            raise TypeError(msg)

        # Resolve and cache the canonical FQCN for this action name.
        if module not in self.module_aliases:
            loaded_module = load_plugin(module)
            target = loaded_module.resolved_fqcn
            self.module_aliases[module] = target

            if target is None:
                _logger.warning("Unable to resolve FQCN for module %s", module)
                # Map the name to itself so resolution is not retried.
                self.module_aliases[module] = module
                return []

            if target not in self.module_aliases:
                self.module_aliases[target] = target

        if module != self.module_aliases[module]:
            module_alias = self.module_aliases[module]
            if module_alias.startswith("ansible.builtin"):
                legacy_module = module_alias.replace(
                    "ansible.builtin.",
                    "ansible.legacy.",
                    1,
                )
                if module != legacy_module:
                    if module == "ansible.builtin.include":
                        message = f"Avoid deprecated module ({module})"
                        details = "Use `ansible.builtin.include_task` or `ansible.builtin.import_tasks` instead."
                    else:
                        message = f"Use FQCN for builtin module actions ({module})."
                        details = f"Use `{module_alias}` or `{legacy_module}` instead."
                    result.append(
                        self.create_matcherror(
                            message=message,
                            details=details,
                            filename=file,
                            lineno=task["__line__"],
                            tag="fqcn[action-core]",
                        ),
                    )
            elif module.count(".") < 2:
                result.append(
                    self.create_matcherror(
                        message=f"Use FQCN for module actions, such `{self.module_aliases[module]}`.",
                        details=f"Action `{module}` is not FQCN.",
                        filename=file,
                        lineno=task["__line__"],
                        tag="fqcn[action]",
                    ),
                )
            # TODO(ssbarnea): Remove the c.g. and c.n. exceptions from here once # noqa: FIX002
            # community team is flattening these.
            # https://github.com/ansible-community/community-topics/issues/147
            # BUGFIX: previous condition `not a or b` flagged every
            # community.network.* module due to `not`/`or` precedence even
            # though the TODO above exempts both collections; use the tuple
            # form of str.startswith so both are exempt.
            elif not module.startswith(("community.general.", "community.network.")):
                result.append(
                    self.create_matcherror(
                        message=f"You should use canonical module name `{self.module_aliases[module]}` instead of `{module}`.",
                        filename=file,
                        lineno=task["__line__"],
                        tag="fqcn[canonical]",
                    ),
                )
        return result

    def matchyaml(self, file: Lintable) -> list[MatchError]:
        """Return matches found for a specific YAML text."""
        result = []
        if file.kind == "plugin":
            i = file.path.resolve().parts.index("plugins")
            plugin_type = file.path.resolve().parts[i : i + 2]
            short_path = file.path.resolve().parts[i + 2 :]
            # Plugins nested deeper than plugins/<type>/ are discouraged,
            # except under test directories.
            if len(short_path) > 1 and "test" not in str(file.path):
                result.append(
                    self.create_matcherror(
                        message=f"Deep plugins directory is discouraged. Move '{file.path}' directly under '{'/'.join(plugin_type)}' folder.",
                        tag="fqcn[deep]",
                        filename=file,
                    ),
                )
        elif file.kind == "playbook":
            for play in file.data:
                if play is None:
                    continue
                result.extend(self.matchplay(file, play))
        return result

    def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]:
        """Flag plays using the `collections` keyword instead of FQCNs."""
        if file.kind != "playbook":
            return []
        if "collections" in data:
            return [
                self.create_matcherror(
                    message="Avoid `collections` keyword by using FQCN for all plugins, modules, roles and playbooks.",
                    lineno=data[LINE_NUMBER_KEY],
                    tag="fqcn[keyword]",
                    filename=file,
                ),
            ]
        return []

    def transform(
        self,
        match: MatchError,
        lintable: Lintable,
        data: CommentedMap | CommentedSeq | str,
    ) -> None:
        """Rewrite the matched action key to its canonical/FQCN name."""
        if match.tag in self.ids():
            target_task = self.seek(match.yaml_path, data)
            # Unfortunately, a lot of data about Ansible content gets lost here, you only get a simple dict.
            # For now, just parse the error messages for the data about action names etc. and fix this later.
            current_action = ""
            new_action = ""
            if match.tag == "fqcn[action-core]":
                # split at the first bracket, cut off the last bracket and dot
                current_action = match.message.split("(")[1][:-2]
                # This will always replace builtin modules with "ansible.builtin" versions, not "ansible.legacy".
                # The latter is technically more correct in what ansible has executed so far, the former is most likely better understood and more robust.
                new_action = match.details.split("`")[1]
            elif match.tag == "fqcn[action]":
                current_action = match.details.split("`")[1]
                new_action = match.message.split("`")[1]
            elif match.tag == "fqcn[canonical]":
                current_action = match.message.split("`")[3]
                new_action = match.message.split("`")[1]
            # Cycle every key through popitem/re-insert so that renaming the
            # action key keeps the original key order of the mapping.
            for _ in range(len(target_task)):
                if isinstance(target_task, CommentedSeq):
                    continue
                k, v = target_task.popitem(False)
                target_task[new_action if k == current_action else k] = v
            match.fixed = True
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    def test_fqcn_builtin_fail() -> None:
        """Short and non-canonical action names raise keyword/action matches."""
        collection = RulesCollection()
        collection.register(FQCNBuiltinsRule())
        success = "examples/playbooks/rule-fqcn-fail.yml"
        results = Runner(success, rules=collection).run()
        assert len(results) == 3
        assert results[0].tag == "fqcn[keyword]"
        assert "Avoid `collections` keyword" in results[0].message
        assert results[1].tag == "fqcn[action-core]"
        assert "Use FQCN for builtin module actions" in results[1].message
        assert results[2].tag == "fqcn[action]"
        assert "Use FQCN for module actions, such" in results[2].message

    def test_fqcn_builtin_pass() -> None:
        """A fully-qualified playbook produces no matches."""
        collection = RulesCollection()
        collection.register(FQCNBuiltinsRule())
        success = "examples/playbooks/rule-fqcn-pass.yml"
        results = Runner(success, rules=collection).run()
        assert len(results) == 0, results

    def test_fqcn_deep_fail() -> None:
        """A plugin nested below plugins/<type>/ raises fqcn[deep]."""
        collection = RulesCollection()
        collection.register(FQCNBuiltinsRule())
        failure = "examples/.collection/plugins/modules/deep/beta.py"
        results = Runner(failure, rules=collection).run()
        assert len(results) == 1
        assert results[0].tag == "fqcn[deep]"
        assert "Deep plugins directory is discouraged" in results[0].message

    def test_fqcn_deep_pass() -> None:
        """A plugin directly under plugins/<type>/ is accepted."""
        collection = RulesCollection()
        collection.register(FQCNBuiltinsRule())
        success = "examples/.collection/plugins/modules/alpha.py"
        results = Runner(success, rules=collection).run()
        assert len(results) == 0

    def test_fqcn_deep_test_dir_pass() -> None:
        """Nested plugin paths under a tests directory are exempt."""
        collection = RulesCollection()
        collection.register(FQCNBuiltinsRule())
        success = "examples/.collection/plugins/modules/tests/gamma.py"
        results = Runner(success, rules=collection).run()
        assert len(results) == 0
| 11,100 | Python | .py | 282 | 28.553191 | 154 | 0.564122 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,252 | meta_incorrect.py | ansible_ansible-lint/src/ansiblelint/rules/meta_incorrect.py | """Implementation of meta-incorrect rule."""
# Copyright (c) 2018, Ansible Project
from __future__ import annotations
import sys
from typing import TYPE_CHECKING
from ansiblelint.constants import LINE_NUMBER_KEY
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
class MetaChangeFromDefaultRule(AnsibleLintRule):
    """meta/main.yml default values should be changed."""

    id = "meta-incorrect"
    # (field name, placeholder value produced by `ansible-galaxy init`)
    field_defaults = [
        ("author", "your name"),
        ("description", "your description"),
        ("company", "your company (optional)"),
        ("license", "license (GPLv2, CC-BY, etc)"),
        ("license", "license (GPL-2.0-or-later, MIT, etc)"),
    ]
    values = ", ".join(sorted({f[0] for f in field_defaults}))
    description = (
        f"You should set appropriate values in meta/main.yml for these fields: {values}"
    )
    severity = "HIGH"
    tags = ["metadata"]
    version_added = "v4.0.0"

    def matchyaml(self, file: Lintable) -> list[MatchError]:
        """Report galaxy_info fields still holding their scaffold defaults."""
        if file.kind != "meta" or not file.data:
            return []
        galaxy_info = file.data.get("galaxy_info", None)
        if not galaxy_info:
            return []
        return [
            self.create_matcherror(
                filename=file,
                lineno=file.data[LINE_NUMBER_KEY],
                message=f"Should change default metadata: {field}",
            )
            for field, default in self.field_defaults
            if galaxy_info.get(field, None) == default
        ]
if "pytest" in sys.modules:
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    def test_default_galaxy_info(
        default_rules_collection: RulesCollection,
    ) -> None:
        """Test for meta-incorrect."""
        runner = Runner(
            "examples/roles/meta_incorrect_fail",
            rules=default_rules_collection,
        )
        results = runner.run()
        assert all(result.rule.id == "meta-incorrect" for result in results)
        assert len(results) == 4
        rendered = str(results)
        for field in ("author", "description", "company", "license"):
            assert f"Should change default metadata: {field}" in rendered
30,253 | package_latest.py | ansible_ansible-lint/src/ansiblelint/rules/package_latest.py | """Implementations of the package-latest rule."""
# Copyright (c) 2016 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import annotations
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class PackageIsNotLatestRule(AnsibleLintRule):
    """Package installs should not use latest."""

    id = "package-latest"
    description = (
        "Package installs should use ``state=present`` with or without a version"
    )
    severity = "VERY_LOW"
    tags = ["idempotency"]
    version_added = "historic"

    # Modules whose `state: latest` makes a task non-idempotent.
    _package_managers = [
        "apk",
        "apt",
        "bower",
        "bundler",
        "dnf",
        "easy_install",
        "gem",
        "homebrew",
        "jenkins_plugin",
        "npm",
        "openbsd_package",
        "openbsd_pkg",
        "package",
        "pacman",
        "pear",
        "pip",
        "pkg5",
        "pkgutil",
        "portage",
        "slackpkg",
        "sorcery",
        "swdepot",
        "win_chocolatey",
        "yarn",
        "yum",
        "zypper",
    ]

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Flag package-manager tasks that request ``state: latest``."""
        action = task["action"]
        if action["__ansible_module__"] not in self._package_managers:
            return False
        if action.get("state") != "latest":
            return False
        # Pinning a version or restricting to upgrade-only makes the
        # behavior predictable, so those tasks are accepted.
        return not (
            action.get("version")
            or action.get("update_only")
            or action.get("only_upgrade")
        )
| 2,651 | Python | .py | 75 | 29.306667 | 81 | 0.656274 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,254 | no_free_form.py | ansible_ansible-lint/src/ansiblelint/rules/no_free_form.py | """Implementation of NoFreeFormRule."""
from __future__ import annotations
import functools
import re
import sys
from typing import TYPE_CHECKING, Any
from ansiblelint.constants import INCLUSION_ACTION_NAMES, LINE_NUMBER_KEY
from ansiblelint.rules import AnsibleLintRule, TransformMixin
from ansiblelint.rules.key_order import task_property_sorter
if TYPE_CHECKING:
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class NoFreeFormRule(AnsibleLintRule, TransformMixin):
    """Rule for detecting discouraged free-form syntax for action modules."""

    id = "no-free-form"
    description = "Avoid free-form inside files as it can produce subtle bugs."
    severity = "MEDIUM"
    tags = ["syntax", "risk"]
    version_added = "v6.8.0"
    needs_raw_task = True
    # Option names that mark a command/shell free-form string as convertible.
    cmd_shell_re = re.compile(
        r"(chdir|creates|executable|removes|stdin|stdin_add_newline|warn)=",
    )
    _ids = {
        "no-free-form[raw]": "Avoid embedding `executable=` inside raw calls, use explicit args dictionary instead.",
        "no-free-form[raw-non-string]": "Passing a non string value to `raw` module is neither documented or supported.",
    }

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> list[MatchError]:
        """Return matches for tasks that use free-form module arguments."""
        results: list[MatchError] = []
        action = task["action"]["__ansible_module_original__"]

        # include/import actions legitimately take a bare value.
        if action in INCLUSION_ACTION_NAMES:
            return results

        action_value = task["__raw_task__"].get(action, None)
        if task["action"].get("__ansible_module__", None) == "raw":
            if isinstance(action_value, str):
                if "executable=" in action_value:
                    results.append(
                        self.create_matcherror(
                            message="Avoid embedding `executable=` inside raw calls, use explicit args dictionary instead.",
                            lineno=task[LINE_NUMBER_KEY],
                            filename=file,
                            tag=f"{self.id}[raw]",
                        ),
                    )
            else:
                results.append(
                    self.create_matcherror(
                        message="Passing a non string value to `raw` module is neither documented or supported.",
                        lineno=task[LINE_NUMBER_KEY],
                        filename=file,
                        tag=f"{self.id}[raw-non-string]",
                    ),
                )
        elif isinstance(action_value, str) and "=" in action_value:
            fail = False
            # For command/shell modules only flag strings that embed known
            # option names; a bare `=` inside the command itself is fine.
            if task["action"].get("__ansible_module__") in (
                "ansible.builtin.command",
                "ansible.builtin.shell",
                "ansible.windows.win_command",
                "ansible.windows.win_shell",
                "command",
                "shell",
                "win_command",
                "win_shell",
            ):
                if self.cmd_shell_re.search(action_value):
                    fail = True
            else:
                fail = True
            if fail:
                results.append(
                    self.create_matcherror(
                        message=f"Avoid using free-form when calling module actions. ({action})",
                        lineno=task[LINE_NUMBER_KEY],
                        filename=file,
                    ),
                )
        return results

    def transform(
        self,
        match: MatchError,
        lintable: Lintable,
        data: CommentedMap | CommentedSeq | str,
    ) -> None:
        """Convert a free-form action string into an explicit args mapping."""
        if "no-free-form" in match.tag:
            task = self.seek(match.yaml_path, data)

            def filter_values(
                val: str,
                filter_key: str,
                filter_dict: dict[str, Any],
            ) -> str:
                """Pull out key=value pairs from a string and set them in filter_dict.

                Returns unmatched strings.
                """
                if filter_key not in val:
                    return val

                extra = ""
                [k, v] = val.split(filter_key, 1)
                if " " in k:
                    # Text before the last word belongs to the command, not the key.
                    extra, k = k.rsplit(" ", 1)

                if v[0] in "\"'":
                    # Keep quoted strings together
                    quote = v[0]
                    _, v, remainder = v.split(quote, 2)
                    v = f"{quote}{v}{quote}"
                else:
                    try:
                        v, remainder = v.split(" ", 1)
                    except ValueError:
                        remainder = ""
                filter_dict[k] = v

                # Recurse on the remainder to collect any further pairs.
                extra = " ".join(
                    (extra, filter_values(remainder, filter_key, filter_dict)),
                )
                return extra.strip()

            if match.tag == "no-free-form":
                module_opts: dict[str, Any] = {}
                # Cycle keys through popitem/re-insert to keep mapping order.
                for _ in range(len(task)):
                    k, v = task.popitem(False)
                    # identify module as key and process its value
                    if len(k.split(".")) == 3 and isinstance(v, str):
                        cmd = filter_values(v, "=", module_opts)
                        if cmd:
                            module_opts["cmd"] = cmd

                        sorted_module_opts = {}
                        for key in sorted(
                            module_opts.keys(),
                            key=functools.cmp_to_key(task_property_sorter),
                        ):
                            sorted_module_opts[key] = module_opts[key]

                        task[k] = sorted_module_opts
                    else:
                        task[k] = v

                match.fixed = True
            elif match.tag == "no-free-form[raw]":
                exec_key_val: dict[str, Any] = {}
                for _ in range(len(task)):
                    k, v = task.popitem(False)
                    if isinstance(v, str) and "executable" in v:
                        # Filter the executable and other parts from the string
                        task[k] = " ".join(
                            [
                                item
                                for item in v.split(" ")
                                if filter_values(item, "=", exec_key_val)
                            ],
                        )
                        task["args"] = exec_key_val
                    else:
                        task[k] = v
                match.fixed = True
if "pytest" in sys.modules:
    import pytest

    # pylint: disable=ungrouped-imports
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    @pytest.mark.parametrize(
        ("file", "expected"),
        (
            pytest.param("examples/playbooks/rule-no-free-form-pass.yml", 0, id="pass"),
            pytest.param("examples/playbooks/rule-no-free-form-fail.yml", 3, id="fail"),
        ),
    )
    def test_rule_no_free_form(
        default_rules_collection: RulesCollection,
        file: str,
        expected: int,
    ) -> None:
        """Validate that rule works as intended."""
        results = Runner(file, rules=default_rules_collection).run()

        for result in results:
            assert result.rule.id == NoFreeFormRule.id, result
        assert len(results) == expected
| 7,519 | Python | .py | 179 | 26.865922 | 124 | 0.492411 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,255 | literal_compare.py | ansible_ansible-lint/src/ansiblelint/rules/literal_compare.py | """Implementation of the literal-compare rule."""
# Copyright (c) 2016, Will Thames and contributors
# Copyright (c) 2018-2021, Ansible Project
from __future__ import annotations
import re
import sys
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
from ansiblelint.yaml_utils import nested_items_path
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class ComparisonToLiteralBoolRule(AnsibleLintRule):
    """Don't compare to literal True/False."""

    id = "literal-compare"
    description = (
        "Use ``when: var`` rather than ``when: var == True`` "
        "(or conversely ``when: not var``)"
    )
    severity = "HIGH"
    tags = ["idiom"]
    version_added = "v4.0.0"

    literal_bool_compare = re.compile("[=!]= ?(True|true|False|false)")

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Return True when any ``when`` condition compares to a literal bool."""
        pattern = self.literal_bool_compare
        for key, value, _path in nested_items_path(task):
            if key != "when":
                continue
            if isinstance(value, str):
                candidates = [value]
            elif isinstance(value, bool):
                # A bare boolean condition cannot contain a comparison.
                candidates = []
            else:
                # A list of conditions: only string entries can match.
                candidates = [item for item in value if isinstance(item, str)]
            if any(pattern.search(candidate) for candidate in candidates):
                return True
        return False
if "pytest" in sys.modules:
    import pytest

    # pylint: disable=ungrouped-imports
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    @pytest.mark.parametrize(
        ("test_file", "failures"),
        (
            pytest.param(
                "examples/playbooks/rule_literal_compare_fail.yml",
                3,
                id="fail",
            ),
            pytest.param(
                "examples/playbooks/rule_literal_compare_pass.yml",
                0,
                id="pass",
            ),
        ),
    )
    def test_literal_compare(
        default_rules_collection: RulesCollection,
        test_file: str,
        failures: int,
    ) -> None:
        """Test rule matches."""
        # Run the full rule collection over the example and count matches.
        results = Runner(test_file, rules=default_rules_collection).run()
        for result in results:
            assert result.rule.id == "literal-compare"
        assert len(results) == failures
| 2,528 | Python | .py | 73 | 24.821918 | 86 | 0.572951 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,256 | risky_file_permissions.py | ansible_ansible-lint/src/ansiblelint/rules/risky_file_permissions.py | # Copyright (c) 2020 Sorin Sbarnea <sorin.sbarnea@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""MissingFilePermissionsRule used with ansible-lint."""
from __future__ import annotations
import sys
from pathlib import Path
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
# Despite documentation mentioning 'preserve' only these modules support it:
_modules_with_preserve = (
    "copy",
    "template",
)

# Modules that create or alter files and therefore must set an explicit mode.
_MODULES: set[str] = {
    "archive",
    "community.general.archive",
    "assemble",
    "ansible.builtin.assemble",
    "copy",  # supports preserve
    "ansible.builtin.copy",
    "file",
    "ansible.builtin.file",
    "get_url",
    "ansible.builtin.get_url",
    "replace",  # implicit preserve behavior but mode: preserve is invalid
    "ansible.builtin.replace",
    "template",  # supports preserve
    "ansible.builtin.template",
    # 'unarchive', # disabled because .tar.gz files can have permissions inside
}

# Maps module name -> default value of its `create` option; a missing mode is
# only a problem when the module would actually create the file.
_MODULES_WITH_CREATE: dict[str, bool] = {
    "blockinfile": False,
    "ansible.builtin.blockinfile": False,
    "htpasswd": True,
    "community.general.htpasswd": True,
    "ini_file": True,
    "community.general.ini_file": True,
    "lineinfile": False,
    "ansible.builtin.lineinfile": False,
}
class MissingFilePermissionsRule(AnsibleLintRule):
    """File permissions unset or incorrect."""

    id = "risky-file-permissions"
    description = (
        "Missing or unsupported mode parameter can cause unexpected file "
        "permissions based "
        "on version of Ansible being used. Be explicit, like `mode: 0644` to "
        "avoid hitting this rule. Special `preserve` value is accepted "
        f"only by {', '.join([f'`{x}`' for x in _modules_with_preserve])} modules."
    )
    link = "https://github.com/ansible/ansible/issues/71200"
    severity = "VERY_HIGH"
    tags = ["unpredictability"]
    version_added = "v4.3.0"

    _modules = _MODULES
    _modules_with_create = _MODULES_WITH_CREATE

    # pylint: disable=too-many-return-statements
    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Return True when a file-handling task leaves ``mode`` unset or invalid."""
        action = task["action"]
        module = action["__ansible_module__"]
        mode = action.get("mode", None)

        # We are unable to check args when using jinja templating.
        if not isinstance(task.args, dict):
            return False

        is_creating = module in self._modules_with_create
        if module not in self._modules and not is_creating:
            return False

        # 'preserve' is only honored by copy/template.
        if mode == "preserve" and module not in _modules_with_preserve:
            return True

        if is_creating:
            create = action.get("create", self._modules_with_create[module])
            return create and mode is None

        # A file that doesn't exist cannot have a mode.
        if action.get("state", None) == "absent":
            return False

        # A symlink always has mode 0777.
        if action.get("state", None) == "link":
            return False

        # Recurse on a directory does not allow for an uniform mode.
        if action.get("recurse", None):
            return False

        # The file module does not create anything when state==file (default).
        if module == "file" and action.get("state", "file") == "file":
            return False

        # replace module is the only one that has a valid default preserve
        # behavior, but we want to trigger rule if user used incorrect
        # documentation and put 'preserve', which is not supported.
        if module == "replace" and mode is None:
            return False

        return mode is None
if "pytest" in sys.modules:
    import pytest

    # pylint: disable=ungrouped-imports
    from ansiblelint.rules import RulesCollection
    from ansiblelint.testing import RunFromText

    @pytest.mark.parametrize(
        ("file", "expected"),
        (
            pytest.param(
                "examples/playbooks/rule-risky-file-permissions-pass.yml",
                0,
                id="pass",
            ),
            pytest.param(
                "examples/playbooks/rule-risky-file-permissions-fail.yml",
                11,
                id="fails",
            ),
        ),
    )
    def test_risky_file_permissions(
        file: str,
        expected: int,
        default_rules_collection: RulesCollection,
    ) -> None:
        """Verify the expected number of matches on the example playbooks."""
        runner = RunFromText(default_rules_collection)
        results = runner.run(Path(file))
        assert len(results) == expected
        for result in results:
            assert result.tag == "risky-file-permissions"
30,257 | key_order.py | ansible_ansible-lint/src/ansiblelint/rules/key_order.py | """All tasks should be have name come first."""
from __future__ import annotations
import functools
import sys
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any
from ansiblelint.constants import ANNOTATION_KEYS, LINE_NUMBER_KEY
from ansiblelint.errors import MatchError, RuleMatchTransformMeta
from ansiblelint.rules import AnsibleLintRule, TransformMixin
if TYPE_CHECKING:
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
SORTER_TASKS = (
    "name",
    # "__module__",
    # "action",
    # "args",
    None,  # <-- None include all modules that not using action and *
    # "when",
    # "notify",
    # "tags",
    "block",
    "rescue",
    "always",
)


def get_property_sort_index(name: str) -> int:
    """Return the sort slot of a task property within ``SORTER_TASKS``."""
    try:
        # Properties with an explicit slot sort at that slot.
        return SORTER_TASKS.index(name)
    except ValueError:
        # All other properties share the slot reserved by ``None``
        # (-1 if the sorter were ever defined without a ``None`` slot).
        return SORTER_TASKS.index(None) if None in SORTER_TASKS else -1


def task_property_sorter(property1: str, property2: str) -> int:
    """Compare two task properties by their sort slot (cmp-style result)."""
    left = get_property_sort_index(property1)
    right = get_property_sort_index(property2)
    if left == right:
        return 0
    return -1 if left < right else 1
@dataclass(frozen=True)
class KeyOrderTMeta(RuleMatchTransformMeta):
    """Key Order transform metadata.

    :param fixed: tuple with updated key order
    """

    # Desired key order; ints can appear for sequence-index keys.
    fixed: tuple[str | int, ...]

    def __str__(self) -> str:
        """Return string representation."""
        return f"Fixed to {self.fixed}"
class KeyOrderRule(AnsibleLintRule, TransformMixin):
    """Ensure specific order of keys in mappings."""

    id = "key-order"
    shortdesc = __doc__
    severity = "LOW"
    tags = ["formatting"]
    version_added = "v6.6.2"
    # Raw task access is required so the user's original key order is visible.
    needs_raw_task = True
    _ids = {
        "key-order[task]": "You can improve the task key order",
    }

    def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]:
        """Return matches found for a specific play (entry in playbook)."""
        result: list[MatchError] = []
        if file.kind != "playbook":
            return result
        # Annotation keys are linter-internal bookkeeping, not user content.
        keys = [str(key) for key, val in data.items() if key not in ANNOTATION_KEYS]
        sorted_keys = sorted(keys, key=functools.cmp_to_key(task_property_sorter))
        if keys != sorted_keys:
            result.append(
                self.create_matcherror(
                    f"You can improve the play key order to: {', '.join(sorted_keys)}",
                    filename=file,
                    tag=f"{self.id}[play]",
                    lineno=data[LINE_NUMBER_KEY],
                    transform_meta=KeyOrderTMeta(fixed=tuple(sorted_keys)),
                ),
            )
        return result

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> list[MatchError]:
        """Return a match when the raw task's keys are not in sorted order."""
        result = []
        raw_task = task["__raw_task__"]
        # Keys starting with "_" are internal annotations, not user keys.
        keys = [str(key) for key in raw_task if not key.startswith("_")]
        sorted_keys = sorted(keys, key=functools.cmp_to_key(task_property_sorter))
        if keys != sorted_keys:
            result.append(
                self.create_matcherror(
                    f"You can improve the task key order to: {', '.join(sorted_keys)}",
                    filename=file,
                    tag="key-order[task]",
                    transform_meta=KeyOrderTMeta(fixed=tuple(sorted_keys)),
                ),
            )
        return result

    def transform(
        self,
        match: MatchError,
        lintable: Lintable,
        data: CommentedMap | CommentedSeq | str,
    ) -> None:
        """Reorder mapping keys in place using the order stored in the match metadata.

        Popping and re-inserting each key in the fixed order moves it to the
        end of the mapping, so after the loop the mapping holds the keys in
        exactly that order.
        """
        if not isinstance(match.transform_meta, KeyOrderTMeta):
            return

        if match.tag == f"{self.id}[play]":
            play = self.seek(match.yaml_path, data)
            for key in match.transform_meta.fixed:
                # other transformation might change the key
                if key in play:
                    play[key] = play.pop(key)
            match.fixed = True
        if match.tag == f"{self.id}[task]":
            task = self.seek(match.yaml_path, data)
            for key in match.transform_meta.fixed:
                # other transformation might change the key
                if key in task:
                    task[key] = task.pop(key)
            match.fixed = True
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
import pytest
# pylint: disable=ungrouped-imports
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
@pytest.mark.parametrize(
("test_file", "failures"),
(
pytest.param("examples/playbooks/rule-key-order-pass.yml", 0, id="pass"),
pytest.param("examples/playbooks/rule-key-order-fail.yml", 6, id="fail"),
),
)
def test_key_order_rule(
default_rules_collection: RulesCollection,
test_file: str,
failures: int,
) -> None:
"""Test rule matches."""
results = Runner(test_file, rules=default_rules_collection).run()
assert len(results) == failures
for result in results:
assert result.rule.id == "key-order"
@pytest.mark.parametrize(
("properties", "expected"),
(
pytest.param([], []),
pytest.param(["block", "name"], ["name", "block"]),
pytest.param(
["block", "name", "action", "..."],
["name", "action", "...", "block"],
),
),
)
def test_key_order_property_sorter(
properties: list[str],
expected: list[str],
) -> None:
"""Test the task property sorter."""
result = sorted(properties, key=functools.cmp_to_key(task_property_sorter))
assert expected == result
@pytest.mark.parametrize(
("key", "order"),
(
pytest.param("name", 0),
pytest.param("action", 1),
pytest.param("foobar", SORTER_TASKS.index(None)),
pytest.param("block", len(SORTER_TASKS) - 3),
pytest.param("rescue", len(SORTER_TASKS) - 2),
pytest.param("always", len(SORTER_TASKS) - 1),
),
)
def test_key_order_property_sort_index(key: str, order: int) -> None:
"""Test sorting index."""
assert get_property_sort_index(key) == order
@pytest.mark.parametrize(
("prop1", "prop2", "result"),
(
pytest.param("name", "block", -1),
pytest.param("block", "name", 1),
pytest.param("block", "block", 0),
),
)
def test_key_order_property_sortfunc(prop1: str, prop2: str, result: int) -> None:
"""Test sorting function."""
assert task_property_sorter(prop1, prop2) == result
"""Implementation of avoid-implicit rule."""
# https://github.com/ansible/ansible-lint/issues/2501
from __future__ import annotations
import sys
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class AvoidImplicitRule(AnsibleLintRule):
    """Rule that identifies use of undocumented or discouraged implicit behaviors."""

    id = "avoid-implicit"
    shortdesc = "Avoid implicit behaviors"
    description = (
        "Items which are templated should use ``template`` instead of "
        "``copy`` with ``content`` to ensure correctness."
    )
    severity = "MEDIUM"
    tags = ["unpredictability"]
    version_added = "v6.8.0"

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Confirm if current rule is matching a specific task."""
        action = task["action"]
        if action["__ansible_module__"] != "copy":
            return False
        # A non-string ``content`` value was produced by templating, which
        # ``copy`` handles implicitly — ``template`` should be used instead.
        return not isinstance(action.get("content", ""), str)
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
def test_template_instead_of_copy_positive() -> None:
"""Positive test for avoid-implicit."""
collection = RulesCollection()
collection.register(AvoidImplicitRule())
success = "examples/playbooks/rule-avoid-implicit-pass.yml"
good_runner = Runner(success, rules=collection)
assert good_runner.run() == []
def test_template_instead_of_copy_negative() -> None:
"""Negative test for avoid-implicit."""
collection = RulesCollection()
collection.register(AvoidImplicitRule())
failure = "examples/playbooks/rule-avoid-implicit-fail.yml"
bad_runner = Runner(failure, rules=collection)
errs = bad_runner.run()
assert len(errs) == 1
"""Implementation of var-naming rule."""
from __future__ import annotations
import keyword
import re
import sys
from typing import TYPE_CHECKING, Any, NamedTuple
from ansible.parsing.yaml.objects import AnsibleUnicode
from ansible.vars.reserved import get_reserved_names
from ansiblelint.config import Options, options
from ansiblelint.constants import (
ANNOTATION_KEYS,
LINE_NUMBER_KEY,
PLAYBOOK_ROLE_KEYWORDS,
RC,
)
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.rules import AnsibleLintRule, RulesCollection
from ansiblelint.runner import Runner
from ansiblelint.skip_utils import get_rule_skips_from_line
from ansiblelint.text import has_jinja, is_fqcn, is_fqcn_or_name
from ansiblelint.utils import parse_yaml_from_file
if TYPE_CHECKING:
from ansiblelint.utils import Task
class Prefix(NamedTuple):
    """Expected variable-name prefix derived from a role reference."""

    # The prefix itself (usually the role name); empty string means no prefix.
    value: str = ""
    # True when the role was referenced by a fully qualified collection name.
    from_fqcn: bool = False
class VariableNamingRule(AnsibleLintRule):
    """All variables should be named using only lowercase and underscores."""

    id = "var-naming"
    severity = "MEDIUM"
    tags = ["idiom"]
    version_added = "v5.0.10"
    needs_raw_task = True
    # User-configurable pattern with a snake_case default.
    re_pattern_str = options.var_naming_pattern or "^[a-z_][a-z0-9_]*$"
    re_pattern = re.compile(re_pattern_str)
    reserved_names = get_reserved_names()
    # List of special variables that should be treated as read-only. This list
    # does not include connection variables, which we expect users to tune in
    # specific cases.
    # https://docs.ansible.com/ansible/latest/reference_appendices/special_variables.html
    read_only_names = {
        "ansible_check_mode",
        "ansible_collection_name",
        "ansible_config_file",
        "ansible_dependent_role_names",
        "ansible_diff_mode",
        "ansible_forks",
        "ansible_index_var",
        "ansible_inventory_sources",
        "ansible_limit",
        "ansible_local",  # special fact
        "ansible_loop",
        "ansible_loop_var",
        "ansible_parent_role_names",
        "ansible_parent_role_paths",
        "ansible_play_batch",
        "ansible_play_hosts",
        "ansible_play_hosts_all",
        "ansible_play_name",
        "ansible_play_role_names",
        "ansible_playbook_python",
        "ansible_role_name",
        "ansible_role_names",
        "ansible_run_tags",
        "ansible_search_path",
        "ansible_skip_tags",
        "ansible_verbosity",
        "ansible_version",
        "group_names",
        "groups",
        "hostvars",
        "inventory_dir",
        "inventory_file",
        "inventory_hostname",
        "inventory_hostname_short",
        "omit",
        "play_hosts",
        "playbook_dir",
        "role_name",
        "role_names",
        "role_path",
    }

    # These special variables are used by Ansible but we allow users to set
    # them as they might need it in certain cases.
    allowed_special_names = {
        "ansible_facts",
        "ansible_become_user",
        "ansible_connection",
        "ansible_host",
        "ansible_python_interpreter",
        "ansible_user",
        "ansible_remote_tmp",  # no included in docs
    }
    _ids = {
        "var-naming[no-reserved]": "Variables names must not be Ansible reserved names.",
        "var-naming[no-jinja]": "Variables names must not contain jinja2 templating.",
        "var-naming[pattern]": f"Variables names should match {re_pattern_str} regex.",
    }

    # pylint: disable=too-many-return-statements
    def get_var_naming_matcherror(
        self,
        ident: str,
        *,
        prefix: Prefix | None = None,
        file: Lintable,
    ) -> MatchError | None:
        """Return a MatchError if the variable name is not valid, otherwise None.

        Checks are applied in priority order; the first failing check wins.
        ``lineno`` is not set here — callers attach position information.
        """
        if not isinstance(ident, str):  # pragma: no cover
            return MatchError(
                tag="var-naming[non-string]",
                message="Variables names must be strings.",
                rule=self,
                lintable=file,
            )

        # Internal annotation keys and tunable special names are exempt.
        if ident in ANNOTATION_KEYS or ident in self.allowed_special_names:
            return None

        try:
            ident.encode("ascii")
        except UnicodeEncodeError:
            return MatchError(
                tag="var-naming[non-ascii]",
                message=f"Variables names must be ASCII. ({ident})",
                rule=self,
                lintable=file,
            )

        if keyword.iskeyword(ident):
            return MatchError(
                tag="var-naming[no-keyword]",
                message=f"Variables names must not be Python keywords. ({ident})",
                rule=self,
                lintable=file,
            )

        if ident in self.reserved_names:
            return MatchError(
                tag="var-naming[no-reserved]",
                message=f"Variables names must not be Ansible reserved names. ({ident})",
                rule=self,
                lintable=file,
            )

        if ident in self.read_only_names:
            return MatchError(
                tag="var-naming[read-only]",
                message=f"This special variable is read-only. ({ident})",
                rule=self,
                lintable=file,
            )

        # We want to allow use of jinja2 templating for variable names
        if "{{" in ident:
            return None

        # FQCN-derived prefixes skip the pattern check.
        if not bool(self.re_pattern.match(ident)) and (
            not prefix or not prefix.from_fqcn
        ):
            return MatchError(
                tag="var-naming[pattern]",
                message=f"Variables names should match {self.re_pattern_str} regex. ({ident})",
                rule=self,
                lintable=file,
            )

        # Leading underscores are ignored when checking the role prefix.
        if (
            prefix
            and not ident.lstrip("_").startswith(f"{prefix.value}_")
            and not has_jinja(prefix.value)
            and is_fqcn_or_name(prefix.value)
        ):
            return MatchError(
                tag="var-naming[no-role-prefix]",
                message=f"Variables names from within roles should use {prefix.value}_ as a prefix.",
                rule=self,
                lintable=file,
            )
        return None

    def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]:
        """Return matches found for a specific playbook.

        Matches are first collected into ``raw_results`` and then filtered
        against inline rule-skip comments (noqa) found on the matched lines.
        """
        results: list[MatchError] = []
        raw_results: list[MatchError] = []
        if not data or file.kind not in ("tasks", "handlers", "playbook", "vars"):
            return results
        # If the Play uses the 'vars' section to set variables
        our_vars = data.get("vars", {})
        for key in our_vars:
            match_error = self.get_var_naming_matcherror(key, file=file)
            if match_error:
                match_error.lineno = (
                    key.ansible_pos[1]
                    if isinstance(key, AnsibleUnicode)
                    else our_vars[LINE_NUMBER_KEY]
                )
                raw_results.append(match_error)
        roles = data.get("roles", [])
        for role in roles:
            # Bare role name as a string carries no inline vars to check.
            if isinstance(role, AnsibleUnicode):
                continue
            role_fqcn = role.get("role", role.get("name"))
            prefix = self._parse_prefix(role_fqcn)
            # Non-keyword keys on a role entry are treated as inline vars.
            for key in list(role.keys()):
                if key not in PLAYBOOK_ROLE_KEYWORDS:
                    match_error = self.get_var_naming_matcherror(
                        key,
                        prefix=prefix,
                        file=file,
                    )
                    if match_error:
                        match_error.message += f" (vars: {key})"
                        match_error.lineno = (
                            key.ansible_pos[1]
                            if isinstance(key, AnsibleUnicode)
                            else role[LINE_NUMBER_KEY]
                        )
                        raw_results.append(match_error)
            our_vars = role.get("vars", {})
            for key in our_vars:
                match_error = self.get_var_naming_matcherror(
                    key,
                    prefix=prefix,
                    file=file,
                )
                if match_error:
                    match_error.message += f" (vars: {key})"
                    match_error.lineno = (
                        key.ansible_pos[1]
                        if isinstance(key, AnsibleUnicode)
                        else our_vars[LINE_NUMBER_KEY]
                    )
                    raw_results.append(match_error)
        if raw_results:
            lines = file.content.splitlines()
            for match in raw_results:
                # lineno starts with 1, not zero
                skip_list = get_rule_skips_from_line(
                    line=lines[match.lineno - 1],
                    lintable=file,
                )
                if match.rule.id not in skip_list and match.tag not in skip_list:
                    results.append(match)

        return results

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> list[MatchError]:
        """Return matches for task based variables.

        Covers task-level ``vars``, ``set_fact`` keys, and ``register``
        targets. The role prefix comes from the enclosing role, or from the
        role referenced by include_role/import_role.
        """
        results = []
        prefix = Prefix()
        if file and file.parent and file.parent.kind == "role":
            prefix = Prefix(file.parent.path.name)
        ansible_module = task["action"]["__ansible_module__"]
        # If the task uses the 'vars' section to set variables
        our_vars = task.get("vars", {})
        if ansible_module in ("include_role", "import_role"):
            action = task["action"]
            if isinstance(action, dict):
                role_fqcn = action.get("name", "")
                prefix = self._parse_prefix(role_fqcn)
            else:
                prefix = Prefix()
        for key in our_vars:
            match_error = self.get_var_naming_matcherror(
                key,
                prefix=prefix,
                file=file or Lintable(""),
            )
            if match_error:
                match_error.lineno = our_vars[LINE_NUMBER_KEY]
                match_error.message += f" (vars: {key})"
                results.append(match_error)

        # If the task uses the 'set_fact' module
        if ansible_module == "set_fact":
            # Dunder-prefixed keys are internal; "cacheable" is a module option.
            for key in filter(
                lambda x: isinstance(x, str)
                and not x.startswith("__")
                and x != "cacheable",
                task["action"].keys(),
            ):
                match_error = self.get_var_naming_matcherror(
                    key,
                    prefix=prefix,
                    file=file or Lintable(""),
                )
                if match_error:
                    match_error.lineno = task["action"][LINE_NUMBER_KEY]
                    match_error.message += f" (set_fact: {key})"
                    results.append(match_error)

        # If the task registers a variable
        registered_var = task.get("register", None)
        if registered_var:
            match_error = self.get_var_naming_matcherror(
                registered_var,
                prefix=prefix,
                file=file or Lintable(""),
            )
            if match_error:
                match_error.message += f" (register: {registered_var})"
                match_error.lineno = task[LINE_NUMBER_KEY]
                results.append(match_error)

        return results

    def matchyaml(self, file: Lintable) -> list[MatchError]:
        """Return matches for variables defined in vars files.

        Non-vars files fall back to the generic matching of the base class.
        Inline rule-skip comments (noqa) on matched lines are honored.
        """
        results: list[MatchError] = []
        raw_results: list[MatchError] = []
        meta_data: dict[AnsibleUnicode, Any] = {}

        if str(file.kind) == "vars" and file.data:
            meta_data = parse_yaml_from_file(str(file.path))
            for key in meta_data:
                # Vars files inside a role must use the role name as prefix.
                prefix = Prefix(file.role) if file.role else Prefix()
                match_error = self.get_var_naming_matcherror(
                    key,
                    prefix=prefix,
                    file=file,
                )
                if match_error:
                    match_error.lineno = key.ansible_pos[1]
                    match_error.message += f" (vars: {key})"
                    raw_results.append(match_error)
            if raw_results:
                lines = file.content.splitlines()
                for match in raw_results:
                    # lineno starts with 1, not zero
                    skip_list = get_rule_skips_from_line(
                        line=lines[match.lineno - 1],
                        lintable=file,
                    )
                    if match.rule.id not in skip_list and match.tag not in skip_list:
                        results.append(match)
        else:
            results.extend(super().matchyaml(file))
        return results

    def _parse_prefix(self, fqcn: str) -> Prefix:
        """Build a Prefix from a role reference.

        FQCN references (containing a dot) yield an empty prefix value;
        plain names/paths yield the last path component.
        """
        return Prefix("" if "." in fqcn else fqcn.split("/")[-1], is_fqcn(fqcn))
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
import pytest
from ansiblelint.testing import ( # pylint: disable=ungrouped-imports
run_ansible_lint,
)
@pytest.mark.parametrize(
("file", "expected"),
(
pytest.param(
"examples/playbooks/var-naming/rule-var-naming-fail.yml",
7,
id="0",
),
pytest.param("examples/Taskfile.yml", 0, id="1"),
),
)
def test_invalid_var_name_playbook(
file: str,
expected: int,
config_options: Options,
) -> None:
"""Test rule matches."""
rules = RulesCollection(options=config_options)
rules.register(VariableNamingRule())
results = Runner(Lintable(file), rules=rules).run()
assert len(results) == expected
for result in results:
assert result.rule.id == VariableNamingRule.id
# We are not checking line numbers because they can vary between
# different versions of ruamel.yaml (and depending on presence/absence
# of its c-extension)
def test_invalid_var_name_varsfile(
default_rules_collection: RulesCollection,
) -> None:
"""Test rule matches."""
results = Runner(
Lintable("examples/playbooks/vars/rule_var_naming_fail.yml"),
rules=default_rules_collection,
).run()
expected_errors = (
("schema[vars]", 1),
("var-naming[pattern]", 2),
("var-naming[pattern]", 6),
("var-naming[no-keyword]", 10),
("var-naming[non-ascii]", 11),
("var-naming[no-reserved]", 12),
("var-naming[read-only]", 13),
)
assert len(results) == len(expected_errors)
for idx, result in enumerate(results):
assert result.tag == expected_errors[idx][0]
assert result.lineno == expected_errors[idx][1]
def test_invalid_vars_diff_files(
default_rules_collection: RulesCollection,
) -> None:
"""Test rule matches."""
results = Runner(
Lintable("examples/playbooks/vars/rule_var_naming_fails_files"),
rules=default_rules_collection,
).run()
expected_errors = (
("var-naming[pattern]", 2),
("var-naming[pattern]", 3),
("var-naming[pattern]", 2),
("var-naming[pattern]", 3),
)
assert len(results) == len(expected_errors)
for idx, result in enumerate(results):
assert result.tag == expected_errors[idx][0]
assert result.lineno == expected_errors[idx][1]
def test_var_naming_with_role_prefix(
default_rules_collection: RulesCollection,
) -> None:
"""Test rule matches."""
results = Runner(
Lintable("examples/roles/role_vars_prefix_detection"),
rules=default_rules_collection,
).run()
assert len(results) == 2
for result in results:
assert result.tag == "var-naming[no-role-prefix]"
def test_var_naming_with_role_prefix_plays(
default_rules_collection: RulesCollection,
) -> None:
"""Test rule matches."""
results = Runner(
Lintable("examples/playbooks/role_vars_prefix_detection.yml"),
rules=default_rules_collection,
exclude_paths=["examples/roles/role_vars_prefix_detection"],
).run()
expected_errors = (
("var-naming[no-role-prefix]", 9),
("var-naming[no-role-prefix]", 12),
("var-naming[no-role-prefix]", 15),
("var-naming[no-role-prefix]", 25),
("var-naming[no-role-prefix]", 32),
("var-naming[no-role-prefix]", 45),
)
assert len(results) == len(expected_errors)
for idx, result in enumerate(results):
assert result.tag == expected_errors[idx][0]
assert result.lineno == expected_errors[idx][1]
def test_var_naming_with_pattern() -> None:
"""Test rule matches."""
role_path = "examples/roles/var_naming_pattern/tasks/main.yml"
conf_path = "examples/roles/var_naming_pattern/.ansible-lint"
result = run_ansible_lint(
f"--config-file={conf_path}",
role_path,
)
assert result.returncode == RC.SUCCESS
assert "var-naming" not in result.stdout
def test_var_naming_with_pattern_foreign_role() -> None:
"""Test rule matches."""
role_path = "examples/playbooks/bug-4095.yml"
conf_path = "examples/roles/var_naming_pattern/.ansible-lint"
result = run_ansible_lint(
f"--config-file={conf_path}",
role_path,
)
assert result.returncode == RC.SUCCESS
assert "var-naming" not in result.stdout
def test_var_naming_with_include_tasks_and_vars() -> None:
"""Test with include tasks and vars."""
role_path = "examples/roles/var_naming_pattern/tasks/include_task_with_vars.yml"
result = run_ansible_lint(role_path)
assert result.returncode == RC.SUCCESS
assert "var-naming" not in result.stdout
def test_var_naming_with_set_fact_and_cacheable() -> None:
"""Test with include tasks and vars."""
role_path = "examples/roles/var_naming_pattern/tasks/cacheable_set_fact.yml"
result = run_ansible_lint(role_path)
assert result.returncode == RC.SUCCESS
assert "var-naming" not in result.stdout
def test_var_naming_with_include_role_import_role() -> None:
"""Test with include role and import role."""
role_path = "examples/.test_collection/roles/my_role/tasks/main.yml"
result = run_ansible_lint(role_path)
assert result.returncode == RC.SUCCESS
assert "var-naming" not in result.stdout
"""Implementation of GalaxyRule."""
from __future__ import annotations
import sys
from typing import TYPE_CHECKING, Any
from ansiblelint.constants import FILENAME_KEY, LINE_NUMBER_KEY
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
class GalaxyRule(AnsibleLintRule):
    """Rule for checking collections."""

    id = "galaxy"
    description = "Confirm that collection's units are valid."
    severity = "MEDIUM"
    tags = ["metadata"]
    version_added = "v6.11.0 (last update)"
    _ids = {
        "galaxy[tags]": "galaxy.yaml must have one of the required tags",
        "galaxy[no-changelog]": "No changelog found. Please add a changelog file. Refer to the galaxy.md file for more info.",
        "galaxy[version-missing]": "galaxy.yaml should have version tag.",
        "galaxy[no-runtime]": "meta/runtime.yml file not found.",
        "galaxy[invalid-dependency-version]": "Invalid collection metadata. Dependency version spec range is invalid",
    }

    def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]:
        """Return matches found for a specific play (entry in playbook).

        For "galaxy" files this validates the collection metadata: required
        tags, dependency version specs, changelog presence, version field,
        and meta/runtime.yml presence.
        """
        if file.kind != "galaxy":  # type: ignore[comparison-overlap]
            return []

        # Defined by Automation Hub Team and Partner Engineering
        required_tag_list = [
            "application",
            "cloud",
            "database",
            "eda",
            "infrastructure",
            "linux",
            "monitoring",
            "networking",
            "security",
            "storage",
            "tools",
            "windows",
        ]

        results = []

        base_path = file.path.parent.resolve()
        # int used as a boolean flag: 1 when any changelog candidate exists.
        changelog_found = 0
        changelog_paths = [
            base_path / "changelogs" / "changelog.yaml",
            base_path / "changelogs" / "changelog.yml",
            base_path / "CHANGELOG.rst",
            base_path / "CHANGELOG.md",
        ]

        for path in changelog_paths:
            if path.is_file():
                changelog_found = 1
        galaxy_tag_list = data.get("tags")
        collection_deps = data.get("dependencies")
        if collection_deps:
            for dep, ver in collection_deps.items():
                # Internal bookkeeping keys are not dependencies; an empty
                # version spec is invalid metadata.
                if (
                    dep not in [LINE_NUMBER_KEY, FILENAME_KEY]
                    and len(str(ver).strip()) == 0
                ):
                    results.append(
                        self.create_matcherror(
                            message=f"Invalid collection metadata. Dependency version spec range is invalid for '{dep}'.",
                            tag="galaxy[invalid-dependency-version]",
                            filename=file,
                        ),
                    )

        # Changelog Check - building off Galaxy rule as there is no current way to check
        # for a nonexistent file
        if not changelog_found:
            results.append(
                self.create_matcherror(
                    message="No changelog found. Please add a changelog file. Refer to the galaxy.md file for more info.",
                    tag="galaxy[no-changelog]",
                    filename=file,
                ),
            )

        # Checking if galaxy.yml contains one or more required tags for certification
        if not galaxy_tag_list or not any(
            tag in required_tag_list for tag in galaxy_tag_list
        ):
            results.append(
                self.create_matcherror(
                    message=(
                        f"galaxy.yaml must have one of the required tags: {required_tag_list}"
                    ),
                    tag="galaxy[tags]",
                    filename=file,
                ),
            )

        if "version" not in data:
            results.append(
                self.create_matcherror(
                    message="galaxy.yaml should have version tag.",
                    lineno=data[LINE_NUMBER_KEY],
                    tag="galaxy[version-missing]",
                    filename=file,
                ),
            )
            return results
        # returning here as it does not make sense
        # to continue for version check below
        # NOTE: the early return above intentionally skips the runtime.yml
        # check whenever the version field is missing.

        if not (base_path / "meta" / "runtime.yml").is_file():
            results.append(
                self.create_matcherror(
                    message="meta/runtime.yml file not found.",
                    tag="galaxy[no-runtime]",
                    filename=file,
                ),
            )

        return results
if "pytest" in sys.modules:
import pytest
from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports
from ansiblelint.runner import Runner
def test_galaxy_no_collection_version() -> None:
"""Test for no collection version in galaxy."""
collection = RulesCollection()
collection.register(GalaxyRule())
failure = "examples/.no_collection_version/galaxy.yml"
bad_runner = Runner(failure, rules=collection)
errs = bad_runner.run()
assert len(errs) == 1
@pytest.mark.parametrize(
("file", "expected"),
(
pytest.param(
"examples/galaxy_no_required_tags/fail/galaxy.yml",
["galaxy[tags]"],
id="tags",
),
pytest.param(
"examples/galaxy_no_required_tags/pass/galaxy.yml",
[],
id="pass",
),
pytest.param(
"examples/.collection/galaxy.yml",
["schema[galaxy]"],
id="schema",
),
pytest.param(
"examples/.invalid_dependencies/galaxy.yml",
[
"galaxy[invalid-dependency-version]",
"galaxy[invalid-dependency-version]",
],
id="invalid-dependency-version",
),
pytest.param(
"examples/.no_changelog/galaxy.yml",
["galaxy[no-changelog]"],
id="no-changelog",
),
pytest.param(
"examples/.no_collection_version/galaxy.yml",
["schema[galaxy]", "galaxy[version-missing]"],
id="no-collection-version",
),
),
)
def test_galaxy_rule(
default_rules_collection: RulesCollection,
file: str,
expected: list[str],
) -> None:
"""Validate that rule works as intended."""
results = Runner(file, rules=default_rules_collection).run()
assert len(results) == len(expected)
for index, result in enumerate(results):
assert result.tag == expected[index]
"""Implementation of the no-changed-when rule."""
# Copyright (c) 2016 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import annotations
import sys
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class CommandHasChangesCheckRule(AnsibleLintRule):
    """Commands should not change things if nothing needs doing."""

    id = "no-changed-when"
    severity = "HIGH"
    tags = ["command-shell", "idempotency"]
    version_added = "historic"

    _commands = [
        "ansible.builtin.command",
        "ansible.builtin.shell",
        "ansible.builtin.raw",
        "ansible.legacy.command",
        "ansible.legacy.shell",
        "ansible.legacy.raw",
        "command",
        "shell",
        "raw",
    ]

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> list[MatchError]:
        """Flag command-like tasks with no way to determine changed status."""
        # tasks in a block are "meta" type
        if task["__ansible_action_type__"] not in ("task", "meta"):
            return []

        action = task["action"]
        if action["__ansible_module__"] not in self._commands:
            return []

        # Any of these makes the changed status of the task predictable.
        if "changed_when" in task.raw_task:
            return []
        if "creates" in action or "removes" in action:
            return []

        return [self.create_matcherror(filename=file)]
if "pytest" in sys.modules:
import pytest
# pylint: disable=ungrouped-imports
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
@pytest.mark.parametrize(
("file", "expected"),
(
pytest.param(
"examples/playbooks/rule-no-changed-when-pass.yml",
0,
id="pass",
),
pytest.param(
"examples/playbooks/rule-no-changed-when-fail.yml",
3,
id="fail",
),
),
)
def test_rule_no_changed_when(
default_rules_collection: RulesCollection,
file: str,
expected: int,
) -> None:
"""Validate no-changed-when rule."""
results = Runner(file, rules=default_rules_collection).run()
for result in results:
assert result.rule.id == CommandHasChangesCheckRule.id, result
assert len(results) == expected
"""Implementation of risky-octal rule."""
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import annotations
import sys
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule, RulesCollection
from ansiblelint.runner import Runner
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class OctalPermissionsRule(AnsibleLintRule):
    """Octal file permissions must contain leading zero or be a string."""

    id = "risky-octal"
    description = (
        "Numeric file permissions without leading zero can behave "
        "in unexpected ways."
    )
    link = "https://docs.ansible.com/ansible/latest/collections/ansible/builtin/file_module.html"
    severity = "VERY_HIGH"
    tags = ["formatting"]
    version_added = "historic"

    # Modules that accept a ``mode`` argument for managed files.
    _modules = [
        "assemble",
        "copy",
        "file",
        "ini_file",
        "lineinfile",
        "replace",
        "synchronize",
        "template",
        "unarchive",
    ]

    @staticmethod
    def is_invalid_permission(mode: int) -> bool:
        """Check if permissions are valid.

        Sensible file permission modes don't have write bit set when read bit
        is not set and don't have execute bit set when user execute bit is
        not set.

        Also, user permissions are more generous than group permissions and
        user and group permissions are more generous than world permissions.

        Permission triads extracted arithmetically: ``mode % 8`` is the
        other/world triad, ``(mode >> 3) % 8`` the group triad, and
        ``(mode >> 6) % 8`` the user triad. A triad below 4 has no read bit.
        """
        # Execute-only (1) for other is tolerated when the user triad also
        # has its execute bit set ((mode >> 6) % 2 == 1).
        other_write_without_read = (
            mode % 8 and mode % 8 < 4 and not (mode % 8 == 1 and (mode >> 6) % 2 == 1)
        )
        group_write_without_read = (
            (mode >> 3) % 8
            and (mode >> 3) % 8 < 4
            and not ((mode >> 3) % 8 == 1 and (mode >> 6) % 2 == 1)
        )
        user_write_without_read = (
            (mode >> 6) % 8 and (mode >> 6) % 8 < 4 and (mode >> 6) % 8 != 1
        )
        other_more_generous_than_group = mode % 8 > (mode >> 3) % 8
        other_more_generous_than_user = mode % 8 > (mode >> 6) % 8
        group_more_generous_than_user = (mode >> 3) % 8 > (mode >> 6) % 8

        return bool(
            other_write_without_read
            or group_write_without_read
            or user_write_without_read
            or other_more_generous_than_group
            or other_more_generous_than_user
            or group_more_generous_than_user,
        )

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Flag numeric ``mode`` values that decode to nonsensical permissions.

        A YAML ``644`` without a leading zero reaches us as decimal 644
        (octal 0o1204), which the triad checks reject, while a proper octal
        literal like 0o644 passes.
        """
        if task["action"]["__ansible_module__"] in self._modules:
            mode = task["action"].get("mode", None)

            # A string mode is either symbolic or a quoted octal; both safe.
            if isinstance(mode, str):
                return False

            if isinstance(mode, int) and self.is_invalid_permission(mode):
                return f'`mode: {mode}` should have a string value with leading zero `mode: "0{mode:o}"` or use symbolic mode.'
        return False
if "pytest" in sys.modules:
    import pytest

    # Representative permission modes written as proper octal literals;
    # none of these should be flagged by is_invalid_permission().
    VALID_MODES = [
        0o777,
        0o775,
        0o770,
        0o755,
        0o750,
        0o711,
        0o710,
        0o700,
        0o666,
        0o664,
        0o660,
        0o644,
        0o640,
        0o600,
        0o555,
        0o551,
        0o550,
        0o511,
        0o510,
        0o500,
        0o444,
        0o440,
        0o400,
    ]

    # The same style of permissions written as *decimal* integers -- the
    # mistake this rule detects; every one must be flagged as invalid.
    INVALID_MODES = [
        777,
        775,
        770,
        755,
        750,
        711,
        710,
        700,
        666,
        664,
        660,
        644,
        640,
        622,
        620,
        600,
        555,
        551,
        # Decimal 511, 510 and 500 are deliberately absent: they happen to
        # equal valid octal modes (511 == 0o777, 510 == 0o776, 500 == 0o764),
        # so the rule correctly does not flag them.
        550,
        444,
        440,
        400,
    ]
@pytest.mark.parametrize(
("file", "failures"),
(
pytest.param("examples/playbooks/rule-risky-octal-pass.yml", 0, id="pass"),
pytest.param("examples/playbooks/rule-risky-octal-fail.yml", 4, id="fail"),
),
)
def test_octal(file: str, failures: int) -> None:
"""Test that octal permissions are valid."""
collection = RulesCollection()
collection.register(OctalPermissionsRule())
results = Runner(file, rules=collection).run()
assert len(results) == failures
for result in results:
assert result.rule.id == "risky-octal"
def test_octal_valid_modes() -> None:
"""Test that octal modes are valid."""
rule = OctalPermissionsRule()
for mode in VALID_MODES:
assert not rule.is_invalid_permission(
mode,
), f"0o{mode:o} should be a valid mode"
def test_octal_invalid_modes() -> None:
"""Test that octal modes are invalid."""
rule = OctalPermissionsRule()
for mode in INVALID_MODES:
assert rule.is_invalid_permission(
mode,
), f"{mode:d} should be an invalid mode"
| 5,997 | Python | .py | 173 | 26.751445 | 127 | 0.600862 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,263 | role_name.py | ansible_ansible-lint/src/ansiblelint/rules/role_name.py | """Implementation of role-name rule."""
# Copyright (c) 2020 Gael Chamoulaud <gchamoul@redhat.com>
# Copyright (c) 2020 Sorin Sbarnea <ssbarnea@redhat.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import annotations
import re
import sys
from functools import cache
from typing import TYPE_CHECKING
from ansiblelint.constants import ROLE_IMPORT_ACTION_NAMES
from ansiblelint.rules import AnsibleLintRule
from ansiblelint.utils import parse_yaml_from_file
if TYPE_CHECKING:
from pathlib import Path
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
ROLE_NAME_REGEX = re.compile(r"^[a-z][a-z0-9_]*$")
def _remove_prefix(text: str, prefix: str) -> str:
return re.sub(rf"^{re.escape(prefix)}", "", text)
@cache
def _match_role_name_regex(role_name: str) -> bool:
return ROLE_NAME_REGEX.match(role_name) is not None
class RoleNames(AnsibleLintRule):
    """Role name {0} does not match ``^[a-z][a-z0-9_]*$`` pattern."""

    # The {0} placeholder above is filled with the offending role name via
    # self.shortdesc.format(role_name) in matchyaml().
    id = "role-name"
    description = (
        "Role names are now limited to contain only lowercase alphanumeric "
        "characters, plus underline and start with an alpha character."
    )
    link = "https://docs.ansible.com/ansible/devel/dev_guide/developing_collections_structure.html#roles-directory"
    severity = "HIGH"
    tags = ["deprecations", "metadata"]
    version_added = "v6.8.5"
    # Sub-rule tags this rule can emit in addition to the bare "role-name".
    _ids = {
        "role-name[path]": "Avoid using paths when importing roles.",
    }

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> list[MatchError]:
        """Flag include/import_role tasks that reference a role by path."""
        results = []
        if task["action"]["__ansible_module__"] in ROLE_IMPORT_ACTION_NAMES:
            name = task["action"].get("name", "")
            if "/" in name:
                results.append(
                    self.create_matcherror(
                        f"Avoid using paths when importing roles. ({name})",
                        filename=file,
                        # Prefer the action's own line, falling back to the task's.
                        lineno=task["action"].get("__line__", task["__line__"]),
                        tag=f"{self.id}[path]",
                    ),
                )
        return results

    def matchdir(self, lintable: Lintable) -> list[MatchError]:
        """Check a role directory by delegating to matchyaml."""
        return self.matchyaml(lintable)

    def matchyaml(self, file: Lintable) -> list[MatchError]:
        """Check role names in meta dependencies, playbook roles and role dirs."""
        result: list[MatchError] = []
        if file.kind not in ("meta", "role", "playbook"):
            return result
        if file.kind == "meta":
            for role in file.data.get("dependencies", []):
                if isinstance(role, dict):
                    role_name = role["role"]
                elif isinstance(role, str):
                    role_name = role
                else:
                    msg = "Role dependency has unexpected type."
                    raise TypeError(msg)
                if "/" in role_name:
                    result.append(
                        self.create_matcherror(
                            f"Avoid using paths when importing roles. ({role_name})",
                            filename=file,
                            # role_name is an AnsibleUnicode carrying its YAML
                            # source position; presumably index 1 is the line
                            # number -- TODO(review): confirm.
                            lineno=role_name.ansible_pos[1],
                            tag=f"{self.id}[path]",
                        ),
                    )
            return result
        if file.kind == "playbook":
            for play in file.data:
                if "roles" in play:
                    line = play["__line__"]
                    for role in play["roles"]:
                        if isinstance(role, dict):
                            line = role["__line__"]
                            role_name = role["role"]
                        elif isinstance(role, str):
                            role_name = role
                        # NOTE(review): when role is neither dict nor str,
                        # role_name keeps the value from a previous iteration
                        # (or is unbound on the first one) -- confirm intended.
                        if "/" in role_name:
                            result.append(
                                self.create_matcherror(
                                    f"Avoid using paths when importing roles. ({role_name})",
                                    filename=file,
                                    lineno=line,
                                    tag=f"{self.id}[path]",
                                ),
                            )
            return result
        # Remaining kinds: derive the effective role name and validate it.
        if file.kind == "role":
            role_name = self._infer_role_name(
                meta=file.path / "meta" / "main.yml",
                default=file.path.name,
            )
        else:
            role_name = self._infer_role_name(
                meta=file.path,
                default=file.path.resolve().parents[1].name,
            )
        # The conventional repository prefix is ignored for validation.
        role_name = _remove_prefix(role_name, "ansible-role-")
        if role_name and not _match_role_name_regex(role_name):
            result.append(
                self.create_matcherror(
                    filename=file,
                    message=self.shortdesc.format(role_name),
                ),
            )
        return result

    @staticmethod
    def _infer_role_name(meta: Path, default: str) -> str:
        """Return galaxy_info.role_name from the meta file, or *default*."""
        if meta.is_file():
            meta_data = parse_yaml_from_file(str(meta))
            if meta_data:
                try:
                    return str(meta_data["galaxy_info"]["role_name"])
                except (KeyError, TypeError):
                    pass
        return default
if "pytest" in sys.modules:
import pytest
# pylint: disable=ungrouped-imports
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
@pytest.mark.parametrize(
("test_file", "failure"),
(pytest.param("examples/playbooks/rule-role-name-path.yml", 3, id="fail"),),
)
def test_role_name_path(
default_rules_collection: RulesCollection,
test_file: str,
failure: int,
) -> None:
"""Test rule matches."""
results = Runner(test_file, rules=default_rules_collection).run()
for result in results:
assert result.tag == "role-name[path]"
assert len(results) == failure
@pytest.mark.parametrize(
("test_file", "failure"),
(pytest.param("examples/roles/role_with_deps_paths", 3, id="fail"),),
)
def test_role_deps_path_names(
default_rules_collection: RulesCollection,
test_file: str,
failure: int,
) -> None:
"""Test rule matches."""
results = Runner(
test_file,
rules=default_rules_collection,
).run()
expected_errors = (
("role-name[path]", 3),
("role-name[path]", 9),
("role-name[path]", 10),
)
assert len(expected_errors) == failure
for idx, result in enumerate(results):
assert result.tag == expected_errors[idx][0]
assert result.lineno == expected_errors[idx][1]
assert len(results) == failure
@pytest.mark.parametrize(
("test_file", "failure"),
(pytest.param("examples/roles/test-no-deps-role", 0, id="no_deps"),),
)
def test_role_no_deps(
default_rules_collection: RulesCollection,
test_file: str,
failure: int,
) -> None:
"""Test role if no dependencies are present in meta/main.yml."""
results = Runner(
test_file,
rules=default_rules_collection,
).run()
assert len(results) == failure
| 8,409 | Python | .py | 204 | 29.960784 | 115 | 0.564579 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,264 | jinja.py | ansible_ansible-lint/src/ansiblelint/rules/jinja.py | """Rule for checking content of jinja template strings."""
from __future__ import annotations
import logging
import os
import re
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Any, NamedTuple
import black
import jinja2
from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleParserError
from ansible.parsing.yaml.objects import AnsibleUnicode
from jinja2.exceptions import TemplateSyntaxError
from ansiblelint.constants import LINE_NUMBER_KEY
from ansiblelint.errors import RuleMatchTransformMeta
from ansiblelint.file_utils import Lintable
from ansiblelint.rules import AnsibleLintRule, TransformMixin
from ansiblelint.runner import get_matches
from ansiblelint.skip_utils import get_rule_skips_from_line
from ansiblelint.text import has_jinja
from ansiblelint.utils import ( # type: ignore[attr-defined]
Templar,
parse_yaml_from_file,
template,
)
from ansiblelint.yaml_utils import deannotate, nested_items_path
if TYPE_CHECKING:
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ansiblelint.config import Options
from ansiblelint.errors import MatchError
from ansiblelint.utils import Task
_logger = logging.getLogger(__package__)

# Task keywords whose string values Ansible evaluates as jinja expressions
# even without the surrounding "{{ }}" braces (see JinjaRule.check_whitespace).
KEYWORDS_WITH_IMPLICIT_TEMPLATE = ("changed_when", "failed_when", "until", "when")
class Token(NamedTuple):
    """Token."""

    lineno: int  # 1-based line inside the template where the token starts
    token_type: str  # jinja2 lexer token type, e.g. "variable_begin", "name"
    value: str  # raw token text
# Error messages produced while test-templating a value that should NOT be
# reported: they stem from missing runtime context (undefined variables,
# absent lookup files, uninstalled collections) rather than a broken template.
ignored_re = re.compile(
    "|".join(  # noqa: FLY002
        [
            r"^Object of type method is not JSON serializable",
            r"^Unexpected templating type error occurred on",
            r"^obj must be a list of dicts or a nested dict$",
            r"^the template file (.*) could not be found for the lookup$",
            r"could not locate file in lookup",
            r"unable to locate collection",
            r"^Error in (.*)is undefined$",
            r"^Mandatory variable (.*) not defined.$",
            r"is undefined",
            r"Unrecognized type <<class 'ansible.template.AnsibleUndefined'>> for (.*) filter <value>$",
            # https://github.com/ansible/ansible-lint/issues/3155
            r"^The '(.*)' test expects a dictionary$",
        ],
    ),
    flags=re.MULTILINE | re.DOTALL,
)
@dataclass(frozen=True)
class JinjaRuleTMetaSpacing(RuleMatchTransformMeta):
    """JinjaRule transform metadata.

    :param key: Key or index within the task
    :param value: Value of the key
    :param path: Path to the key
    :param fixed: Value with spacing fixed
    """

    key: str | int
    value: str | int
    path: tuple[str | int, ...]
    fixed: str

    def __str__(self) -> str:
        """Return string representation."""
        parts = (self.key, self.value, self.path, self.fixed)
        return "{}={} at {} fixed to {}".format(*parts)
class JinjaRule(AnsibleLintRule, TransformMixin):
    """Rule that looks inside jinja2 templates."""

    id = "jinja"
    severity = "LOW"
    tags = ["formatting"]
    version_added = "v6.5.0"

    # Parses an AnsibleError message into either the classic
    # "<error>: <detail>. String: <string>" shape or the nested
    # "... original message: <nested_error>" shape.
    _ansible_error_re = re.compile(
        (
            r"^(?P<error>.*): (?P<detail>.*)\. String: (?P<string>.*)$"
            r"|An unhandled exception occurred while templating '.*'\. Error was a .*, original message: (?P<nested_error>.*)"
        ),
        flags=re.MULTILINE,
    )

    # Dedicated environment used only for lexing template strings.
    env = jinja2.Environment(trim_blocks=False)
    _tag2msg = {
        "invalid": "Syntax error in jinja2 template: {value}",
        "spacing": "Jinja2 spacing could be improved: {value} -> {reformatted}",
    }
    _ids = {
        "jinja[invalid]": "Invalid jinja2 syntax",
        "jinja[spacing]": "Jinja2 spacing could be improved",
    }

    def _msg(self, tag: str, value: str, reformatted: str) -> str:
        """Generate error message."""
        return self._tag2msg[tag].format(value=value, reformatted=reformatted)

    # pylint: disable=too-many-locals
    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> list[MatchError]:
        """Check every string value of a task for jinja syntax and spacing issues."""
        result = []
        try:
            for key, v, path in nested_items_path(
                task,
                ignored_keys=("block", "ansible.builtin.block", "ansible.legacy.block"),
            ):
                if isinstance(v, str):
                    try:
                        # Dry-run templating to surface syntax errors.
                        template(
                            basedir=file.path.parent if file else Path(),
                            value=v,
                            variables=deannotate(task.get("vars", {})),
                            fail_on_error=True,  # we later decide which ones to ignore or not
                        )
                    except AnsibleFilterError:
                        bypass = True
                    # ValueError RepresenterError
                    except AnsibleError as exc:
                        bypass = False
                        orig_exc = (
                            exc.orig_exc if getattr(exc, "orig_exc", None) else exc
                        )
                        orig_exc_message = getattr(orig_exc, "message", str(orig_exc))
                        match = self._ansible_error_re.match(
                            getattr(orig_exc, "message", str(orig_exc)),
                        )
                        if ignored_re.search(orig_exc_message) or isinstance(
                            orig_exc,
                            AnsibleParserError | TypeError,
                        ):
                            # An unhandled exception occurred while running the lookup plugin 'template'. Error was a <class 'ansible.errors.AnsibleError'>, original message: the template file ... could not be found for the lookup. the template file ... could not be found for the lookup

                            # ansible@devel (2.14) new behavior:
                            # AnsibleError(TemplateSyntaxError): template error while templating string: Could not load "ipwrap": 'Invalid plugin FQCN (ansible.netcommon.ipwrap): unable to locate collection ansible.netcommon'. String: Foo {{ buildset_registry.host | ipwrap }}. Could not load "ipwrap": 'Invalid plugin FQCN (ansible.netcommon.ipwrap): unable to locate collection ansible.netcommon'
                            bypass = True
                        elif (
                            isinstance(orig_exc, AnsibleError | TemplateSyntaxError)
                            and match
                        ):
                            error = match.group("error")
                            detail = match.group("detail")
                            nested_error = match.group("nested_error")
                            if error and error.startswith(
                                "template error while templating string",
                            ):
                                bypass = False
                            elif detail and detail.startswith(
                                "unable to locate collection",
                            ):
                                _logger.debug("Ignored AnsibleError: %s", exc)
                                bypass = True
                            elif nested_error and nested_error.startswith(
                                "Unexpected templating type error occurred on",
                            ):
                                bypass = True
                            else:
                                bypass = False
                        elif re.match(r"^lookup plugin (.*) not found$", exc.message):
                            # lookup plugin 'template' not found
                            bypass = True

                        # AnsibleError: template error while templating string: expected token ':', got '}'. String: {{ {{ '1' }} }}
                        # AnsibleError: template error while templating string: unable to locate collection ansible.netcommon. String: Foo {{ buildset_registry.host | ipwrap }}
                        if not bypass:
                            result.append(
                                self.create_matcherror(
                                    message=str(exc),
                                    lineno=_get_error_line(task, path),
                                    filename=file,
                                    tag=f"{self.id}[invalid]",
                                ),
                            )
                            continue

                    # Template parsed (or error was bypassed): check spacing.
                    reformatted, details, tag = self.check_whitespace(
                        v,
                        key=key,
                        lintable=file,
                    )
                    if reformatted != v:
                        result.append(
                            self.create_matcherror(
                                message=self._msg(
                                    tag=tag,
                                    value=v,
                                    reformatted=reformatted,
                                ),
                                lineno=_get_error_line(task, path),
                                details=details,
                                filename=file,
                                tag=f"{self.id}[{tag}]",
                                transform_meta=JinjaRuleTMetaSpacing(
                                    key=key,
                                    value=v,
                                    path=tuple(path),
                                    fixed=reformatted,
                                ),
                            ),
                        )
        except Exception as exc:
            _logger.info("Exception in JinjaRule.matchtask: %s", exc)
            raise
        return result

    def matchyaml(self, file: Lintable) -> list[MatchError]:
        """Return matches for variables defined in vars files."""
        data: dict[str, Any] = {}
        raw_results: list[MatchError] = []
        results: list[MatchError] = []

        if str(file.kind) == "vars":
            data = parse_yaml_from_file(str(file.path))
            for key, v, _path in nested_items_path(data):
                if isinstance(v, AnsibleUnicode):
                    reformatted, details, tag = self.check_whitespace(
                        v,
                        key=key,
                        lintable=file,
                    )
                    if reformatted != v:
                        results.append(
                            self.create_matcherror(
                                message=self._msg(
                                    tag=tag,
                                    value=v,
                                    reformatted=reformatted,
                                ),
                                lineno=v.ansible_pos[1],
                                details=details,
                                filename=file,
                                tag=f"{self.id}[{tag}]",
                            ),
                        )
            # NOTE(review): nothing in this method appends to raw_results, so
            # the skip-comment filtering below currently never runs -- confirm
            # whether matches should be collected there first.
            if raw_results:
                lines = file.content.splitlines()
                for match in raw_results:
                    # lineno starts with 1, not zero
                    skip_list = get_rule_skips_from_line(
                        line=lines[match.lineno - 1],
                        lintable=file,
                    )
                    if match.rule.id not in skip_list and match.tag not in skip_list:
                        results.append(match)
        else:
            results.extend(super().matchyaml(file))
        return results

    def lex(self, text: str) -> list[Token]:
        """Parse jinja template."""
        # https://github.com/pallets/jinja/issues/1711
        self.env.keep_trailing_newline = True

        self.env.lstrip_blocks = False
        self.env.trim_blocks = False
        self.env.autoescape = True
        self.env.newline_sequence = "\n"
        tokens = [
            Token(lineno=t[0], token_type=t[1], value=t[2]) for t in self.env.lex(text)
        ]
        new_text = self.unlex(tokens)
        if text != new_text:
            # Fixed: the message previously contained literal "{text}" and
            # "{new_text}" placeholders that were never interpolated; use
            # lazy %-style logger arguments instead.
            _logger.debug(
                "Unable to perform full roundtrip lex-unlex on jinja template (expected when '-' modifier is used): %s -> %s",
                text,
                new_text,
            )
        return tokens

    def unlex(self, tokens: list[Token]) -> str:
        """Return original text by compiling the lex output."""
        result = ""
        last_lineno = 1
        last_value = ""
        for lineno, _, value in tokens:
            if lineno > last_lineno and "\n" not in last_value:
                result += "\n"
            result += value
            last_lineno = lineno
            last_value = value
        return result

    # pylint: disable=too-many-locals
    def check_whitespace(
        self,
        text: str,
        key: str,
        lintable: Lintable | None = None,
    ) -> tuple[str, str, str]:
        """Check spacing inside given jinja2 template string.

        We aim to match Python Black formatting rules.
        :raises NotImplementedError: On few cases where valid jinja is not valid Python.

        :returns: (string, string, string) reformatted text, detailed error, error tag
        """

        def cook(value: str, *, implicit: bool = False) -> str:
            """Prepare an implicit string for jinja parsing when needed."""
            if not implicit:
                return value
            if value.startswith("{{") and value.endswith("}}"):
                # maybe we should make this an error?
                return value
            return f"{{{{ {value} }}}}"

        def uncook(value: str, *, implicit: bool = False) -> str:
            """Restore an string to original form when it was an implicit one."""
            if not implicit:
                return value
            return value[3:-3]

        tokens = []
        details = ""
        begin_types = ("variable_begin", "comment_begin", "block_begin")
        end_types = ("variable_end", "comment_end", "block_end")
        implicit = False

        # implicit templates do not have the {{ }} wrapping
        if (
            key in KEYWORDS_WITH_IMPLICIT_TEMPLATE
            and lintable
            and lintable.kind
            in (
                "playbook",
                "task",
            )
        ):
            implicit = True
            text = cook(text, implicit=implicit)

        # don't try to lex strings that have no jinja inside them
        if not has_jinja(text):
            return text, "", "spacing"

        expr_str = None
        expr_type = None
        verb_skipped = True
        lineno = 1
        try:
            for token in self.lex(text):
                if (
                    expr_type
                    and expr_type.startswith("{%")
                    and token.token_type in ("name", "whitespace")
                    and not verb_skipped
                ):
                    # on {% blocks we do not take first word as part of the expression
                    tokens.append(token)
                    if token.token_type != "whitespace":
                        verb_skipped = True
                elif token.token_type in begin_types:
                    tokens.append(token)
                    expr_type = token.value  # such {#, {{, {%
                    expr_str = ""
                    verb_skipped = False
                elif token.token_type in end_types and expr_str is not None:
                    # process expression
                    # pylint: disable=unsupported-membership-test
                    if isinstance(expr_str, str) and "\n" in expr_str:
                        raise NotImplementedError  # noqa: TRY301
                    leading_spaces = " " * (len(expr_str) - len(expr_str.lstrip()))
                    expr_str = leading_spaces + blacken(expr_str.lstrip())
                    if tokens[
                        -1
                    ].token_type != "whitespace" and not expr_str.startswith(" "):
                        expr_str = " " + expr_str
                    if not expr_str.endswith(" "):
                        expr_str += " "
                    tokens.append(Token(lineno, "data", expr_str))
                    tokens.append(token)
                    expr_str = None
                    expr_type = None
                elif expr_str is not None:
                    expr_str += token.value
                else:
                    tokens.append(token)
                lineno = token.lineno
        except jinja2.exceptions.TemplateSyntaxError as exc:
            return "", str(exc.message), "invalid"
        # pylint: disable=c-extension-no-member
        except (NotImplementedError, black.parsing.InvalidInput) as exc:
            # black is not able to recognize all valid jinja2 templates, so we
            # just ignore InvalidInput errors.
            # NotImplementedError is raised internally for expressions with
            # newlines, as we decided to not touch them yet.
            # These both are documented as known limitations.
            _logger.debug("Ignored jinja internal error %s", exc)
            return uncook(text, implicit=implicit), "", "spacing"

        # finalize
        reformatted = self.unlex(tokens)
        failed = reformatted != text
        reformatted = uncook(reformatted, implicit=implicit)
        details = (
            f"Jinja2 template rewrite recommendation: `{reformatted}`."
            if failed
            else ""
        )
        return reformatted, details, "spacing"

    def transform(
        self: JinjaRule,
        match: MatchError,
        lintable: Lintable,
        data: CommentedMap | CommentedSeq | str,
    ) -> None:
        """Transform jinja2 errors.

        :param match: MatchError instance
        :param lintable: Lintable instance
        :param data: data to transform
        """
        if match.tag == "jinja[spacing]":
            self._transform_spacing(match, data)

    def _transform_spacing(
        self: JinjaRule,
        match: MatchError,
        data: CommentedMap | CommentedSeq | str,
    ) -> None:
        """Transform jinja2 spacing errors.

        The match error was found on a normalized task so we cannot compare the path
        instead we only compare the key and value, if the task has 2 identical keys with the
        exact same jinja spacing issue, we may transform them out of order

        :param match: MatchError instance
        :param data: data to transform
        """
        if not isinstance(match.transform_meta, JinjaRuleTMetaSpacing):
            return
        if isinstance(data, str):
            return
        obj = self.seek(match.yaml_path, data)
        if obj is None:
            return

        ignored_keys = ("block", "ansible.builtin.block", "ansible.legacy.block")
        for key, value, path in nested_items_path(
            data_collection=obj,
            ignored_keys=ignored_keys,
        ):
            if key == match.transform_meta.key and value == match.transform_meta.value:
                if not path:
                    continue
                # Walk down to the parent container of the matched key.
                for pth in path[:-1]:
                    try:
                        obj = obj[pth]
                    except (KeyError, TypeError) as exc:
                        err = f"Unable to transform {match.transform_meta}: {exc}"
                        _logger.error(err)  # noqa: TRY400
                        return
                try:
                    obj[path[-1]][key] = match.transform_meta.fixed
                    match.fixed = True
                except (KeyError, TypeError) as exc:
                    err = f"Unable to transform {match.transform_meta}: {exc}"
                    _logger.error(err)  # noqa: TRY400
                    return
def blacken(text: str) -> str:
    """Format Jinja2 template using black."""
    # Effectively disable line wrapping and quote normalization so black only
    # fixes token spacing.
    mode = black.FileMode(line_length=sys.maxsize, string_normalization=False)
    return black.format_str(text, mode=mode).rstrip("\n")
if "pytest" in sys.modules:
from unittest import mock
import pytest
# pylint: disable=ungrouped-imports
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
from ansiblelint.transformer import Transformer
    @pytest.fixture(name="error_expected_lines")
    def fixture_error_expected_lines() -> list[int]:
        """Return list of expected error lines."""
        # Line numbers in examples/playbooks/jinja-spacing.yml expected to
        # produce jinja findings.
        return [33, 36, 39, 42, 45, 48, 74]

    # 21 68  (NOTE(review): purpose of these numbers is unclear -- confirm)
@pytest.fixture(name="lint_error_lines")
def fixture_lint_error_lines() -> list[int]:
"""Get VarHasSpacesRules linting results on test_playbook."""
collection = RulesCollection()
collection.register(JinjaRule())
lintable = Lintable("examples/playbooks/jinja-spacing.yml")
results = Runner(lintable, rules=collection).run()
return [item.lineno for item in results]
def test_jinja_spacing_playbook(
error_expected_lines: list[int],
lint_error_lines: list[int],
) -> None:
"""Ensure that expected error lines are matching found linting error lines."""
# list unexpected error lines or non-matching error lines
error_lines_difference = list(
set(error_expected_lines).symmetric_difference(set(lint_error_lines)),
)
assert len(error_lines_difference) == 0
def test_jinja_spacing_vars() -> None:
"""Ensure that expected error details are matching found linting error details."""
collection = RulesCollection()
collection.register(JinjaRule())
lintable = Lintable("examples/playbooks/vars/jinja-spacing.yml")
results = Runner(lintable, rules=collection).run()
error_expected_lineno = [14, 15, 16, 17, 18, 19, 32]
assert len(results) == len(error_expected_lineno)
for idx, err in enumerate(results):
assert err.lineno == error_expected_lineno[idx]
    # Each case maps a raw template string to the expected reformatted string
    # plus the rule tag ("spacing" for fixable layout, "invalid" for syntax
    # errors, where the expected reformatted text is empty).
    @pytest.mark.parametrize(
        ("text", "expected", "tag"),
        (
            pytest.param(
                "{{-x}}{#a#}{%1%}",
                "{{- x }}{# a #}{% 1 %}",
                "spacing",
                id="add-missing-space",
            ),
            pytest.param("", "", "spacing", id="1"),
            pytest.param("foo", "foo", "spacing", id="2"),
            pytest.param("{##}", "{# #}", "spacing", id="3"),
            # we want to keep leading spaces as they might be needed for complex multiline jinja files
            pytest.param("{#  #}", "{#  #}", "spacing", id="4"),
            pytest.param(
                "{{-aaa|xx }}foo\nbar{#some#}\n{%%}",
                "{{- aaa | xx }}foo\nbar{# some #}\n{% %}",
                "spacing",
                id="5",
            ),
            pytest.param(
                "Shell with jinja filter",
                "Shell with jinja filter",
                "spacing",
                id="6",
            ),
            pytest.param(
                "{{{'dummy_2':1}|true}}",
                "{{ {'dummy_2': 1} | true }}",
                "spacing",
                id="7",
            ),
            pytest.param("{{{foo:{}}}}", "{{ {foo: {}} }}", "spacing", id="8"),
            pytest.param(
                "{{ {'test': {'subtest': variable}} }}",
                "{{ {'test': {'subtest': variable}} }}",
                "spacing",
                id="9",
            ),
            pytest.param(
                "http://foo.com/{{\n case1 }}",
                "http://foo.com/{{\n case1 }}",
                "spacing",
                id="10",
            ),
            pytest.param("{{foo(123)}}", "{{ foo(123) }}", "spacing", id="11"),
            pytest.param("{{ foo(a.b.c) }}", "{{ foo(a.b.c) }}", "spacing", id="12"),
            # pytest.param(
            #     "spacing",
            # ),
            pytest.param(
                "{{foo(x =['server_options'])}}",
                "{{ foo(x=['server_options']) }}",
                "spacing",
                id="14",
            ),
            pytest.param(
                '{{ [ "host", "NA"] }}',
                '{{ ["host", "NA"] }}',
                "spacing",
                id="15",
            ),
            pytest.param(
                "{{ {'dummy_2': {'nested_dummy_1': value_1,\n    'nested_dummy_2': value_2}} |\ncombine(dummy_1) }}",
                "{{ {'dummy_2': {'nested_dummy_1': value_1,\n    'nested_dummy_2': value_2}} |\ncombine(dummy_1) }}",
                "spacing",
                id="17",
            ),
            pytest.param("{{ & }}", "", "invalid", id="18"),
            pytest.param(
                "{{ good_format }}/\n{{- good_format }}\n{{- good_format -}}\n",
                "{{ good_format }}/\n{{- good_format }}\n{{- good_format -}}\n",
                "spacing",
                id="19",
            ),
            pytest.param(
                "{{ {'a': {'b': 'x', 'c': y}} }}",
                "{{ {'a': {'b': 'x', 'c': y}} }}",
                "spacing",
                id="20",
            ),
            pytest.param(
                "2*(1+(3-1)) is {{ 2 * {{ 1 + {{ 3 - 1 }}}} }}",
                "2*(1+(3-1)) is {{ 2 * {{1 + {{3 - 1}}}} }}",
                "spacing",
                id="21",
            ),
            pytest.param(
                '{{ "absent"\nif (v is version("2.8.0", ">=")\nelse "present" }}',
                "",
                "invalid",
                id="22",
            ),
            pytest.param(
                '{{lookup("x",y+"/foo/"+z+".txt")}}',
                '{{ lookup("x", y + "/foo/" + z + ".txt") }}',
                "spacing",
                id="23",
            ),
            pytest.param(
                "{{ x | map(attribute='value') }}",
                "{{ x | map(attribute='value') }}",
                "spacing",
                id="24",
            ),
            pytest.param(
                "{{ r(a= 1,b= True,c= 0.0,d= '') }}",
                "{{ r(a=1, b=True, c=0.0, d='') }}",
                "spacing",
                id="25",
            ),
            pytest.param("{{ r(1,[]) }}", "{{ r(1, []) }}", "spacing", id="26"),
            pytest.param(
                "{{ lookup([ddd ]) }}",
                "{{ lookup([ddd]) }}",
                "spacing",
                id="27",
            ),
            pytest.param(
                "{{ [ x ] if x is string else x }}",
                "{{ [x] if x is string else x }}",
                "spacing",
                id="28",
            ),
            pytest.param(
                "{% if a|int <= 8 -%} iptables {%- else -%} iptables-nft {%- endif %}",
                "{% if a | int <= 8 -%} iptables{%- else -%} iptables-nft{%- endif %}",
                "spacing",
                id="29",
            ),
            pytest.param(
                # "- 2" -> "-2", minus does not get separated when there is no left side
                "{{ - 2 }}",
                "{{ -2 }}",
                "spacing",
                id="30",
            ),
            pytest.param(
                # "-2" -> "-2", minus does get an undesired spacing
                "{{ -2 }}",
                "{{ -2 }}",
                "spacing",
                id="31",
            ),
            pytest.param(
                # array ranges do not have space added
                "{{ foo[2:4] }}",
                "{{ foo[2:4] }}",
                "spacing",
                id="32",
            ),
            pytest.param(
                # array ranges have the extra space removed
                "{{ foo[2: 4] }}",
                "{{ foo[2:4] }}",
                "spacing",
                id="33",
            ),
            pytest.param(
                # negative array index
                "{{ foo[-1] }}",
                "{{ foo[-1] }}",
                "spacing",
                id="34",
            ),
            pytest.param(
                # negative array index, repair
                "{{ foo[- 1] }}",
                "{{ foo[-1] }}",
                "spacing",
                id="35",
            ),
            pytest.param("{{ a +~'b' }}", "{{ a + ~'b' }}", "spacing", id="36"),
            pytest.param(
                "{{ (a[: -4] *~ b) }}",
                "{{ (a[:-4] * ~b) }}",
                "spacing",
                id="37",
            ),
            pytest.param("{{ [a,~ b] }}", "{{ [a, ~b] }}", "spacing", id="38"),
            # Not supported yet due to being accepted by black:
            pytest.param("{{ item.0.user }}", "{{ item.0.user }}", "spacing", id="39"),
            # Not supported by back, while jinja allows ~ to be binary operator:
            pytest.param("{{ a ~ b }}", "{{ a ~ b }}", "spacing", id="40"),
            pytest.param(
                "--format='{{'{{'}}.Size{{'}}'}}'",
                "--format='{{ '{{' }}.Size{{ '}}' }}'",
                "spacing",
                id="41",
            ),
            pytest.param(
                "{{ list_one + {{ list_two | max }} }}",
                "{{ list_one + {{list_two | max}} }}",
                "spacing",
                id="42",
            ),
            pytest.param(
                "{{ lookup('file' , '/tmp/non-existent', errors='ignore') }}",
                "{{ lookup('file', '/tmp/non-existent', errors='ignore') }}",
                "spacing",
                id="43",
            ),
            # https://github.com/ansible/ansible-lint/pull/3057
            # since jinja 3.0.0, \r is converted to \n if the string has jinja in it
            pytest.param(
                "{{ 'foo' }}\r{{ 'bar' }}",
                "{{ 'foo' }}\n{{ 'bar' }}",
                "spacing",
                id="44",
            ),
            # if we do not have any jinja constructs, we should keep original \r
            # to match ansible behavior
            pytest.param(
                "foo\rbar",
                "foo\rbar",
                "spacing",
                id="45",
            ),
        ),
    )
    def test_jinja(text: str, expected: str, tag: str) -> None:
        """Tests our ability to spot spacing errors inside jinja2 templates."""
        rule = JinjaRule()

        reformatted, details, returned_tag = rule.check_whitespace(
            text,
            key="name",
            lintable=Lintable("playbook.yml"),
        )
        assert tag == returned_tag, details
        assert expected == reformatted
    # Implicit templates (e.g. values of "when") are checked without the
    # "{{ }}" wrapping; the expected output is likewise unwrapped.
    @pytest.mark.parametrize(
        ("text", "expected", "tag"),
        (
            pytest.param(
                "1+2",
                "1 + 2",
                "spacing",
                id="0",
            ),
            pytest.param(
                "- 1",
                "-1",
                "spacing",
                id="1",
            ),
            # Ensure that we do not choke with double templating on implicit
            # and instead we remove them braces.
            pytest.param("{{ o | bool }}", "o | bool", "spacing", id="2"),
        ),
    )
    def test_jinja_implicit(text: str, expected: str, tag: str) -> None:
        """Tests our ability to spot spacing errors implicit jinja2 templates."""
        rule = JinjaRule()
        # implicit jinja2 are working only inside playbooks and tasks
        lintable = Lintable(name="playbook.yml", kind="playbook")
        reformatted, details, returned_tag = rule.check_whitespace(
            text,
            key="when",
            lintable=lintable,
        )
        assert tag == returned_tag, details
        assert expected == reformatted
@pytest.mark.parametrize(
("lintable", "matches"),
(pytest.param("examples/playbooks/vars/rule_jinja_vars.yml", 0, id="0"),),
)
def test_jinja_file(lintable: str, matches: int) -> None:
"""Tests our ability to process var filesspot spacing errors."""
collection = RulesCollection()
collection.register(JinjaRule())
errs = Runner(lintable, rules=collection).run()
assert len(errs) == matches
for err in errs:
assert isinstance(err, JinjaRule)
assert errs[0].tag == "jinja[invalid]"
assert errs[0].rule.id == "jinja"
def test_jinja_invalid() -> None:
"""Tests our ability to spot spacing errors inside jinja2 templates."""
collection = RulesCollection()
collection.register(JinjaRule())
success = "examples/playbooks/rule-jinja-fail.yml"
errs = Runner(success, rules=collection).run()
assert len(errs) == 2
assert errs[0].tag == "jinja[spacing]"
assert errs[0].rule.id == "jinja"
assert errs[0].lineno == 9
assert errs[1].tag == "jinja[invalid]"
assert errs[1].rule.id == "jinja"
assert errs[1].lineno == 9
def test_jinja_valid() -> None:
"""Tests our ability to parse jinja, even when variables may not be defined."""
collection = RulesCollection()
collection.register(JinjaRule())
success = "examples/playbooks/rule-jinja-pass.yml"
errs = Runner(success, rules=collection).run()
assert len(errs) == 0
# ANSIBLE_LINT_WRITE_TMP=1 makes the transformer write fixes to a ``.tmp``
# sibling file instead of rewriting the playbook in place.
@mock.patch.dict(os.environ, {"ANSIBLE_LINT_WRITE_TMP": "1"}, clear=True)
def test_jinja_transform(
    config_options: Options,
    default_rules_collection: RulesCollection,
) -> None:
    """Test transform functionality for jinja rule."""
    playbook = Path("examples/playbooks/rule-jinja-before.yml")
    config_options.write_list = ["all"]
    config_options.lintables = [str(playbook)]
    runner_result = get_matches(
        rules=default_rules_collection,
        options=config_options,
    )
    transformer = Transformer(result=runner_result, options=config_options)
    transformer.run()
    matches = runner_result.matches
    assert len(matches) == 2
    # Compare the transformed output against the checked-in expected fixture
    # (``.transformed.yml``); the original file must remain untouched.
    orig_content = playbook.read_text(encoding="utf-8")
    expected_content = playbook.with_suffix(
        f".transformed{playbook.suffix}",
    ).read_text(encoding="utf-8")
    transformed_content = playbook.with_suffix(f".tmp{playbook.suffix}").read_text(
        encoding="utf-8",
    )
    assert orig_content != transformed_content
    assert expected_content == transformed_content
    # Clean up the temporary output so reruns start from a clean state.
    playbook.with_suffix(f".tmp{playbook.suffix}").unlink()
def test_jinja_nested_var_errors() -> None:
    """Tests our ability to handle nested var errors from jinja2 templates."""

    # Wrapper that delegates to the real Templar.do_template except for one
    # specific template, for which it simulates an AnsibleError being raised.
    def _do_template(*args, **kwargs):  # type: ignore[no-untyped-def] # Templar.do_template has no type hint
        data = args[1]
        if data != "{{ 12 | random(seed=inventory_hostname) }}":
            return do_template(*args, **kwargs)
        msg = "Unexpected templating type error occurred on (foo): bar"
        raise AnsibleError(msg)

    # Capture the original method *before* patching so the wrapper can
    # fall back to it.
    do_template = Templar.do_template
    collection = RulesCollection()
    collection.register(JinjaRule())
    lintable = Lintable("examples/playbooks/jinja-nested-vars.yml")
    with mock.patch.object(Templar, "do_template", _do_template):
        results = Runner(lintable, rules=collection).run()
    # The simulated templating error must not surface as a lint violation.
    assert len(results) == 0
def _get_error_line(task: dict[str, Any], path: list[str | int]) -> int:
    """Return the line number of the deepest annotated node along *path*."""
    node: Any = task
    line = task[LINE_NUMBER_KEY]
    # Walk down the nested structure, remembering the most specific
    # line-number annotation seen on the way.
    for key in path:
        node = node[key]
        if LINE_NUMBER_KEY in node:
            line = node[LINE_NUMBER_KEY]
    if isinstance(line, int):
        return line
    msg = "Line number is not an integer"
    raise TypeError(msg)
| 35,527 | Python | .py | 833 | 28.527011 | 398 | 0.491059 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,265 | name.py | ansible_ansible-lint/src/ansiblelint/rules/name.py | """Implementation of NameRule."""
from __future__ import annotations
import re
import sys
from typing import TYPE_CHECKING, Any
import wcmatch.pathlib
import wcmatch.wcmatch
from ansiblelint.constants import LINE_NUMBER_KEY
from ansiblelint.file_utils import Lintable
from ansiblelint.rules import AnsibleLintRule, TransformMixin
if TYPE_CHECKING:
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ansiblelint.config import Options
from ansiblelint.errors import MatchError
from ansiblelint.utils import Task
class NameRule(AnsibleLintRule, TransformMixin):
    """Rule for checking task and play names."""

    id = "name"
    description = (
        "All tasks and plays should have a distinct name for readability "
        "and for ``--start-at-task`` to work"
    )
    severity = "MEDIUM"
    tags = ["idiom"]
    version_added = "v6.9.1 (last update)"
    # Matches names that still contain word characters *after* a jinja
    # expression, i.e. templates that are not at the end of the name.
    _re_templated_inside = re.compile(r".*\{\{.*\}\}.*\w.*$")
    _ids = {
        "name[play]": "All plays should be named.",
        "name[missing]": "All tasks should be named.",
        "name[prefix]": "Task name should start with a prefix.",
        "name[casing]": "All names should start with an uppercase letter.",
        "name[template]": "Jinja templates should only be at the end of 'name'",
    }

    def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]:
        """Return matches found for a specific play (entry in playbook)."""
        results: list[MatchError] = []
        if file.kind != "playbook":
            return []
        if file.failed():
            return results
        if "name" not in data:
            return [
                self.create_matcherror(
                    message="All plays should be named.",
                    lineno=data[LINE_NUMBER_KEY],
                    tag="name[play]",
                    filename=file,
                ),
            ]
        results.extend(
            self._check_name(
                data["name"],
                lintable=file,
                lineno=data[LINE_NUMBER_KEY],
            ),
        )
        return results

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> list[MatchError]:
        """Return matches for a task: missing name or naming-style issues."""
        results: list[MatchError] = []
        if file and file.failed():
            return results
        name = task.get("name")
        if not name:
            results.append(
                self.create_matcherror(
                    message="All tasks should be named.",
                    lineno=task[LINE_NUMBER_KEY],
                    tag="name[missing]",
                    filename=file,
                ),
            )
        else:
            results.extend(
                self._prefix_check(
                    name,
                    lintable=file,
                    lineno=task[LINE_NUMBER_KEY],
                ),
            )
        return results

    def _prefix_check(
        self,
        name: str,
        lintable: Lintable | None,
        lineno: int,
    ) -> list[MatchError]:
        """Validate a task name, skipping tasks without a file context."""
        if lintable is None:
            return []
        # Delegate directly; the former ``if not results`` guard was dead
        # code since ``results`` was always empty at that point.
        return self._check_name(name, lintable=lintable, lineno=lineno)

    def _check_name(
        self,
        name: str,
        lintable: Lintable | None,
        lineno: int,
    ) -> list[MatchError]:
        # This rules applies only to languages that do have uppercase and
        # lowercase letter, so we ignore anything else. On Unicode isupper()
        # is not necessarily the opposite of islower()
        results = []
        # stage one: check (and strip) the configured file-based prefix
        effective_name = name
        if self._collection and lintable:
            full_stem = self._find_full_stem(lintable)
            stems = [
                self._collection.options.task_name_prefix.format(stem=stem)
                for stem in wcmatch.pathlib.PurePath(
                    full_stem,
                ).parts
            ]
            prefix = "".join(stems)
            if lintable.kind == "tasks" and full_stem != "main":
                if name.startswith(prefix):
                    effective_name = name[len(prefix) :]
                # For the moment in order to raise errors this rule needs to be
                # enabled manually. Still, we do allow use of prefixes even
                # without having to enable the rule.
                elif "name[prefix]" in self._collection.options.enable_list:
                    results.append(
                        self.create_matcherror(
                            message=f"Task name should start with '{prefix}'.",
                            lineno=lineno,
                            tag="name[prefix]",
                            filename=lintable,
                        ),
                    )
                    return results
        # Guard against empty names: indexing ``effective_name[0]`` on an
        # empty string would raise IndexError. A char cannot be both lower
        # and upper case, so checking islower() alone is sufficient.
        if (
            effective_name
            and effective_name[0].isalpha()
            and effective_name[0].islower()
        ):
            results.append(
                self.create_matcherror(
                    message="All names should start with an uppercase letter.",
                    lineno=lineno,
                    tag="name[casing]",
                    filename=lintable,
                ),
            )
        if self._re_templated_inside.match(name):
            results.append(
                self.create_matcherror(
                    message="Jinja templates should only be at the end of 'name'",
                    lineno=lineno,
                    tag="name[template]",
                    filename=lintable,
                ),
            )
        return results

    def _find_full_stem(self, lintable: Lintable) -> str:
        """Return the file stem including parent dirs, e.g. ``sub/foo``."""
        lintable_dir = wcmatch.pathlib.PurePath(lintable.dir)
        stem = lintable.path.stem
        kind = str(lintable.kind)
        stems = [lintable_dir.name]
        lintable_dir = lintable_dir.parent
        pathex = lintable_dir / stem
        glob = ""
        if self.options:
            for entry in self.options.kinds:
                for key, value in entry.items():
                    if kind == key:
                        glob = value
        # Climb upwards while the path still matches this kind's glob,
        # collecting every directory that belongs to the stem.
        while pathex.globmatch(
            glob,
            flags=(
                wcmatch.pathlib.GLOBSTAR
                | wcmatch.pathlib.BRACE
                | wcmatch.pathlib.DOTGLOB
            ),
        ):
            stems.insert(0, lintable_dir.name)
            lintable_dir = lintable_dir.parent
            pathex = lintable_dir / stem
        if stems[0].startswith(kind):
            del stems[0]
        return str(wcmatch.pathlib.PurePath(*stems, stem))

    def transform(
        self,
        match: MatchError,
        lintable: Lintable,
        data: CommentedMap | CommentedSeq | str,
    ) -> None:
        """Fix ``name[casing]`` matches by capitalizing the task name."""
        if match.tag != "name[casing]":
            return

        def update_task_name(task_name: str) -> str:
            """Capitalize the first word of the task name."""
            # Not using capitalize(), since that rewrites the rest of the
            # name to lower case.
            if "|" in task_name:  # if using a "prefix | name" layout
                # maxsplit=1: additional pipes inside the name part must not
                # break the unpacking (the previous 2-way split raised
                # ValueError for names with more than one pipe).
                file_part, name_part = task_name.split("|", 1)
                name_part = name_part.strip()
                return f"{file_part.strip()} | {name_part[:1].upper()}{name_part[1:]}"
            return f"{task_name[:1].upper()}{task_name[1:]}"

        target_task = self.seek(match.yaml_path, data)
        orig_task_name = target_task.get("name", None)
        if not orig_task_name:
            return
        updated_task_name = update_task_name(orig_task_name)
        # Also rewrite handler references so ``notify`` keeps pointing at
        # the renamed task.
        for item in data:
            if isinstance(item, dict) and "tasks" in item:
                for task in item["tasks"]:
                    # Nothing to rewrite when the task has no notify section.
                    if "notify" not in task:
                        continue
                    if (
                        isinstance(task["notify"], str)
                        and orig_task_name == task["notify"]
                    ):
                        task["notify"] = updated_task_name
                    elif isinstance(task["notify"], list):
                        for idx in range(len(task["notify"])):
                            if orig_task_name == task["notify"][idx]:
                                task["notify"][idx] = updated_task_name
        target_task["name"] = updated_task_name
        match.fixed = True
# Embedded tests: collected only when the module is imported under pytest.
if "pytest" in sys.modules:
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    def test_file_positive() -> None:
        """Positive test for name[missing]."""
        collection = RulesCollection()
        collection.register(NameRule())
        success = "examples/playbooks/rule-name-missing-pass.yml"
        good_runner = Runner(success, rules=collection)
        assert good_runner.run() == []

    def test_file_negative() -> None:
        """Negative test for name[missing]."""
        collection = RulesCollection()
        collection.register(NameRule())
        failure = "examples/playbooks/rule-name-missing-fail.yml"
        bad_runner = Runner(failure, rules=collection)
        errs = bad_runner.run()
        assert len(errs) == 5

    def test_name_prefix_positive(config_options: Options) -> None:
        """Positive test for name[prefix]."""
        # name[prefix] is opt-in and must be enabled explicitly.
        config_options.enable_list = ["name[prefix]"]
        collection = RulesCollection(options=config_options)
        collection.register(NameRule())
        success = Lintable(
            "examples/playbooks/tasks/main.yml",
            kind="tasks",
        )
        good_runner = Runner(success, rules=collection)
        results = good_runner.run()
        assert len(results) == 0

    def test_name_prefix_negative(config_options: Options) -> None:
        """Negative test for name[missing]."""
        config_options.enable_list = ["name[prefix]"]
        collection = RulesCollection(options=config_options)
        collection.register(NameRule())
        failure = Lintable(
            "examples/playbooks/tasks/rule-name-prefix-fail.yml",
            kind="tasks",
        )
        bad_runner = Runner(failure, rules=collection)
        results = bad_runner.run()
        assert len(results) == 3
        assert results[0].tag == "name[casing]"
        assert results[1].tag == "name[prefix]"
        assert results[2].tag == "name[prefix]"

    def test_name_prefix_negative_2(config_options: Options) -> None:
        """Negative test for name[prefix]."""
        config_options.enable_list = ["name[prefix]"]
        collection = RulesCollection(options=config_options)
        collection.register(NameRule())
        failure = Lintable(
            "examples/playbooks/tasks/partial_prefix/foo.yml",
            kind="tasks",
        )
        bad_runner = Runner(failure, rules=collection)
        results = bad_runner.run()
        assert len(results) == 2
        assert results[0].tag == "name[prefix]"
        assert results[1].tag == "name[prefix]"

    def test_name_prefix_negative_3(config_options: Options) -> None:
        """Negative test for name[prefix]."""
        config_options.enable_list = ["name[prefix]"]
        collection = RulesCollection(options=config_options)
        collection.register(NameRule())
        failure = Lintable(
            "examples/playbooks/tasks/partial_prefix/main.yml",
            kind="tasks",
        )
        bad_runner = Runner(failure, rules=collection)
        results = bad_runner.run()
        assert len(results) == 2
        assert results[0].tag == "name[prefix]"
        assert results[1].tag == "name[prefix]"

    def test_rule_name_lowercase() -> None:
        """Negative test for a task that starts with lowercase."""
        collection = RulesCollection()
        collection.register(NameRule())
        failure = "examples/playbooks/rule-name-casing.yml"
        bad_runner = Runner(failure, rules=collection)
        errs = bad_runner.run()
        assert len(errs) == 1
        assert errs[0].tag == "name[casing]"
        assert errs[0].rule.id == "name"

    def test_name_play() -> None:
        """Positive test for name[play]."""
        collection = RulesCollection()
        collection.register(NameRule())
        success = "examples/playbooks/rule-name-play-fail.yml"
        errs = Runner(success, rules=collection).run()
        assert len(errs) == 1
        assert errs[0].tag == "name[play]"
        assert errs[0].rule.id == "name"

    def test_name_template() -> None:
        """Negative test for name[templated]."""
        collection = RulesCollection()
        collection.register(NameRule())
        failure = "examples/playbooks/rule-name-templated-fail.yml"
        bad_runner = Runner(failure, rules=collection)
        errs = bad_runner.run()
        assert len(errs) == 1
        assert errs[0].tag == "name[template]"

    def test_when_no_lintable() -> None:
        """Test when lintable is None."""
        name_rule = NameRule()
        # Calling the private helper directly to cover the None branch.
        result = name_rule._prefix_check("Foo", None, 1)  # noqa: SLF001
        assert len(result) == 0
| 13,743 | Python | .py | 335 | 28.635821 | 120 | 0.544095 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,266 | deprecated_module.py | ansible_ansible-lint/src/ansiblelint/rules/deprecated_module.py | """Implementation of deprecated-module rule."""
# Copyright (c) 2018, Ansible Project
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
class DeprecatedModuleRule(AnsibleLintRule):
    """Deprecated module."""

    id = "deprecated-module"
    description = (
        "These are deprecated modules, some modules are kept "
        "temporarily for backwards compatibility but usage is discouraged."
    )
    link = "https://docs.ansible.com/ansible/latest/collections/index_module.html"
    severity = "HIGH"
    tags = ["deprecations"]
    version_added = "v4.0.0"

    # Known-deprecated module names (short names, not FQCN).
    _modules = [
        "accelerate",
        "aos_asn_pool", "aos_blueprint", "aos_blueprint_param",
        "aos_blueprint_virtnet", "aos_device", "aos_external_router",
        "aos_ip_pool", "aos_logical_device", "aos_logical_device_map",
        "aos_login", "aos_rack_type", "aos_template",
        "azure",
        "cl_bond", "cl_bridge", "cl_img_install", "cl_interface",
        "cl_interface_policy", "cl_license", "cl_ports",
        "cs_nic",
        "docker",
        "ec2_ami_find", "ec2_ami_search", "ec2_remote_facts", "ec2_vpc",
        "kubernetes",
        "netscaler",
        "nxos_ip_interface", "nxos_mtu", "nxos_portchannel", "nxos_switchport",
        "oc",
        "panos_nat_policy", "panos_security_policy",
        "vsphere_guest",
        "win_msi",
        "include",
    ]

    def matchtask(
        self,
        task: dict[str, Any],
        file: Lintable | None = None,
    ) -> bool | str:
        """Flag the task when its module is on the deprecation list."""
        module_name = task["action"]["__ansible_module__"]
        if module_name not in self._modules:
            return False
        # Message format: "<short description> <module name>".
        return f"{self.shortdesc} {module_name}"
| 2,000 | Python | .py | 69 | 21.289855 | 82 | 0.574701 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,267 | yaml_rule.py | ansible_ansible-lint/src/ansiblelint/rules/yaml_rule.py | """Implementation of yaml linting rule (yamllint integration)."""
from __future__ import annotations
import logging
import sys
from collections.abc import Iterable, MutableMapping, MutableSequence
from typing import TYPE_CHECKING
from yamllint.linter import run as run_yamllint
from ansiblelint.constants import LINE_NUMBER_KEY, SKIPPED_RULES_KEY
from ansiblelint.file_utils import Lintable
from ansiblelint.rules import AnsibleLintRule, TransformMixin
from ansiblelint.yaml_utils import load_yamllint_config
if TYPE_CHECKING:
from typing import Any
from ansiblelint.config import Options
from ansiblelint.errors import MatchError
_logger = logging.getLogger(__name__)
class YamllintRule(AnsibleLintRule, TransformMixin):
    """Violations reported by yamllint."""

    id = "yaml"
    severity = "VERY_LOW"
    tags = ["formatting", "yaml"]
    version_added = "v5.0.0"
    # Shared yamllint configuration, loaded once at class-creation time.
    config = load_yamllint_config()
    has_dynamic_tags = True
    link = "https://yamllint.readthedocs.io/en/stable/rules.html"
    # ensure this rule runs before most of other common rules
    _order = 1
    # One dynamic tag per yamllint rule; descriptions come from yamllint.
    _ids = {
        "yaml[anchors]": "",
        "yaml[braces]": "",
        "yaml[brackets]": "",
        "yaml[colons]": "",
        "yaml[commas]": "",
        "yaml[comments-indentation]": "",
        "yaml[comments]": "",
        "yaml[document-end]": "",
        "yaml[document-start]": "",
        "yaml[empty-lines]": "",
        "yaml[empty-values]": "",
        "yaml[float-values]": "",
        "yaml[hyphens]": "",
        "yaml[indentation]": "",
        "yaml[key-duplicates]": "",
        "yaml[key-ordering]": "",
        "yaml[line-length]": "",
        "yaml[new-line-at-end-of-file]": "",
        "yaml[new-lines]": "",
        "yaml[octal-values]": "",
        "yaml[quoted-strings]": "",
        "yaml[trailing-spaces]": "",
        "yaml[truthy]": "",
    }

    def matchyaml(self, file: Lintable) -> list[MatchError]:
        """Return matches found for a specific YAML text."""
        matches: list[MatchError] = []
        if str(file.base_kind) != "text/yaml":
            return matches
        for problem in run_yamllint(
            file.content,
            YamllintRule.config,
            filepath=file.path,
        ):
            # Mutating self.severity per problem is safe because
            # create_matcherror stores a copy.copy(self) on each match.
            self.severity = "VERY_LOW"
            if problem.level == "error":
                self.severity = "MEDIUM"
            # Ignore truthy violation with github workflows ("on:" keys)
            if problem.rule == "truthy" and file.path.parent.parts[-2:] == (
                ".github",
                "workflows",
            ):
                continue
            matches.append(
                self.create_matcherror(
                    # yamllint does return lower-case sentences
                    message=problem.desc.capitalize(),
                    lineno=problem.line,
                    details="",
                    filename=file,
                    tag=f"yaml[{problem.rule}]",
                ),
            )
        return matches

    def transform(
        self: YamllintRule,
        match: MatchError,
        lintable: Lintable,
        data: MutableMapping[str, Any] | MutableSequence[Any] | str,
    ) -> None:
        """Transform yaml.

        :param match: MatchError instance
        :param lintable: Lintable instance
        :param data: data to transform
        """
        # This method does nothing because the YAML reformatting is implemented
        # in data dumper. Still presence of this method helps us with
        # documentation generation.
def _combine_skip_rules(data: Any) -> set[str]:
    """Return a consolidated list of skipped rules."""
    skipped = set(data.get(SKIPPED_RULES_KEY, []))
    tags = data.get("tags", [])
    if tags:
        # ``tags`` may be a list of tag strings or one bare string; both
        # spellings of the skip marker are honoured.
        has_skip_marker = (
            isinstance(tags, Iterable) and "skip_ansible_lint" in tags
        ) or tags == "skip_ansible_lint"
        if has_skip_marker:
            skipped.add("skip_ansible_lint")
    return skipped
def _fetch_skips(data: Any, collector: dict[int, set[str]]) -> dict[int, set[str]]:
    """Retrieve a dictionary with line: skips by looking recursively in given JSON structure."""
    # NOTE(review): ``collector`` is updated via ``collector[line].update(...)``
    # without initializing missing keys, so it is presumably a
    # defaultdict(set) — confirm at the call site.
    if hasattr(data, "get") and data.get(LINE_NUMBER_KEY):
        rules = _combine_skip_rules(data)
        if rules:
            collector[data.get(LINE_NUMBER_KEY)].update(rules)
    # Recurse into containers; strings are iterable but must not be walked.
    if isinstance(data, Iterable) and not isinstance(data, str):
        if isinstance(data, dict):
            for value in data.values():
                _fetch_skips(value, collector)
        else:  # must be some kind of list
            for entry in data:
                # Record skips attached directly to list entries (tasks),
                # then keep descending into the entry itself.
                if (
                    entry
                    and hasattr(entry, "get")
                    and LINE_NUMBER_KEY in entry
                    and SKIPPED_RULES_KEY in entry
                    and entry[SKIPPED_RULES_KEY]
                ):
                    collector[entry[LINE_NUMBER_KEY]].update(entry[SKIPPED_RULES_KEY])
                _fetch_skips(entry, collector)
    return collector
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
    import pytest

    # pylint: disable=ungrouped-imports
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    @pytest.mark.parametrize(
        ("file", "expected_kind", "expected"),
        (
            pytest.param(
                "examples/yamllint/invalid.yml",
                "yaml",
                [
                    'Missing document start "---"',
                    'Duplication of key "foo" in mapping',
                    "Trailing spaces",
                ],
                id="invalid",
            ),
            pytest.param("examples/yamllint/valid.yml", "yaml", [], id="valid"),
            pytest.param(
                "examples/yamllint/line-length.yml",
                "yaml",
                ["Line too long (166 > 160 characters)"],
                id="line-length",
            ),
            pytest.param(
                "examples/yamllint/multi-document.yaml",
                "yaml",
                [],
                id="multi-document",
            ),
            pytest.param(
                "examples/yamllint/skipped-rule.yml",
                "yaml",
                [],
                id="skipped-rule",
            ),
            pytest.param(
                "examples/playbooks/rule-yaml-fail.yml",
                "playbook",
                [
                    "Truthy value should be one of [false, true]",
                    "Truthy value should be one of [false, true]",
                    "Truthy value should be one of [false, true]",
                ],
                id="rule-yaml-fail",
            ),
            pytest.param(
                "examples/playbooks/rule-yaml-pass.yml",
                "playbook",
                [],
                id="rule-yaml-pass",
            ),
            # GitHub workflow files must not trigger truthy for "on:" keys.
            pytest.param(
                "examples/yamllint/.github/workflows/ci.yml",
                "yaml",
                [],
                id="rule-yaml-github-workflow",
            ),
        ),
    )
    @pytest.mark.filterwarnings("ignore::ansible_compat.runtime.AnsibleWarning")
    def test_yamllint(
        file: str,
        expected_kind: str,
        expected: list[str],
        config_options: Options,
    ) -> None:
        """Validate parsing of ansible output."""
        lintable = Lintable(file)
        assert lintable.kind == expected_kind
        rules = RulesCollection(options=config_options)
        rules.register(YamllintRule())
        results = Runner(lintable, rules=rules).run()
        assert len(results) == len(expected), results
        # Each reported problem must map onto the expected message, in order.
        for idx, result in enumerate(results):
            assert result.filename.endswith(file)
            assert expected[idx] in result.message
            assert isinstance(result.tag, str)
            assert result.tag.startswith("yaml[")

    def test_yamllint_has_help(default_rules_collection: RulesCollection) -> None:
        """Asserts that we loaded markdown documentation in help property."""
        for rule in default_rules_collection:
            if rule.id == "yaml":
                assert rule.help is not None
                assert len(rule.help) > 100
                break
        else:  # pragma: no cover
            pytest.fail("No yaml rule found")
| 8,343 | Python | .py | 219 | 27.136986 | 96 | 0.554883 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,268 | risky_shell_pipe.py | ansible_ansible-lint/src/ansiblelint/rules/risky_shell_pipe.py | """Implementation of risky-shell-pipe rule."""
from __future__ import annotations
import re
import sys
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
from ansiblelint.utils import convert_to_boolean, get_cmd_args
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class ShellWithoutPipefail(AnsibleLintRule):
    """Shells that use pipes should set the pipefail option."""

    id = "risky-shell-pipe"
    description = (
        "Without the pipefail option set, a shell command that "
        "implements a pipeline can fail and still return 0. If "
        "any part of the pipeline other than the terminal command "
        "fails, the whole pipeline will still return 0, which may "
        "be considered a success by Ansible. "
        "Pipefail is available in the bash shell."
    )
    severity = "MEDIUM"
    tags = ["command-shell"]
    version_added = "v4.1.0"

    # Detects an explicit "set -o pipefail" (any option letters, +/-).
    _pipefail_re = re.compile(r"^\s*set.*[+-][A-Za-z]*o\s*pipefail", re.MULTILINE)
    # A single "|" that is not part of a "||" operator.
    _pipe_re = re.compile(r"(?<!\|)\|(?!\|)")

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Return True when a shell task pipes without setting pipefail."""
        if task["__ansible_action_type__"] != "task":
            return False
        action = task["action"]
        if action["__ansible_module__"] != "shell" or task.get("ignore_errors"):
            return False
        # PowerShell has no pipefail option, so skip pwsh invocations.
        # https://github.com/ansible/ansible-lint/issues/3161
        if "pwsh" in action.get("executable", ""):
            return False
        command = self.unjinja(get_cmd_args(task))
        uses_pipe = bool(self._pipe_re.search(command))
        sets_pipefail = bool(self._pipefail_re.search(command))
        errors_ignored = bool(convert_to_boolean(action.get("ignore_errors", False)))
        return uses_pipe and not sets_pipefail and not errors_ignored
if "pytest" in sys.modules:
    import pytest

    # pylint: disable=ungrouped-imports
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    @pytest.mark.parametrize(
        ("file", "expected"),
        (
            pytest.param(
                "examples/playbooks/rule-risky-shell-pipe-pass.yml",
                0,
                id="pass",
            ),
            pytest.param(
                "examples/playbooks/rule-risky-shell-pipe-fail.yml",
                3,
                id="fail",
            ),
        ),
    )
    def test_risky_shell_pipe(
        default_rules_collection: RulesCollection,
        file: str,
        expected: int,
    ) -> None:
        """Validate that rule works as intended."""
        results = Runner(file, rules=default_rules_collection).run()
        # Every match must come from this rule; count must equal fixture.
        for result in results:
            assert result.rule.id == ShellWithoutPipefail.id, result
        assert len(results) == expected
| 2,864 | Python | .py | 76 | 29.328947 | 83 | 0.614301 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,269 | loop_var_prefix.py | ansible_ansible-lint/src/ansiblelint/rules/loop_var_prefix.py | """Optional Ansible-lint rule to enforce use of prefix on role loop vars."""
from __future__ import annotations
import re
import sys
from typing import TYPE_CHECKING
from ansiblelint.config import LOOP_VAR_PREFIX, options
from ansiblelint.rules import AnsibleLintRule
from ansiblelint.text import toidentifier
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class RoleLoopVarPrefix(AnsibleLintRule):
    """Role loop_var should use configured prefix."""

    id = "loop-var-prefix"
    link = (
        "https://docs.ansible.com/ansible/latest/playbook_guide/"
        "playbooks_loops.html#defining-inner-and-outer-variable-names-with-loop-var"
    )
    description = """\
Looping inside roles has the risk of clashing with loops from user-playbooks.\
"""
    tags = ["idiom"]
    # Replaced per call with the role-specific compiled pattern.
    prefix = re.compile("")
    severity = "MEDIUM"
    _ids = {
        "loop-var-prefix[wrong]": "Loop variable name does not match regex.",
        "loop-var-prefix[missing]": "Replace unsafe implicit `item` loop variable.",
    }

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> list[MatchError]:
        """Return matches for a task."""
        # Only applies inside roles and only when a prefix is configured.
        if not file or not file.role or not options.loop_var_prefix:
            return []
        role_id = toidentifier(file.role)
        self.prefix = re.compile(options.loop_var_prefix.format(role=role_id))
        raw_task = task.raw_task
        # Both ``loop`` and any legacy ``with_*`` keyword start a loop.
        looping = "loop" in raw_task or any(
            key.startswith("with_") for key in raw_task
        )
        if not looping:
            return []
        loop_var = raw_task.get("loop_control", {}).get("loop_var", "")
        if not loop_var:
            return [
                self.create_matcherror(
                    message=f"Replace unsafe implicit `item` loop variable by adding a `loop_var` that is matching /{options.loop_var_prefix}/ regex.",
                    filename=file,
                    tag="loop-var-prefix[missing]",
                ),
            ]
        if self.prefix.match(loop_var):
            return []
        return [
            self.create_matcherror(
                message=f"Loop variable name does not match /{options.loop_var_prefix}/ regex, where role={role_id}.",
                filename=file,
                tag="loop-var-prefix[wrong]",
            ),
        ]
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
    import pytest

    # pylint: disable=ungrouped-imports
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    @pytest.mark.parametrize(
        ("test_file", "failures"),
        (
            pytest.param(
                "examples/playbooks/roles/loop_var_prefix/tasks/pass.yml",
                0,
                id="pass",
            ),
            pytest.param(
                "examples/playbooks/roles/loop_var_prefix/tasks/fail.yml",
                6,
                id="fail",
            ),
        ),
    )
    def test_loop_var_prefix(
        default_rules_collection: RulesCollection,
        test_file: str,
        failures: int,
    ) -> None:
        """Test rule matches."""
        # Enable checking of loop variable prefixes in roles.
        # NOTE: this mutates the module-level ``options`` shared by the rule.
        options.loop_var_prefix = LOOP_VAR_PREFIX
        results = Runner(test_file, rules=default_rules_collection).run()
        for result in results:
            assert result.rule.id == RoleLoopVarPrefix().id
        assert len(results) == failures
| 3,801 | Python | .py | 98 | 28.469388 | 155 | 0.587629 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,270 | only_builtins.py | ansible_ansible-lint/src/ansiblelint/rules/only_builtins.py | """Rule definition for usage of builtin actions only."""
from __future__ import annotations
import os
import sys
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
from ansiblelint.rules.fqcn import builtins
from ansiblelint.skip_utils import is_nested_task
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class OnlyBuiltinsRule(AnsibleLintRule):
    """Use only builtin actions."""

    id = "only-builtins"
    severity = "MEDIUM"
    description = "Check whether the playbook uses anything but ``ansible.builtin``"
    tags = ["opt-in", "experimental"]

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Return True when the task uses a module outside the allow-lists."""
        module = task["action"]["__ansible_module_original__"]
        allowed_collections = [
            "ansible.builtin",
            "ansible.legacy",
        ]
        # Copy the shared ``builtins`` list: the previous ``allowed_modules =
        # builtins`` aliased the module-level list, so the in-place ``+=``
        # below permanently appended the configured extra modules to the
        # global allow-list on every call.
        allowed_modules = list(builtins)
        if self.options:
            allowed_collections += self.options.only_builtins_allow_collections
            allowed_modules += self.options.only_builtins_allow_modules
        is_allowed = (
            any(module.startswith(f"{prefix}.") for prefix in allowed_collections)
            or module in allowed_modules
        )
        # Nested tasks (block/include wrappers) are checked individually.
        return not is_allowed and not is_nested_task(task)
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
    # pylint: disable=ungrouped-imports
    import pytest

    from ansiblelint.constants import RC
    from ansiblelint.testing import RunFromText, run_ansible_lint

    SUCCESS_PLAY = """
- hosts: localhost
  tasks:
    - name: A block
      block:
        - name: Shell (fqcn)
          ansible.builtin.shell: echo This rule should not get matched by the only-builtins rule
        - name: Command with legacy FQCN
          ansible.legacy.command: echo This rule should not get matched by the only-builtins rule
"""

    def test_only_builtins_fail() -> None:
        """Test rule matches."""
        env = os.environ.copy()
        # Disable colored output so the string assertions below are stable.
        env["NO_COLOR"] = "1"
        result = run_ansible_lint(
            "--strict",
            "--warn-list=",
            "--enable-list",
            "only-builtins",
            "examples/playbooks/rule-only-builtins.yml",
            env=env,
        )
        assert result.returncode == RC.VIOLATIONS_FOUND
        assert "Failed" in result.stderr
        assert "warning(s)" in result.stderr
        assert "only-builtins: Use only builtin actions" in result.stdout

    def test_only_builtins_allow() -> None:
        """Test rule doesn't match."""
        # Config file adds extra allowed collections/modules for this rule.
        conf_path = "examples/playbooks/.ansible-lint-only-builtins-allow"
        result = run_ansible_lint(
            f"--config-file={conf_path}",
            "--strict",
            "--warn-list=",
            "--enable-list",
            "only-builtins",
            "examples/playbooks/rule-only-builtins.yml",
        )
        assert "only-builtins" not in result.stdout
        assert result.returncode == RC.SUCCESS

    @pytest.mark.parametrize(
        "rule_runner",
        (OnlyBuiltinsRule,),
        indirect=["rule_runner"],
    )
    def test_only_builtin_pass(rule_runner: RunFromText) -> None:
        """Test rule does not match."""
        results = rule_runner.run_playbook(SUCCESS_PLAY)
        assert len(results) == 0, results
| 3,395 | Python | .py | 90 | 30.077778 | 93 | 0.640402 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,271 | __init__.py | ansible_ansible-lint/src/ansiblelint/rules/__init__.py | """All internal ansible-lint rules."""
from __future__ import annotations
import copy
import inspect
import logging
import re
import sys
from collections import defaultdict
from collections.abc import Iterable, Iterator, MutableMapping, MutableSequence
from importlib import import_module
from pathlib import Path
from typing import TYPE_CHECKING, Any, cast
import ansiblelint.skip_utils
import ansiblelint.utils
import ansiblelint.yaml_utils
from ansiblelint._internal.rules import (
AnsibleParserErrorRule,
BaseRule,
LoadingFailureRule,
RuntimeErrorRule,
WarningRule,
)
from ansiblelint.app import App, get_app
from ansiblelint.config import PROFILES, Options
from ansiblelint.config import options as default_options
from ansiblelint.constants import LINE_NUMBER_KEY, RULE_DOC_URL, SKIPPED_RULES_KEY
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable, expand_paths_vars
if TYPE_CHECKING:
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ansiblelint.errors import RuleMatchTransformMeta
_logger = logging.getLogger(__name__)
# Maps each BaseRule ``match*`` hook name to the kind of content it inspects;
# ``AnsibleLintRule.create_matcherror`` walks the call stack and uses this
# table to record which hook produced a match.
match_types = {
    "matchlines": "line",
    "match": "line",  # called by matchlines
    "matchtasks": "task",
    "matchtask": "task",  # called by matchtasks
    "matchyaml": "yaml",
    "matchplay": "play",  # called by matchyaml
    "matchdir": "dir",
}
class AnsibleLintRule(BaseRule):
"""AnsibleLintRule should be used as base for writing new rules."""
@property
def url(self) -> str:
"""Return rule documentation url."""
return RULE_DOC_URL + self.id + "/"
def get_config(self, key: str) -> Any:
"""Return a configured value for given key string."""
return self.rule_config.get(key, None)
@staticmethod
def unjinja(text: str) -> str:
"""Remove jinja2 bits from a string."""
text = re.sub(r"{{.+?}}", "JINJA_EXPRESSION", text)
text = re.sub(r"{%.+?%}", "JINJA_STATEMENT", text)
text = re.sub(r"{#.+?#}", "JINJA_COMMENT", text)
return text
    # pylint: disable=too-many-arguments,too-many-positional-arguments
    def create_matcherror(
        self,
        message: str = "",
        lineno: int = 1,
        details: str = "",
        filename: Lintable | None = None,
        tag: str = "",
        transform_meta: RuleMatchTransformMeta | None = None,
    ) -> MatchError:
        """Instantiate a new MatchError."""
        match = MatchError(
            message=message,
            lineno=lineno,
            details=details,
            lintable=filename or Lintable(""),
            # A shallow copy isolates per-match rule state (e.g. a rule that
            # mutates its own severity between matches).
            rule=copy.copy(self),
            tag=tag,
            transform_meta=transform_meta,
        )
        # search through callers to find one of the match* methods
        frame = inspect.currentframe()
        match_type: str | None = None
        while not match_type and frame is not None:
            func_name = frame.f_code.co_name
            match_type = match_types.get(func_name)
            if match_type:
                # add the match_type to the match
                match.match_type = match_type
                break
            frame = frame.f_back  # get the parent frame for the next iteration
        return match
@staticmethod
def _enrich_matcherror_with_task_details(
match: MatchError,
task: ansiblelint.utils.Task,
) -> None:
match.task = task
if not match.details:
match.details = "Task/Handler: " + ansiblelint.utils.task_to_str(task)
match.lineno = max(match.lineno, task[LINE_NUMBER_KEY])
def matchlines(self, file: Lintable) -> list[MatchError]:
matches: list[MatchError] = []
# arrays are 0-based, line numbers are 1-based
# so use prev_line_no as the counter
for prev_line_no, line in enumerate(file.content.split("\n")):
if line.lstrip().startswith("#"):
continue
rule_id_list = ansiblelint.skip_utils.get_rule_skips_from_line(
line,
lintable=file,
)
if self.id in rule_id_list:
continue
result = self.match(line)
if not result:
continue
message = ""
if isinstance(result, str):
message = result
matcherror = self.create_matcherror(
message=message,
lineno=prev_line_no + 1,
details=line,
filename=file,
)
matches.append(matcherror)
return matches
def matchtasks(self, file: Lintable) -> list[MatchError]:
"""Call matchtask for each task inside file and return aggregate results.
Most rules will never need to override matchtasks because its main
purpose is to call matchtask for each task/handlers in the same file,
and to aggregate the results.
"""
matches: list[MatchError] = []
if (
file.kind not in ["handlers", "tasks", "playbook"]
or str(file.base_kind) != "text/yaml"
):
return matches
for task in ansiblelint.utils.task_in_list(
data=file.data,
kind=file.kind,
file=file,
):
if task.error is not None:
# normalize_task converts AnsibleParserError to MatchError
return [task.error]
if (
self.id in task.skip_tags
or ("action" not in task.normalized_task)
or "skip_ansible_lint" in task.normalized_task.get("tags", [])
):
continue
if self.needs_raw_task:
task.normalized_task["__raw_task__"] = task.raw_task
result = self.matchtask(task, file=file)
if not result:
continue
if isinstance(result, Iterable) and not isinstance(
result,
str,
): # list[MatchError]
# https://github.com/PyCQA/pylint/issues/6044
# pylint: disable=not-an-iterable
for match in result:
if match.tag in task.skip_tags:
continue
self._enrich_matcherror_with_task_details(
match,
task,
)
matches.append(match)
continue
if isinstance(result, MatchError):
if result.tag in task.skip_tags:
continue
match = result
else: # bool or string
message = ""
if isinstance(result, str):
message = result
match = self.create_matcherror(
message=message,
lineno=task.normalized_task[LINE_NUMBER_KEY],
filename=file,
)
self._enrich_matcherror_with_task_details(match, task)
matches.append(match)
return matches
def matchyaml(self, file: Lintable) -> list[MatchError]:
matches: list[MatchError] = []
if str(file.base_kind) != "text/yaml":
return matches
yaml = file.data
# yaml returned can be an AnsibleUnicode (a string) when the yaml
# file contains a single string. YAML spec allows this but we consider
# this an fatal error.
if isinstance(yaml, str):
if yaml.startswith("$ANSIBLE_VAULT"):
return []
if self._collection is None:
msg = f"Rule {self.id} was not added to a collection."
raise RuntimeError(msg)
return [
# pylint: disable=E1136
MatchError(
lintable=file,
rule=self._collection["load-failure"],
),
]
if not yaml:
return matches
if isinstance(yaml, dict):
yaml = [yaml]
for play in yaml:
# Bug #849
if play is None:
continue
if self.id in play.get(SKIPPED_RULES_KEY, ()):
continue
if "skip_ansible_lint" in play.get("tags", []):
continue
matches.extend(self.matchplay(file, play))
return matches
class TransformMixin:
    """A mixin for AnsibleLintRule to enable transforming files.

    If ansible-lint is started with the ``--fix`` option, then the ``Transformer``
    will call the ``transform()`` method for every MatchError identified if the rule
    that identified it subclasses this ``TransformMixin``. Only the rule that identified
    a MatchError can do transforms to fix that match.
    """

    def transform(
        self,
        match: MatchError,
        lintable: Lintable,
        data: CommentedMap | CommentedSeq | str,
    ) -> None:
        """Attempt to fix the given MatchError by mutating ``data`` in place.

        ``match`` was produced by this rule while linting ``lintable``. An
        implementation should either repair the problem or at least reshape
        the content so a human can finish the fix, and it must set
        ``match.fixed = True`` once the data no longer exhibits the error.

        For YAML files, ``data`` is an editable dict/array structure that
        keeps the comments of the original file, e.g.:

        .. code:: python

            target_task = self.seek(match.yaml_path, data)
            target_task["when"] = False

        For non-YAML files ``data`` is the raw file content as a string;
        store the corrected text back via ``lintable.content``:

        .. code:: python

            lintable.content = self.do_something_to_fix_the_match(data)
        """

    @staticmethod
    def seek(
        yaml_path: list[int | str],
        data: MutableMapping[str, Any] | MutableSequence[Any] | str,
    ) -> Any:
        """Return the element addressed by ``yaml_path`` inside ``data``.

        Transform implementations use this to descend into nested YAML
        structures, e.g.:

        .. code:: python

            def transform(self, match, lintable, data):
                target_task = self.seek(match.yaml_path, data)
                # mutate target_task
        """
        if isinstance(data, str):
            # A plain string has no nested structure to descend into.
            return data
        node: Any = data
        for step in yaml_path:
            # str steps index mappings, int steps index sequences.
            node = node[step]
        return node
def load_plugins(
    dirs: list[str],
) -> Iterator[AnsibleLintRule]:
    """Yield one instance of every rule class found in the given directories.

    :param dirs: Directories containing rule modules (``*.py`` files).
    :yields: Instantiated ``AnsibleLintRule`` subclasses with a valid id.
    """

    def all_subclasses(cls: type) -> set[type]:
        # Recursively collect every direct and indirect subclass of cls.
        return set(cls.__subclasses__()).union(
            [s for c in cls.__subclasses__() for s in all_subclasses(c)],
        )

    orig_sys_path = sys.path.copy()
    for directory in dirs:
        if directory not in sys.path:
            sys.path.append(str(directory))
        # load all modules in the directory
        for f in Path(directory).glob("*.py"):
            # Skip dunder files and conftest.py itself. Bugfix: the previous
            # substring test (`f.stem not in "conftest"`) also skipped any
            # file whose stem is a substring of "conftest", e.g. `test.py`.
            if "__" not in f.stem and f.stem != "conftest":
                import_module(f"{f.stem}")
    # restore sys.path
    sys.path = orig_sys_path

    rules: dict[str, BaseRule] = {}
    for rule in all_subclasses(BaseRule):
        # we do not return the rules that are not loaded from passed 'directory'
        # or rules that do not have a valid id. For example, during testing
        # python may load other rule classes, some outside the tested rule
        # directories.
        if (
            rule.id  # type: ignore[attr-defined]
            and Path(inspect.getfile(rule)).parent.absolute()
            in [Path(x).absolute() for x in dirs]
            and issubclass(rule, BaseRule)
            and rule.id not in rules
        ):
            rules[rule.id] = rule()
    for rule in rules.values():  # type: ignore[assignment]
        if isinstance(rule, AnsibleLintRule) and bool(rule.id):
            yield rule
class RulesCollection:
    """Container for a collection of rules.

    Holds the built-in internal rules plus any rules loaded from the given
    rule directories, optionally filtered down to a named profile.
    """

    def __init__(  # pylint: disable=too-many-arguments
        self,
        rulesdirs: list[str] | list[Path] | None = None,
        options: Options | None = None,
        profile_name: str | None = None,
        *,
        conditional: bool = True,
        app: App | None = None,
    ) -> None:
        """Initialize a RulesCollection instance."""
        if options is None:
            self.options = copy.deepcopy(default_options)
            # When initialized without options argument we want it to always
            # be offline as this is done only during testing.
            self.options.offline = True
        else:
            self.options = options
        self.profile = []
        self.app = app or get_app(cached=True)
        if profile_name:
            self.profile = PROFILES[profile_name]
        # Normalize Path entries to strings before expanding ~ and $VARS.
        rulesdirs_str = [] if rulesdirs is None else [str(r) for r in rulesdirs]
        self.rulesdirs = expand_paths_vars(rulesdirs_str)
        self.rules: list[BaseRule] = []
        # internal rules included in order to expose them for docs as they are
        # not directly loaded by our rule loader.
        self.rules.extend(
            [
                RuntimeErrorRule(),
                AnsibleParserErrorRule(),
                LoadingFailureRule(),
                WarningRule(),
            ],
        )
        for rule in self.rules:
            rule._collection = self  # noqa: SLF001
        for rule in load_plugins(rulesdirs_str):
            self.register(rule, conditional=conditional)
        self.rules = sorted(self.rules)
        # When we have a profile we unload some of the rules
        # But we do include all rules when listing all rules or tags
        if profile_name and not (self.options.list_rules or self.options.list_tags):
            filter_rules_with_profile(self.rules, profile_name)

    def register(self, obj: AnsibleLintRule, *, conditional: bool = False) -> None:
        """Register a rule."""
        # We skip opt-in rules which were not manually enabled.
        # But we do include opt-in rules when listing all rules or tags
        obj._collection = self  # pylint: disable=protected-access # noqa: SLF001
        # With conditional=False every rule is accepted; otherwise an opt-in
        # rule must be explicitly enabled (or we must be listing rules/tags).
        if any(
            [
                not conditional,
                self.profile,  # when profile is used we load all rules and filter later
                "opt-in" not in obj.tags,
                obj.id in self.options.enable_list,
                self.options.list_rules,
                self.options.list_tags,
            ],
        ):
            self.rules.append(obj)

    def __iter__(self) -> Iterator[BaseRule]:
        """Return the iterator over the rules in the RulesCollection."""
        return iter(sorted(self.rules))

    def alphabetical(self) -> Iterator[BaseRule]:
        """Return an iterator over the rules in the RulesCollection in alphabetical order."""
        return iter(sorted(self.rules, key=lambda x: x.id))

    def __len__(self) -> int:
        """Return the length of the RulesCollection data."""
        return len(self.rules)

    def __getitem__(self, item: Any) -> BaseRule:
        """Return a rule from inside the collection based on its id."""
        if not isinstance(item, str):
            msg = f"Expected str but got {type(item)} when trying to access rule by it's id"
            raise TypeError(msg)
        for rule in self.rules:
            if rule.id == item:
                return rule
        msg = f"Rule {item} is not present inside this collection."
        raise ValueError(msg)

    def extend(self, more: list[AnsibleLintRule]) -> None:
        """Combine rules."""
        self.rules.extend(more)

    def run(
        self,
        file: Lintable,
        tags: set[str] | None = None,
        skip_list: list[str] | None = None,
    ) -> list[MatchError]:
        """Run all the rules against the given lintable."""
        matches: list[MatchError] = []
        if tags is None:
            tags = set()
        if skip_list is None:
            skip_list = []
        # Force-load the file content up front so read failures surface as a
        # single load-failure match instead of failing inside each rule.
        if not file.path.is_dir():
            try:
                if file.content is not None:  # loads the file content
                    pass
            except (OSError, UnicodeDecodeError) as exc:
                return [
                    MatchError(
                        message=str(exc),
                        lintable=file,
                        rule=self["load-failure"],
                        tag=f"{LoadingFailureRule.id}[{exc.__class__.__name__.lower()}]",
                    ),
                ]
        for rule in self.rules:
            # syntax-check is executed by the runner itself, not here.
            if rule.id == "syntax-check":
                continue
            # Outer check: rule is selected by the requested tags (or no tags
            # were requested, or the rule computes tags dynamically).
            if (
                not tags
                or rule.has_dynamic_tags
                or not set(rule.tags).union([rule.id]).isdisjoint(tags)
            ):
                # Inner check: re-test against the rule's full id set.
                if tags and set(rule.tags).union(list(rule.ids().keys())).isdisjoint(
                    tags,
                ):
                    _logger.debug("Skipping rule %s", rule.id)
                else:
                    _logger.debug("Running rule %s", rule.id)
                    rule_definition = set(rule.tags)
                    rule_definition.add(rule.id)
                    if set(rule_definition).isdisjoint(skip_list):
                        matches.extend(rule.getmatches(file))
                    else:
                        _logger.debug("Skipping rule %s", rule.id)
            else:
                _logger.debug("Skipping rule %s", rule.id)
        # some rules can produce matches with tags that are inside our
        # skip_list, so we need to cleanse the matches
        matches = [m for m in matches if m.tag not in skip_list]
        return matches

    def __repr__(self) -> str:
        """Return a RulesCollection instance representation."""
        return "\n".join(
            [rule.verbose() for rule in sorted(self.rules, key=lambda x: x.id)],
        )

    def known_tags(self) -> list[str]:
        """Return a list of known tags, without returning no sub-tags."""
        tags = set()
        for rule in self.rules:
            tags.add(rule.id)
            for tag in rule.tags:
                tags.add(tag)
        return sorted(tags)

    def list_tags(self) -> str:
        """Return a string with all the tags in the RulesCollection."""
        tag_desc = {
            "command-shell": "Specific to use of command and shell modules",
            "core": "Related to internal implementation of the linter",
            "deprecations": "Indicate use of features that are removed from Ansible",
            "experimental": "Newly introduced rules, by default triggering only warnings",
            "formatting": "Related to code-style",
            "idempotency": "Possible indication that consequent runs would produce different results",
            "idiom": "Anti-pattern detected, likely to cause undesired behavior",
            "metadata": "Invalid metadata, likely related to galaxy, collections or roles",
            "opt-in": "Rules that are not used unless manually added to `enable_list`",
            "security": "Rules related o potentially security issues, like exposing credentials",
            "syntax": "Related to wrong or deprecated syntax",
            "unpredictability": "Warn about code that might not work in a predictable way",
            "unskippable": "Indicate a fatal error that cannot be ignored or disabled",
            "yaml": "External linter which will also produce its own rule codes",
        }
        tags = defaultdict(list)
        for rule in self.rules:
            # Fail early if a rule does not have any of our required tags
            if not set(rule.tags).intersection(tag_desc.keys()):
                msg = f"Rule {rule} does not have any of the required tags: {', '.join(tag_desc.keys())}"
                raise RuntimeError(msg)
            for tag in rule.tags:
                tags[tag] = list(rule.ids())
        result = "# List of tags and rules they cover\n"
        for tag in sorted(tags):
            desc = tag_desc.get(tag)
            if desc:
                result += f"{tag}:  # {desc}\n"
            else:
                result += f"{tag}:\n"
            for name in sorted(tags[tag]):
                result += f"  - {name}\n"
        return result
def filter_rules_with_profile(rule_col: list[BaseRule], profile: str) -> None:
    """Unload rules that are not part of the specified profile."""
    total_rules = len(rule_col)
    # Collect the rule names activated by the profile and everything it
    # extends, walking the inheritance chain until it ends.
    activated: set[str] = set()
    level: str | None = profile
    while level:
        for rule in PROFILES[level]["rules"]:
            _logger.debug("Activating rule `%s` due to profile `%s`", rule, level)
            activated.add(rule)
        level = PROFILES[level].get("extends", None)
    # Drop every removable rule that the profile did not activate. Iterate a
    # copy because rule_col is mutated in place.
    for candidate in rule_col.copy():
        if candidate.unloadable or candidate.id in activated:
            continue
        _logger.debug(
            "Unloading %s rule due to not being part of %s profile.",
            candidate.id,
            profile,
        )
        rule_col.remove(candidate)
    _logger.debug("%s/%s rules included in the profile", len(rule_col), total_rules)
| 21,851 | Python | .py | 517 | 31.226306 | 105 | 0.578157 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,272 | meta_video_links.py | ansible_ansible-lint/src/ansiblelint/rules/meta_video_links.py | """Implementation of meta-video-links rule."""
# Copyright (c) 2018, Ansible Project
from __future__ import annotations
import re
import sys
from typing import TYPE_CHECKING
from ansiblelint.constants import FILENAME_KEY, LINE_NUMBER_KEY
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from collections.abc import Sequence
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
class MetaVideoLinksRule(AnsibleLintRule):
    """meta/main.yml video_links should be formatted correctly."""

    id = "meta-video-links"
    description = (
        "Items in ``video_links`` in meta/main.yml should be "
        "dictionaries, and contain only keys ``url`` and ``title``, "
        "and have a shared link from a supported provider"
    )
    severity = "LOW"
    tags = ["metadata"]
    version_added = "v4.0.0"

    VIDEO_REGEXP = {
        "google": re.compile(r"https://drive\.google\.com.*file/d/([0-9A-Za-z-_]+)/.*"),
        "vimeo": re.compile(r"https://vimeo\.com/([0-9]+)"),
        "youtube": re.compile(r"https://youtu\.be/([0-9A-Za-z-_]+)"),
    }

    def matchyaml(self, file: Lintable) -> list[MatchError]:
        """Validate each galaxy_info.video_links entry of a meta file."""
        if file.kind != "meta" or not file.data:
            return []
        galaxy_info = file.data.get("galaxy_info", None)
        if not galaxy_info:
            return []
        video_links = galaxy_info.get("video_links", None)
        if not video_links:
            return []
        matches = []
        # YAML loading injects filename/line-number bookkeeping keys, so a
        # well-formed entry carries exactly these four keys.
        allowed_keys = {"url", "title", FILENAME_KEY, LINE_NUMBER_KEY}
        for entry in video_links:
            if not isinstance(entry, dict):
                matches.append(
                    self.create_matcherror(
                        "Expected item in 'video_links' to be a dictionary",
                        filename=file,
                    ),
                )
                continue
            if set(entry) != allowed_keys:
                matches.append(
                    self.create_matcherror(
                        "Expected item in 'video_links' to contain "
                        "only keys 'url' and 'title'",
                        filename=file,
                    ),
                )
                continue
            # Accept the URL when any supported provider pattern matches.
            if not any(
                pattern.match(entry["url"]) for pattern in self.VIDEO_REGEXP.values()
            ):
                msg = (
                    f"URL format '{entry['url']}' is not recognized. "
                    "Expected it be a shared link from Vimeo, YouTube, "
                    "or Google Drive."
                )
                matches.append(self.create_matcherror(msg, filename=file))
        return matches
if "pytest" in sys.modules:
import pytest
# pylint: disable=ungrouped-imports
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
@pytest.mark.parametrize(
("test_file", "failures"),
(
pytest.param(
"examples/roles/meta_video_links_fail/meta/main.yml",
(
"Expected item in 'video_links' to be a dictionary",
"Expected item in 'video_links' to contain only keys 'url' and 'title'",
"URL format 'https://www.youtube.com/watch?v=aWmRepTSFKs&feature=youtu.be' is not recognized. Expected it be a shared link from Vimeo, YouTube, or Google Drive.",
"URL format 'www.acme.com/vid' is not recognized",
),
id="1",
),
pytest.param(
"examples/roles/meta_video_links_pass/meta/main.yml",
(),
id="2",
),
),
)
def test_video_links(
default_rules_collection: RulesCollection,
test_file: str,
failures: Sequence[str],
) -> None:
"""Test rule matches."""
results = Runner(test_file, rules=default_rules_collection).run()
assert len(results) == len(failures)
for index, result in enumerate(results):
assert result.tag == "meta-video-links"
assert failures[index] in result.message
| 4,159 | Python | .py | 103 | 28.84466 | 182 | 0.55539 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,273 | playbook_extension.py | ansible_ansible-lint/src/ansiblelint/rules/playbook_extension.py | """Implementation of playbook-extension rule."""
# Copyright (c) 2016, Tsukinowa Inc. <info@tsukinowa.jp>
# Copyright (c) 2018, Ansible Project
from __future__ import annotations
import sys
from typing import TYPE_CHECKING
from ansiblelint.file_utils import Lintable
from ansiblelint.rules import AnsibleLintRule
from ansiblelint.runner import Runner
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
class PlaybookExtensionRule(AnsibleLintRule):
    """Use ".yml" or ".yaml" playbook extension."""

    id = "playbook-extension"
    description = 'Playbooks should have the ".yml" or ".yaml" extension'
    severity = "MEDIUM"
    tags = ["formatting"]
    # Class-level list shared across instances: deduplicates reports per path.
    done: list[str] = []
    version_added = "v4.0.0"

    def matchyaml(self, file: Lintable) -> list[MatchError]:
        """Flag a playbook whose file extension is neither .yml nor .yaml."""
        if file.kind != "playbook":
            return []
        path = str(file.path)
        if file.path.suffix in [".yml", ".yaml"] or path in self.done:
            return []
        self.done.append(path)
        return [self.create_matcherror(filename=file)]
if "pytest" in sys.modules:
import pytest
# pylint: disable=ungrouped-imports
from ansiblelint.rules import RulesCollection
@pytest.mark.parametrize(
("file", "expected"),
(pytest.param("examples/playbooks/play-without-extension", 1, id="fail"),),
)
def test_playbook_extension(file: str, expected: int) -> None:
"""The ini_file module does not accept preserve mode."""
rules = RulesCollection()
rules.register(PlaybookExtensionRule())
results = Runner(Lintable(file, kind="playbook"), rules=rules).run()
assert len(results) == expected
for result in results:
assert result.tag == "playbook-extension"
| 1,846 | Python | .py | 45 | 34.866667 | 83 | 0.680827 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,274 | galaxy_version_incorrect.py | ansible_ansible-lint/src/ansiblelint/rules/galaxy_version_incorrect.py | """Implementation of GalaxyVersionIncorrectRule."""
from __future__ import annotations
import sys
from functools import total_ordering
from typing import TYPE_CHECKING, Any
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
class GalaxyVersionIncorrectRule(AnsibleLintRule):
    """Rule for checking collection version is greater than 1.0.0."""

    id = "galaxy-version-incorrect"
    description = "Confirm via galaxy.yml file if collection version is greater than or equal to 1.0.0."
    severity = "MEDIUM"
    tags = ["opt-in", "metadata"]
    version_added = "v24.7.0"

    def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]:
        """Return matches found for a specific play (entry in playbook)."""
        if file.kind != "galaxy":  # type: ignore[comparison-overlap]
            return []
        results = []
        version = data.get("version")
        # Robustness fix: a galaxy.yml without a `version` key used to crash
        # (Version(None)). A missing key is a schema problem, not ours.
        if version is None:
            return results
        if Version(version) < Version("1.0.0"):
            results.append(
                self.create_matcherror(
                    message="collection version should be greater than or equal to 1.0.0",
                    lineno=version._line_number,  # noqa: SLF001
                    filename=file,
                ),
            )
        return results
@total_ordering
class Version:
"""Simple class to compare arbitrary versions."""
def __init__(self, version_string: str):
"""Construct a Version object."""
self.components = version_string.split(".")
def __eq__(self, other: object) -> bool:
"""Implement equality comparison."""
try:
other = _coerce(other)
except NotImplementedError:
return NotImplemented
return self.components == other.components
def __lt__(self, other: Version) -> bool:
"""Implement lower-than operation."""
other = _coerce(other)
return self.components < other.components
def _coerce(other: object) -> Version:
if isinstance(other, str):
other = Version(other)
if isinstance(other, int | float):
other = Version(str(other))
if isinstance(other, Version):
return other
msg = f"Unable to coerce object type {type(other)} to Version"
raise NotImplementedError(msg)
if "pytest" in sys.modules:
import pytest
from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports
from ansiblelint.runner import Runner
def test_galaxy_collection_version_positive() -> None:
"""Positive test for collection version in galaxy."""
collection = RulesCollection()
collection.register(GalaxyVersionIncorrectRule())
success = "examples/.collection/galaxy.yml"
good_runner = Runner(success, rules=collection)
assert good_runner.run() == []
def test_galaxy_collection_version_negative() -> None:
"""Negative test for collection version in galaxy."""
collection = RulesCollection()
collection.register(GalaxyVersionIncorrectRule())
failure = "examples/meta/galaxy.yml"
bad_runner = Runner(failure, rules=collection)
errs = bad_runner.run()
assert len(errs) == 1
def test_version_class() -> None:
"""Test for version class."""
v = Version("1.0.0")
assert v == Version("1.0.0")
assert v != NotImplemented
def test_coerce() -> None:
"""Test for _coerce function."""
assert _coerce("1.0") == Version("1.0")
assert _coerce(1.0) == Version("1.0")
expected = "Unable to coerce object type"
with pytest.raises(NotImplementedError, match=expected):
_coerce(type(Version))
| 3,783 | Python | .py | 88 | 35.056818 | 104 | 0.648133 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,275 | args.py | ansible_ansible-lint/src/ansiblelint/rules/args.py | """Rule definition to validate task options."""
from __future__ import annotations
import contextlib
import importlib.util
import io
import json
import logging
import re
import sys
from typing import TYPE_CHECKING, Any
# pylint: disable=preferred-module
from unittest import mock
from unittest.mock import patch
# pylint: disable=reimported
import ansible.module_utils.basic as mock_ansible_module
from ansible.module_utils import basic
from ansiblelint.constants import LINE_NUMBER_KEY
from ansiblelint.rules import AnsibleLintRule, RulesCollection
from ansiblelint.text import has_jinja
from ansiblelint.utils import load_plugin
from ansiblelint.yaml_utils import clean_json
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
_logger = logging.getLogger(__name__)

# Validation error messages matching any of these patterns are treated as
# false positives and dropped by ArgsRule._sanitize_results().
ignored_re = re.compile(
    "|".join(  # noqa: FLY002
        [
            r"^parameters are mutually exclusive:",
            # https://github.com/ansible/ansible-lint/issues/3128 as strings can be jinja
            # Do not remove unless you manually test if the original example
            # from the bug does not trigger the rule anymore. We were not able
            # to add a regression test because it would involve installing this
            # collection. Attempts to reproduce same bug with other collections
            # failed, even if the message originates from Ansible core.
            r"^unable to evaluate string as dictionary$",
        ],
    ),
    flags=re.MULTILINE | re.DOTALL,
)

# Module options removed from the task args before validation, because the
# validator rejects them even though they are valid in real runs.
workarounds_drop_map = {
    # https://github.com/ansible/ansible-lint/issues/3110
    "ansible.builtin.copy": ["decrypt"],
    # https://github.com/ansible/ansible-lint/issues/2824#issuecomment-1354337466
    # https://github.com/ansible/ansible-lint/issues/3138
    "ansible.builtin.service": ["daemon_reload", "use"],
    # Avoid: Unsupported parameters for (basic.py) module: cmd. Supported parameters include: _raw_params, _uses_shell, argv, chdir, creates, executable, removes, stdin, stdin_add_newline, strip_empty_ends.
    "ansible.builtin.command": ["cmd"],
    # https://github.com/ansible/ansible-lint/issues/3152
    "ansible.posix.synchronize": ["use_ssh_args"],
}
# Extra options injected into the task args before validation, because the
# validator requires them even though real runs supply them implicitly.
workarounds_inject_map = {
    # https://github.com/ansible/ansible-lint/issues/2824
    "ansible.builtin.async_status": {"_async_dir": "/tmp/ansible-async"},
}
class ValidationPassedError(Exception):
    """Exception to be raised when validation passes.

    Control-flow signal: ``CustomAnsibleModule.__init__`` raises it right
    after ``AnsibleModule.__init__`` returns, so module ``main()`` stops
    before doing any real work; ``ArgsRule`` catches it as success.
    """
class CustomAnsibleModule(basic.AnsibleModule):  # type: ignore[misc]
    """Mock AnsibleModule class.

    Patched in place of ``AnsibleModule`` during validation so the module's
    argument spec is checked without letting the module actually execute.
    """

    def __init__(self, *args: str, **kwargs: Any) -> None:
        """Initialize AnsibleModule mock."""
        # Force no_log so the validated arguments are never logged.
        kwargs["no_log"] = True
        super().__init__(*args, **kwargs)
        # Reaching this line means super().__init__ did not abort, i.e.
        # argument validation succeeded; stop the module run here.
        raise ValidationPassedError
class ArgsRule(AnsibleLintRule):
    """Validating module arguments.

    Loads the real module source, patches ``AnsibleModule`` with
    ``CustomAnsibleModule`` and runs the module's ``main()`` so that Ansible's
    own argument-spec validation reports wrong/missing options.
    """

    id = "args"
    severity = "HIGH"
    description = "Check whether tasks are using correct module options."
    tags = ["syntax", "experimental"]
    version_added = "v6.10.0"
    # Action names that are containers rather than real modules; skipped.
    module_aliases: dict[str, str] = {"block/always/rescue": "block/always/rescue"}
    _ids = {
        "args[module]": description,
    }

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> list[MatchError]:
        """Validate the options of a single task by dry-running its module."""
        # pylint: disable=too-many-return-statements
        results: list[MatchError] = []
        module_name = task["action"]["__ansible_module_original__"]
        failed_msg = None
        if module_name in self.module_aliases:
            return []
        loaded_module = load_plugin(module_name)
        # https://github.com/ansible/ansible-lint/issues/3200
        # since "ps1" modules cannot be executed on POSIX platforms, we will
        # avoid running this rule for such modules
        if isinstance(
            loaded_module.plugin_resolved_path,
            str,
        ) and loaded_module.plugin_resolved_path.endswith(".ps1"):
            return []
        # Keep only real module options; dunder keys are lint bookkeeping.
        module_args = {
            key: value
            for key, value in task["action"].items()
            if not key.startswith("__")
        }
        # Return if 'args' is jinja string
        # https://github.com/ansible/ansible-lint/issues/3199
        if (
            "args" in task.raw_task
            and isinstance(task.raw_task["args"], str)
            and has_jinja(task.raw_task["args"])
        ):
            return []
        # Apply per-module workarounds for known validator false positives.
        if loaded_module.resolved_fqcn in workarounds_inject_map:
            module_args.update(workarounds_inject_map[loaded_module.resolved_fqcn])
        if loaded_module.resolved_fqcn in workarounds_drop_map:
            for key in workarounds_drop_map[loaded_module.resolved_fqcn]:
                if key in module_args:
                    del module_args[key]
        # Swap AnsibleModule for our mock so main() stops after validation.
        with mock.patch.object(
            mock_ansible_module,
            "AnsibleModule",
            CustomAnsibleModule,
        ):
            spec = importlib.util.spec_from_file_location(
                name=loaded_module.plugin_resolved_name,
                location=loaded_module.plugin_resolved_path,
            )
            if not spec:
                assert file is not None
                _logger.warning(
                    "Unable to load module %s at %s:%s for options validation",
                    module_name,
                    file.filename,
                    task[LINE_NUMBER_KEY],
                )
                return []
            assert spec.loader is not None
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            try:
                if not hasattr(module, "main"):
                    # skip validation for module options that are implemented as action plugin
                    # as the option values can be changed in action plugin and are not passed
                    # through `ArgumentSpecValidator` class as in case of modules.
                    return []
                # Modules read their args from argv/stdin; feed ours via argv.
                with patch.object(
                    sys,
                    "argv",
                    ["", json.dumps({"ANSIBLE_MODULE_ARGS": clean_json(module_args)})],
                ):
                    fio = io.StringIO()
                    failed_msg = ""
                    # Warning: avoid running anything while stdout is redirected
                    # as what happens may be very hard to debug.
                    with contextlib.redirect_stdout(fio):
                        # pylint: disable=protected-access
                        basic._ANSIBLE_ARGS = None  # noqa: SLF001
                        try:
                            module.main()
                        except SystemExit:
                            # Validation failure: the module printed its JSON
                            # error to (captured) stdout and exited.
                            failed_msg = fio.getvalue()
                if failed_msg:
                    results.extend(
                        self._parse_failed_msg(failed_msg, task, module_name, file),
                    )
                sanitized_results = self._sanitize_results(results, module_name)
            except ValidationPassedError:
                # Raised by CustomAnsibleModule.__init__ when options are valid.
                return []
            return sanitized_results

    # pylint: disable=unused-argument
    def _sanitize_results(
        self,
        results: list[MatchError],
        module_name: str,
    ) -> list[MatchError]:
        """Remove results that are false positive."""
        sanitized_results = []
        for result in results:
            result_msg = result.message
            if ignored_re.match(result_msg):
                continue
            sanitized_results.append(result)
        return sanitized_results

    def _parse_failed_msg(
        self,
        failed_msg: str,
        task: dict[str, Any],
        module_name: str,
        file: Lintable | None = None,
    ) -> list[MatchError]:
        """Parse failed message and return list of MatchError."""
        results: list[MatchError] = []
        try:
            failed_obj = json.loads(failed_msg)
            error_message = failed_obj["msg"]
        except json.decoder.JSONDecodeError:
            # Not JSON; report the raw captured output as-is.
            error_message = failed_msg
        option_type_check_error = re.search(
            r"(argument|option) '(?P<name>.*)' is of type",
            error_message,
        )
        if option_type_check_error:
            # ignore options with templated variable value with type check errors
            option_key = option_type_check_error.group("name")
            option_value = task["action"][option_key]
            if has_jinja(option_value):
                _logger.debug(
                    "Type checking ignored for '%s' option in task '%s' at line %s.",
                    option_key,
                    module_name,
                    task[LINE_NUMBER_KEY],
                )
                return results
        value_not_in_choices_error = re.search(
            r"value of (?P<name>.*) must be one of:",
            error_message,
        )
        if value_not_in_choices_error:
            # ignore templated value not in allowed choices
            choice_key = value_not_in_choices_error.group("name")
            choice_value = task["action"][choice_key]
            if has_jinja(choice_value):
                _logger.debug(
                    "Value checking ignored for '%s' option in task '%s' at line %s.",
                    choice_key,
                    module_name,
                    task[LINE_NUMBER_KEY],
                )
                return results
        results.append(
            self.create_matcherror(
                message=error_message,
                lineno=task[LINE_NUMBER_KEY],
                tag="args[module]",
                filename=file,
            ),
        )
        return results
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
    import pytest  # noqa: TCH002

    from ansiblelint.runner import Runner  # pylint: disable=ungrouped-imports

    def test_args_module_fail(default_rules_collection: RulesCollection) -> None:
        """Test rule invalid module options."""
        success = "examples/playbooks/rule-args-module-fail.yml"
        results = Runner(success, rules=default_rules_collection).run()
        # One failure per bad task in the example playbook, in file order.
        assert len(results) == 5
        assert results[0].tag == "args[module]"
        assert "missing required arguments" in results[0].message
        assert results[1].tag == "args[module]"
        assert "missing parameter(s) required by " in results[1].message
        assert results[2].tag == "args[module]"
        assert "Unsupported parameters for" in results[2].message
        assert results[3].tag == "args[module]"
        assert "Unsupported parameters for" in results[3].message
        assert results[4].tag == "args[module]"
        assert "value of state must be one of" in results[4].message

    def test_args_module_pass(
        default_rules_collection: RulesCollection,
        caplog: pytest.LogCaptureFixture,
    ) -> None:
        """Test rule valid module options."""
        success = "examples/playbooks/rule-args-module-pass.yml"
        # Also assert that validation produced no warnings at all.
        with caplog.at_level(logging.WARNING):
            results = Runner(success, rules=default_rules_collection).run()
        assert len(results) == 0, results
        assert len(caplog.records) == 0, caplog.records
| 11,330 | Python | .py | 264 | 32.128788 | 206 | 0.59922 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,276 | deprecated_local_action.py | ansible_ansible-lint/src/ansiblelint/rules/deprecated_local_action.py | """Implementation for deprecated-local-action rule."""
# Copyright (c) 2016, Tsukinowa Inc. <info@tsukinowa.jp>
# Copyright (c) 2018, Ansible Project
from __future__ import annotations
import copy
import logging
import os
import sys
from pathlib import Path
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule, TransformMixin
from ansiblelint.runner import get_matches
from ansiblelint.transformer import Transformer
if TYPE_CHECKING:
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ansiblelint.config import Options
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
_logger = logging.getLogger(__name__)
class TaskNoLocalAction(AnsibleLintRule, TransformMixin):
    """Do not use 'local_action', use 'delegate_to: localhost'."""

    id = "deprecated-local-action"
    description = "Do not use ``local_action``, use ``delegate_to: localhost``"
    # Raw task access is required so the untransformed key is still visible.
    needs_raw_task = True
    severity = "MEDIUM"
    tags = ["deprecations"]
    version_added = "v4.0.0"

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Return matches for a task.

        :param task: Task to inspect; the raw (un-normalized) form is checked.
        :param file: File the task belongs to, if any.
        :returns: True when the raw task contains a ``local_action`` key.
        """
        raw_task = task["__raw_task__"]
        return "local_action" in raw_task

    def transform(
        self,
        match: MatchError,
        lintable: Lintable,
        data: CommentedMap | CommentedSeq | str,
    ) -> None:
        """Rewrite ``local_action`` to ``<module>`` + ``delegate_to: localhost``."""
        if match.tag == self.id:
            # We do not want to perform a partial modification accidentally,
            # so work on a deep copy and only apply it once fully rebuilt.
            original_target_task = self.seek(match.yaml_path, data)
            target_task = copy.deepcopy(original_target_task)
            # Rebuild the mapping entry by entry to preserve key order;
            # popitem(False) pops from the front (FIFO).
            for _ in range(len(target_task)):
                k, v = target_task.popitem(False)
                if k == "local_action":
                    if isinstance(v, dict):
                        # Dict form: module name lives under the "module" key.
                        # NOTE(review): only the module name is carried over;
                        # other keys of the dict value appear to be dropped —
                        # confirm module arguments cannot occur here.
                        module_name = v["module"]
                        target_task[module_name] = None
                        target_task["delegate_to"] = "localhost"
                    elif isinstance(v, str):
                        # String form: "<module> <arguments>".
                        module_name, module_value = v.split(" ", 1)
                        target_task[module_name] = module_value
                        target_task["delegate_to"] = "localhost"
                    else:
                        # Unknown shape: bail out without modifying the task.
                        _logger.debug(
                            "Ignored unexpected data inside %s transform.",
                            self.id,
                        )
                        return
                else:
                    target_task[k] = v
            match.fixed = True
            # Apply the rebuilt mapping onto the original YAML node in place.
            original_target_task.clear()
            original_target_task.update(target_task)
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
    from unittest import mock

    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    def test_local_action(default_rules_collection: RulesCollection) -> None:
        """Positive test deprecated_local_action."""
        results = Runner(
            "examples/playbooks/rule-deprecated-local-action-fail.yml",
            rules=default_rules_collection,
        ).run()
        assert len(results) == 1
        assert results[0].tag == "deprecated-local-action"

    # ANSIBLE_LINT_WRITE_TMP=1 makes the transformer write the result to a
    # "<name>.tmp.<ext>" sibling instead of modifying the example in place.
    @mock.patch.dict(os.environ, {"ANSIBLE_LINT_WRITE_TMP": "1"}, clear=True)
    def test_local_action_transform(
        config_options: Options,
        default_rules_collection: RulesCollection,
    ) -> None:
        """Test transform functionality for deprecated-local-action rule."""
        playbook = Path("examples/playbooks/tasks/local_action.yml")
        config_options.write_list = ["all"]

        config_options.lintables = [str(playbook)]
        runner_result = get_matches(
            rules=default_rules_collection,
            options=config_options,
        )
        transformer = Transformer(result=runner_result, options=config_options)
        transformer.run()

        matches = runner_result.matches
        assert len(matches) == 3

        orig_content = playbook.read_text(encoding="utf-8")
        # The expected output is stored next to the input as *.transformed.yml.
        expected_content = playbook.with_suffix(
            f".transformed{playbook.suffix}",
        ).read_text(encoding="utf-8")
        transformed_content = playbook.with_suffix(f".tmp{playbook.suffix}").read_text(
            encoding="utf-8",
        )

        assert orig_content != transformed_content
        assert expected_content == transformed_content
        # Clean up the temporary file produced by the transformer.
        playbook.with_suffix(f".tmp{playbook.suffix}").unlink()
| 4,584 | Python | .py | 108 | 32.694444 | 87 | 0.622671 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,277 | sanity.py | ansible_ansible-lint/src/ansiblelint/rules/sanity.py | """Implementation of sanity rule."""
from __future__ import annotations
import re
import sys
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
# Copyright (c) 2018, Ansible Project
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
class CheckSanityIgnoreFiles(AnsibleLintRule):
    """Ignore entries in sanity ignore files must match an allow list."""

    id = "sanity"
    description = (
        "Identifies non-allowed entries in the `tests/sanity/ignore*.txt files."
    )
    severity = "MEDIUM"
    tags = ["idiom"]
    version_added = "v6.14.0"

    # Partner Engineering defines this list. Please contact PE for changes.
    allowed_ignores = [
        "validate-modules:missing-gplv3-license",
        "action-plugin-docs",  # Added for Networking Collections
        "import-2.6",
        "import-2.6!skip",
        "import-2.7",
        "import-2.7!skip",
        "import-3.5",
        "import-3.5!skip",
        "compile-2.6",
        "compile-2.6!skip",
        "compile-2.7",
        "compile-2.7!skip",
        "compile-3.5",
        "compile-3.5!skip",
        "shebang",  # Unreliable test
        "shellcheck",  # Unreliable test
        "pylint:used-before-assignment",  # Unreliable test
    ]

    # Ignore files whose name contains any of these version markers are not
    # validated at all (legacy ansible-core versions).
    no_check_ignore_files = [
        "ignore-2.9",
        "ignore-2.10",
        "ignore-2.11",
        "ignore-2.12",
    ]

    _ids = {
        "sanity[cannot-ignore]": "Ignore file contains ... at line ..., which is not a permitted ignore.",
        "sanity[bad-ignore]": "Ignore file entry at ... is formatted incorrectly. Please review.",
    }

    def matchyaml(self, file: Lintable) -> list[MatchError]:
        """Evaluate sanity ignore lists for disallowed ignores.

        :param file: Input lintable file that is a match for `sanity-ignore-file`
        :returns: List of errors matched to the input file
        """
        results: list[MatchError] = []
        test = ""
        # Only entries for paths under these top-level directories are checked.
        check_dirs = {
            "plugins",
            "roles",
        }

        if file.kind != "sanity-ignore-file":
            return []

        with file.path.open(encoding="utf-8") as ignore_file:
            entries = ignore_file.read().splitlines()

        if any(name in str(file.abspath) for name in self.no_check_ignore_files):
            return []

        # Expected entry format: "<path> <test-name>[!skip]  # optional comment"
        for line_num, entry in enumerate(entries, 1):
            base_ignore_dir = ""

            if entry:
                # match up to the first "/"
                regex = re.match("[^/]*", entry)

                if regex:
                    base_ignore_dir = regex.group(0)

            if base_ignore_dir in check_dirs:
                try:
                    if "#" in entry:
                        entry, _ = entry.split("#")
                    (_, test) = entry.split()
                    if test not in self.allowed_ignores:
                        results.append(
                            self.create_matcherror(
                                message=f"Ignore file contains {test} at line {line_num}, which is not a permitted ignore.",
                                tag="sanity[cannot-ignore]",
                                lineno=line_num,
                                filename=file,
                            ),
                        )
                except ValueError:
                    # Raised either by the "#" split (more than one "#" on the
                    # line) or by entry.split() not yielding exactly two
                    # fields — both mean the entry is malformed.
                    results.append(
                        self.create_matcherror(
                            message=f"Ignore file entry at {line_num} is formatted incorrectly. Please review.",
                            tag="sanity[bad-ignore]",
                            lineno=line_num,
                            filename=file,
                        ),
                    )

        return results
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
    import pytest

    # pylint: disable=ungrouped-imports
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    @pytest.mark.parametrize(
        ("test_file", "failures", "tags"),
        (
            # ignore-2.9.txt is listed in no_check_ignore_files, so the rule
            # skips it entirely and reports nothing.
            pytest.param(
                "examples/sanity_ignores/tests/sanity/ignore-2.9.txt",
                0,
                "sanity[cannot-ignore]",
                id="pass",
            ),
            # Malformed entry -> sanity[bad-ignore].
            pytest.param(
                "examples/sanity_ignores/tests/sanity/ignore-2.15.txt",
                1,
                "sanity[bad-ignore]",
                id="fail0",
            ),
            # Entry with a test name outside the allow list -> cannot-ignore.
            pytest.param(
                "examples/sanity_ignores/tests/sanity/ignore-2.13.txt",
                1,
                "sanity[cannot-ignore]",
                id="fail1",
            ),
        ),
    )
    def test_sanity_ignore_files(
        default_rules_collection: RulesCollection,
        test_file: str,
        failures: int,
        tags: str,
    ) -> None:
        """Test rule matches."""
        default_rules_collection.register(CheckSanityIgnoreFiles())
        results = Runner(test_file, rules=default_rules_collection).run()
        for result in results:
            assert result.rule.id == CheckSanityIgnoreFiles().id
            assert result.tag == tags
        assert len(results) == failures
| 5,489 | Python | .py | 139 | 25.964029 | 132 | 0.514189 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,278 | no_same_owner.py | ansible_ansible-lint/src/ansiblelint/rules/no_same_owner.py | """Optional rule for avoiding keeping owner/group when transferring files."""
from __future__ import annotations
import re
import sys
from typing import TYPE_CHECKING, Any
from ansible.utils.sentinel import Sentinel
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class NoSameOwnerRule(AnsibleLintRule):
    """Do not preserve the owner and group when transferring files across hosts."""

    id = "no-same-owner"
    description = """
Optional rule that highlights dangers of assuming that user/group on the remote
machines may not exist on ansible controller or vice versa. Owner and group
should not be preserved when transferring files between them.
"""
    severity = "LOW"
    tags = ["opt-in"]

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Return matches for a task."""
        action = task.get("action")
        if not isinstance(action, dict):  # pragma: no cover
            return False

        # Dispatch on the module name; any other module is out of scope.
        checks = {
            "synchronize": self.handle_synchronize,
            "ansible.posix.synchronize": self.handle_synchronize,
            "unarchive": self.handle_unarchive,
            "ansible.builtin.unarchive": self.handle_unarchive,
        }
        handler = checks.get(action["__ansible_module__"])
        return handler(task, action) if handler else False

    @staticmethod
    def handle_synchronize(task: Any, action: dict[str, Any]) -> bool:
        """Process a synchronize task; flag when owner/group are preserved."""
        if task.get("delegate_to") != Sentinel:
            return False

        # With archive enabled (the default), owner and group default to True.
        archive = action.get("archive", True)
        return bool(action.get("owner", archive) or action.get("group", archive))

    @staticmethod
    def handle_unarchive(task: Any, action: dict[str, Any]) -> bool:
        """Process unarchive task; flag archives extracted preserving owners."""
        delegate_to = task.get("delegate_to")
        # The source comes from the controller when delegated to localhost or
        # when remote_src is not set (same truth table as the original
        # "localhost or (not localhost and not remote_src)" expression).
        comes_from_controller = delegate_to == "localhost" or not action.get(
            "remote_src",
        )
        if not comes_from_controller:
            return False

        src = action.get("src")
        if not isinstance(src, str):
            return False

        extra_opts = action.get("extra_opts", [])
        if src.endswith("zip"):
            # "-X" restores UID/GID from the zip archive.
            return "-X" in extra_opts
        if re.search(r".*\.tar(\.(gz|bz2|xz))?$", src):
            # tar preserves ownership unless --no-same-owner is passed.
            return "--no-same-owner" not in extra_opts
        return False
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
    import pytest

    # pylint: disable=ungrouped-imports
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    @pytest.mark.parametrize(
        ("test_file", "failures"),
        (
            # Every flagged task in fail.yml preserves owner/group somehow.
            pytest.param(
                "examples/roles/role_for_no_same_owner/tasks/fail.yml",
                12,
                id="fail",
            ),
            pytest.param(
                "examples/roles/role_for_no_same_owner/tasks/pass.yml",
                0,
                id="pass",
            ),
        ),
    )
    def test_no_same_owner_rule(
        default_rules_collection: RulesCollection,
        test_file: str,
        failures: int,
    ) -> None:
        """Test rule matches."""
        results = Runner(test_file, rules=default_rules_collection).run()
        assert len(results) == failures
        for result in results:
            assert result.message == NoSameOwnerRule().shortdesc
| 3,594 | Python | .py | 95 | 29.073684 | 88 | 0.608396 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,279 | schema.py | ansible_ansible-lint/src/ansiblelint/rules/schema.py | """Rule definition for JSON Schema Validations."""
from __future__ import annotations
import logging
import re
import sys
from typing import TYPE_CHECKING, Any
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.rules import AnsibleLintRule
from ansiblelint.schemas.__main__ import JSON_SCHEMAS
from ansiblelint.schemas.main import validate_file_schema
from ansiblelint.text import has_jinja
if TYPE_CHECKING:
from ansiblelint.config import Options
from ansiblelint.utils import Task
_logger = logging.getLogger(__name__)
DESCRIPTION_MD = """ Returned errors will not include exact line numbers, but they will mention
the schema name being used as a tag, like ``schema[playbook]``,
``schema[tasks]``.
This rule is not skippable and stops further processing of the file.
If incorrect schema was picked, you might want to either:
* move the file to standard location, so its file is detected correctly.
* use ``kinds:`` option in linter config to help it pick correct file type.
"""
pre_checks = {
"task": {
"with_flattened": {
"msg": "with_flattened was moved to with_community.general.flattened in 2.10",
"tag": "moves",
},
"with_filetree": {
"msg": "with_filetree was moved to with_community.general.filetree in 2.10",
"tag": "moves",
},
"with_cartesian": {
"msg": "with_cartesian was moved to with_community.general.flattened in 2.10",
"tag": "moves",
},
},
}
class ValidateSchemaRule(AnsibleLintRule):
    """Perform JSON Schema Validation for known lintable kinds."""

    description = DESCRIPTION_MD

    id = "schema"
    severity = "VERY_HIGH"
    tags = ["core"]
    version_added = "v6.1.0"

    # Every tag this rule may report: one per supported schema/file kind.
    _ids = {
        "schema[ansible-lint-config]": "",
        "schema[ansible-navigator-config]": "",
        "schema[changelog]": "",
        "schema[execution-environment]": "",
        "schema[galaxy]": "",
        "schema[inventory]": "",
        "schema[meta]": "",
        "schema[meta-runtime]": "",
        "schema[molecule]": "",
        "schema[playbook]": "",
        "schema[requirements]": "",
        "schema[role-arg-spec]": "",
        "schema[rulebook]": "",
        "schema[tasks]": "",
        "schema[vars]": "",
    }

    # Class-level cache backing the ``field_checks`` property; shared across
    # instances and populated on first access.
    _field_checks: dict[str, list[str]] = {}

    @property
    def field_checks(self) -> dict[str, list[str]]:
        """Lazy property for returning field checks."""
        if not self._collection:
            msg = "Rule was not registered to a RuleCollection."
            raise RuntimeError(msg)
        if not self._field_checks:
            # Map of play/task keys to their currently valid values, derived
            # from the become plugins available at runtime.
            self._field_checks = {
                "become_method": sorted(
                    self._collection.app.runtime.plugins.become.keys(),
                ),
            }
        return self._field_checks

    def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]:
        """Return matches found for a specific playbook."""
        results: list[MatchError] = []
        # Skip empty data, unsupported kinds, and files that failed to load.
        if (
            not data
            or file.kind not in ("tasks", "handlers", "playbook")
            or file.failed()
        ):
            return results

        # check at play level
        results.extend(self._get_field_matches(file=file, data=data))
        return results

    def _get_field_matches(
        self,
        file: Lintable,
        data: dict[str, Any],
    ) -> list[MatchError]:
        """Retrieve all matches related to fields for the given data block."""
        results = []
        for key, values in self.field_checks.items():
            if key in data:
                plugin_value = data[key]
                # Jinja-templated values cannot be validated statically.
                if not has_jinja(plugin_value) and plugin_value not in values:
                    msg = f"'{key}' must be one of the currently available values: {', '.join(values)}"
                    results.append(
                        MatchError(
                            message=msg,
                            lineno=data.get("__line__", 1),
                            lintable=file,
                            rule=self,
                            details=ValidateSchemaRule.description,
                            tag=f"schema[{file.kind}]",
                        ),
                    )
        return results

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str | MatchError | list[MatchError]:
        """Return matches for a task: field checks plus moved-keyword checks."""
        results: list[MatchError] = []
        if not file:
            file = Lintable("", kind="tasks")
        if file.failed():
            return results
        results.extend(self._get_field_matches(file=file, data=task.raw_task))
        # Report loop keywords that were moved to other collections.
        for key in pre_checks["task"]:
            if key in task.raw_task:
                msg = pre_checks["task"][key]["msg"]
                tag = pre_checks["task"][key]["tag"]
                results.append(
                    MatchError(
                        message=msg,
                        lintable=file,
                        rule=self,
                        details=ValidateSchemaRule.description,
                        tag=f"schema[{tag}]",
                    ),
                )
        return results

    def matchyaml(self, file: Lintable) -> list[MatchError]:
        """Return JSON validation errors found as a list of MatchError(s)."""
        result: list[MatchError] = []
        if file.failed():
            return result
        if file.kind not in JSON_SCHEMAS:
            return result

        for error in validate_file_schema(file):
            if error.startswith("Failed to load YAML file"):
                _logger.debug(
                    "Ignored failure to load %s for schema validation, as !vault may cause it.",
                    file,
                )
                return []

            result.append(
                MatchError(
                    message=error,
                    lintable=file,
                    rule=self,
                    details=ValidateSchemaRule.description,
                    tag=f"schema[{file.kind}]",
                ),
            )
            # Only the first schema error per file is reported.
            break

        if not result:
            # Schema validation passed: fall back to the base implementation
            # so matchplay/matchtask based checks still run.
            result = super().matchyaml(file)
        return result
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
    import pytest

    # pylint: disable=ungrouped-imports
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    # Each param: file to lint, expected detected kind, and the regexes that
    # must match the reported messages, in order (empty list == no errors).
    @pytest.mark.parametrize(
        ("file", "expected_kind", "expected"),
        (
            pytest.param(
                "examples/.collection/galaxy.yml",
                "galaxy",
                [r".*'GPL' is not one of.*https://"],
                id="galaxy",
            ),
            pytest.param(
                "examples/roles/invalid_requirements_schema/meta/requirements.yml",
                "requirements",
                [
                    # r".*{'foo': 'bar'} is not valid under any of the given schemas.*https://",
                    r".*{'foo': 'bar'} is not of type 'array'.*https://",
                ],
                id="requirements",
            ),
            pytest.param(
                "examples/roles/invalid_meta_schema/meta/main.yml",
                "meta",
                [r".*False is not of type 'string'.*https://"],
                id="meta",
            ),
            pytest.param(
                "examples/playbooks/vars/invalid_vars_schema.yml",
                "vars",
                [r".* '123' does not match any of the regexes.*https://"],
                id="vars",
            ),
            pytest.param(
                "examples/execution-environment.yml",
                "execution-environment",
                [],
                id="execution-environment",
            ),
            pytest.param(
                "examples/ee_broken/execution-environment.yml",
                "execution-environment",
                [
                    r".*Additional properties are not allowed \('foo' was unexpected\).*https://",
                ],
                id="execution-environment-broken",
            ),
            pytest.param(
                "examples/meta/runtime.yml",
                "meta-runtime",
                [],
                id="meta-runtime",
            ),
            pytest.param(
                "examples/broken_collection_meta_runtime/meta/runtime.yml",
                "meta-runtime",
                [
                    r".*Additional properties are not allowed \('foo' was unexpected\).*https://",
                ],
                id="meta-runtime-broken",
            ),
            pytest.param(
                "examples/inventory/production.yml",
                "inventory",
                [],
                id="inventory",
            ),
            pytest.param(
                "examples/inventory/broken_dev_inventory.yml",
                "inventory",
                [
                    r".*Additional properties are not allowed \('foo' was unexpected\).*https://",
                ],
                id="inventory-broken",
            ),
            pytest.param(
                ".ansible-lint",
                "ansible-lint-config",
                [],
                id="ansible-lint-config",
            ),
            pytest.param(
                "examples/.config/ansible-lint.yml",
                "ansible-lint-config",
                [],
                id="ansible-lint-config2",
            ),
            pytest.param(
                "examples/broken/.ansible-lint",
                "ansible-lint-config",
                [
                    r".*Additional properties are not allowed \('foo' was unexpected\).*https://",
                ],
                id="ansible-lint-config-broken",
            ),
            pytest.param(
                "examples/broken_supported_ansible_also/.ansible-lint",
                "ansible-lint-config",
                [
                    r".*supported_ansible_also True is not of type 'array'.*https://",
                ],
                id="ansible-lint-config-broken",
            ),
            pytest.param(
                "examples/ansible-navigator.yml",
                "ansible-navigator-config",
                [],
                id="ansible-navigator-config",
            ),
            pytest.param(
                "examples/broken/ansible-navigator.yml",
                "ansible-navigator-config",
                [
                    r".*Additional properties are not allowed \('ansible' was unexpected\).*https://",
                ],
                id="ansible-navigator-config-broken",
            ),
            pytest.param(
                "examples/roles/hello/meta/argument_specs.yml",
                "role-arg-spec",
                [],
                id="role-arg-spec",
            ),
            pytest.param(
                "examples/roles/broken_argument_specs/meta/argument_specs.yml",
                "role-arg-spec",
                [
                    r".*Additional properties are not allowed \('foo' was unexpected\).*https://",
                ],
                id="role-arg-spec-broken",
            ),
            pytest.param(
                "examples/changelogs/changelog.yaml",
                "changelog",
                [
                    r".*Additional properties are not allowed \('foo' was unexpected\).*https://",
                ],
                id="changelog",
            ),
            pytest.param(
                "examples/rulebooks/rulebook-fail.yml",
                "rulebook",
                [
                    # r".*Additional properties are not allowed \('that_should_not_be_here' was unexpected\).*https://",
                    r".*'sss' is not of type 'object'.*https://",
                ],
                id="rulebook",
            ),
            pytest.param(
                "examples/rulebooks/rulebook-pass.yml",
                "rulebook",
                [],
                id="rulebook2",
            ),
            pytest.param(
                "examples/playbooks/rule-schema-become-method-pass.yml",
                "playbook",
                [],
                id="playbook",
            ),
            pytest.param(
                "examples/playbooks/rule-schema-become-method-fail.yml",
                "playbook",
                [
                    "'become_method' must be one of the currently available values",
                    "'become_method' must be one of the currently available values",
                ],
                id="playbook2",
            ),
        ),
    )
    def test_schema(
        file: str,
        expected_kind: str,
        expected: list[str],
        config_options: Options,
    ) -> None:
        """Validate kind detection and schema validation results per file."""
        lintable = Lintable(file)
        assert lintable.kind == expected_kind

        rules = RulesCollection(options=config_options)
        rules.register(ValidateSchemaRule())
        results = Runner(lintable, rules=rules).run()

        assert len(results) == len(expected), results
        for idx, result in enumerate(results):
            assert result.filename.endswith(file)
            assert re.match(expected[idx], result.message)
            assert result.tag == f"schema[{expected_kind}]"

    @pytest.mark.parametrize(
        ("file", "expected_kind", "expected_tag", "count"),
        (
            pytest.param(
                "examples/playbooks/rule-syntax-moves.yml",
                "playbook",
                "schema[moves]",
                3,
                id="playbook",
            ),
        ),
    )
    def test_schema_moves(
        file: str,
        expected_kind: str,
        expected_tag: str,
        count: int,
        config_options: Options,
    ) -> None:
        """Validate ability to detect schema[moves]."""
        lintable = Lintable(file)
        assert lintable.kind == expected_kind

        rules = RulesCollection(options=config_options)
        rules.register(ValidateSchemaRule())
        results = Runner(lintable, rules=rules).run()

        assert len(results) == count, results
        for result in results:
            assert result.filename.endswith(file)
            assert result.tag == expected_tag
30,280 | command_instead_of_module.py | ansible_ansible-lint/src/ansiblelint/rules/command_instead_of_module.py | """Implementation of command-instead-of-module rule."""
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import annotations
import sys
from pathlib import Path
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
from ansiblelint.utils import get_first_cmd_arg, get_second_cmd_arg
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class CommandsInsteadOfModulesRule(AnsibleLintRule):
    """Using command rather than module."""

    id = "command-instead-of-module"
    description = (
        "Executing a command when there is an Ansible module is generally a bad idea"
    )
    severity = "HIGH"
    tags = ["command-shell", "idiom"]
    version_added = "historic"

    _commands = ["command", "shell"]

    # Executable name -> Ansible module(s) that should be used instead.
    _modules = {
        "apt-get": "apt-get",
        "chkconfig": "service",
        "curl": "get_url or uri",
        "git": "git",
        "hg": "hg",
        "letsencrypt": "acme_certificate",
        "mktemp": "tempfile",
        "mount": "mount",
        "patch": "patch",
        "rpm": "yum or rpm_key",
        "rsync": "synchronize",
        "sed": "template, replace or lineinfile",
        "service": "service",
        "supervisorctl": "supervisorctl",
        "svn": "subversion",
        "systemctl": "systemd",
        "tar": "unarchive",
        "unzip": "unarchive",
        "wget": "get_url or uri",
        "yum": "yum",
    }

    # Sub-commands or flags that have no module equivalent; invocations using
    # them are accepted even though the executable itself has a module.
    _executable_options = {
        "git": ["branch", "log", "lfs", "rev-parse"],
        "systemctl": [
            "--version",
            "get-default",
            "kill",
            "set-default",
            "set-property",
            "show-environment",
            "status",
            "reset-failed",
        ],
        "yum": ["clean", "history", "info"],
        "rpm": ["--nodeps"],
    }

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Flag command/shell tasks that shadow an existing Ansible module.

        :param task: Task to check for shell usage
        :param file: File to lint
        :returns: False when no module replaces the command, otherwise a
            message naming the executable and the suggested module.
        """
        if task["action"]["__ansible_module__"] not in self._commands:
            return False

        first_cmd_arg = get_first_cmd_arg(task)
        second_cmd_arg = get_second_cmd_arg(task)
        if not first_cmd_arg:
            return False

        executable = Path(first_cmd_arg).name

        # Sub-commands without a module equivalent are accepted.
        accepted_options = self._executable_options.get(executable, [])
        if second_cmd_arg and second_cmd_arg in accepted_options:
            return False

        if executable in self._modules:
            return f"{executable} used in place of {self._modules[executable]} module"
        return False
if "pytest" in sys.modules:
    import pytest

    # pylint: disable=ungrouped-imports
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    @pytest.mark.parametrize(
        ("file", "expected"),
        (
            pytest.param(
                "examples/playbooks/rule-command-instead-of-module-pass.yml",
                0,
                id="pass",
            ),
            pytest.param(
                "examples/playbooks/rule-command-instead-of-module-fail.yml",
                3,
                id="fail",
            ),
        ),
    )
    def test_command_instead_of_module(
        default_rules_collection: RulesCollection,
        file: str,
        expected: int,
    ) -> None:
        """Validate that rule works as intended.

        :param default_rules_collection: Default rules for testing
        :param file: Test file to check for violations
        :param expected: Expected number of errors
        """
        results = Runner(file, rules=default_rules_collection).run()
        for result in results:
            assert result.rule.id == CommandsInsteadOfModulesRule.id, result
        assert len(results) == expected
| 5,190 | Python | .py | 137 | 30.153285 | 90 | 0.627509 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,281 | deprecated_bare_vars.py | ansible_ansible-lint/src/ansiblelint/rules/deprecated_bare_vars.py | """Implementation of deprecated-bare-vars rule."""
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import annotations
import os
import sys
from typing import TYPE_CHECKING, Any
from ansiblelint.rules import AnsibleLintRule
from ansiblelint.text import has_glob, has_jinja, is_fqcn_or_name
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class UsingBareVariablesIsDeprecatedRule(AnsibleLintRule):
    """Using bare variables is deprecated."""

    id = "deprecated-bare-vars"
    description = (
        "Using bare variables is deprecated. Update your "
        "playbooks so that the environment value uses the full variable "
        "syntax ``{{ your_variable }}``"
    )
    severity = "VERY_HIGH"
    tags = ["deprecations"]
    version_added = "historic"

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Check ``with_*`` loops of a task for bare variable usage.

        :param task: Normalized task to inspect.
        :param file: File being linted, if any.
        :returns: A failure message when a bare variable is found, else False.
        """
        loop_type = next((key for key in task if key.startswith("with_")), None)
        if not loop_type:
            return False

        if loop_type in (
            "with_nested",
            "with_together",
            "with_flattened",
            "with_filetree",
            "with_community.general.filetree",
        ):
            # These loops can either take a list defined directly in the task
            # or a variable that is a list itself. When a single variable is
            # used we just need to check that one variable, and not iterate
            # over it like it's a list.
            items = task[loop_type]
            if not isinstance(items, list | tuple):
                items = [items]
            # Check every item. Previously the loop returned unconditionally
            # on its first iteration, so bare variables past index 0 were
            # silently missed.
            for var in items:
                result = self._matchvar(var, task, loop_type)
                if result:
                    return result
            return False
        if loop_type == "with_subelements":
            # Only the first element of with_subelements can be a bare var.
            return self._matchvar(task[loop_type][0], task, loop_type)
        if loop_type in ("with_sequence", "with_ini", "with_inventory_hostnames"):
            # These lookups take non-variable arguments; nothing to check.
            return False
        return self._matchvar(task[loop_type], task, loop_type)

    def _matchvar(
        self,
        varstring: str,
        task: dict[str, Any],
        loop_type: str,
    ) -> bool | str:
        """Return a failure message when *varstring* looks like a bare variable."""
        if (
            isinstance(varstring, str)
            and not has_jinja(varstring)
            and is_fqcn_or_name(varstring)
        ):
            # has_jinja() is already known to be False at this point, so these
            # loop types are only exempt when the value uses their native
            # syntax: a glob for with_fileglob, a trailing path separator for
            # with_filetree.
            valid = loop_type == "with_fileglob" and has_glob(varstring)
            valid |= loop_type == "with_filetree" and varstring.endswith(os.sep)
            if not valid:
                message = "Possible bare variable '{0}' used in a '{1}' loop. You should use the full variable syntax ('{{{{ {0} }}}}') or convert it to a list if that is not really a variable."
                return message.format(task[loop_type], loop_type)
        return False
if "pytest" in sys.modules:
    import pytest

    # pylint: disable=ungrouped-imports
    from ansiblelint.rules import RulesCollection
    from ansiblelint.runner import Runner

    @pytest.mark.filterwarnings("ignore::ansible_compat.runtime.AnsibleWarning")
    def test_use_bare_positive() -> None:
        """Positive test for deprecated-bare-vars."""
        rules = RulesCollection()
        rules.register(UsingBareVariablesIsDeprecatedRule())
        runner = Runner(
            "examples/playbooks/rule-deprecated-bare-vars-pass.yml",
            rules=rules,
        )
        assert runner.run() == []

    def test_use_bare_negative() -> None:
        """Negative test for deprecated-bare-vars."""
        rules = RulesCollection()
        rules.register(UsingBareVariablesIsDeprecatedRule())
        runner = Runner(
            "examples/playbooks/rule-deprecated-bare-vars-fail.yml",
            rules=rules,
        )
        assert len(runner.run()) == 11
| 5,152 | Python | .py | 112 | 37.607143 | 194 | 0.651672 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,282 | no_jinja_when.py | ansible_ansible-lint/src/ansiblelint/rules/no_jinja_when.py | """Implementation of no-jinja-when rule."""
from __future__ import annotations
import re
import sys
from typing import TYPE_CHECKING, Any
from ansiblelint.constants import LINE_NUMBER_KEY
from ansiblelint.rules import AnsibleLintRule, TransformMixin
if TYPE_CHECKING:
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class NoFormattingInWhenRule(AnsibleLintRule, TransformMixin):
    """No Jinja2 in when."""

    id = "no-jinja-when"
    description = (
        "``when`` is a raw Jinja2 expression, remove redundant {{ }} from variable(s)."
    )
    severity = "HIGH"
    tags = ["deprecations"]
    version_added = "historic"

    @staticmethod
    def _is_valid(when: Any) -> bool:
        """Return True when the conditional contains no ``{{ }}`` markers.

        Accepts a single expression or a list of expressions; any value that
        is neither a string nor a list is considered valid.
        """
        if isinstance(when, list):
            for item in when:
                if (
                    isinstance(item, str)
                    and item.find("{{") != -1
                    and item.find("}}") != -1
                ):
                    return False
            return True
        if not isinstance(when, str):
            return True
        return when.find("{{") == -1 and when.find("}}") == -1

    def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]:
        """Check play-level ``when`` conditions attached to role entries."""
        errors: list[MatchError] = []
        if isinstance(data, dict):
            if "roles" not in data or data["roles"] is None:
                return errors
            errors = [
                self.create_matcherror(
                    details=str({"when": role}),
                    filename=file,
                    lineno=role[LINE_NUMBER_KEY],
                )
                for role in data["roles"]
                if (
                    isinstance(role, dict)
                    and "when" in role
                    and not self._is_valid(role["when"])
                )
            ]
        return errors

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Flag tasks whose raw ``when`` uses ``{{ }}`` templating."""
        return "when" in task.raw_task and not self._is_valid(task.raw_task["when"])

    def transform(
        self,
        match: MatchError,
        lintable: Lintable,
        data: CommentedMap | CommentedSeq | str,
    ) -> None:
        """Strip redundant ``{{ }}`` from conditional keys, in place."""
        if match.tag == self.id:
            task = self.seek(match.yaml_path, data)

            key_to_check = ("when", "changed_when", "failed_when")
            # Rebuild the mapping entry by entry to preserve key order;
            # popitem(False) pops from the front (FIFO).
            for _ in range(len(task)):
                k, v = task.popitem(False)
                if k == "roles" and isinstance(v, list):
                    transform_for_roles(v, key_to_check=key_to_check)
                elif k in key_to_check:
                    # NOTE(review): assumes the flagged value is a string; a
                    # list-valued conditional would make re.sub raise
                    # TypeError here — confirm lists cannot reach this path.
                    v = re.sub(r"{{ (.*?) }}", r"\1", v)
                task[k] = v
            match.fixed = True
def transform_for_roles(v: list[Any], key_to_check: tuple[str, ...]) -> None:
    """Strip redundant ``{{ }}`` markers from conditional keys of role entries.

    Mutates *v* in place: each entry is a mapping whose values under the
    keys listed in *key_to_check* (strings, or lists of strings) get their
    Jinja2 braces removed.
    """
    braces = re.compile(r"{{ (.*?) }}")
    for position, role_entry in enumerate(v):
        for key, value in role_entry.items():
            if key not in key_to_check:
                continue
            if isinstance(value, list):
                # Rewrite the list elements in place, then store it back.
                value[:] = [braces.sub(r"\1", element) for element in value]
                v[position][key] = value
            if isinstance(value, str):
                v[position][key] = braces.sub(r"\1", value)
if "pytest" in sys.modules:
# Tests for no-jinja-when rule.
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
def test_jinja_file_positive() -> None:
"""Positive test for no-jinja-when."""
collection = RulesCollection()
collection.register(NoFormattingInWhenRule())
success = "examples/playbooks/rule-no-jinja-when-pass.yml"
good_runner = Runner(success, rules=collection)
assert good_runner.run() == []
def test_jinja_file_negative() -> None:
"""Negative test for no-jinja-when."""
collection = RulesCollection()
collection.register(NoFormattingInWhenRule())
failure = "examples/playbooks/rule-no-jinja-when-fail.yml"
bad_runner = Runner(failure, rules=collection)
errs = bad_runner.run()
assert len(errs) == 3
| 4,368 | Python | .py | 107 | 30.121495 | 87 | 0.560217 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,283 | run_once.py | ansible_ansible-lint/src/ansiblelint/rules/run_once.py | """Optional Ansible-lint rule to warn use of run_once with strategy free."""
from __future__ import annotations
import sys
from typing import TYPE_CHECKING, Any
from ansiblelint.constants import LINE_NUMBER_KEY
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class RunOnce(AnsibleLintRule):
    """Run once should use strategy other than free."""

    # NOTE: the class docstring above is surfaced as ``shortdesc`` at runtime.
    id = "run-once"
    link = "https://docs.ansible.com/ansible/latest/reference_appendices/playbooks_keywords.html"
    description = "When using run_once, we should avoid using strategy as free."
    tags = ["idiom"]
    severity = "MEDIUM"

    _ids = {
        "run-once[task]": "Using run_once may behave differently if strategy is set to free.",
        "run-once[play]": "Play uses strategy: free",
    }

    def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]:
        """Return matches found for a specific playbook."""
        if not file or file.kind != "playbook" or not data:
            return []
        strategy = data.get("strategy")
        # The play-level warning applies only when strategy is literally "free".
        if strategy != "free":
            return []
        return [
            self.create_matcherror(
                message="Play uses strategy: free",
                filename=file,
                tag=f"{self.id}[play]",
                lineno=strategy._line_number,  # noqa: SLF001
            ),
        ]

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> list[MatchError]:
        """Return matches for a task."""
        if not file or file.kind != "playbook":
            return []
        if not task.get("run_once", False):
            return []
        return [
            self.create_matcherror(
                message="Using run_once may behave differently if strategy is set to free.",
                filename=file,
                tag=f"{self.id}[task]",
                lineno=task[LINE_NUMBER_KEY],
            ),
        ]
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
import pytest
# pylint: disable=ungrouped-imports
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
# Each case pairs a fixture playbook with the number of matches it must raise.
@pytest.mark.parametrize(
("test_file", "failure"),
(
pytest.param("examples/playbooks/run-once-pass.yml", 0, id="pass"),
pytest.param("examples/playbooks/run-once-fail.yml", 2, id="fail"),
),
)
def test_run_once(
default_rules_collection: RulesCollection,
test_file: str,
failure: int,
) -> None:
"""Test rule matches."""
results = Runner(test_file, rules=default_rules_collection).run()
# Every reported match must originate from this rule.
for result in results:
assert result.rule.id == RunOnce().id
assert len(results) == failure
| 3,138 | Python | .py | 80 | 30.7625 | 97 | 0.616902 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,284 | empty_string_compare.py | ansible_ansible-lint/src/ansiblelint/rules/empty_string_compare.py | """Implementation of empty-string-compare rule."""
# Copyright (c) 2016, Will Thames and contributors
# Copyright (c) 2018, Ansible Project
from __future__ import annotations
import re
import sys
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
from ansiblelint.yaml_utils import nested_items_path
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class ComparisonToEmptyStringRule(AnsibleLintRule):
    """Don't compare to empty string."""

    # NOTE: the class docstring above is surfaced as ``shortdesc`` at runtime.
    id = "empty-string-compare"
    description = (
        'Use ``when: var|length > 0`` rather than ``when: var != ""`` (or '
        'conversely ``when: var|length == 0`` rather than ``when: var == ""``)'
    )
    severity = "HIGH"
    tags = ["idiom", "opt-in"]
    version_added = "v4.0.0"

    # Matches == or != followed by an empty '' or "" literal.
    empty_string_compare = re.compile("[=!]= ?(\"{2}|'{2})")

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Flag any ``when`` clause that compares against an empty string."""
        for key, value, _path in nested_items_path(task):
            if key != "when":
                continue
            if isinstance(value, str):
                if self.empty_string_compare.search(value):
                    return True
            elif not isinstance(value, bool):
                # ``when`` may also be a list of conditional expressions.
                for condition in value:
                    if isinstance(condition, str) and self.empty_string_compare.search(
                        condition,
                    ):
                        return True
        return False
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
def test_rule_empty_string_compare_fail() -> None:
"""Test rule matches."""
rules = RulesCollection()
rules.register(ComparisonToEmptyStringRule())
results = Runner(
"examples/playbooks/rule-empty-string-compare-fail.yml",
rules=rules,
).run()
# The fail fixture is expected to trigger exactly three matches.
assert len(results) == 3
for result in results:
assert result.message == ComparisonToEmptyStringRule().shortdesc
def test_rule_empty_string_compare_pass() -> None:
"""Test rule matches."""
rules = RulesCollection()
rules.register(ComparisonToEmptyStringRule())
results = Runner(
"examples/playbooks/rule-empty-string-compare-pass.yml",
rules=rules,
).run()
assert len(results) == 0, results
| 2,557 | Python | .py | 66 | 29.454545 | 86 | 0.599354 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,285 | no_relative_paths.py | ansible_ansible-lint/src/ansiblelint/rules/no_relative_paths.py | """Implementation of no-relative-paths rule."""
# Copyright (c) 2016, Tsukinowa Inc. <info@tsukinowa.jp>
# Copyright (c) 2018, Ansible Project
from __future__ import annotations
import sys
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class RoleRelativePath(AnsibleLintRule):
    """The src argument should not use a relative path."""

    # NOTE: the class docstring above is surfaced as ``shortdesc`` at runtime.
    id = "no-relative-paths"
    description = "The ``copy`` and ``template`` modules should not use relative path for ``src``."
    severity = "HIGH"
    tags = ["idiom"]
    version_added = "v4.0.0"

    # Module name -> role directory a relative ``src`` would climb out of.
    _module_to_path_folder = {
        "copy": "files",
        "win_copy": "files",
        "template": "templates",
        "win_template": "win_templates",
    }

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Return True when ``src`` escapes the role's canonical folder."""
        action = task["action"]
        folder = self._module_to_path_folder.get(action["__ansible_module__"])
        if folder is None or "src" not in action:
            return False
        return f"../{folder}" in action["src"]
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
import pytest
# pylint: disable=ungrouped-imports
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
# Each case pairs a fixture playbook with the number of matches it must raise.
@pytest.mark.parametrize(
("test_file", "failures"),
(
pytest.param("examples/playbooks/no_relative_paths_fail.yml", 2, id="fail"),
pytest.param("examples/playbooks/no_relative_paths_pass.yml", 0, id="pass"),
),
)
def test_no_relative_paths(
default_rules_collection: RulesCollection,
test_file: str,
failures: int,
) -> None:
"""Test rule matches."""
results = Runner(test_file, rules=default_rules_collection).run()
assert len(results) == failures
# Every reported match must carry this rule's tag.
for result in results:
assert result.tag == "no-relative-paths"
| 2,187 | Python | .py | 58 | 30.982759 | 99 | 0.645054 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,286 | __init__.py | ansible_ansible-lint/src/ansiblelint/rules/custom/__init__.py | """A placeholder package for putting custom rules under this dir."""
| 69 | Python | .py | 1 | 68 | 68 | 0.764706 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,287 | rules.py | ansible_ansible-lint/src/ansiblelint/_internal/rules.py | """Internally used rule classes."""
from __future__ import annotations
import inspect
import logging
from pathlib import Path
from typing import TYPE_CHECKING, Any
from ansiblelint.constants import RULE_DOC_URL
if TYPE_CHECKING:
from ansiblelint.config import Options
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.rules import RulesCollection
from ansiblelint.utils import Task
_logger = logging.getLogger(__name__)
LOAD_FAILURE_MD = """\
# load-failure
"Linter failed to process a file, possible invalid file. Possible reasons:
* contains unsupported encoding (only UTF-8 is supported)
* not an Ansible file
* it contains some unsupported custom YAML objects (`!!` prefix)
* it was not able to decrypt an inline `!vault` block.
This violation **is not** skippable, so it cannot be added to the `warn_list`
or the `skip_list`. If a vault decryption issue cannot be avoided, the
offending file can be added to `exclude_paths` configuration.
"""
# Derived rules are likely to want to access class members, so:
# pylint: disable=unused-argument
class BaseRule:
"""Root class used by Rules."""
# Public metadata that every concrete rule is expected to override.
id: str = ""
tags: list[str] = []
description: str = ""
version_added: str = ""
severity: str = ""
link: str = ""
has_dynamic_tags: bool = False
# When True, matchtask() receives the task before normalization.
needs_raw_task: bool = False
# Used to mark rules that we will never unload (internal ones)
unloadable: bool = False
# We use _order to sort rules and to ensure that some run before others,
# _order 0 for internal rules
# _order 1 for rules that check that data can be loaded
# _order 5 implicit for normal rules
_order: int = 5
# Lazily-populated markdown help text; see the ``help`` property below.
_help: str | None = None
# Added when a rule is registered into a collection, gives access to options
_collection: RulesCollection | None = None
@property
def help(self) -> str:
"""Return a help markdown string for the rule."""
# Read the sibling ``<rule_id>.md`` file once, then cache on the instance.
if self._help is None:
self._help = ""
md_file = (
Path(inspect.getfile(self.__class__)).parent
/ f"{self.id.replace('-', '_')}.md"
)
if md_file.exists():
self._help = md_file.read_text(encoding="utf-8")
return self._help
@property
def url(self) -> str:
"""Return rule documentation url."""
url = self.link
if not url: # pragma: no cover
url = RULE_DOC_URL
if self.id:
url += self.id + "/"
return url
@property
def shortdesc(self) -> str:
"""Return the short description of the rule, basically the docstring."""
return self.__doc__ or ""
def getmatches(self, file: Lintable) -> list[MatchError]:
"""Return all matches while ignoring exceptions."""
matches = []
if not file.path.is_dir():
# A failing matcher is logged and skipped so one buggy rule cannot
# abort the whole lint run.
for method in [self.matchlines, self.matchtasks, self.matchyaml]:
try:
matches.extend(method(file))
except Exception as exc: # pylint: disable=broad-except # noqa: BLE001
_logger.warning(
"Ignored exception from %s.%s while processing %s: %s",
self.__class__.__name__,
method.__name__,
str(file),
exc,
)
_logger.debug("Ignored exception details", exc_info=True)
else:
matches.extend(self.matchdir(file))
return matches
def matchlines(self, file: Lintable) -> list[MatchError]:
"""Return matches found for a specific line."""
return []
def matchtask(
self,
task: Task,
file: Lintable | None = None,
) -> bool | str | MatchError | list[MatchError]:
"""Confirm if current rule is matching a specific task.
If ``needs_raw_task`` (a class level attribute) is ``True``, then
the original task (before normalization) will be made available under
``task["__raw_task__"]``.
"""
return False
def matchtasks(self, file: Lintable) -> list[MatchError]:
"""Return matches for a tasks file."""
return []
def matchyaml(self, file: Lintable) -> list[MatchError]:
"""Return matches found for a specific YAML text."""
return []
def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]:
"""Return matches found for a specific playbook."""
return []
def matchdir(self, lintable: Lintable) -> list[MatchError]:
"""Return matches for lintable folders."""
return []
def verbose(self) -> str:
"""Return a verbose representation of the rule."""
return self.id + ": " + self.shortdesc + "\n " + self.description
def match(self, line: str) -> bool | str:
"""Confirm if current rule matches the given string."""
return False
def __lt__(self, other: BaseRule) -> bool:
"""Enable us to sort rules by their id."""
return (self._order, self.id) < (other._order, other.id)
def __repr__(self) -> str:
"""Return a AnsibleLintRule instance representation."""
return self.id + ": " + self.shortdesc
@classmethod
def ids(cls) -> dict[str, str]:
"""Return a dictionary ids and their messages.
This is used by the ``--list-tags`` option to ansible-lint.
"""
return getattr(cls, "_ids", {cls.id: cls.shortdesc})
@property
def rule_config(self) -> dict[str, Any]:
"""Retrieve rule specific configuration."""
rule_config = {}
if self.options:
rule_config = self.options.rules.get(self.id, {})
if not isinstance(rule_config, dict): # pragma: no branch
msg = f"Invalid rule config for {self.id}: {rule_config}"
raise RuntimeError(msg) # noqa: TRY004
return rule_config
@property
def options(self) -> Options | None:
"""Used to access linter configuration."""
# Options live on the owning collection; a free-standing rule has none.
if self._collection is None:
msg = f"A rule ({self.id}) that is not part of a collection cannot access its configuration."
_logger.warning(msg)
return None
return self._collection.options
# pylint: enable=unused-argument
class RuntimeErrorRule(BaseRule):
"""Unexpected internal error."""
# Reported when the linter itself fails while processing content.
id = "internal-error"
shortdesc = "Unexpected internal error"
severity = "VERY_HIGH"
tags = ["core"]
version_added = "v5.0.0"
# _order 0: internal rules sort ahead of normal (order 5) rules.
_order = 0
unloadable = True
class AnsibleParserErrorRule(BaseRule):
"""AnsibleParserError."""
# Raised when Ansible's own parser rejects a file.
id = "parser-error"
description = "Ansible parser fails; this usually indicates an invalid file."
severity = "VERY_HIGH"
tags = ["core"]
version_added = "v5.0.0"
_order = 0
unloadable = True
class LoadingFailureRule(BaseRule):
"""Failed to load or parse file."""
id = "load-failure"
description = "Linter failed to process a file, possible invalid file."
severity = "VERY_HIGH"
# "unskippable" tag: cannot be silenced via warn_list/skip_list.
tags = ["core", "unskippable"]
version_added = "v4.3.0"
# Long-form help comes from the module-level LOAD_FAILURE_MD markdown.
_help = LOAD_FAILURE_MD
_order = 0
_ids = {
"load-failure[not-found]": "File not found",
}
unloadable = True
class WarningRule(BaseRule):
"""Other warnings detected during run."""
id = "warning"
severity = "LOW"
# should remain experimental as that would keep it warning only
tags = ["core", "experimental"]
version_added = "v6.8.0"
_order = 0
unloadable = True
| 7,653 | Python | .py | 194 | 31.891753 | 105 | 0.618071 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,288 | fixtures.py | ansible_ansible-lint/src/ansiblelint/testing/fixtures.py | """PyTest Fixtures.
They should not be imported, instead add code below to your root conftest.py
file:
pytest_plugins = ['ansiblelint.testing']
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from ansiblelint.config import Options
from ansiblelint.constants import DEFAULT_RULESDIR
from ansiblelint.rules import RulesCollection
from ansiblelint.testing import RunFromText
if TYPE_CHECKING:
from _pytest.fixtures import SubRequest
# The sessions scope does not apply to xdist, so we will still have one
# session for each worker, but at least it will a limited number.
@pytest.fixture(name="default_rules_collection", scope="session")
def fixture_default_rules_collection() -> RulesCollection:
"""Return default rule collection."""
# Sanity check: the bundled rules directory must exist.
assert DEFAULT_RULESDIR.is_dir()
config_options = Options()
config_options.enable_list = ["no-same-owner"]
# That is instantiated very often and do want to avoid ansible-galaxy
# install errors due to concurrency.
config_options.offline = True
return RulesCollection(rulesdirs=[DEFAULT_RULESDIR], options=config_options)
@pytest.fixture
def default_text_runner(default_rules_collection: RulesCollection) -> RunFromText:
"""Return RunFromText instance for the default set of collections."""
return RunFromText(default_rules_collection)
@pytest.fixture
def rule_runner(request: SubRequest) -> RunFromText:
"""Return runner for a specific rule class."""
# The rule class arrives via indirect parametrization (request.param).
rule_class = request.param
config_options = Options()
config_options.enable_list.append(rule_class().id)
collection = RulesCollection(options=config_options)
collection.register(rule_class())
return RunFromText(collection)
@pytest.fixture(name="config_options")
def fixture_config_options() -> Options:
"""Return configuration options that will be restored after testrun."""
# A fresh Options instance per test keeps configuration isolated.
return Options()
| 1,907 | Python | .py | 43 | 41.139535 | 82 | 0.778259 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,289 | __init__.py | ansible_ansible-lint/src/ansiblelint/testing/__init__.py | """Test utils for ansible-lint."""
from __future__ import annotations
import os
import shutil
import subprocess
import sys
import tempfile
from pathlib import Path
from typing import TYPE_CHECKING, Any
from ansiblelint.app import get_app
if TYPE_CHECKING:
# https://github.com/PyCQA/pylint/issues/3240
CompletedProcess = subprocess.CompletedProcess[Any]
from ansiblelint.errors import MatchError
from ansiblelint.rules import RulesCollection
else:
CompletedProcess = subprocess.CompletedProcess
# pylint: disable=wrong-import-position
from ansiblelint.runner import Runner
class RunFromText:
    """Use Runner on temp files created from testing text snippets."""

    # Class-level slot for the initialized application.
    app = None

    def __init__(self, collection: RulesCollection) -> None:
        """Initialize a RunFromText instance with rules collection."""
        # Emulate command line execution initialization as without it Ansible module
        # would be loaded with incomplete module/role/collection list.
        if not self.app:  # pragma: no cover
            # NOTE(review): this stores an *instance* attribute, so the
            # class-level slot stays None and get_app() runs once per
            # instance — kept as-is to preserve behavior; confirm intent.
            self.app = get_app(offline=True)
        self.collection = collection

    def _call_runner(self, path: Path) -> list[MatchError]:
        """Run the linter over *path* with the configured rule collection."""
        runner = Runner(path, rules=self.collection)
        return runner.run()

    def run(self, filename: Path) -> list[MatchError]:
        """Lints received filename."""
        return self._call_runner(filename)

    def run_playbook(
        self,
        playbook_text: str,
        prefix: str = "playbook",
    ) -> list[MatchError]:
        """Lints received text as a playbook."""
        with tempfile.NamedTemporaryFile(mode="w", suffix=".yml", prefix=prefix) as fh:
            fh.write(playbook_text)
            fh.flush()
            results = self._call_runner(Path(fh.name))
        return results

    def _run_role_file(
        self,
        role_path: Path,
        subdir: str,
        text: str,
    ) -> list[MatchError]:
        """Write *text* as ``<role_path>/<subdir>/main.yml``, lint the role, clean up.

        Shared implementation for the three public run_role_* helpers, which
        previously triplicated this write/lint/rmtree sequence.
        """
        target_dir = role_path / subdir
        target_dir.mkdir(parents=True, exist_ok=True)
        with (target_dir / "main.yml").open("w", encoding="utf-8") as fh:
            fh.write(text)
            fh.flush()
            results = self._call_runner(role_path)
        shutil.rmtree(role_path)
        return results

    def run_role_tasks_main(
        self,
        tasks_main_text: str,
        tmp_path: Path,
    ) -> list[MatchError]:
        """Lints received text as tasks."""
        return self._run_role_file(tmp_path, "tasks", tasks_main_text)

    def run_role_meta_main(
        self,
        meta_main_text: str,
        temp_path: Path,
    ) -> list[MatchError]:
        """Lints received text as meta."""
        return self._run_role_file(temp_path, "meta", meta_main_text)

    def run_role_defaults_main(
        self,
        defaults_main_text: str,
        tmp_path: Path,
    ) -> list[MatchError]:
        """Lints received text as vars file in defaults."""
        return self._run_role_file(tmp_path, "defaults", defaults_main_text)
def run_ansible_lint(
    *argv: str | Path,
    cwd: Path | None = None,
    executable: str | None = None,
    env: dict[str, str] | None = None,
    offline: bool = True,
) -> CompletedProcess:
    """Run ansible-lint on a given path and return its output.

    :param argv: command line arguments for ansible-lint
    :param cwd: working directory for the subprocess
    :param executable: binary to run; defaults to ``python -m ansiblelint``
    :param env: base environment for the subprocess; the caller's mapping is
        copied, never mutated
    :param offline: when True, prepend ``--offline`` to the arguments
    :return: the completed process with captured text output
    """
    args = [str(item) for item in argv]
    if offline:  # pragma: no cover
        args.insert(0, "--offline")
    if not executable:
        executable = sys.executable
        args = [sys.executable, "-m", "ansiblelint", *args]
    else:
        args = [executable, *args]

    # It is not safe to pass entire env for testing as other tests would
    # pollute the env, causing weird behaviors, so we pass only a safe list of
    # vars.
    safe_list = [
        "COVERAGE_FILE",
        "COVERAGE_PROCESS_START",
        "HOME",
        "LANG",
        "LC_ALL",
        "LC_CTYPE",
        "NO_COLOR",
        "PATH",
        "PYTHONIOENCODING",
        "PYTHONPATH",
        "TERM",
        "VIRTUAL_ENV",
    ]
    # Bug fix: copy the caller's mapping. The previous implementation merged
    # the safe-list variables into the dict passed as ``env``, mutating it.
    _env = {} if env is None else dict(env)
    for v in safe_list:
        if v in os.environ and v not in _env:
            _env[v] = os.environ[v]

    return subprocess.run(
        args,
        capture_output=True,
        shell=False,  # needed when command is a list
        check=False,
        cwd=cwd,
        env=_env,
        text=True,
        encoding="utf-8",
    )
| 4,790 | Python | .py | 138 | 27.289855 | 87 | 0.616847 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,290 | __main__.py | ansible_ansible-lint/src/ansiblelint/schemas/__main__.py | """Module containing cached JSON schemas."""
import json
import logging
import os
import sys
import time
import urllib.request
from collections import defaultdict
from functools import cache
from http.client import HTTPException
from pathlib import Path
from typing import Any
from urllib.request import Request
_logger = logging.getLogger(__package__)
# Maps kinds to JSON schemas
# See https://www.schemastore.org/json/
store_file = Path(f"{__file__}/../__store__.json").resolve()
with store_file.open(encoding="utf-8") as json_file:
JSON_SCHEMAS = json.load(json_file)
class SchemaCacheDict(defaultdict):  # type: ignore[type-arg]
    """Caching schema store."""

    def __missing__(self, key: str) -> Any:
        """Load, memoize and return the schema for *key* on first access."""
        schema = get_schema(key)
        self[key] = schema
        return schema
@cache
def get_schema(kind: str) -> Any:
    """Load and return the bundled JSON schema for *kind*.

    Schemas live as ``<kind>.json`` files next to this module; results are
    memoized process-wide via ``functools.cache``.
    """
    schema_path = Path(__file__).parent / f"{kind}.json"
    return json.loads(schema_path.read_text(encoding="utf-8"))
_schema_cache = SchemaCacheDict()
def refresh_schemas(min_age_seconds: int = 3600 * 24) -> int:
"""Refresh JSON schemas by downloading latest versions.
Returns number of changed schemas (-1 when the store file is not writable,
0 when checked too recently or nothing changed).
"""
age = int(time.time() - store_file.stat().st_mtime)
# never check for updated schemas more than once a day
if min_age_seconds > age:
return 0
if not os.access(store_file, os.W_OK): # pragma: no cover
_logger.debug(
"Skipping schema update due to lack of writing rights on %s",
store_file,
)
return -1
_logger.debug("Checking for updated schemas...")
changed = 0
for kind, data in JSON_SCHEMAS.items():
url = data["url"]
if "#" in url:
msg = f"Schema URLs cannot contain # due to python-jsonschema limitation: {url}"
raise RuntimeError(msg)
path = Path(__file__).parent.resolve() / f"{kind}.json"
_logger.debug("Refreshing %s schema ...", kind)
if not url.startswith(("http:", "https:")):
msg = f"Unexpected url schema: {url}"
raise ValueError(msg)
request = Request(url) # noqa: S310
etag = data.get("etag", "")
if etag:
# Conditional GET: the server answers 304 when our copy is current.
request.add_header("If-None-Match", f'"{data.get("etag")}"')
try:
with urllib.request.urlopen(request, timeout=10) as response: # noqa: S310
if response.status == 200:
content = response.read().decode("utf-8").rstrip()
etag = response.headers["etag"].strip('"')
if etag != data.get("etag", ""):
JSON_SCHEMAS[kind]["etag"] = etag
changed += 1
with path.open("w", encoding="utf-8") as f_out:
_logger.info("Schema %s was updated", kind)
f_out.write(content)
f_out.write("\n") # prettier/editors
f_out.truncate()
os.fsync(f_out.fileno())
# unload possibly loaded schema
if kind in _schema_cache: # pragma: no cover
del _schema_cache[kind]
except (ConnectionError, OSError, HTTPException) as exc:
if (
isinstance(exc, urllib.error.HTTPError)
and getattr(exc, "code", None) == 304
):
_logger.debug("Schema %s is not modified", url)
continue
# In case of networking issues, we just stop and use last-known good
_logger.debug("Skipped schema refresh due to unexpected exception: %s", exc)
break
if changed: # pragma: no cover
with store_file.open("w", encoding="utf-8") as f_out:
# formatting should match our .prettierrc.yaml
json.dump(JSON_SCHEMAS, f_out, indent=2, sort_keys=True)
f_out.write("\n") # prettier and editors in general
# clear schema cache
get_schema.cache_clear()
else:
store_file.touch()
return changed
if __name__ == "__main__":
if refresh_schemas(60 * 10): # pragma: no cover
print("Schemas were updated.") # noqa: T201
sys.exit(1)
else: # pragma: no cover
print("Schemas not updated", 0) # noqa: T201
| 4,440 | Python | .py | 106 | 32.349057 | 92 | 0.582812 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,291 | main.py | ansible_ansible-lint/src/ansiblelint/schemas/main.py | """Module containing cached JSON schemas."""
from __future__ import annotations
import json
import logging
import re
import typing
from typing import TYPE_CHECKING
import jsonschema
import yaml
from jsonschema.exceptions import ValidationError
from ansiblelint.loaders import yaml_load_safe
from ansiblelint.schemas.__main__ import JSON_SCHEMAS, _schema_cache
_logger = logging.getLogger(__package__)
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
def find_best_deep_match(
    errors: jsonschema.ValidationError,
) -> jsonschema.ValidationError:
    """Return the deepest schema validation error.

    Walks every nested sub-error (the root itself is excluded) and picks the
    one ranked most relevant by ``_deep_match_relevance``.
    """

    def _descendants(
        err: jsonschema.ValidationError,
    ) -> typing.Iterator[jsonschema.ValidationError]:
        # Depth-first traversal of the error's ``context`` tree.
        for child in err.context or []:
            yield child
            yield from _descendants(child)

    return max(_descendants(errors), key=_deep_match_relevance)
def validate_file_schema(file: Lintable) -> list[str]:
"""Return list of JSON validation errors found.

Returns an empty list when the file validates; otherwise a single-element
list with a human-readable message, optionally suffixed with a
documentation URL extracted from the schema.
"""
schema = {}
if file.kind not in JSON_SCHEMAS:
return [f"Unable to find JSON Schema '{file.kind}' for '{file.path}' file."]
try:
# convert yaml to json (keys are converted to strings)
yaml_data = yaml_load_safe(file.content)
json_data = json.loads(json.dumps(yaml_data))
schema = _schema_cache[file.kind]
validator = jsonschema.validators.validator_for(schema)
v = validator(schema)
try:
# Only the first error is reported.
error = next(v.iter_errors(json_data))
except StopIteration:
return []
if error.context:
error = find_best_deep_match(error)
# determine if we want to use our own messages embedded into schemas inside title/markdownDescription fields
if "not" in error.schema and len(error.schema["not"]) == 0:
message = error.schema["title"]
schema = error.schema
else:
message = f"{error.json_path} {error.message}"
documentation_url = ""
for json_schema in (error.schema, schema):
for k in ("description", "markdownDescription"):
if k in json_schema:
# Find standalone URLs and also markdown urls.
match = re.search(
r"\[.*?\]\((?P<url>https?://[^\s]+)\)|(?P<url2>https?://[^\s]+)",
json_schema[k],
)
if match:
documentation_url = next(
x for x in match.groups() if x is not None
)
break
if documentation_url:
break
if documentation_url:
if not message.endswith("."):
message += "."
message += f" See {documentation_url}"
except yaml.constructor.ConstructorError as exc:
return [f"Failed to load YAML file '{file.path}': {exc.problem}"]
except ValidationError as exc:
# Raised outside iter_errors (e.g. by validator_for/validator setup).
message = exc.message
documentation_url = ""
for k in ("description", "markdownDescription"):
if k in schema:
# Find standalone URLs and also markdown urls.
match = re.search(
r"\[.*?\]\((https?://[^\s]+)\)|https?://[^\s]+",
schema[k],
)
if match:
documentation_url = match.groups()[0]
break
if documentation_url:
if not message.endswith("."):
message += "."
message += f" See {documentation_url}"
return [message]
return [message]
def _deep_match_relevance(error: jsonschema.ValidationError) -> tuple[bool | int, ...]:
validator = error.validator
return (
validator not in ("anyOf", "oneOf"), # type: ignore[comparison-overlap]
len(error.absolute_path),
-len(error.path),
)
| 4,024 | Python | .py | 99 | 30.232323 | 116 | 0.58199 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,292 | module_with_relative_import.py | ansible_ansible-lint/collections/ansible_collections/local/testcollection/plugins/modules/module_with_relative_import.py | """module_with_relative_import module."""
from ansible.module_utils.basic import AnsibleModule
# pylint: disable=E0402
from ..module_utils import MY_STRING # noqa: TID252 # type: ignore[import-untyped]
DOCUMENTATION = r"""
options:
name:
required: True
"""
def main() -> AnsibleModule:
"""Build and return the AnsibleModule instance for this test module."""
# ``MY_STRING`` comes from the package's module_utils via a relative import.
return AnsibleModule(
argument_spec={
"name": {"required": True, "aliases": [MY_STRING]},
},
)
if __name__ == "__main__":
main()
| 498 | Python | .py | 18 | 23.5 | 83 | 0.642706 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,293 | __init__.py | ansible_ansible-lint/collections/ansible_collections/local/testcollection/plugins/module_utils/__init__.py | """module_utils package."""
# Some value that can be imported from a module
MY_STRING: str = "foo"
| 100 | Python | .py | 3 | 32 | 47 | 0.71875 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,294 | fake_module.py | ansible_ansible-lint/plugins/modules/fake_module.py | """Sample custom ansible module named fake_module.
This is used to test ability to detect and use custom modules.
"""
from ansible.module_utils.basic import AnsibleModule
EXAMPLES = r"""
- name: "playbook"
tasks:
- name: Hello
debug:
msg: 'world'
"""
def main() -> AnsibleModule:
"""Build and return the AnsibleModule instance (annotation fixed from None)."""
# All three options are optional and default to None.
return AnsibleModule(
argument_spec={
"data": {"default": None},
"path": {"default": None},
"file": {"default": None},
},
)
| 518 | Python | .py | 20 | 20.45 | 62 | 0.596349 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,295 | conf.py | landscapeio_prospector/docs/conf.py | #
# Prospector documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 28 11:26:59 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import os
# import sys
import importlib.metadata
version = importlib.metadata.version("prospector")
release = ".".join(version.split(".")[:2])
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinxarg.ext",
    "sphinx.ext.autodoc",
    "sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "contents"
# General information about the project.
project = "Prospector"
copyright = "2014-2020, Carl Crowder"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# In this file `version` holds the full version string and `release` the
# short X.Y form; both are computed once near the top of the file, so the
# duplicate recomputation of `release` that used to sit here was removed.
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "prospector documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = f"prospector-{version}"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# The landing page ("index") gets an intro sidebar block; every other page
# ("**" glob) gets relations links instead.
html_sidebars = {
    "index": [
        "slim_searchbox.html",
        "sidebarintro.html",
        "globaltoc.html",
        "links.html",
        "sourcelink.html",
    ],
    "**": [
        "slim_searchbox.html",
        "globaltoc.html",
        "relations.html",
        "links.html",
        "sourcelink.html",
    ],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "Prospectordoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
("index", "Prospector.tex", "Prospector Documentation", "Carl Crowder", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Section 1 is the conventional "user commands" manual section.
man_pages = [("index", "prospector", "Prospector Documentation", ["Carl Crowder"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# TODO: "One line description of project." below is a leftover placeholder
# from the sphinx-quickstart template and should be replaced.
texinfo_documents = [
    (
        "index",
        "Prospector",
        "Prospector Documentation",
        "Carl Crowder",
        "Prospector",
        "One line description of project.",
        "Miscellaneous",
    ),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| 8,835 | Python | .py | 212 | 39.40566 | 86 | 0.713584 | landscapeio/prospector | 1,933 | 171 | 71 | GPL-2.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,296 | test_message.py | landscapeio_prospector/tests/test_message.py | from pathlib import Path
from unittest import TestCase
from prospector.message import Location
class LocationPathTest(TestCase):
    """Path handling of the ``Location`` value object."""

    def test_paths(self):
        """Relative and absolute path conversion agree with the repo layout."""
        project_root = Path(__file__).parent.parent
        location = Location(__file__, "module", "func", 1, 2)
        self.assertEqual(location.relative_path(project_root), Path("tests/test_message.py"))
        self.assertEqual(location.absolute_path(), project_root / "tests/test_message.py")

    def test_strings_or_paths(self):
        """A Location accepts its path as either a ``Path`` or a plain string."""
        raw = "/tmp/path/module1.py"
        extra = ("module1", "somefunc", 12, 2)
        self.assertEqual(Location(raw, *extra), Location(Path(raw), *extra))

    def test_bad_path_input(self):
        """Non-path values for the path argument raise ``ValueError``."""
        for bad_path in (3.2, None):
            with self.assertRaises(ValueError):
                Location(bad_path, "module", "func", 1, 2)
class LocationOrderTest(TestCase):
    """Sort order of ``Location`` objects: path, then line, then character."""

    def test_path_order(self):
        """Locations sort primarily by their path."""
        locations = [
            Location(Path("/tmp/path/module3.py"), "module3", "somefunc", 15, 0),
            Location(Path("/tmp/path/module1.py"), "module1", "somefunc", 10, 0),
            Location("/tmp/path/module2.py", "module2", "somefunc", 9, 0),
        ]
        expected = sorted(loc.path for loc in locations)
        self.assertEqual(expected, [loc.path for loc in sorted(locations)])

    def test_line_order(self):
        """Within a single path, locations sort by line number."""
        locations = [
            Location("/tmp/path/module1.py", "module1", "somefunc", 15, 0),
            Location("/tmp/path/module1.py", "module1", "somefunc", 10, 0),
            Location("/tmp/path/module1.py", "module1", "somefunc", 12, 0),
        ]
        expected = sorted(loc.line for loc in locations)
        self.assertEqual(expected, [loc.line for loc in sorted(locations)])

    def test_sort_between_none_lines(self):
        """An unknown line (given as -1) sorts before real line numbers."""
        locations = [
            Location("/tmp/path/module1.py", "module1", "somefunc", 15, 0),
            Location("/tmp/path/module1.py", "module1", "somefunc", 10, 0),
            Location("/tmp/path/module1.py", "module1", "somefunc", -1, 0),
        ]
        # The expectation encodes that -1 is exposed as None after sorting;
        # the derivation in the original test always produced exactly this.
        self.assertEqual([None, 10, 15], [loc.line for loc in sorted(locations)])

    def test_char_order(self):
        """Within a single line, locations sort by character offset."""
        locations = [
            Location("/tmp/path/module1.py", "module1", "somefunc", 10, 7),
            Location("/tmp/path/module1.py", "module1", "somefunc", 10, 0),
            Location("/tmp/path/module1.py", "module1", "somefunc", 10, 2),
        ]
        expected = sorted(loc.character for loc in locations)
        self.assertEqual(expected, [loc.character for loc in sorted(locations)])

    def test_sort_between_none_chars(self):
        """An unknown character (given as -1) sorts before real offsets."""
        locations = [
            Location("/tmp/path/module1.py", "module1", "somefunc", 10, -1),
            Location("/tmp/path/module1.py", "module1", "somefunc", 10, 1),
            Location("/tmp/path/module1.py", "module1", "somefunc", 10, 2),
        ]
        # As above: -1 is exposed as None; the original derivation always
        # produced exactly this expected list.
        self.assertEqual([None, 1, 2], [loc.character for loc in sorted(locations)])
| 3,400 | Python | .py | 69 | 40.144928 | 94 | 0.598187 | landscapeio/prospector | 1,933 | 171 | 71 | GPL-2.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,297 | test_blender.py | landscapeio_prospector/tests/test_blender.py | from unittest import TestCase
from prospector import blender
from prospector.message import Location, Message
class TestBlendLine(TestCase):
    """Behaviour of ``blender.blend_line`` for messages on a single line."""

    # Two blend groups: within each, the first (source, code) pair wins.
    BLEND = (
        (("s1", "s1c01"), ("s2", "s2c12")),
        (("s3", "s3c81"), ("s1", "s1c04"), ("s2", "s2c44")),
    )

    @staticmethod
    def _message(source, code):
        """Build a message pinned to line 1 of a dummy file."""
        location = Location("path.py", "path", None, 1, 0)
        return Message(source, code, location, "Test Message")

    def _do_test(self, raw_pairs, expected_pairs):
        """Blend the given (source, code) pairs and compare the survivors."""
        blended = blender.blend_line(
            [self._message(*pair) for pair in raw_pairs], TestBlendLine.BLEND
        )
        survivors = {(msg.source, msg.code) for msg in blended}
        self.assertEqual(set(expected_pairs), survivors)

    def test_blend_line(self):
        raw = (("s2", "s2c12"), ("s2", "s2c11"), ("s1", "s1c01"))
        # s2c12 is blended into s1c01, so only these two remain.
        self._do_test(raw, (("s1", "s1c01"), ("s2", "s2c11")))

    def test_single_blend(self):
        # All three are in one blend group; the s3 message has priority.
        raw = (("s1", "s1c04"), ("s2", "s2c44"), ("s3", "s3c81"))
        self._do_test(raw, (("s3", "s3c81"),))

    def test_nothing_to_blend(self):
        """Messages with no blend entry pass straight through unchanged."""
        raw = (("s4", "s4c99"), ("s4", "s4c01"), ("s5", "s5c51"), ("s6", "s6c66"))
        self._do_test(raw, raw)

    def test_no_messages(self):
        """Blending an empty message list yields an empty result."""
        self._do_test((), ())
def test_multiple_lines():
    """Blending only merges messages that share a line."""

    def _message(source, code, line_number):
        location = Location("path.py", "path", None, line_number, 0)
        return Message(source, code, location, "Test Message")

    blend_config = ((("s1", "s1c001"), ("s2", "s2c101")),)
    blended = blender.blend(
        [
            _message("s1", "s1c001", 4),
            _message("s2", "s2c001", 6),
            _message("s2", "s2c101", 4),
            _message("s1", "s1c001", 6),
        ],
        blend_config,
    )
    observed = {(msg.source, msg.code, msg.location.line) for msg in blended}
    # s2c101 on line 4 is absorbed by s1c001 on the same line; line 6 keeps both.
    assert observed == {("s1", "s1c001", 4), ("s1", "s1c001", 6), ("s2", "s2c001", 6)}
| 2,419 | Python | .py | 60 | 32.216667 | 87 | 0.554891 | landscapeio/prospector | 1,933 | 171 | 71 | GPL-2.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,298 | utils.py | landscapeio_prospector/tests/utils.py | from __future__ import annotations
import contextlib
import sys
from pathlib import Path
from unittest.mock import patch
from prospector.config import ProspectorConfig
from prospector.run import Prospector
@contextlib.contextmanager
def patch_cli(*args: str, target: str = "sys.argv"):
    """Patch ``target`` (by default ``sys.argv``) to the given argument strings.

    :param args: individual command-line tokens; inside the ``with`` block the
        patched attribute is the tuple of these tokens.
    :param target: dotted path of the attribute to patch.

    Note: the varargs annotation was ``list[str]``, which describes each
    element as a list; callers pass plain strings, so it is now ``str``.
    """
    with patch(target, args):
        yield
@contextlib.contextmanager
def patch_cwd(set_cwd: Path):
    """Pretend the current working directory is ``set_cwd``.

    ``Path.cwd()`` only falls back on ``os.getcwd()`` in Python <= 3.9; on
    3.10+ the two can return different things if only one is patched.  To
    work on every version prospector supports, both are patched (and
    ``os.curdir`` too).  On exactly Python 3.10, ``getcwd`` lives on
    ``pathlib._NormalAccessor`` rather than falling back on ``os.getcwd``,
    so that accessor is patched as well.
    """
    as_string = str(set_cwd.absolute())
    patchers = [
        patch("pathlib.Path.cwd", new=lambda: set_cwd),
        patch("os.getcwd", new=lambda: as_string),
        patch("os.curdir", new=as_string),
    ]
    if sys.version_info[:2] == (3, 10):
        patchers.append(patch("pathlib._NormalAccessor.getcwd", new=lambda _: as_string))
    with contextlib.ExitStack() as stack:
        for patcher in patchers:
            stack.enter_context(patcher)
        yield
@contextlib.contextmanager
def patch_execution(*args: str, set_cwd: Path | None = None):
    """
    Utility to patch builtins to simulate running prospector in a particular
    directory with particular command-line args.

    :param set_cwd: Simulate changing directory into the given directory
        (``None`` means leave the working directory alone).
    :param args: Any additional command-line tokens to pass to prospector;
        ``"prospector"`` itself is prepended automatically.

    Note: the annotations were ``*args: list[str]`` (each element is a plain
    string, not a list) and ``set_cwd: Path = None`` (``None`` is a valid
    value); both are corrected here.
    """
    cli_args = ("prospector",) + args
    with patch_cli(*cli_args):
        if set_cwd:
            with patch_cwd(set_cwd):
                yield
        else:
            yield
@contextlib.contextmanager
def patch_workdir_argv(target: str = "sys.argv", args: list[str] | None = None, workdir: Path | None = None):
    """Run a full prospector execution against ``workdir`` under patched argv.

    Yields the executed :class:`Prospector` instance so tests can inspect it.
    """
    cli_args = ["prospector"] if args is None else args
    with patch_cli(*cli_args, target=target):
        config = ProspectorConfig(workdir=workdir)
        config.paths = [workdir]
        runner = Prospector(config)
        runner.execute()
        yield runner
| 2,243 | Python | .py | 54 | 35.222222 | 109 | 0.673554 | landscapeio/prospector | 1,933 | 171 | 71 | GPL-2.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,299 | test_formatter_types.py | landscapeio_prospector/tests/formatters/test_formatter_types.py | import datetime
from pathlib import Path
import pytest
from prospector.formatters import FORMATTERS
from prospector.message import Location, Message
from prospector.profiles.profile import ProspectorProfile
@pytest.fixture
def _simple_profile() -> ProspectorProfile:
    """An empty profile that inherits only from itself."""
    return ProspectorProfile(
        name="horse",
        profile_dict={},
        inherit_order=["horse"],
    )
@pytest.fixture
def _simple_summary() -> dict:
    """A minimal run-summary dict carrying every key the formatters read."""
    return dict(
        started=datetime.datetime(2014, 1, 1),
        completed=datetime.datetime(2014, 1, 1),
        message_count=0,
        time_taken="0",
        libraries=[],
        strictness="veryhigh",
        profiles="",
        tools=[],
    )
def test_formatter_types(_simple_summary, _simple_profile):
    """Every registered formatter renders an empty report to a ``str``.

    The original loop iterated ``FORMATTERS.items()`` and discarded the key;
    iterating ``.values()`` says the same thing more directly.
    """
    for formatter_class in FORMATTERS.values():
        formatter = formatter_class(_simple_summary, [], _simple_profile)
        assert isinstance(formatter.render(True, True, False), str)
def test_formatters_render(_simple_summary, _simple_profile):
    """
    Basic test to ensure that formatters can at least render messages
    without erroring.
    """
    # The message list does not depend on the formatter, so build it once
    # instead of rebuilding it on every loop iteration; the unused dict key
    # iteration over FORMATTERS.items() is also replaced with .values().
    messages = [
        Message(
            "testtool",
            "oh-no",
            Location(Path(__file__), "formatters/test_formatter_types", "test_formatters_render", 39, 12),
            "testing formatters work",
        )
    ]
    for formatter_class in FORMATTERS.values():
        formatter_class(_simple_summary, messages, _simple_profile).render(True, True, False)
| 1,593 | Python | .py | 40 | 32.775 | 110 | 0.659754 | landscapeio/prospector | 1,933 | 171 | 71 | GPL-2.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |